From 1794a45572a30a493a5c10fcdc45ba0fa3ac4218 Mon Sep 17 00:00:00 2001 From: Koakuma Date: Wed, 4 Jan 2023 10:46:27 +0700 Subject: stage2: sparc64: Add stub for c_va_* --- src/arch/sparc64/CodeGen.zig | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'src/arch') diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index cc5c9e9832..58b384c832 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -720,10 +720,10 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .error_set_has_value => @panic("TODO implement error_set_has_value"), .vector_store_elem => @panic("TODO implement vector_store_elem"), - .c_va_arg => @panic("TODO implement c_va_arg"), - .c_va_copy => @panic("TODO implement c_va_copy"), - .c_va_end => @panic("TODO implement c_va_end"), - .c_va_start => @panic("TODO implement c_va_start"), + .c_va_arg => return self.fail("TODO implement c_va_arg", .{}), + .c_va_copy => return self.fail("TODO implement c_va_copy", .{}), + .c_va_end => return self.fail("TODO implement c_va_end", .{}), + .c_va_start => return self.fail("TODO implement c_va_start", .{}), .wasm_memory_size => unreachable, .wasm_memory_grow => unreachable, -- cgit v1.2.3 From cc2a5185d631b8b33eb5d466cc52ceb092435fd4 Mon Sep 17 00:00:00 2001 From: Koakuma Date: Wed, 4 Jan 2023 10:58:40 +0700 Subject: stage2: sparc64: Implement airStructFieldPtr --- src/arch/sparc64/CodeGen.zig | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) (limited to 'src/arch') diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index 58b384c832..d682ce8c2b 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -595,7 +595,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .ret_load => try self.airRetLoad(inst), .store => try self.airStore(inst, false), .store_safe => try self.airStore(inst, true), - .struct_field_ptr=> @panic("TODO try self.airStructFieldPtr(inst)"), + .struct_field_ptr=> try self.airStructFieldPtr(inst), .struct_field_val=> try self.airStructFieldVal(inst), .array_to_slice => try self.airArrayToSlice(inst), .int_to_float => try self.airIntToFloat(inst), @@ -2425,6 +2425,13 @@ fn airStore(self: *Self, inst: Air.Inst.Index, safety: bool) !void { return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none }); } +fn airStructFieldPtr(self: *Self, inst: Air.Inst.Index) !void { + const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; + const extra = self.air.extraData(Air.StructField, ty_pl.payload).data; + const result = try self.structFieldPtr(inst, extra.struct_operand, extra.field_index); + return self.finishAir(inst, result, .{ extra.struct_operand, .none, .none }); +} + fn airStructFieldPtrIndex(self: *Self, inst: Air.Inst.Index, index: u8) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result = try self.structFieldPtr(inst, ty_op.operand, index); -- cgit v1.2.3 From 486ab3852e22f8d5ba474691a5b068f1f1729f2e Mon Sep 17 00:00:00 2001 From: Koakuma Date: Wed, 4 Jan 2023 12:53:11 +0700 Subject: stage2: sparc64: Factor machine offset calculation --- src/arch/sparc64/CodeGen.zig | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) (limited to 'src/arch') diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index d682ce8c2b..3d82cad4ec 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -141,6 +141,8 @@ const MCValue = union(enum) { /// The value is one of 
the stack variables.
/// If the type is a pointer, it means the pointer address is in the stack at this offset.
/// Note that this stores the plain value (i.e without the effects of the stack bias).
+ /// Always convert this value into machine offsets with realStackOffset() before
+ /// lowering into asm!
stack_offset: u32,
/// The value is a pointer to one of the stack variables (payload is stack offset).
ptr_stack_offset: u32,
@@ -3651,7 +3653,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
return self.genSetReg(ty, reg, .{ .immediate = 0xaaaaaaaaaaaaaaaa });
},
.ptr_stack_offset => |off| {
- const real_offset = off + abi.stack_bias + abi.stack_reserved_area;
+ const real_offset = realStackOffset(off);
const simm13 = math.cast(i13, real_offset) orelse
return self.fail("TODO larger stack offsets: {}", .{real_offset});

@@ -3783,7 +3785,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
try self.genLoad(reg, reg, i13, 0, ty.abiSize(self.target.*));
},
.stack_offset => |off| {
- const real_offset = off + abi.stack_bias + abi.stack_reserved_area;
+ const real_offset = realStackOffset(off);
const simm13 = math.cast(i13, real_offset) orelse
return self.fail("TODO larger stack offsets: {}", .{real_offset});
try self.genLoad(reg, .sp, i13, simm13, ty.abiSize(self.target.*));
@@ -3817,7 +3819,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
return self.genSetStack(ty, stack_offset, MCValue{ .register = reg });
},
.register => |reg| {
- const real_offset = stack_offset + abi.stack_bias + abi.stack_reserved_area;
+ const real_offset = realStackOffset(stack_offset);
const simm13 = math.cast(i13, real_offset) orelse
return self.fail("TODO larger stack offsets: {}", .{real_offset});
return self.genStore(reg, .sp, i13, simm13, abi_size);
@@ -4252,6 +4254,17 @@ fn processDeath(self: *Self, inst: Air.Inst.Index) void {
}
}

+/// Turns stack_offset MCV into a real SPARCv9 stack offset usable for asm.
+fn realStackOffset(off: u32) u32 {
+ return off
+ // SPARCv9 %sp points away from the stack by some amount.
+ + abi.stack_bias
+ // The first couple of bytes of each stack frame are reserved
+ // for ABI and hardware purposes.
+ + abi.stack_reserved_area;
+ // Only after that do we have the usable stack frame portion.
+}
+
/// Caller must call `CallMCValues.deinit`.
fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView) !CallMCValues {
const cc = fn_ty.fnCallingConvention();
-- cgit v1.2.3


From 83e6223192acd635275e67614e55b7a4c579a969 Mon Sep 17 00:00:00 2001
From: Koakuma
Date: Wed, 4 Jan 2023 16:38:15 +0700
Subject: stage2: sparc64: Implement airByteSwap

---
 src/arch/sparc64/CodeGen.zig | 156 +++++++++++++++++++++++++++++++++++++++++--
 src/arch/sparc64/Emit.zig | 10 +++
 src/arch/sparc64/Mir.zig | 30 +++++++++
 3 files changed, 189 insertions(+), 7 deletions(-)

(limited to 'src/arch')

diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig
index 3d82cad4ec..a4fa7e179c 100644
--- a/src/arch/sparc64/CodeGen.zig
+++ b/src/arch/sparc64/CodeGen.zig
@@ -22,6 +22,7 @@ const Type = @import("../../type.zig").Type;
 const CodeGenError = codegen.CodeGenError;
 const Result = @import("../../codegen.zig").Result;
 const DebugInfoOutput = @import("../../codegen.zig").DebugInfoOutput;
+const Endian = std.builtin.Endian;

 const build_options = @import("build_options");

@@ -30,6 +31,7 @@ const abi = @import("abi.zig");
 const errUnionPayloadOffset = codegen.errUnionPayloadOffset;
 const errUnionErrorOffset = codegen.errUnionErrorOffset;
 const Instruction = bits.Instruction;
+const ASI = Instruction.ASI;
 const ShiftWidth = Instruction.ShiftWidth;
 const RegisterManager = abi.RegisterManager;
 const RegisterLock = RegisterManager.RegisterLock;
@@ -615,7 +617,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
 .clz => try self.airClz(inst),
 .ctz => try self.airCtz(inst),
 .popcount => try self.airPopcount(inst),
- .byte_swap => @panic("TODO try self.airByteSwap(inst)"),
+ .byte_swap => try self.airByteSwap(inst),
 .bit_reverse => try self.airBitReverse(inst),
 .tag_name => try self.airTagName(inst),
 .error_name => try self.airErrorName(inst),
@@ -1200,6 +1202,90 @@ fn airBreakpoint(self: *Self) !void {
 return self.finishAirBookkeeping();
 }

+fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void {
+ const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+
+ // We have a hardware byteswapper in SPARCv9; don't let mainstream compilers mislead you.
+ // That being said, the strategy to lower this is:
+ // - If src is an immediate, comptime-swap it.
+ // - If src is in memory then issue an LD*A with #ASI_P_[opposite-endian]
+ // - If src is a register then issue an ST*A with #ASI_P_[opposite-endian]
+ // to a stack slot, then follow with a normal load from said stack slot.
+ // This is because on some implementations, ASI-tagged memory operations are non-pipelinable
+ // and loads tend to have longer latency than stores, so the sequence will minimize stalls.
+ // The result will always be either another immediate or stored in a register.
+ // TODO: Fold byteswap+store into a single ST*A and load+byteswap into a single LD*A.
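+ //
+ // As an illustration (the register choices here are hypothetical), byteswapping a u64
+ // held in %o0 on a big-endian target, with the swap slot's machine offset already
+ // materialized into %o1, lowers to roughly:
+ //   stxa %o0, [%sp + %o1] #ASI_P_L   ! byte-reversing store into the slot
+ //   ldx  [%sp + %o1], %o0            ! plain reload yields the swapped value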
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const operand = try self.resolveInst(ty_op.operand); + const operand_ty = self.air.typeOf(ty_op.operand); + switch (operand_ty.zigTypeTag()) { + .Vector => return self.fail("TODO byteswap for vectors", .{}), + .Int => { + const int_info = operand_ty.intInfo(self.target.*); + if (int_info.bits == 8) break :result operand; + + const abi_size = int_info.bits >> 3; + const abi_align = operand_ty.abiAlignment(self.target.*); + const opposite_endian_asi = switch (self.target.cpu.arch.endian()) { + Endian.Big => ASI.asi_primary_little, + Endian.Little => ASI.asi_primary, + }; + + switch (operand) { + .immediate => |imm| { + const swapped = switch (int_info.bits) { + 16 => @byteSwap(@intCast(u16, imm)), + 24 => @byteSwap(@intCast(u24, imm)), + 32 => @byteSwap(@intCast(u32, imm)), + 40 => @byteSwap(@intCast(u40, imm)), + 48 => @byteSwap(@intCast(u48, imm)), + 56 => @byteSwap(@intCast(u56, imm)), + 64 => @byteSwap(@intCast(u64, imm)), + else => return self.fail("TODO synthesize SPARCv9 byteswap for other integer sizes", .{}), + }; + break :result .{ .immediate = swapped }; + }, + .register => |reg| { + if (int_info.bits > 64 or @popCount(int_info.bits) != 1) + return self.fail("TODO synthesize SPARCv9 byteswap for other integer sizes", .{}); + + const off = try self.allocMem(inst, abi_size, abi_align); + const off_reg = try self.copyToTmpRegister(operand_ty, .{ .immediate = realStackOffset(off) }); + + try self.genStoreASI(reg, .sp, off_reg, abi_size, opposite_endian_asi); + try self.genLoad(reg, .sp, Register, off_reg, abi_size); + break :result reg; + }, + .memory => { + if (int_info.bits > 64 or @popCount(int_info.bits) != 1) + return self.fail("TODO synthesize SPARCv9 byteswap for other integer sizes", .{}); + + const addr_reg = try self.copyToTmpRegister(operand_ty, operand); + const dst_reg = try self.register_manager.allocReg(null, gp); + + try self.genLoadASI(dst_reg, addr_reg, .g0, abi_size, opposite_endian_asi); + break :result dst_reg; + }, + .stack_offset => |off| { + if (int_info.bits > 64 or @popCount(int_info.bits) != 1) + return self.fail("TODO synthesize SPARCv9 byteswap for other integer sizes", .{}); + + const off_reg = try self.copyToTmpRegister(operand_ty, .{ .immediate = realStackOffset(off) }); + const dst_reg = try self.register_manager.allocReg(null, gp); + + try self.genLoadASI(dst_reg, .sp, off_reg, abi_size, opposite_endian_asi); + break :result dst_reg; + }, + else => unreachable, + } + }, + else => unreachable, + } + }; + + return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); +} + fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier) !void { if (modifier == .always_tail) return self.fail("TODO implement tail calls for {}", .{self.target.cpu.arch}); @@ -3583,6 +3669,34 @@ fn genLoad(self: *Self, value_reg: Register, addr_reg: Register, comptime off_ty } } +fn genLoadASI(self: *Self, value_reg: Register, addr_reg: Register, off_reg: Register, abi_size: u64, asi: ASI) !void { + switch (abi_size) { + 1, 2, 4, 8 => { + const tag: Mir.Inst.Tag = switch (abi_size) { + 1 => .lduba, + 2 => .lduha, + 4 => .lduwa, + 8 => .ldxa, + else => unreachable, // unexpected abi size + }; + + _ = try self.addInst(.{ + .tag = tag, + .data = .{ + .mem_asi = .{ + .rd = value_reg, + .rs1 = addr_reg, + .rs2 = off_reg, + .asi = asi, + }, + }, + }); + }, + 3, 5, 6, 7 => return self.fail("TODO: genLoad for more abi_sizes", .{}), + else => unreachable, + } +} + fn 
genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void {
 switch (mcv) {
 .dead => unreachable,
@@ -3942,6 +4056,34 @@ fn genStore(self: *Self, value_reg: Register, addr_reg: Register, comptime off_t
 }
 }

+fn genStoreASI(self: *Self, value_reg: Register, addr_reg: Register, off_reg: Register, abi_size: u64, asi: ASI) !void {
+ switch (abi_size) {
+ 1, 2, 4, 8 => {
+ const tag: Mir.Inst.Tag = switch (abi_size) {
+ 1 => .stba,
+ 2 => .stha,
+ 4 => .stwa,
+ 8 => .stxa,
+ else => unreachable, // unexpected abi size
+ };
+
+ _ = try self.addInst(.{
+ .tag = tag,
+ .data = .{
+ .mem_asi = .{
+ .rd = value_reg,
+ .rs1 = addr_reg,
+ .rs2 = off_reg,
+ .asi = asi,
+ },
+ },
+ });
+ },
+ 3, 5, 6, 7 => return self.fail("TODO: genStore for more abi_sizes", .{}),
+ else => unreachable,
+ }
+}
+
 fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
 const mcv: MCValue = switch (try codegen.genTypedValue(
 self.bin_file,
@@ -4257,12 +4399,12 @@ fn processDeath(self: *Self, inst: Air.Inst.Index) void {
 }
 }

 /// Turns stack_offset MCV into a real SPARCv9 stack offset usable for asm.
 fn realStackOffset(off: u32) u32 {
 return off
- // SPARCv9 %sp points away from the stack by some amount.
- + abi.stack_bias
- // The first couple of bytes of each stack frame are reserved
- // for ABI and hardware purposes.
- + abi.stack_reserved_area;
- // Only after that do we have the usable stack frame portion.
+ // SPARCv9 %sp points away from the stack by some amount.
+ + abi.stack_bias
+ // The first couple of bytes of each stack frame are reserved
+ // for ABI and hardware purposes.
+ + abi.stack_reserved_area;
+ // Only after that do we have the usable stack frame portion.
 }

 /// Caller must call `CallMCValues.deinit`.
diff --git a/src/arch/sparc64/Emit.zig b/src/arch/sparc64/Emit.zig
index 7e71492af7..c0dbab5a14 100644
--- a/src/arch/sparc64/Emit.zig
+++ b/src/arch/sparc64/Emit.zig
@@ -91,6 +91,11 @@ pub fn emitMir(
 .lduw => try emit.mirArithmetic3Op(inst),
 .ldx => try emit.mirArithmetic3Op(inst),

+ .lduba => unreachable,
+ .lduha => unreachable,
+ .lduwa => unreachable,
+ .ldxa => unreachable,
+
 .@"and" => try emit.mirArithmetic3Op(inst),
 .@"or" => try emit.mirArithmetic3Op(inst),
 .xor => try emit.mirArithmetic3Op(inst),
@@ -127,6 +132,11 @@ pub fn emitMir(
 .stw => try emit.mirArithmetic3Op(inst),
 .stx => try emit.mirArithmetic3Op(inst),

+ .stba => unreachable,
+ .stha => unreachable,
+ .stwa => unreachable,
+ .stxa => unreachable,
+
 .sub => try emit.mirArithmetic3Op(inst),
 .subcc => try emit.mirArithmetic3Op(inst),

diff --git a/src/arch/sparc64/Mir.zig b/src/arch/sparc64/Mir.zig
index f854152a2f..f9a4056705 100644
--- a/src/arch/sparc64/Mir.zig
+++ b/src/arch/sparc64/Mir.zig
@@ -15,6 +15,7 @@ const bits = @import("bits.zig");
 const Air = @import("../../Air.zig");

 const Instruction = bits.Instruction;
+const ASI = bits.Instruction.ASI;
 const Register = bits.Register;

 instructions: std.MultiArrayList(Inst).Slice,
@@ -70,6 +71,16 @@ pub const Inst = struct {
 lduw,
 ldx,

+ /// A.28 Load Integer from Alternate Space
+ /// This uses the mem_asi field.
+ /// Note that the ldda variant of this instruction is deprecated, so do not emit
+ /// it unless specifically requested (e.g. by inline assembly).
+ // TODO add other operations.
+ lduba,
+ lduha,
+ lduwa,
+ ldxa,
+
 /// A.31 Logical Operations
 /// This uses the arithmetic_3op field.
 // TODO add other operations.
@@ -132,6 +143,16 @@ pub const Inst = struct {
 stw,
 stx,

+ /// A.55 Store Integer into Alternate Space
+ /// This uses the mem_asi field.
+ /// Note that the stda variant of this instruction is deprecated, so do not emit + /// it unless specifically requested (e.g. by inline assembly). + // TODO add other operations. + stba, + stha, + stwa, + stxa, + /// A.56 Subtract /// This uses the arithmetic_3op field. // TODO add other operations. @@ -241,6 +262,15 @@ pub const Inst = struct { inst: Index, }, + /// ASI-tagged memory operations. + /// Used by e.g. ldxa, stxa + mem_asi: struct { + rd: Register, + rs1: Register, + rs2: Register = .g0, + asi: ASI, + }, + /// Membar mask, controls the barrier behavior /// Used by e.g. membar membar_mask: struct { -- cgit v1.2.3 From 75a1360cdd1745ac267c9a25b84ec7ee765d9262 Mon Sep 17 00:00:00 2001 From: Koakuma Date: Mon, 20 Feb 2023 22:40:08 +0700 Subject: stage2: sparc64: Implement ASI load/store ops --- src/arch/sparc64/CodeGen.zig | 6 +++--- src/arch/sparc64/Emit.zig | 39 +++++++++++++++++++++++++++++++-------- src/arch/sparc64/bits.zig | 32 ++++++++++++++++++++++++++++++++ 3 files changed, 66 insertions(+), 11 deletions(-) (limited to 'src/arch') diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index a4fa7e179c..7c9476161c 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -1254,7 +1254,7 @@ fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void { try self.genStoreASI(reg, .sp, off_reg, abi_size, opposite_endian_asi); try self.genLoad(reg, .sp, Register, off_reg, abi_size); - break :result reg; + break :result .{ .register = reg }; }, .memory => { if (int_info.bits > 64 or @popCount(int_info.bits) != 1) @@ -1264,7 +1264,7 @@ fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void { const dst_reg = try self.register_manager.allocReg(null, gp); try self.genLoadASI(dst_reg, addr_reg, .g0, abi_size, opposite_endian_asi); - break :result dst_reg; + break :result .{ .register = dst_reg }; }, .stack_offset => |off| { if (int_info.bits > 64 or @popCount(int_info.bits) != 1) @@ -1274,7 +1274,7 @@ fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void { const dst_reg = try self.register_manager.allocReg(null, gp); try self.genLoadASI(dst_reg, .sp, off_reg, abi_size, opposite_endian_asi); - break :result dst_reg; + break :result .{ .register = dst_reg }; }, else => unreachable, } diff --git a/src/arch/sparc64/Emit.zig b/src/arch/sparc64/Emit.zig index c0dbab5a14..7d16105348 100644 --- a/src/arch/sparc64/Emit.zig +++ b/src/arch/sparc64/Emit.zig @@ -91,10 +91,10 @@ pub fn emitMir( .lduw => try emit.mirArithmetic3Op(inst), .ldx => try emit.mirArithmetic3Op(inst), - .lduba => unreachable, - .lduha => unreachable, - .lduwa => unreachable, - .ldxa => unreachable, + .lduba => try emit.mirMemASI(inst), + .lduha => try emit.mirMemASI(inst), + .lduwa => try emit.mirMemASI(inst), + .ldxa => try emit.mirMemASI(inst), .@"and" => try emit.mirArithmetic3Op(inst), .@"or" => try emit.mirArithmetic3Op(inst), @@ -132,10 +132,10 @@ pub fn emitMir( .stw => try emit.mirArithmetic3Op(inst), .stx => try emit.mirArithmetic3Op(inst), - .stba => unreachable, - .stha => unreachable, - .stwa => unreachable, - .stxa => unreachable, + .stba => try emit.mirMemASI(inst), + .stha => try emit.mirMemASI(inst), + .stwa => try emit.mirMemASI(inst), + .stxa => try emit.mirMemASI(inst), .sub => try emit.mirArithmetic3Op(inst), .subcc => try emit.mirArithmetic3Op(inst), @@ -378,6 +378,29 @@ fn mirConditionalMove(emit: *Emit, inst: Mir.Inst.Index) !void { } } +fn mirMemASI(emit: *Emit, inst: Mir.Inst.Index) !void { + const tag = emit.mir.instructions.items(.tag)[inst]; + const data = 
emit.mir.instructions.items(.data)[inst].mem_asi; + + const rd = data.rd; + const rs1 = data.rs1; + const rs2 = data.rs2; + const asi = data.asi; + + switch (tag) { + .lduba => try emit.writeInstruction(Instruction.lduba(rs1, rs2, asi, rd)), + .lduha => try emit.writeInstruction(Instruction.lduha(rs1, rs2, asi, rd)), + .lduwa => try emit.writeInstruction(Instruction.lduwa(rs1, rs2, asi, rd)), + .ldxa => try emit.writeInstruction(Instruction.ldxa(rs1, rs2, asi, rd)), + + .stba => try emit.writeInstruction(Instruction.stba(rs1, rs2, asi, rd)), + .stha => try emit.writeInstruction(Instruction.stha(rs1, rs2, asi, rd)), + .stwa => try emit.writeInstruction(Instruction.stwa(rs1, rs2, asi, rd)), + .stxa => try emit.writeInstruction(Instruction.stxa(rs1, rs2, asi, rd)), + else => unreachable, + } +} + fn mirMembar(emit: *Emit, inst: Mir.Inst.Index) !void { const tag = emit.mir.instructions.items(.tag)[inst]; const mask = emit.mir.instructions.items(.data)[inst].membar_mask; diff --git a/src/arch/sparc64/bits.zig b/src/arch/sparc64/bits.zig index 0446a84d6d..7c943626f9 100644 --- a/src/arch/sparc64/bits.zig +++ b/src/arch/sparc64/bits.zig @@ -1229,6 +1229,22 @@ pub const Instruction = union(enum) { }; } + pub fn lduba(rs1: Register, rs2: Register, asi: ASI, rd: Register) Instruction { + return format3i(0b11, 0b01_0001, rs1, rs2, rd, asi); + } + + pub fn lduha(rs1: Register, rs2: Register, asi: ASI, rd: Register) Instruction { + return format3i(0b11, 0b01_0010, rs1, rs2, rd, asi); + } + + pub fn lduwa(rs1: Register, rs2: Register, asi: ASI, rd: Register) Instruction { + return format3i(0b11, 0b01_0000, rs1, rs2, rd, asi); + } + + pub fn ldxa(rs1: Register, rs2: Register, asi: ASI, rd: Register) Instruction { + return format3i(0b11, 0b01_1011, rs1, rs2, rd, asi); + } + pub fn @"and"(comptime s2: type, rs1: Register, rs2: s2, rd: Register) Instruction { return switch (s2) { Register => format3a(0b10, 0b00_0001, rs1, rs2, rd), @@ -1417,6 +1433,22 @@ pub const Instruction = union(enum) { }; } + pub fn stba(rs1: Register, rs2: Register, asi: ASI, rd: Register) Instruction { + return format3i(0b11, 0b01_0101, rs1, rs2, rd, asi); + } + + pub fn stha(rs1: Register, rs2: Register, asi: ASI, rd: Register) Instruction { + return format3i(0b11, 0b01_0110, rs1, rs2, rd, asi); + } + + pub fn stwa(rs1: Register, rs2: Register, asi: ASI, rd: Register) Instruction { + return format3i(0b11, 0b01_0100, rs1, rs2, rd, asi); + } + + pub fn stxa(rs1: Register, rs2: Register, asi: ASI, rd: Register) Instruction { + return format3i(0b11, 0b01_1110, rs1, rs2, rd, asi); + } + pub fn sub(comptime s2: type, rs1: Register, rs2: s2, rd: Register) Instruction { return switch (s2) { Register => format3a(0b10, 0b00_0100, rs1, rs2, rd), -- cgit v1.2.3 From ccc9b8caf632bf97f7765c1b1d7821118cf34008 Mon Sep 17 00:00:00 2001 From: Koakuma Date: Mon, 24 Apr 2023 13:23:17 +0700 Subject: stage2: sparc64: Implement airPtrSliceLenPtr/airPtrSlicePtrPtr stubs --- src/arch/sparc64/CodeGen.zig | 36 ++++++++++++++++++++++++++++++++++-- 1 file changed, 34 insertions(+), 2 deletions(-) (limited to 'src/arch') diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index 7c9476161c..226a0c6fc9 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -667,8 +667,8 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .slice_ptr => try self.airSlicePtr(inst), .slice_len => try self.airSliceLen(inst), - .ptr_slice_len_ptr => @panic("TODO try self.airPtrSliceLenPtr(inst)"), - .ptr_slice_ptr_ptr 
=> @panic("TODO try self.airPtrSlicePtrPtr(inst)"), + .ptr_slice_len_ptr => try self.airPtrSliceLenPtr(inst), + .ptr_slice_ptr_ptr => try self.airPtrSlicePtrPtr(inst), .array_elem_val => try self.airArrayElemVal(inst), .slice_elem_val => try self.airSliceElemVal(inst), @@ -2238,6 +2238,38 @@ fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void { return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none }); } +fn airPtrSliceLenPtr(self: *Self, inst: Air.Inst.Index) !void { + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const ptr_bits = self.target.cpu.arch.ptrBitWidth(); + const ptr_bytes = @divExact(ptr_bits, 8); + const mcv = try self.resolveInst(ty_op.operand); + switch (mcv) { + .dead, .unreach, .none => unreachable, + .ptr_stack_offset => |off| { + break :result MCValue{ .ptr_stack_offset = off - ptr_bytes }; + }, + else => return self.fail("TODO implement ptr_slice_len_ptr for {}", .{mcv}), + } + }; + return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); +} + +fn airPtrSlicePtrPtr(self: *Self, inst: Air.Inst.Index) !void { + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const mcv = try self.resolveInst(ty_op.operand); + switch (mcv) { + .dead, .unreach, .none => unreachable, + .ptr_stack_offset => |off| { + break :result MCValue{ .ptr_stack_offset = off }; + }, + else => return self.fail("TODO implement ptr_slice_len_ptr for {}", .{mcv}), + } + }; + return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); +} + fn airPtrToInt(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const result = try self.resolveInst(un_op); -- cgit v1.2.3 From 7c9891d7b7d09060630693231702f27669dd0dc9 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Wed, 26 Apr 2023 22:51:35 -0400 Subject: x86_64: use std.log for debug logging --- src/arch/x86_64/CodeGen.zig | 137 ++++++++++++++++++++++++++++++-------------- src/print_air.zig | 12 +++- 2 files changed, 104 insertions(+), 45 deletions(-) (limited to 'src/arch') diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index be972d7aea..b862252561 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -7,6 +7,8 @@ const leb128 = std.leb; const link = @import("../../link.zig"); const log = std.log.scoped(.codegen); const tracking_log = std.log.scoped(.tracking); +const verbose_tracking_log = std.log.scoped(.verbose_tracking); +const wip_mir_log = std.log.scoped(.wip_mir); const math = std.math; const mem = std.mem; const trace = @import("../../tracy.zig").trace; @@ -48,9 +50,6 @@ const sse = abi.RegisterClass.sse; const InnerError = CodeGenError || error{OutOfRegisters}; -const debug_wip_mir = false; -const debug_tracking = false; - gpa: Allocator, air: Air, liveness: Liveness, @@ -575,12 +574,6 @@ pub fn generate( assert(fn_owner_decl.has_tv); const fn_type = fn_owner_decl.ty; - if (debug_wip_mir) { - const stderr = std.io.getStdErr().writer(); - fn_owner_decl.renderFullyQualifiedName(mod, stderr) catch {}; - stderr.writeAll(":\n") catch {}; - } - const gpa = bin_file.allocator; var function = Self{ .gpa = gpa, @@ -614,6 +607,8 @@ pub fn generate( if (builtin.mode == .Debug) function.mir_to_air_map.deinit(gpa); } + wip_mir_log.debug("{}:", .{function.fmtDecl(module_fn.owner_decl)}); + try function.frame_allocs.resize(gpa, 
FrameIndex.named_count); function.frame_allocs.set( @enumToInt(FrameIndex.stack_frame), @@ -715,48 +710,104 @@ pub fn generate( } } -fn dumpWipMir(self: *Self, inst: Mir.Inst) !void { - if (!debug_wip_mir) return; - const stderr = std.io.getStdErr().writer(); +const FormatDeclData = struct { + mod: *Module, + decl_index: Module.Decl.Index, +}; +fn formatDecl( + data: FormatDeclData, + comptime _: []const u8, + _: std.fmt.FormatOptions, + writer: anytype, +) @TypeOf(writer).Error!void { + try data.mod.declPtr(data.decl_index).renderFullyQualifiedName(data.mod, writer); +} +fn fmtDecl(self: *Self, decl_index: Module.Decl.Index) std.fmt.Formatter(formatDecl) { + return .{ .data = .{ + .mod = self.bin_file.options.module.?, + .decl_index = decl_index, + } }; +} + +const FormatAirData = struct { + self: *Self, + inst: Air.Inst.Index, +}; +fn formatAir( + data: FormatAirData, + comptime _: []const u8, + _: std.fmt.FormatOptions, + writer: anytype, +) @TypeOf(writer).Error!void { + @import("../../print_air.zig").dumpInst( + data.inst, + data.self.bin_file.options.module.?, + data.self.air, + data.self.liveness, + ); +} +fn fmtAir(self: *Self, inst: Air.Inst.Index) std.fmt.Formatter(formatAir) { + return .{ .data = .{ .self = self, .inst = inst } }; +} +const FormatWipMirData = struct { + self: *Self, + inst: Mir.Inst.Index, +}; +fn formatWipMir( + data: FormatWipMirData, + comptime _: []const u8, + _: std.fmt.FormatOptions, + writer: anytype, +) @TypeOf(writer).Error!void { var lower = Lower{ - .allocator = self.gpa, + .allocator = data.self.gpa, .mir = .{ - .instructions = self.mir_instructions.slice(), - .extra = self.mir_extra.items, + .instructions = data.self.mir_instructions.slice(), + .extra = data.self.mir_extra.items, .frame_locs = (std.MultiArrayList(Mir.FrameLoc){}).slice(), }, - .target = self.target, - .src_loc = self.src_loc, + .target = data.self.target, + .src_loc = data.self.src_loc, }; - for (lower.lowerMir(inst) catch |err| switch (err) { + for (lower.lowerMir(data.self.mir_instructions.get(data.inst)) catch |err| switch (err) { error.LowerFail => { defer { - lower.err_msg.?.deinit(self.gpa); + lower.err_msg.?.deinit(data.self.gpa); lower.err_msg = null; } - try stderr.print("{s}\n", .{lower.err_msg.?.msg}); + try writer.writeAll(lower.err_msg.?.msg); return; }, - error.InvalidInstruction, error.CannotEncode => |e| { - try stderr.writeAll(switch (e) { - error.InvalidInstruction => "CodeGen failed to find a viable instruction.\n", - error.CannotEncode => "CodeGen failed to encode the instruction.\n", + error.OutOfMemory, error.InvalidInstruction, error.CannotEncode => |e| { + try writer.writeAll(switch (e) { + error.OutOfMemory => "Out of memory", + error.InvalidInstruction => "CodeGen failed to find a viable instruction.", + error.CannotEncode => "CodeGen failed to encode the instruction.", }); return; }, else => |e| return e, - }) |lower_inst| { - try stderr.print(" | {}\n", .{lower_inst}); - } + }) |lower_inst| try writer.print(" | {}", .{lower_inst}); +} +fn fmtWipMir(self: *Self, inst: Mir.Inst.Index) std.fmt.Formatter(formatWipMir) { + return .{ .data = .{ .self = self, .inst = inst } }; } -fn dumpTracking(self: *Self) !void { - if (!debug_tracking) return; - const stderr = std.io.getStdErr().writer(); - - var it = self.inst_tracking.iterator(); - while (it.next()) |entry| try stderr.print("%{d} = {}\n", .{ entry.key_ptr.*, entry.value_ptr.* }); +const FormatTrackingData = struct { + self: *Self, +}; +fn formatTracking( + data: FormatTrackingData, + comptime _: []const u8, 
+ _: std.fmt.FormatOptions, + writer: anytype, +) @TypeOf(writer).Error!void { + var it = data.self.inst_tracking.iterator(); + while (it.next()) |entry| try writer.print("\n%{d} = {}", .{ entry.key_ptr.*, entry.value_ptr.* }); +} +fn fmtTracking(self: *Self) std.fmt.Formatter(formatTracking) { + return .{ .data = .{ .self = self } }; } fn addInst(self: *Self, inst: Mir.Inst) error{OutOfMemory}!Mir.Inst.Index { @@ -764,7 +815,14 @@ fn addInst(self: *Self, inst: Mir.Inst) error{OutOfMemory}!Mir.Inst.Index { try self.mir_instructions.ensureUnusedCapacity(gpa, 1); const result_index = @intCast(Mir.Inst.Index, self.mir_instructions.len); self.mir_instructions.appendAssumeCapacity(inst); - self.dumpWipMir(inst) catch {}; + switch (inst.tag) { + else => wip_mir_log.debug("{}", .{self.fmtWipMir(result_index)}), + .dbg_line, + .dbg_prologue_end, + .dbg_epilogue_begin, + .dead, + => {}, + } return result_index; } @@ -1186,13 +1244,8 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { } if (self.liveness.isUnused(inst) and !self.air.mustLower(inst)) continue; - if (debug_wip_mir) @import("../../print_air.zig").dumpInst( - inst, - self.bin_file.options.module.?, - self.air, - self.liveness, - ); - self.dumpTracking() catch {}; + wip_mir_log.debug("{}", .{self.fmtAir(inst)}); + verbose_tracking_log.debug("{}", .{self.fmtTracking()}); const old_air_bookkeeping = self.air_bookkeeping; try self.inst_tracking.ensureUnusedCapacity(self.gpa, 1); @@ -1453,7 +1506,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { } } } - self.dumpTracking() catch {}; + verbose_tracking_log.debug("{}", .{self.fmtTracking()}); } fn getValue(self: *Self, value: MCValue, inst: ?Air.Inst.Index) void { diff --git a/src/print_air.zig b/src/print_air.zig index 2d7995842f..d90d31ec67 100644 --- a/src/print_air.zig +++ b/src/print_air.zig @@ -94,14 +94,20 @@ const Writer = struct { for (w.air.instructions.items(.tag), 0..) 
|tag, i| { const inst = @intCast(Air.Inst.Index, i); switch (tag) { - .constant, .const_ty => try w.writeInst(s, inst), + .constant, .const_ty => { + try w.writeInst(s, inst); + try s.writeByte('\n'); + }, else => continue, } } } fn writeBody(w: *Writer, s: anytype, body: []const Air.Inst.Index) @TypeOf(s).Error!void { - for (body) |inst| try w.writeInst(s, inst); + for (body) |inst| { + try w.writeInst(s, inst); + try s.writeByte('\n'); + } } fn writeInst(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { @@ -336,7 +342,7 @@ const Writer = struct { .work_group_id, => try w.writeWorkDimension(s, inst), } - try s.writeAll(")\n"); + try s.writeByte(')'); } fn writeBinOp(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { -- cgit v1.2.3 From 4ec49da5f6a6a8e77cdb66b8f814718bf11fffef Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Fri, 28 Apr 2023 20:39:38 -0400 Subject: x86_64: implement a bunch of floating point stuff --- src/arch/x86_64/CodeGen.zig | 438 ++++++++++++++++++++++++++++++++++-------- src/arch/x86_64/Encoding.zig | 27 +-- src/arch/x86_64/Lower.zig | 6 + src/arch/x86_64/Mir.zig | 12 ++ src/arch/x86_64/encoder.zig | 2 +- src/arch/x86_64/encodings.zig | 14 ++ src/codegen.zig | 103 +++++++--- 7 files changed, 486 insertions(+), 116 deletions(-) (limited to 'src/arch') diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index b862252561..d9c7298c95 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -1297,9 +1297,10 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .ceil, .round, .trunc_float, - .neg, => try self.airUnaryMath(inst), + .neg => try self.airNeg(inst), + .add_with_overflow => try self.airAddSubWithOverflow(inst), .sub_with_overflow => try self.airAddSubWithOverflow(inst), .mul_with_overflow => try self.airMulWithOverflow(inst), @@ -1881,7 +1882,7 @@ pub fn spillRegisters(self: *Self, registers: []const Register) !void { /// allocated. A second call to `copyToTmpRegister` may return the same register. /// This can have a side effect of spilling instructions to the stack to free up a register. 
fn copyToTmpRegister(self: *Self, ty: Type, mcv: MCValue) !Register { - const reg: Register = try self.register_manager.allocReg(null, try self.regClassForType(ty)); + const reg = try self.register_manager.allocReg(null, try self.regClassForType(ty)); try self.genSetReg(reg, ty, mcv); return reg; } @@ -1924,16 +1925,48 @@ fn airRetPtr(self: *Self, inst: Air.Inst.Index) !void { fn airFptrunc(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; - _ = ty_op; - return self.fail("TODO implement airFptrunc for {}", .{self.target.cpu.arch}); - // return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); + const dst_ty = self.air.typeOfIndex(inst); + const src_ty = self.air.typeOf(ty_op.operand); + if (dst_ty.floatBits(self.target.*) != 32 or src_ty.floatBits(self.target.*) != 64 or + !Target.x86.featureSetHas(self.target.cpu.features, .sse2)) + return self.fail("TODO implement airFptrunc from {} to {}", .{ + src_ty.fmt(self.bin_file.options.module.?), + dst_ty.fmt(self.bin_file.options.module.?), + }); + + const src_mcv = try self.resolveInst(ty_op.operand); + const dst_mcv = if (src_mcv.isRegister() and self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) + src_mcv + else + try self.copyToRegisterWithInstTracking(inst, dst_ty, src_mcv); + const dst_lock = self.register_manager.lockReg(dst_mcv.register); + defer if (dst_lock) |lock| self.register_manager.unlockReg(lock); + + try self.genBinOpMir(.cvtsd2ss, src_ty, dst_mcv, src_mcv); + return self.finishAir(inst, dst_mcv, .{ ty_op.operand, .none, .none }); } fn airFpext(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; - _ = ty_op; - return self.fail("TODO implement airFpext for {}", .{self.target.cpu.arch}); - // return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); + const dst_ty = self.air.typeOfIndex(inst); + const src_ty = self.air.typeOf(ty_op.operand); + if (dst_ty.floatBits(self.target.*) != 64 or src_ty.floatBits(self.target.*) != 32 or + !Target.x86.featureSetHas(self.target.cpu.features, .sse2)) + return self.fail("TODO implement airFpext from {} to {}", .{ + src_ty.fmt(self.bin_file.options.module.?), + dst_ty.fmt(self.bin_file.options.module.?), + }); + + const src_mcv = try self.resolveInst(ty_op.operand); + const dst_mcv = if (src_mcv.isRegister() and self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) + src_mcv + else + try self.copyToRegisterWithInstTracking(inst, dst_ty, src_mcv); + const dst_lock = self.register_manager.lockReg(dst_mcv.register); + defer if (dst_lock) |lock| self.register_manager.unlockReg(lock); + + try self.genBinOpMir(.cvtss2sd, src_ty, dst_mcv, src_mcv); + return self.finishAir(inst, dst_mcv, .{ ty_op.operand, .none, .none }); } fn airIntCast(self: *Self, inst: Air.Inst.Index) !void { @@ -3953,10 +3986,65 @@ fn airBitReverse(self: *Self, inst: Air.Inst.Index) !void { return self.finishAir(inst, dst_mcv, .{ ty_op.operand, .none, .none }); } +fn airNeg(self: *Self, inst: Air.Inst.Index) !void { + const un_op = self.air.instructions.items(.data)[inst].un_op; + const ty = self.air.typeOf(un_op); + const ty_bits = ty.floatBits(self.target.*); + + var arena = std.heap.ArenaAllocator.init(self.gpa); + defer arena.deinit(); + + const ExpectedContents = union { + f16: Value.Payload.Float_16, + f32: Value.Payload.Float_32, + f64: Value.Payload.Float_64, + f80: Value.Payload.Float_80, + f128: Value.Payload.Float_128, + }; + var stack align(@alignOf(ExpectedContents)) = + 
std.heap.stackFallback(@sizeOf(ExpectedContents), arena.allocator()); + + var vec_pl = Type.Payload.Array{ + .base = .{ .tag = .vector }, + .data = .{ + .len = @divExact(128, ty_bits), + .elem_type = ty, + }, + }; + const vec_ty = Type.initPayload(&vec_pl.base); + + var sign_pl = Value.Payload.SubValue{ + .base = .{ .tag = .repeated }, + .data = try Value.floatToValue(-0.0, stack.get(), ty, self.target.*), + }; + const sign_val = Value.initPayload(&sign_pl.base); + + const sign_mcv = try self.genTypedValue(.{ .ty = vec_ty, .val = sign_val }); + + const src_mcv = try self.resolveInst(un_op); + const dst_mcv = if (src_mcv.isRegister() and self.reuseOperand(inst, un_op, 0, src_mcv)) + src_mcv + else + try self.copyToRegisterWithInstTracking(inst, ty, src_mcv); + const dst_lock = self.register_manager.lockReg(dst_mcv.register); + defer if (dst_lock) |lock| self.register_manager.unlockReg(lock); + + try self.genBinOpMir(switch (ty_bits) { + 32 => .xorps, + 64 => .xorpd, + else => return self.fail("TODO implement airNeg for {}", .{ + ty.fmt(self.bin_file.options.module.?), + }), + }, vec_ty, dst_mcv, sign_mcv); + return self.finishAir(inst, dst_mcv, .{ un_op, .none, .none }); +} + fn airUnaryMath(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; _ = un_op; - return self.fail("TODO implement airUnaryMath for {}", .{self.target.cpu.arch}); + return self.fail("TODO implement airUnaryMath for {}", .{ + self.air.instructions.items(.tag)[inst], + }); //return self.finishAir(inst, result, .{ un_op, .none, .none }); } @@ -4109,7 +4197,6 @@ fn load(self: *Self, dst_mcv: MCValue, ptr_ty: Type, ptr_mcv: MCValue) InnerErro fn airLoad(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const elem_ty = self.air.typeOfIndex(inst); - const elem_size = elem_ty.abiSize(self.target.*); const result: MCValue = result: { if (!elem_ty.hasRuntimeBitsIgnoreComptime()) break :result .none; @@ -4117,14 +4204,20 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void { const reg_locks = self.register_manager.lockRegsAssumeUnused(3, .{ .rdi, .rsi, .rcx }); defer for (reg_locks) |lock| self.register_manager.unlockReg(lock); + const ptr_ty = self.air.typeOf(ty_op.operand); + const elem_size = elem_ty.abiSize(self.target.*); + + const elem_rc = try self.regClassForType(elem_ty); + const ptr_rc = try self.regClassForType(ptr_ty); + const ptr_mcv = try self.resolveInst(ty_op.operand); - const dst_mcv = if (elem_size <= 8 and self.reuseOperand(inst, ty_op.operand, 0, ptr_mcv)) + const dst_mcv = if (elem_size <= 8 and elem_rc.supersetOf(ptr_rc) and + self.reuseOperand(inst, ty_op.operand, 0, ptr_mcv)) // The MCValue that holds the pointer can be re-used as the value. 
ptr_mcv else try self.allocRegOrMem(inst, true); - const ptr_ty = self.air.typeOf(ty_op.operand); if (ptr_ty.ptrInfo().data.host_size > 0) { try self.packedLoad(dst_mcv, ptr_ty, ptr_mcv); } else { @@ -4346,17 +4439,9 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { switch (src_mcv) { .load_frame => |frame_addr| { - const field_abi_size = @intCast(u32, field_ty.abiSize(self.target.*)); - const limb_abi_size = @min(field_abi_size, 8); - const limb_abi_bits = limb_abi_size * 8; - const field_byte_off = @intCast(i32, field_off / limb_abi_bits * limb_abi_size); - const field_bit_off = field_off % limb_abi_bits; - - if (field_bit_off == 0) { - const off_mcv = MCValue{ .load_frame = .{ - .index = frame_addr.index, - .off = frame_addr.off + field_byte_off, - } }; + if (field_off % 8 == 0) { + const off_mcv = + src_mcv.address().offset(@intCast(i32, @divExact(field_off, 8))).deref(); if (self.reuseOperand(inst, operand, 0, src_mcv)) break :result off_mcv; const dst_mcv = try self.allocRegOrMem(inst, true); @@ -4364,6 +4449,12 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { break :result dst_mcv; } + const field_abi_size = @intCast(u32, field_ty.abiSize(self.target.*)); + const limb_abi_size = @min(field_abi_size, 8); + const limb_abi_bits = limb_abi_size * 8; + const field_byte_off = @intCast(i32, field_off / limb_abi_bits * limb_abi_size); + const field_bit_off = field_off % limb_abi_bits; + if (field_abi_size > 8) { return self.fail("TODO implement struct_field_val with large packed field", .{}); } @@ -5181,24 +5272,69 @@ fn genBinOp( switch (tag) { .add, .addwrap, - => try self.genBinOpMir(switch (lhs_ty.tag()) { + => try self.genBinOpMir(switch (lhs_ty.zigTypeTag()) { else => .add, - .f32 => .addss, - .f64 => .addsd, + .Float => switch (lhs_ty.floatBits(self.target.*)) { + 32 => if (Target.x86.featureSetHas(self.target.cpu.features, .sse)) + .addss + else + return self.fail("TODO implement genBinOp for {s} {} without sse", .{ + @tagName(tag), lhs_ty.fmt(self.bin_file.options.module.?), + }), + 64 => if (Target.x86.featureSetHas(self.target.cpu.features, .sse2)) + .addsd + else + return self.fail("TODO implement genBinOp for {s} {} without sse2", .{ + @tagName(tag), lhs_ty.fmt(self.bin_file.options.module.?), + }), + else => return self.fail("TODO implement genBinOp for {s} {}", .{ + @tagName(tag), lhs_ty.fmt(self.bin_file.options.module.?), + }), + }, }, lhs_ty, dst_mcv, src_mcv), .sub, .subwrap, - => try self.genBinOpMir(switch (lhs_ty.tag()) { + => try self.genBinOpMir(switch (lhs_ty.zigTypeTag()) { else => .sub, - .f32 => .subss, - .f64 => .subsd, + .Float => switch (lhs_ty.floatBits(self.target.*)) { + 32 => if (Target.x86.featureSetHas(self.target.cpu.features, .sse)) + .subss + else + return self.fail("TODO implement genBinOp for {s} {} without sse", .{ + @tagName(tag), lhs_ty.fmt(self.bin_file.options.module.?), + }), + 64 => if (Target.x86.featureSetHas(self.target.cpu.features, .sse2)) + .subsd + else + return self.fail("TODO implement genBinOp for {s} {} without sse2", .{ + @tagName(tag), lhs_ty.fmt(self.bin_file.options.module.?), + }), + else => return self.fail("TODO implement genBinOp for {s} {}", .{ + @tagName(tag), lhs_ty.fmt(self.bin_file.options.module.?), + }), + }, }, lhs_ty, dst_mcv, src_mcv), - .mul => try self.genBinOpMir(switch (lhs_ty.tag()) { - .f32 => .mulss, - .f64 => .mulsd, + .mul => try self.genBinOpMir(switch (lhs_ty.zigTypeTag()) { else => return self.fail("TODO implement genBinOp for {s} {}", .{ @tagName(tag), 
lhs_ty.fmt(self.bin_file.options.module.?) }), + .Float => switch (lhs_ty.floatBits(self.target.*)) { + 32 => if (Target.x86.featureSetHas(self.target.cpu.features, .sse)) + .mulss + else + return self.fail("TODO implement genBinOp for {s} {} without sse", .{ + @tagName(tag), lhs_ty.fmt(self.bin_file.options.module.?), + }), + 64 => if (Target.x86.featureSetHas(self.target.cpu.features, .sse2)) + .mulsd + else + return self.fail("TODO implement genBinOp for {s} {} without sse2", .{ + @tagName(tag), lhs_ty.fmt(self.bin_file.options.module.?), + }), + else => return self.fail("TODO implement genBinOp for {s} {}", .{ + @tagName(tag), lhs_ty.fmt(self.bin_file.options.module.?), + }), + }, }, lhs_ty, dst_mcv, src_mcv), .div_float, @@ -5206,12 +5342,27 @@ fn genBinOp( .div_trunc, .div_floor, => { - try self.genBinOpMir(switch (lhs_ty.tag()) { - .f32 => .divss, - .f64 => .divsd, + try self.genBinOpMir(switch (lhs_ty.zigTypeTag()) { else => return self.fail("TODO implement genBinOp for {s} {}", .{ @tagName(tag), lhs_ty.fmt(self.bin_file.options.module.?), }), + .Float => switch (lhs_ty.floatBits(self.target.*)) { + 32 => if (Target.x86.featureSetHas(self.target.cpu.features, .sse)) + .divss + else + return self.fail("TODO implement genBinOp for {s} {} without sse", .{ + @tagName(tag), lhs_ty.fmt(self.bin_file.options.module.?), + }), + 64 => if (Target.x86.featureSetHas(self.target.cpu.features, .sse2)) + .divsd + else + return self.fail("TODO implement genBinOp for {s} {} without sse2", .{ + @tagName(tag), lhs_ty.fmt(self.bin_file.options.module.?), + }), + else => return self.fail("TODO implement genBinOp for {s} {}", .{ + @tagName(tag), lhs_ty.fmt(self.bin_file.options.module.?), + }), + }, }, lhs_ty, dst_mcv, src_mcv); switch (tag) { .div_float, @@ -5222,16 +5373,18 @@ fn genBinOp( => if (Target.x86.featureSetHas(self.target.cpu.features, .sse4_1)) { const abi_size = @intCast(u32, lhs_ty.abiSize(self.target.*)); const dst_alias = registerAlias(dst_mcv.register, abi_size); - try self.asmRegisterRegisterImmediate(switch (lhs_ty.tag()) { - .f32 => .roundss, - .f64 => .roundsd, + try self.asmRegisterRegisterImmediate(switch (lhs_ty.floatBits(self.target.*)) { + 32 => .roundss, + 64 => .roundsd, else => unreachable, }, dst_alias, dst_alias, Immediate.u(switch (tag) { .div_trunc => 0b1_0_11, .div_floor => 0b1_0_01, else => unreachable, })); - } else return self.fail("TODO implement round without sse4_1", .{}), + } else return self.fail("TODO implement genBinOp for {s} {} without sse4_1", .{ + @tagName(tag), lhs_ty.fmt(self.bin_file.options.module.?), + }), else => unreachable, } }, @@ -5453,39 +5606,68 @@ fn genBinOpMir(self: *Self, mir_tag: Mir.Inst.Tag, ty: Type, dst_mcv: MCValue, s )), else => unreachable, }, - .register_offset, .eflags, + .register_offset, .memory, + .indirect, .load_direct, .lea_direct, .load_got, .lea_got, .load_tlv, .lea_tlv, + .load_frame, .lea_frame, => { - assert(abi_size <= 8); + blk: { + return self.asmRegisterMemory( + mir_tag, + registerAlias(dst_reg, abi_size), + Memory.sib(Memory.PtrSize.fromSize(abi_size), switch (src_mcv) { + .memory => |addr| .{ + .base = .{ .reg = .ds }, + .disp = math.cast(i32, addr) orelse break :blk, + }, + .indirect => |reg_off| .{ + .base = .{ .reg = reg_off.reg }, + .disp = reg_off.off, + }, + .load_frame => |frame_addr| .{ + .base = .{ .frame = frame_addr.index }, + .disp = frame_addr.off, + }, + else => break :blk, + }), + ); + } + const dst_reg_lock = self.register_manager.lockReg(dst_reg); defer if (dst_reg_lock) |lock| 
self.register_manager.unlockReg(lock); - const reg = try self.copyToTmpRegister(ty, src_mcv); - return self.genBinOpMir(mir_tag, ty, dst_mcv, .{ .register = reg }); - }, - .indirect, .load_frame => try self.asmRegisterMemory( - mir_tag, - registerAlias(dst_reg, abi_size), - Memory.sib(Memory.PtrSize.fromSize(abi_size), switch (src_mcv) { - .indirect => |reg_off| .{ - .base = .{ .reg = reg_off.reg }, - .disp = reg_off.off, + switch (src_mcv) { + .eflags, + .register_offset, + .lea_direct, + .lea_got, + .lea_tlv, + .lea_frame, + => { + const reg = try self.copyToTmpRegister(ty, src_mcv); + return self.genBinOpMir(mir_tag, ty, dst_mcv, .{ .register = reg }); }, - .load_frame => |frame_addr| .{ - .base = .{ .frame = frame_addr.index }, - .disp = frame_addr.off, + .memory, + .load_direct, + .load_got, + .load_tlv, + => { + const addr_reg = try self.copyToTmpRegister(ty, src_mcv.address()); + return self.genBinOpMir(mir_tag, ty, dst_mcv, .{ + .indirect = .{ .reg = addr_reg }, + }); }, else => unreachable, - }), - ), + } + }, } }, .memory, .indirect, .load_got, .load_direct, .load_tlv, .load_frame => { @@ -6175,10 +6357,25 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { defer if (dst_lock) |lock| self.register_manager.unlockReg(lock); const src_mcv = if (flipped) lhs_mcv else rhs_mcv; - try self.genBinOpMir(switch (ty.tag()) { + try self.genBinOpMir(switch (ty.zigTypeTag()) { else => .cmp, - .f32 => .ucomiss, - .f64 => .ucomisd, + .Float => switch (ty.floatBits(self.target.*)) { + 32 => if (Target.x86.featureSetHas(self.target.cpu.features, .sse)) + .ucomiss + else + return self.fail("TODO implement airCmp for {} without sse", .{ + ty.fmt(self.bin_file.options.module.?), + }), + 64 => if (Target.x86.featureSetHas(self.target.cpu.features, .sse2)) + .ucomisd + else + return self.fail("TODO implement airCmp for {} without sse2", .{ + ty.fmt(self.bin_file.options.module.?), + }), + else => return self.fail("TODO implement airCmp for {}", .{ + ty.fmt(self.bin_file.options.module.?), + }), + }, }, ty, dst_mcv, src_mcv); const signedness = if (ty.isAbiInt()) ty.intInfo(self.target.*).signedness else .unsigned; @@ -7608,7 +7805,8 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void { const dst_rc = try self.regClassForType(dst_ty); const src_rc = try self.regClassForType(src_ty); const operand = try self.resolveInst(ty_op.operand); - if (dst_rc.eql(src_rc) and self.reuseOperand(inst, ty_op.operand, 0, operand)) break :result operand; + if (dst_rc.supersetOf(src_rc) and self.reuseOperand(inst, ty_op.operand, 0, operand)) + break :result operand; const operand_lock = switch (operand) { .register => |reg| self.register_manager.lockReg(reg), @@ -7648,9 +7846,59 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void { fn airIntToFloat(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; - _ = ty_op; - return self.fail("TODO implement airIntToFloat for {}", .{self.target.cpu.arch}); - //return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); + + const src_ty = self.air.typeOf(ty_op.operand); + const src_bits = @intCast(u32, src_ty.bitSize(self.target.*)); + const src_signedness = + if (src_ty.isAbiInt()) src_ty.intInfo(self.target.*).signedness else .unsigned; + const dst_ty = self.air.typeOfIndex(inst); + + const src_size = std.math.divCeil(u32, @max(switch (src_signedness) { + .signed => src_bits, + .unsigned => src_bits + 1, + }, 32), 8) catch unreachable; + if (src_size > 8) return 
self.fail("TODO implement airIntToFloat from {} to {}", .{ + src_ty.fmt(self.bin_file.options.module.?), + dst_ty.fmt(self.bin_file.options.module.?), + }); + + const src_mcv = try self.resolveInst(ty_op.operand); + const src_reg = switch (src_mcv) { + .register => |reg| reg, + else => try self.copyToTmpRegister(src_ty, src_mcv), + }; + const src_lock = self.register_manager.lockRegAssumeUnused(src_reg); + defer self.register_manager.unlockReg(src_lock); + + if (src_bits < src_size * 8) try self.truncateRegister(src_ty, src_reg); + + const dst_reg = try self.register_manager.allocReg(inst, try self.regClassForType(dst_ty)); + const dst_mcv = MCValue{ .register = dst_reg }; + const dst_lock = self.register_manager.lockRegAssumeUnused(dst_reg); + defer self.register_manager.unlockReg(dst_lock); + + try self.asmRegisterRegister(switch (dst_ty.floatBits(self.target.*)) { + 32 => if (Target.x86.featureSetHas(self.target.cpu.features, .sse)) + .cvtsi2ss + else + return self.fail("TODO implement airIntToFloat from {} to {} without sse", .{ + src_ty.fmt(self.bin_file.options.module.?), + dst_ty.fmt(self.bin_file.options.module.?), + }), + 64 => if (Target.x86.featureSetHas(self.target.cpu.features, .sse2)) + .cvtsi2sd + else + return self.fail("TODO implement airIntToFloat from {} to {} without sse2", .{ + src_ty.fmt(self.bin_file.options.module.?), + dst_ty.fmt(self.bin_file.options.module.?), + }), + else => return self.fail("TODO implement airIntToFloat from {} to {}", .{ + src_ty.fmt(self.bin_file.options.module.?), + dst_ty.fmt(self.bin_file.options.module.?), + }), + }, dst_reg.to128(), registerAlias(src_reg, src_size)); + + return self.finishAir(inst, dst_mcv, .{ ty_op.operand, .none, .none }); } fn airFloatToInt(self: *Self, inst: Air.Inst.Index) !void { @@ -8717,6 +8965,7 @@ fn resolveCallingConventionValues( }, .C => { var param_reg_i: usize = 0; + var param_sse_reg_i: usize = 0; result.stack_align = 16; switch (self.target.os.tag) { @@ -8734,26 +8983,39 @@ fn resolveCallingConventionValues( // TODO: is this even possible for C calling convention? 
result.return_value = InstTracking.init(.none); } else { - const ret_reg = abi.getCAbiIntReturnRegs(self.target.*)[0]; - const ret_ty_size = @intCast(u31, ret_ty.abiSize(self.target.*)); - if (ret_ty_size <= 8) { - const aliased_reg = registerAlias(ret_reg, ret_ty_size); - result.return_value = .{ .short = .{ .register = aliased_reg }, .long = .none }; - } else { - const ret_indirect_reg = abi.getCAbiIntParamRegs(self.target.*)[param_reg_i]; - param_reg_i += 1; - result.return_value = .{ - .short = .{ .indirect = .{ .reg = ret_reg } }, - .long = .{ .indirect = .{ .reg = ret_indirect_reg } }, - }; + const classes = switch (self.target.os.tag) { + .windows => &[1]abi.Class{abi.classifyWindows(ret_ty, self.target.*)}, + else => mem.sliceTo(&abi.classifySystemV(ret_ty, self.target.*, .ret), .none), + }; + if (classes.len > 1) { + return self.fail("TODO handle multiple classes per type", .{}); } + const ret_reg = abi.getCAbiIntReturnRegs(self.target.*)[0]; + result.return_value = switch (classes[0]) { + .integer => InstTracking.init(.{ .register = registerAlias( + ret_reg, + @intCast(u32, ret_ty.abiSize(self.target.*)), + ) }), + .float, .sse => InstTracking.init(.{ .register = .xmm0 }), + .memory => ret: { + const ret_indirect_reg = abi.getCAbiIntParamRegs(self.target.*)[param_reg_i]; + param_reg_i += 1; + break :ret .{ + .short = .{ .indirect = .{ .reg = ret_reg } }, + .long = .{ .indirect = .{ .reg = ret_indirect_reg } }, + }; + }, + else => |class| return self.fail("TODO handle calling convention class {s}", .{ + @tagName(class), + }), + }; } // Input params for (param_types, result.args) |ty, *arg| { assert(ty.hasRuntimeBitsIgnoreComptime()); - const classes: []const abi.Class = switch (self.target.os.tag) { + const classes = switch (self.target.os.tag) { .windows => &[1]abi.Class{abi.classifyWindows(ty, self.target.*)}, else => mem.sliceTo(&abi.classifySystemV(ty, self.target.*, .arg), .none), }; @@ -8761,13 +9023,29 @@ fn resolveCallingConventionValues( return self.fail("TODO handle multiple classes per type", .{}); } switch (classes[0]) { - .integer => blk: { - if (param_reg_i >= abi.getCAbiIntParamRegs(self.target.*).len) break :blk; - const param_reg = abi.getCAbiIntParamRegs(self.target.*)[param_reg_i]; + .integer => if (param_reg_i < abi.getCAbiIntParamRegs(self.target.*).len) { + arg.* = .{ .register = abi.getCAbiIntParamRegs(self.target.*)[param_reg_i] }; param_reg_i += 1; - arg.* = .{ .register = param_reg }; continue; }, + .float, .sse => switch (self.target.os.tag) { + .windows => if (param_reg_i < 4) { + arg.* = .{ .register = @intToEnum( + Register, + @enumToInt(Register.xmm0) + param_reg_i, + ) }; + param_reg_i += 1; + continue; + }, + else => if (param_sse_reg_i < 8) { + arg.* = .{ .register = @intToEnum( + Register, + @enumToInt(Register.xmm0) + param_sse_reg_i, + ) }; + param_sse_reg_i += 1; + continue; + }, + }, .memory => {}, // fallthrough else => |class| return self.fail("TODO handle calling convention class {s}", .{ @tagName(class), diff --git a/src/arch/x86_64/Encoding.zig b/src/arch/x86_64/Encoding.zig index a977af7842..5cb7f7a2d9 100644 --- a/src/arch/x86_64/Encoding.zig +++ b/src/arch/x86_64/Encoding.zig @@ -58,7 +58,7 @@ pub fn findByMnemonic( next: for (mnemonic_to_encodings_map[@enumToInt(mnemonic)]) |data| { switch (data.mode) { .rex => if (!rex_required) continue, - .long, .sse2_long => {}, + .long, .sse_long, .sse2_long => {}, else => if (rex_required) continue, } for (input_ops, data.ops) |input_op, data_op| @@ -90,7 +90,7 @@ pub fn findByOpcode(opc: 
[]const u8, prefixes: struct { if (prefixes.rex.w) { switch (data.mode) { .short, .fpu, .sse, .sse2, .sse4_1, .none => continue, - .long, .sse2_long, .rex => {}, + .long, .sse_long, .sse2_long, .rex => {}, } } else if (prefixes.rex.present and !prefixes.rex.isSet()) { switch (data.mode) { @@ -138,7 +138,7 @@ pub fn modRmExt(encoding: Encoding) u3 { pub fn operandBitSize(encoding: Encoding) u64 { switch (encoding.data.mode) { .short => return 16, - .long, .sse2_long => return 64, + .long, .sse_long, .sse2_long => return 64, else => {}, } const bit_size: u64 = switch (encoding.data.op_en) { @@ -163,7 +163,7 @@ pub fn format( _ = options; _ = fmt; switch (encoding.data.mode) { - .long, .sse2_long => try writer.writeAll("REX.W + "), + .long, .sse_long, .sse2_long => try writer.writeAll("REX.W + "), else => {}, } @@ -269,21 +269,25 @@ pub const Mnemonic = enum { // SSE addss, cmpss, + cvtsi2ss, divss, maxss, minss, movss, mulss, subss, ucomiss, + xorps, // SSE2 addsd, //cmpsd, + cvtsd2ss, cvtsi2sd, cvtss2sd, divsd, maxsd, minsd, movq, //movd, movsd, mulsd, subsd, ucomisd, + xorpd, // SSE4.1 roundss, roundsd, @@ -318,7 +322,7 @@ pub const Op = enum { m, moffs, sreg, - xmm, xmm_m32, xmm_m64, + xmm, xmm_m32, xmm_m64, xmm_m128, // zig fmt: on pub fn fromOperand(operand: Instruction.Operand) Op { @@ -400,7 +404,7 @@ pub const Op = enum { .imm32, .imm32s, .eax, .r32, .m32, .rm32, .rel32, .xmm_m32 => 32, .imm64, .rax, .r64, .m64, .rm64, .xmm_m64 => 64, .m80 => 80, - .m128, .xmm => 128, + .m128, .xmm, .xmm_m128 => 128, }; } @@ -423,8 +427,8 @@ pub const Op = enum { .al, .ax, .eax, .rax, .r8, .r16, .r32, .r64, .rm8, .rm16, .rm32, .rm64, - .xmm, .xmm_m32, .xmm_m64, - => true, + .xmm, .xmm_m32, .xmm_m64, .xmm_m128, + => true, else => false, }; // zig fmt: on @@ -449,7 +453,7 @@ pub const Op = enum { .rm8, .rm16, .rm32, .rm64, .m8, .m16, .m32, .m64, .m80, .m128, .m, - .xmm_m32, .xmm_m64, + .xmm_m32, .xmm_m64, .xmm_m128, => true, else => false, }; @@ -470,13 +474,13 @@ pub const Op = enum { .r8, .r16, .r32, .r64 => .general_purpose, .rm8, .rm16, .rm32, .rm64 => .general_purpose, .sreg => .segment, - .xmm, .xmm_m32, .xmm_m64 => .floating_point, + .xmm, .xmm_m32, .xmm_m64, .xmm_m128 => .floating_point, }; } pub fn isFloatingPointRegister(op: Op) bool { return switch (op) { - .xmm, .xmm_m32, .xmm_m64 => true, + .xmm, .xmm_m32, .xmm_m64, .xmm_m128 => true, else => false, }; } @@ -535,6 +539,7 @@ pub const Mode = enum { rex, long, sse, + sse_long, sse2, sse2_long, sse4_1, diff --git a/src/arch/x86_64/Lower.zig b/src/arch/x86_64/Lower.zig index af0146c6e1..a961100687 100644 --- a/src/arch/x86_64/Lower.zig +++ b/src/arch/x86_64/Lower.zig @@ -95,6 +95,7 @@ pub fn lowerMir(lower: *Lower, inst: Mir.Inst) Error![]const Instruction { .addss, .cmpss, + .cvtsi2ss, .divss, .maxss, .minss, @@ -103,8 +104,12 @@ pub fn lowerMir(lower: *Lower, inst: Mir.Inst) Error![]const Instruction { .roundss, .subss, .ucomiss, + .xorps, .addsd, .cmpsd, + .cvtsd2ss, + .cvtsi2sd, + .cvtss2sd, .divsd, .maxsd, .minsd, @@ -113,6 +118,7 @@ pub fn lowerMir(lower: *Lower, inst: Mir.Inst) Error![]const Instruction { .roundsd, .subsd, .ucomisd, + .xorpd, => try lower.mirGeneric(inst), .cmps, diff --git a/src/arch/x86_64/Mir.zig b/src/arch/x86_64/Mir.zig index c8703373d2..c14338b13d 100644 --- a/src/arch/x86_64/Mir.zig +++ b/src/arch/x86_64/Mir.zig @@ -170,6 +170,8 @@ pub const Inst = struct { addss, /// Compare scalar single-precision floating-point values cmpss, + /// Convert doubleword integer to scalar single-precision floating-point value + 
cvtsi2ss, /// Divide scalar single-precision floating-point values divss, /// Return maximum single-precision floating-point value @@ -186,10 +188,18 @@ pub const Inst = struct { subss, /// Unordered compare scalar single-precision floating-point values ucomiss, + /// Bitwise logical xor of packed single precision floating-point values + xorps, /// Add double precision floating point values addsd, /// Compare scalar double-precision floating-point values cmpsd, + /// Convert scalar double-precision floating-point value to scalar single-precision floating-point value + cvtsd2ss, + /// Convert doubleword integer to scalar double-precision floating-point value + cvtsi2sd, + /// Convert scalar single-precision floating-point value to scalar double-precision floating-point value + cvtss2sd, /// Divide scalar double-precision floating-point values divsd, /// Return maximum double-precision floating-point value @@ -206,6 +216,8 @@ pub const Inst = struct { subsd, /// Unordered compare scalar double-precision floating-point values ucomisd, + /// Bitwise logical xor of packed double precision floating-point values + xorpd, /// Compare string operands cmps, diff --git a/src/arch/x86_64/encoder.zig b/src/arch/x86_64/encoder.zig index 329dfca924..4c900697f5 100644 --- a/src/arch/x86_64/encoder.zig +++ b/src/arch/x86_64/encoder.zig @@ -323,7 +323,7 @@ pub const Instruction = struct { var rex = Rex{}; rex.present = inst.encoding.data.mode == .rex; switch (inst.encoding.data.mode) { - .long, .sse2_long => rex.w = true, + .long, .sse_long, .sse2_long => rex.w = true, else => {}, } diff --git a/src/arch/x86_64/encodings.zig b/src/arch/x86_64/encodings.zig index 333bdceea8..ac427c3633 100644 --- a/src/arch/x86_64/encodings.zig +++ b/src/arch/x86_64/encodings.zig @@ -834,6 +834,9 @@ pub const table = [_]Entry{ .{ .cmpss, .rmi, &.{ .xmm, .xmm_m32, .imm8 }, &.{ 0xf3, 0x0f, 0xc2 }, 0, .sse }, + .{ .cvtsi2ss, .rm, &.{ .xmm, .rm32 }, &.{ 0xf3, 0x0f, 0x2a }, 0, .sse }, + .{ .cvtsi2ss, .rm, &.{ .xmm, .rm64 }, &.{ 0xf3, 0x0f, 0x2a }, 0, .sse_long }, + .{ .divss, .rm, &.{ .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x5e }, 0, .sse }, .{ .maxss, .rm, &.{ .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x5f }, 0, .sse }, @@ -849,11 +852,20 @@ pub const table = [_]Entry{ .{ .ucomiss, .rm, &.{ .xmm, .xmm_m32 }, &.{ 0x0f, 0x2e }, 0, .sse }, + .{ .xorps, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x57 }, 0, .sse }, + // SSE2 .{ .addsd, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x58 }, 0, .sse2 }, .{ .cmpsd, .rmi, &.{ .xmm, .xmm_m64, .imm8 }, &.{ 0xf2, 0x0f, 0xc2 }, 0, .sse2 }, + .{ .cvtsd2ss, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x5a }, 0, .sse2 }, + + .{ .cvtsi2sd, .rm, &.{ .xmm, .rm32 }, &.{ 0xf2, 0x0f, 0x2a }, 0, .sse2 }, + .{ .cvtsi2sd, .rm, &.{ .xmm, .rm64 }, &.{ 0xf2, 0x0f, 0x2a }, 0, .sse2_long }, + + .{ .cvtss2sd, .rm, &.{ .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x5a }, 0, .sse2 }, + .{ .divsd, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x5e }, 0, .sse2 }, .{ .maxsd, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x5f }, 0, .sse2 }, @@ -878,6 +890,8 @@ pub const table = [_]Entry{ .{ .ucomisd, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0x66, 0x0f, 0x2e }, 0, .sse2 }, + .{ .xorpd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x57 }, 0, .sse2 }, + // SSE4.1 .{ .roundss, .rmi, &.{ .xmm, .xmm_m32, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x0a }, 0, .sse4_1 }, .{ .roundsd, .rmi, &.{ .xmm, .xmm_m64, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x0b }, 0, .sse4_1 }, diff --git a/src/codegen.zig b/src/codegen.zig index bf80a90cc3..0043b38a5b 100644 --- a/src/codegen.zig +++ b/src/codegen.zig 
@@ -291,6 +291,20 @@ pub fn generateSymbol( }, }, .Pointer => switch (typed_value.val.tag()) { + .null_value => { + switch (target.cpu.arch.ptrBitWidth()) { + 32 => { + mem.writeInt(u32, try code.addManyAsArray(4), 0, endian); + if (typed_value.ty.isSlice()) try code.appendNTimes(0xaa, 4); + }, + 64 => { + mem.writeInt(u64, try code.addManyAsArray(8), 0, endian); + if (typed_value.ty.isSlice()) try code.appendNTimes(0xaa, 8); + }, + else => unreachable, + } + return Result.ok; + }, .zero, .one, .int_u64, .int_big_positive => { switch (target.cpu.arch.ptrBitWidth()) { 32 => { @@ -397,30 +411,15 @@ pub fn generateSymbol( }, } }, - .elem_ptr => { - const elem_ptr = typed_value.val.castTag(.elem_ptr).?.data; - const elem_size = typed_value.ty.childType().abiSize(target); - const addend = @intCast(u32, elem_ptr.index * elem_size); - const array_ptr = elem_ptr.array_ptr; - - switch (array_ptr.tag()) { - .decl_ref => { - const decl_index = array_ptr.castTag(.decl_ref).?.data; - return lowerDeclRef(bin_file, src_loc, typed_value, decl_index, code, debug_output, .{ - .parent_atom_index = reloc_info.parent_atom_index, - .addend = (reloc_info.addend orelse 0) + addend, - }); - }, - else => return Result{ - .fail = try ErrorMsg.create( - bin_file.allocator, - src_loc, - "TODO implement generateSymbol for pointer type value: '{s}'", - .{@tagName(typed_value.val.tag())}, - ), - }, - } - }, + .elem_ptr => return lowerParentPtr( + bin_file, + src_loc, + typed_value, + typed_value.val, + code, + debug_output, + reloc_info, + ), else => return Result{ .fail = try ErrorMsg.create( bin_file.allocator, @@ -838,9 +837,62 @@ pub fn generateSymbol( } } +fn lowerParentPtr( + bin_file: *link.File, + src_loc: Module.SrcLoc, + typed_value: TypedValue, + parent_ptr: Value, + code: *std.ArrayList(u8), + debug_output: DebugInfoOutput, + reloc_info: RelocInfo, +) CodeGenError!Result { + const target = bin_file.options.target; + + switch (parent_ptr.tag()) { + .elem_ptr => { + const elem_ptr = parent_ptr.castTag(.elem_ptr).?.data; + return lowerParentPtr( + bin_file, + src_loc, + typed_value, + elem_ptr.array_ptr, + code, + debug_output, + reloc_info.offset(@intCast(u32, elem_ptr.index * elem_ptr.elem_ty.abiSize(target))), + ); + }, + .decl_ref => { + const decl_index = parent_ptr.castTag(.decl_ref).?.data; + return lowerDeclRef( + bin_file, + src_loc, + typed_value, + decl_index, + code, + debug_output, + reloc_info, + ); + }, + else => |t| { + return Result{ + .fail = try ErrorMsg.create( + bin_file.allocator, + src_loc, + "TODO implement lowerParentPtr for type '{s}'", + .{@tagName(t)}, + ), + }; + }, + } +} + const RelocInfo = struct { parent_atom_index: u32, addend: ?u32 = null, + + fn offset(ri: RelocInfo, addend: u32) RelocInfo { + return .{ .parent_atom_index = ri.parent_atom_index, .addend = (ri.addend orelse 0) + addend }; + } }; fn lowerDeclRef( @@ -1095,6 +1147,9 @@ pub fn genTypedValue( .Slice => {}, else => { switch (typed_value.val.tag()) { + .null_value => { + return GenResult.mcv(.{ .immediate = 0 }); + }, .int_u64 => { return GenResult.mcv(.{ .immediate = typed_value.val.toUnsignedInt(target) }); }, -- cgit v1.2.3 From 1fd48815c6e22b266b318f58c6b3c828b20ace80 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Fri, 28 Apr 2023 01:56:18 -0400 Subject: x86_64: cleanup unneeded code --- src/arch/x86_64/CodeGen.zig | 130 +++++++++++++++++++------------------------- 1 file changed, 56 insertions(+), 74 deletions(-) (limited to 'src/arch') diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig 
index d9c7298c95..915b36f267 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -6595,35 +6595,26 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void { if (Air.refToIndex(pl_op.operand)) |op_inst| self.processDeath(op_inst); } - const outer_state = try self.saveState(); - { - self.scope_generation += 1; - const inner_state = try self.saveState(); + self.scope_generation += 1; + const state = try self.saveState(); - for (liveness_cond_br.then_deaths) |operand| self.processDeath(operand); - try self.genBody(then_body); - try self.restoreState(inner_state, &.{}, .{ - .emit_instructions = false, - .update_tracking = true, - .resurrect = true, - .close_scope = true, - }); + for (liveness_cond_br.then_deaths) |operand| self.processDeath(operand); + try self.genBody(then_body); + try self.restoreState(state, &.{}, .{ + .emit_instructions = false, + .update_tracking = true, + .resurrect = true, + .close_scope = true, + }); - try self.performReloc(reloc); + try self.performReloc(reloc); - for (liveness_cond_br.else_deaths) |operand| self.processDeath(operand); - try self.genBody(else_body); - try self.restoreState(inner_state, &.{}, .{ - .emit_instructions = false, - .update_tracking = true, - .resurrect = true, - .close_scope = true, - }); - } - try self.restoreState(outer_state, &.{}, .{ + for (liveness_cond_br.else_deaths) |operand| self.processDeath(operand); + try self.genBody(else_body); + try self.restoreState(state, &.{}, .{ .emit_instructions = false, - .update_tracking = false, - .resurrect = false, + .update_tracking = true, + .resurrect = true, .close_scope = true, }); @@ -6996,64 +6987,55 @@ fn airSwitchBr(self: *Self, inst: Air.Inst.Index) !void { if (Air.refToIndex(pl_op.operand)) |op_inst| self.processDeath(op_inst); } - const outer_state = try self.saveState(); - { - self.scope_generation += 1; - const inner_state = try self.saveState(); - - while (case_i < switch_br.data.cases_len) : (case_i += 1) { - const case = self.air.extraData(Air.SwitchBr.Case, extra_index); - const items = @ptrCast( - []const Air.Inst.Ref, - self.air.extra[case.end..][0..case.data.items_len], - ); - const case_body = self.air.extra[case.end + items.len ..][0..case.data.body_len]; - extra_index = case.end + items.len + case_body.len; + self.scope_generation += 1; + const state = try self.saveState(); - var relocs = try self.gpa.alloc(u32, items.len); - defer self.gpa.free(relocs); + while (case_i < switch_br.data.cases_len) : (case_i += 1) { + const case = self.air.extraData(Air.SwitchBr.Case, extra_index); + const items = @ptrCast( + []const Air.Inst.Ref, + self.air.extra[case.end..][0..case.data.items_len], + ); + const case_body = self.air.extra[case.end + items.len ..][0..case.data.body_len]; + extra_index = case.end + items.len + case_body.len; - for (items, relocs) |item, *reloc| { - try self.spillEflagsIfOccupied(); - const item_mcv = try self.resolveInst(item); - try self.genBinOpMir(.cmp, condition_ty, condition, item_mcv); - reloc.* = try self.asmJccReloc(undefined, .ne); - } + var relocs = try self.gpa.alloc(u32, items.len); + defer self.gpa.free(relocs); - for (liveness.deaths[case_i]) |operand| self.processDeath(operand); + for (items, relocs) |item, *reloc| { + try self.spillEflagsIfOccupied(); + const item_mcv = try self.resolveInst(item); + try self.genBinOpMir(.cmp, condition_ty, condition, item_mcv); + reloc.* = try self.asmJccReloc(undefined, .ne); + } - try self.genBody(case_body); - try self.restoreState(inner_state, &.{}, .{ - .emit_instructions = false, 
- .update_tracking = true, - .resurrect = true, - .close_scope = true, - }); + for (liveness.deaths[case_i]) |operand| self.processDeath(operand); - for (relocs) |reloc| try self.performReloc(reloc); - } + try self.genBody(case_body); + try self.restoreState(state, &.{}, .{ + .emit_instructions = false, + .update_tracking = true, + .resurrect = true, + .close_scope = true, + }); - if (switch_br.data.else_body_len > 0) { - const else_body = self.air.extra[extra_index..][0..switch_br.data.else_body_len]; + for (relocs) |reloc| try self.performReloc(reloc); + } - const else_deaths = liveness.deaths.len - 1; - for (liveness.deaths[else_deaths]) |operand| self.processDeath(operand); + if (switch_br.data.else_body_len > 0) { + const else_body = self.air.extra[extra_index..][0..switch_br.data.else_body_len]; - try self.genBody(else_body); - try self.restoreState(inner_state, &.{}, .{ - .emit_instructions = false, - .update_tracking = true, - .resurrect = true, - .close_scope = true, - }); - } + const else_deaths = liveness.deaths.len - 1; + for (liveness.deaths[else_deaths]) |operand| self.processDeath(operand); + + try self.genBody(else_body); + try self.restoreState(state, &.{}, .{ + .emit_instructions = false, + .update_tracking = true, + .resurrect = true, + .close_scope = true, + }); } - try self.restoreState(outer_state, &.{}, .{ - .emit_instructions = false, - .update_tracking = false, - .resurrect = false, - .close_scope = true, - }); // We already took care of pl_op.operand earlier, so we're going to pass .none here return self.finishAir(inst, .unreach, .{ .none, .none, .none }); -- cgit v1.2.3 From 50f96c2949bd7d396eaaf391ffd5fd55e59ebac1 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Fri, 28 Apr 2023 02:24:29 -0400 Subject: x86_64: fix stack realignment --- src/arch/x86_64/CodeGen.zig | 7 ++++--- test/behavior/align.zig | 5 ----- 2 files changed, 4 insertions(+), 8 deletions(-) (limited to 'src/arch') diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 915b36f267..955a0ba843 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -1673,15 +1673,16 @@ fn allocFrameIndex(self: *Self, alloc: FrameAlloc) !FrameIndex { const frame_allocs_slice = self.frame_allocs.slice(); const frame_size = frame_allocs_slice.items(.abi_size); const frame_align = frame_allocs_slice.items(.abi_align); + + const stack_frame_align = &frame_align[@enumToInt(FrameIndex.stack_frame)]; + stack_frame_align.* = @max(stack_frame_align.*, alloc.abi_align); + for (self.free_frame_indices.keys(), 0..) 
|frame_index, free_i| { const abi_size = frame_size[@enumToInt(frame_index)]; if (abi_size != alloc.abi_size) continue; const abi_align = &frame_align[@enumToInt(frame_index)]; abi_align.* = @max(abi_align.*, alloc.abi_align); - const stack_frame_align = &frame_align[@enumToInt(FrameIndex.stack_frame)]; - stack_frame_align.* = @max(stack_frame_align.*, alloc.abi_align); - _ = self.free_frame_indices.swapRemoveAt(free_i); return frame_index; } diff --git a/test/behavior/align.zig b/test/behavior/align.zig index 7755cdaa7d..bfc9997dd2 100644 --- a/test/behavior/align.zig +++ b/test/behavior/align.zig @@ -282,7 +282,6 @@ fn give() anyerror!u128 { test "page aligned array on stack" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -491,10 +490,6 @@ test "read 128-bit field from default aligned struct in global memory" { } test "struct field explicit alignment" { - if (builtin.zig_backend == .stage2_x86_64) { - // Careful enabling this test, fails randomly. - return error.SkipZigTest; - } if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO -- cgit v1.2.3 From aaef5288f886c86e4257c5c67122a0be8a64c134 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Fri, 28 Apr 2023 17:35:17 -0400 Subject: x86_64: fix 128-bit cmpxchg --- src/arch/x86_64/CodeGen.zig | 134 ++++++++++++++++++++++---------------------- 1 file changed, 67 insertions(+), 67 deletions(-) (limited to 'src/arch') diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 955a0ba843..99b3c913a0 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -219,9 +219,9 @@ pub const MCValue = union(enum) { .dead, .undef, .immediate, + .eflags, .register, .register_offset, - .eflags, .register_overflow, .lea_direct, .lea_got, @@ -297,6 +297,41 @@ pub const MCValue = union(enum) { }; } + fn mem(mcv: MCValue, ptr_size: Memory.PtrSize) Memory { + return switch (mcv) { + .none, + .unreach, + .dead, + .undef, + .immediate, + .eflags, + .register, + .register_offset, + .register_overflow, + .load_direct, + .lea_direct, + .load_got, + .lea_got, + .load_tlv, + .lea_tlv, + .lea_frame, + .reserved_frame, + => unreachable, + .memory => |addr| if (math.cast(i32, @bitCast(i64, addr))) |small_addr| + Memory.sib(ptr_size, .{ .base = .{ .reg = .ds }, .disp = small_addr }) + else + Memory.moffs(.ds, addr), + .indirect => |reg_off| Memory.sib(ptr_size, .{ + .base = .{ .reg = reg_off.reg }, + .disp = reg_off.off, + }), + .load_frame => |frame_addr| Memory.sib(ptr_size, .{ + .base = .{ .frame = frame_addr.index }, + .disp = frame_addr.off, + }), + }; + } + pub fn format( mcv: MCValue, comptime _: []const u8, @@ -7936,70 +7971,50 @@ fn airCmpxchg(self: *Self, inst: Air.Inst.Index) !void { const extra = self.air.extraData(Air.Cmpxchg, ty_pl.payload).data; const ptr_ty = self.air.typeOf(extra.ptr); - const ptr_mcv = try self.resolveInst(extra.ptr); const val_ty = self.air.typeOf(extra.expected_value); const val_abi_size = @intCast(u32, val_ty.abiSize(self.target.*)); try self.spillRegisters(&.{ .rax, .rdx, .rbx, .rcx }); const regs_lock = self.register_manager.lockRegsAssumeUnused(4, .{ .rax, .rdx, .rbx, .rcx }); - for (regs_lock) 
|lock| self.register_manager.unlockReg(lock); + defer for (regs_lock) |lock| self.register_manager.unlockReg(lock); const exp_mcv = try self.resolveInst(extra.expected_value); - if (val_abi_size > 8) switch (exp_mcv) { - .load_frame => |frame_addr| { - try self.genSetReg(.rax, Type.usize, .{ .load_frame = .{ - .index = frame_addr.index, - .off = frame_addr.off + 0, - } }); - try self.genSetReg(.rdx, Type.usize, .{ .load_frame = .{ - .index = frame_addr.index, - .off = frame_addr.off + 8, - } }); - }, - else => return self.fail("TODO implement cmpxchg for {s}", .{@tagName(exp_mcv)}), + if (val_abi_size > 8) { + try self.genSetReg(.rax, Type.usize, exp_mcv); + try self.genSetReg(.rdx, Type.usize, exp_mcv.address().offset(8).deref()); } else try self.genSetReg(.rax, val_ty, exp_mcv); - const rax_lock = self.register_manager.lockRegAssumeUnused(.rax); - defer self.register_manager.unlockReg(rax_lock); const new_mcv = try self.resolveInst(extra.new_value); - const new_reg: Register = if (val_abi_size > 8) switch (new_mcv) { - .load_frame => |frame_addr| new: { - try self.genSetReg(.rbx, Type.usize, .{ .load_frame = .{ - .index = frame_addr.index, - .off = frame_addr.off + 0, - } }); - try self.genSetReg(.rcx, Type.usize, .{ .load_frame = .{ - .index = frame_addr.index, - .off = frame_addr.off + 8, - } }); - break :new undefined; - }, - else => return self.fail("TODO implement cmpxchg for {s}", .{@tagName(exp_mcv)}), + const new_reg = if (val_abi_size > 8) new: { + try self.genSetReg(.rbx, Type.usize, new_mcv); + try self.genSetReg(.rcx, Type.usize, new_mcv.address().offset(8).deref()); + break :new null; } else try self.copyToTmpRegister(val_ty, new_mcv); - const new_lock = self.register_manager.lockRegAssumeUnused(new_reg); - defer self.register_manager.unlockReg(new_lock); + const new_lock = if (new_reg) |reg| self.register_manager.lockRegAssumeUnused(reg) else null; + defer if (new_lock) |lock| self.register_manager.unlockReg(lock); + const ptr_mcv = try self.resolveInst(extra.ptr); const ptr_size = Memory.PtrSize.fromSize(val_abi_size); const ptr_mem = switch (ptr_mcv) { - .register => |reg| Memory.sib(ptr_size, .{ .base = .{ .reg = reg } }), - .lea_frame => |frame_addr| Memory.sib(ptr_size, .{ - .base = .{ .frame = frame_addr.index }, - .disp = frame_addr.off, + .immediate, .register, .register_offset, .lea_frame => ptr_mcv.deref().mem(ptr_size), + else => Memory.sib(ptr_size, .{ + .base = .{ .reg = try self.copyToTmpRegister(ptr_ty, ptr_mcv) }, }), - else => Memory.sib(ptr_size, .{ .base = .{ - .reg = try self.copyToTmpRegister(ptr_ty, ptr_mcv), - } }), }; - const mem_lock = switch (ptr_mem.base()) { + switch (ptr_mem) { + .sib, .rip => {}, + .moffs => return self.fail("TODO airCmpxchg with {s}", .{@tagName(ptr_mcv)}), + } + const ptr_lock = switch (ptr_mem.base()) { .none, .frame => null, .reg => |reg| self.register_manager.lockReg(reg), }; - defer if (mem_lock) |lock| self.register_manager.unlockReg(lock); + defer if (ptr_lock) |lock| self.register_manager.unlockReg(lock); try self.spillEflagsIfOccupied(); if (val_abi_size <= 8) { _ = try self.addInst(.{ .tag = .cmpxchg, .ops = .lock_mr_sib, .data = .{ .rx = .{ - .r = registerAlias(new_reg, val_abi_size), + .r = registerAlias(new_reg.?, val_abi_size), .payload = try self.addExtra(Mir.MemorySib.encode(ptr_mem)), } } }); } else { @@ -8017,24 +8032,9 @@ fn airCmpxchg(self: *Self, inst: Air.Inst.Index) !void { } const dst_mcv = try self.allocRegOrMem(inst, false); - try self.genSetMem( - .{ .frame = dst_mcv.load_frame.index }, - 
dst_mcv.load_frame.off + 16, - Type.bool, - .{ .eflags = .ne }, - ); - try self.genSetMem( - .{ .frame = dst_mcv.load_frame.index }, - dst_mcv.load_frame.off + 8, - Type.usize, - .{ .register = .rdx }, - ); - try self.genSetMem( - .{ .frame = dst_mcv.load_frame.index }, - dst_mcv.load_frame.off + 0, - Type.usize, - .{ .register = .rax }, - ); + try self.genCopy(Type.usize, dst_mcv, .{ .register = .rax }); + try self.genCopy(Type.usize, dst_mcv.address().offset(8).deref(), .{ .register = .rdx }); + try self.genCopy(Type.bool, dst_mcv.address().offset(16).deref(), .{ .eflags = .ne }); break :result dst_mcv; }; return self.finishAir(inst, result, .{ extra.ptr, extra.expected_value, extra.new_value }); @@ -8065,15 +8065,15 @@ fn atomicOp( const val_abi_size = @intCast(u32, val_ty.abiSize(self.target.*)); const ptr_size = Memory.PtrSize.fromSize(val_abi_size); const ptr_mem = switch (ptr_mcv) { - .register => |reg| Memory.sib(ptr_size, .{ .base = .{ .reg = reg } }), - .lea_frame => |frame_addr| Memory.sib(ptr_size, .{ - .base = .{ .frame = frame_addr.index }, - .disp = frame_addr.off, + .immediate, .register, .register_offset, .lea_frame => ptr_mcv.deref().mem(ptr_size), + else => Memory.sib(ptr_size, .{ + .base = .{ .reg = try self.copyToTmpRegister(ptr_ty, ptr_mcv) }, }), - else => Memory.sib(ptr_size, .{ .base = .{ - .reg = try self.copyToTmpRegister(ptr_ty, ptr_mcv), - } }), }; + switch (ptr_mem) { + .sib, .rip => {}, + .moffs => return self.fail("TODO airCmpxchg with {s}", .{@tagName(ptr_mcv)}), + } const mem_lock = switch (ptr_mem.base()) { .none, .frame => null, .reg => |reg| self.register_manager.lockReg(reg), -- cgit v1.2.3 From c3889600424e3720dc07b22397129f82f2111d72 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Fri, 28 Apr 2023 20:02:28 -0400 Subject: x86_64: fix large not and atomicrmw --- src/arch/x86_64/CodeGen.zig | 200 +++++++++++++++++++------------------------- test/behavior/atomics.zig | 1 - test/behavior/math.zig | 1 - 3 files changed, 86 insertions(+), 116 deletions(-) (limited to 'src/arch') diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 99b3c913a0..670dc6840c 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -2050,13 +2050,10 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void { registerAlias(src_reg, min_abi_size), ); }, - .load_frame => |frame_addr| try self.asmRegisterMemory( + .memory, .indirect, .load_frame => try self.asmRegisterMemory( tag, dst_alias, - Memory.sib(Memory.PtrSize.fromSize(min_abi_size), .{ - .base = .{ .frame = frame_addr.index }, - .disp = frame_addr.off, - }), + src_mcv.mem(Memory.PtrSize.fromSize(min_abi_size)), ), else => return self.fail("TODO airIntCast from {s} to {s}", .{ @tagName(src_mcv), @@ -2738,19 +2735,9 @@ fn genIntMulDivOpMir( }; switch (mat_rhs) { .register => |reg| try self.asmRegister(tag, registerAlias(reg, abi_size)), - .indirect, .load_frame => try self.asmMemory( + .memory, .indirect, .load_frame => try self.asmMemory( tag, - Memory.sib(Memory.PtrSize.fromSize(abi_size), switch (mat_rhs) { - .indirect => |reg_off| .{ - .base = .{ .reg = reg_off.reg }, - .disp = reg_off.off, - }, - .load_frame => |frame_addr| .{ - .base = .{ .frame = frame_addr.index }, - .disp = frame_addr.off, - }, - else => unreachable, - }), + mat_rhs.mem(Memory.PtrSize.fromSize(abi_size)), ), else => unreachable, } @@ -4628,9 +4615,6 @@ fn genUnOp(self: *Self, maybe_inst: ?Air.Inst.Index, tag: Air.Inst.Tag, src_air: if (src_ty.zigTypeTag() == .Vector) { return self.fail("TODO implement 
genUnOp for {}", .{src_ty.fmt(self.bin_file.options.module.?)}); } - if (src_ty.abiSize(self.target.*) > 8) { - return self.fail("TODO implement genUnOp for {}", .{src_ty.fmt(self.bin_file.options.module.?)}); - } switch (src_mcv) { .eflags => |cc| switch (tag) { @@ -4646,13 +4630,13 @@ fn genUnOp(self: *Self, maybe_inst: ?Air.Inst.Index, tag: Air.Inst.Tag, src_air: }; defer if (src_lock) |lock| self.register_manager.unlockReg(lock); - const dst_mcv: MCValue = if (maybe_inst) |inst| - if (self.reuseOperand(inst, src_air, 0, src_mcv)) - src_mcv - else - try self.copyToRegisterWithInstTracking(inst, src_ty, src_mcv) - else - .{ .register = try self.copyToTmpRegister(src_ty, src_mcv) }; + const dst_mcv: MCValue = dst: { + if (maybe_inst) |inst| if (self.reuseOperand(inst, src_air, 0, src_mcv)) break :dst src_mcv; + + const dst_mcv = try self.allocRegOrMemAdvanced(src_ty, maybe_inst, true); + try self.genCopy(src_ty, dst_mcv, src_mcv); + break :dst dst_mcv; + }; const dst_lock = switch (dst_mcv) { .register => |reg| self.register_manager.lockReg(reg), else => null, @@ -4661,19 +4645,33 @@ fn genUnOp(self: *Self, maybe_inst: ?Air.Inst.Index, tag: Air.Inst.Tag, src_air: switch (tag) { .not => { + const limb_abi_size = @intCast(u16, @min(src_ty.abiSize(self.target.*), 8)); const int_info = if (src_ty.tag() == .bool) std.builtin.Type.Int{ .signedness = .unsigned, .bits = 1 } else src_ty.intInfo(self.target.*); - const extra_bits = self.regExtraBits(src_ty); - if (int_info.signedness == .unsigned and extra_bits > 0) { - const mask = (@as(u64, 1) << @intCast(u6, src_ty.bitSize(self.target.*))) - 1; - try self.genBinOpMir(.xor, src_ty, dst_mcv, .{ .immediate = mask }); - } else try self.genUnOpMir(.not, src_ty, dst_mcv); - }, + var byte_off: i32 = 0; + while (byte_off * 8 < int_info.bits) : (byte_off += limb_abi_size) { + var limb_pl = Type.Payload.Bits{ + .base = .{ .tag = switch (int_info.signedness) { + .signed => .int_signed, + .unsigned => .int_unsigned, + } }, + .data = @intCast(u16, @min(int_info.bits - byte_off * 8, limb_abi_size * 8)), + }; + const limb_ty = Type.initPayload(&limb_pl.base); + const limb_mcv = switch (byte_off) { + 0 => dst_mcv, + else => dst_mcv.address().offset(byte_off).deref(), + }; + if (limb_pl.base.tag == .int_unsigned and self.regExtraBits(limb_ty) > 0) { + const mask = @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - limb_pl.data); + try self.genBinOpMir(.xor, limb_ty, limb_mcv, .{ .immediate = mask }); + } else try self.genUnOpMir(.not, limb_ty, limb_mcv); + } + }, .neg => try self.genUnOpMir(.neg, src_ty, dst_mcv), - else => unreachable, } return dst_mcv; @@ -4714,17 +4712,7 @@ fn genUnOpMir(self: *Self, mir_tag: Mir.Inst.Tag, dst_ty: Type, dst_mcv: MCValue }, .indirect, .load_frame => try self.asmMemory( mir_tag, - Memory.sib(Memory.PtrSize.fromSize(abi_size), switch (dst_mcv) { - .indirect => |reg_off| .{ - .base = .{ .reg = reg_off.reg }, - .disp = reg_off.off, - }, - .load_frame => |frame_addr| .{ - .base = .{ .frame = frame_addr.index }, - .disp = frame_addr.off, - }, - else => unreachable, - }), + dst_mcv.mem(Memory.PtrSize.fromSize(abi_size)), ), } } @@ -8179,12 +8167,9 @@ fn atomicOp( registerAlias(val_reg, cmov_abi_size), cc, ), - .load_frame => |frame_addr| try self.asmCmovccRegisterMemory( + .memory, .indirect, .load_frame => try self.asmCmovccRegisterMemory( registerAlias(tmp_reg, cmov_abi_size), - Memory.sib(Memory.PtrSize.fromSize(cmov_abi_size), .{ - .base = .{ .frame = frame_addr.index }, - .disp = frame_addr.off, - }), + 
val_mcv.mem(Memory.PtrSize.fromSize(cmov_abi_size)), cc, ), else => { @@ -8207,72 +8192,62 @@ fn atomicOp( } else { try self.asmRegisterMemory(.mov, .rax, Memory.sib(.qword, .{ .base = ptr_mem.sib.base, - .scale_index = ptr_mem.sib.scale_index, + .scale_index = ptr_mem.scaleIndex(), .disp = ptr_mem.sib.disp + 0, })); try self.asmRegisterMemory(.mov, .rdx, Memory.sib(.qword, .{ .base = ptr_mem.sib.base, - .scale_index = ptr_mem.sib.scale_index, + .scale_index = ptr_mem.scaleIndex(), .disp = ptr_mem.sib.disp + 8, })); const loop = @intCast(u32, self.mir_instructions.len); - switch (val_mcv) { - .load_frame => |frame_addr| { - const val_lo_mem = Memory.sib(.qword, .{ - .base = .{ .frame = frame_addr.index }, - .disp = frame_addr.off + 0, - }); - const val_hi_mem = Memory.sib(.qword, .{ - .base = .{ .frame = frame_addr.index }, - .disp = frame_addr.off + 8, - }); - - if (rmw_op != std.builtin.AtomicRmwOp.Xchg) { - try self.asmRegisterRegister(.mov, .rbx, .rax); - try self.asmRegisterRegister(.mov, .rcx, .rdx); - } - if (rmw_op) |op| switch (op) { - .Xchg => { - try self.asmRegisterMemory(.mov, .rbx, val_lo_mem); - try self.asmRegisterMemory(.mov, .rcx, val_hi_mem); - }, - .Add => { - try self.asmRegisterMemory(.add, .rbx, val_lo_mem); - try self.asmRegisterMemory(.adc, .rcx, val_hi_mem); - }, - .Sub => { - try self.asmRegisterMemory(.sub, .rbx, val_lo_mem); - try self.asmRegisterMemory(.sbb, .rcx, val_hi_mem); - }, - .And => { - try self.asmRegisterMemory(.@"and", .rbx, val_lo_mem); - try self.asmRegisterMemory(.@"and", .rcx, val_hi_mem); - }, - .Nand => { - try self.asmRegisterMemory(.@"and", .rbx, val_lo_mem); - try self.asmRegisterMemory(.@"and", .rcx, val_hi_mem); - try self.asmRegister(.not, .rbx); - try self.asmRegister(.not, .rcx); - }, - .Or => { - try self.asmRegisterMemory(.@"or", .rbx, val_lo_mem); - try self.asmRegisterMemory(.@"or", .rcx, val_hi_mem); - }, - .Xor => { - try self.asmRegisterMemory(.xor, .rbx, val_lo_mem); - try self.asmRegisterMemory(.xor, .rcx, val_hi_mem); - }, - else => return self.fail( - "TODO implement x86 atomic loop for large abi {s}", - .{@tagName(op)}, - ), - }; - }, - else => return self.fail( - "TODO implement x86 atomic loop for large abi {s}", - .{@tagName(val_mcv)}, - ), + const val_mem_mcv: MCValue = switch (val_mcv) { + .memory, .indirect, .load_frame => val_mcv, + else => .{ .indirect = .{ + .reg = try self.copyToTmpRegister(Type.usize, val_mcv.address()), + } }, + }; + const val_lo_mem = val_mem_mcv.mem(.qword); + const val_hi_mem = val_mem_mcv.address().offset(8).deref().mem(.qword); + if (rmw_op != std.builtin.AtomicRmwOp.Xchg) { + try self.asmRegisterRegister(.mov, .rbx, .rax); + try self.asmRegisterRegister(.mov, .rcx, .rdx); } + if (rmw_op) |op| switch (op) { + .Xchg => { + try self.asmRegisterMemory(.mov, .rbx, val_lo_mem); + try self.asmRegisterMemory(.mov, .rcx, val_hi_mem); + }, + .Add => { + try self.asmRegisterMemory(.add, .rbx, val_lo_mem); + try self.asmRegisterMemory(.adc, .rcx, val_hi_mem); + }, + .Sub => { + try self.asmRegisterMemory(.sub, .rbx, val_lo_mem); + try self.asmRegisterMemory(.sbb, .rcx, val_hi_mem); + }, + .And => { + try self.asmRegisterMemory(.@"and", .rbx, val_lo_mem); + try self.asmRegisterMemory(.@"and", .rcx, val_hi_mem); + }, + .Nand => { + try self.asmRegisterMemory(.@"and", .rbx, val_lo_mem); + try self.asmRegisterMemory(.@"and", .rcx, val_hi_mem); + try self.asmRegister(.not, .rbx); + try self.asmRegister(.not, .rcx); + }, + .Or => { + try self.asmRegisterMemory(.@"or", .rbx, val_lo_mem); + try 
self.asmRegisterMemory(.@"or", .rcx, val_hi_mem); + }, + .Xor => { + try self.asmRegisterMemory(.xor, .rbx, val_lo_mem); + try self.asmRegisterMemory(.xor, .rcx, val_hi_mem); + }, + else => return self.fail("TODO implement x86 atomic loop for {} {s}", .{ + val_ty.fmt(self.bin_file.options.module.?), @tagName(op), + }), + }; _ = try self.addInst(.{ .tag = .cmpxchgb, .ops = .lock_m_sib, .data = .{ .payload = try self.addExtra(Mir.MemorySib.encode(ptr_mem)), } }); @@ -9177,15 +9152,16 @@ fn truncateRegister(self: *Self, ty: Type, reg: Register) !void { } fn regBitSize(self: *Self, ty: Type) u64 { + const abi_size = ty.abiSize(self.target.*); return switch (ty.zigTypeTag()) { - else => switch (ty.abiSize(self.target.*)) { + else => switch (abi_size) { 1 => 8, 2 => 16, 3...4 => 32, 5...8 => 64, else => unreachable, }, - .Float => switch (ty.abiSize(self.target.*)) { + .Float => switch (abi_size) { 1...16 => 128, 17...32 => 256, else => unreachable, @@ -9197,10 +9173,6 @@ fn regExtraBits(self: *Self, ty: Type) u64 { return self.regBitSize(ty) - ty.bitSize(self.target.*); } -fn hasAvxSupport(target: Target) bool { - return Target.x86.featureSetHasAny(target.cpu.features, .{ .avx, .avx2 }); -} - fn getSymbolIndexForDecl(self: *Self, decl_index: Module.Decl.Index) !u32 { if (self.bin_file.cast(link.File.MachO)) |macho_file| { const atom = try macho_file.getOrCreateAtomForDecl(decl_index); diff --git a/test/behavior/atomics.zig b/test/behavior/atomics.zig index 19afa79683..56854d43d8 100644 --- a/test/behavior/atomics.zig +++ b/test/behavior/atomics.zig @@ -112,7 +112,6 @@ test "128-bit cmpxchg" { if (!supports_128_bit_atomics) return error.SkipZigTest; if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO diff --git a/test/behavior/math.zig b/test/behavior/math.zig index 03a92a83e9..01b927b913 100644 --- a/test/behavior/math.zig +++ b/test/behavior/math.zig @@ -379,7 +379,6 @@ fn testBinaryNot(x: u16) !void { test "binary not 128-bit" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO -- cgit v1.2.3 From 3c2636a83dbb964f80f93131b30222ad3889a7e9 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Sat, 29 Apr 2023 17:58:09 -0400 Subject: x86_64: implement more forms of wide mul with overflow --- src/arch/x86_64/CodeGen.zig | 263 ++++++++++++++++++++------------------------ 1 file changed, 122 insertions(+), 141 deletions(-) (limited to 'src/arch') diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 670dc6840c..a09759951d 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -2434,12 +2434,7 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const frame_index = try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, self.target.*)); - try self.genSetFrameTruncatedOverflowCompare( - tuple_ty, - frame_index, - partial_mcv.register, - cc, - ); + try 
self.genSetFrameTruncatedOverflowCompare(tuple_ty, frame_index, partial_mcv, cc); break :result .{ .load_frame = .{ .index = frame_index } }; }, else => unreachable, @@ -2511,12 +2506,7 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const frame_index = try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, self.target.*)); - try self.genSetFrameTruncatedOverflowCompare( - tuple_ty, - frame_index, - partial_mcv.register, - cc, - ); + try self.genSetFrameTruncatedOverflowCompare(tuple_ty, frame_index, partial_mcv, cc); break :result .{ .load_frame = .{ .index = frame_index } }; }, else => unreachable, @@ -2529,173 +2519,164 @@ fn genSetFrameTruncatedOverflowCompare( self: *Self, tuple_ty: Type, frame_index: FrameIndex, - reg: Register, + src_mcv: MCValue, cc: Condition, ) !void { - const reg_lock = self.register_manager.lockReg(reg); - defer if (reg_lock) |lock| self.register_manager.unlockReg(lock); + const src_lock = switch (src_mcv) { + .register => |reg| self.register_manager.lockReg(reg), + else => null, + }; + defer if (src_lock) |lock| self.register_manager.unlockReg(lock); const ty = tuple_ty.structFieldType(0); const int_info = ty.intInfo(self.target.*); - const extended_ty = switch (int_info.signedness) { - .signed => Type.isize, - .unsigned => ty, + + var hi_limb_pl = Type.Payload.Bits{ + .base = .{ .tag = switch (int_info.signedness) { + .signed => .int_signed, + .unsigned => .int_unsigned, + } }, + .data = (int_info.bits - 1) % 64 + 1, }; + const hi_limb_ty = Type.initPayload(&hi_limb_pl.base); - const temp_regs = try self.register_manager.allocRegs(3, .{ null, null, null }, gp); - const temp_regs_locks = self.register_manager.lockRegsAssumeUnused(3, temp_regs); - defer for (temp_regs_locks) |rreg| { - self.register_manager.unlockReg(rreg); + var rest_pl = Type.Payload.Bits{ + .base = .{ .tag = .int_unsigned }, + .data = int_info.bits - hi_limb_pl.data, }; + const rest_ty = Type.initPayload(&rest_pl.base); + + const temp_regs = try self.register_manager.allocRegs(3, .{ null, null, null }, gp); + const temp_locks = self.register_manager.lockRegsAssumeUnused(3, temp_regs); + defer for (temp_locks) |lock| self.register_manager.unlockReg(lock); const overflow_reg = temp_regs[0]; try self.asmSetccRegister(overflow_reg.to8(), cc); const scratch_reg = temp_regs[1]; - try self.genSetReg(scratch_reg, extended_ty, .{ .register = reg }); - try self.truncateRegister(ty, scratch_reg); - try self.genBinOpMir( - .cmp, - extended_ty, - .{ .register = reg }, - .{ .register = scratch_reg }, - ); + const hi_limb_off = if (int_info.bits <= 64) 0 else (int_info.bits - 1) / 64 * 8; + const hi_limb_mcv = if (hi_limb_off > 0) + src_mcv.address().offset(int_info.bits / 64 * 8).deref() + else + src_mcv; + try self.genSetReg(scratch_reg, hi_limb_ty, hi_limb_mcv); + try self.truncateRegister(hi_limb_ty, scratch_reg); + try self.genBinOpMir(.cmp, hi_limb_ty, .{ .register = scratch_reg }, hi_limb_mcv); const eq_reg = temp_regs[2]; try self.asmSetccRegister(eq_reg.to8(), .ne); - try self.genBinOpMir( - .@"or", - Type.u8, - .{ .register = overflow_reg }, - .{ .register = eq_reg }, - ); + try self.genBinOpMir(.@"or", Type.u8, .{ .register = overflow_reg }, .{ .register = eq_reg }); + const payload_off = @intCast(i32, tuple_ty.structFieldOffset(0, self.target.*)); + if (hi_limb_off > 0) try self.genSetMem(.{ .frame = frame_index }, payload_off, rest_ty, src_mcv); try self.genSetMem( .{ .frame = frame_index }, - @intCast(i32, tuple_ty.structFieldOffset(1, self.target.*)), - 
tuple_ty.structFieldType(1), - .{ .register = overflow_reg.to8() }, + payload_off + hi_limb_off, + hi_limb_ty, + .{ .register = scratch_reg }, ); try self.genSetMem( .{ .frame = frame_index }, - @intCast(i32, tuple_ty.structFieldOffset(0, self.target.*)), - ty, - .{ .register = scratch_reg }, + @intCast(i32, tuple_ty.structFieldOffset(1, self.target.*)), + tuple_ty.structFieldType(1), + .{ .register = overflow_reg.to8() }, ); } fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; - const result: MCValue = result: { - const dst_ty = self.air.typeOf(bin_op.lhs); - switch (dst_ty.zigTypeTag()) { - .Vector => return self.fail("TODO implement mul_with_overflow for Vector type", .{}), - .Int => { - try self.spillEflagsIfOccupied(); + const dst_ty = self.air.typeOf(bin_op.lhs); + const result: MCValue = switch (dst_ty.zigTypeTag()) { + .Vector => return self.fail("TODO implement mul_with_overflow for Vector type", .{}), + .Int => result: { + try self.spillEflagsIfOccupied(); + try self.spillRegisters(&.{ .rax, .rdx }); - const dst_info = dst_ty.intInfo(self.target.*); - const cc: Condition = switch (dst_info.signedness) { - .unsigned => .c, - .signed => .o, + const dst_info = dst_ty.intInfo(self.target.*); + const cc: Condition = switch (dst_info.signedness) { + .unsigned => .c, + .signed => .o, + }; + + const lhs_active_bits = self.activeIntBits(bin_op.lhs); + const rhs_active_bits = self.activeIntBits(bin_op.rhs); + var src_pl = Type.Payload.Bits{ .base = .{ .tag = switch (dst_info.signedness) { + .signed => .int_signed, + .unsigned => .int_unsigned, + } }, .data = math.max3(lhs_active_bits, rhs_active_bits, dst_info.bits / 2) }; + const src_ty = Type.initPayload(&src_pl.base); + + const lhs = try self.resolveInst(bin_op.lhs); + const rhs = try self.resolveInst(bin_op.rhs); + + const tuple_ty = self.air.typeOfIndex(inst); + const extra_bits = if (dst_info.bits <= 64) + self.regExtraBits(dst_ty) + else + dst_info.bits % 64; + const partial_mcv = if (dst_info.signedness == .signed and extra_bits > 0) dst: { + const rhs_lock: ?RegisterLock = switch (rhs) { + .register => |reg| self.register_manager.lockRegAssumeUnused(reg), + else => null, }; + defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock); - const tuple_ty = self.air.typeOfIndex(inst); - if (dst_info.bits >= 8 and math.isPowerOfTwo(dst_info.bits)) { - var src_pl = Type.Payload.Bits{ .base = .{ .tag = switch (dst_info.signedness) { - .signed => .int_signed, - .unsigned => .int_unsigned, - } }, .data = math.max3( - self.activeIntBits(bin_op.lhs), - self.activeIntBits(bin_op.rhs), - dst_info.bits / 2, - ) }; - const src_ty = Type.initPayload(&src_pl.base); + const dst_reg: Register = blk: { + if (lhs.isRegister()) break :blk lhs.register; + break :blk try self.copyToTmpRegister(dst_ty, lhs); + }; + const dst_mcv = MCValue{ .register = dst_reg }; + const dst_reg_lock = self.register_manager.lockRegAssumeUnused(dst_reg); + defer self.register_manager.unlockReg(dst_reg_lock); - try self.spillRegisters(&.{ .rax, .rdx }); - const lhs = try self.resolveInst(bin_op.lhs); - const rhs = try self.resolveInst(bin_op.rhs); + const rhs_mcv: MCValue = blk: { + if (rhs.isRegister() or rhs.isMemory()) break :blk rhs; + break :blk MCValue{ .register = try self.copyToTmpRegister(dst_ty, rhs) }; + }; + const rhs_mcv_lock: ?RegisterLock = switch (rhs_mcv) { + .register => |reg| self.register_manager.lockReg(reg), + 
else => null, + }; + defer if (rhs_mcv_lock) |lock| self.register_manager.unlockReg(lock); - const partial_mcv = try self.genMulDivBinOp(.mul, null, dst_ty, src_ty, lhs, rhs); - switch (partial_mcv) { - .register => |reg| { - self.eflags_inst = inst; - break :result .{ .register_overflow = .{ .reg = reg, .eflags = cc } }; - }, - else => {}, - } + try self.genIntMulComplexOpMir(Type.isize, dst_mcv, rhs_mcv); + break :dst dst_mcv; + } else try self.genMulDivBinOp(.mul, null, dst_ty, src_ty, lhs, rhs); - // For now, this is the only supported multiply that doesn't fit in a register. - assert(dst_info.bits == 128 and src_pl.data == 64); + switch (partial_mcv) { + .register => |reg| if (extra_bits == 0) { + self.eflags_inst = inst; + break :result .{ .register_overflow = .{ .reg = reg, .eflags = cc } }; + } else { const frame_index = try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, self.target.*)); - try self.genSetMem( - .{ .frame = frame_index }, - @intCast(i32, tuple_ty.structFieldOffset(1, self.target.*)), - tuple_ty.structFieldType(1), - .{ .immediate = 0 }, // overflow is impossible for 64-bit*64-bit -> 128-bit - ); - try self.genSetMem( - .{ .frame = frame_index }, - @intCast(i32, tuple_ty.structFieldOffset(0, self.target.*)), - tuple_ty.structFieldType(0), - partial_mcv, - ); + try self.genSetFrameTruncatedOverflowCompare(tuple_ty, frame_index, partial_mcv, cc); break :result .{ .load_frame = .{ .index = frame_index } }; - } - - const dst_reg: Register = dst_reg: { - switch (dst_info.signedness) { - .signed => { - const lhs = try self.resolveInst(bin_op.lhs); - const rhs = try self.resolveInst(bin_op.rhs); - - const rhs_lock: ?RegisterLock = switch (rhs) { - .register => |reg| self.register_manager.lockRegAssumeUnused(reg), - else => null, - }; - defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock); - - const dst_reg: Register = blk: { - if (lhs.isRegister()) break :blk lhs.register; - break :blk try self.copyToTmpRegister(dst_ty, lhs); - }; - const dst_reg_lock = self.register_manager.lockRegAssumeUnused(dst_reg); - defer self.register_manager.unlockReg(dst_reg_lock); - - const rhs_mcv: MCValue = blk: { - if (rhs.isRegister() or rhs.isMemory()) break :blk rhs; - break :blk MCValue{ .register = try self.copyToTmpRegister(dst_ty, rhs) }; - }; - const rhs_mcv_lock: ?RegisterLock = switch (rhs_mcv) { - .register => |reg| self.register_manager.lockReg(reg), - else => null, - }; - defer if (rhs_mcv_lock) |lock| self.register_manager.unlockReg(lock); - - try self.genIntMulComplexOpMir(Type.isize, .{ .register = dst_reg }, rhs_mcv); - - break :dst_reg dst_reg; - }, - .unsigned => { - try self.spillRegisters(&.{ .rax, .rdx }); - - const lhs = try self.resolveInst(bin_op.lhs); - const rhs = try self.resolveInst(bin_op.rhs); - - const dst_mcv = try self.genMulDivBinOp(.mul, null, dst_ty, dst_ty, lhs, rhs); - break :dst_reg dst_mcv.register; - }, - } - }; + }, + // For now, this is the only supported multiply that doesn't fit in a register. 
+ else => assert(dst_info.bits <= 128 and src_pl.data == 64), + } - const frame_index = - try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, self.target.*)); - try self.genSetFrameTruncatedOverflowCompare(tuple_ty, frame_index, dst_reg, cc); - break :result .{ .load_frame = .{ .index = frame_index } }; - }, - else => unreachable, - } + const frame_index = + try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, self.target.*)); + if (dst_info.bits >= lhs_active_bits + rhs_active_bits) { + try self.genSetMem( + .{ .frame = frame_index }, + @intCast(i32, tuple_ty.structFieldOffset(0, self.target.*)), + tuple_ty.structFieldType(0), + partial_mcv, + ); + try self.genSetMem( + .{ .frame = frame_index }, + @intCast(i32, tuple_ty.structFieldOffset(1, self.target.*)), + tuple_ty.structFieldType(1), + .{ .immediate = 0 }, // overflow is impossible for 64-bit*64-bit -> 128-bit + ); + } else try self.genSetFrameTruncatedOverflowCompare(tuple_ty, frame_index, partial_mcv, cc); + break :result .{ .load_frame = .{ .index = frame_index } }; + }, + else => unreachable, }; return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } -- cgit v1.2.3 From c81878978a41f117818ac5b4918cd952f123bad7 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Sat, 29 Apr 2023 17:56:48 -0400 Subject: x86_64: optimize wide mul with overflow --- src/arch/x86_64/CodeGen.zig | 61 ++++++++++++++++++++++++++------------------- 1 file changed, 36 insertions(+), 25 deletions(-) (limited to 'src/arch') diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index a09759951d..7f0d07cf9b 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -2520,7 +2520,7 @@ fn genSetFrameTruncatedOverflowCompare( tuple_ty: Type, frame_index: FrameIndex, src_mcv: MCValue, - cc: Condition, + overflow_cc: ?Condition, ) !void { const src_lock = switch (src_mcv) { .register => |reg| self.register_manager.lockReg(reg), @@ -2551,7 +2551,7 @@ fn genSetFrameTruncatedOverflowCompare( defer for (temp_locks) |lock| self.register_manager.unlockReg(lock); const overflow_reg = temp_regs[0]; - try self.asmSetccRegister(overflow_reg.to8(), cc); + if (overflow_cc) |cc| try self.asmSetccRegister(overflow_reg.to8(), cc); const scratch_reg = temp_regs[1]; const hi_limb_off = if (int_info.bits <= 64) 0 else (int_info.bits - 1) / 64 * 8; @@ -2564,8 +2564,10 @@ fn genSetFrameTruncatedOverflowCompare( try self.genBinOpMir(.cmp, hi_limb_ty, .{ .register = scratch_reg }, hi_limb_mcv); const eq_reg = temp_regs[2]; - try self.asmSetccRegister(eq_reg.to8(), .ne); - try self.genBinOpMir(.@"or", Type.u8, .{ .register = overflow_reg }, .{ .register = eq_reg }); + if (overflow_cc) |_| { + try self.asmSetccRegister(eq_reg.to8(), .ne); + try self.genBinOpMir(.@"or", Type.u8, .{ .register = overflow_reg }, .{ .register = eq_reg }); + } const payload_off = @intCast(i32, tuple_ty.structFieldOffset(0, self.target.*)); if (hi_limb_off > 0) try self.genSetMem(.{ .frame = frame_index }, payload_off, rest_ty, src_mcv); @@ -2579,7 +2581,7 @@ fn genSetFrameTruncatedOverflowCompare( .{ .frame = frame_index }, @intCast(i32, tuple_ty.structFieldOffset(1, self.target.*)), tuple_ty.structFieldType(1), - .{ .register = overflow_reg.to8() }, + if (overflow_cc) |_| .{ .register = overflow_reg.to8() } else .{ .eflags = .ne }, ); } @@ -2654,27 +2656,36 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { try self.genSetFrameTruncatedOverflowCompare(tuple_ty, frame_index, partial_mcv, cc); break :result .{ .load_frame = .{ .index = 
frame_index } }; }, - // For now, this is the only supported multiply that doesn't fit in a register. - else => assert(dst_info.bits <= 128 and src_pl.data == 64), - } + else => { + // For now, this is the only supported multiply that doesn't fit in a register, + // so cc being set is impossible. - const frame_index = - try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, self.target.*)); - if (dst_info.bits >= lhs_active_bits + rhs_active_bits) { - try self.genSetMem( - .{ .frame = frame_index }, - @intCast(i32, tuple_ty.structFieldOffset(0, self.target.*)), - tuple_ty.structFieldType(0), - partial_mcv, - ); - try self.genSetMem( - .{ .frame = frame_index }, - @intCast(i32, tuple_ty.structFieldOffset(1, self.target.*)), - tuple_ty.structFieldType(1), - .{ .immediate = 0 }, // overflow is impossible for 64-bit*64-bit -> 128-bit - ); - } else try self.genSetFrameTruncatedOverflowCompare(tuple_ty, frame_index, partial_mcv, cc); - break :result .{ .load_frame = .{ .index = frame_index } }; + assert(dst_info.bits <= 128 and src_pl.data == 64); + + const frame_index = + try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, self.target.*)); + if (dst_info.bits >= lhs_active_bits + rhs_active_bits) { + try self.genSetMem( + .{ .frame = frame_index }, + @intCast(i32, tuple_ty.structFieldOffset(0, self.target.*)), + tuple_ty.structFieldType(0), + partial_mcv, + ); + try self.genSetMem( + .{ .frame = frame_index }, + @intCast(i32, tuple_ty.structFieldOffset(1, self.target.*)), + tuple_ty.structFieldType(1), + .{ .immediate = 0 }, + ); + } else try self.genSetFrameTruncatedOverflowCompare( + tuple_ty, + frame_index, + partial_mcv, + null, + ); + break :result .{ .load_frame = .{ .index = frame_index } }; + }, + } }, else => unreachable, }; -- cgit v1.2.3 From f37ca3fa7370c501c630c53b370fecdeb313e3be Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Sat, 29 Apr 2023 19:31:34 -0400 Subject: link: cleanup lazy alignment This gets the alignment from the code that creates a lazy symbol instead of guessing it at every use. 
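A minimal sketch of the call pattern this commit establishes, stitched together from the Elf hunks below (surrounding declarations and the full error path are elided, so this is illustrative rather than compilable on its own):

    // Call sites no longer pass a guessed alignment when requesting a lazy atom:
    const atom_index = try elf_file.getOrCreateAtomForLazySymbol(
        .{ .kind = .const_data, .ty = Type.anyerror },
    );

    // Instead, generateLazySymbol returns the alignment it actually used
    // alongside the usual Result, and the linker consumes both:
    const res = try codegen.generateLazySymbol(&self.base, src, sym, &code_buffer, .none, .{
        .parent_atom_index = local_sym_index,
    });
    const code = switch (res.res) {
        .ok => code_buffer.items,
        .fail => |em| {
            log.err("{s}", .{em.msg});
            return error.CodegenFail; // assumed error path; the hunks elide it
        },
    };
    const vaddr = try self.allocateAtom(atom_index, code.len, res.alignment);

Centralizing the alignment decision in codegen means the three backends (Coff, Elf, MachO) can no longer drift out of sync with the actual layout of the lazily generated data; previously every call site hard-coded "4, // dword alignment" and had to be kept consistent by hand.
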
--- src/arch/x86_64/CodeGen.zig | 6 ------ src/codegen.zig | 8 ++++---- src/link/Coff.zig | 18 +++++------------- src/link/Elf.zig | 12 ++++-------- src/link/MachO.zig | 14 +++++--------- 5 files changed, 18 insertions(+), 40 deletions(-) (limited to 'src/arch') diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 7f0d07cf9b..eee89e9ded 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -6416,7 +6416,6 @@ fn airCmpLtErrorsLen(self: *Self, inst: Air.Inst.Index) !void { if (self.bin_file.cast(link.File.Elf)) |elf_file| { const atom_index = try elf_file.getOrCreateAtomForLazySymbol( .{ .kind = .const_data, .ty = Type.anyerror }, - 4, // dword alignment ); const atom = elf_file.getAtom(atom_index); _ = try atom.getOrCreateOffsetTableEntry(elf_file); @@ -6429,14 +6428,12 @@ fn airCmpLtErrorsLen(self: *Self, inst: Air.Inst.Index) !void { } else if (self.bin_file.cast(link.File.Coff)) |coff_file| { const atom_index = try coff_file.getOrCreateAtomForLazySymbol( .{ .kind = .const_data, .ty = Type.anyerror }, - 4, // dword alignment ); const sym_index = coff_file.getAtom(atom_index).getSymbolIndex().?; try self.genSetReg(addr_reg, Type.usize, .{ .lea_got = sym_index }); } else if (self.bin_file.cast(link.File.MachO)) |macho_file| { const atom_index = try macho_file.getOrCreateAtomForLazySymbol( .{ .kind = .const_data, .ty = Type.anyerror }, - 4, // dword alignment ); const sym_index = macho_file.getAtom(atom_index).getSymbolIndex().?; try self.genSetReg(addr_reg, Type.usize, .{ .lea_got = sym_index }); @@ -8504,7 +8501,6 @@ fn airErrorName(self: *Self, inst: Air.Inst.Index) !void { if (self.bin_file.cast(link.File.Elf)) |elf_file| { const atom_index = try elf_file.getOrCreateAtomForLazySymbol( .{ .kind = .const_data, .ty = Type.anyerror }, - 4, // dword alignment ); const atom = elf_file.getAtom(atom_index); _ = try atom.getOrCreateOffsetTableEntry(elf_file); @@ -8517,14 +8513,12 @@ fn airErrorName(self: *Self, inst: Air.Inst.Index) !void { } else if (self.bin_file.cast(link.File.Coff)) |coff_file| { const atom_index = try coff_file.getOrCreateAtomForLazySymbol( .{ .kind = .const_data, .ty = Type.anyerror }, - 4, // dword alignment ); const sym_index = coff_file.getAtom(atom_index).getSymbolIndex().?; try self.genSetReg(addr_reg, Type.usize, .{ .lea_got = sym_index }); } else if (self.bin_file.cast(link.File.MachO)) |macho_file| { const atom_index = try macho_file.getOrCreateAtomForLazySymbol( .{ .kind = .const_data, .ty = Type.anyerror }, - 4, // dword alignment ); const sym_index = macho_file.getAtom(atom_index).getSymbolIndex().?; try self.genSetReg(addr_reg, Type.usize, .{ .lea_got = sym_index }); diff --git a/src/codegen.zig b/src/codegen.zig index f967566034..690e96d25c 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -104,7 +104,7 @@ pub fn generateLazySymbol( code: *std.ArrayList(u8), debug_output: DebugInfoOutput, reloc_info: RelocInfo, -) CodeGenError!Result { +) CodeGenError!struct { res: Result, alignment: u32 } { _ = debug_output; _ = reloc_info; @@ -133,13 +133,13 @@ pub fn generateLazySymbol( code.appendAssumeCapacity(0); } mem.writeInt(u32, code.items[offset..][0..4], @intCast(u32, code.items.len), endian); - return Result.ok; - } else return .{ .fail = try ErrorMsg.create( + return .{ .res = Result.ok, .alignment = 4 }; + } else return .{ .res = .{ .fail = try ErrorMsg.create( bin_file.allocator, src_loc, "TODO implement generateLazySymbol for {s} {}", .{ @tagName(lazy_sym.kind), lazy_sym.ty.fmt(mod) }, - ) }; + ) }, .alignment 
= undefined }; } pub fn generateSymbol( diff --git a/src/link/Coff.zig b/src/link/Coff.zig index 0af681bb5e..d20d17f2b1 100644 --- a/src/link/Coff.zig +++ b/src/link/Coff.zig @@ -145,7 +145,6 @@ const LazySymbolTable = std.AutoArrayHashMapUnmanaged(Module.Decl.OptionalIndex, const LazySymbolMetadata = struct { text_atom: ?Atom.Index = null, rdata_atom: ?Atom.Index = null, - alignment: u32, }; const DeclMetadata = struct { @@ -1195,13 +1194,11 @@ fn updateLazySymbol(self: *Coff, decl: Module.Decl.OptionalIndex, metadata: Lazy link.File.LazySymbol.initDecl(.code, decl, mod), atom, self.text_section_index.?, - metadata.alignment, ); if (metadata.rdata_atom) |atom| try self.updateLazySymbolAtom( link.File.LazySymbol.initDecl(.const_data, decl, mod), atom, self.rdata_section_index.?, - metadata.alignment, ); } @@ -1210,7 +1207,6 @@ fn updateLazySymbolAtom( sym: link.File.LazySymbol, atom_index: Atom.Index, section_index: u16, - required_alignment: u32, ) !void { const gpa = self.base.allocator; const mod = self.base.options.module.?; @@ -1238,7 +1234,7 @@ fn updateLazySymbolAtom( const res = try codegen.generateLazySymbol(&self.base, src, sym, &code_buffer, .none, .{ .parent_atom_index = local_sym_index, }); - const code = switch (res) { + const code = switch (res.res) { .ok => code_buffer.items, .fail => |em| { log.err("{s}", .{em.msg}); @@ -1252,11 +1248,11 @@ fn updateLazySymbolAtom( symbol.section_number = @intToEnum(coff.SectionNumber, section_index + 1); symbol.type = .{ .complex_type = .NULL, .base_type = .NULL }; - const vaddr = try self.allocateAtom(atom_index, code_len, required_alignment); + const vaddr = try self.allocateAtom(atom_index, code_len, res.alignment); errdefer self.freeAtom(atom_index); log.debug("allocated atom for {s} at 0x{x}", .{ name, vaddr }); - log.debug(" (required alignment 0x{x})", .{required_alignment}); + log.debug(" (required alignment 0x{x})", .{res.alignment}); atom.size = code_len; symbol.value = vaddr; @@ -1265,14 +1261,10 @@ fn updateLazySymbolAtom( try self.writeAtom(atom_index, code); } -pub fn getOrCreateAtomForLazySymbol( - self: *Coff, - sym: link.File.LazySymbol, - alignment: u32, -) !Atom.Index { +pub fn getOrCreateAtomForLazySymbol(self: *Coff, sym: link.File.LazySymbol) !Atom.Index { const gop = try self.lazy_syms.getOrPut(self.base.allocator, sym.getDecl()); errdefer _ = self.lazy_syms.pop(); - if (!gop.found_existing) gop.value_ptr.* = .{ .alignment = alignment }; + if (!gop.found_existing) gop.value_ptr.* = .{}; const atom = switch (sym.kind) { .code => &gop.value_ptr.text_atom, .const_data => &gop.value_ptr.rdata_atom, diff --git a/src/link/Elf.zig b/src/link/Elf.zig index 48d952b6cc..b9c113f834 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -67,7 +67,6 @@ const Section = struct { const LazySymbolMetadata = struct { text_atom: ?Atom.Index = null, rodata_atom: ?Atom.Index = null, - alignment: u32, }; const DeclMetadata = struct { @@ -2377,10 +2376,10 @@ pub fn freeDecl(self: *Elf, decl_index: Module.Decl.Index) void { } } -pub fn getOrCreateAtomForLazySymbol(self: *Elf, sym: File.LazySymbol, alignment: u32) !Atom.Index { +pub fn getOrCreateAtomForLazySymbol(self: *Elf, sym: File.LazySymbol) !Atom.Index { const gop = try self.lazy_syms.getOrPut(self.base.allocator, sym.getDecl()); errdefer _ = self.lazy_syms.pop(); - if (!gop.found_existing) gop.value_ptr.* = .{ .alignment = alignment }; + if (!gop.found_existing) gop.value_ptr.* = .{}; const atom = switch (sym.kind) { .code => &gop.value_ptr.text_atom, .const_data => 
&gop.value_ptr.rodata_atom, @@ -2663,13 +2662,11 @@ fn updateLazySymbol(self: *Elf, decl: Module.Decl.OptionalIndex, metadata: LazyS File.LazySymbol.initDecl(.code, decl, mod), atom, self.text_section_index.?, - metadata.alignment, ); if (metadata.rodata_atom) |atom| try self.updateLazySymbolAtom( File.LazySymbol.initDecl(.const_data, decl, mod), atom, self.rodata_section_index.?, - metadata.alignment, ); } @@ -2678,7 +2675,6 @@ fn updateLazySymbolAtom( sym: File.LazySymbol, atom_index: Atom.Index, shdr_index: u16, - required_alignment: u32, ) !void { const gpa = self.base.allocator; const mod = self.base.options.module.?; @@ -2710,7 +2706,7 @@ fn updateLazySymbolAtom( const res = try codegen.generateLazySymbol(&self.base, src, sym, &code_buffer, .none, .{ .parent_atom_index = local_sym_index, }); - const code = switch (res) { + const code = switch (res.res) { .ok => code_buffer.items, .fail => |em| { log.err("{s}", .{em.msg}); @@ -2728,7 +2724,7 @@ fn updateLazySymbolAtom( .st_value = 0, .st_size = 0, }; - const vaddr = try self.allocateAtom(atom_index, code.len, required_alignment); + const vaddr = try self.allocateAtom(atom_index, code.len, res.alignment); errdefer self.freeAtom(atom_index); log.debug("allocated text block for {s} at 0x{x}", .{ name, vaddr }); diff --git a/src/link/MachO.zig b/src/link/MachO.zig index 21633dea64..a57742507d 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -238,7 +238,6 @@ const LazySymbolTable = std.AutoArrayHashMapUnmanaged(Module.Decl.OptionalIndex, const LazySymbolMetadata = struct { text_atom: ?Atom.Index = null, data_const_atom: ?Atom.Index = null, - alignment: u32, }; const TlvSymbolTable = std.AutoArrayHashMapUnmanaged(SymbolWithLoc, Atom.Index); @@ -2043,13 +2042,11 @@ fn updateLazySymbol(self: *MachO, decl: Module.Decl.OptionalIndex, metadata: Laz File.LazySymbol.initDecl(.code, decl, mod), atom, self.text_section_index.?, - metadata.alignment, ); if (metadata.data_const_atom) |atom| try self.updateLazySymbolAtom( File.LazySymbol.initDecl(.const_data, decl, mod), atom, self.data_const_section_index.?, - metadata.alignment, ); } @@ -2058,7 +2055,6 @@ fn updateLazySymbolAtom( sym: File.LazySymbol, atom_index: Atom.Index, section_index: u8, - required_alignment: u32, ) !void { const gpa = self.base.allocator; const mod = self.base.options.module.?; @@ -2090,7 +2086,7 @@ fn updateLazySymbolAtom( const res = try codegen.generateLazySymbol(&self.base, src, sym, &code_buffer, .none, .{ .parent_atom_index = local_sym_index, }); - const code = switch (res) { + const code = switch (res.res) { .ok => code_buffer.items, .fail => |em| { log.err("{s}", .{em.msg}); @@ -2104,11 +2100,11 @@ fn updateLazySymbolAtom( symbol.n_sect = section_index + 1; symbol.n_desc = 0; - const vaddr = try self.allocateAtom(atom_index, code.len, required_alignment); + const vaddr = try self.allocateAtom(atom_index, code.len, res.alignment); errdefer self.freeAtom(atom_index); log.debug("allocated atom for {s} at 0x{x}", .{ name, vaddr }); - log.debug(" (required alignment 0x{x}", .{required_alignment}); + log.debug(" (required alignment 0x{x}", .{res.alignment}); atom.size = code.len; symbol.n_value = vaddr; @@ -2117,10 +2113,10 @@ fn updateLazySymbolAtom( try self.writeAtom(atom_index, code); } -pub fn getOrCreateAtomForLazySymbol(self: *MachO, sym: File.LazySymbol, alignment: u32) !Atom.Index { +pub fn getOrCreateAtomForLazySymbol(self: *MachO, sym: File.LazySymbol) !Atom.Index { const gop = try self.lazy_syms.getOrPut(self.base.allocator, sym.getDecl()); errdefer _ 
= self.lazy_syms.pop(); - if (!gop.found_existing) gop.value_ptr.* = .{ .alignment = alignment }; + if (!gop.found_existing) gop.value_ptr.* = .{}; const atom = switch (sym.kind) { .code => &gop.value_ptr.text_atom, .const_data => &gop.value_ptr.data_const_atom, -- cgit v1.2.3 From 19bd7d12b0186f0e45c77c564251d6355966b2ef Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Sun, 30 Apr 2023 07:30:32 -0400 Subject: x86_64: factor out lazy_sym --- src/arch/x86_64/CodeGen.zig | 28 ++++++++++------------------ 1 file changed, 10 insertions(+), 18 deletions(-) (limited to 'src/arch') diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index eee89e9ded..99fb516b45 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -6413,10 +6413,10 @@ fn airCmpLtErrorsLen(self: *Self, inst: Air.Inst.Index) !void { const addr_lock = self.register_manager.lockRegAssumeUnused(addr_reg); defer self.register_manager.unlockReg(addr_lock); + const mod = self.bin_file.options.module.?; + const lazy_sym = link.File.LazySymbol.initDecl(.const_data, null, mod); if (self.bin_file.cast(link.File.Elf)) |elf_file| { - const atom_index = try elf_file.getOrCreateAtomForLazySymbol( - .{ .kind = .const_data, .ty = Type.anyerror }, - ); + const atom_index = try elf_file.getOrCreateAtomForLazySymbol(lazy_sym); const atom = elf_file.getAtom(atom_index); _ = try atom.getOrCreateOffsetTableEntry(elf_file); const got_addr = atom.getOffsetTableAddress(elf_file); @@ -6426,15 +6426,11 @@ fn airCmpLtErrorsLen(self: *Self, inst: Air.Inst.Index) !void { Memory.sib(.qword, .{ .base = .{ .reg = .ds }, .disp = @intCast(i32, got_addr) }), ); } else if (self.bin_file.cast(link.File.Coff)) |coff_file| { - const atom_index = try coff_file.getOrCreateAtomForLazySymbol( - .{ .kind = .const_data, .ty = Type.anyerror }, - ); + const atom_index = try coff_file.getOrCreateAtomForLazySymbol(lazy_sym); const sym_index = coff_file.getAtom(atom_index).getSymbolIndex().?; try self.genSetReg(addr_reg, Type.usize, .{ .lea_got = sym_index }); } else if (self.bin_file.cast(link.File.MachO)) |macho_file| { - const atom_index = try macho_file.getOrCreateAtomForLazySymbol( - .{ .kind = .const_data, .ty = Type.anyerror }, - ); + const atom_index = try macho_file.getOrCreateAtomForLazySymbol(lazy_sym); const sym_index = macho_file.getAtom(atom_index).getSymbolIndex().?; try self.genSetReg(addr_reg, Type.usize, .{ .lea_got = sym_index }); } else { @@ -8498,10 +8494,10 @@ fn airErrorName(self: *Self, inst: Air.Inst.Index) !void { const addr_lock = self.register_manager.lockRegAssumeUnused(addr_reg); defer self.register_manager.unlockReg(addr_lock); + const mod = self.bin_file.options.module.?; + const lazy_sym = link.File.LazySymbol.initDecl(.const_data, null, mod); if (self.bin_file.cast(link.File.Elf)) |elf_file| { - const atom_index = try elf_file.getOrCreateAtomForLazySymbol( - .{ .kind = .const_data, .ty = Type.anyerror }, - ); + const atom_index = try elf_file.getOrCreateAtomForLazySymbol(lazy_sym); const atom = elf_file.getAtom(atom_index); _ = try atom.getOrCreateOffsetTableEntry(elf_file); const got_addr = atom.getOffsetTableAddress(elf_file); @@ -8511,15 +8507,11 @@ fn airErrorName(self: *Self, inst: Air.Inst.Index) !void { Memory.sib(.qword, .{ .base = .{ .reg = .ds }, .disp = @intCast(i32, got_addr) }), ); } else if (self.bin_file.cast(link.File.Coff)) |coff_file| { - const atom_index = try coff_file.getOrCreateAtomForLazySymbol( - .{ .kind = .const_data, .ty = Type.anyerror }, - ); + const atom_index = try 
coff_file.getOrCreateAtomForLazySymbol(lazy_sym); const sym_index = coff_file.getAtom(atom_index).getSymbolIndex().?; try self.genSetReg(addr_reg, Type.usize, .{ .lea_got = sym_index }); } else if (self.bin_file.cast(link.File.MachO)) |macho_file| { - const atom_index = try macho_file.getOrCreateAtomForLazySymbol( - .{ .kind = .const_data, .ty = Type.anyerror }, - ); + const atom_index = try macho_file.getOrCreateAtomForLazySymbol(lazy_sym); const sym_index = macho_file.getAtom(atom_index).getSymbolIndex().?; try self.genSetReg(addr_reg, Type.usize, .{ .lea_got = sym_index }); } else { -- cgit v1.2.3 From 47a34d038dd114533c3fcb130e03249ad846fe9c Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Sun, 30 Apr 2023 21:47:19 -0400 Subject: x86_64: implement tagName --- src/arch/x86_64/CodeGen.zig | 292 ++++++++++++++++++++++++++++++++++++++++---- src/codegen.zig | 40 +++++- src/link/Coff.zig | 33 +++-- src/link/Elf.zig | 31 +++-- src/link/MachO.zig | 33 +++-- test/behavior/enum.zig | 5 - test/behavior/memset.zig | 2 - test/behavior/type.zig | 2 - test/behavior/union.zig | 1 - 9 files changed, 368 insertions(+), 71 deletions(-) (limited to 'src/arch') diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 99fb516b45..71dcbfa461 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -56,7 +56,10 @@ liveness: Liveness, bin_file: *link.File, debug_output: DebugInfoOutput, target: *const std.Target, -mod_fn: *const Module.Fn, +owner: union(enum) { + mod_fn: *const Module.Fn, + decl: Module.Decl.Index, +}, err_msg: ?*ErrorMsg, args: []MCValue, ret_mcv: InstTracking, @@ -617,7 +620,7 @@ pub fn generate( .target = &bin_file.options.target, .bin_file = bin_file, .debug_output = debug_output, - .mod_fn = module_fn, + .owner = .{ .mod_fn = module_fn }, .err_msg = null, .args = undefined, // populated after `resolveCallingConventionValues` .ret_mcv = undefined, // populated after `resolveCallingConventionValues` @@ -745,6 +748,92 @@ pub fn generate( } } +pub fn generateLazy( + bin_file: *link.File, + src_loc: Module.SrcLoc, + lazy_sym: link.File.LazySymbol, + code: *std.ArrayList(u8), + debug_output: DebugInfoOutput, +) CodeGenError!Result { + const gpa = bin_file.allocator; + var function = Self{ + .gpa = gpa, + .air = undefined, + .liveness = undefined, + .target = &bin_file.options.target, + .bin_file = bin_file, + .debug_output = debug_output, + .owner = .{ .decl = lazy_sym.ty.getOwnerDecl() }, + .err_msg = null, + .args = undefined, + .ret_mcv = undefined, + .fn_type = undefined, + .arg_index = undefined, + .src_loc = src_loc, + .end_di_line = undefined, // no debug info yet + .end_di_column = undefined, // no debug info yet + }; + defer { + function.mir_instructions.deinit(gpa); + function.mir_extra.deinit(gpa); + } + + function.genLazy(lazy_sym) catch |err| switch (err) { + error.CodegenFail => return Result{ .fail = function.err_msg.? }, + error.OutOfRegisters => return Result{ + .fail = try ErrorMsg.create(bin_file.allocator, src_loc, "CodeGen ran out of registers. 
This is a bug in the Zig compiler.", .{}), + }, + else => |e| return e, + }; + + var mir = Mir{ + .instructions = function.mir_instructions.toOwnedSlice(), + .extra = try function.mir_extra.toOwnedSlice(bin_file.allocator), + .frame_locs = function.frame_locs.toOwnedSlice(), + }; + defer mir.deinit(bin_file.allocator); + + var emit = Emit{ + .lower = .{ + .allocator = bin_file.allocator, + .mir = mir, + .target = &bin_file.options.target, + .src_loc = src_loc, + }, + .bin_file = bin_file, + .debug_output = debug_output, + .code = code, + .prev_di_pc = undefined, // no debug info yet + .prev_di_line = undefined, // no debug info yet + .prev_di_column = undefined, // no debug info yet + }; + defer emit.deinit(); + emit.emitMir() catch |err| switch (err) { + error.LowerFail, error.EmitFail => return Result{ .fail = emit.lower.err_msg.? }, + error.InvalidInstruction, error.CannotEncode => |e| { + const msg = switch (e) { + error.InvalidInstruction => "CodeGen failed to find a viable instruction.", + error.CannotEncode => "CodeGen failed to encode the instruction.", + }; + return Result{ + .fail = try ErrorMsg.create( + bin_file.allocator, + src_loc, + "{s} This is a bug in the Zig compiler.", + .{msg}, + ), + }; + }, + else => |e| return e, + }; + + if (function.err_msg) |em| { + return Result{ .fail = em }; + } else { + return Result.ok; + } +} + const FormatDeclData = struct { mod: *Module, decl_index: Module.Decl.Index, @@ -1545,6 +1634,103 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { verbose_tracking_log.debug("{}", .{self.fmtTracking()}); } +fn genLazy(self: *Self, lazy_sym: link.File.LazySymbol) InnerError!void { + switch (lazy_sym.ty.zigTypeTag()) { + .Enum => { + const enum_ty = lazy_sym.ty; + wip_mir_log.debug("{}.@tagName:", .{enum_ty.fmt(self.bin_file.options.module.?)}); + + const param_regs = abi.getCAbiIntParamRegs(self.target.*); + const param_locks = self.register_manager.lockRegsAssumeUnused(2, param_regs[0..2].*); + defer for (param_locks) |lock| self.register_manager.unlockReg(lock); + + const ret_reg = param_regs[0]; + const enum_mcv = MCValue{ .register = param_regs[1] }; + + var exitlude_jump_relocs = try self.gpa.alloc(u32, enum_ty.enumFieldCount()); + defer self.gpa.free(exitlude_jump_relocs); + + const data_reg = try self.register_manager.allocReg(null, gp); + const data_lock = self.register_manager.lockRegAssumeUnused(data_reg); + defer self.register_manager.unlockReg(data_lock); + + const data_lazy_sym = link.File.LazySymbol{ .kind = .const_data, .ty = enum_ty }; + if (self.bin_file.cast(link.File.Elf)) |elf_file| { + const atom_index = elf_file.getOrCreateAtomForLazySymbol(data_lazy_sym) catch |err| + return self.fail("{s} creating lazy symbol", .{@errorName(err)}); + const atom = elf_file.getAtom(atom_index); + _ = try atom.getOrCreateOffsetTableEntry(elf_file); + const got_addr = atom.getOffsetTableAddress(elf_file); + try self.asmRegisterMemory( + .mov, + data_reg.to64(), + Memory.sib(.qword, .{ .base = .{ .reg = .ds }, .disp = @intCast(i32, got_addr) }), + ); + } else if (self.bin_file.cast(link.File.Coff)) |coff_file| { + const atom_index = coff_file.getOrCreateAtomForLazySymbol(data_lazy_sym) catch |err| + return self.fail("{s} creating lazy symbol", .{@errorName(err)}); + const sym_index = coff_file.getAtom(atom_index).getSymbolIndex().?; + try self.genSetReg(data_reg, Type.usize, .{ .lea_got = sym_index }); + } else if (self.bin_file.cast(link.File.MachO)) |macho_file| { + const atom_index = 
macho_file.getOrCreateAtomForLazySymbol(data_lazy_sym) catch |err| + return self.fail("{s} creating lazy symbol", .{@errorName(err)}); + const sym_index = macho_file.getAtom(atom_index).getSymbolIndex().?; + try self.genSetReg(data_reg, Type.usize, .{ .lea_got = sym_index }); + } else { + return self.fail("TODO implement {s} for {}", .{ + @tagName(lazy_sym.kind), + lazy_sym.ty.fmt(self.bin_file.options.module.?), + }); + } + + var data_off: i32 = 0; + for ( + exitlude_jump_relocs, + enum_ty.enumFields().keys(), + 0.., + ) |*exitlude_jump_reloc, tag_name, index| { + var tag_pl = Value.Payload.U32{ + .base = .{ .tag = .enum_field_index }, + .data = @intCast(u32, index), + }; + const tag_val = Value.initPayload(&tag_pl.base); + const tag_mcv = try self.genTypedValue(.{ .ty = enum_ty, .val = tag_val }); + try self.genBinOpMir(.cmp, enum_ty, enum_mcv, tag_mcv); + const skip_reloc = try self.asmJccReloc(undefined, .ne); + + try self.genSetMem( + .{ .reg = ret_reg }, + 0, + Type.usize, + .{ .register_offset = .{ .reg = data_reg, .off = data_off } }, + ); + try self.genSetMem(.{ .reg = ret_reg }, 8, Type.usize, .{ .immediate = tag_name.len }); + + exitlude_jump_reloc.* = try self.asmJmpReloc(undefined); + try self.performReloc(skip_reloc); + + data_off += @intCast(i32, tag_name.len + 1); + } + + try self.airTrap(); + + for (exitlude_jump_relocs) |reloc| try self.performReloc(reloc); + try self.asmOpOnly(.ret); + }, + else => return self.fail( + "TODO implement {s} for {}", + .{ @tagName(lazy_sym.kind), lazy_sym.ty.fmt(self.bin_file.options.module.?) }, + ), + } +} + +fn getOwnerDecl(self: *const Self) Module.Decl.Index { + return switch (self.owner) { + .mod_fn => |mod_fn| mod_fn.owner_decl, + .decl => |index| index, + }; +} + fn getValue(self: *Self, value: MCValue, inst: ?Air.Inst.Index) void { const reg = value.getReg() orelse return; if (self.register_manager.isRegFree(reg)) { @@ -6020,7 +6206,7 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void { const ty = self.air.typeOfIndex(inst); const src_index = self.air.instructions.items(.data)[inst].arg.src_index; - const name = self.mod_fn.getParamName(self.bin_file.options.module.?, src_index); + const name = self.owner.mod_fn.getParamName(self.bin_file.options.module.?, src_index); try self.genArgDbgInfo(ty, name, dst_mcv); break :result dst_mcv; @@ -6044,7 +6230,7 @@ fn genArgDbgInfo(self: Self, ty: Type, name: [:0]const u8, mcv: MCValue) !void { //}, else => unreachable, // not a valid function parameter }; - try dw.genArgDbgInfo(name, ty, self.mod_fn.owner_decl, loc); + try dw.genArgDbgInfo(name, ty, self.getOwnerDecl(), loc); }, .plan9 => {}, .none => {}, @@ -6085,7 +6271,7 @@ fn genVarDbgInfo( break :blk .nop; }, }; - try dw.genVarDbgInfo(name, ty, self.mod_fn.owner_decl, is_ptr, loc); + try dw.genVarDbgInfo(name, ty, self.getOwnerDecl(), is_ptr, loc); }, .plan9 => {}, .none => {}, @@ -6243,7 +6429,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const decl_name = mem.sliceTo(mod.declPtr(extern_fn.owner_decl).name, 0); const lib_name = mem.sliceTo(extern_fn.lib_name, 0); if (self.bin_file.cast(link.File.Coff)) |coff_file| { - const atom_index = try self.getSymbolIndexForDecl(self.mod_fn.owner_decl); + const atom_index = try self.getSymbolIndexForDecl(self.getOwnerDecl()); const sym_index = try coff_file.getGlobalSymbol(decl_name, lib_name); _ = try self.addInst(.{ .tag = .mov_linker, @@ -6257,7 +6443,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier try 
self.asmRegister(.call, .rax); } else if (self.bin_file.cast(link.File.MachO)) |macho_file| { const sym_index = try macho_file.getGlobalSymbol(decl_name, lib_name); - const atom_index = try self.getSymbolIndexForDecl(self.mod_fn.owner_decl); + const atom_index = try self.getSymbolIndexForDecl(self.getOwnerDecl()); _ = try self.addInst(.{ .tag = .call_extern, .ops = undefined, @@ -6416,7 +6602,8 @@ fn airCmpLtErrorsLen(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const lazy_sym = link.File.LazySymbol.initDecl(.const_data, null, mod); if (self.bin_file.cast(link.File.Elf)) |elf_file| { - const atom_index = try elf_file.getOrCreateAtomForLazySymbol(lazy_sym); + const atom_index = elf_file.getOrCreateAtomForLazySymbol(lazy_sym) catch |err| + return self.fail("{s} creating lazy symbol", .{@errorName(err)}); const atom = elf_file.getAtom(atom_index); _ = try atom.getOrCreateOffsetTableEntry(elf_file); const got_addr = atom.getOffsetTableAddress(elf_file); @@ -6426,11 +6613,13 @@ fn airCmpLtErrorsLen(self: *Self, inst: Air.Inst.Index) !void { Memory.sib(.qword, .{ .base = .{ .reg = .ds }, .disp = @intCast(i32, got_addr) }), ); } else if (self.bin_file.cast(link.File.Coff)) |coff_file| { - const atom_index = try coff_file.getOrCreateAtomForLazySymbol(lazy_sym); + const atom_index = coff_file.getOrCreateAtomForLazySymbol(lazy_sym) catch |err| + return self.fail("{s} creating lazy symbol", .{@errorName(err)}); const sym_index = coff_file.getAtom(atom_index).getSymbolIndex().?; try self.genSetReg(addr_reg, Type.usize, .{ .lea_got = sym_index }); } else if (self.bin_file.cast(link.File.MachO)) |macho_file| { - const atom_index = try macho_file.getOrCreateAtomForLazySymbol(lazy_sym); + const atom_index = macho_file.getOrCreateAtomForLazySymbol(lazy_sym) catch |err| + return self.fail("{s} creating lazy symbol", .{@errorName(err)}); const sym_index = macho_file.getAtom(atom_index).getSymbolIndex().?; try self.genSetReg(addr_reg, Type.usize, .{ .lea_got = sym_index }); } else { @@ -7530,7 +7719,7 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr }), ), .load_direct => |sym_index| if (try self.movMirTag(ty) == .mov) { - const atom_index = try self.getSymbolIndexForDecl(self.mod_fn.owner_decl); + const atom_index = try self.getSymbolIndexForDecl(self.getOwnerDecl()); _ = try self.addInst(.{ .tag = .mov_linker, .ops = .direct_reloc, @@ -7557,7 +7746,7 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr ); }, .lea_direct, .lea_got => |sym_index| { - const atom_index = try self.getSymbolIndexForDecl(self.mod_fn.owner_decl); + const atom_index = try self.getSymbolIndexForDecl(self.getOwnerDecl()); _ = try self.addInst(.{ .tag = switch (src_mcv) { .lea_direct => .lea_linker, @@ -7577,7 +7766,7 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr }); }, .lea_tlv => |sym_index| { - const atom_index = try self.getSymbolIndexForDecl(self.mod_fn.owner_decl); + const atom_index = try self.getSymbolIndexForDecl(self.getOwnerDecl()); if (self.bin_file.cast(link.File.MachO)) |_| { _ = try self.addInst(.{ .tag = .lea_linker, @@ -8475,10 +8664,64 @@ fn airMemcpy(self: *Self, inst: Air.Inst.Index) !void { fn airTagName(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; + const inst_ty = self.air.typeOfIndex(inst); + const enum_ty = self.air.typeOf(un_op); + + // We need a properly aligned and sized call frame to be able to call this 
function. + { + const needed_call_frame = FrameAlloc.init(.{ + .size = inst_ty.abiSize(self.target.*), + .alignment = inst_ty.abiAlignment(self.target.*), + }); + const frame_allocs_slice = self.frame_allocs.slice(); + const stack_frame_size = + &frame_allocs_slice.items(.abi_size)[@enumToInt(FrameIndex.call_frame)]; + stack_frame_size.* = @max(stack_frame_size.*, needed_call_frame.abi_size); + const stack_frame_align = + &frame_allocs_slice.items(.abi_align)[@enumToInt(FrameIndex.call_frame)]; + stack_frame_align.* = @max(stack_frame_align.*, needed_call_frame.abi_align); + } + + try self.spillEflagsIfOccupied(); + try self.spillRegisters(abi.getCallerPreservedRegs(self.target.*)); + + const param_regs = abi.getCAbiIntParamRegs(self.target.*); + + const dst_mcv = try self.allocRegOrMem(inst, false); + try self.genSetReg(param_regs[0], Type.usize, dst_mcv.address()); + const operand = try self.resolveInst(un_op); - _ = operand; - return self.fail("TODO implement airTagName for x86_64", .{}); - //return self.finishAir(inst, result, .{ un_op, .none, .none }); + try self.genSetReg(param_regs[1], enum_ty, operand); + + const mod = self.bin_file.options.module.?; + const lazy_sym = link.File.LazySymbol.initDecl(.code, enum_ty.getOwnerDecl(), mod); + if (self.bin_file.cast(link.File.Elf)) |elf_file| { + const atom_index = elf_file.getOrCreateAtomForLazySymbol(lazy_sym) catch |err| + return self.fail("{s} creating lazy symbol", .{@errorName(err)}); + const atom = elf_file.getAtom(atom_index); + _ = try atom.getOrCreateOffsetTableEntry(elf_file); + const got_addr = atom.getOffsetTableAddress(elf_file); + try self.asmMemory( + .call, + Memory.sib(.qword, .{ .base = .{ .reg = .ds }, .disp = @intCast(i32, got_addr) }), + ); + } else if (self.bin_file.cast(link.File.Coff)) |coff_file| { + const atom_index = coff_file.getOrCreateAtomForLazySymbol(lazy_sym) catch |err| + return self.fail("{s} creating lazy symbol", .{@errorName(err)}); + const sym_index = coff_file.getAtom(atom_index).getSymbolIndex().?; + try self.genSetReg(.rax, Type.usize, .{ .lea_got = sym_index }); + try self.asmRegister(.call, .rax); + } else if (self.bin_file.cast(link.File.MachO)) |macho_file| { + const atom_index = macho_file.getOrCreateAtomForLazySymbol(lazy_sym) catch |err| + return self.fail("{s} creating lazy symbol", .{@errorName(err)}); + const sym_index = macho_file.getAtom(atom_index).getSymbolIndex().?; + try self.genSetReg(.rax, Type.usize, .{ .lea_got = sym_index }); + try self.asmRegister(.call, .rax); + } else { + return self.fail("TODO implement airTagName for x86_64 {s}", .{@tagName(self.bin_file.tag)}); + } + + return self.finishAir(inst, dst_mcv, .{ un_op, .none, .none }); } fn airErrorName(self: *Self, inst: Air.Inst.Index) !void { @@ -8497,7 +8740,8 @@ fn airErrorName(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const lazy_sym = link.File.LazySymbol.initDecl(.const_data, null, mod); if (self.bin_file.cast(link.File.Elf)) |elf_file| { - const atom_index = try elf_file.getOrCreateAtomForLazySymbol(lazy_sym); + const atom_index = elf_file.getOrCreateAtomForLazySymbol(lazy_sym) catch |err| + return self.fail("{s} creating lazy symbol", .{@errorName(err)}); const atom = elf_file.getAtom(atom_index); _ = try atom.getOrCreateOffsetTableEntry(elf_file); const got_addr = atom.getOffsetTableAddress(elf_file); @@ -8507,11 +8751,13 @@ fn airErrorName(self: *Self, inst: Air.Inst.Index) !void { Memory.sib(.qword, .{ .base = .{ .reg = .ds }, .disp = @intCast(i32, got_addr) }), ); } 
else if (self.bin_file.cast(link.File.Coff)) |coff_file| { - const atom_index = try coff_file.getOrCreateAtomForLazySymbol(lazy_sym); + const atom_index = coff_file.getOrCreateAtomForLazySymbol(lazy_sym) catch |err| + return self.fail("{s} creating lazy symbol", .{@errorName(err)}); const sym_index = coff_file.getAtom(atom_index).getSymbolIndex().?; try self.genSetReg(addr_reg, Type.usize, .{ .lea_got = sym_index }); } else if (self.bin_file.cast(link.File.MachO)) |macho_file| { - const atom_index = try macho_file.getOrCreateAtomForLazySymbol(lazy_sym); + const atom_index = macho_file.getOrCreateAtomForLazySymbol(lazy_sym) catch |err| + return self.fail("{s} creating lazy symbol", .{@errorName(err)}); const sym_index = macho_file.getAtom(atom_index).getSymbolIndex().?; try self.genSetReg(addr_reg, Type.usize, .{ .lea_got = sym_index }); } else { @@ -8833,12 +9079,7 @@ fn limitImmediateType(self: *Self, operand: Air.Inst.Ref, comptime T: type) !MCV } fn genTypedValue(self: *Self, arg_tv: TypedValue) InnerError!MCValue { - const mcv: MCValue = switch (try codegen.genTypedValue( - self.bin_file, - self.src_loc, - arg_tv, - self.mod_fn.owner_decl, - )) { + return switch (try codegen.genTypedValue(self.bin_file, self.src_loc, arg_tv, self.getOwnerDecl())) { .mcv => |mcv| switch (mcv) { .none => .none, .undef => .undef, @@ -8853,7 +9094,6 @@ fn genTypedValue(self: *Self, arg_tv: TypedValue) InnerError!MCValue { return error.CodegenFail; }, }; - return mcv; } const CallMCValues = struct { diff --git a/src/codegen.zig b/src/codegen.zig index 690e96d25c..078feb409d 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -7,6 +7,7 @@ const link = @import("link.zig"); const log = std.log.scoped(.codegen); const mem = std.mem; const math = std.math; +const target_util = @import("target.zig"); const trace = @import("tracy.zig").trace; const Air = @import("Air.zig"); @@ -89,6 +90,19 @@ pub fn generateFunction( } } +pub fn generateLazyFunction( + bin_file: *link.File, + src_loc: Module.SrcLoc, + lazy_sym: link.File.LazySymbol, + code: *std.ArrayList(u8), + debug_output: DebugInfoOutput, +) CodeGenError!Result { + switch (bin_file.options.target.cpu.arch) { + .x86_64 => return @import("arch/x86_64/CodeGen.zig").generateLazy(bin_file, src_loc, lazy_sym, code, debug_output), + else => unreachable, + } +} + fn writeFloat(comptime F: type, f: F, target: Target, endian: std.builtin.Endian, code: []u8) void { _ = target; const bits = @typeInfo(F).Float.bits; @@ -101,11 +115,11 @@ pub fn generateLazySymbol( bin_file: *link.File, src_loc: Module.SrcLoc, lazy_sym: link.File.LazySymbol, + alignment: *u32, code: *std.ArrayList(u8), debug_output: DebugInfoOutput, reloc_info: RelocInfo, -) CodeGenError!struct { res: Result, alignment: u32 } { - _ = debug_output; +) CodeGenError!Result { _ = reloc_info; const tracy = trace(@src()); @@ -120,7 +134,13 @@ pub fn generateLazySymbol( lazy_sym.ty.fmt(mod), }); - if (lazy_sym.kind == .const_data and lazy_sym.ty.isAnyError()) { + if (lazy_sym.kind == .code) { + alignment.* = target_util.defaultFunctionAlignment(target); + return generateLazyFunction(bin_file, src_loc, lazy_sym, code, debug_output); + } + + if (lazy_sym.ty.isAnyError()) { + alignment.* = 4; const err_names = mod.error_name_list.items; mem.writeInt(u32, try code.addManyAsArray(4), @intCast(u32, err_names.len), endian); var offset = code.items.len; @@ -133,13 +153,21 @@ pub fn generateLazySymbol( code.appendAssumeCapacity(0); } mem.writeInt(u32, code.items[offset..][0..4], @intCast(u32, code.items.len), endian); - 
return .{ .res = Result.ok, .alignment = 4 }; - } else return .{ .res = .{ .fail = try ErrorMsg.create( + return Result.ok; + } else if (lazy_sym.ty.zigTypeTag() == .Enum) { + alignment.* = 1; + for (lazy_sym.ty.enumFields().keys()) |tag_name| { + try code.ensureUnusedCapacity(tag_name.len + 1); + code.appendSliceAssumeCapacity(tag_name); + code.appendAssumeCapacity(0); + } + return Result.ok; + } else return .{ .fail = try ErrorMsg.create( bin_file.allocator, src_loc, "TODO implement generateLazySymbol for {s} {}", .{ @tagName(lazy_sym.kind), lazy_sym.ty.fmt(mod) }, - ) }, .alignment = undefined }; + ) }; } pub fn generateSymbol( diff --git a/src/link/Coff.zig b/src/link/Coff.zig index deac8d1fd6..41d4617e53 100644 --- a/src/link/Coff.zig +++ b/src/link/Coff.zig @@ -1218,6 +1218,7 @@ fn updateLazySymbolAtom( const gpa = self.base.allocator; const mod = self.base.options.module.?; + var required_alignment: u32 = undefined; var code_buffer = std.ArrayList(u8).init(gpa); defer code_buffer.deinit(); @@ -1238,10 +1239,16 @@ fn updateLazySymbolAtom( .parent_decl_node = undefined, .lazy = .unneeded, }; - const res = try codegen.generateLazySymbol(&self.base, src, sym, &code_buffer, .none, .{ - .parent_atom_index = local_sym_index, - }); - const code = switch (res.res) { + const res = try codegen.generateLazySymbol( + &self.base, + src, + sym, + &required_alignment, + &code_buffer, + .none, + .{ .parent_atom_index = local_sym_index }, + ); + const code = switch (res) { .ok => code_buffer.items, .fail => |em| { log.err("{s}", .{em.msg}); @@ -1255,11 +1262,11 @@ fn updateLazySymbolAtom( symbol.section_number = @intToEnum(coff.SectionNumber, section_index + 1); symbol.type = .{ .complex_type = .NULL, .base_type = .NULL }; - const vaddr = try self.allocateAtom(atom_index, code_len, res.alignment); + const vaddr = try self.allocateAtom(atom_index, code_len, required_alignment); errdefer self.freeAtom(atom_index); log.debug("allocated atom for {s} at 0x{x}", .{ name, vaddr }); - log.debug(" (required alignment 0x{x})", .{res.alignment}); + log.debug(" (required alignment 0x{x})", .{required_alignment}); atom.size = code_len; symbol.value = vaddr; @@ -1270,14 +1277,20 @@ fn updateLazySymbolAtom( pub fn getOrCreateAtomForLazySymbol(self: *Coff, sym: link.File.LazySymbol) !Atom.Index { const gop = try self.lazy_syms.getOrPut(self.base.allocator, sym.getDecl()); - errdefer _ = self.lazy_syms.pop(); + errdefer _ = if (!gop.found_existing) self.lazy_syms.pop(); if (!gop.found_existing) gop.value_ptr.* = .{}; - const atom = switch (sym.kind) { + const atom_ptr = switch (sym.kind) { .code => &gop.value_ptr.text_atom, .const_data => &gop.value_ptr.rdata_atom, }; - if (atom.* == null) atom.* = try self.createAtom(); - return atom.*.?; + if (atom_ptr.*) |atom| return atom; + const atom = try self.createAtom(); + atom_ptr.* = atom; + try self.updateLazySymbolAtom(sym, atom, switch (sym.kind) { + .code => self.text_section_index.?, + .const_data => self.rdata_section_index.?, + }); + return atom; } pub fn getOrCreateAtomForDecl(self: *Coff, decl_index: Module.Decl.Index) !Atom.Index { diff --git a/src/link/Elf.zig b/src/link/Elf.zig index 29ae97150e..ec113e5c8a 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -2376,14 +2376,20 @@ pub fn freeDecl(self: *Elf, decl_index: Module.Decl.Index) void { pub fn getOrCreateAtomForLazySymbol(self: *Elf, sym: File.LazySymbol) !Atom.Index { const gop = try self.lazy_syms.getOrPut(self.base.allocator, sym.getDecl()); - errdefer _ = self.lazy_syms.pop(); + errdefer _ = if 
(!gop.found_existing) self.lazy_syms.pop(); if (!gop.found_existing) gop.value_ptr.* = .{}; - const atom = switch (sym.kind) { + const atom_ptr = switch (sym.kind) { .code => &gop.value_ptr.text_atom, .const_data => &gop.value_ptr.rodata_atom, }; - if (atom.* == null) atom.* = try self.createAtom(); - return atom.*.?; + if (atom_ptr.*) |atom| return atom; + const atom = try self.createAtom(); + atom_ptr.* = atom; + try self.updateLazySymbolAtom(sym, atom, switch (sym.kind) { + .code => self.text_section_index.?, + .const_data => self.rodata_section_index.?, + }); + return atom; } pub fn getOrCreateAtomForDecl(self: *Elf, decl_index: Module.Decl.Index) !Atom.Index { @@ -2684,6 +2690,7 @@ fn updateLazySymbolAtom( const gpa = self.base.allocator; const mod = self.base.options.module.?; + var required_alignment: u32 = undefined; var code_buffer = std.ArrayList(u8).init(gpa); defer code_buffer.deinit(); @@ -2708,10 +2715,16 @@ fn updateLazySymbolAtom( .parent_decl_node = undefined, .lazy = .unneeded, }; - const res = try codegen.generateLazySymbol(&self.base, src, sym, &code_buffer, .none, .{ - .parent_atom_index = local_sym_index, - }); - const code = switch (res.res) { + const res = try codegen.generateLazySymbol( + &self.base, + src, + sym, + &required_alignment, + &code_buffer, + .none, + .{ .parent_atom_index = local_sym_index }, + ); + const code = switch (res) { .ok => code_buffer.items, .fail => |em| { log.err("{s}", .{em.msg}); @@ -2729,7 +2742,7 @@ fn updateLazySymbolAtom( .st_value = 0, .st_size = 0, }; - const vaddr = try self.allocateAtom(atom_index, code.len, res.alignment); + const vaddr = try self.allocateAtom(atom_index, code.len, required_alignment); errdefer self.freeAtom(atom_index); log.debug("allocated text block for {s} at 0x{x}", .{ name, vaddr }); diff --git a/src/link/MachO.zig b/src/link/MachO.zig index 5136bd84a2..c14168c66b 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -2060,6 +2060,7 @@ fn updateLazySymbolAtom( const gpa = self.base.allocator; const mod = self.base.options.module.?; + var required_alignment: u32 = undefined; var code_buffer = std.ArrayList(u8).init(gpa); defer code_buffer.deinit(); @@ -2084,10 +2085,16 @@ fn updateLazySymbolAtom( .parent_decl_node = undefined, .lazy = .unneeded, }; - const res = try codegen.generateLazySymbol(&self.base, src, sym, &code_buffer, .none, .{ - .parent_atom_index = local_sym_index, - }); - const code = switch (res.res) { + const res = try codegen.generateLazySymbol( + &self.base, + src, + sym, + &required_alignment, + &code_buffer, + .none, + .{ .parent_atom_index = local_sym_index }, + ); + const code = switch (res) { .ok => code_buffer.items, .fail => |em| { log.err("{s}", .{em.msg}); @@ -2101,11 +2108,11 @@ fn updateLazySymbolAtom( symbol.n_sect = section_index + 1; symbol.n_desc = 0; - const vaddr = try self.allocateAtom(atom_index, code.len, res.alignment); + const vaddr = try self.allocateAtom(atom_index, code.len, required_alignment); errdefer self.freeAtom(atom_index); log.debug("allocated atom for {s} at 0x{x}", .{ name, vaddr }); - log.debug(" (required alignment 0x{x}", .{res.alignment}); + log.debug(" (required alignment 0x{x}", .{required_alignment}); atom.size = code.len; symbol.n_value = vaddr; @@ -2116,14 +2123,20 @@ fn updateLazySymbolAtom( pub fn getOrCreateAtomForLazySymbol(self: *MachO, sym: File.LazySymbol) !Atom.Index { const gop = try self.lazy_syms.getOrPut(self.base.allocator, sym.getDecl()); - errdefer _ = self.lazy_syms.pop(); + errdefer _ = if (!gop.found_existing) 
self.lazy_syms.pop(); if (!gop.found_existing) gop.value_ptr.* = .{}; - const atom = switch (sym.kind) { + const atom_ptr = switch (sym.kind) { .code => &gop.value_ptr.text_atom, .const_data => &gop.value_ptr.data_const_atom, }; - if (atom.* == null) atom.* = try self.createAtom(); - return atom.*.?; + if (atom_ptr.*) |atom| return atom; + const atom = try self.createAtom(); + atom_ptr.* = atom; + try self.updateLazySymbolAtom(sym, atom, switch (sym.kind) { + .code => self.text_section_index.?, + .const_data => self.data_const_section_index.?, + }); + return atom; } fn updateThreadlocalVariable(self: *MachO, module: *Module, decl_index: Module.Decl.Index) !void { diff --git a/test/behavior/enum.zig b/test/behavior/enum.zig index de5c0efb8e..26b941bcdc 100644 --- a/test/behavior/enum.zig +++ b/test/behavior/enum.zig @@ -981,7 +981,6 @@ fn test3_2(f: Test3Foo) !void { } test "@tagName" { - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -997,7 +996,6 @@ fn testEnumTagNameBare(n: anytype) []const u8 { const BareNumber = enum { One, Two, Three }; test "@tagName non-exhaustive enum" { - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -1008,7 +1006,6 @@ test "@tagName non-exhaustive enum" { const NonExhaustive = enum(u8) { A, B, _ }; test "@tagName is null-terminated" { - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -1023,7 +1020,6 @@ test "@tagName is null-terminated" { } test "tag name with assigned enum values" { - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -1113,7 +1109,6 @@ test "enum literal in array literal" { test "tag name functions are unique" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO diff --git a/test/behavior/memset.zig b/test/behavior/memset.zig index 2cc390a3c9..89e01a0e56 100644 --- a/test/behavior/memset.zig +++ b/test/behavior/memset.zig @@ -114,7 +114,6 @@ test "memset with large array element, runtime known" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; const A = [128]u64; var buf: [5]A = undefined; @@ -132,7 +131,6 @@ test "memset with large array element, comptime known" { if 
(builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; const A = [128]u64; var buf: [5]A = undefined; diff --git a/test/behavior/type.zig b/test/behavior/type.zig index 0d309b9a6e..695a81b1a3 100644 --- a/test/behavior/type.zig +++ b/test/behavior/type.zig @@ -258,7 +258,6 @@ test "Type.ErrorSet" { test "Type.Struct" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -383,7 +382,6 @@ test "Type.Enum" { test "Type.Union" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO diff --git a/test/behavior/union.zig b/test/behavior/union.zig index 76c5b09a89..f90a336e76 100644 --- a/test/behavior/union.zig +++ b/test/behavior/union.zig @@ -1094,7 +1094,6 @@ test "containers with single-field enums" { test "@unionInit on union with tag but no fields" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO -- cgit v1.2.3 From 0bdfb288ccc58807355221d3e86451c33e390d01 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Mon, 1 May 2023 00:39:31 -0400 Subject: x86_64: workaround tagName linker issues Pass extra pointer param with a linker ref when calling the lazy tagName function to workaround not being able to lower linker refs during codegen of a lazy func. 
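Taken together with the tagName implementation in the previous patch, the scheme this workaround arrives at can be modeled at the Zig level as follows. This is a sketch against the same 2023-era Zig the series targets: the NUL-separated name table is the layout generateLazySymbol actually emits for enums, while the function name, the parameter order, and passing the table address as an explicit argument (rather than the C ABI register assignments the real code uses) are illustrative assumptions.

const std = @import("std");

/// Sketch of the lazily generated @tagName helper with the extra `names`
/// pointer from this workaround: the caller resolves the const_data symbol
/// and passes its address in, so the helper body itself carries no linker
/// relocation. (Illustrative names; not the compiler's real ABI.)
fn lazyTagName(comptime E: type, tag: E, names: [*:0]const u8) []const u8 {
    var off: usize = 0;
    // genLazy emits an unrolled cmp/jne chain over the tags with a running
    // byte offset into the name table; `inline for` models that chain.
    inline for (@typeInfo(E).Enum.fields) |field| {
        if (tag == @field(E, field.name)) return names[off .. off + field.name.len];
        off += field.name.len + 1; // each name is followed by a 0 byte
    }
    unreachable; // no tag matched; the generated code traps here (airTrap)
}

test "lazyTagName walks the packed name table" {
    const Fruit = enum { apple, pear, plum };
    // Layout matches generateLazySymbol's .Enum branch: name bytes plus NUL.
    const table = "apple\x00pear\x00plum\x00";
    try std.testing.expectEqualStrings("pear", lazyTagName(Fruit, .pear, table));
}

The revert that follows this patch goes back to loading the table address inside the lazy function itself, and the commit after that addresses the underlying symbol-index problem instead.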
--- src/arch/x86_64/CodeGen.zig | 79 +++++++++++++++++++-------------------------- 1 file changed, 34 insertions(+), 45 deletions(-) (limited to 'src/arch') diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 71dcbfa461..6f71e0b810 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -1641,48 +1641,16 @@ fn genLazy(self: *Self, lazy_sym: link.File.LazySymbol) InnerError!void { wip_mir_log.debug("{}.@tagName:", .{enum_ty.fmt(self.bin_file.options.module.?)}); const param_regs = abi.getCAbiIntParamRegs(self.target.*); - const param_locks = self.register_manager.lockRegsAssumeUnused(2, param_regs[0..2].*); + const param_locks = self.register_manager.lockRegsAssumeUnused(3, param_regs[0..3].*); defer for (param_locks) |lock| self.register_manager.unlockReg(lock); const ret_reg = param_regs[0]; const enum_mcv = MCValue{ .register = param_regs[1] }; + const data_mcv = MCValue{ .register = param_regs[2] }; var exitlude_jump_relocs = try self.gpa.alloc(u32, enum_ty.enumFieldCount()); defer self.gpa.free(exitlude_jump_relocs); - const data_reg = try self.register_manager.allocReg(null, gp); - const data_lock = self.register_manager.lockRegAssumeUnused(data_reg); - defer self.register_manager.unlockReg(data_lock); - - const data_lazy_sym = link.File.LazySymbol{ .kind = .const_data, .ty = enum_ty }; - if (self.bin_file.cast(link.File.Elf)) |elf_file| { - const atom_index = elf_file.getOrCreateAtomForLazySymbol(data_lazy_sym) catch |err| - return self.fail("{s} creating lazy symbol", .{@errorName(err)}); - const atom = elf_file.getAtom(atom_index); - _ = try atom.getOrCreateOffsetTableEntry(elf_file); - const got_addr = atom.getOffsetTableAddress(elf_file); - try self.asmRegisterMemory( - .mov, - data_reg.to64(), - Memory.sib(.qword, .{ .base = .{ .reg = .ds }, .disp = @intCast(i32, got_addr) }), - ); - } else if (self.bin_file.cast(link.File.Coff)) |coff_file| { - const atom_index = coff_file.getOrCreateAtomForLazySymbol(data_lazy_sym) catch |err| - return self.fail("{s} creating lazy symbol", .{@errorName(err)}); - const sym_index = coff_file.getAtom(atom_index).getSymbolIndex().?; - try self.genSetReg(data_reg, Type.usize, .{ .lea_got = sym_index }); - } else if (self.bin_file.cast(link.File.MachO)) |macho_file| { - const atom_index = macho_file.getOrCreateAtomForLazySymbol(data_lazy_sym) catch |err| - return self.fail("{s} creating lazy symbol", .{@errorName(err)}); - const sym_index = macho_file.getAtom(atom_index).getSymbolIndex().?; - try self.genSetReg(data_reg, Type.usize, .{ .lea_got = sym_index }); - } else { - return self.fail("TODO implement {s} for {}", .{ - @tagName(lazy_sym.kind), - lazy_sym.ty.fmt(self.bin_file.options.module.?), - }); - } - var data_off: i32 = 0; for ( exitlude_jump_relocs, @@ -1698,12 +1666,7 @@ fn genLazy(self: *Self, lazy_sym: link.File.LazySymbol) InnerError!void { try self.genBinOpMir(.cmp, enum_ty, enum_mcv, tag_mcv); const skip_reloc = try self.asmJccReloc(undefined, .ne); - try self.genSetMem( - .{ .reg = ret_reg }, - 0, - Type.usize, - .{ .register_offset = .{ .reg = data_reg, .off = data_off } }, - ); + try self.genSetMem(.{ .reg = ret_reg }, 0, Type.usize, data_mcv.offset(data_off)); try self.genSetMem(.{ .reg = ret_reg }, 8, Type.usize, .{ .immediate = tag_name.len }); exitlude_jump_reloc.* = try self.asmJmpReloc(undefined); @@ -8663,6 +8626,7 @@ fn airMemcpy(self: *Self, inst: Air.Inst.Index) !void { } fn airTagName(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; 
const un_op = self.air.instructions.items(.data)[inst].un_op; const inst_ty = self.air.typeOfIndex(inst); const enum_ty = self.air.typeOf(un_op); @@ -8693,10 +8657,35 @@ fn airTagName(self: *Self, inst: Air.Inst.Index) !void { const operand = try self.resolveInst(un_op); try self.genSetReg(param_regs[1], enum_ty, operand); - const mod = self.bin_file.options.module.?; - const lazy_sym = link.File.LazySymbol.initDecl(.code, enum_ty.getOwnerDecl(), mod); + const data_lazy_sym = link.File.LazySymbol.initDecl(.const_data, enum_ty.getOwnerDecl(), mod); if (self.bin_file.cast(link.File.Elf)) |elf_file| { - const atom_index = elf_file.getOrCreateAtomForLazySymbol(lazy_sym) catch |err| + const atom_index = elf_file.getOrCreateAtomForLazySymbol(data_lazy_sym) catch |err| + return self.fail("{s} creating lazy symbol", .{@errorName(err)}); + const atom = elf_file.getAtom(atom_index); + _ = try atom.getOrCreateOffsetTableEntry(elf_file); + const got_addr = atom.getOffsetTableAddress(elf_file); + try self.asmRegisterMemory( + .mov, + param_regs[2], + Memory.sib(.qword, .{ .base = .{ .reg = .ds }, .disp = @intCast(i32, got_addr) }), + ); + } else if (self.bin_file.cast(link.File.Coff)) |coff_file| { + const atom_index = coff_file.getOrCreateAtomForLazySymbol(data_lazy_sym) catch |err| + return self.fail("{s} creating lazy symbol", .{@errorName(err)}); + const sym_index = coff_file.getAtom(atom_index).getSymbolIndex().?; + try self.genSetReg(param_regs[2], Type.usize, .{ .lea_got = sym_index }); + } else if (self.bin_file.cast(link.File.MachO)) |macho_file| { + const atom_index = macho_file.getOrCreateAtomForLazySymbol(data_lazy_sym) catch |err| + return self.fail("{s} creating lazy symbol", .{@errorName(err)}); + const sym_index = macho_file.getAtom(atom_index).getSymbolIndex().?; + try self.genSetReg(param_regs[2], Type.usize, .{ .lea_got = sym_index }); + } else { + return self.fail("TODO implement airTagName for x86_64 {s}", .{@tagName(self.bin_file.tag)}); + } + + const code_lazy_sym = link.File.LazySymbol.initDecl(.code, enum_ty.getOwnerDecl(), mod); + if (self.bin_file.cast(link.File.Elf)) |elf_file| { + const atom_index = elf_file.getOrCreateAtomForLazySymbol(code_lazy_sym) catch |err| return self.fail("{s} creating lazy symbol", .{@errorName(err)}); const atom = elf_file.getAtom(atom_index); _ = try atom.getOrCreateOffsetTableEntry(elf_file); @@ -8706,13 +8695,13 @@ fn airTagName(self: *Self, inst: Air.Inst.Index) !void { Memory.sib(.qword, .{ .base = .{ .reg = .ds }, .disp = @intCast(i32, got_addr) }), ); } else if (self.bin_file.cast(link.File.Coff)) |coff_file| { - const atom_index = coff_file.getOrCreateAtomForLazySymbol(lazy_sym) catch |err| + const atom_index = coff_file.getOrCreateAtomForLazySymbol(code_lazy_sym) catch |err| return self.fail("{s} creating lazy symbol", .{@errorName(err)}); const sym_index = coff_file.getAtom(atom_index).getSymbolIndex().?; try self.genSetReg(.rax, Type.usize, .{ .lea_got = sym_index }); try self.asmRegister(.call, .rax); } else if (self.bin_file.cast(link.File.MachO)) |macho_file| { - const atom_index = macho_file.getOrCreateAtomForLazySymbol(lazy_sym) catch |err| + const atom_index = macho_file.getOrCreateAtomForLazySymbol(code_lazy_sym) catch |err| return self.fail("{s} creating lazy symbol", .{@errorName(err)}); const sym_index = macho_file.getAtom(atom_index).getSymbolIndex().?; try self.genSetReg(.rax, Type.usize, .{ .lea_got = sym_index }); -- cgit v1.2.3 From 7064d7dbf0157aed9b497e1158243e472082633f Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: 
Mon, 1 May 2023 09:54:42 +0200 Subject: Revert "x86_64: workaround tagName linker issues" This reverts commit aac97b92532e7492b9145e1562e31c2e1fa66c15. --- src/arch/x86_64/CodeGen.zig | 79 ++++++++++++++++++++++++++------------------- 1 file changed, 45 insertions(+), 34 deletions(-) (limited to 'src/arch') diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 6f71e0b810..71dcbfa461 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -1641,16 +1641,48 @@ fn genLazy(self: *Self, lazy_sym: link.File.LazySymbol) InnerError!void { wip_mir_log.debug("{}.@tagName:", .{enum_ty.fmt(self.bin_file.options.module.?)}); const param_regs = abi.getCAbiIntParamRegs(self.target.*); - const param_locks = self.register_manager.lockRegsAssumeUnused(3, param_regs[0..3].*); + const param_locks = self.register_manager.lockRegsAssumeUnused(2, param_regs[0..2].*); defer for (param_locks) |lock| self.register_manager.unlockReg(lock); const ret_reg = param_regs[0]; const enum_mcv = MCValue{ .register = param_regs[1] }; - const data_mcv = MCValue{ .register = param_regs[2] }; var exitlude_jump_relocs = try self.gpa.alloc(u32, enum_ty.enumFieldCount()); defer self.gpa.free(exitlude_jump_relocs); + const data_reg = try self.register_manager.allocReg(null, gp); + const data_lock = self.register_manager.lockRegAssumeUnused(data_reg); + defer self.register_manager.unlockReg(data_lock); + + const data_lazy_sym = link.File.LazySymbol{ .kind = .const_data, .ty = enum_ty }; + if (self.bin_file.cast(link.File.Elf)) |elf_file| { + const atom_index = elf_file.getOrCreateAtomForLazySymbol(data_lazy_sym) catch |err| + return self.fail("{s} creating lazy symbol", .{@errorName(err)}); + const atom = elf_file.getAtom(atom_index); + _ = try atom.getOrCreateOffsetTableEntry(elf_file); + const got_addr = atom.getOffsetTableAddress(elf_file); + try self.asmRegisterMemory( + .mov, + data_reg.to64(), + Memory.sib(.qword, .{ .base = .{ .reg = .ds }, .disp = @intCast(i32, got_addr) }), + ); + } else if (self.bin_file.cast(link.File.Coff)) |coff_file| { + const atom_index = coff_file.getOrCreateAtomForLazySymbol(data_lazy_sym) catch |err| + return self.fail("{s} creating lazy symbol", .{@errorName(err)}); + const sym_index = coff_file.getAtom(atom_index).getSymbolIndex().?; + try self.genSetReg(data_reg, Type.usize, .{ .lea_got = sym_index }); + } else if (self.bin_file.cast(link.File.MachO)) |macho_file| { + const atom_index = macho_file.getOrCreateAtomForLazySymbol(data_lazy_sym) catch |err| + return self.fail("{s} creating lazy symbol", .{@errorName(err)}); + const sym_index = macho_file.getAtom(atom_index).getSymbolIndex().?; + try self.genSetReg(data_reg, Type.usize, .{ .lea_got = sym_index }); + } else { + return self.fail("TODO implement {s} for {}", .{ + @tagName(lazy_sym.kind), + lazy_sym.ty.fmt(self.bin_file.options.module.?), + }); + } + var data_off: i32 = 0; for ( exitlude_jump_relocs, @@ -1666,7 +1698,12 @@ fn genLazy(self: *Self, lazy_sym: link.File.LazySymbol) InnerError!void { try self.genBinOpMir(.cmp, enum_ty, enum_mcv, tag_mcv); const skip_reloc = try self.asmJccReloc(undefined, .ne); - try self.genSetMem(.{ .reg = ret_reg }, 0, Type.usize, data_mcv.offset(data_off)); + try self.genSetMem( + .{ .reg = ret_reg }, + 0, + Type.usize, + .{ .register_offset = .{ .reg = data_reg, .off = data_off } }, + ); try self.genSetMem(.{ .reg = ret_reg }, 8, Type.usize, .{ .immediate = tag_name.len }); exitlude_jump_reloc.* = try self.asmJmpReloc(undefined); @@ -8626,7 +8663,6 @@ fn 
airMemcpy(self: *Self, inst: Air.Inst.Index) !void { } fn airTagName(self: *Self, inst: Air.Inst.Index) !void { - const mod = self.bin_file.options.module.?; const un_op = self.air.instructions.items(.data)[inst].un_op; const inst_ty = self.air.typeOfIndex(inst); const enum_ty = self.air.typeOf(un_op); @@ -8657,35 +8693,10 @@ fn airTagName(self: *Self, inst: Air.Inst.Index) !void { const operand = try self.resolveInst(un_op); try self.genSetReg(param_regs[1], enum_ty, operand); - const data_lazy_sym = link.File.LazySymbol.initDecl(.const_data, enum_ty.getOwnerDecl(), mod); - if (self.bin_file.cast(link.File.Elf)) |elf_file| { - const atom_index = elf_file.getOrCreateAtomForLazySymbol(data_lazy_sym) catch |err| - return self.fail("{s} creating lazy symbol", .{@errorName(err)}); - const atom = elf_file.getAtom(atom_index); - _ = try atom.getOrCreateOffsetTableEntry(elf_file); - const got_addr = atom.getOffsetTableAddress(elf_file); - try self.asmRegisterMemory( - .mov, - param_regs[2], - Memory.sib(.qword, .{ .base = .{ .reg = .ds }, .disp = @intCast(i32, got_addr) }), - ); - } else if (self.bin_file.cast(link.File.Coff)) |coff_file| { - const atom_index = coff_file.getOrCreateAtomForLazySymbol(data_lazy_sym) catch |err| - return self.fail("{s} creating lazy symbol", .{@errorName(err)}); - const sym_index = coff_file.getAtom(atom_index).getSymbolIndex().?; - try self.genSetReg(param_regs[2], Type.usize, .{ .lea_got = sym_index }); - } else if (self.bin_file.cast(link.File.MachO)) |macho_file| { - const atom_index = macho_file.getOrCreateAtomForLazySymbol(data_lazy_sym) catch |err| - return self.fail("{s} creating lazy symbol", .{@errorName(err)}); - const sym_index = macho_file.getAtom(atom_index).getSymbolIndex().?; - try self.genSetReg(param_regs[2], Type.usize, .{ .lea_got = sym_index }); - } else { - return self.fail("TODO implement airTagName for x86_64 {s}", .{@tagName(self.bin_file.tag)}); - } - - const code_lazy_sym = link.File.LazySymbol.initDecl(.code, enum_ty.getOwnerDecl(), mod); + const mod = self.bin_file.options.module.?; + const lazy_sym = link.File.LazySymbol.initDecl(.code, enum_ty.getOwnerDecl(), mod); if (self.bin_file.cast(link.File.Elf)) |elf_file| { - const atom_index = elf_file.getOrCreateAtomForLazySymbol(code_lazy_sym) catch |err| + const atom_index = elf_file.getOrCreateAtomForLazySymbol(lazy_sym) catch |err| return self.fail("{s} creating lazy symbol", .{@errorName(err)}); const atom = elf_file.getAtom(atom_index); _ = try atom.getOrCreateOffsetTableEntry(elf_file); @@ -8695,13 +8706,13 @@ fn airTagName(self: *Self, inst: Air.Inst.Index) !void { Memory.sib(.qword, .{ .base = .{ .reg = .ds }, .disp = @intCast(i32, got_addr) }), ); } else if (self.bin_file.cast(link.File.Coff)) |coff_file| { - const atom_index = coff_file.getOrCreateAtomForLazySymbol(code_lazy_sym) catch |err| + const atom_index = coff_file.getOrCreateAtomForLazySymbol(lazy_sym) catch |err| return self.fail("{s} creating lazy symbol", .{@errorName(err)}); const sym_index = coff_file.getAtom(atom_index).getSymbolIndex().?; try self.genSetReg(.rax, Type.usize, .{ .lea_got = sym_index }); try self.asmRegister(.call, .rax); } else if (self.bin_file.cast(link.File.MachO)) |macho_file| { - const atom_index = macho_file.getOrCreateAtomForLazySymbol(code_lazy_sym) catch |err| + const atom_index = macho_file.getOrCreateAtomForLazySymbol(lazy_sym) catch |err| return self.fail("{s} creating lazy symbol", .{@errorName(err)}); const sym_index = macho_file.getAtom(atom_index).getSymbolIndex().?; try 
self.genSetReg(.rax, Type.usize, .{ .lea_got = sym_index }); -- cgit v1.2.3 From 565f8979cc38d46b8b905f3a8b7db03238779ffc Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Mon, 1 May 2023 13:41:42 +0200 Subject: link: fix accessing source atom's symbol index in codegen Since the owner can either be a `Decl` or a `LazySymbol` we need to preserve this information at the codegen generate function level so that we can then correctly work out the corresponding `Atom` in the linker. --- src/arch/x86_64/CodeGen.zig | 94 +++++++++++++++++++++++++++++---------------- src/link/MachO.zig | 7 +--- 2 files changed, 62 insertions(+), 39 deletions(-) (limited to 'src/arch') diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 71dcbfa461..d5c5938ba0 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -56,10 +56,7 @@ liveness: Liveness, bin_file: *link.File, debug_output: DebugInfoOutput, target: *const std.Target, -owner: union(enum) { - mod_fn: *const Module.Fn, - decl: Module.Decl.Index, -}, +owner: Owner, err_msg: ?*ErrorMsg, args: []MCValue, ret_mcv: InstTracking, @@ -111,6 +108,44 @@ const mir_to_air_map_init = if (builtin.mode == .Debug) std.AutoHashMapUnmanaged const FrameAddr = struct { index: FrameIndex, off: i32 = 0 }; const RegisterOffset = struct { reg: Register, off: i32 = 0 }; +const Owner = union(enum) { + mod_fn: *const Module.Fn, + lazy_sym: link.File.LazySymbol, + + fn getOwnerDecl(owner: Owner) Module.Decl.Index { + return switch (owner) { + .mod_fn => |mod_fn| mod_fn.owner_decl, + .lazy_sym => |lazy_sym| lazy_sym.ty.getOwnerDecl(), + }; + } + + fn getSymbolIndex(owner: Owner, ctx: *Self) !u32 { + switch (owner) { + .mod_fn => |mod_fn| { + const decl_index = mod_fn.owner_decl; + if (ctx.bin_file.cast(link.File.MachO)) |macho_file| { + const atom = try macho_file.getOrCreateAtomForDecl(decl_index); + return macho_file.getAtom(atom).getSymbolIndex().?; + } else if (ctx.bin_file.cast(link.File.Coff)) |coff_file| { + const atom = try coff_file.getOrCreateAtomForDecl(decl_index); + return coff_file.getAtom(atom).getSymbolIndex().?; + } else unreachable; + }, + .lazy_sym => |lazy_sym| { + if (ctx.bin_file.cast(link.File.MachO)) |macho_file| { + const atom = macho_file.getOrCreateAtomForLazySymbol(lazy_sym) catch |err| + return ctx.fail("{s} creating lazy symbol", .{@errorName(err)}); + return macho_file.getAtom(atom).getSymbolIndex().?; + } else if (ctx.bin_file.cast(link.File.Coff)) |coff_file| { + const atom = coff_file.getOrCreateAtomForLazySymbol(lazy_sym) catch |err| + return ctx.fail("{s} creating lazy symbol", .{@errorName(err)}); + return coff_file.getAtom(atom).getSymbolIndex().?; + } else unreachable; + }, + } + } +}; + pub const MCValue = union(enum) { /// No runtime bits. `void` types, empty structs, u0, enums with 1 tag, etc. 
/// TODO Look into deleting this tag and using `dead` instead, since every use @@ -763,7 +798,7 @@ pub fn generateLazy( .target = &bin_file.options.target, .bin_file = bin_file, .debug_output = debug_output, - .owner = .{ .decl = lazy_sym.ty.getOwnerDecl() }, + .owner = .{ .lazy_sym = lazy_sym }, .err_msg = null, .args = undefined, .ret_mcv = undefined, @@ -1724,13 +1759,6 @@ fn genLazy(self: *Self, lazy_sym: link.File.LazySymbol) InnerError!void { } } -fn getOwnerDecl(self: *const Self) Module.Decl.Index { - return switch (self.owner) { - .mod_fn => |mod_fn| mod_fn.owner_decl, - .decl => |index| index, - }; -} - fn getValue(self: *Self, value: MCValue, inst: ?Air.Inst.Index) void { const reg = value.getReg() orelse return; if (self.register_manager.isRegFree(reg)) { @@ -6230,7 +6258,10 @@ fn genArgDbgInfo(self: Self, ty: Type, name: [:0]const u8, mcv: MCValue) !void { //}, else => unreachable, // not a valid function parameter }; - try dw.genArgDbgInfo(name, ty, self.getOwnerDecl(), loc); + // TODO: this might need adjusting like the linkers do. + // Instead of flattening the owner and passing Decl.Index here we may + // want to special case LazySymbol in DWARF linker too. + try dw.genArgDbgInfo(name, ty, self.owner.getOwnerDecl(), loc); }, .plan9 => {}, .none => {}, @@ -6271,7 +6302,10 @@ fn genVarDbgInfo( break :blk .nop; }, }; - try dw.genVarDbgInfo(name, ty, self.getOwnerDecl(), is_ptr, loc); + // TODO: this might need adjusting like the linkers do. + // Instead of flattening the owner and passing Decl.Index here we may + // want to special case LazySymbol in DWARF linker too. + try dw.genVarDbgInfo(name, ty, self.owner.getOwnerDecl(), is_ptr, loc); }, .plan9 => {}, .none => {}, @@ -6403,12 +6437,14 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier .base = .{ .reg = .ds }, .disp = @intCast(i32, got_addr), })); - } else if (self.bin_file.cast(link.File.Coff)) |_| { - const sym_index = try self.getSymbolIndexForDecl(func.owner_decl); + } else if (self.bin_file.cast(link.File.Coff)) |coff_file| { + const atom = try coff_file.getOrCreateAtomForDecl(func.owner_decl); + const sym_index = coff_file.getAtom(atom).getSymbolIndex().?; try self.genSetReg(.rax, Type.usize, .{ .lea_got = sym_index }); try self.asmRegister(.call, .rax); - } else if (self.bin_file.cast(link.File.MachO)) |_| { - const sym_index = try self.getSymbolIndexForDecl(func.owner_decl); + } else if (self.bin_file.cast(link.File.MachO)) |macho_file| { + const atom = try macho_file.getOrCreateAtomForDecl(func.owner_decl); + const sym_index = macho_file.getAtom(atom).getSymbolIndex().?; try self.genSetReg(.rax, Type.usize, .{ .lea_got = sym_index }); try self.asmRegister(.call, .rax); } else if (self.bin_file.cast(link.File.Plan9)) |p9| { @@ -6429,7 +6465,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const decl_name = mem.sliceTo(mod.declPtr(extern_fn.owner_decl).name, 0); const lib_name = mem.sliceTo(extern_fn.lib_name, 0); if (self.bin_file.cast(link.File.Coff)) |coff_file| { - const atom_index = try self.getSymbolIndexForDecl(self.getOwnerDecl()); + const atom_index = try self.owner.getSymbolIndex(self); const sym_index = try coff_file.getGlobalSymbol(decl_name, lib_name); _ = try self.addInst(.{ .tag = .mov_linker, @@ -6442,8 +6478,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier }); try self.asmRegister(.call, .rax); } else if (self.bin_file.cast(link.File.MachO)) |macho_file| { + const atom_index = try 
self.owner.getSymbolIndex(self); const sym_index = try macho_file.getGlobalSymbol(decl_name, lib_name); - const atom_index = try self.getSymbolIndexForDecl(self.getOwnerDecl()); _ = try self.addInst(.{ .tag = .call_extern, .ops = undefined, @@ -7719,7 +7755,7 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr }), ), .load_direct => |sym_index| if (try self.movMirTag(ty) == .mov) { - const atom_index = try self.getSymbolIndexForDecl(self.getOwnerDecl()); + const atom_index = try self.owner.getSymbolIndex(self); _ = try self.addInst(.{ .tag = .mov_linker, .ops = .direct_reloc, @@ -7746,7 +7782,7 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr ); }, .lea_direct, .lea_got => |sym_index| { - const atom_index = try self.getSymbolIndexForDecl(self.getOwnerDecl()); + const atom_index = try self.owner.getSymbolIndex(self); _ = try self.addInst(.{ .tag = switch (src_mcv) { .lea_direct => .lea_linker, @@ -7766,7 +7802,7 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr }); }, .lea_tlv => |sym_index| { - const atom_index = try self.getSymbolIndexForDecl(self.getOwnerDecl()); + const atom_index = try self.owner.getSymbolIndex(self); if (self.bin_file.cast(link.File.MachO)) |_| { _ = try self.addInst(.{ .tag = .lea_linker, @@ -9079,7 +9115,7 @@ fn limitImmediateType(self: *Self, operand: Air.Inst.Ref, comptime T: type) !MCV } fn genTypedValue(self: *Self, arg_tv: TypedValue) InnerError!MCValue { - return switch (try codegen.genTypedValue(self.bin_file, self.src_loc, arg_tv, self.getOwnerDecl())) { + return switch (try codegen.genTypedValue(self.bin_file, self.src_loc, arg_tv, self.owner.getOwnerDecl())) { .mcv => |mcv| switch (mcv) { .none => .none, .undef => .undef, @@ -9390,13 +9426,3 @@ fn regBitSize(self: *Self, ty: Type) u64 { fn regExtraBits(self: *Self, ty: Type) u64 { return self.regBitSize(ty) - ty.bitSize(self.target.*); } - -fn getSymbolIndexForDecl(self: *Self, decl_index: Module.Decl.Index) !u32 { - if (self.bin_file.cast(link.File.MachO)) |macho_file| { - const atom = try macho_file.getOrCreateAtomForDecl(decl_index); - return macho_file.getAtom(atom).getSymbolIndex().?; - } else if (self.bin_file.cast(link.File.Coff)) |coff_file| { - const atom = try coff_file.getOrCreateAtomForDecl(decl_index); - return coff_file.getAtom(atom).getSymbolIndex().?; - } else unreachable; -} diff --git a/src/link/MachO.zig b/src/link/MachO.zig index c14168c66b..458b283d4f 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -2112,7 +2112,7 @@ fn updateLazySymbolAtom( errdefer self.freeAtom(atom_index); log.debug("allocated atom for {s} at 0x{x}", .{ name, vaddr }); - log.debug(" (required alignment 0x{x}", .{required_alignment}); + log.debug(" (required alignment 0x{x})", .{required_alignment}); atom.size = code.len; symbol.n_value = vaddr; @@ -4157,9 +4157,6 @@ pub fn logSymtab(self: *MachO) void { log.debug("stubs entries:", .{}); log.debug("{}", .{self.stub_table}); - - // log.debug("threadlocal entries:", .{}); - // log.debug("{}", .{self.tlv_table}); } pub fn logAtoms(self: *MachO) void { @@ -4199,6 +4196,6 @@ pub fn logAtom(self: *MachO, atom_index: Atom.Index) void { sym.n_value, atom.size, atom.file, - sym.n_sect, + sym.n_sect + 1, }); } -- cgit v1.2.3 From db88b414722e698a392ec65a3ef46730341aea25 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Mon, 1 May 2023 16:58:55 -0400 Subject: x86_64: fix switch multi-prongs and mul/div flags clobber --- src/arch/x86_64/CodeGen.zig | 10 ++++++---- 
test/behavior/inline_switch.zig | 1 - test/behavior/switch.zig | 4 ---- test/behavior/union.zig | 1 - 4 files changed, 6 insertions(+), 10 deletions(-) (limited to 'src/arch') diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index d5c5938ba0..8e61af1c9f 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -2435,6 +2435,7 @@ fn airMulDivBinOp(self: *Self, inst: Air.Inst.Index) !void { } }; const src_ty = Type.initPayload(&src_pl.base); + try self.spillEflagsIfOccupied(); try self.spillRegisters(&.{ .rax, .rdx }); const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); @@ -7236,15 +7237,16 @@ fn airSwitchBr(self: *Self, inst: Air.Inst.Index) !void { var relocs = try self.gpa.alloc(u32, items.len); defer self.gpa.free(relocs); - for (items, relocs) |item, *reloc| { - try self.spillEflagsIfOccupied(); + try self.spillEflagsIfOccupied(); + for (items, relocs, 0..) |item, *reloc, i| { const item_mcv = try self.resolveInst(item); try self.genBinOpMir(.cmp, condition_ty, condition, item_mcv); - reloc.* = try self.asmJccReloc(undefined, .ne); + reloc.* = try self.asmJccReloc(undefined, if (i < relocs.len - 1) .e else .ne); } for (liveness.deaths[case_i]) |operand| self.processDeath(operand); + for (relocs[0 .. relocs.len - 1]) |reloc| try self.performReloc(reloc); try self.genBody(case_body); try self.restoreState(state, &.{}, .{ .emit_instructions = false, @@ -7253,7 +7255,7 @@ fn airSwitchBr(self: *Self, inst: Air.Inst.Index) !void { .close_scope = true, }); - for (relocs) |reloc| try self.performReloc(reloc); + try self.performReloc(relocs[relocs.len - 1]); } if (switch_br.data.else_body_len > 0) { diff --git a/test/behavior/inline_switch.zig b/test/behavior/inline_switch.zig index dcd603c94f..6e5013d83b 100644 --- a/test/behavior/inline_switch.zig +++ b/test/behavior/inline_switch.zig @@ -94,7 +94,6 @@ test "inline else error" { test "inline else enum" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO const E2 = enum(u8) { a = 2, b = 3, c = 4, d = 5 }; diff --git a/test/behavior/switch.zig b/test/behavior/switch.zig index 3924d1d6c1..5e2d6d28c1 100644 --- a/test/behavior/switch.zig +++ b/test/behavior/switch.zig @@ -88,7 +88,6 @@ fn nonConstSwitch(foo: SwitchStatementFoo) !void { const SwitchStatementFoo = enum { A, B, C, D }; test "switch with multiple expressions" { - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO const x = switch (returnsFive()) { @@ -275,7 +274,6 @@ fn testSwitchEnumPtrCapture() !void { } test "switch handles all cases of number" { - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO try testSwitchHandleAllCases(); @@ -455,7 +453,6 @@ test "else prong of switch on error set excludes other cases" { } test "switch prongs with error set cases make a new error set type for capture value" { - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -532,7 
+529,6 @@ test "switch with null and T peer types and inferred result location type" { test "switch prongs with cases with identical payload types" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO const Union = union(enum) { diff --git a/test/behavior/union.zig b/test/behavior/union.zig index f90a336e76..3dd8919935 100644 --- a/test/behavior/union.zig +++ b/test/behavior/union.zig @@ -1288,7 +1288,6 @@ test "return an extern union from C calling convention" { test "noreturn field in union" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO const U = union(enum) { -- cgit v1.2.3 From 3b1ea390a301dbdc992043d97cf618a94e8801de Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Mon, 1 May 2023 19:18:49 -0400 Subject: x86_64: cleanup lazy symbols In theory fixes updating lazy symbols during incremental compilation. --- src/arch/x86_64/CodeGen.zig | 190 ++++++++++++++++++-------------------------- src/link/Coff.zig | 77 ++++++++++-------- src/link/Elf.zig | 75 +++++++++-------- src/link/MachO.zig | 78 ++++++++++-------- 4 files changed, 207 insertions(+), 213 deletions(-) (limited to 'src/arch') diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 8e61af1c9f..a658103c1a 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -112,7 +112,7 @@ const Owner = union(enum) { mod_fn: *const Module.Fn, lazy_sym: link.File.LazySymbol, - fn getOwnerDecl(owner: Owner) Module.Decl.Index { + fn getDecl(owner: Owner) Module.Decl.Index { return switch (owner) { .mod_fn => |mod_fn| mod_fn.owner_decl, .lazy_sym => |lazy_sym| lazy_sym.ty.getOwnerDecl(), @@ -1688,35 +1688,7 @@ fn genLazy(self: *Self, lazy_sym: link.File.LazySymbol) InnerError!void { const data_reg = try self.register_manager.allocReg(null, gp); const data_lock = self.register_manager.lockRegAssumeUnused(data_reg); defer self.register_manager.unlockReg(data_lock); - - const data_lazy_sym = link.File.LazySymbol{ .kind = .const_data, .ty = enum_ty }; - if (self.bin_file.cast(link.File.Elf)) |elf_file| { - const atom_index = elf_file.getOrCreateAtomForLazySymbol(data_lazy_sym) catch |err| - return self.fail("{s} creating lazy symbol", .{@errorName(err)}); - const atom = elf_file.getAtom(atom_index); - _ = try atom.getOrCreateOffsetTableEntry(elf_file); - const got_addr = atom.getOffsetTableAddress(elf_file); - try self.asmRegisterMemory( - .mov, - data_reg.to64(), - Memory.sib(.qword, .{ .base = .{ .reg = .ds }, .disp = @intCast(i32, got_addr) }), - ); - } else if (self.bin_file.cast(link.File.Coff)) |coff_file| { - const atom_index = coff_file.getOrCreateAtomForLazySymbol(data_lazy_sym) catch |err| - return self.fail("{s} creating lazy symbol", .{@errorName(err)}); - const sym_index = coff_file.getAtom(atom_index).getSymbolIndex().?; - try self.genSetReg(data_reg, Type.usize, .{ .lea_got = sym_index }); - } else if (self.bin_file.cast(link.File.MachO)) |macho_file| { - const atom_index = macho_file.getOrCreateAtomForLazySymbol(data_lazy_sym) catch |err| - return self.fail("{s} creating lazy 
symbol", .{@errorName(err)}); - const sym_index = macho_file.getAtom(atom_index).getSymbolIndex().?; - try self.genSetReg(data_reg, Type.usize, .{ .lea_got = sym_index }); - } else { - return self.fail("TODO implement {s} for {}", .{ - @tagName(lazy_sym.kind), - lazy_sym.ty.fmt(self.bin_file.options.module.?), - }); - } + try self.genLazySymbolRef(.lea, data_reg, .{ .kind = .const_data, .ty = enum_ty }); var data_off: i32 = 0; for ( @@ -6262,7 +6234,7 @@ fn genArgDbgInfo(self: Self, ty: Type, name: [:0]const u8, mcv: MCValue) !void { // TODO: this might need adjusting like the linkers do. // Instead of flattening the owner and passing Decl.Index here we may // want to special case LazySymbol in DWARF linker too. - try dw.genArgDbgInfo(name, ty, self.owner.getOwnerDecl(), loc); + try dw.genArgDbgInfo(name, ty, self.owner.getDecl(), loc); }, .plan9 => {}, .none => {}, @@ -6306,7 +6278,7 @@ fn genVarDbgInfo( // TODO: this might need adjusting like the linkers do. // Instead of flattening the owner and passing Decl.Index here we may // want to special case LazySymbol in DWARF linker too. - try dw.genVarDbgInfo(name, ty, self.owner.getOwnerDecl(), is_ptr, loc); + try dw.genVarDbgInfo(name, ty, self.owner.getDecl(), is_ptr, loc); }, .plan9 => {}, .none => {}, @@ -6630,38 +6602,13 @@ fn airCmpVector(self: *Self, inst: Air.Inst.Index) !void { } fn airCmpLtErrorsLen(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const un_op = self.air.instructions.items(.data)[inst].un_op; const addr_reg = try self.register_manager.allocReg(null, gp); const addr_lock = self.register_manager.lockRegAssumeUnused(addr_reg); defer self.register_manager.unlockReg(addr_lock); - - const mod = self.bin_file.options.module.?; - const lazy_sym = link.File.LazySymbol.initDecl(.const_data, null, mod); - if (self.bin_file.cast(link.File.Elf)) |elf_file| { - const atom_index = elf_file.getOrCreateAtomForLazySymbol(lazy_sym) catch |err| - return self.fail("{s} creating lazy symbol", .{@errorName(err)}); - const atom = elf_file.getAtom(atom_index); - _ = try atom.getOrCreateOffsetTableEntry(elf_file); - const got_addr = atom.getOffsetTableAddress(elf_file); - try self.asmRegisterMemory( - .mov, - addr_reg.to64(), - Memory.sib(.qword, .{ .base = .{ .reg = .ds }, .disp = @intCast(i32, got_addr) }), - ); - } else if (self.bin_file.cast(link.File.Coff)) |coff_file| { - const atom_index = coff_file.getOrCreateAtomForLazySymbol(lazy_sym) catch |err| - return self.fail("{s} creating lazy symbol", .{@errorName(err)}); - const sym_index = coff_file.getAtom(atom_index).getSymbolIndex().?; - try self.genSetReg(addr_reg, Type.usize, .{ .lea_got = sym_index }); - } else if (self.bin_file.cast(link.File.MachO)) |macho_file| { - const atom_index = macho_file.getOrCreateAtomForLazySymbol(lazy_sym) catch |err| - return self.fail("{s} creating lazy symbol", .{@errorName(err)}); - const sym_index = macho_file.getAtom(atom_index).getSymbolIndex().?; - try self.genSetReg(addr_reg, Type.usize, .{ .lea_got = sym_index }); - } else { - return self.fail("TODO implement airCmpLtErrorsLen for x86_64 {s}", .{@tagName(self.bin_file.tag)}); - } + try self.genLazySymbolRef(.lea, addr_reg, link.File.LazySymbol.initDecl(.const_data, null, mod)); try self.spillEflagsIfOccupied(); self.eflags_inst = inst; @@ -7999,6 +7946,67 @@ fn genInlineMemset(self: *Self, dst_ptr: MCValue, value: MCValue, len: MCValue) }); } +fn genLazySymbolRef( + self: *Self, + comptime tag: Mir.Inst.Tag, + reg: Register, + lazy_sym: 
link.File.LazySymbol, +) InnerError!void { + if (self.bin_file.cast(link.File.Elf)) |elf_file| { + const atom_index = elf_file.getOrCreateAtomForLazySymbol(lazy_sym) catch |err| + return self.fail("{s} creating lazy symbol", .{@errorName(err)}); + const atom = elf_file.getAtom(atom_index); + _ = try atom.getOrCreateOffsetTableEntry(elf_file); + const got_addr = atom.getOffsetTableAddress(elf_file); + const got_mem = + Memory.sib(.qword, .{ .base = .{ .reg = .ds }, .disp = @intCast(i32, got_addr) }); + switch (tag) { + .lea, .mov => try self.asmRegisterMemory(.mov, reg.to64(), got_mem), + .call => try self.asmMemory(.call, got_mem), + else => unreachable, + } + switch (tag) { + .lea, .call => {}, + .mov => try self.asmRegisterMemory( + tag, + reg.to64(), + Memory.sib(.qword, .{ .base = .{ .reg = reg.to64() } }), + ), + else => unreachable, + } + } else if (self.bin_file.cast(link.File.Coff)) |coff_file| { + const atom_index = coff_file.getOrCreateAtomForLazySymbol(lazy_sym) catch |err| + return self.fail("{s} creating lazy symbol", .{@errorName(err)}); + const sym_index = coff_file.getAtom(atom_index).getSymbolIndex().?; + switch (tag) { + .lea, .call => try self.genSetReg(reg, Type.usize, .{ .lea_got = sym_index }), + .mov => try self.genSetReg(reg, Type.usize, .{ .load_got = sym_index }), + else => unreachable, + } + switch (tag) { + .lea, .mov => {}, + .call => try self.asmRegister(.call, reg), + else => unreachable, + } + } else if (self.bin_file.cast(link.File.MachO)) |macho_file| { + const atom_index = macho_file.getOrCreateAtomForLazySymbol(lazy_sym) catch |err| + return self.fail("{s} creating lazy symbol", .{@errorName(err)}); + const sym_index = macho_file.getAtom(atom_index).getSymbolIndex().?; + switch (tag) { + .lea, .call => try self.genSetReg(reg, Type.usize, .{ .lea_got = sym_index }), + .mov => try self.genSetReg(reg, Type.usize, .{ .load_got = sym_index }), + else => unreachable, + } + switch (tag) { + .lea, .mov => {}, + .call => try self.asmRegister(.call, reg), + else => unreachable, + } + } else { + return self.fail("TODO implement genLazySymbol for x86_64 {s}", .{@tagName(self.bin_file.tag)}); + } +} + fn airPtrToInt(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const result = result: { @@ -8701,6 +8709,7 @@ fn airMemcpy(self: *Self, inst: Air.Inst.Index) !void { } fn airTagName(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const un_op = self.air.instructions.items(.data)[inst].un_op; const inst_ty = self.air.typeOfIndex(inst); const enum_ty = self.air.typeOf(un_op); @@ -8731,38 +8740,17 @@ fn airTagName(self: *Self, inst: Air.Inst.Index) !void { const operand = try self.resolveInst(un_op); try self.genSetReg(param_regs[1], enum_ty, operand); - const mod = self.bin_file.options.module.?; - const lazy_sym = link.File.LazySymbol.initDecl(.code, enum_ty.getOwnerDecl(), mod); - if (self.bin_file.cast(link.File.Elf)) |elf_file| { - const atom_index = elf_file.getOrCreateAtomForLazySymbol(lazy_sym) catch |err| - return self.fail("{s} creating lazy symbol", .{@errorName(err)}); - const atom = elf_file.getAtom(atom_index); - _ = try atom.getOrCreateOffsetTableEntry(elf_file); - const got_addr = atom.getOffsetTableAddress(elf_file); - try self.asmMemory( - .call, - Memory.sib(.qword, .{ .base = .{ .reg = .ds }, .disp = @intCast(i32, got_addr) }), - ); - } else if (self.bin_file.cast(link.File.Coff)) |coff_file| { - const atom_index = 
coff_file.getOrCreateAtomForLazySymbol(lazy_sym) catch |err| - return self.fail("{s} creating lazy symbol", .{@errorName(err)}); - const sym_index = coff_file.getAtom(atom_index).getSymbolIndex().?; - try self.genSetReg(.rax, Type.usize, .{ .lea_got = sym_index }); - try self.asmRegister(.call, .rax); - } else if (self.bin_file.cast(link.File.MachO)) |macho_file| { - const atom_index = macho_file.getOrCreateAtomForLazySymbol(lazy_sym) catch |err| - return self.fail("{s} creating lazy symbol", .{@errorName(err)}); - const sym_index = macho_file.getAtom(atom_index).getSymbolIndex().?; - try self.genSetReg(.rax, Type.usize, .{ .lea_got = sym_index }); - try self.asmRegister(.call, .rax); - } else { - return self.fail("TODO implement airTagName for x86_64 {s}", .{@tagName(self.bin_file.tag)}); - } + try self.genLazySymbolRef( + .call, + .rax, + link.File.LazySymbol.initDecl(.code, enum_ty.getOwnerDecl(), mod), + ); return self.finishAir(inst, dst_mcv, .{ un_op, .none, .none }); } fn airErrorName(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const un_op = self.air.instructions.items(.data)[inst].un_op; const err_ty = self.air.typeOf(un_op); @@ -8774,33 +8762,7 @@ fn airErrorName(self: *Self, inst: Air.Inst.Index) !void { const addr_reg = try self.register_manager.allocReg(null, gp); const addr_lock = self.register_manager.lockRegAssumeUnused(addr_reg); defer self.register_manager.unlockReg(addr_lock); - - const mod = self.bin_file.options.module.?; - const lazy_sym = link.File.LazySymbol.initDecl(.const_data, null, mod); - if (self.bin_file.cast(link.File.Elf)) |elf_file| { - const atom_index = elf_file.getOrCreateAtomForLazySymbol(lazy_sym) catch |err| - return self.fail("{s} creating lazy symbol", .{@errorName(err)}); - const atom = elf_file.getAtom(atom_index); - _ = try atom.getOrCreateOffsetTableEntry(elf_file); - const got_addr = atom.getOffsetTableAddress(elf_file); - try self.asmRegisterMemory( - .mov, - addr_reg.to64(), - Memory.sib(.qword, .{ .base = .{ .reg = .ds }, .disp = @intCast(i32, got_addr) }), - ); - } else if (self.bin_file.cast(link.File.Coff)) |coff_file| { - const atom_index = coff_file.getOrCreateAtomForLazySymbol(lazy_sym) catch |err| - return self.fail("{s} creating lazy symbol", .{@errorName(err)}); - const sym_index = coff_file.getAtom(atom_index).getSymbolIndex().?; - try self.genSetReg(addr_reg, Type.usize, .{ .lea_got = sym_index }); - } else if (self.bin_file.cast(link.File.MachO)) |macho_file| { - const atom_index = macho_file.getOrCreateAtomForLazySymbol(lazy_sym) catch |err| - return self.fail("{s} creating lazy symbol", .{@errorName(err)}); - const sym_index = macho_file.getAtom(atom_index).getSymbolIndex().?; - try self.genSetReg(addr_reg, Type.usize, .{ .lea_got = sym_index }); - } else { - return self.fail("TODO implement airErrorName for x86_64 {s}", .{@tagName(self.bin_file.tag)}); - } + try self.genLazySymbolRef(.lea, addr_reg, link.File.LazySymbol.initDecl(.const_data, null, mod)); const start_reg = try self.register_manager.allocReg(null, gp); const start_lock = self.register_manager.lockRegAssumeUnused(start_reg); @@ -9117,7 +9079,7 @@ fn limitImmediateType(self: *Self, operand: Air.Inst.Ref, comptime T: type) !MCV } fn genTypedValue(self: *Self, arg_tv: TypedValue) InnerError!MCValue { - return switch (try codegen.genTypedValue(self.bin_file, self.src_loc, arg_tv, self.owner.getOwnerDecl())) { + return switch (try codegen.genTypedValue(self.bin_file, self.src_loc, arg_tv, self.owner.getDecl())) { .mcv => 
|mcv| switch (mcv) { .none => .none, .undef => .undef, diff --git a/src/link/Coff.zig b/src/link/Coff.zig index 41d4617e53..81e8c57bdd 100644 --- a/src/link/Coff.zig +++ b/src/link/Coff.zig @@ -143,8 +143,11 @@ const Section = struct { const LazySymbolTable = std.AutoArrayHashMapUnmanaged(Module.Decl.OptionalIndex, LazySymbolMetadata); const LazySymbolMetadata = struct { - text_atom: ?Atom.Index = null, - rdata_atom: ?Atom.Index = null, + const State = enum { unused, pending_flush, flushed }; + text_atom: Atom.Index = undefined, + rdata_atom: Atom.Index = undefined, + text_state: State = .unused, + rdata_state: State = .unused, }; const DeclMetadata = struct { @@ -1150,8 +1153,6 @@ pub fn updateDecl( const tracy = trace(@src()); defer tracy.end(); - try self.updateLazySymbol(decl_index); - const decl = module.declPtr(decl_index); if (decl.val.tag() == .extern_fn) { @@ -1194,21 +1195,6 @@ pub fn updateDecl( return self.updateDeclExports(module, decl_index, module.getDeclExports(decl_index)); } -fn updateLazySymbol(self: *Coff, decl: ?Module.Decl.Index) !void { - const metadata = self.lazy_syms.get(Module.Decl.OptionalIndex.init(decl)) orelse return; - const mod = self.base.options.module.?; - if (metadata.text_atom) |atom| try self.updateLazySymbolAtom( - link.File.LazySymbol.initDecl(.code, decl, mod), - atom, - self.text_section_index.?, - ); - if (metadata.rdata_atom) |atom| try self.updateLazySymbolAtom( - link.File.LazySymbol.initDecl(.const_data, decl, mod), - atom, - self.rdata_section_index.?, - ); -} - fn updateLazySymbolAtom( self: *Coff, sym: link.File.LazySymbol, @@ -1279,14 +1265,19 @@ pub fn getOrCreateAtomForLazySymbol(self: *Coff, sym: link.File.LazySymbol) !Ato const gop = try self.lazy_syms.getOrPut(self.base.allocator, sym.getDecl()); errdefer _ = if (!gop.found_existing) self.lazy_syms.pop(); if (!gop.found_existing) gop.value_ptr.* = .{}; - const atom_ptr = switch (sym.kind) { - .code => &gop.value_ptr.text_atom, - .const_data => &gop.value_ptr.rdata_atom, + const metadata: struct { atom: *Atom.Index, state: *LazySymbolMetadata.State } = switch (sym.kind) { + .code => .{ .atom = &gop.value_ptr.text_atom, .state = &gop.value_ptr.text_state }, + .const_data => .{ .atom = &gop.value_ptr.rdata_atom, .state = &gop.value_ptr.rdata_state }, }; - if (atom_ptr.*) |atom| return atom; - const atom = try self.createAtom(); - atom_ptr.* = atom; - try self.updateLazySymbolAtom(sym, atom, switch (sym.kind) { + switch (metadata.state.*) { + .unused => metadata.atom.* = try self.createAtom(), + .pending_flush => return metadata.atom.*, + .flushed => {}, + } + metadata.state.* = .pending_flush; + const atom = metadata.atom.*; + // anyerror needs to be deferred until flushModule + if (sym.getDecl() != .none) try self.updateLazySymbolAtom(sym, atom, switch (sym.kind) { .code => self.text_section_index.?, .const_data => self.rdata_section_index.?, }); @@ -1617,15 +1608,35 @@ pub fn flushModule(self: *Coff, comp: *Compilation, prog_node: *std.Progress.Nod sub_prog_node.activate(); defer sub_prog_node.end(); - // Most lazy symbols can be updated when the corresponding decl is, - // so we only have to worry about the one without an associated decl. 
- self.updateLazySymbol(null) catch |err| switch (err) { - error.CodegenFail => return error.FlushFailure, - else => |e| return e, - }; - const gpa = self.base.allocator; + const module = self.base.options.module orelse return error.LinkingWithoutZigSourceUnimplemented; + + if (self.lazy_syms.getPtr(.none)) |metadata| { + // Most lazy symbols can be updated on first use, but + // anyerror needs to wait for everything to be flushed. + if (metadata.text_state != .unused) self.updateLazySymbolAtom( + link.File.LazySymbol.initDecl(.code, null, module), + metadata.text_atom, + self.text_section_index.?, + ) catch |err| return switch (err) { + error.CodegenFail => error.FlushFailure, + else => |e| e, + }; + if (metadata.rdata_state != .unused) self.updateLazySymbolAtom( + link.File.LazySymbol.initDecl(.const_data, null, module), + metadata.rdata_atom, + self.rdata_section_index.?, + ) catch |err| return switch (err) { + error.CodegenFail => error.FlushFailure, + else => |e| e, + }; + } + for (self.lazy_syms.values()) |*metadata| { + if (metadata.text_state != .unused) metadata.text_state = .flushed; + if (metadata.rdata_state != .unused) metadata.rdata_state = .flushed; + } + while (self.unresolved.popOrNull()) |entry| { assert(entry.value); // We only expect imports generated by the incremental linker for now. const global = self.globals.items[entry.key]; diff --git a/src/link/Elf.zig b/src/link/Elf.zig index ec113e5c8a..724ec76500 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -65,8 +65,11 @@ const Section = struct { }; const LazySymbolMetadata = struct { - text_atom: ?Atom.Index = null, - rodata_atom: ?Atom.Index = null, + const State = enum { unused, pending_flush, flushed }; + text_atom: Atom.Index = undefined, + rodata_atom: Atom.Index = undefined, + text_state: State = .unused, + rodata_state: State = .unused, }; const DeclMetadata = struct { @@ -1032,17 +1035,35 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node sub_prog_node.activate(); defer sub_prog_node.end(); - // Most lazy symbols can be updated when the corresponding decl is, - // so we only have to worry about the one without an associated decl. - self.updateLazySymbol(null) catch |err| switch (err) { - error.CodegenFail => return error.FlushFailure, - else => |e| return e, - }; - // TODO This linker code currently assumes there is only 1 compilation unit and it // corresponds to the Zig source code. const module = self.base.options.module orelse return error.LinkingWithoutZigSourceUnimplemented; + if (self.lazy_syms.getPtr(.none)) |metadata| { + // Most lazy symbols can be updated on first use, but + // anyerror needs to wait for everything to be flushed. 
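
For context, the three-value `State` added to each linker's `LazySymbolMetadata`
is what drives this flush logic. A rough standalone sketch of the lifecycle
(simplified; the atom handle and section plumbing from the patch are elided):

    const State = enum { unused, pending_flush, flushed };

    // First reference from codegen, i.e. getOrCreateAtomForLazySymbol:
    // returns true when the symbol's contents must be (re)generated now.
    fn onFirstUse(state: *State) bool {
        switch (state.*) {
            .unused => {}, // create the atom, then generate it if decl-backed
            .pending_flush => return false, // already created and queued
            .flushed => {}, // incremental update: regenerate the existing atom
        }
        state.* = .pending_flush;
        return true;
    }

    // flushModule: emit the decl-less symbols last, then mark everything clean.
    fn onFlush(state: *State) void {
        if (state.* != .unused) state.* = .flushed;
    }

The `.none` key in `lazy_syms` is exactly the decl-less case: symbols derived
from `anyerror`, such as the error-name table behind `@errorName`, are only
final once every declaration (and therefore every error) has been processed,
hence the deferral to `flushModule` here.
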
+ if (metadata.text_state != .unused) self.updateLazySymbolAtom( + File.LazySymbol.initDecl(.code, null, module), + metadata.text_atom, + self.text_section_index.?, + ) catch |err| return switch (err) { + error.CodegenFail => error.FlushFailure, + else => |e| e, + }; + if (metadata.rodata_state != .unused) self.updateLazySymbolAtom( + File.LazySymbol.initDecl(.const_data, null, module), + metadata.rodata_atom, + self.rodata_section_index.?, + ) catch |err| return switch (err) { + error.CodegenFail => error.FlushFailure, + else => |e| e, + }; + } + for (self.lazy_syms.values()) |*metadata| { + if (metadata.text_state != .unused) metadata.text_state = .flushed; + if (metadata.rodata_state != .unused) metadata.rodata_state = .flushed; + } + const target_endian = self.base.options.target.cpu.arch.endian(); const foreign_endian = target_endian != builtin.cpu.arch.endian(); @@ -2378,14 +2399,19 @@ pub fn getOrCreateAtomForLazySymbol(self: *Elf, sym: File.LazySymbol) !Atom.Inde const gop = try self.lazy_syms.getOrPut(self.base.allocator, sym.getDecl()); errdefer _ = if (!gop.found_existing) self.lazy_syms.pop(); if (!gop.found_existing) gop.value_ptr.* = .{}; - const atom_ptr = switch (sym.kind) { - .code => &gop.value_ptr.text_atom, - .const_data => &gop.value_ptr.rodata_atom, + const metadata: struct { atom: *Atom.Index, state: *LazySymbolMetadata.State } = switch (sym.kind) { + .code => .{ .atom = &gop.value_ptr.text_atom, .state = &gop.value_ptr.text_state }, + .const_data => .{ .atom = &gop.value_ptr.rodata_atom, .state = &gop.value_ptr.rodata_state }, }; - if (atom_ptr.*) |atom| return atom; - const atom = try self.createAtom(); - atom_ptr.* = atom; - try self.updateLazySymbolAtom(sym, atom, switch (sym.kind) { + switch (metadata.state.*) { + .unused => metadata.atom.* = try self.createAtom(), + .pending_flush => return metadata.atom.*, + .flushed => {}, + } + metadata.state.* = .pending_flush; + const atom = metadata.atom.*; + // anyerror needs to be deferred until flushModule + if (sym.getDecl() != .none) try self.updateLazySymbolAtom(sym, atom, switch (sym.kind) { .code => self.text_section_index.?, .const_data => self.rodata_section_index.?, }); @@ -2598,8 +2624,6 @@ pub fn updateDecl( const tracy = trace(@src()); defer tracy.end(); - try self.updateLazySymbol(decl_index); - const decl = module.declPtr(decl_index); if (decl.val.tag() == .extern_fn) { @@ -2666,21 +2690,6 @@ pub fn updateDecl( return self.updateDeclExports(module, decl_index, module.getDeclExports(decl_index)); } -fn updateLazySymbol(self: *Elf, decl: ?Module.Decl.Index) !void { - const metadata = self.lazy_syms.get(Module.Decl.OptionalIndex.init(decl)) orelse return; - const mod = self.base.options.module.?; - if (metadata.text_atom) |atom| try self.updateLazySymbolAtom( - File.LazySymbol.initDecl(.code, decl, mod), - atom, - self.text_section_index.?, - ); - if (metadata.rodata_atom) |atom| try self.updateLazySymbolAtom( - File.LazySymbol.initDecl(.const_data, decl, mod), - atom, - self.rodata_section_index.?, - ); -} - fn updateLazySymbolAtom( self: *Elf, sym: File.LazySymbol, diff --git a/src/link/MachO.zig b/src/link/MachO.zig index 458b283d4f..a346ec756f 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -236,8 +236,11 @@ const is_hot_update_compatible = switch (builtin.target.os.tag) { const LazySymbolTable = std.AutoArrayHashMapUnmanaged(Module.Decl.OptionalIndex, LazySymbolMetadata); const LazySymbolMetadata = struct { - text_atom: ?Atom.Index = null, - data_const_atom: ?Atom.Index = null, + const State 
= enum { unused, pending_flush, flushed }; + text_atom: Atom.Index = undefined, + data_const_atom: Atom.Index = undefined, + text_state: State = .unused, + data_const_state: State = .unused, }; const TlvSymbolTable = std.AutoArrayHashMapUnmanaged(SymbolWithLoc, Atom.Index); @@ -493,15 +496,33 @@ pub fn flushModule(self: *MachO, comp: *Compilation, prog_node: *std.Progress.No sub_prog_node.activate(); defer sub_prog_node.end(); - // Most lazy symbols can be updated when the corresponding decl is, - // so we only have to worry about the one without an associated decl. - self.updateLazySymbol(null) catch |err| switch (err) { - error.CodegenFail => return error.FlushFailure, - else => |e| return e, - }; - const module = self.base.options.module orelse return error.LinkingWithoutZigSourceUnimplemented; + if (self.lazy_syms.getPtr(.none)) |metadata| { + // Most lazy symbols can be updated on first use, but + // anyerror needs to wait for everything to be flushed. + if (metadata.text_state != .unused) self.updateLazySymbolAtom( + File.LazySymbol.initDecl(.code, null, module), + metadata.text_atom, + self.text_section_index.?, + ) catch |err| return switch (err) { + error.CodegenFail => error.FlushFailure, + else => |e| e, + }; + if (metadata.data_const_state != .unused) self.updateLazySymbolAtom( + File.LazySymbol.initDecl(.const_data, null, module), + metadata.data_const_atom, + self.data_const_section_index.?, + ) catch |err| return switch (err) { + error.CodegenFail => error.FlushFailure, + else => |e| e, + }; + } + for (self.lazy_syms.values()) |*metadata| { + if (metadata.text_state != .unused) metadata.text_state = .flushed; + if (metadata.data_const_state != .unused) metadata.data_const_state = .flushed; + } + if (self.d_sym) |*d_sym| { try d_sym.dwarf.flushModule(module); } @@ -1960,8 +1981,6 @@ pub fn updateDecl(self: *MachO, module: *Module, decl_index: Module.Decl.Index) const tracy = trace(@src()); defer tracy.end(); - try self.updateLazySymbol(decl_index); - const decl = module.declPtr(decl_index); if (decl.val.tag() == .extern_fn) { @@ -2036,21 +2055,6 @@ pub fn updateDecl(self: *MachO, module: *Module, decl_index: Module.Decl.Index) try self.updateDeclExports(module, decl_index, module.getDeclExports(decl_index)); } -fn updateLazySymbol(self: *MachO, decl: ?Module.Decl.Index) !void { - const metadata = self.lazy_syms.get(Module.Decl.OptionalIndex.init(decl)) orelse return; - const mod = self.base.options.module.?; - if (metadata.text_atom) |atom| try self.updateLazySymbolAtom( - File.LazySymbol.initDecl(.code, decl, mod), - atom, - self.text_section_index.?, - ); - if (metadata.data_const_atom) |atom| try self.updateLazySymbolAtom( - File.LazySymbol.initDecl(.const_data, decl, mod), - atom, - self.data_const_section_index.?, - ); -} - fn updateLazySymbolAtom( self: *MachO, sym: File.LazySymbol, @@ -2125,14 +2129,22 @@ pub fn getOrCreateAtomForLazySymbol(self: *MachO, sym: File.LazySymbol) !Atom.In const gop = try self.lazy_syms.getOrPut(self.base.allocator, sym.getDecl()); errdefer _ = if (!gop.found_existing) self.lazy_syms.pop(); if (!gop.found_existing) gop.value_ptr.* = .{}; - const atom_ptr = switch (sym.kind) { - .code => &gop.value_ptr.text_atom, - .const_data => &gop.value_ptr.data_const_atom, + const metadata: struct { atom: *Atom.Index, state: *LazySymbolMetadata.State } = switch (sym.kind) { + .code => .{ .atom = &gop.value_ptr.text_atom, .state = &gop.value_ptr.text_state }, + .const_data => .{ + .atom = &gop.value_ptr.data_const_atom, + .state = 
&gop.value_ptr.data_const_state, + }, }; - if (atom_ptr.*) |atom| return atom; - const atom = try self.createAtom(); - atom_ptr.* = atom; - try self.updateLazySymbolAtom(sym, atom, switch (sym.kind) { + switch (metadata.state.*) { + .unused => metadata.atom.* = try self.createAtom(), + .pending_flush => return metadata.atom.*, + .flushed => {}, + } + metadata.state.* = .pending_flush; + const atom = metadata.atom.*; + // anyerror needs to be deferred until flushModule + if (sym.getDecl() != .none) try self.updateLazySymbolAtom(sym, atom, switch (sym.kind) { .code => self.text_section_index.?, .const_data => self.data_const_section_index.?, }); -- cgit v1.2.3 From 152c7b1885a0ed8e5a2435ef7b51568b357eaea4 Mon Sep 17 00:00:00 2001 From: mlugg Date: Sun, 30 Apr 2023 10:30:40 +0100 Subject: Implement multi-argument @min/@max and notice bounds Resolves: #14039 --- src/AstGen.zig | 63 +++++++--- src/BuiltinFn.zig | 4 +- src/Sema.zig | 241 +++++++++++++++++++++++++++++++------- src/Zir.zig | 16 ++- src/arch/x86_64/CodeGen.zig | 8 +- src/print_zir.zig | 2 + src/type.zig | 67 ++++++++++- test/behavior/maximum_minimum.zig | 56 +++++++++ 8 files changed, 388 insertions(+), 69 deletions(-) (limited to 'src/arch') diff --git a/src/AstGen.zig b/src/AstGen.zig index aece3eafec..2568b89980 100644 --- a/src/AstGen.zig +++ b/src/AstGen.zig @@ -7907,6 +7907,48 @@ fn typeOf( return rvalue(gz, ri, typeof_inst, node); } +fn minMax( + gz: *GenZir, + scope: *Scope, + ri: ResultInfo, + node: Ast.Node.Index, + args: []const Ast.Node.Index, + comptime op: enum { min, max }, +) InnerError!Zir.Inst.Ref { + const astgen = gz.astgen; + if (args.len < 2) { + return astgen.failNode(node, "expected at least 2 arguments, found 0", .{}); + } + if (args.len == 2) { + const tag: Zir.Inst.Tag = switch (op) { + .min => .min, + .max => .max, + }; + const a = try expr(gz, scope, .{ .rl = .none }, args[0]); + const b = try expr(gz, scope, .{ .rl = .none }, args[1]); + const result = try gz.addPlNode(tag, node, Zir.Inst.Bin{ + .lhs = a, + .rhs = b, + }); + return rvalue(gz, ri, result, node); + } + const payload_index = try addExtra(astgen, Zir.Inst.NodeMultiOp{ + .src_node = gz.nodeIndexToRelative(node), + }); + var extra_index = try reserveExtra(gz.astgen, args.len); + for (args) |arg| { + const arg_ref = try expr(gz, scope, .{ .rl = .none }, arg); + astgen.extra.items[extra_index] = @enumToInt(arg_ref); + extra_index += 1; + } + const tag: Zir.Inst.Extended = switch (op) { + .min => .min_multi, + .max => .max_multi, + }; + const result = try gz.addExtendedMultiOpPayloadIndex(tag, payload_index, args.len); + return rvalue(gz, ri, result, node); +} + fn builtinCall( gz: *GenZir, scope: *Scope, @@ -7997,6 +8039,8 @@ fn builtinCall( .TypeOf => return typeOf( gz, scope, ri, node, params), .union_init => return unionInit(gz, scope, ri, node, params), .c_import => return cImport( gz, scope, node, params[0]), + .min => return minMax( gz, scope, ri, node, params, .min), + .max => return minMax( gz, scope, ri, node, params, .max), // zig fmt: on .@"export" => { @@ -8358,25 +8402,6 @@ fn builtinCall( return rvalue(gz, ri, result, node); }, - .max => { - const a = try expr(gz, scope, .{ .rl = .none }, params[0]); - const b = try expr(gz, scope, .{ .rl = .none }, params[1]); - const result = try gz.addPlNode(.max, node, Zir.Inst.Bin{ - .lhs = a, - .rhs = b, - }); - return rvalue(gz, ri, result, node); - }, - .min => { - const a = try expr(gz, scope, .{ .rl = .none }, params[0]); - const b = try expr(gz, scope, .{ .rl = .none }, params[1]); - 
const result = try gz.addPlNode(.min, node, Zir.Inst.Bin{ - .lhs = a, - .rhs = b, - }); - return rvalue(gz, ri, result, node); - }, - .add_with_overflow => return overflowArithmetic(gz, scope, ri, node, params, .add_with_overflow), .sub_with_overflow => return overflowArithmetic(gz, scope, ri, node, params, .sub_with_overflow), .mul_with_overflow => return overflowArithmetic(gz, scope, ri, node, params, .mul_with_overflow), diff --git a/src/BuiltinFn.zig b/src/BuiltinFn.zig index d31a1e7c25..426939afdf 100644 --- a/src/BuiltinFn.zig +++ b/src/BuiltinFn.zig @@ -608,7 +608,7 @@ pub const list = list: { "@max", .{ .tag = .max, - .param_count = 2, + .param_count = null, }, }, .{ @@ -629,7 +629,7 @@ pub const list = list: { "@min", .{ .tag = .min, - .param_count = 2, + .param_count = null, }, }, .{ diff --git a/src/Sema.zig b/src/Sema.zig index 79f2fd7fca..13f4d684a1 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -1137,6 +1137,8 @@ fn analyzeBodyInner( .asm_expr => try sema.zirAsm( block, extended, true), .typeof_peer => try sema.zirTypeofPeer( block, extended), .compile_log => try sema.zirCompileLog( extended), + .min_multi => try sema.zirMinMaxMulti( block, extended, .min), + .max_multi => try sema.zirMinMaxMulti( block, extended, .max), .add_with_overflow => try sema.zirOverflowArithmetic(block, extended, extended.opcode), .sub_with_overflow => try sema.zirOverflowArithmetic(block, extended, extended.opcode), .mul_with_overflow => try sema.zirOverflowArithmetic(block, extended, extended.opcode), @@ -12143,7 +12145,7 @@ fn zirShl( lhs_ty, try lhs_ty.maxInt(sema.arena, target), ); - const rhs_limited = try sema.analyzeMinMax(block, rhs_src, rhs, max_int, .min, rhs_src, rhs_src); + const rhs_limited = try sema.analyzeMinMax(block, rhs_src, .min, &.{ rhs, max_int }, &.{ rhs_src, rhs_src }); break :rhs try sema.intCast(block, src, lhs_ty, rhs_src, rhs_limited, rhs_src, false); } else { break :rhs rhs; @@ -21752,64 +21754,223 @@ fn zirMinMax( const rhs = try sema.resolveInst(extra.rhs); try sema.checkNumericType(block, lhs_src, sema.typeOf(lhs)); try sema.checkNumericType(block, rhs_src, sema.typeOf(rhs)); - return sema.analyzeMinMax(block, src, lhs, rhs, air_tag, lhs_src, rhs_src); + return sema.analyzeMinMax(block, src, air_tag, &.{ lhs, rhs }, &.{ lhs_src, rhs_src }); +} + +fn zirMinMaxMulti( + sema: *Sema, + block: *Block, + extended: Zir.Inst.Extended.InstData, + comptime air_tag: Air.Inst.Tag, +) CompileError!Air.Inst.Ref { + const extra = sema.code.extraData(Zir.Inst.NodeMultiOp, extended.operand); + const src_node = extra.data.src_node; + const src = LazySrcLoc.nodeOffset(src_node); + const operands = sema.code.refSlice(extra.end, extended.small); + + const air_refs = try sema.arena.alloc(Air.Inst.Ref, operands.len); + const operand_srcs = try sema.arena.alloc(LazySrcLoc, operands.len); + + for (operands, air_refs, operand_srcs, 0..) 
|zir_ref, *air_ref, *op_src, i| { + op_src.* = switch (i) { + 0 => .{ .node_offset_builtin_call_arg0 = src_node }, + 1 => .{ .node_offset_builtin_call_arg1 = src_node }, + 2 => .{ .node_offset_builtin_call_arg2 = src_node }, + 3 => .{ .node_offset_builtin_call_arg3 = src_node }, + 4 => .{ .node_offset_builtin_call_arg4 = src_node }, + 5 => .{ .node_offset_builtin_call_arg5 = src_node }, + else => src, // TODO: better source location + }; + air_ref.* = try sema.resolveInst(zir_ref); + try sema.checkNumericType(block, op_src.*, sema.typeOf(air_ref.*)); + } + + return sema.analyzeMinMax(block, src, air_tag, air_refs, operand_srcs); } fn analyzeMinMax( sema: *Sema, block: *Block, src: LazySrcLoc, - lhs: Air.Inst.Ref, - rhs: Air.Inst.Ref, comptime air_tag: Air.Inst.Tag, - lhs_src: LazySrcLoc, - rhs_src: LazySrcLoc, + operands: []const Air.Inst.Ref, + operand_srcs: []const LazySrcLoc, ) CompileError!Air.Inst.Ref { - const simd_op = try sema.checkSimdBinOp(block, src, lhs, rhs, lhs_src, rhs_src); + assert(operands.len == operand_srcs.len); + assert(operands.len > 0); - // TODO @max(max_int, undefined) should return max_int + if (operands.len == 1) return operands[0]; - const runtime_src = if (simd_op.lhs_val) |lhs_val| rs: { - if (lhs_val.isUndef()) return sema.addConstUndef(simd_op.result_ty); + const mod = sema.mod; + const target = mod.getTarget(); + const opFunc = switch (air_tag) { + .min => Value.numberMin, + .max => Value.numberMax, + else => unreachable, + }; - const rhs_val = simd_op.rhs_val orelse break :rs rhs_src; + // First, find all comptime-known arguments, and get their min/max + var runtime_known = try std.DynamicBitSet.initFull(sema.arena, operands.len); + var cur_minmax: ?Air.Inst.Ref = null; + var cur_minmax_src: LazySrcLoc = undefined; // defined if cur_minmax not null + for (operands, operand_srcs, 0..) |operand, operand_src, operand_idx| { + // Resolve the value now to avoid redundant calls to `checkSimdBinOp` - we'll have to call + // it in the runtime path anyway since the result type may have been refined + const uncasted_operand_val = (try sema.resolveMaybeUndefVal(operand)) orelse continue; + if (cur_minmax) |cur| { + const simd_op = try sema.checkSimdBinOp(block, src, cur, operand, cur_minmax_src, operand_src); + const cur_val = simd_op.lhs_val.?; // cur_minmax is comptime-known + const operand_val = simd_op.rhs_val.?; // we checked the operand was resolvable above + + runtime_known.unset(operand_idx); + + if (cur_val.isUndef()) continue; // result is also undef + if (operand_val.isUndef()) { + cur_minmax = try sema.addConstUndef(simd_op.result_ty); + continue; + } - if (rhs_val.isUndef()) return sema.addConstUndef(simd_op.result_ty); + try sema.resolveLazyValue(cur_val); + try sema.resolveLazyValue(operand_val); - try sema.resolveLazyValue(lhs_val); - try sema.resolveLazyValue(rhs_val); + const vec_len = simd_op.len orelse { + const result_val = opFunc(cur_val, operand_val, target); + cur_minmax = try sema.addConstant(simd_op.result_ty, result_val); + continue; + }; + var lhs_buf: Value.ElemValueBuffer = undefined; + var rhs_buf: Value.ElemValueBuffer = undefined; + const elems = try sema.arena.alloc(Value, vec_len); + for (elems, 0..) 
|*elem, i| { + const lhs_elem_val = cur_val.elemValueBuffer(mod, i, &lhs_buf); + const rhs_elem_val = operand_val.elemValueBuffer(mod, i, &rhs_buf); + elem.* = opFunc(lhs_elem_val, rhs_elem_val, target); + } + cur_minmax = try sema.addConstant( + simd_op.result_ty, + try Value.Tag.aggregate.create(sema.arena, elems), + ); + } else { + runtime_known.unset(operand_idx); + cur_minmax = try sema.addConstant(sema.typeOf(operand), uncasted_operand_val); + cur_minmax_src = operand_src; + } + } + + const comptime_refined_ty: ?Type = if (cur_minmax) |ct_minmax_ref| refined: { + // Refine the comptime-known result type based on the operation + const val = (try sema.resolveMaybeUndefVal(ct_minmax_ref)).?; + const orig_ty = sema.typeOf(ct_minmax_ref); + const refined_ty = if (orig_ty.zigTypeTag() == .Vector) blk: { + const elem_ty = orig_ty.childType(); + const len = orig_ty.vectorLen(); + + if (len == 0) break :blk orig_ty; + if (elem_ty.isAnyFloat()) break :blk orig_ty; // can't refine floats + + var cur_min: Value = try val.elemValue(mod, sema.arena, 0); + var cur_max: Value = cur_min; + for (1..len) |idx| { + const elem_val = try val.elemValue(mod, sema.arena, idx); + if (elem_val.isUndef()) break :blk orig_ty; // can't refine undef + if (Value.order(elem_val, cur_min, target).compare(.lt)) cur_min = elem_val; + if (Value.order(elem_val, cur_max, target).compare(.gt)) cur_max = elem_val; + } + + const refined_elem_ty = try Type.intFittingRange(target, sema.arena, cur_min, cur_max); + break :blk try Type.vector(sema.arena, len, refined_elem_ty); + } else blk: { + if (orig_ty.isAnyFloat()) break :blk orig_ty; // can't refine floats + if (val.isUndef()) break :blk orig_ty; // can't refine undef + break :blk try Type.intFittingRange(target, sema.arena, val, val); + }; + + // Apply the refined type to the current value - this isn't strictly necessary in the + // runtime case since we'll refine again afterwards, but keeping things as small as possible + // will allow us to emit more optimal AIR (if all the runtime operands have smaller types + // than the non-refined comptime type). + if (!refined_ty.eql(orig_ty, mod)) { + if (std.debug.runtime_safety) { + assert(try sema.intFitsInType(val, refined_ty, null)); + } + cur_minmax = try sema.addConstant(refined_ty, val); + } + + break :refined refined_ty; + } else null; + + const runtime_idx = runtime_known.findFirstSet() orelse return cur_minmax.?; + const runtime_src = operand_srcs[runtime_idx]; + try sema.requireRuntimeBlock(block, src, runtime_src); + + // Now, iterate over runtime operands, emitting a min/max instruction for each. We'll refine the + // type again at the end, based on the comptime-known bound. 
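
As a concrete illustration of that bound (it matches the behavior tests added
at the end of this patch): with `x: u16` and `z: u32` only runtime-known,

    const lo = @min(x, z, 30); // can never exceed 30, so the type refines to u5
    const hi = @max(x, z, 30); // at least 30, but up to maxInt(u32): stays u32

The comptime operand tightens the upper bound of `@min` and the lower bound of
`@max`, and the final `intcast` emitted below shrinks the peer-resolved result
to the smaller type.
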
+ + // If the comptime-known part is undef we can avoid emitting actual instructions later + const known_undef = if (cur_minmax) |operand| blk: { + const val = (try sema.resolveMaybeUndefVal(operand)).?; + break :blk val.isUndef(); + } else false; + + if (cur_minmax == null) { + // No comptime operands - use the first operand as the starting value + assert(runtime_idx == 0); + cur_minmax = operands[0]; + cur_minmax_src = runtime_src; + runtime_known.unset(0); // don't look at this operand in the loop below + } + + var it = runtime_known.iterator(.{}); + while (it.next()) |idx| { + const lhs = cur_minmax.?; + const lhs_src = cur_minmax_src; + const rhs = operands[idx]; + const rhs_src = operand_srcs[idx]; + const simd_op = try sema.checkSimdBinOp(block, src, lhs, rhs, lhs_src, rhs_src); + if (known_undef) { + cur_minmax = try sema.addConstant(simd_op.result_ty, Value.undef); + } else { + cur_minmax = try block.addBinOp(air_tag, simd_op.lhs, simd_op.rhs); + } + } + + if (comptime_refined_ty) |comptime_ty| refine: { + // Finally, refine the type based on the comptime-known bound. + if (known_undef) break :refine; // can't refine undef + const unrefined_ty = sema.typeOf(cur_minmax.?); + const is_vector = unrefined_ty.zigTypeTag() == .Vector; + const comptime_elem_ty = if (is_vector) comptime_ty.childType() else comptime_ty; + const unrefined_elem_ty = if (is_vector) unrefined_ty.childType() else unrefined_ty; + + if (unrefined_elem_ty.isAnyFloat()) break :refine; // we can't refine floats - const opFunc = switch (air_tag) { - .min => Value.numberMin, - .max => Value.numberMax, + // Compute the final bounds based on the runtime type and the comptime-known bound type + const min_val = switch (air_tag) { + .min => try unrefined_elem_ty.minInt(sema.arena, target), + .max => try comptime_elem_ty.minInt(sema.arena, target), // @max(ct, rt) >= ct else => unreachable, }; - const target = sema.mod.getTarget(); - const vec_len = simd_op.len orelse { - const result_val = opFunc(lhs_val, rhs_val, target); - return sema.addConstant(simd_op.result_ty, result_val); + const max_val = switch (air_tag) { + .min => try comptime_elem_ty.maxInt(sema.arena, target), // @min(ct, rt) <= ct + .max => try unrefined_elem_ty.maxInt(sema.arena, target), + else => unreachable, }; - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const elems = try sema.arena.alloc(Value, vec_len); - for (elems, 0..) 
|*elem, i| { - const lhs_elem_val = lhs_val.elemValueBuffer(sema.mod, i, &lhs_buf); - const rhs_elem_val = rhs_val.elemValueBuffer(sema.mod, i, &rhs_buf); - elem.* = opFunc(lhs_elem_val, rhs_elem_val, target); - } - return sema.addConstant( - simd_op.result_ty, - try Value.Tag.aggregate.create(sema.arena, elems), - ); - } else rs: { - if (simd_op.rhs_val) |rhs_val| { - if (rhs_val.isUndef()) return sema.addConstUndef(simd_op.result_ty); + + // Find the smallest type which can contain these bounds + const final_elem_ty = try Type.intFittingRange(target, sema.arena, min_val, max_val); + + const final_ty = if (is_vector) + try Type.vector(sema.arena, unrefined_ty.vectorLen(), final_elem_ty) + else + final_elem_ty; + + if (!final_ty.eql(unrefined_ty, mod)) { + // We've reduced the type - cast the result down + return block.addTyOp(.intcast, final_ty, cur_minmax.?); } - break :rs lhs_src; - }; + } - try sema.requireRuntimeBlock(block, src, runtime_src); - return block.addBinOp(air_tag, simd_op.lhs, simd_op.rhs); + return cur_minmax.?; } fn upgradeToArrayPtr(sema: *Sema, block: *Block, ptr: Air.Inst.Ref, len: u64) !Air.Inst.Ref { diff --git a/src/Zir.zig b/src/Zir.zig index ab33b625f7..51c90c61cb 100644 --- a/src/Zir.zig +++ b/src/Zir.zig @@ -927,10 +927,10 @@ pub const Inst = struct { /// Implements the `@memset` builtin. /// Uses the `pl_node` union field with payload `Bin`. memset, - /// Implements the `@min` builtin. + /// Implements the `@min` builtin for 2 args. /// Uses the `pl_node` union field with payload `Bin` min, - /// Implements the `@max` builtin. + /// Implements the `@max` builtin for 2 args. /// Uses the `pl_node` union field with payload `Bin` max, /// Implements the `@cImport` builtin. @@ -1905,10 +1905,20 @@ pub const Inst = struct { compile_log, /// The builtin `@TypeOf` which returns the type after Peer Type Resolution /// of one or more params. - /// `operand` is payload index to `NodeMultiOp`. + /// `operand` is payload index to `TypeOfPeer`. /// `small` is `operands_len`. /// The AST node is the builtin call. typeof_peer, + /// Implements the `@min` builtin for more than 2 args. + /// `operand` is payload index to `NodeMultiOp`. + /// `small` is `operands_len`. + /// The AST node is the builtin call. + min_multi, + /// Implements the `@max` builtin for more than 2 args. + /// `operand` is payload index to `NodeMultiOp`. + /// `small` is `operands_len`. + /// The AST node is the builtin call. + max_multi, /// Implements the `@addWithOverflow` builtin. /// `operand` is payload index to `BinNode`. /// `small` is unused. 
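
The `src/arch/x86_64/CodeGen.zig` hunks below are direct fallout from this
refinement: `@min` against a comptime bound now returns the smallest fitting
type, so a previously 32-bit expression silently becomes `u4`. An illustrative
sketch of the hazard (assumed values, not code from the patch):

    var val_abi_size: u32 = 24;
    const limb_abi_size = @min(val_abi_size, 8); // now inferred as u4
    const limb_abi_bits = limb_abi_size * 8; // u4 arithmetic overflows past 15

Pinning the declarations to `u32` (or `u16`, matching the field's type), as the
hunks below do, restores the intended widths.
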
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index a658103c1a..dd093508b1 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -4298,7 +4298,7 @@ fn packedLoad(self: *Self, dst_mcv: MCValue, ptr_ty: Type, ptr_mcv: MCValue) Inn const val_ty = ptr_info.pointee_type; const val_abi_size = @intCast(u32, val_ty.abiSize(self.target.*)); - const limb_abi_size = @min(val_abi_size, 8); + const limb_abi_size: u32 = @min(val_abi_size, 8); const limb_abi_bits = limb_abi_size * 8; const val_byte_off = @intCast(i32, ptr_info.bit_offset / limb_abi_bits * limb_abi_size); const val_bit_off = ptr_info.bit_offset % limb_abi_bits; @@ -4434,7 +4434,7 @@ fn packedStore(self: *Self, ptr_ty: Type, ptr_mcv: MCValue, src_mcv: MCValue) In const ptr_info = ptr_ty.ptrInfo().data; const src_ty = ptr_ty.childType(); - const limb_abi_size = @min(ptr_info.host_size, 8); + const limb_abi_size: u16 = @min(ptr_info.host_size, 8); const limb_abi_bits = limb_abi_size * 8; const src_bit_size = src_ty.bitSize(self.target.*); @@ -4652,7 +4652,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { } const field_abi_size = @intCast(u32, field_ty.abiSize(self.target.*)); - const limb_abi_size = @min(field_abi_size, 8); + const limb_abi_size: u32 = @min(field_abi_size, 8); const limb_abi_bits = limb_abi_size * 8; const field_byte_off = @intCast(i32, field_off / limb_abi_bits * limb_abi_size); const field_bit_off = field_off % limb_abi_bits; @@ -5875,7 +5875,7 @@ fn genBinOpMir(self: *Self, mir_tag: Mir.Inst.Tag, ty: Type, dst_mcv: MCValue, s }, .memory, .indirect, .load_got, .load_direct, .load_tlv, .load_frame => { const OpInfo = ?struct { addr_reg: Register, addr_lock: RegisterLock }; - const limb_abi_size = @min(abi_size, 8); + const limb_abi_size: u32 = @min(abi_size, 8); const dst_info: OpInfo = switch (dst_mcv) { else => unreachable, diff --git a/src/print_zir.zig b/src/print_zir.zig index 922366dc85..4e4f13641c 100644 --- a/src/print_zir.zig +++ b/src/print_zir.zig @@ -482,6 +482,8 @@ const Writer = struct { .compile_log => try self.writeNodeMultiOp(stream, extended), .typeof_peer => try self.writeTypeofPeer(stream, extended), + .min_multi => try self.writeNodeMultiOp(stream, extended), + .max_multi => try self.writeNodeMultiOp(stream, extended), .select => try self.writeSelect(stream, extended), diff --git a/src/type.zig b/src/type.zig index b25f13342d..c9a6f49d3e 100644 --- a/src/type.zig +++ b/src/type.zig @@ -6723,7 +6723,17 @@ pub const Type = extern union { pub fn smallestUnsignedInt(arena: Allocator, max: u64) !Type { const bits = smallestUnsignedBits(max); - return switch (bits) { + return intWithBits(arena, false, bits); + } + + pub fn intWithBits(arena: Allocator, sign: bool, bits: u16) !Type { + return if (sign) switch (bits) { + 8 => initTag(.i8), + 16 => initTag(.i16), + 32 => initTag(.i32), + 64 => initTag(.i64), + else => return Tag.int_signed.create(arena, bits), + } else switch (bits) { 1 => initTag(.u1), 8 => initTag(.u8), 16 => initTag(.u16), @@ -6733,6 +6743,61 @@ pub const Type = extern union { }; } + /// Given a value representing an integer, returns the number of bits necessary to represent + /// this value in an integer. If `sign` is true, returns the number of bits necessary in a + /// twos-complement integer; otherwise in an unsigned integer. + /// Asserts that `val` is not undef. If `val` is negative, asserts that `sign` is true. 
+ pub fn intBitsForValue(target: Target, val: Value, sign: bool) u16 { + assert(!val.isUndef()); + switch (val.tag()) { + .int_big_positive => { + const limbs = val.castTag(.int_big_positive).?.data; + const big: std.math.big.int.Const = .{ .limbs = limbs, .positive = true }; + return @intCast(u16, big.bitCountAbs() + @boolToInt(sign)); + }, + .int_big_negative => { + const limbs = val.castTag(.int_big_negative).?.data; + // Zero is still a possibility, in which case unsigned is fine + for (limbs) |limb| { + if (limb != 0) break; + } else return 0; // val == 0 + assert(sign); + const big: std.math.big.int.Const = .{ .limbs = limbs, .positive = false }; + return @intCast(u16, big.bitCountTwosComp()); + }, + .int_i64 => { + const x = val.castTag(.int_i64).?.data; + if (x >= 0) return smallestUnsignedBits(@intCast(u64, x)) + @boolToInt(sign); + assert(sign); + return smallestUnsignedBits(@intCast(u64, -(x + 1))) + 1; // -(x + 1) cannot overflow, even for minInt(i64) + }, + else => { + const x = val.toUnsignedInt(target); + return smallestUnsignedBits(x) + @boolToInt(sign); + }, + } + } + + /// Returns the smallest possible integer type containing both `min` and `max`. Asserts that neither + /// value is undef. + /// TODO: if #3806 is implemented, this becomes trivial + pub fn intFittingRange(target: Target, arena: Allocator, min: Value, max: Value) !Type { + assert(!min.isUndef()); + assert(!max.isUndef()); + + if (std.debug.runtime_safety) { + assert(Value.order(min, max, target).compare(.lte)); + } + + const sign = min.orderAgainstZero() == .lt; + + const min_val_bits = intBitsForValue(target, min, sign); + const max_val_bits = intBitsForValue(target, max, sign); + const bits = @max(min_val_bits, max_val_bits); + + return intWithBits(arena, sign, bits); + } + /// This is only used for comptime asserts. Bump this number when you make a change /// to packed struct layout to find out all the places in the codebase you need to edit! 
pub const packed_struct_layout_version = 2; diff --git a/test/behavior/maximum_minimum.zig b/test/behavior/maximum_minimum.zig index 9a4ae40eef..d7b93c56c0 100644 --- a/test/behavior/maximum_minimum.zig +++ b/test/behavior/maximum_minimum.zig @@ -106,3 +106,59 @@ test "@min/@max on lazy values" { const size = @max(@sizeOf(A), @sizeOf(B)); try expect(size == @sizeOf(B)); } + +test "@min/@max more than two arguments" { + const x: u32 = 30; + const y: u32 = 10; + const z: u32 = 20; + try expectEqual(@as(u32, 10), @min(x, y, z)); + try expectEqual(@as(u32, 30), @max(x, y, z)); +} + +test "@min/@max more than two vector arguments" { + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + + const x: @Vector(2, u32) = .{ 3, 2 }; + const y: @Vector(2, u32) = .{ 4, 1 }; + const z: @Vector(2, u32) = .{ 5, 0 }; + try expectEqual(@Vector(2, u32){ 3, 0 }, @min(x, y, z)); + try expectEqual(@Vector(2, u32){ 5, 2 }, @max(x, y, z)); +} + +test "@min/@max notices bounds" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + + var x: u16 = 20; + const y = 30; + var z: u32 = 100; + const min = @min(x, y, z); + const max = @max(x, y, z); + try expectEqual(x, min); + try expectEqual(u5, @TypeOf(min)); + try expectEqual(z, max); + try expectEqual(u32, @TypeOf(max)); +} + +test "@min/@max notices vector bounds" { + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + + var x: @Vector(2, u16) = .{ 140, 40 }; + const y: @Vector(2, u64) = .{ 5, 100 }; + var z: @Vector(2, u32) = .{ 10, 300 }; + const min = @min(x, y, z); + const max = @max(x, y, z); + try expectEqual(@Vector(2, u32){ 5, 40 }, min); + try expectEqual(@Vector(2, u7), @TypeOf(min)); + try expectEqual(@Vector(2, u32){ 140, 300 }, max); + try expectEqual(@Vector(2, u32), @TypeOf(max)); +} -- cgit v1.2.3 From 3f5592c114ffbc28e7ffb9e9e411870b31d81dd8 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Tue, 2 May 2023 00:50:38 -0400 Subject: x86_64: implement slice elem ptr for more MCValue tags --- src/arch/x86_64/CodeGen.zig | 12 +----------- test/behavior/for.zig | 4 ---- 2 files changed, 1 insertion(+), 15 deletions(-) (limited to 'src/arch') diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index dd093508b1..51518485e4 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -3502,17 +3502,7 @@ fn genSliceElemPtr(self: *Self, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref) !MCValue { defer self.register_manager.unlockReg(offset_reg_lock); const addr_reg = try self.register_manager.allocReg(null, gp); - switch (slice_mcv) { - .load_frame => |frame_addr| try self.asmRegisterMemory( - .mov, - addr_reg.to64(), - Memory.sib(.qword, .{ - .base = .{ .frame = frame_addr.index }, - .disp 
= frame_addr.off, - }), - ), - else => return self.fail("TODO implement slice_elem_ptr when slice is {}", .{slice_mcv}), - } + try self.genSetReg(addr_reg, Type.usize, slice_mcv); // TODO we could allocate register here, but need to expect addr register and potentially // offset register. try self.genBinOpMir(.add, slice_ptr_field_type, .{ .register = addr_reg }, .{ diff --git a/test/behavior/for.zig b/test/behavior/for.zig index 0cec2d62aa..b3d82fd255 100644 --- a/test/behavior/for.zig +++ b/test/behavior/for.zig @@ -66,7 +66,6 @@ test "ignore lval with underscore (for loop)" { } test "basic for loop" { - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -306,7 +305,6 @@ test "1-based counter and ptr to array" { test "slice and two counters, one is offset and one is runtime" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO const slice: []const u8 = "blah"; @@ -335,7 +333,6 @@ test "slice and two counters, one is offset and one is runtime" { test "two slices, one captured by-ref" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO var buf: [10]u8 = undefined; @@ -355,7 +352,6 @@ test "two slices, one captured by-ref" { test "raw pointer and slice" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO var buf: [10]u8 = undefined; -- cgit v1.2.3 From 3a30b827414926dceccf606d6557e00f1cc6458d Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Tue, 2 May 2023 01:27:21 -0400 Subject: x86_64: implement fieldParentPtr --- src/arch/x86_64/CodeGen.zig | 26 ++++++++++++++++++++++---- test/behavior/struct.zig | 1 - test/behavior/tuple.zig | 2 -- 3 files changed, 22 insertions(+), 7 deletions(-) (limited to 'src/arch') diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 51518485e4..521307ed6b 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -233,6 +233,13 @@ pub const MCValue = union(enum) { }; } + fn isRegisterOffset(mcv: MCValue) bool { + return switch (mcv) { + .register, .register_offset => true, + else => false, + }; + } + fn getReg(mcv: MCValue) ?Register { return switch (mcv) { .register => |reg| reg, @@ -4772,10 +4779,21 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { } fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void { - const ty_op = self.air.instructions.items(.data)[inst].ty_op; - _ = ty_op; - return self.fail("TODO implement airFieldParentPtr for {}", .{self.target.cpu.arch}); - //return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); + const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; 
+ const extra = self.air.extraData(Air.FieldParentPtr, ty_pl.payload).data; + + const inst_ty = self.air.typeOfIndex(inst); + const parent_ty = inst_ty.childType(); + const field_offset = @intCast(i32, parent_ty.structFieldOffset(extra.field_index, self.target.*)); + + const src_mcv = try self.resolveInst(extra.field_ptr); + const dst_mcv = if (src_mcv.isRegisterOffset() and + self.reuseOperand(inst, extra.field_ptr, 0, src_mcv)) + src_mcv + else + try self.copyToRegisterWithInstTracking(inst, inst_ty, src_mcv); + const result = dst_mcv.offset(-field_offset); + return self.finishAir(inst, result, .{ extra.field_ptr, .none, .none }); } fn genUnOp(self: *Self, maybe_inst: ?Air.Inst.Index, tag: Air.Inst.Tag, src_air: Air.Inst.Ref) !MCValue { diff --git a/test/behavior/struct.zig b/test/behavior/struct.zig index e533e34cc3..d5972b9161 100644 --- a/test/behavior/struct.zig +++ b/test/behavior/struct.zig @@ -1333,7 +1333,6 @@ test "under-aligned struct field" { } test "fieldParentPtr of a zero-bit field" { - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO diff --git a/test/behavior/tuple.zig b/test/behavior/tuple.zig index 11cc8b2dce..c1e5f40a46 100644 --- a/test/behavior/tuple.zig +++ b/test/behavior/tuple.zig @@ -209,7 +209,6 @@ test "initializing anon struct with explicit type" { } test "fieldParentPtr of tuple" { - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -220,7 +219,6 @@ test "fieldParentPtr of tuple" { } test "fieldParentPtr of anon struct" { - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO -- cgit v1.2.3 From 40ef796278bd5ba405099609e03bdeacab7d3154 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Tue, 2 May 2023 02:14:29 -0400 Subject: x86_64: fix todo message typo --- src/arch/x86_64/CodeGen.zig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'src/arch') diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 521307ed6b..e2a1076ce9 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -9008,7 +9008,7 @@ fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.UnionInit, ty_pl.payload).data; _ = extra; - return self.fail("TODO implement airAggregateInit for x86_64", .{}); + return self.fail("TODO implement airUnionInit for x86_64", .{}); //return self.finishAir(inst, result, .{ extra.init, .none, .none }); } -- cgit v1.2.3 From 9ccdbca635a3b5a26b65ab8e52533d3acc8f2f5e Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Tue, 2 May 2023 03:24:04 -0400 Subject: x86_64: implement fabs --- src/arch/x86_64/CodeGen.zig | 20 ++++++++++++++------ src/arch/x86_64/Encoding.zig | 6 ++++++ src/arch/x86_64/Lower.zig | 6 ++++++ src/arch/x86_64/Mir.zig | 12 ++++++++++++ src/arch/x86_64/encodings.zig | 12 ++++++++++++ test/behavior/floatop.zig | 3 ++- 6 files changed, 52 insertions(+), 7 
deletions(-) (limited to 'src/arch') diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index e2a1076ce9..5685357108 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -1458,14 +1458,13 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .log, .log2, .log10, - .fabs, .floor, .ceil, .round, .trunc_float, => try self.airUnaryMath(inst), - .neg => try self.airNeg(inst), + .neg, .fabs => try self.airFloatSign(inst), .add_with_overflow => try self.airAddSubWithOverflow(inst), .sub_with_overflow => try self.airAddSubWithOverflow(inst), @@ -4185,7 +4184,7 @@ fn airBitReverse(self: *Self, inst: Air.Inst.Index) !void { return self.finishAir(inst, dst_mcv, .{ ty_op.operand, .none, .none }); } -fn airNeg(self: *Self, inst: Air.Inst.Index) !void { +fn airFloatSign(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const ty = self.air.typeOf(un_op); const ty_bits = ty.floatBits(self.target.*); @@ -4228,10 +4227,19 @@ fn airNeg(self: *Self, inst: Air.Inst.Index) !void { const dst_lock = self.register_manager.lockReg(dst_mcv.register); defer if (dst_lock) |lock| self.register_manager.unlockReg(lock); + const tag = self.air.instructions.items(.tag)[inst]; try self.genBinOpMir(switch (ty_bits) { - 32 => .xorps, - 64 => .xorpd, - else => return self.fail("TODO implement airNeg for {}", .{ + 32 => switch (tag) { + .neg => .xorps, + .fabs => .andnps, + else => unreachable, + }, + 64 => switch (tag) { + .neg => .xorpd, + .fabs => .andnpd, + else => unreachable, + }, + else => return self.fail("TODO implement airFloatSign for {}", .{ ty.fmt(self.bin_file.options.module.?), }), }, vec_ty, dst_mcv, sign_mcv); diff --git a/src/arch/x86_64/Encoding.zig b/src/arch/x86_64/Encoding.zig index 5cb7f7a2d9..bb1757c91c 100644 --- a/src/arch/x86_64/Encoding.zig +++ b/src/arch/x86_64/Encoding.zig @@ -268,23 +268,29 @@ pub const Mnemonic = enum { movd, // SSE addss, + andps, + andnps, cmpss, cvtsi2ss, divss, maxss, minss, movss, mulss, + orps, subss, ucomiss, xorps, // SSE2 addsd, + andpd, + andnpd, //cmpsd, cvtsd2ss, cvtsi2sd, cvtss2sd, divsd, maxsd, minsd, movq, //movd, movsd, mulsd, + orpd, subsd, ucomisd, xorpd, diff --git a/src/arch/x86_64/Lower.zig b/src/arch/x86_64/Lower.zig index a961100687..03e395b171 100644 --- a/src/arch/x86_64/Lower.zig +++ b/src/arch/x86_64/Lower.zig @@ -94,6 +94,8 @@ pub fn lowerMir(lower: *Lower, inst: Mir.Inst) Error![]const Instruction { .xor, .addss, + .andnps, + .andps, .cmpss, .cvtsi2ss, .divss, @@ -101,11 +103,14 @@ pub fn lowerMir(lower: *Lower, inst: Mir.Inst) Error![]const Instruction { .minss, .movss, .mulss, + .orps, .roundss, .subss, .ucomiss, .xorps, .addsd, + .andnpd, + .andpd, .cmpsd, .cvtsd2ss, .cvtsi2sd, @@ -115,6 +120,7 @@ pub fn lowerMir(lower: *Lower, inst: Mir.Inst) Error![]const Instruction { .minsd, .movsd, .mulsd, + .orpd, .roundsd, .subsd, .ucomisd, diff --git a/src/arch/x86_64/Mir.zig b/src/arch/x86_64/Mir.zig index c14338b13d..f3d7a5a66f 100644 --- a/src/arch/x86_64/Mir.zig +++ b/src/arch/x86_64/Mir.zig @@ -168,6 +168,10 @@ pub const Inst = struct { /// Add single precision floating point values addss, + /// Bitwise logical and of packed single precision floating-point values + andps, + /// Bitwise logical and not of packed single precision floating-point values + andnps, /// Compare scalar single-precision floating-point values cmpss, /// Convert doubleword integer to scalar single-precision floating-point value @@ -182,6 +186,8 @@ pub const 
Inst = struct { movss, /// Multiply scalar single-precision floating-point values mulss, + /// Bitwise logical or of packed single precision floating-point values + orps, /// Round scalar single-precision floating-point values roundss, /// Subtract scalar single-precision floating-point values @@ -192,6 +198,10 @@ pub const Inst = struct { xorps, /// Add double precision floating point values addsd, + /// Bitwise logical and not of packed double precision floating-point values + andnpd, + /// Bitwise logical and of packed double precision floating-point values + andpd, /// Compare scalar double-precision floating-point values cmpsd, /// Convert scalar double-precision floating-point value to scalar single-precision floating-point value @@ -210,6 +220,8 @@ pub const Inst = struct { movsd, /// Multiply scalar double-precision floating-point values mulsd, + /// Bitwise logical or of packed double precision floating-point values + orpd, /// Round scalar double-precision floating-point values roundsd, /// Subtract scalar double-precision floating-point values diff --git a/src/arch/x86_64/encodings.zig b/src/arch/x86_64/encodings.zig index ac427c3633..35b2f13fe7 100644 --- a/src/arch/x86_64/encodings.zig +++ b/src/arch/x86_64/encodings.zig @@ -832,6 +832,10 @@ pub const table = [_]Entry{ // SSE .{ .addss, .rm, &.{ .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x58 }, 0, .sse }, + .{ .andnps, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x55 }, 0, .sse }, + + .{ .andps, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x54 }, 0, .sse }, + .{ .cmpss, .rmi, &.{ .xmm, .xmm_m32, .imm8 }, &.{ 0xf3, 0x0f, 0xc2 }, 0, .sse }, .{ .cvtsi2ss, .rm, &.{ .xmm, .rm32 }, &.{ 0xf3, 0x0f, 0x2a }, 0, .sse }, @@ -848,6 +852,8 @@ pub const table = [_]Entry{ .{ .mulss, .rm, &.{ .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x59 }, 0, .sse }, + .{ .orps, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x56 }, 0, .sse }, + .{ .subss, .rm, &.{ .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x5c }, 0, .sse }, .{ .ucomiss, .rm, &.{ .xmm, .xmm_m32 }, &.{ 0x0f, 0x2e }, 0, .sse }, @@ -857,6 +863,10 @@ pub const table = [_]Entry{ // SSE2 .{ .addsd, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x58 }, 0, .sse2 }, + .{ .andnpd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x55 }, 0, .sse2 }, + + .{ .andpd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x54 }, 0, .sse2 }, + .{ .cmpsd, .rmi, &.{ .xmm, .xmm_m64, .imm8 }, &.{ 0xf2, 0x0f, 0xc2 }, 0, .sse2 }, .{ .cvtsd2ss, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x5a }, 0, .sse2 }, @@ -883,6 +893,8 @@ pub const table = [_]Entry{ .{ .mulsd, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x59 }, 0, .sse2 }, + .{ .orpd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x56 }, 0, .sse2 }, + .{ .subsd, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x5c }, 0, .sse2 }, .{ .movsd, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x10 }, 0, .sse2 }, diff --git a/test/behavior/floatop.zig b/test/behavior/floatop.zig index f713cd035c..ecf1473d14 100644 --- a/test/behavior/floatop.zig +++ b/test/behavior/floatop.zig @@ -96,7 +96,8 @@ test "negative f128 floatToInt at compile-time" { } test "@sqrt" { - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64 and + comptime !std.Target.x86.featureSetHasAll(builtin.cpu.features, .{ .sse, .sse2, .sse4_1 })) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO -- 
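The fabs lowering in the commit above relies on IEEE-754 keeping the sign in a single bit, so negation and absolute value reduce to bitwise operations on the float's representation; the xorps/andnps forms perform that bit arithmetic in SSE registers against a sign-mask constant. The same math on a scalar bit pattern, as a standalone sketch (not code from the patch):

    const std = @import("std");

    test "sketch: float sign bit manipulation" {
        const x: f32 = -1.5;
        const bits = @bitCast(u32, x);
        const sign_mask: u32 = 0x8000_0000; // IEEE-754 binary32 sign bit
        // Flipping the sign bit negates the value.
        try std.testing.expect(@bitCast(f32, bits ^ sign_mask) == 1.5);
        // Clearing the sign bit yields the absolute value, i.e. @fabs.
        try std.testing.expect(@bitCast(f32, bits & ~sign_mask) == 1.5);
    }
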
cgit v1.2.3 From 36a39267b89583ad1dded42a54de90a7f9b5eacf Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Tue, 2 May 2023 21:01:59 -0400 Subject: x86_64: fix feature confusion --- src/arch/x86_64/CodeGen.zig | 7 ++++++- test/behavior/basic.zig | 8 -------- test/behavior/bugs/13069.zig | 2 -- test/behavior/cast.zig | 5 ----- test/behavior/enum.zig | 2 -- test/behavior/eval.zig | 2 -- test/behavior/floatop.zig | 10 ---------- test/behavior/fn.zig | 4 ---- test/behavior/generics.zig | 4 ---- test/behavior/math.zig | 2 -- test/behavior/maximum_minimum.zig | 4 ---- test/behavior/pointers.zig | 4 ---- test/behavior/struct.zig | 2 -- test/behavior/switch.zig | 2 -- test/behavior/union.zig | 14 -------------- 15 files changed, 6 insertions(+), 66 deletions(-) (limited to 'src/arch') diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 5685357108..9487cc33c2 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -5879,7 +5879,12 @@ fn genBinOpMir(self: *Self, mir_tag: Mir.Inst.Tag, ty: Type, dst_mcv: MCValue, s .load_got, .load_tlv, => { - const addr_reg = try self.copyToTmpRegister(ty, src_mcv.address()); + var ptr_pl = Type.Payload.ElemType{ + .base = .{ .tag = .single_const_pointer }, + .data = ty, + }; + const ptr_ty = Type.initPayload(&ptr_pl.base); + const addr_reg = try self.copyToTmpRegister(ptr_ty, src_mcv.address()); return self.genBinOpMir(mir_tag, ty, dst_mcv, .{ .indirect = .{ .reg = addr_reg }, }); diff --git a/test/behavior/basic.zig b/test/behavior/basic.zig index 86fc61c2c9..6fdd309371 100644 --- a/test/behavior/basic.zig +++ b/test/behavior/basic.zig @@ -363,8 +363,6 @@ fn hereIsAnOpaqueType(ptr: *OpaqueA) *OpaqueA { } test "take address of parameter" { - if (builtin.zig_backend == .stage2_x86_64 and - comptime !std.Target.x86.featureSetHasAll(builtin.cpu.features, .{ .sse, .sse2, .sse4_1 })) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -392,8 +390,6 @@ test "array 2D const double ptr" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and - comptime !std.Target.x86.featureSetHasAll(builtin.cpu.features, .{ .sse, .sse2, .sse4_1 })) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO const rect_2d_vertexes = [_][1]f32{ @@ -407,8 +403,6 @@ test "array 2D const double ptr with offset" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and - comptime !std.Target.x86.featureSetHasAll(builtin.cpu.features, .{ .sse, .sse2, .sse4_1 })) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; const rect_2d_vertexes = [_][2]f32{ @@ -422,8 +416,6 @@ test "array 3D const double ptr with offset" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; - if (builtin.zig_backend == 
.stage2_x86_64 and - comptime !std.Target.x86.featureSetHasAll(builtin.cpu.features, .{ .sse, .sse2, .sse4_1 })) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO const rect_3d_vertexes = [_][2][2]f32{ diff --git a/test/behavior/bugs/13069.zig b/test/behavior/bugs/13069.zig index 1c2526ef2a..41c5906ee6 100644 --- a/test/behavior/bugs/13069.zig +++ b/test/behavior/bugs/13069.zig @@ -6,8 +6,6 @@ test { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and - comptime !std.Target.x86.featureSetHasAll(builtin.cpu.features, .{ .sse, .sse2, .sse4_1 })) return error.SkipZigTest; // TODO var opt_x: ?[3]f32 = [_]f32{0.0} ** 3; diff --git a/test/behavior/cast.zig b/test/behavior/cast.zig index cbf1712f67..039e0a3d17 100644 --- a/test/behavior/cast.zig +++ b/test/behavior/cast.zig @@ -95,9 +95,6 @@ test "comptime_int @intToFloat" { test "@intToFloat" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and - comptime !std.Target.x86.featureSetHasAll(builtin.cpu.features, .{ .sse, .sse2, .sse4_1 })) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -636,8 +633,6 @@ test "vector casts" { } test "@floatCast cast down" { - if (builtin.zig_backend == .stage2_x86_64 and - comptime !std.Target.x86.featureSetHasAll(builtin.cpu.features, .{ .sse, .sse2, .sse4_1 })) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO diff --git a/test/behavior/enum.zig b/test/behavior/enum.zig index 26b941bcdc..5f0037f6dc 100644 --- a/test/behavior/enum.zig +++ b/test/behavior/enum.zig @@ -940,8 +940,6 @@ test "constant enum initialization with differing sizes" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and - comptime !std.Target.x86.featureSetHasAll(builtin.cpu.features, .{ .sse, .sse2, .sse4_1 })) return error.SkipZigTest; // TODO try test3_1(test3_foo); try test3_2(test3_bar); diff --git a/test/behavior/eval.zig b/test/behavior/eval.zig index c7285ef8a8..b12fcc7afa 100644 --- a/test/behavior/eval.zig +++ b/test/behavior/eval.zig @@ -535,8 +535,6 @@ test "static eval list init" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and - comptime !std.Target.x86.featureSetHasAll(builtin.cpu.features, .{ .sse, .sse2, .sse4_1 })) return error.SkipZigTest; // TODO try expect(static_vec3.data[2] == 1.0); try expect(vec3(0.0, 0.0, 3.0).data[2] == 3.0); diff --git a/test/behavior/floatop.zig b/test/behavior/floatop.zig index ecf1473d14..b98d782da1 100644 --- a/test/behavior/floatop.zig +++ 
b/test/behavior/floatop.zig @@ -96,8 +96,6 @@ test "negative f128 floatToInt at compile-time" { } test "@sqrt" { - if (builtin.zig_backend == .stage2_x86_64 and - comptime !std.Target.x86.featureSetHasAll(builtin.cpu.features, .{ .sse, .sse2, .sse4_1 })) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -905,8 +903,6 @@ test "negation f16" { } test "negation f32" { - if (builtin.zig_backend == .stage2_x86_64 and - comptime !std.Target.x86.featureSetHasAll(builtin.cpu.features, .{ .sse, .sse2, .sse4_1 })) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -926,8 +922,6 @@ test "negation f32" { } test "negation f64" { - if (builtin.zig_backend == .stage2_x86_64 and - comptime !std.Target.x86.featureSetHasAll(builtin.cpu.features, .{ .sse, .sse2, .sse4_1 })) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -1062,8 +1056,6 @@ test "nan negation f16" { } test "nan negation f32" { - if (builtin.zig_backend == .stage2_x86_64 and - comptime !std.Target.x86.featureSetHasAll(builtin.cpu.features, .{ .sse, .sse2, .sse4_1 })) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -1083,8 +1075,6 @@ test "nan negation f32" { test "nan negation f64" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and - comptime !std.Target.x86.featureSetHasAll(builtin.cpu.features, .{ .sse, .sse2, .sse4_1 })) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO diff --git a/test/behavior/fn.zig b/test/behavior/fn.zig index 71b7b36c21..4ff5e20378 100644 --- a/test/behavior/fn.zig +++ b/test/behavior/fn.zig @@ -338,8 +338,6 @@ test "function call with anon list literal" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and - comptime !std.Target.x86.featureSetHasAll(builtin.cpu.features, .{ .sse, .sse2, .sse4_1 })) return error.SkipZigTest; // TODO const S = struct { fn doTheTest() !void { @@ -360,8 +358,6 @@ test "function call with anon list literal - 2D" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and - comptime 
!std.Target.x86.featureSetHasAll(builtin.cpu.features, .{ .sse, .sse2, .sse4_1 })) return error.SkipZigTest; // TODO const S = struct { fn doTheTest() !void { diff --git a/test/behavior/generics.zig b/test/behavior/generics.zig index 367f2113d9..e7c053e36c 100644 --- a/test/behavior/generics.zig +++ b/test/behavior/generics.zig @@ -59,8 +59,6 @@ test "fn with comptime args" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and - comptime !std.Target.x86.featureSetHasAll(builtin.cpu.features, .{ .sse, .sse2, .sse4_1 })) return error.SkipZigTest; // TODO try expect(gimmeTheBigOne(1234, 5678) == 5678); try expect(shouldCallSameInstance(34, 12) == 34); @@ -71,8 +69,6 @@ test "anytype params" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and - comptime !std.Target.x86.featureSetHasAll(builtin.cpu.features, .{ .sse, .sse2, .sse4_1 })) return error.SkipZigTest; // TODO try expect(max_i32(12, 34) == 34); try expect(max_f64(1.2, 3.4) == 3.4); diff --git a/test/behavior/math.zig b/test/behavior/math.zig index 01b927b913..f9c9f43927 100644 --- a/test/behavior/math.zig +++ b/test/behavior/math.zig @@ -203,8 +203,6 @@ test "float equality" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and - comptime !std.Target.x86.featureSetHasAll(builtin.cpu.features, .{ .sse, .sse2, .sse4_1 })) return error.SkipZigTest; // TODO const x: f64 = 0.012; const y: f64 = x + 1.0; diff --git a/test/behavior/maximum_minimum.zig b/test/behavior/maximum_minimum.zig index d7b93c56c0..e6a8553e8c 100644 --- a/test/behavior/maximum_minimum.zig +++ b/test/behavior/maximum_minimum.zig @@ -8,8 +8,6 @@ test "@max" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and - comptime !std.Target.x86.featureSetHasAll(builtin.cpu.features, .{ .sse, .sse2, .sse4_1 })) return error.SkipZigTest; // TODO const S = struct { fn doTheTest() !void { @@ -56,8 +54,6 @@ test "@min" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and - comptime !std.Target.x86.featureSetHasAll(builtin.cpu.features, .{ .sse, .sse2, .sse4_1 })) return error.SkipZigTest; // TODO const S = struct { fn doTheTest() !void { diff --git a/test/behavior/pointers.zig b/test/behavior/pointers.zig index 0bd8388660..cfce97b550 100644 --- a/test/behavior/pointers.zig +++ b/test/behavior/pointers.zig @@ -206,8 +206,6 @@ test "allowzero pointer and slice" { } test "assign null directly to C pointer and test null equality" { - if (builtin.zig_backend == .stage2_x86_64 and - comptime 
!std.Target.x86.featureSetHasAll(builtin.cpu.features, .{ .sse, .sse2, .sse4_1 })) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -346,8 +344,6 @@ test "pointer sentinel with +inf" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and - comptime !std.Target.x86.featureSetHasAll(builtin.cpu.features, .{ .sse, .sse2, .sse4_1 })) return error.SkipZigTest; // TODO const S = struct { fn doTheTest() !void { diff --git a/test/behavior/struct.zig b/test/behavior/struct.zig index d5972b9161..0ca7f70de1 100644 --- a/test/behavior/struct.zig +++ b/test/behavior/struct.zig @@ -744,8 +744,6 @@ var g_foo: S0 = S0.init(); test "packed struct with fp fields" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and - comptime !std.Target.x86.featureSetHasAll(builtin.cpu.features, .{ .sse, .sse2, .sse4_1 })) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO diff --git a/test/behavior/switch.zig b/test/behavior/switch.zig index 5e2d6d28c1..a32a762e04 100644 --- a/test/behavior/switch.zig +++ b/test/behavior/switch.zig @@ -230,8 +230,6 @@ test "switch prong with variable" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and - comptime !std.Target.x86.featureSetHasAll(builtin.cpu.features, .{ .sse, .sse2, .sse4_1 })) return error.SkipZigTest; // TODO try switchProngWithVarFn(SwitchProngWithVarEnum{ .One = 13 }); try switchProngWithVarFn(SwitchProngWithVarEnum{ .Two = 13.0 }); diff --git a/test/behavior/union.zig b/test/behavior/union.zig index d8989c21f0..41842f5bc5 100644 --- a/test/behavior/union.zig +++ b/test/behavior/union.zig @@ -14,8 +14,6 @@ test "basic unions with floats" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and - comptime !std.Target.x86.featureSetHasAll(builtin.cpu.features, .{ .sse, .sse2, .sse4_1 })) return error.SkipZigTest; // TODO var foo = FooWithFloats{ .int = 1 }; try expect(foo.int == 1); @@ -31,8 +29,6 @@ test "init union with runtime value - floats" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and - comptime !std.Target.x86.featureSetHasAll(builtin.cpu.features, .{ .sse, .sse2, .sse4_1 })) return error.SkipZigTest; // TODO var foo: FooWithFloats = undefined; @@ -220,8 +216,6 @@ test "union with specified enum tag" { if 
(builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and - comptime !std.Target.x86.featureSetHasAll(builtin.cpu.features, .{ .sse, .sse2, .sse4_1 })) return error.SkipZigTest; // TODO try doTest(); comptime try doTest(); @@ -231,8 +225,6 @@ test "packed union generates correctly aligned type" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and - comptime !std.Target.x86.featureSetHasAll(builtin.cpu.features, .{ .sse, .sse2, .sse4_1 })) return error.SkipZigTest; // TODO const U = packed union { f1: *const fn () error{TestUnexpectedResult}!void, @@ -910,8 +902,6 @@ test "anonymous union literal syntax" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and - comptime !std.Target.x86.featureSetHasAll(builtin.cpu.features, .{ .sse, .sse2, .sse4_1 })) return error.SkipZigTest; // TODO const S = struct { const Number = union { @@ -1064,8 +1054,6 @@ test "containers with single-field enums" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and - comptime !std.Target.x86.featureSetHasAll(builtin.cpu.features, .{ .sse, .sse2, .sse4_1 })) return error.SkipZigTest; // TODO const S = struct { const A = union(enum) { f1 }; @@ -1524,8 +1512,6 @@ test "reinterpreting enum value inside packed union" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and - comptime !std.Target.x86.featureSetHasAll(builtin.cpu.features, .{ .sse, .sse2, .sse4_1 })) return error.SkipZigTest; // TODO const U = packed union { tag: enum { a, b }, -- cgit v1.2.3 From a5e50891cbd66c740afba722e94f841a2b43c1fc Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Tue, 2 May 2023 21:22:30 -0400 Subject: x86_64: optimize code size for double neg/abs --- src/arch/x86_64/CodeGen.zig | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) (limited to 'src/arch') diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 9487cc33c2..9e50e6b5b4 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -4229,16 +4229,12 @@ fn airFloatSign(self: *Self, inst: Air.Inst.Index) !void { const tag = self.air.instructions.items(.tag)[inst]; try self.genBinOpMir(switch (ty_bits) { - 32 => switch (tag) { + // No point using an extra prefix byte for *pd which performs the same operation. 
+ 32, 64 => switch (tag) { .neg => .xorps, .fabs => .andnps, else => unreachable, }, - 64 => switch (tag) { - .neg => .xorpd, - .fabs => .andnpd, - else => unreachable, - }, else => return self.fail("TODO implement airFloatSign for {}", .{ ty.fmt(self.bin_file.options.module.?), }), -- cgit v1.2.3 From 6893f90887836584f9377793cca7235d8947a326 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Wed, 3 May 2023 02:37:48 -0400 Subject: x86_64: implement sqrt --- src/arch/x86_64/CodeGen.zig | 28 +++++++++++++++++++++++++++- src/arch/x86_64/Encoding.zig | 4 ++++ src/arch/x86_64/Lower.zig | 4 ++++ src/arch/x86_64/Mir.zig | 8 ++++++++ src/arch/x86_64/encodings.zig | 6 ++++++ 5 files changed, 49 insertions(+), 1 deletion(-) (limited to 'src/arch') diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 9e50e6b5b4..ee621ffd87 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -229,6 +229,7 @@ pub const MCValue = union(enum) { fn isRegister(mcv: MCValue) bool { return switch (mcv) { .register => true, + .register_offset => |reg_off| return reg_off.off == 0, else => false, }; } @@ -1449,7 +1450,6 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .shl_sat => try self.airShlSat(inst), .slice => try self.airSlice(inst), - .sqrt, .sin, .cos, .tan, @@ -1464,6 +1464,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .trunc_float, => try self.airUnaryMath(inst), + .sqrt => try self.airSqrt(inst), .neg, .fabs => try self.airFloatSign(inst), .add_with_overflow => try self.airAddSubWithOverflow(inst), @@ -4242,6 +4243,31 @@ fn airFloatSign(self: *Self, inst: Air.Inst.Index) !void { return self.finishAir(inst, dst_mcv, .{ un_op, .none, .none }); } +fn airSqrt(self: *Self, inst: Air.Inst.Index) !void { + const un_op = self.air.instructions.items(.data)[inst].un_op; + const ty = self.air.typeOf(un_op); + + const src_mcv = try self.resolveInst(un_op); + const dst_mcv = if (src_mcv.isRegister() and self.reuseOperand(inst, un_op, 0, src_mcv)) + src_mcv + else + try self.copyToRegisterWithInstTracking(inst, ty, src_mcv); + + try self.genBinOpMir(switch (ty.zigTypeTag()) { + .Float => switch (ty.floatBits(self.target.*)) { + 32 => .sqrtss, + 64 => .sqrtsd, + else => return self.fail("TODO implement airSqrt for {}", .{ + ty.fmt(self.bin_file.options.module.?), + }), + }, + else => return self.fail("TODO implement airSqrt for {}", .{ + ty.fmt(self.bin_file.options.module.?), + }), + }, ty, dst_mcv, src_mcv); + return self.finishAir(inst, dst_mcv, .{ un_op, .none, .none }); +} + fn airUnaryMath(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; _ = un_op; diff --git a/src/arch/x86_64/Encoding.zig b/src/arch/x86_64/Encoding.zig index bb1757c91c..13d7b1776d 100644 --- a/src/arch/x86_64/Encoding.zig +++ b/src/arch/x86_64/Encoding.zig @@ -277,6 +277,8 @@ pub const Mnemonic = enum { movss, mulss, orps, + sqrtps, + sqrtss, subss, ucomiss, xorps, @@ -291,6 +293,8 @@ pub const Mnemonic = enum { movq, //movd, movsd, mulsd, orpd, + sqrtpd, + sqrtsd, subsd, ucomisd, xorpd, diff --git a/src/arch/x86_64/Lower.zig b/src/arch/x86_64/Lower.zig index 03e395b171..b369ba2a6b 100644 --- a/src/arch/x86_64/Lower.zig +++ b/src/arch/x86_64/Lower.zig @@ -105,6 +105,8 @@ pub fn lowerMir(lower: *Lower, inst: Mir.Inst) Error![]const Instruction { .mulss, .orps, .roundss, + .sqrtps, + .sqrtss, .subss, .ucomiss, .xorps, @@ -122,6 +124,8 @@ pub fn lowerMir(lower: *Lower, inst: Mir.Inst) Error![]const 
Instruction { .mulsd, .orpd, .roundsd, + .sqrtpd, + .sqrtsd, .subsd, .ucomisd, .xorpd, diff --git a/src/arch/x86_64/Mir.zig b/src/arch/x86_64/Mir.zig index f3d7a5a66f..95efc0a96c 100644 --- a/src/arch/x86_64/Mir.zig +++ b/src/arch/x86_64/Mir.zig @@ -190,7 +190,11 @@ pub const Inst = struct { orps, /// Round scalar single-precision floating-point values roundss, + /// Square root of packed single-precision floating-point values + sqrtps, + /// Square root of scalar single-precision floating-point value + sqrtss, /// Subtract scalar single-precision floating-point values subss, /// Unordered compare scalar single-precision floating-point values ucomiss, @@ -224,6 +228,10 @@ pub const Inst = struct { orpd, /// Round scalar double-precision floating-point values roundsd, + /// Square root of packed double-precision floating-point values + sqrtpd, + /// Square root of scalar double-precision floating-point value + sqrtsd, /// Subtract scalar double-precision floating-point values subsd, /// Unordered compare scalar double-precision floating-point values diff --git a/src/arch/x86_64/encodings.zig b/src/arch/x86_64/encodings.zig index 35b2f13fe7..5c443157c5 100644 --- a/src/arch/x86_64/encodings.zig +++ b/src/arch/x86_64/encodings.zig @@ -856,6 +856,9 @@ pub const table = [_]Entry{ .{ .subss, .rm, &.{ .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x5c }, 0, .sse }, + .{ .sqrtps, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x51 }, 0, .sse }, + .{ .sqrtss, .rm, &.{ .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x51 }, 0, .sse }, + .{ .ucomiss, .rm, &.{ .xmm, .xmm_m32 }, &.{ 0x0f, 0x2e }, 0, .sse }, .{ .xorps, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x57 }, 0, .sse }, @@ -895,6 +898,9 @@ pub const table = [_]Entry{ .{ .orpd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x56 }, 0, .sse2 }, + .{ .sqrtpd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x51 }, 0, .sse2 }, + .{ .sqrtsd, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x51 }, 0, .sse2 }, + .{ .subsd, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x5c }, 0, .sse2 }, .{ .movsd, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x10 }, 0, .sse2 }, -- cgit v1.2.3 From a19faa2481e84e065a8762cb7c7cbf35426929fd Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Wed, 3 May 2023 04:21:40 -0400 Subject: x86_64: implement movement of more types * f16 * f128 * vector --- src/arch/x86_64/CodeGen.zig | 221 ++++++++++++++++++++++++++++-------------- src/arch/x86_64/Encoding.zig | 6 +- src/arch/x86_64/Lower.zig | 13 +++ src/arch/x86_64/Mir.zig | 14 +++ src/arch/x86_64/encodings.zig | 18 ++++ test/behavior/vector.zig | 6 -- 6 files changed, 198 insertions(+), 80 deletions(-) (limited to 'src/arch') diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index ee621ffd87..97e672b71f 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -1210,6 +1210,28 @@ fn asmRegisterMemory(self: *Self, tag: Mir.Inst.Tag, reg: Register, m: Memory) ! 
}); } +fn asmRegisterMemoryImmediate( + self: *Self, + tag: Mir.Inst.Tag, + reg: Register, + m: Memory, + imm: Immediate, +) !void { + _ = try self.addInst(.{ + .tag = tag, + .ops = switch (m) { + .sib => .rmi_sib, + .rip => .rmi_rip, + else => unreachable, + }, + .data = .{ .rix = .{ .r = reg, .i = @intCast(u8, imm.unsigned), .payload = switch (m) { + .sib => try self.addExtra(Mir.MemorySib.encode(m)), + .rip => try self.addExtra(Mir.MemoryRip.encode(m)), + else => unreachable, + } } }, + }); +} + fn asmMemoryRegister(self: *Self, tag: Mir.Inst.Tag, m: Memory, reg: Register) !void { _ = try self.addInst(.{ .tag = tag, @@ -1951,7 +1973,7 @@ fn allocRegOrMemAdvanced(self: *Self, elem_ty: Type, inst: ?Air.Inst.Index, reg_ const ptr_bits = self.target.cpu.arch.ptrBitWidth(); const ptr_bytes: u64 = @divExact(ptr_bits, 8); if (abi_size <= ptr_bytes) { - if (self.register_manager.tryAllocReg(inst, try self.regClassForType(elem_ty))) |reg| { + if (self.register_manager.tryAllocReg(inst, regClassForType(elem_ty))) |reg| { return MCValue{ .register = registerAlias(reg, abi_size) }; } } @@ -1961,14 +1983,9 @@ fn allocRegOrMemAdvanced(self: *Self, elem_ty: Type, inst: ?Air.Inst.Index, reg_ return .{ .load_frame = .{ .index = frame_index } }; } -fn regClassForType(self: *Self, ty: Type) !RegisterManager.RegisterBitSet { +fn regClassForType(ty: Type) RegisterManager.RegisterBitSet { return switch (ty.zigTypeTag()) { - .Vector => self.fail("TODO regClassForType for {}", .{ty.fmt(self.bin_file.options.module.?)}), - .Float => switch (ty.floatBits(self.target.*)) { - 32 => if (Target.x86.featureSetHas(self.target.cpu.features, .sse)) sse else gp, - 64 => if (Target.x86.featureSetHas(self.target.cpu.features, .sse2)) sse else gp, - else => gp, - }, + .Float, .Vector => sse, else => gp, }; } @@ -2111,7 +2128,7 @@ pub fn spillRegisters(self: *Self, registers: []const Register) !void { /// allocated. A second call to `copyToTmpRegister` may return the same register. /// This can have a side effect of spilling instructions to the stack to free up a register. 
fn copyToTmpRegister(self: *Self, ty: Type, mcv: MCValue) !Register { - const reg = try self.register_manager.allocReg(null, try self.regClassForType(ty)); + const reg = try self.register_manager.allocReg(null, regClassForType(ty)); try self.genSetReg(reg, ty, mcv); return reg; } @@ -2126,7 +2143,7 @@ fn copyToRegisterWithInstTracking( ty: Type, mcv: MCValue, ) !MCValue { - const reg: Register = try self.register_manager.allocReg(reg_owner, try self.regClassForType(ty)); + const reg: Register = try self.register_manager.allocReg(reg_owner, regClassForType(ty)); try self.genSetReg(reg, ty, mcv); return MCValue{ .register = reg }; } @@ -2159,8 +2176,7 @@ fn airFptrunc(self: *Self, inst: Air.Inst.Index) !void { if (dst_ty.floatBits(self.target.*) != 32 or src_ty.floatBits(self.target.*) != 64 or !Target.x86.featureSetHas(self.target.cpu.features, .sse2)) return self.fail("TODO implement airFptrunc from {} to {}", .{ - src_ty.fmt(self.bin_file.options.module.?), - dst_ty.fmt(self.bin_file.options.module.?), + src_ty.fmt(self.bin_file.options.module.?), dst_ty.fmt(self.bin_file.options.module.?), }); const src_mcv = try self.resolveInst(ty_op.operand); @@ -2182,8 +2198,7 @@ fn airFpext(self: *Self, inst: Air.Inst.Index) !void { if (dst_ty.floatBits(self.target.*) != 64 or src_ty.floatBits(self.target.*) != 32 or !Target.x86.featureSetHas(self.target.cpu.features, .sse2)) return self.fail("TODO implement airFpext from {} to {}", .{ - src_ty.fmt(self.bin_file.options.module.?), - dst_ty.fmt(self.bin_file.options.module.?), + src_ty.fmt(self.bin_file.options.module.?), dst_ty.fmt(self.bin_file.options.module.?), }); const src_mcv = try self.resolveInst(ty_op.operand); @@ -4436,8 +4451,8 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void { const ptr_ty = self.air.typeOf(ty_op.operand); const elem_size = elem_ty.abiSize(self.target.*); - const elem_rc = try self.regClassForType(elem_ty); - const ptr_rc = try self.regClassForType(ptr_ty); + const elem_rc = regClassForType(elem_ty); + const ptr_rc = regClassForType(ptr_ty); const ptr_mcv = try self.resolveInst(ty_op.operand); const dst_mcv = if (elem_size <= 8 and elem_rc.supersetOf(ptr_rc) and @@ -5257,8 +5272,7 @@ fn genMulDivBinOp( .mul, .mulwrap => dst_abi_size != src_abi_size and dst_abi_size != src_abi_size * 2, .div_trunc, .div_floor, .div_exact, .rem, .mod => dst_abi_size != src_abi_size, } or src_abi_size > 8) return self.fail("TODO implement genMulDivBinOp from {} to {}", .{ - src_ty.fmt(self.bin_file.options.module.?), - dst_ty.fmt(self.bin_file.options.module.?), + src_ty.fmt(self.bin_file.options.module.?), dst_ty.fmt(self.bin_file.options.module.?), }); const ty = if (dst_abi_size <= 8) dst_ty else src_ty; const abi_size = if (dst_abi_size <= 8) dst_abi_size else src_abi_size; @@ -5558,7 +5572,9 @@ fn genBinOp( }, lhs_ty, dst_mcv, src_mcv), .mul => try self.genBinOpMir(switch (lhs_ty.zigTypeTag()) { - else => return self.fail("TODO implement genBinOp for {s} {}", .{ @tagName(tag), lhs_ty.fmt(self.bin_file.options.module.?) }), + else => return self.fail("TODO implement genBinOp for {s} {}", .{ + @tagName(tag), lhs_ty.fmt(self.bin_file.options.module.?), + }), .Float => switch (lhs_ty.floatBits(self.target.*)) { 32 => if (Target.x86.featureSetHas(self.target.cpu.features, .sse)) .mulss @@ -5761,9 +5777,13 @@ fn genBinOp( .max => .maxsd, else => unreachable, }, - else => return self.fail("TODO implement genBinOp for {s} {}", .{ @tagName(tag), lhs_ty.fmt(self.bin_file.options.module.?) 
}), + else => return self.fail("TODO implement genBinOp for {s} {}", .{ + @tagName(tag), lhs_ty.fmt(self.bin_file.options.module.?), + }), }, lhs_ty, dst_mcv, src_mcv), - else => return self.fail("TODO implement genBinOp for {s} {}", .{ @tagName(tag), lhs_ty.fmt(self.bin_file.options.module.?) }), + else => return self.fail("TODO implement genBinOp for {s} {}", .{ + @tagName(tag), lhs_ty.fmt(self.bin_file.options.module.?), + }), }, else => unreachable, @@ -5802,8 +5822,7 @@ fn genBinOpMir(self: *Self, mir_tag: Mir.Inst.Tag, ty: Type, dst_mcv: MCValue, s .Float => { if (!Target.x86.featureSetHas(self.target.cpu.features, .sse)) return self.fail("TODO genBinOpMir for {s} {} without sse", .{ - @tagName(mir_tag), - ty.fmt(self.bin_file.options.module.?), + @tagName(mir_tag), ty.fmt(self.bin_file.options.module.?), }); return self.asmRegisterRegister(mir_tag, dst_reg.to128(), src_reg.to128()); }, @@ -7588,10 +7607,11 @@ fn movMirTag(self: *Self, ty: Type) !Mir.Inst.Tag { return switch (ty.zigTypeTag()) { else => .mov, .Float => switch (ty.floatBits(self.target.*)) { - 16 => .mov, - 32 => if (Target.x86.featureSetHas(self.target.cpu.features, .sse)) .movss else .mov, - 64 => if (Target.x86.featureSetHas(self.target.cpu.features, .sse2)) .movsd else .mov, - else => return self.fail("TODO movMirTag for {}", .{ + 16 => unreachable, // needs special handling + 32 => .movss, + 64 => .movsd, + 128 => .movaps, + else => return self.fail("TODO movMirTag from {}", .{ ty.fmt(self.bin_file.options.module.?), }), }, @@ -7700,8 +7720,17 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr }, .register => |src_reg| if (dst_reg.id() != src_reg.id()) try self.asmRegisterRegister( if ((dst_reg.class() == .floating_point) == (src_reg.class() == .floating_point)) - try self.movMirTag(ty) + switch (ty.zigTypeTag()) { + else => .mov, + .Float, .Vector => .movaps, + } else switch (abi_size) { + 2 => return try self.asmRegisterRegisterImmediate( + if (dst_reg.class() == .floating_point) .pinsrw else .pextrw, + registerAlias(dst_reg, abi_size), + registerAlias(src_reg, abi_size), + Immediate.u(0), + ), 4 => .movd, 8 => .movq, else => return self.fail( @@ -7712,18 +7741,12 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr registerAlias(dst_reg, abi_size), registerAlias(src_reg, abi_size), ), - .register_offset, .indirect, .load_frame, .lea_frame => try self.asmRegisterMemory( - switch (src_mcv) { - .register_offset => |reg_off| switch (reg_off.off) { - 0 => return self.genSetReg(dst_reg, ty, .{ .register = reg_off.reg }), - else => .lea, - }, - .indirect, .load_frame => try self.movMirTag(ty), - .lea_frame => .lea, - else => unreachable, - }, - registerAlias(dst_reg, abi_size), - Memory.sib(Memory.PtrSize.fromSize(abi_size), switch (src_mcv) { + .register_offset, + .indirect, + .load_frame, + .lea_frame, + => { + const src_mem = Memory.sib(Memory.PtrSize.fromSize(abi_size), switch (src_mcv) { .register_offset, .indirect => |reg_off| .{ .base = .{ .reg = reg_off.reg }, .disp = reg_off.off, @@ -7733,20 +7756,51 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr .disp = frame_addr.off, }, else => unreachable, - }), - ), + }); + if (ty.isRuntimeFloat() and ty.floatBits(self.target.*) == 16) + try self.asmRegisterMemoryImmediate( + .pinsrw, + registerAlias(dst_reg, abi_size), + src_mem, + Immediate.u(0), + ) + else + try self.asmRegisterMemory( + switch (src_mcv) { + .register_offset => |reg_off| switch (reg_off.off) { + 0 => 
return self.genSetReg(dst_reg, ty, .{ .register = reg_off.reg }), + else => .lea, + }, + .indirect, .load_frame => try self.movMirTag(ty), + .lea_frame => .lea, + else => unreachable, + }, + registerAlias(dst_reg, abi_size), + src_mem, + ); + }, .memory, .load_direct, .load_got, .load_tlv => { switch (src_mcv) { - .memory => |addr| if (math.cast(i32, @bitCast(i64, addr))) |small_addr| - return self.asmRegisterMemory( - try self.movMirTag(ty), - registerAlias(dst_reg, abi_size), - Memory.sib(Memory.PtrSize.fromSize(abi_size), .{ - .base = .{ .reg = .ds }, - .disp = small_addr, - }), - ), - .load_direct => |sym_index| if (try self.movMirTag(ty) == .mov) { + .memory => |addr| if (math.cast(i32, @bitCast(i64, addr))) |small_addr| { + const src_mem = Memory.sib(Memory.PtrSize.fromSize(abi_size), .{ + .base = .{ .reg = .ds }, + .disp = small_addr, + }); + return if (ty.isRuntimeFloat() and ty.floatBits(self.target.*) == 16) + self.asmRegisterMemoryImmediate( + .pinsrw, + registerAlias(dst_reg, abi_size), + src_mem, + Immediate.u(0), + ) + else + self.asmRegisterMemory( + try self.movMirTag(ty), + registerAlias(dst_reg, abi_size), + src_mem, + ); + }, + .load_direct => |sym_index| if (!ty.isRuntimeFloat()) { const atom_index = try self.owner.getSymbolIndex(self); _ = try self.addInst(.{ .tag = .mov_linker, @@ -7767,11 +7821,22 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr const addr_lock = self.register_manager.lockRegAssumeUnused(addr_reg); defer self.register_manager.unlockReg(addr_lock); - try self.asmRegisterMemory( - try self.movMirTag(ty), - registerAlias(dst_reg, abi_size), - Memory.sib(Memory.PtrSize.fromSize(abi_size), .{ .base = .{ .reg = addr_reg } }), - ); + const src_mem = Memory.sib(Memory.PtrSize.fromSize(abi_size), .{ + .base = .{ .reg = addr_reg }, + }); + if (ty.isRuntimeFloat() and ty.floatBits(self.target.*) == 16) + try self.asmRegisterMemoryImmediate( + .pinsrw, + registerAlias(dst_reg, abi_size), + src_mem, + Immediate.u(0), + ) + else + try self.asmRegisterMemory( + try self.movMirTag(ty), + registerAlias(dst_reg, abi_size), + src_mem, + ); }, .lea_direct, .lea_got => |sym_index| { const atom_index = try self.owner.getSymbolIndex(self); @@ -7864,11 +7929,25 @@ fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCVal }, }, .eflags => |cc| try self.asmSetccMemory(Memory.sib(.byte, .{ .base = base, .disp = disp }), cc), - .register => |reg| try self.asmMemoryRegister( - try self.movMirTag(ty), - Memory.sib(Memory.PtrSize.fromSize(abi_size), .{ .base = base, .disp = disp }), - registerAlias(reg, abi_size), - ), + .register => |src_reg| { + const dst_mem = Memory.sib( + Memory.PtrSize.fromSize(abi_size), + .{ .base = base, .disp = disp }, + ); + if (ty.isRuntimeFloat() and ty.floatBits(self.target.*) == 16) + try self.asmMemoryRegisterImmediate( + .pextrw, + dst_mem, + registerAlias(src_reg, abi_size), + Immediate.u(0), + ) + else + try self.asmMemoryRegister( + try self.movMirTag(ty), + dst_mem, + registerAlias(src_reg, abi_size), + ); + }, .register_overflow => |ro| { try self.genSetMem( base, @@ -8071,8 +8150,8 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void { const src_ty = self.air.typeOf(ty_op.operand); const result = result: { - const dst_rc = try self.regClassForType(dst_ty); - const src_rc = try self.regClassForType(src_ty); + const dst_rc = regClassForType(dst_ty); + const src_rc = regClassForType(src_ty); const operand = try self.resolveInst(ty_op.operand); if (dst_rc.supersetOf(src_rc) and 
self.reuseOperand(inst, ty_op.operand, 0, operand)) break :result operand; @@ -8127,8 +8206,7 @@ fn airIntToFloat(self: *Self, inst: Air.Inst.Index) !void { .unsigned => src_bits + 1, }, 32), 8) catch unreachable; if (src_size > 8) return self.fail("TODO implement airIntToFloat from {} to {}", .{ - src_ty.fmt(self.bin_file.options.module.?), - dst_ty.fmt(self.bin_file.options.module.?), + src_ty.fmt(self.bin_file.options.module.?), dst_ty.fmt(self.bin_file.options.module.?), }); const src_mcv = try self.resolveInst(ty_op.operand); @@ -8141,7 +8219,7 @@ fn airIntToFloat(self: *Self, inst: Air.Inst.Index) !void { if (src_bits < src_size * 8) try self.truncateRegister(src_ty, src_reg); - const dst_reg = try self.register_manager.allocReg(inst, try self.regClassForType(dst_ty)); + const dst_reg = try self.register_manager.allocReg(inst, regClassForType(dst_ty)); const dst_mcv = MCValue{ .register = dst_reg }; const dst_lock = self.register_manager.lockRegAssumeUnused(dst_reg); defer self.register_manager.unlockReg(dst_lock); @@ -8151,19 +8229,16 @@ fn airIntToFloat(self: *Self, inst: Air.Inst.Index) !void { .cvtsi2ss else return self.fail("TODO implement airIntToFloat from {} to {} without sse", .{ - src_ty.fmt(self.bin_file.options.module.?), - dst_ty.fmt(self.bin_file.options.module.?), + src_ty.fmt(self.bin_file.options.module.?), dst_ty.fmt(self.bin_file.options.module.?), }), 64 => if (Target.x86.featureSetHas(self.target.cpu.features, .sse2)) .cvtsi2sd else return self.fail("TODO implement airIntToFloat from {} to {} without sse2", .{ - src_ty.fmt(self.bin_file.options.module.?), - dst_ty.fmt(self.bin_file.options.module.?), + src_ty.fmt(self.bin_file.options.module.?), dst_ty.fmt(self.bin_file.options.module.?), }), else => return self.fail("TODO implement airIntToFloat from {} to {}", .{ - src_ty.fmt(self.bin_file.options.module.?), - dst_ty.fmt(self.bin_file.options.module.?), + src_ty.fmt(self.bin_file.options.module.?), dst_ty.fmt(self.bin_file.options.module.?), }), }, dst_reg.to128(), registerAlias(src_reg, src_size)); diff --git a/src/arch/x86_64/Encoding.zig b/src/arch/x86_64/Encoding.zig index 13d7b1776d..944fe85458 100644 --- a/src/arch/x86_64/Encoding.zig +++ b/src/arch/x86_64/Encoding.zig @@ -274,9 +274,11 @@ pub const Mnemonic = enum { cvtsi2ss, divss, maxss, minss, - movss, + movaps, movss, movups, mulss, orps, + pextrw, + pinsrw, sqrtps, sqrtss, subss, @@ -290,7 +292,9 @@ pub const Mnemonic = enum { cvtsd2ss, cvtsi2sd, cvtss2sd, divsd, maxsd, minsd, + movapd, movq, //movd, movsd, + movupd, mulsd, orpd, sqrtpd, diff --git a/src/arch/x86_64/Lower.zig b/src/arch/x86_64/Lower.zig index b369ba2a6b..4289cfaf2a 100644 --- a/src/arch/x86_64/Lower.zig +++ b/src/arch/x86_64/Lower.zig @@ -101,9 +101,13 @@ pub fn lowerMir(lower: *Lower, inst: Mir.Inst) Error![]const Instruction { .divss, .maxss, .minss, + .movaps, .movss, + .movups, .mulss, .orps, + .pextrw, + .pinsrw, .roundss, .sqrtps, .sqrtss, @@ -198,6 +202,8 @@ fn imm(lower: Lower, ops: Mir.Inst.Ops, i: u32) Immediate { .mi_rip_u, .lock_mi_sib_u, .lock_mi_rip_u, + .rmi_sib, + .rmi_rip, .mri_sib, .mri_rip, => Immediate.u(i), @@ -212,6 +218,7 @@ fn mem(lower: Lower, ops: Mir.Inst.Ops, payload: u32) Memory { return lower.mir.resolveFrameLoc(switch (ops) { .rm_sib, .rm_sib_cc, + .rmi_sib, .m_sib, .m_sib_cc, .mi_sib_u, @@ -227,6 +234,7 @@ fn mem(lower: Lower, ops: Mir.Inst.Ops, payload: u32) Memory { .rm_rip, .rm_rip_cc, + .rmi_rip, .m_rip, .m_rip_cc, .mi_rip_u, @@ -321,6 +329,11 @@ fn mirGeneric(lower: *Lower, inst: Mir.Inst) 
Error!void { .{ .reg = inst.data.rx.r }, .{ .mem = lower.mem(inst.ops, inst.data.rx.payload) }, }, + .rmi_sib, .rmi_rip => &.{ + .{ .reg = inst.data.rix.r }, + .{ .mem = lower.mem(inst.ops, inst.data.rix.payload) }, + .{ .imm = lower.imm(inst.ops, inst.data.rix.i) }, + }, .mr_sib, .lock_mr_sib, .mr_rip, .lock_mr_rip => &.{ .{ .mem = lower.mem(inst.ops, inst.data.rx.payload) }, .{ .reg = inst.data.rx.r }, diff --git a/src/arch/x86_64/Mir.zig b/src/arch/x86_64/Mir.zig index 95efc0a96c..6b2db1b696 100644 --- a/src/arch/x86_64/Mir.zig +++ b/src/arch/x86_64/Mir.zig @@ -182,12 +182,20 @@ pub const Inst = struct { maxss, /// Return minimum single-precision floating-point value minss, + /// Move aligned packed single-precision floating-point values + movaps, /// Move scalar single-precision floating-point value movss, + /// Move unaligned packed single-precision floating-point values + movups, /// Multiply scalar single-precision floating-point values mulss, /// Bitwise logical or of packed single precision floating-point values orps, + /// Extract word + pextrw, + /// Insert word + pinsrw, /// Round scalar single-precision floating-point values roundss, /// Square root of scalar single precision floating-point value @@ -346,6 +354,12 @@ pub const Inst = struct { /// Register, memory (RIP) operands with condition code (CC). /// Uses `rx_cc` payload. rm_rip_cc, + /// Register, memory (SIB), immediate (byte) operands. + /// Uses `rix` payload with extra data of type `MemorySib`. + rmi_sib, + /// Register, memory (RIP), immediate (byte) operands. + /// Uses `rix` payload with extra data of type `MemoryRip`. + rmi_rip, /// Single memory (SIB) operand. /// Uses `payload` with extra data of type `MemorySib`. m_sib, diff --git a/src/arch/x86_64/encodings.zig b/src/arch/x86_64/encodings.zig index 5c443157c5..f87a110e99 100644 --- a/src/arch/x86_64/encodings.zig +++ b/src/arch/x86_64/encodings.zig @@ -847,9 +847,15 @@ pub const table = [_]Entry{ .{ .minss, .rm, &.{ .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x5d }, 0, .sse }, + .{ .movaps, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x28 }, 0, .sse }, + .{ .movaps, .mr, &.{ .xmm_m128, .xmm }, &.{ 0x0f, 0x29 }, 0, .sse }, + .{ .movss, .rm, &.{ .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x10 }, 0, .sse }, .{ .movss, .mr, &.{ .xmm_m32, .xmm }, &.{ 0xf3, 0x0f, 0x11 }, 0, .sse }, + .{ .movups, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x10 }, 0, .sse }, + .{ .movups, .mr, &.{ .xmm_m128, .xmm }, &.{ 0x0f, 0x11 }, 0, .sse }, + .{ .mulss, .rm, &.{ .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x59 }, 0, .sse }, .{ .orps, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x56 }, 0, .sse }, @@ -885,6 +891,9 @@ pub const table = [_]Entry{ .{ .minsd, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x5d }, 0, .sse2 }, + .{ .movapd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x28 }, 0, .sse2 }, + .{ .movapd, .mr, &.{ .xmm_m128, .xmm }, &.{ 0x66, 0x0f, 0x29 }, 0, .sse2 }, + .{ .movd, .rm, &.{ .xmm, .rm32 }, &.{ 0x66, 0x0f, 0x6e }, 0, .sse2 }, .{ .movd, .mr, &.{ .rm32, .xmm }, &.{ 0x66, 0x0f, 0x7e }, 0, .sse2 }, @@ -894,10 +903,17 @@ pub const table = [_]Entry{ .{ .movq, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf3, 0x0f, 0x7e }, 0, .sse2 }, .{ .movq, .mr, &.{ .xmm_m64, .xmm }, &.{ 0x66, 0x0f, 0xd6 }, 0, .sse2 }, + .{ .movupd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x10 }, 0, .sse2 }, + .{ .movupd, .mr, &.{ .xmm_m128, .xmm }, &.{ 0x66, 0x0f, 0x11 }, 0, .sse2 }, + .{ .mulsd, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x59 }, 0, .sse2 }, .{ .orpd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x56 }, 0, .sse2 }, + .{ .pextrw, .mri, 
&.{ .r16, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0xc5 }, 0, .sse2 }, + + .{ .pinsrw, .rmi, &.{ .xmm, .rm16, .imm8 }, &.{ 0x66, 0x0f, 0xc4 }, 0, .sse2 }, + .{ .sqrtpd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x51 }, 0, .sse2 }, .{ .sqrtsd, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x51 }, 0, .sse2 }, @@ -911,6 +927,8 @@ pub const table = [_]Entry{ .{ .xorpd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x57 }, 0, .sse2 }, // SSE4.1 + .{ .pextrw, .mri, &.{ .rm16, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x15 }, 0, .sse4_1 }, + .{ .roundss, .rmi, &.{ .xmm, .xmm_m32, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x0a }, 0, .sse4_1 }, .{ .roundsd, .rmi, &.{ .xmm, .xmm_m64, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x0b }, 0, .sse4_1 }, }; diff --git a/test/behavior/vector.zig b/test/behavior/vector.zig index 01c76310d7..2c55af5f85 100644 --- a/test/behavior/vector.zig +++ b/test/behavior/vector.zig @@ -133,7 +133,6 @@ test "vector bit operators" { } test "implicit cast vector to array" { - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -151,7 +150,6 @@ test "implicit cast vector to array" { } test "array to vector" { - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -321,7 +319,6 @@ test "load vector elements via comptime index" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO const S = struct { fn doTheTest() !void { @@ -343,7 +340,6 @@ test "store vector elements via comptime index" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO const S = struct { fn doTheTest() !void { @@ -371,7 +367,6 @@ test "load vector elements via runtime index" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO const S = struct { fn doTheTest() !void { @@ -393,7 +388,6 @@ test "store vector elements via runtime index" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO const S = struct { fn doTheTest() !void { -- cgit v1.2.3 From 9bea854dc2af293cba1d000b31f6e82d9c431285 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Thu, 4 May 2023 03:36:28 -0400 Subject: x86_64: implement `@floor`, `@ceil`, and `@trunc` --- 
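Note on the rounding immediates: the imm8 operand of ROUNDSS/ROUNDSD
selects the rounding behavior. Bits 1:0 pick the mode (01 = toward
negative infinity, 10 = toward positive infinity, 11 = toward zero),
bit 2 = 0 takes the mode from the immediate instead of MXCSR.RC, and
bit 3 = 1 suppresses the precision (inexact) exception, which is how
the three constants passed to airRound below are built. A minimal
sketch of the same mapping (the enum and its name are illustrative,
not code from this commit):

    // Illustrative only: mirrors the immediates passed to airRound.
    const RoundMode = enum(u8) {
        // bit 3 set: suppress the inexact exception;
        // bit 2 clear: rounding mode comes from imm8, not MXCSR.RC;
        // bits 1:0: the rounding mode itself
        floor = 0b1_0_01, // round toward negative infinity
        ceil = 0b1_0_10, // round toward positive infinity
        trunc = 0b1_0_11, // round toward zero
    };
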
src/arch/x86_64/CodeGen.zig | 44 +++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 41 insertions(+), 3 deletions(-) (limited to 'src/arch') diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 97e672b71f..fbed0bcf96 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -1480,12 +1480,12 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .log, .log2, .log10, - .floor, - .ceil, .round, - .trunc_float, => try self.airUnaryMath(inst), + .floor => try self.airRound(inst, Immediate.u(0b1_0_01)), + .ceil => try self.airRound(inst, Immediate.u(0b1_0_10)), + .trunc_float => try self.airRound(inst, Immediate.u(0b1_0_11)), .sqrt => try self.airSqrt(inst), .neg, .fabs => try self.airFloatSign(inst), @@ -4258,6 +4258,44 @@ fn airFloatSign(self: *Self, inst: Air.Inst.Index) !void { return self.finishAir(inst, dst_mcv, .{ un_op, .none, .none }); } +fn airRound(self: *Self, inst: Air.Inst.Index, mode: Immediate) !void { + const un_op = self.air.instructions.items(.data)[inst].un_op; + const ty = self.air.typeOf(un_op); + + if (!Target.x86.featureSetHas(self.target.cpu.features, .sse4_1)) + return self.fail("TODO implement airRound without sse4_1 feature", .{}); + + const src_mcv = try self.resolveInst(un_op); + const dst_mcv = if (src_mcv.isRegister() and self.reuseOperand(inst, un_op, 0, src_mcv)) + src_mcv + else + try self.copyToRegisterWithInstTracking(inst, ty, src_mcv); + + const mir_tag: Mir.Inst.Tag = switch (ty.zigTypeTag()) { + .Float => switch (ty.floatBits(self.target.*)) { + 32 => .roundss, + 64 => .roundsd, + else => return self.fail("TODO implement airRound for {}", .{ + ty.fmt(self.bin_file.options.module.?), + }), + }, + else => return self.fail("TODO implement airRound for {}", .{ + ty.fmt(self.bin_file.options.module.?), + }), + }; + assert(dst_mcv.isRegister()); + if (src_mcv.isRegister()) + try self.asmRegisterRegisterImmediate(mir_tag, dst_mcv.getReg().?, src_mcv.getReg().?, mode) + else + try self.asmRegisterMemoryImmediate( + mir_tag, + dst_mcv.getReg().?, + src_mcv.mem(Memory.PtrSize.fromSize(@intCast(u32, ty.abiSize(self.target.*)))), + mode, + ); + return self.finishAir(inst, dst_mcv, .{ un_op, .none, .none }); +} + fn airSqrt(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const ty = self.air.typeOf(un_op); -- cgit v1.2.3 From 1a261917ce41efb49fe41ea0c6d9083212c17797 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Thu, 4 May 2023 03:36:04 -0400 Subject: x86_64: implement `@ctz` and `@clz` for `u128` --- src/arch/x86_64/CodeGen.zig | 93 ++++++++++++++++++++++++++++++++++----------- test/behavior/bugs/2114.zig | 3 +- test/behavior/math.zig | 3 +- 3 files changed, 74 insertions(+), 25 deletions(-) (limited to 'src/arch') diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index fbed0bcf96..55b18985da 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -3798,19 +3798,38 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void { const dst_reg = try self.register_manager.allocReg(inst, gp); const dst_mcv = MCValue{ .register = dst_reg }; - const dst_lock = self.register_manager.lockReg(dst_reg); - defer if (dst_lock) |lock| self.register_manager.unlockReg(lock); + const dst_lock = self.register_manager.lockRegAssumeUnused(dst_reg); + defer self.register_manager.unlockReg(dst_lock); + const src_bits = src_ty.bitSize(self.target.*); if (Target.x86.featureSetHas(self.target.cpu.features, .lzcnt)) { - try 
self.genBinOpMir(.lzcnt, src_ty, dst_mcv, mat_src_mcv); - const extra_bits = self.regExtraBits(src_ty); - if (extra_bits > 0) { - try self.genBinOpMir(.sub, dst_ty, dst_mcv, .{ .immediate = extra_bits }); - } + if (src_bits <= 64) { + try self.genBinOpMir(.lzcnt, src_ty, dst_mcv, mat_src_mcv); + + const extra_bits = self.regExtraBits(src_ty); + if (extra_bits > 0) { + try self.genBinOpMir(.sub, dst_ty, dst_mcv, .{ .immediate = extra_bits }); + } + } else if (src_bits <= 128) { + const tmp_reg = try self.register_manager.allocReg(null, gp); + const tmp_mcv = MCValue{ .register = tmp_reg }; + const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg); + defer self.register_manager.unlockReg(tmp_lock); + + try self.genBinOpMir(.lzcnt, Type.u64, dst_mcv, mat_src_mcv); + try self.genBinOpMir(.add, dst_ty, dst_mcv, .{ .immediate = 64 }); + try self.genBinOpMir(.lzcnt, Type.u64, tmp_mcv, mat_src_mcv.address().offset(8).deref()); + try self.asmCmovccRegisterRegister(dst_reg.to32(), tmp_reg.to32(), .nc); + + if (src_bits < 128) { + try self.genBinOpMir(.sub, dst_ty, dst_mcv, .{ .immediate = 128 - src_bits }); + } + } else return self.fail("TODO airClz of {}", .{src_ty.fmt(self.bin_file.options.module.?)}); break :result dst_mcv; } - const src_bits = src_ty.bitSize(self.target.*); + if (src_bits > 64) + return self.fail("TODO airClz of {}", .{src_ty.fmt(self.bin_file.options.module.?)}); if (math.isPowerOfTwo(src_bits)) { const imm_reg = try self.copyToTmpRegister(dst_ty, .{ .immediate = src_bits ^ (src_bits - 1), @@ -3870,24 +3889,52 @@ fn airCtz(self: *Self, inst: Air.Inst.Index) !void { defer if (dst_lock) |lock| self.register_manager.unlockReg(lock); if (Target.x86.featureSetHas(self.target.cpu.features, .bmi)) { - const extra_bits = self.regExtraBits(src_ty); - const masked_mcv = if (extra_bits > 0) masked: { - const mask_mcv = MCValue{ - .immediate = ((@as(u64, 1) << @intCast(u6, extra_bits)) - 1) << - @intCast(u6, src_bits), - }; - const tmp_mcv = tmp: { - if (src_mcv.isImmediate() or self.liveness.operandDies(inst, 0)) break :tmp src_mcv; - try self.genSetReg(dst_reg, src_ty, src_mcv); - break :tmp dst_mcv; - }; - try self.genBinOpMir(.@"or", src_ty, tmp_mcv, mask_mcv); - break :masked tmp_mcv; - } else mat_src_mcv; - try self.genBinOpMir(.tzcnt, src_ty, dst_mcv, masked_mcv); + if (src_bits <= 64) { + const extra_bits = self.regExtraBits(src_ty); + const masked_mcv = if (extra_bits > 0) masked: { + const tmp_mcv = tmp: { + if (src_mcv.isImmediate() or self.liveness.operandDies(inst, 0)) + break :tmp src_mcv; + try self.genSetReg(dst_reg, src_ty, src_mcv); + break :tmp dst_mcv; + }; + try self.genBinOpMir( + .@"or", + src_ty, + tmp_mcv, + .{ .immediate = (@as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - extra_bits)) << + @intCast(u6, src_bits) }, + ); + break :masked tmp_mcv; + } else mat_src_mcv; + try self.genBinOpMir(.tzcnt, src_ty, dst_mcv, masked_mcv); + } else if (src_bits <= 128) { + const tmp_reg = try self.register_manager.allocReg(null, gp); + const tmp_mcv = MCValue{ .register = tmp_reg }; + const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg); + defer self.register_manager.unlockReg(tmp_lock); + + const masked_mcv = if (src_bits < 128) masked: { + try self.genCopy(Type.u64, dst_mcv, mat_src_mcv.address().offset(8).deref()); + try self.genBinOpMir( + .@"or", + Type.u64, + dst_mcv, + .{ .immediate = @as(u64, math.maxInt(u64)) << @intCast(u6, src_bits - 64) }, + ); + break :masked dst_mcv; + } else mat_src_mcv.address().offset(8).deref(); + try 
self.genBinOpMir(.tzcnt, Type.u64, dst_mcv, masked_mcv); + try self.genBinOpMir(.add, dst_ty, dst_mcv, .{ .immediate = 64 }); + try self.genBinOpMir(.tzcnt, Type.u64, tmp_mcv, mat_src_mcv); + try self.asmCmovccRegisterRegister(dst_reg.to32(), tmp_reg.to32(), .nc); + } else return self.fail("TODO airCtz of {}", .{src_ty.fmt(self.bin_file.options.module.?)}); break :result dst_mcv; } + if (src_bits > 64) + return self.fail("TODO airCtz of {}", .{src_ty.fmt(self.bin_file.options.module.?)}); + const width_reg = try self.copyToTmpRegister(dst_ty, .{ .immediate = src_bits }); try self.genBinOpMir(.bsf, src_ty, dst_mcv, mat_src_mcv); diff --git a/test/behavior/bugs/2114.zig b/test/behavior/bugs/2114.zig index f92728eff6..3ad4a97b80 100644 --- a/test/behavior/bugs/2114.zig +++ b/test/behavior/bugs/2114.zig @@ -9,7 +9,8 @@ fn ctz(x: anytype) usize { test "fixed" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64 and + !comptime std.Target.x86.featureSetHas(builtin.cpu.features, .bmi)) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO diff --git a/test/behavior/math.zig b/test/behavior/math.zig index f9c9f43927..0362bd3a2b 100644 --- a/test/behavior/math.zig +++ b/test/behavior/math.zig @@ -77,7 +77,8 @@ fn testClz() !void { } test "@clz big ints" { - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64 and + !comptime std.Target.x86.featureSetHas(builtin.cpu.features, .lzcnt)) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO -- cgit v1.2.3 From 32ab930f1d39c374265ae14f1de9d837dcd7f650 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Fri, 5 May 2023 01:32:39 -0400 Subject: x86_64: implement f16 conversions when supported --- src/arch/x86_64/CodeGen.zig | 66 +- src/arch/x86_64/Encoding.zig | 95 ++- src/arch/x86_64/Lower.zig | 3 + src/arch/x86_64/Mir.zig | 5 + src/arch/x86_64/encoder.zig | 160 +++- src/arch/x86_64/encodings.zig | 1746 +++++++++++++++++++++-------------------- test/behavior/vector.zig | 3 +- 7 files changed, 1151 insertions(+), 927 deletions(-) (limited to 'src/arch') diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 55b18985da..b7fd81db68 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -2172,12 +2172,9 @@ fn airRetPtr(self: *Self, inst: Air.Inst.Index) !void { fn airFptrunc(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const dst_ty = self.air.typeOfIndex(inst); + const dst_bits = dst_ty.floatBits(self.target.*); const src_ty = self.air.typeOf(ty_op.operand); - if (dst_ty.floatBits(self.target.*) != 32 or src_ty.floatBits(self.target.*) != 64 or - !Target.x86.featureSetHas(self.target.cpu.features, .sse2)) - return self.fail("TODO implement airFptrunc from {} to {}", .{ - src_ty.fmt(self.bin_file.options.module.?), dst_ty.fmt(self.bin_file.options.module.?), - }); + const src_bits = src_ty.floatBits(self.target.*); const src_mcv = try 
self.resolveInst(ty_op.operand); const dst_mcv = if (src_mcv.isRegister() and self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) @@ -2187,19 +2184,32 @@ fn airFptrunc(self: *Self, inst: Air.Inst.Index) !void { const dst_lock = self.register_manager.lockReg(dst_mcv.register); defer if (dst_lock) |lock| self.register_manager.unlockReg(lock); - try self.genBinOpMir(.cvtsd2ss, src_ty, dst_mcv, src_mcv); + if (src_bits == 32 and dst_bits == 16 and self.hasFeature(.f16c)) + try self.asmRegisterRegisterImmediate( + .vcvtps2ph, + dst_mcv.register, + if (src_mcv.isRegister()) src_mcv.getReg().? else src_reg: { + const src_reg = dst_mcv.register; + try self.genSetReg(src_reg, src_ty, src_mcv); + break :src_reg src_reg; + }, + Immediate.u(0b1_00), + ) + else if (src_bits == 64 and dst_bits == 32) + try self.genBinOpMir(.cvtsd2ss, src_ty, dst_mcv, src_mcv) + else + return self.fail("TODO implement airFptrunc from {} to {}", .{ + src_ty.fmt(self.bin_file.options.module.?), dst_ty.fmt(self.bin_file.options.module.?), + }); return self.finishAir(inst, dst_mcv, .{ ty_op.operand, .none, .none }); } fn airFpext(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const dst_ty = self.air.typeOfIndex(inst); + const dst_bits = dst_ty.floatBits(self.target.*); const src_ty = self.air.typeOf(ty_op.operand); - if (dst_ty.floatBits(self.target.*) != 64 or src_ty.floatBits(self.target.*) != 32 or - !Target.x86.featureSetHas(self.target.cpu.features, .sse2)) - return self.fail("TODO implement airFpext from {} to {}", .{ - src_ty.fmt(self.bin_file.options.module.?), dst_ty.fmt(self.bin_file.options.module.?), - }); + const src_bits = src_ty.floatBits(self.target.*); const src_mcv = try self.resolveInst(ty_op.operand); const dst_mcv = if (src_mcv.isRegister() and self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) @@ -2209,7 +2219,19 @@ fn airFpext(self: *Self, inst: Air.Inst.Index) !void { const dst_lock = self.register_manager.lockReg(dst_mcv.register); defer if (dst_lock) |lock| self.register_manager.unlockReg(lock); - try self.genBinOpMir(.cvtss2sd, src_ty, dst_mcv, src_mcv); + try self.genBinOpMir( + if (src_bits == 16 and dst_bits == 32 and self.hasFeature(.f16c)) + .vcvtph2ps + else if (src_bits == 32 and dst_bits == 64) + .cvtss2sd + else + return self.fail("TODO implement airFpext from {} to {}", .{ + src_ty.fmt(self.bin_file.options.module.?), dst_ty.fmt(self.bin_file.options.module.?), + }), + src_ty, + dst_mcv, + src_mcv, + ); return self.finishAir(inst, dst_mcv, .{ ty_op.operand, .none, .none }); } @@ -3802,7 +3824,7 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void { defer self.register_manager.unlockReg(dst_lock); const src_bits = src_ty.bitSize(self.target.*); - if (Target.x86.featureSetHas(self.target.cpu.features, .lzcnt)) { + if (self.hasFeature(.lzcnt)) { if (src_bits <= 64) { try self.genBinOpMir(.lzcnt, src_ty, dst_mcv, mat_src_mcv); @@ -3888,7 +3910,7 @@ fn airCtz(self: *Self, inst: Air.Inst.Index) !void { const dst_lock = self.register_manager.lockReg(dst_reg); defer if (dst_lock) |lock| self.register_manager.unlockReg(lock); - if (Target.x86.featureSetHas(self.target.cpu.features, .bmi)) { + if (self.hasFeature(.bmi)) { if (src_bits <= 64) { const extra_bits = self.regExtraBits(src_ty); const masked_mcv = if (extra_bits > 0) masked: { @@ -3956,7 +3978,7 @@ fn airPopcount(self: *Self, inst: Air.Inst.Index) !void { const src_abi_size = @intCast(u32, src_ty.abiSize(self.target.*)); const src_mcv = try self.resolveInst(ty_op.operand); - if 
(Target.x86.featureSetHas(self.target.cpu.features, .popcnt)) { + if (self.hasFeature(.popcnt)) { const mat_src_mcv = switch (src_mcv) { .immediate => MCValue{ .register = try self.copyToTmpRegister(src_ty, src_mcv) }, else => src_mcv, @@ -4309,7 +4331,7 @@ fn airRound(self: *Self, inst: Air.Inst.Index, mode: Immediate) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const ty = self.air.typeOf(un_op); - if (!Target.x86.featureSetHas(self.target.cpu.features, .sse4_1)) + if (!self.hasFeature(.sse4_1)) return self.fail("TODO implement airRound without sse4_1 feature", .{}); const src_mcv = try self.resolveInst(un_op); @@ -5712,7 +5734,7 @@ fn genBinOp( => {}, .div_trunc, .div_floor, - => if (Target.x86.featureSetHas(self.target.cpu.features, .sse4_1)) { + => if (self.hasFeature(.sse4_1)) { const abi_size = @intCast(u32, lhs_ty.abiSize(self.target.*)); const dst_alias = registerAlias(dst_mcv.register, abi_size); try self.asmRegisterRegisterImmediate(switch (lhs_ty.floatBits(self.target.*)) { @@ -9593,3 +9615,13 @@ fn regBitSize(self: *Self, ty: Type) u64 { fn regExtraBits(self: *Self, ty: Type) u64 { return self.regBitSize(ty) - ty.bitSize(self.target.*); } + +fn hasFeature(self: *Self, feature: Target.x86.Feature) bool { + return Target.x86.featureSetHas(self.target.cpu.features, feature); +} +fn hasAnyFeatures(self: *Self, features: anytype) bool { + return Target.x86.featureSetHasAny(self.target.cpu.features, features); +} +fn hasAllFeatures(self: *Self, features: anytype) bool { + return Target.x86.featureSetHasAll(self.target.cpu.features, features); +} diff --git a/src/arch/x86_64/Encoding.zig b/src/arch/x86_64/Encoding.zig index 944fe85458..05c48ecddf 100644 --- a/src/arch/x86_64/Encoding.zig +++ b/src/arch/x86_64/Encoding.zig @@ -23,6 +23,7 @@ const Data = struct { opc: [7]u8, modrm_ext: u3, mode: Mode, + feature: Feature, }; pub fn findByMnemonic( @@ -58,7 +59,7 @@ pub fn findByMnemonic( next: for (mnemonic_to_encodings_map[@enumToInt(mnemonic)]) |data| { switch (data.mode) { .rex => if (!rex_required) continue, - .long, .sse_long, .sse2_long => {}, + .long => {}, else => if (rex_required) continue, } for (input_ops, data.ops) |input_op, data_op| @@ -136,22 +137,20 @@ pub fn modRmExt(encoding: Encoding) u3 { } pub fn operandBitSize(encoding: Encoding) u64 { - switch (encoding.data.mode) { - .short => return 16, - .long, .sse_long, .sse2_long => return 64, - else => {}, - } - const bit_size: u64 = switch (encoding.data.op_en) { - .np => switch (encoding.data.ops[0]) { - .o16 => 16, - .o32 => 32, - .o64 => 64, - else => 32, + return switch (encoding.data.mode) { + .short => 16, + .long => 64, + else => switch (encoding.data.op_en) { + .np => switch (encoding.data.ops[0]) { + .o16 => 16, + .o32 => 32, + .o64 => 64, + else => 32, + }, + .td => encoding.data.ops[1].bitSize(), + else => encoding.data.ops[0].bitSize(), }, - .td => encoding.data.ops[1].bitSize(), - else => encoding.data.ops[0].bitSize(), }; - return bit_size; } pub fn format( @@ -162,12 +161,50 @@ pub fn format( ) !void { _ = options; _ = fmt; + + var opc = encoding.opcode(); switch (encoding.data.mode) { - .long, .sse_long, .sse2_long => try writer.writeAll("REX.W + "), else => {}, + .long => try writer.writeAll("REX.W + "), + .vex_128, .vex_128_long, .vex_256, .vex_256_long => { + try writer.writeAll("VEX."); + + switch (encoding.data.mode) { + .vex_128, .vex_128_long => try writer.writeAll("128"), + .vex_256, .vex_256_long => try writer.writeAll("256"), + else => unreachable, + } + + switch (opc[0]) 
{ + else => {}, + 0x66, 0xf3, 0xf2 => { + try writer.print(".{X:0>2}", .{opc[0]}); + opc = opc[1..]; + }, + } + + try writer.print(".{X:0>2}", .{opc[0]}); + opc = opc[1..]; + + switch (opc[0]) { + else => {}, + 0x38, 0x3A => { + try writer.print("{X:0>2}", .{opc[0]}); + opc = opc[1..]; + }, + } + + try writer.writeByte('.'); + try writer.writeAll(switch (encoding.data.mode) { + .vex_128, .vex_256 => "W0", + .vex_128_long, .vex_256_long => "W1", + else => unreachable, + }); + try writer.writeByte(' '); + }, } - for (encoding.opcode()) |byte| { + for (opc) |byte| { try writer.print("{x:0>2} ", .{byte}); } @@ -184,15 +221,16 @@ pub fn format( try writer.print("+{s} ", .{tag}); }, .m, .mi, .m1, .mc => try writer.print("/{d} ", .{encoding.modRmExt()}), - .mr, .rm, .rmi, .mri, .mrc => try writer.writeAll("/r "), + .mr, .rm, .rmi, .mri, .mrc, .rrm, .rrmi => try writer.writeAll("/r "), } switch (encoding.data.op_en) { - .i, .d, .zi, .oi, .mi, .rmi, .mri => { + .i, .d, .zi, .oi, .mi, .rmi, .mri, .rrmi => { const op = switch (encoding.data.op_en) { .i, .d => encoding.data.ops[0], .zi, .oi, .mi => encoding.data.ops[1], .rmi, .mri => encoding.data.ops[2], + .rrmi => encoding.data.ops[3], else => unreachable, }; const tag = switch (op) { @@ -207,7 +245,7 @@ pub fn format( }; try writer.print("{s} ", .{tag}); }, - .np, .fd, .td, .o, .m, .m1, .mc, .mr, .rm, .mrc => {}, + .np, .fd, .td, .o, .m, .m1, .mc, .mr, .rm, .mrc, .rrm => {}, } try writer.print("{s} ", .{@tagName(encoding.mnemonic)}); @@ -305,6 +343,8 @@ pub const Mnemonic = enum { // SSE4.1 roundss, roundsd, + // F16C + vcvtph2ps, vcvtps2ph, // zig fmt: on }; @@ -317,6 +357,7 @@ pub const OpEn = enum { fd, td, m1, mc, mi, mr, rm, rmi, mri, mrc, + rrm, rrmi, // zig fmt: on }; @@ -549,14 +590,21 @@ pub const Op = enum { pub const Mode = enum { none, short, - fpu, rex, long, + vex_128, + vex_128_long, + vex_256, + vex_256_long, +}; + +pub const Feature = enum { + none, + f16c, sse, - sse_long, sse2, - sse2_long, sse4_1, + x87, }; fn estimateInstructionLength(prefix: Prefix, encoding: Encoding, ops: []const Operand) usize { @@ -593,6 +641,7 @@ const mnemonic_to_encodings_map = init: { .opc = undefined, .modrm_ext = entry[4], .mode = entry[5], + .feature = entry[6], }; // TODO: use `@memcpy` for these. When I did that, I got a false positive // compile error for this copy happening at compile time. 
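For reference, the VEX.{128,256}.{66,F3,F2}.{0F,0F38,0F3A}.{W0,W1}
names printed by format above mirror how the fields are packed into
the prefix itself: the two-byte form (0xC5) only carries R, vvvv, L
and pp, with the opcode map fixed to 0F and W fixed to 0, so W = 1,
X, B, or the 0F38/0F3A maps force the three-byte form (0xC4); this is
the test in Vex.is3Byte in the encoder change below. A minimal
standalone sketch of the three-byte layout, assuming this parameter
order (R/X/B and vvvv are stored inverted in the encoding):

    // Illustrative sketch, not part of the patch.
    fn vex3(w: bool, r: bool, x: bool, b: bool, m: u5, v: u4, l: bool, p: u2) [3]u8 {
        return .{
            0b1100_0100, // three-byte VEX escape byte
            @as(u8, ~@boolToInt(r)) << 7 | // inverted ModRM.reg extension
                @as(u8, ~@boolToInt(x)) << 6 | // inverted SIB.index extension
                @as(u8, ~@boolToInt(b)) << 5 | // inverted base register extension
                @as(u8, m), // opcode map: 0F, 0F38 or 0F3A
            @as(u8, @boolToInt(w)) << 7 | // the W bit (64-bit operand size)
                @as(u8, ~v) << 3 | // inverted extra source register
                @as(u8, @boolToInt(l)) << 2 | // vector length: 0 = 128, 1 = 256
                @as(u8, p), // implied prefix: none, 66, F3 or F2
        };
    }
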
diff --git a/src/arch/x86_64/Lower.zig b/src/arch/x86_64/Lower.zig index 4289cfaf2a..9571f50e7c 100644 --- a/src/arch/x86_64/Lower.zig +++ b/src/arch/x86_64/Lower.zig @@ -133,6 +133,9 @@ pub fn lowerMir(lower: *Lower, inst: Mir.Inst) Error![]const Instruction { .subsd, .ucomisd, .xorpd, + + .vcvtph2ps, + .vcvtps2ph, => try lower.mirGeneric(inst), .cmps, diff --git a/src/arch/x86_64/Mir.zig b/src/arch/x86_64/Mir.zig index 6b2db1b696..c4e19fdc0e 100644 --- a/src/arch/x86_64/Mir.zig +++ b/src/arch/x86_64/Mir.zig @@ -247,6 +247,11 @@ pub const Inst = struct { /// Bitwise logical xor of packed double precision floating-point values xorpd, + /// Convert 16-bit floating-point values to single-precision floating-point values + vcvtph2ps, + /// Convert single-precision floating-point values to 16-bit floating-point values + vcvtps2ph, + /// Compare string operands cmps, /// Load string diff --git a/src/arch/x86_64/encoder.zig b/src/arch/x86_64/encoder.zig index 4c900697f5..94f4eb56d5 100644 --- a/src/arch/x86_64/encoder.zig +++ b/src/arch/x86_64/encoder.zig @@ -209,10 +209,19 @@ pub const Instruction = struct { const enc = inst.encoding; const data = enc.data; - try inst.encodeLegacyPrefixes(encoder); - try inst.encodeMandatoryPrefix(encoder); - try inst.encodeRexPrefix(encoder); - try inst.encodeOpcode(encoder); + switch (data.mode) { + .none, .short, .rex, .long => { + try inst.encodeLegacyPrefixes(encoder); + try inst.encodeMandatoryPrefix(encoder); + try inst.encodeRexPrefix(encoder); + try inst.encodeOpcode(encoder); + }, + .vex_128, .vex_128_long, .vex_256, .vex_256_long => { + try inst.encodeVexPrefix(encoder); + const opc = inst.encoding.opcode(); + try encoder.opcode_1byte(opc[opc.len - 1]); + }, + } switch (data.op_en) { .np, .o => {}, @@ -309,6 +318,7 @@ pub const Instruction = struct { } else null, + .rrm, .rrmi => unreachable, }; if (segment_override) |seg| { legacy.setSegmentOverride(seg); @@ -322,10 +332,7 @@ pub const Instruction = struct { var rex = Rex{}; rex.present = inst.encoding.data.mode == .rex; - switch (inst.encoding.data.mode) { - .long, .sse_long, .sse2_long => rex.w = true, - else => {}, - } + rex.w = inst.encoding.data.mode == .long; switch (op_en) { .np, .i, .zi, .fd, .td, .d => {}, @@ -346,11 +353,76 @@ pub const Instruction = struct { rex.b = b_x_op.isBaseExtended(); rex.x = b_x_op.isIndexExtended(); }, + .rrm, .rrmi => unreachable, } try encoder.rex(rex); } + fn encodeVexPrefix(inst: Instruction, encoder: anytype) !void { + const op_en = inst.encoding.data.op_en; + const opc = inst.encoding.opcode(); + const mand_pre = inst.encoding.mandatoryPrefix(); + + var vex = Vex{}; + + vex.w = switch (inst.encoding.data.mode) { + .vex_128, .vex_256 => false, + .vex_128_long, .vex_256_long => true, + else => unreachable, + }; + + switch (op_en) { + .np, .i, .zi, .fd, .td, .d => {}, + .o, .oi => vex.b = inst.ops[0].reg.isExtended(), + .m, .mi, .m1, .mc, .mr, .rm, .rmi, .mri, .mrc, .rrm, .rrmi => { + const r_op = switch (op_en) { + .rm, .rmi, .rrm, .rrmi => inst.ops[0], + .mr, .mri, .mrc => inst.ops[1], + else => .none, + }; + vex.r = r_op.isBaseExtended(); + + const b_x_op = switch (op_en) { + .rm, .rmi => inst.ops[1], + .m, .mi, .m1, .mc, .mr, .mri, .mrc => inst.ops[0], + .rrm, .rrmi => inst.ops[2], + else => unreachable, + }; + vex.b = b_x_op.isBaseExtended(); + vex.x = b_x_op.isIndexExtended(); + }, + } + + vex.l = switch (inst.encoding.data.mode) { + .vex_128, .vex_128_long => false, + .vex_256, .vex_256_long => true, + else => unreachable, + }; + + vex.p = if (mand_pre) 
|mand| switch (mand) { + 0x66 => .@"66", + 0xf2 => .f2, + 0xf3 => .f3, + else => unreachable, + } else .none; + + const leading: usize = if (mand_pre) |_| 1 else 0; + assert(opc[leading] == 0x0f); + vex.m = switch (opc[leading + 1]) { + else => .@"0f", + 0x38 => .@"0f38", + 0x3a => .@"0f3a", + }; + + switch (op_en) { + else => {}, + .rrm, .rrmi => vex.v = inst.ops[1].reg, + } + + try encoder.vex(vex); + } + fn encodeMandatoryPrefix(inst: Instruction, encoder: anytype) !void { const prefix = inst.encoding.mandatoryPrefix() orelse return; try encoder.opcode_1byte(prefix); @@ -562,17 +634,48 @@ fn Encoder(comptime T: type, comptime opts: Options) type { /// or one of reg, index, r/m, base, or opcode-reg might be extended. /// /// See struct `Rex` for a description of each field. - pub fn rex(self: Self, byte: Rex) !void { - if (!byte.present and !byte.isSet()) return; + pub fn rex(self: Self, fields: Rex) !void { + if (!fields.present and !fields.isSet()) return; + + var byte: u8 = 0b0100_0000; - var value: u8 = 0b0100_0000; + if (fields.w) byte |= 0b1000; + if (fields.r) byte |= 0b0100; + if (fields.x) byte |= 0b0010; + if (fields.b) byte |= 0b0001; - if (byte.w) value |= 0b1000; - if (byte.r) value |= 0b0100; - if (byte.x) value |= 0b0010; - if (byte.b) value |= 0b0001; + try self.writer.writeByte(byte); + } - try self.writer.writeByte(value); + /// Encodes a VEX prefix given all the fields + /// + /// See struct `Vex` for a description of each field. + pub fn vex(self: Self, fields: Vex) !void { + if (fields.is3Byte()) { + try self.writer.writeByte(0b1100_0100); + + try self.writer.writeByte( + @as(u8, ~@boolToInt(fields.r)) << 7 | + @as(u8, ~@boolToInt(fields.x)) << 6 | + @as(u8, ~@boolToInt(fields.b)) << 5 | + @as(u8, @enumToInt(fields.m)) << 0, + ); + + try self.writer.writeByte( + @as(u8, @boolToInt(fields.w)) << 7 | + @as(u8, ~fields.v.enc()) << 3 | + @as(u8, @boolToInt(fields.l)) << 2 | + @as(u8, @enumToInt(fields.p)) << 0, + ); + } else { + try self.writer.writeByte(0b1100_0101); + try self.writer.writeByte( + @as(u8, ~@boolToInt(fields.r)) << 7 | + @as(u8, ~fields.v.enc()) << 3 | + @as(u8, @boolToInt(fields.l)) << 2 | + @as(u8, @enumToInt(fields.p)) << 0, + ); + } } // ------ @@ -848,6 +951,31 @@ pub const Rex = struct { } }; +pub const Vex = struct { + w: bool = false, + r: bool = false, + x: bool = false, + b: bool = false, + l: bool = false, + p: enum(u2) { + none = 0b00, + @"66" = 0b01, + f3 = 0b10, + f2 = 0b11, + } = .none, + m: enum(u5) { + @"0f" = 0b0_0001, + @"0f38" = 0b0_0010, + @"0f3a" = 0b0_0011, + _, + } = .@"0f", + v: Register = .ymm0, + + pub fn is3Byte(vex: Vex) bool { + return vex.w or vex.x or vex.b or vex.m != .@"0f"; + } +}; + // Tests fn expectEqualHexStrings(expected: []const u8, given: []const u8, assembly: []const u8) !void { assert(expected.len > 0); diff --git a/src/arch/x86_64/encodings.zig b/src/arch/x86_64/encodings.zig index f87a110e99..52b8cc29d6 100644 --- a/src/arch/x86_64/encodings.zig +++ b/src/arch/x86_64/encodings.zig @@ -3,933 +3,939 @@ const Mnemonic = Encoding.Mnemonic; const OpEn = Encoding.OpEn; const Op = Encoding.Op; const Mode = Encoding.Mode; +const Feature = Encoding.Feature; const modrm_ext = u3; -pub const Entry = struct { Mnemonic, OpEn, []const Op, []const u8, modrm_ext, Mode }; +pub const Entry = struct { Mnemonic, OpEn, []const Op, []const u8, modrm_ext, Mode, Feature }; // TODO move this into a .zon file when Zig is capable of importing .zon files // zig fmt: off pub const table = [_]Entry{ // General-purpose - .{ .adc, .zi, &.{ 
.al, .imm8 }, &.{ 0x14 }, 0, .none }, - .{ .adc, .zi, &.{ .ax, .imm16 }, &.{ 0x15 }, 0, .none }, - .{ .adc, .zi, &.{ .eax, .imm32 }, &.{ 0x15 }, 0, .none }, - .{ .adc, .zi, &.{ .rax, .imm32s }, &.{ 0x15 }, 0, .long }, - .{ .adc, .mi, &.{ .rm8, .imm8 }, &.{ 0x80 }, 2, .none }, - .{ .adc, .mi, &.{ .rm8, .imm8 }, &.{ 0x80 }, 2, .rex }, - .{ .adc, .mi, &.{ .rm16, .imm16 }, &.{ 0x81 }, 2, .none }, - .{ .adc, .mi, &.{ .rm32, .imm32 }, &.{ 0x81 }, 2, .none }, - .{ .adc, .mi, &.{ .rm64, .imm32s }, &.{ 0x81 }, 2, .long }, - .{ .adc, .mi, &.{ .rm16, .imm8s }, &.{ 0x83 }, 2, .none }, - .{ .adc, .mi, &.{ .rm32, .imm8s }, &.{ 0x83 }, 2, .none }, - .{ .adc, .mi, &.{ .rm64, .imm8s }, &.{ 0x83 }, 2, .long }, - .{ .adc, .mr, &.{ .rm8, .r8 }, &.{ 0x10 }, 0, .none }, - .{ .adc, .mr, &.{ .rm8, .r8 }, &.{ 0x10 }, 0, .rex }, - .{ .adc, .mr, &.{ .rm16, .r16 }, &.{ 0x11 }, 0, .none }, - .{ .adc, .mr, &.{ .rm32, .r32 }, &.{ 0x11 }, 0, .none }, - .{ .adc, .mr, &.{ .rm64, .r64 }, &.{ 0x11 }, 0, .long }, - .{ .adc, .rm, &.{ .r8, .rm8 }, &.{ 0x12 }, 0, .none }, - .{ .adc, .rm, &.{ .r8, .rm8 }, &.{ 0x12 }, 0, .rex }, - .{ .adc, .rm, &.{ .r16, .rm16 }, &.{ 0x13 }, 0, .none }, - .{ .adc, .rm, &.{ .r32, .rm32 }, &.{ 0x13 }, 0, .none }, - .{ .adc, .rm, &.{ .r64, .rm64 }, &.{ 0x13 }, 0, .long }, - - .{ .add, .zi, &.{ .al, .imm8 }, &.{ 0x04 }, 0, .none }, - .{ .add, .zi, &.{ .ax, .imm16 }, &.{ 0x05 }, 0, .none }, - .{ .add, .zi, &.{ .eax, .imm32 }, &.{ 0x05 }, 0, .none }, - .{ .add, .zi, &.{ .rax, .imm32s }, &.{ 0x05 }, 0, .long }, - .{ .add, .mi, &.{ .rm8, .imm8 }, &.{ 0x80 }, 0, .none }, - .{ .add, .mi, &.{ .rm8, .imm8 }, &.{ 0x80 }, 0, .rex }, - .{ .add, .mi, &.{ .rm16, .imm16 }, &.{ 0x81 }, 0, .none }, - .{ .add, .mi, &.{ .rm32, .imm32 }, &.{ 0x81 }, 0, .none }, - .{ .add, .mi, &.{ .rm64, .imm32s }, &.{ 0x81 }, 0, .long }, - .{ .add, .mi, &.{ .rm16, .imm8s }, &.{ 0x83 }, 0, .none }, - .{ .add, .mi, &.{ .rm32, .imm8s }, &.{ 0x83 }, 0, .none }, - .{ .add, .mi, &.{ .rm64, .imm8s }, &.{ 0x83 }, 0, .long }, - .{ .add, .mr, &.{ .rm8, .r8 }, &.{ 0x00 }, 0, .none }, - .{ .add, .mr, &.{ .rm8, .r8 }, &.{ 0x00 }, 0, .rex }, - .{ .add, .mr, &.{ .rm16, .r16 }, &.{ 0x01 }, 0, .none }, - .{ .add, .mr, &.{ .rm32, .r32 }, &.{ 0x01 }, 0, .none }, - .{ .add, .mr, &.{ .rm64, .r64 }, &.{ 0x01 }, 0, .long }, - .{ .add, .rm, &.{ .r8, .rm8 }, &.{ 0x02 }, 0, .none }, - .{ .add, .rm, &.{ .r8, .rm8 }, &.{ 0x02 }, 0, .rex }, - .{ .add, .rm, &.{ .r16, .rm16 }, &.{ 0x03 }, 0, .none }, - .{ .add, .rm, &.{ .r32, .rm32 }, &.{ 0x03 }, 0, .none }, - .{ .add, .rm, &.{ .r64, .rm64 }, &.{ 0x03 }, 0, .long }, - - .{ .@"and", .zi, &.{ .al, .imm8 }, &.{ 0x24 }, 0, .none }, - .{ .@"and", .zi, &.{ .ax, .imm16 }, &.{ 0x25 }, 0, .none }, - .{ .@"and", .zi, &.{ .eax, .imm32 }, &.{ 0x25 }, 0, .none }, - .{ .@"and", .zi, &.{ .rax, .imm32s }, &.{ 0x25 }, 0, .long }, - .{ .@"and", .mi, &.{ .rm8, .imm8 }, &.{ 0x80 }, 4, .none }, - .{ .@"and", .mi, &.{ .rm8, .imm8 }, &.{ 0x80 }, 4, .rex }, - .{ .@"and", .mi, &.{ .rm16, .imm16 }, &.{ 0x81 }, 4, .none }, - .{ .@"and", .mi, &.{ .rm32, .imm32 }, &.{ 0x81 }, 4, .none }, - .{ .@"and", .mi, &.{ .rm64, .imm32s }, &.{ 0x81 }, 4, .long }, - .{ .@"and", .mi, &.{ .rm16, .imm8s }, &.{ 0x83 }, 4, .none }, - .{ .@"and", .mi, &.{ .rm32, .imm8s }, &.{ 0x83 }, 4, .none }, - .{ .@"and", .mi, &.{ .rm64, .imm8s }, &.{ 0x83 }, 4, .long }, - .{ .@"and", .mr, &.{ .rm8, .r8 }, &.{ 0x20 }, 0, .none }, - .{ .@"and", .mr, &.{ .rm8, .r8 }, &.{ 0x20 }, 0, .rex }, - .{ .@"and", .mr, &.{ .rm16, .r16 }, &.{ 0x21 }, 0, .none }, - .{ .@"and", .mr, &.{ 
.rm32, .r32 }, &.{ 0x21 }, 0, .none }, - .{ .@"and", .mr, &.{ .rm64, .r64 }, &.{ 0x21 }, 0, .long }, - .{ .@"and", .rm, &.{ .r8, .rm8 }, &.{ 0x22 }, 0, .none }, - .{ .@"and", .rm, &.{ .r8, .rm8 }, &.{ 0x22 }, 0, .rex }, - .{ .@"and", .rm, &.{ .r16, .rm16 }, &.{ 0x23 }, 0, .none }, - .{ .@"and", .rm, &.{ .r32, .rm32 }, &.{ 0x23 }, 0, .none }, - .{ .@"and", .rm, &.{ .r64, .rm64 }, &.{ 0x23 }, 0, .long }, - - .{ .bsf, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0xbc }, 0, .none }, - .{ .bsf, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0xbc }, 0, .none }, - .{ .bsf, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0xbc }, 0, .long }, - - .{ .bsr, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0xbd }, 0, .none }, - .{ .bsr, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0xbd }, 0, .none }, - .{ .bsr, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0xbd }, 0, .long }, - - .{ .bswap, .o, &.{ .r32 }, &.{ 0x0f, 0xc8 }, 0, .none }, - .{ .bswap, .o, &.{ .r64 }, &.{ 0x0f, 0xc8 }, 0, .long }, - - .{ .bt, .mr, &.{ .rm16, .r16 }, &.{ 0x0f, 0xa3 }, 0, .none }, - .{ .bt, .mr, &.{ .rm32, .r32 }, &.{ 0x0f, 0xa3 }, 0, .none }, - .{ .bt, .mr, &.{ .rm64, .r64 }, &.{ 0x0f, 0xa3 }, 0, .long }, - .{ .bt, .mi, &.{ .rm16, .imm8 }, &.{ 0x0f, 0xba }, 4, .none }, - .{ .bt, .mi, &.{ .rm32, .imm8 }, &.{ 0x0f, 0xba }, 4, .none }, - .{ .bt, .mi, &.{ .rm64, .imm8 }, &.{ 0x0f, 0xba }, 4, .long }, - - .{ .btc, .mr, &.{ .rm16, .r16 }, &.{ 0x0f, 0xbb }, 0, .none }, - .{ .btc, .mr, &.{ .rm32, .r32 }, &.{ 0x0f, 0xbb }, 0, .none }, - .{ .btc, .mr, &.{ .rm64, .r64 }, &.{ 0x0f, 0xbb }, 0, .long }, - .{ .btc, .mi, &.{ .rm16, .imm8 }, &.{ 0x0f, 0xba }, 7, .none }, - .{ .btc, .mi, &.{ .rm32, .imm8 }, &.{ 0x0f, 0xba }, 7, .none }, - .{ .btc, .mi, &.{ .rm64, .imm8 }, &.{ 0x0f, 0xba }, 7, .long }, - - .{ .btr, .mr, &.{ .rm16, .r16 }, &.{ 0x0f, 0xb3 }, 0, .none }, - .{ .btr, .mr, &.{ .rm32, .r32 }, &.{ 0x0f, 0xb3 }, 0, .none }, - .{ .btr, .mr, &.{ .rm64, .r64 }, &.{ 0x0f, 0xb3 }, 0, .long }, - .{ .btr, .mi, &.{ .rm16, .imm8 }, &.{ 0x0f, 0xba }, 6, .none }, - .{ .btr, .mi, &.{ .rm32, .imm8 }, &.{ 0x0f, 0xba }, 6, .none }, - .{ .btr, .mi, &.{ .rm64, .imm8 }, &.{ 0x0f, 0xba }, 6, .long }, - - .{ .bts, .mr, &.{ .rm16, .r16 }, &.{ 0x0f, 0xab }, 0, .none }, - .{ .bts, .mr, &.{ .rm32, .r32 }, &.{ 0x0f, 0xab }, 0, .none }, - .{ .bts, .mr, &.{ .rm64, .r64 }, &.{ 0x0f, 0xab }, 0, .long }, - .{ .bts, .mi, &.{ .rm16, .imm8 }, &.{ 0x0f, 0xba }, 5, .none }, - .{ .bts, .mi, &.{ .rm32, .imm8 }, &.{ 0x0f, 0xba }, 5, .none }, - .{ .bts, .mi, &.{ .rm64, .imm8 }, &.{ 0x0f, 0xba }, 5, .long }, + .{ .adc, .zi, &.{ .al, .imm8 }, &.{ 0x14 }, 0, .none, .none }, + .{ .adc, .zi, &.{ .ax, .imm16 }, &.{ 0x15 }, 0, .none, .none }, + .{ .adc, .zi, &.{ .eax, .imm32 }, &.{ 0x15 }, 0, .none, .none }, + .{ .adc, .zi, &.{ .rax, .imm32s }, &.{ 0x15 }, 0, .long, .none }, + .{ .adc, .mi, &.{ .rm8, .imm8 }, &.{ 0x80 }, 2, .none, .none }, + .{ .adc, .mi, &.{ .rm8, .imm8 }, &.{ 0x80 }, 2, .rex, .none }, + .{ .adc, .mi, &.{ .rm16, .imm16 }, &.{ 0x81 }, 2, .none, .none }, + .{ .adc, .mi, &.{ .rm32, .imm32 }, &.{ 0x81 }, 2, .none, .none }, + .{ .adc, .mi, &.{ .rm64, .imm32s }, &.{ 0x81 }, 2, .long, .none }, + .{ .adc, .mi, &.{ .rm16, .imm8s }, &.{ 0x83 }, 2, .none, .none }, + .{ .adc, .mi, &.{ .rm32, .imm8s }, &.{ 0x83 }, 2, .none, .none }, + .{ .adc, .mi, &.{ .rm64, .imm8s }, &.{ 0x83 }, 2, .long, .none }, + .{ .adc, .mr, &.{ .rm8, .r8 }, &.{ 0x10 }, 0, .none, .none }, + .{ .adc, .mr, &.{ .rm8, .r8 }, &.{ 0x10 }, 0, .rex, .none }, + .{ .adc, .mr, &.{ .rm16, .r16 }, &.{ 0x11 }, 0, .none, .none }, + .{ .adc, .mr, &.{ .rm32, .r32 }, &.{ 0x11 }, 0, .none, 
.none }, + .{ .adc, .mr, &.{ .rm64, .r64 }, &.{ 0x11 }, 0, .long, .none }, + .{ .adc, .rm, &.{ .r8, .rm8 }, &.{ 0x12 }, 0, .none, .none }, + .{ .adc, .rm, &.{ .r8, .rm8 }, &.{ 0x12 }, 0, .rex, .none }, + .{ .adc, .rm, &.{ .r16, .rm16 }, &.{ 0x13 }, 0, .none, .none }, + .{ .adc, .rm, &.{ .r32, .rm32 }, &.{ 0x13 }, 0, .none, .none }, + .{ .adc, .rm, &.{ .r64, .rm64 }, &.{ 0x13 }, 0, .long, .none }, + + .{ .add, .zi, &.{ .al, .imm8 }, &.{ 0x04 }, 0, .none, .none }, + .{ .add, .zi, &.{ .ax, .imm16 }, &.{ 0x05 }, 0, .none, .none }, + .{ .add, .zi, &.{ .eax, .imm32 }, &.{ 0x05 }, 0, .none, .none }, + .{ .add, .zi, &.{ .rax, .imm32s }, &.{ 0x05 }, 0, .long, .none }, + .{ .add, .mi, &.{ .rm8, .imm8 }, &.{ 0x80 }, 0, .none, .none }, + .{ .add, .mi, &.{ .rm8, .imm8 }, &.{ 0x80 }, 0, .rex, .none }, + .{ .add, .mi, &.{ .rm16, .imm16 }, &.{ 0x81 }, 0, .none, .none }, + .{ .add, .mi, &.{ .rm32, .imm32 }, &.{ 0x81 }, 0, .none, .none }, + .{ .add, .mi, &.{ .rm64, .imm32s }, &.{ 0x81 }, 0, .long, .none }, + .{ .add, .mi, &.{ .rm16, .imm8s }, &.{ 0x83 }, 0, .none, .none }, + .{ .add, .mi, &.{ .rm32, .imm8s }, &.{ 0x83 }, 0, .none, .none }, + .{ .add, .mi, &.{ .rm64, .imm8s }, &.{ 0x83 }, 0, .long, .none }, + .{ .add, .mr, &.{ .rm8, .r8 }, &.{ 0x00 }, 0, .none, .none }, + .{ .add, .mr, &.{ .rm8, .r8 }, &.{ 0x00 }, 0, .rex, .none }, + .{ .add, .mr, &.{ .rm16, .r16 }, &.{ 0x01 }, 0, .none, .none }, + .{ .add, .mr, &.{ .rm32, .r32 }, &.{ 0x01 }, 0, .none, .none }, + .{ .add, .mr, &.{ .rm64, .r64 }, &.{ 0x01 }, 0, .long, .none }, + .{ .add, .rm, &.{ .r8, .rm8 }, &.{ 0x02 }, 0, .none, .none }, + .{ .add, .rm, &.{ .r8, .rm8 }, &.{ 0x02 }, 0, .rex, .none }, + .{ .add, .rm, &.{ .r16, .rm16 }, &.{ 0x03 }, 0, .none, .none }, + .{ .add, .rm, &.{ .r32, .rm32 }, &.{ 0x03 }, 0, .none, .none }, + .{ .add, .rm, &.{ .r64, .rm64 }, &.{ 0x03 }, 0, .long, .none }, + + .{ .@"and", .zi, &.{ .al, .imm8 }, &.{ 0x24 }, 0, .none, .none }, + .{ .@"and", .zi, &.{ .ax, .imm16 }, &.{ 0x25 }, 0, .none, .none }, + .{ .@"and", .zi, &.{ .eax, .imm32 }, &.{ 0x25 }, 0, .none, .none }, + .{ .@"and", .zi, &.{ .rax, .imm32s }, &.{ 0x25 }, 0, .long, .none }, + .{ .@"and", .mi, &.{ .rm8, .imm8 }, &.{ 0x80 }, 4, .none, .none }, + .{ .@"and", .mi, &.{ .rm8, .imm8 }, &.{ 0x80 }, 4, .rex, .none }, + .{ .@"and", .mi, &.{ .rm16, .imm16 }, &.{ 0x81 }, 4, .none, .none }, + .{ .@"and", .mi, &.{ .rm32, .imm32 }, &.{ 0x81 }, 4, .none, .none }, + .{ .@"and", .mi, &.{ .rm64, .imm32s }, &.{ 0x81 }, 4, .long, .none }, + .{ .@"and", .mi, &.{ .rm16, .imm8s }, &.{ 0x83 }, 4, .none, .none }, + .{ .@"and", .mi, &.{ .rm32, .imm8s }, &.{ 0x83 }, 4, .none, .none }, + .{ .@"and", .mi, &.{ .rm64, .imm8s }, &.{ 0x83 }, 4, .long, .none }, + .{ .@"and", .mr, &.{ .rm8, .r8 }, &.{ 0x20 }, 0, .none, .none }, + .{ .@"and", .mr, &.{ .rm8, .r8 }, &.{ 0x20 }, 0, .rex, .none }, + .{ .@"and", .mr, &.{ .rm16, .r16 }, &.{ 0x21 }, 0, .none, .none }, + .{ .@"and", .mr, &.{ .rm32, .r32 }, &.{ 0x21 }, 0, .none, .none }, + .{ .@"and", .mr, &.{ .rm64, .r64 }, &.{ 0x21 }, 0, .long, .none }, + .{ .@"and", .rm, &.{ .r8, .rm8 }, &.{ 0x22 }, 0, .none, .none }, + .{ .@"and", .rm, &.{ .r8, .rm8 }, &.{ 0x22 }, 0, .rex, .none }, + .{ .@"and", .rm, &.{ .r16, .rm16 }, &.{ 0x23 }, 0, .none, .none }, + .{ .@"and", .rm, &.{ .r32, .rm32 }, &.{ 0x23 }, 0, .none, .none }, + .{ .@"and", .rm, &.{ .r64, .rm64 }, &.{ 0x23 }, 0, .long, .none }, + + .{ .bsf, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0xbc }, 0, .none, .none }, + .{ .bsf, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0xbc }, 0, .none, .none }, + .{ .bsf, .rm, &.{ .r64, 
.rm64 }, &.{ 0x0f, 0xbc }, 0, .long, .none }, + + .{ .bsr, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0xbd }, 0, .none, .none }, + .{ .bsr, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0xbd }, 0, .none, .none }, + .{ .bsr, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0xbd }, 0, .long, .none }, + + .{ .bswap, .o, &.{ .r32 }, &.{ 0x0f, 0xc8 }, 0, .none, .none }, + .{ .bswap, .o, &.{ .r64 }, &.{ 0x0f, 0xc8 }, 0, .long, .none }, + + .{ .bt, .mr, &.{ .rm16, .r16 }, &.{ 0x0f, 0xa3 }, 0, .none, .none }, + .{ .bt, .mr, &.{ .rm32, .r32 }, &.{ 0x0f, 0xa3 }, 0, .none, .none }, + .{ .bt, .mr, &.{ .rm64, .r64 }, &.{ 0x0f, 0xa3 }, 0, .long, .none }, + .{ .bt, .mi, &.{ .rm16, .imm8 }, &.{ 0x0f, 0xba }, 4, .none, .none }, + .{ .bt, .mi, &.{ .rm32, .imm8 }, &.{ 0x0f, 0xba }, 4, .none, .none }, + .{ .bt, .mi, &.{ .rm64, .imm8 }, &.{ 0x0f, 0xba }, 4, .long, .none }, + + .{ .btc, .mr, &.{ .rm16, .r16 }, &.{ 0x0f, 0xbb }, 0, .none, .none }, + .{ .btc, .mr, &.{ .rm32, .r32 }, &.{ 0x0f, 0xbb }, 0, .none, .none }, + .{ .btc, .mr, &.{ .rm64, .r64 }, &.{ 0x0f, 0xbb }, 0, .long, .none }, + .{ .btc, .mi, &.{ .rm16, .imm8 }, &.{ 0x0f, 0xba }, 7, .none, .none }, + .{ .btc, .mi, &.{ .rm32, .imm8 }, &.{ 0x0f, 0xba }, 7, .none, .none }, + .{ .btc, .mi, &.{ .rm64, .imm8 }, &.{ 0x0f, 0xba }, 7, .long, .none }, + + .{ .btr, .mr, &.{ .rm16, .r16 }, &.{ 0x0f, 0xb3 }, 0, .none, .none }, + .{ .btr, .mr, &.{ .rm32, .r32 }, &.{ 0x0f, 0xb3 }, 0, .none, .none }, + .{ .btr, .mr, &.{ .rm64, .r64 }, &.{ 0x0f, 0xb3 }, 0, .long, .none }, + .{ .btr, .mi, &.{ .rm16, .imm8 }, &.{ 0x0f, 0xba }, 6, .none, .none }, + .{ .btr, .mi, &.{ .rm32, .imm8 }, &.{ 0x0f, 0xba }, 6, .none, .none }, + .{ .btr, .mi, &.{ .rm64, .imm8 }, &.{ 0x0f, 0xba }, 6, .long, .none }, + + .{ .bts, .mr, &.{ .rm16, .r16 }, &.{ 0x0f, 0xab }, 0, .none, .none }, + .{ .bts, .mr, &.{ .rm32, .r32 }, &.{ 0x0f, 0xab }, 0, .none, .none }, + .{ .bts, .mr, &.{ .rm64, .r64 }, &.{ 0x0f, 0xab }, 0, .long, .none }, + .{ .bts, .mi, &.{ .rm16, .imm8 }, &.{ 0x0f, 0xba }, 5, .none, .none }, + .{ .bts, .mi, &.{ .rm32, .imm8 }, &.{ 0x0f, 0xba }, 5, .none, .none }, + .{ .bts, .mi, &.{ .rm64, .imm8 }, &.{ 0x0f, 0xba }, 5, .long, .none }, // This is M encoding according to Intel, but D makes more sense here. 
-    .{ .call, .d, &.{ .rel32 }, &.{ 0xe8 }, 0, .none },
-    .{ .call, .m, &.{ .rm64 }, &.{ 0xff }, 2, .none },
-
-    .{ .cbw, .np, &.{ .o16 }, &.{ 0x98 }, 0, .none },
-    .{ .cwde, .np, &.{ .o32 }, &.{ 0x98 }, 0, .none },
-    .{ .cdqe, .np, &.{ .o64 }, &.{ 0x98 }, 0, .long },
-
-    .{ .cwd, .np, &.{ .o16 }, &.{ 0x99 }, 0, .none },
-    .{ .cdq, .np, &.{ .o32 }, &.{ 0x99 }, 0, .none },
-    .{ .cqo, .np, &.{ .o64 }, &.{ 0x99 }, 0, .long },
-
-    .{ .cmova, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x47 }, 0, .none },
-    .{ .cmova, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x47 }, 0, .none },
-    .{ .cmova, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x47 }, 0, .long },
-    .{ .cmovae, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x43 }, 0, .none },
-    .{ .cmovae, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x43 }, 0, .none },
-    .{ .cmovae, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x43 }, 0, .long },
-    .{ .cmovb, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x42 }, 0, .none },
-    .{ .cmovb, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x42 }, 0, .none },
-    .{ .cmovb, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x42 }, 0, .long },
-    .{ .cmovbe, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x46 }, 0, .none },
-    .{ .cmovbe, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x46 }, 0, .none },
-    .{ .cmovbe, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x46 }, 0, .long },
-    .{ .cmovc, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x42 }, 0, .none },
-    .{ .cmovc, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x42 }, 0, .none },
-    .{ .cmovc, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x42 }, 0, .long },
-    .{ .cmove, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x44 }, 0, .none },
-    .{ .cmove, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x44 }, 0, .none },
-    .{ .cmove, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x44 }, 0, .long },
-    .{ .cmovg, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x4f }, 0, .none },
-    .{ .cmovg, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x4f }, 0, .none },
-    .{ .cmovg, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x4f }, 0, .long },
-    .{ .cmovge, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x4d }, 0, .none },
-    .{ .cmovge, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x4d }, 0, .none },
-    .{ .cmovge, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x4d }, 0, .long },
-    .{ .cmovl, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x4c }, 0, .none },
-    .{ .cmovl, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x4c }, 0, .none },
-    .{ .cmovl, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x4c }, 0, .long },
-    .{ .cmovle, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x4e }, 0, .none },
-    .{ .cmovle, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x4e }, 0, .none },
-    .{ .cmovle, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x4e }, 0, .long },
-    .{ .cmovna, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x46 }, 0, .none },
-    .{ .cmovna, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x46 }, 0, .none },
-    .{ .cmovna, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x46 }, 0, .long },
-    .{ .cmovnae, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x42 }, 0, .none },
-    .{ .cmovnae, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x42 }, 0, .none },
-    .{ .cmovnae, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x42 }, 0, .long },
-    .{ .cmovnb, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x43 }, 0, .none },
-    .{ .cmovnb, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x43 }, 0, .none },
-    .{ .cmovnb, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x43 }, 0, .long },
-    .{ .cmovnbe, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x47 }, 0, .none },
-    .{ .cmovnbe, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x47 }, 0, .none },
-    .{ .cmovnbe, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x47 }, 0, .long },
-    .{ .cmovnc, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x43 }, 0, .none },
-    .{ .cmovnc, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x43 }, 0, .none },
-    .{ .cmovnc, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x43 }, 0, .long },
-    .{ .cmovne, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x45 }, 0, .none },
-    .{ .cmovne, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x45 }, 0, .none },
-    .{ .cmovne, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x45 }, 0, .long },
-    .{ .cmovng, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x4e }, 0, .none },
-    .{ .cmovng, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x4e }, 0, .none },
-    .{ .cmovng, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x4e }, 0, .long },
-    .{ .cmovnge, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x4c }, 0, .none },
-    .{ .cmovnge, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x4c }, 0, .none },
-    .{ .cmovnge, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x4c }, 0, .long },
-    .{ .cmovnl, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x4d }, 0, .none },
-    .{ .cmovnl, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x4d }, 0, .none },
-    .{ .cmovnl, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x4d }, 0, .long },
-    .{ .cmovnle, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x4f }, 0, .none },
-    .{ .cmovnle, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x4f }, 0, .none },
-    .{ .cmovnle, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x4f }, 0, .long },
-    .{ .cmovno, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x41 }, 0, .none },
-    .{ .cmovno, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x41 }, 0, .none },
-    .{ .cmovno, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x41 }, 0, .long },
-    .{ .cmovnp, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x4b }, 0, .none },
-    .{ .cmovnp, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x4b }, 0, .none },
-    .{ .cmovnp, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x4b }, 0, .long },
-    .{ .cmovns, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x49 }, 0, .none },
-    .{ .cmovns, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x49 }, 0, .none },
-    .{ .cmovns, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x49 }, 0, .long },
-    .{ .cmovnz, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x45 }, 0, .none },
-    .{ .cmovnz, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x45 }, 0, .none },
-    .{ .cmovnz, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x45 }, 0, .long },
-    .{ .cmovo, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x40 }, 0, .none },
-    .{ .cmovo, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x40 }, 0, .none },
-    .{ .cmovo, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x40 }, 0, .long },
-    .{ .cmovp, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x4a }, 0, .none },
-    .{ .cmovp, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x4a }, 0, .none },
-    .{ .cmovp, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x4a }, 0, .long },
-    .{ .cmovpe, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x4a }, 0, .none },
-    .{ .cmovpe, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x4a }, 0, .none },
-    .{ .cmovpe, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x4a }, 0, .long },
-    .{ .cmovpo, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x4b }, 0, .none },
-    .{ .cmovpo, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x4b }, 0, .none },
-    .{ .cmovpo, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x4b }, 0, .long },
-    .{ .cmovs, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x48 }, 0, .none },
-    .{ .cmovs, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x48 }, 0, .none },
-    .{ .cmovs, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x48 }, 0, .long },
-    .{ .cmovz, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x44 }, 0, .none },
-    .{ .cmovz, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x44 }, 0, .none },
-    .{ .cmovz, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x44 }, 0, .long },
-
-    .{ .cmp, .zi, &.{ .al, .imm8 }, &.{ 0x3c }, 0, .none },
-    .{ .cmp, .zi, &.{ .ax, .imm16 }, &.{ 0x3d }, 0, .none },
-    .{ .cmp, .zi, &.{ .eax, .imm32 }, &.{ 0x3d }, 0, .none },
-    .{ .cmp, .zi, &.{ .rax, .imm32s }, &.{ 0x3d }, 0, .long },
-    .{ .cmp, .mi, &.{ .rm8, .imm8 }, &.{ 0x80 }, 7, .none },
-    .{ .cmp, .mi, &.{ .rm8, .imm8 }, &.{ 0x80 }, 7, .rex },
-    .{ .cmp, .mi, &.{ .rm16, .imm16 }, &.{ 0x81 }, 7, .none },
-    .{ .cmp, .mi, &.{ .rm32, .imm32 }, &.{ 0x81 }, 7, .none },
-    .{ .cmp, .mi, &.{ .rm64, .imm32s }, &.{ 0x81 }, 7, .long },
-    .{ .cmp, .mi, &.{ .rm16, .imm8s }, &.{ 0x83 }, 7, .none },
-    .{ .cmp, .mi, &.{ .rm32, .imm8s }, &.{ 0x83 }, 7, .none },
-    .{ .cmp, .mi, &.{ .rm64, .imm8s }, &.{ 0x83 }, 7, .long },
-    .{ .cmp, .mr, &.{ .rm8, .r8 }, &.{ 0x38 }, 0, .none },
-    .{ .cmp, .mr, &.{ .rm8, .r8 }, &.{ 0x38 }, 0, .rex },
-    .{ .cmp, .mr, &.{ .rm16, .r16 }, &.{ 0x39 }, 0, .none },
-    .{ .cmp, .mr, &.{ .rm32, .r32 }, &.{ 0x39 }, 0, .none },
-    .{ .cmp, .mr, &.{ .rm64, .r64 }, &.{ 0x39 }, 0, .long },
-    .{ .cmp, .rm, &.{ .r8, .rm8 }, &.{ 0x3a }, 0, .none },
-    .{ .cmp, .rm, &.{ .r8, .rm8 }, &.{ 0x3a }, 0, .rex },
-    .{ .cmp, .rm, &.{ .r16, .rm16 }, &.{ 0x3b }, 0, .none },
-    .{ .cmp, .rm, &.{ .r32, .rm32 }, &.{ 0x3b }, 0, .none },
-    .{ .cmp, .rm, &.{ .r64, .rm64 }, &.{ 0x3b }, 0, .long },
-
-    .{ .cmps, .np, &.{ .m8, .m8 }, &.{ 0xa6 }, 0, .none },
-    .{ .cmps, .np, &.{ .m16, .m16 }, &.{ 0xa7 }, 0, .none },
-    .{ .cmps, .np, &.{ .m32, .m32 }, &.{ 0xa7 }, 0, .none },
-    .{ .cmps, .np, &.{ .m64, .m64 }, &.{ 0xa7 }, 0, .long },
-
-    .{ .cmpsb, .np, &.{}, &.{ 0xa6 }, 0, .none },
-    .{ .cmpsw, .np, &.{}, &.{ 0xa7 }, 0, .short },
-    .{ .cmpsd, .np, &.{}, &.{ 0xa7 }, 0, .none },
-    .{ .cmpsq, .np, &.{}, &.{ 0xa7 }, 0, .long },
-
-    .{ .cmpxchg, .mr, &.{ .rm8, .r8 }, &.{ 0x0f, 0xb0 }, 0, .none },
-    .{ .cmpxchg, .mr, &.{ .rm8, .r8 }, &.{ 0x0f, 0xb0 }, 0, .rex },
-    .{ .cmpxchg, .mr, &.{ .rm16, .r16 }, &.{ 0x0f, 0xb1 }, 0, .none },
-    .{ .cmpxchg, .mr, &.{ .rm32, .r32 }, &.{ 0x0f, 0xb1 }, 0, .none },
-    .{ .cmpxchg, .mr, &.{ .rm64, .r64 }, &.{ 0x0f, 0xb1 }, 0, .long },
-
-    .{ .cmpxchg8b, .m, &.{ .m64 }, &.{ 0x0f, 0xc7 }, 1, .none },
-    .{ .cmpxchg16b, .m, &.{ .m128 }, &.{ 0x0f, 0xc7 }, 1, .long },
-
-    .{ .div, .m, &.{ .rm8 }, &.{ 0xf6 }, 6, .none },
-    .{ .div, .m, &.{ .rm8 }, &.{ 0xf6 }, 6, .rex },
-    .{ .div, .m, &.{ .rm16 }, &.{ 0xf7 }, 6, .none },
-    .{ .div, .m, &.{ .rm32 }, &.{ 0xf7 }, 6, .none },
-    .{ .div, .m, &.{ .rm64 }, &.{ 0xf7 }, 6, .long },
-
-    .{ .fisttp, .m, &.{ .m16 }, &.{ 0xdf }, 1, .fpu },
-    .{ .fisttp, .m, &.{ .m32 }, &.{ 0xdb }, 1, .fpu },
-    .{ .fisttp, .m, &.{ .m64 }, &.{ 0xdd }, 1, .fpu },
-
-    .{ .fld, .m, &.{ .m32 }, &.{ 0xd9 }, 0, .fpu },
-    .{ .fld, .m, &.{ .m64 }, &.{ 0xdd }, 0, .fpu },
-    .{ .fld, .m, &.{ .m80 }, &.{ 0xdb }, 5, .fpu },
-
-    .{ .idiv, .m, &.{ .rm8 }, &.{ 0xf6 }, 7, .none },
-    .{ .idiv, .m, &.{ .rm8 }, &.{ 0xf6 }, 7, .rex },
-    .{ .idiv, .m, &.{ .rm16 }, &.{ 0xf7 }, 7, .none },
-    .{ .idiv, .m, &.{ .rm32 }, &.{ 0xf7 }, 7, .none },
-    .{ .idiv, .m, &.{ .rm64 }, &.{ 0xf7 }, 7, .long },
-
-    .{ .imul, .m, &.{ .rm8 }, &.{ 0xf6 }, 5, .none },
-    .{ .imul, .m, &.{ .rm8 }, &.{ 0xf6 }, 5, .rex },
-    .{ .imul, .m, &.{ .rm16, }, &.{ 0xf7 }, 5, .none },
-    .{ .imul, .m, &.{ .rm32, }, &.{ 0xf7 }, 5, .none },
-    .{ .imul, .m, &.{ .rm64, }, &.{ 0xf7 }, 5, .long },
-    .{ .imul, .rm, &.{ .r16, .rm16, }, &.{ 0x0f, 0xaf }, 0, .none },
-    .{ .imul, .rm, &.{ .r32, .rm32, }, &.{ 0x0f, 0xaf }, 0, .none },
-    .{ .imul, .rm, &.{ .r64, .rm64, }, &.{ 0x0f, 0xaf }, 0, .long },
-    .{ .imul, .rmi, &.{ .r16, .rm16, .imm8s }, &.{ 0x6b }, 0, .none },
-    .{ .imul, .rmi, &.{ .r32, .rm32, .imm8s }, &.{ 0x6b }, 0, .none },
-    .{ .imul, .rmi, &.{ .r64, .rm64, .imm8s }, &.{ 0x6b }, 0, .long },
-    .{ .imul, .rmi, &.{ .r16, .rm16, .imm16 }, &.{ 0x69 }, 0, .none },
-    .{ .imul, .rmi, &.{ .r32, .rm32, .imm32 }, &.{ 0x69 }, 0, .none },
-    .{ .imul, .rmi, &.{ .r64, .rm64, .imm32 }, &.{ 0x69 }, 0, .long },
-
-    .{ .int3, .np, &.{}, &.{ 0xcc }, 0, .none },
-
-    .{ .ja, .d, &.{ .rel32 }, &.{ 0x0f, 0x87 }, 0, .none },
-    .{ .jae, .d, &.{ .rel32 }, &.{ 0x0f, 0x83 }, 0, .none },
-    .{ .jb, .d, &.{ .rel32 }, &.{ 0x0f, 0x82 }, 0, .none },
-    .{ .jbe, .d, &.{ .rel32 }, &.{ 0x0f, 0x86 }, 0, .none },
-    .{ .jc, .d, &.{ .rel32 }, &.{ 0x0f, 0x82 }, 0, .none },
-    .{ .jrcxz, .d, &.{ .rel32 }, &.{ 0xe3 }, 0, .none },
-    .{ .je, .d, &.{ .rel32 }, &.{ 0x0f, 0x84 }, 0, .none },
-    .{ .jg, .d, &.{ .rel32 }, &.{ 0x0f, 0x8f }, 0, .none },
-    .{ .jge, .d, &.{ .rel32 }, &.{ 0x0f, 0x8d }, 0, .none },
-    .{ .jl, .d, &.{ .rel32 }, &.{ 0x0f, 0x8c }, 0, .none },
-    .{ .jle, .d, &.{ .rel32 }, &.{ 0x0f, 0x8e }, 0, .none },
-    .{ .jna, .d, &.{ .rel32 }, &.{ 0x0f, 0x86 }, 0, .none },
-    .{ .jnae, .d, &.{ .rel32 }, &.{ 0x0f, 0x82 }, 0, .none },
-    .{ .jnb, .d, &.{ .rel32 }, &.{ 0x0f, 0x83 }, 0, .none },
-    .{ .jnbe, .d, &.{ .rel32 }, &.{ 0x0f, 0x87 }, 0, .none },
-    .{ .jnc, .d, &.{ .rel32 }, &.{ 0x0f, 0x83 }, 0, .none },
-    .{ .jne, .d, &.{ .rel32 }, &.{ 0x0f, 0x85 }, 0, .none },
-    .{ .jng, .d, &.{ .rel32 }, &.{ 0x0f, 0x8e }, 0, .none },
-    .{ .jnge, .d, &.{ .rel32 }, &.{ 0x0f, 0x8c }, 0, .none },
-    .{ .jnl, .d, &.{ .rel32 }, &.{ 0x0f, 0x8d }, 0, .none },
-    .{ .jnle, .d, &.{ .rel32 }, &.{ 0x0f, 0x8f }, 0, .none },
-    .{ .jno, .d, &.{ .rel32 }, &.{ 0x0f, 0x81 }, 0, .none },
-    .{ .jnp, .d, &.{ .rel32 }, &.{ 0x0f, 0x8b }, 0, .none },
-    .{ .jns, .d, &.{ .rel32 }, &.{ 0x0f, 0x89 }, 0, .none },
-    .{ .jnz, .d, &.{ .rel32 }, &.{ 0x0f, 0x85 }, 0, .none },
-    .{ .jo, .d, &.{ .rel32 }, &.{ 0x0f, 0x80 }, 0, .none },
-    .{ .jp, .d, &.{ .rel32 }, &.{ 0x0f, 0x8a }, 0, .none },
-    .{ .jpe, .d, &.{ .rel32 }, &.{ 0x0f, 0x8a }, 0, .none },
-    .{ .jpo, .d, &.{ .rel32 }, &.{ 0x0f, 0x8b }, 0, .none },
-    .{ .js, .d, &.{ .rel32 }, &.{ 0x0f, 0x88 }, 0, .none },
-    .{ .jz, .d, &.{ .rel32 }, &.{ 0x0f, 0x84 }, 0, .none },
-
-    .{ .jmp, .d, &.{ .rel32 }, &.{ 0xe9 }, 0, .none },
-    .{ .jmp, .m, &.{ .rm64 }, &.{ 0xff }, 4, .none },
-
-    .{ .lea, .rm, &.{ .r16, .m }, &.{ 0x8d }, 0, .none },
-    .{ .lea, .rm, &.{ .r32, .m }, &.{ 0x8d }, 0, .none },
-    .{ .lea, .rm, &.{ .r64, .m }, &.{ 0x8d }, 0, .long },
-
-    .{ .lfence, .np, &.{}, &.{ 0x0f, 0xae, 0xe8 }, 0, .none },
-
-    .{ .lods, .np, &.{ .m8 }, &.{ 0xac }, 0, .none },
-    .{ .lods, .np, &.{ .m16 }, &.{ 0xad }, 0, .none },
-    .{ .lods, .np, &.{ .m32 }, &.{ 0xad }, 0, .none },
-    .{ .lods, .np, &.{ .m64 }, &.{ 0xad }, 0, .long },
-
-    .{ .lodsb, .np, &.{}, &.{ 0xac }, 0, .none },
-    .{ .lodsw, .np, &.{}, &.{ 0xad }, 0, .short },
-    .{ .lodsd, .np, &.{}, &.{ 0xad }, 0, .none },
-    .{ .lodsq, .np, &.{}, &.{ 0xad }, 0, .long },
-
-    .{ .lzcnt, .rm, &.{ .r16, .rm16 }, &.{ 0xf3, 0x0f, 0xbd }, 0, .none },
-    .{ .lzcnt, .rm, &.{ .r32, .rm32 }, &.{ 0xf3, 0x0f, 0xbd }, 0, .none },
-    .{ .lzcnt, .rm, &.{ .r64, .rm64 }, &.{ 0xf3, 0x0f, 0xbd }, 0, .long },
-
-    .{ .mfence, .np, &.{}, &.{ 0x0f, 0xae, 0xf0 }, 0, .none },
-
-    .{ .mov, .mr, &.{ .rm8, .r8 }, &.{ 0x88 }, 0, .none },
-    .{ .mov, .mr, &.{ .rm8, .r8 }, &.{ 0x88 }, 0, .rex },
-    .{ .mov, .mr, &.{ .rm16, .r16 }, &.{ 0x89 }, 0, .none },
-    .{ .mov, .mr, &.{ .rm32, .r32 }, &.{ 0x89 }, 0, .none },
-    .{ .mov, .mr, &.{ .rm64, .r64 }, &.{ 0x89 }, 0, .long },
-    .{ .mov, .rm, &.{ .r8, .rm8 }, &.{ 0x8a }, 0, .none },
-    .{ .mov, .rm, &.{ .r8, .rm8 }, &.{ 0x8a }, 0, .rex },
-    .{ .mov, .rm, &.{ .r16, .rm16 }, &.{ 0x8b }, 0, .none },
-    .{ .mov, .rm, &.{ .r32, .rm32 }, &.{ 0x8b }, 0, .none },
-    .{ .mov, .rm, &.{ .r64, .rm64 }, &.{ 0x8b }, 0, .long },
-    .{ .mov, .mr, &.{ .rm16, .sreg }, &.{ 0x8c }, 0, .none },
-    .{ .mov, .mr, &.{ .rm64, .sreg }, &.{ 0x8c }, 0, .long },
-    .{ .mov, .rm, &.{ .sreg, .rm16 }, &.{ 0x8e }, 0, .none },
-    .{ .mov, .rm, &.{ .sreg, .rm64 }, &.{ 0x8e }, 0, .long },
-    .{ .mov, .fd, &.{ .al, .moffs }, &.{ 0xa0 }, 0, .none },
-    .{ .mov, .fd, &.{ .ax, .moffs }, &.{ 0xa1 }, 0, .none },
-    .{ .mov, .fd, &.{ .eax, .moffs }, &.{ 0xa1 }, 0, .none },
-    .{ .mov, .fd, &.{ .rax, .moffs }, &.{ 0xa1 }, 0, .long },
-    .{ .mov, .td, &.{ .moffs, .al }, &.{ 0xa2 }, 0, .none },
-    .{ .mov, .td, &.{ .moffs, .ax }, &.{ 0xa3 }, 0, .none },
-    .{ .mov, .td, &.{ .moffs, .eax }, &.{ 0xa3 }, 0, .none },
-    .{ .mov, .td, &.{ .moffs, .rax }, &.{ 0xa3 }, 0, .long },
-    .{ .mov, .oi, &.{ .r8, .imm8 }, &.{ 0xb0 }, 0, .none },
-    .{ .mov, .oi, &.{ .r8, .imm8 }, &.{ 0xb0 }, 0, .rex },
-    .{ .mov, .oi, &.{ .r16, .imm16 }, &.{ 0xb8 }, 0, .none },
-    .{ .mov, .oi, &.{ .r32, .imm32 }, &.{ 0xb8 }, 0, .none },
-    .{ .mov, .oi, &.{ .r64, .imm64 }, &.{ 0xb8 }, 0, .long },
-    .{ .mov, .mi, &.{ .rm8, .imm8 }, &.{ 0xc6 }, 0, .none },
-    .{ .mov, .mi, &.{ .rm8, .imm8 }, &.{ 0xc6 }, 0, .rex },
-    .{ .mov, .mi, &.{ .rm16, .imm16 }, &.{ 0xc7 }, 0, .none },
-    .{ .mov, .mi, &.{ .rm32, .imm32 }, &.{ 0xc7 }, 0, .none },
-    .{ .mov, .mi, &.{ .rm64, .imm32s }, &.{ 0xc7 }, 0, .long },
-
-    .{ .movbe, .rm, &.{ .r16, .m16 }, &.{ 0x0f, 0x38, 0xf0 }, 0, .none },
-    .{ .movbe, .rm, &.{ .r32, .m32 }, &.{ 0x0f, 0x38, 0xf0 }, 0, .none },
-    .{ .movbe, .rm, &.{ .r64, .m64 }, &.{ 0x0f, 0x38, 0xf0 }, 0, .long },
-    .{ .movbe, .mr, &.{ .m16, .r16 }, &.{ 0x0f, 0x38, 0xf1 }, 0, .none },
-    .{ .movbe, .mr, &.{ .m32, .r32 }, &.{ 0x0f, 0x38, 0xf1 }, 0, .none },
-    .{ .movbe, .mr, &.{ .m64, .r64 }, &.{ 0x0f, 0x38, 0xf1 }, 0, .long },
-
-    .{ .movs, .np, &.{ .m8, .m8 }, &.{ 0xa4 }, 0, .none },
-    .{ .movs, .np, &.{ .m16, .m16 }, &.{ 0xa5 }, 0, .none },
-    .{ .movs, .np, &.{ .m32, .m32 }, &.{ 0xa5 }, 0, .none },
-    .{ .movs, .np, &.{ .m64, .m64 }, &.{ 0xa5 }, 0, .long },
-
-    .{ .movsb, .np, &.{}, &.{ 0xa4 }, 0, .none },
-    .{ .movsw, .np, &.{}, &.{ 0xa5 }, 0, .short },
-    .{ .movsd, .np, &.{}, &.{ 0xa5 }, 0, .none },
-    .{ .movsq, .np, &.{}, &.{ 0xa5 }, 0, .long },
-
-    .{ .movsx, .rm, &.{ .r16, .rm8 }, &.{ 0x0f, 0xbe }, 0, .none },
-    .{ .movsx, .rm, &.{ .r16, .rm8 }, &.{ 0x0f, 0xbe }, 0, .rex },
-    .{ .movsx, .rm, &.{ .r32, .rm8 }, &.{ 0x0f, 0xbe }, 0, .none },
-    .{ .movsx, .rm, &.{ .r32, .rm8 }, &.{ 0x0f, 0xbe }, 0, .rex },
-    .{ .movsx, .rm, &.{ .r64, .rm8 }, &.{ 0x0f, 0xbe }, 0, .long },
-    .{ .movsx, .rm, &.{ .r32, .rm16 }, &.{ 0x0f, 0xbf }, 0, .none },
-    .{ .movsx, .rm, &.{ .r64, .rm16 }, &.{ 0x0f, 0xbf }, 0, .long },
+    .{ .call, .d, &.{ .rel32 }, &.{ 0xe8 }, 0, .none, .none },
+    .{ .call, .m, &.{ .rm64 }, &.{ 0xff }, 2, .none, .none },
+
+    .{ .cbw, .np, &.{ .o16 }, &.{ 0x98 }, 0, .none, .none },
+    .{ .cwde, .np, &.{ .o32 }, &.{ 0x98 }, 0, .none, .none },
+    .{ .cdqe, .np, &.{ .o64 }, &.{ 0x98 }, 0, .long, .none },
+
+    .{ .cwd, .np, &.{ .o16 }, &.{ 0x99 }, 0, .none, .none },
+    .{ .cdq, .np, &.{ .o32 }, &.{ 0x99 }, 0, .none, .none },
+    .{ .cqo, .np, &.{ .o64 }, &.{ 0x99 }, 0, .long, .none },
+
+    .{ .cmova, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x47 }, 0, .none, .none },
+    .{ .cmova, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x47 }, 0, .none, .none },
+    .{ .cmova, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x47 }, 0, .long, .none },
+    .{ .cmovae, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x43 }, 0, .none, .none },
+    .{ .cmovae, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x43 }, 0, .none, .none },
+    .{ .cmovae, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x43 }, 0, .long, .none },
+    .{ .cmovb, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x42 }, 0, .none, .none },
+    .{ .cmovb, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x42 }, 0, .none, .none },
+    .{ .cmovb, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x42 }, 0, .long, .none },
+    .{ .cmovbe, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x46 }, 0, .none, .none },
+    .{ .cmovbe, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x46 }, 0, .none, .none },
+    .{ .cmovbe, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x46 }, 0, .long, .none },
+    .{ .cmovc, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x42 }, 0, .none, .none },
+    .{ .cmovc, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x42 }, 0, .none, .none },
+    .{ .cmovc, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x42 }, 0, .long, .none },
+    .{ .cmove, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x44 }, 0, .none, .none },
+    .{ .cmove, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x44 }, 0, .none, .none },
+    .{ .cmove, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x44 }, 0, .long, .none },
+    .{ .cmovg, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x4f }, 0, .none, .none },
+    .{ .cmovg, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x4f }, 0, .none, .none },
+    .{ .cmovg, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x4f }, 0, .long, .none },
+    .{ .cmovge, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x4d }, 0, .none, .none },
+    .{ .cmovge, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x4d }, 0, .none, .none },
+    .{ .cmovge, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x4d }, 0, .long, .none },
+    .{ .cmovl, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x4c }, 0, .none, .none },
+    .{ .cmovl, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x4c }, 0, .none, .none },
+    .{ .cmovl, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x4c }, 0, .long, .none },
+    .{ .cmovle, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x4e }, 0, .none, .none },
+    .{ .cmovle, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x4e }, 0, .none, .none },
+    .{ .cmovle, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x4e }, 0, .long, .none },
+    .{ .cmovna, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x46 }, 0, .none, .none },
+    .{ .cmovna, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x46 }, 0, .none, .none },
+    .{ .cmovna, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x46 }, 0, .long, .none },
+    .{ .cmovnae, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x42 }, 0, .none, .none },
+    .{ .cmovnae, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x42 }, 0, .none, .none },
+    .{ .cmovnae, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x42 }, 0, .long, .none },
+    .{ .cmovnb, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x43 }, 0, .none, .none },
+    .{ .cmovnb, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x43 }, 0, .none, .none },
+    .{ .cmovnb, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x43 }, 0, .long, .none },
+    .{ .cmovnbe, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x47 }, 0, .none, .none },
+    .{ .cmovnbe, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x47 }, 0, .none, .none },
+    .{ .cmovnbe, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x47 }, 0, .long, .none },
+    .{ .cmovnc, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x43 }, 0, .none, .none },
+    .{ .cmovnc, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x43 }, 0, .none, .none },
+    .{ .cmovnc, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x43 }, 0, .long, .none },
+    .{ .cmovne, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x45 }, 0, .none, .none },
+    .{ .cmovne, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x45 }, 0, .none, .none },
+    .{ .cmovne, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x45 }, 0, .long, .none },
+    .{ .cmovng, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x4e }, 0, .none, .none },
+    .{ .cmovng, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x4e }, 0, .none, .none },
+    .{ .cmovng, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x4e }, 0, .long, .none },
+    .{ .cmovnge, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x4c }, 0, .none, .none },
+    .{ .cmovnge, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x4c }, 0, .none, .none },
+    .{ .cmovnge, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x4c }, 0, .long, .none },
+    .{ .cmovnl, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x4d }, 0, .none, .none },
+    .{ .cmovnl, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x4d }, 0, .none, .none },
+    .{ .cmovnl, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x4d }, 0, .long, .none },
+    .{ .cmovnle, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x4f }, 0, .none, .none },
+    .{ .cmovnle, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x4f }, 0, .none, .none },
+    .{ .cmovnle, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x4f }, 0, .long, .none },
+    .{ .cmovno, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x41 }, 0, .none, .none },
+    .{ .cmovno, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x41 }, 0, .none, .none },
+    .{ .cmovno, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x41 }, 0, .long, .none },
+    .{ .cmovnp, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x4b }, 0, .none, .none },
+    .{ .cmovnp, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x4b }, 0, .none, .none },
+    .{ .cmovnp, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x4b }, 0, .long, .none },
+    .{ .cmovns, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x49 }, 0, .none, .none },
+    .{ .cmovns, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x49 }, 0, .none, .none },
+    .{ .cmovns, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x49 }, 0, .long, .none },
+    .{ .cmovnz, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x45 }, 0, .none, .none },
+    .{ .cmovnz, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x45 }, 0, .none, .none },
+    .{ .cmovnz, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x45 }, 0, .long, .none },
+    .{ .cmovo, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x40 }, 0, .none, .none },
+    .{ .cmovo, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x40 }, 0, .none, .none },
+    .{ .cmovo, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x40 }, 0, .long, .none },
+    .{ .cmovp, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x4a }, 0, .none, .none },
+    .{ .cmovp, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x4a }, 0, .none, .none },
+    .{ .cmovp, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x4a }, 0, .long, .none },
+    .{ .cmovpe, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x4a }, 0, .none, .none },
+    .{ .cmovpe, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x4a }, 0, .none, .none },
+    .{ .cmovpe, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x4a }, 0, .long, .none },
+    .{ .cmovpo, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x4b }, 0, .none, .none },
+    .{ .cmovpo, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x4b }, 0, .none, .none },
+    .{ .cmovpo, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x4b }, 0, .long, .none },
+    .{ .cmovs, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x48 }, 0, .none, .none },
+    .{ .cmovs, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x48 }, 0, .none, .none },
+    .{ .cmovs, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x48 }, 0, .long, .none },
+    .{ .cmovz, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x44 }, 0, .none, .none },
+    .{ .cmovz, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x44 }, 0, .none, .none },
+    .{ .cmovz, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x44 }, 0, .long, .none },
+
+    .{ .cmp, .zi, &.{ .al, .imm8 }, &.{ 0x3c }, 0, .none, .none },
+    .{ .cmp, .zi, &.{ .ax, .imm16 }, &.{ 0x3d }, 0, .none, .none },
+    .{ .cmp, .zi, &.{ .eax, .imm32 }, &.{ 0x3d }, 0, .none, .none },
+    .{ .cmp, .zi, &.{ .rax, .imm32s }, &.{ 0x3d }, 0, .long, .none },
+    .{ .cmp, .mi, &.{ .rm8, .imm8 }, &.{ 0x80 }, 7, .none, .none },
+    .{ .cmp, .mi, &.{ .rm8, .imm8 }, &.{ 0x80 }, 7, .rex, .none },
+    .{ .cmp, .mi, &.{ .rm16, .imm16 }, &.{ 0x81 }, 7, .none, .none },
+    .{ .cmp, .mi, &.{ .rm32, .imm32 }, &.{ 0x81 }, 7, .none, .none },
+    .{ .cmp, .mi, &.{ .rm64, .imm32s }, &.{ 0x81 }, 7, .long, .none },
+    .{ .cmp, .mi, &.{ .rm16, .imm8s }, &.{ 0x83 }, 7, .none, .none },
+    .{ .cmp, .mi, &.{ .rm32, .imm8s }, &.{ 0x83 }, 7, .none, .none },
+    .{ .cmp, .mi, &.{ .rm64, .imm8s }, &.{ 0x83 }, 7, .long, .none },
+    .{ .cmp, .mr, &.{ .rm8, .r8 }, &.{ 0x38 }, 0, .none, .none },
+    .{ .cmp, .mr, &.{ .rm8, .r8 }, &.{ 0x38 }, 0, .rex, .none },
+    .{ .cmp, .mr, &.{ .rm16, .r16 }, &.{ 0x39 }, 0, .none, .none },
+    .{ .cmp, .mr, &.{ .rm32, .r32 }, &.{ 0x39 }, 0, .none, .none },
+    .{ .cmp, .mr, &.{ .rm64, .r64 }, &.{ 0x39 }, 0, .long, .none },
+    .{ .cmp, .rm, &.{ .r8, .rm8 }, &.{ 0x3a }, 0, .none, .none },
+    .{ .cmp, .rm, &.{ .r8, .rm8 }, &.{ 0x3a }, 0, .rex, .none },
+    .{ .cmp, .rm, &.{ .r16, .rm16 }, &.{ 0x3b }, 0, .none, .none },
+    .{ .cmp, .rm, &.{ .r32, .rm32 }, &.{ 0x3b }, 0, .none, .none },
+    .{ .cmp, .rm, &.{ .r64, .rm64 }, &.{ 0x3b }, 0, .long, .none },
+
+    .{ .cmps, .np, &.{ .m8, .m8 }, &.{ 0xa6 }, 0, .none, .none },
+    .{ .cmps, .np, &.{ .m16, .m16 }, &.{ 0xa7 }, 0, .none, .none },
+    .{ .cmps, .np, &.{ .m32, .m32 }, &.{ 0xa7 }, 0, .none, .none },
+    .{ .cmps, .np, &.{ .m64, .m64 }, &.{ 0xa7 }, 0, .long, .none },
+
+    .{ .cmpsb, .np, &.{}, &.{ 0xa6 }, 0, .none, .none },
+    .{ .cmpsw, .np, &.{}, &.{ 0xa7 }, 0, .short, .none },
+    .{ .cmpsd, .np, &.{}, &.{ 0xa7 }, 0, .none, .none },
+    .{ .cmpsq, .np, &.{}, &.{ 0xa7 }, 0, .long, .none },
+
+    .{ .cmpxchg, .mr, &.{ .rm8, .r8 }, &.{ 0x0f, 0xb0 }, 0, .none, .none },
+    .{ .cmpxchg, .mr, &.{ .rm8, .r8 }, &.{ 0x0f, 0xb0 }, 0, .rex, .none },
+    .{ .cmpxchg, .mr, &.{ .rm16, .r16 }, &.{ 0x0f, 0xb1 }, 0, .none, .none },
+    .{ .cmpxchg, .mr, &.{ .rm32, .r32 }, &.{ 0x0f, 0xb1 }, 0, .none, .none },
+    .{ .cmpxchg, .mr, &.{ .rm64, .r64 }, &.{ 0x0f, 0xb1 }, 0, .long, .none },
+
+    .{ .cmpxchg8b, .m, &.{ .m64 }, &.{ 0x0f, 0xc7 }, 1, .none, .none },
+    .{ .cmpxchg16b, .m, &.{ .m128 }, &.{ 0x0f, 0xc7 }, 1, .long, .none },
+
+    .{ .div, .m, &.{ .rm8 }, &.{ 0xf6 }, 6, .none, .none },
+    .{ .div, .m, &.{ .rm8 }, &.{ 0xf6 }, 6, .rex, .none },
+    .{ .div, .m, &.{ .rm16 }, &.{ 0xf7 }, 6, .none, .none },
+    .{ .div, .m, &.{ .rm32 }, &.{ 0xf7 }, 6, .none, .none },
+    .{ .div, .m, &.{ .rm64 }, &.{ 0xf7 }, 6, .long, .none },
+
+    .{ .fisttp, .m, &.{ .m16 }, &.{ 0xdf }, 1, .none, .x87 },
+    .{ .fisttp, .m, &.{ .m32 }, &.{ 0xdb }, 1, .none, .x87 },
+    .{ .fisttp, .m, &.{ .m64 }, &.{ 0xdd }, 1, .none, .x87 },
+
+    .{ .fld, .m, &.{ .m32 }, &.{ 0xd9 }, 0, .none, .x87 },
+    .{ .fld, .m, &.{ .m64 }, &.{ 0xdd }, 0, .none, .x87 },
+    .{ .fld, .m, &.{ .m80 }, &.{ 0xdb }, 5, .none, .x87 },
+
+    .{ .idiv, .m, &.{ .rm8 }, &.{ 0xf6 }, 7, .none, .none },
+    .{ .idiv, .m, &.{ .rm8 }, &.{ 0xf6 }, 7, .rex, .none },
+    .{ .idiv, .m, &.{ .rm16 }, &.{ 0xf7 }, 7, .none, .none },
+    .{ .idiv, .m, &.{ .rm32 }, &.{ 0xf7 }, 7, .none, .none },
+    .{ .idiv, .m, &.{ .rm64 }, &.{ 0xf7 }, 7, .long, .none },
+
+    .{ .imul, .m, &.{ .rm8 }, &.{ 0xf6 }, 5, .none, .none },
+    .{ .imul, .m, &.{ .rm8 }, &.{ 0xf6 }, 5, .rex, .none },
+    .{ .imul, .m, &.{ .rm16, }, &.{ 0xf7 }, 5, .none, .none },
+    .{ .imul, .m, &.{ .rm32, }, &.{ 0xf7 }, 5, .none, .none },
+    .{ .imul, .m, &.{ .rm64, }, &.{ 0xf7 }, 5, .long, .none },
+    .{ .imul, .rm, &.{ .r16, .rm16, }, &.{ 0x0f, 0xaf }, 0, .none, .none },
+    .{ .imul, .rm, &.{ .r32, .rm32, }, &.{ 0x0f, 0xaf }, 0, .none, .none },
+    .{ .imul, .rm, &.{ .r64, .rm64, }, &.{ 0x0f, 0xaf }, 0, .long, .none },
+    .{ .imul, .rmi, &.{ .r16, .rm16, .imm8s }, &.{ 0x6b }, 0, .none, .none },
+    .{ .imul, .rmi, &.{ .r32, .rm32, .imm8s }, &.{ 0x6b }, 0, .none, .none },
+    .{ .imul, .rmi, &.{ .r64, .rm64, .imm8s }, &.{ 0x6b }, 0, .long, .none },
+    .{ .imul, .rmi, &.{ .r16, .rm16, .imm16 }, &.{ 0x69 }, 0, .none, .none },
+    .{ .imul, .rmi, &.{ .r32, .rm32, .imm32 }, &.{ 0x69 }, 0, .none, .none },
+    .{ .imul, .rmi, &.{ .r64, .rm64, .imm32 }, &.{ 0x69 }, 0, .long, .none },
+
+    .{ .int3, .np, &.{}, &.{ 0xcc }, 0, .none, .none },
+
+    .{ .ja, .d, &.{ .rel32 }, &.{ 0x0f, 0x87 }, 0, .none, .none },
+    .{ .jae, .d, &.{ .rel32 }, &.{ 0x0f, 0x83 }, 0, .none, .none },
+    .{ .jb, .d, &.{ .rel32 }, &.{ 0x0f, 0x82 }, 0, .none, .none },
+    .{ .jbe, .d, &.{ .rel32 }, &.{ 0x0f, 0x86 }, 0, .none, .none },
+    .{ .jc, .d, &.{ .rel32 }, &.{ 0x0f, 0x82 }, 0, .none, .none },
+    .{ .jrcxz, .d, &.{ .rel32 }, &.{ 0xe3 }, 0, .none, .none },
+    .{ .je, .d, &.{ .rel32 }, &.{ 0x0f, 0x84 }, 0, .none, .none },
+    .{ .jg, .d, &.{ .rel32 }, &.{ 0x0f, 0x8f }, 0, .none, .none },
+    .{ .jge, .d, &.{ .rel32 }, &.{ 0x0f, 0x8d }, 0, .none, .none },
+    .{ .jl, .d, &.{ .rel32 }, &.{ 0x0f, 0x8c }, 0, .none, .none },
+    .{ .jle, .d, &.{ .rel32 }, &.{ 0x0f, 0x8e }, 0, .none, .none },
+    .{ .jna, .d, &.{ .rel32 }, &.{ 0x0f, 0x86 }, 0, .none, .none },
+    .{ .jnae, .d, &.{ .rel32 }, &.{ 0x0f, 0x82 }, 0, .none, .none },
+    .{ .jnb, .d, &.{ .rel32 }, &.{ 0x0f, 0x83 }, 0, .none, .none },
+    .{ .jnbe, .d, &.{ .rel32 }, &.{ 0x0f, 0x87 }, 0, .none, .none },
+    .{ .jnc, .d, &.{ .rel32 }, &.{ 0x0f, 0x83 }, 0, .none, .none },
+    .{ .jne, .d, &.{ .rel32 }, &.{ 0x0f, 0x85 }, 0, .none, .none },
+    .{ .jng, .d, &.{ .rel32 }, &.{ 0x0f, 0x8e }, 0, .none, .none },
+    .{ .jnge, .d, &.{ .rel32 }, &.{ 0x0f, 0x8c }, 0, .none, .none },
+    .{ .jnl, .d, &.{ .rel32 }, &.{ 0x0f, 0x8d }, 0, .none, .none },
+    .{ .jnle, .d, &.{ .rel32 }, &.{ 0x0f, 0x8f }, 0, .none, .none },
+    .{ .jno, .d, &.{ .rel32 }, &.{ 0x0f, 0x81 }, 0, .none, .none },
+    .{ .jnp, .d, &.{ .rel32 }, &.{ 0x0f, 0x8b }, 0, .none, .none },
+    .{ .jns, .d, &.{ .rel32 }, &.{ 0x0f, 0x89 }, 0, .none, .none },
+    .{ .jnz, .d, &.{ .rel32 }, &.{ 0x0f, 0x85 }, 0, .none, .none },
+    .{ .jo, .d, &.{ .rel32 }, &.{ 0x0f, 0x80 }, 0, .none, .none },
+    .{ .jp, .d, &.{ .rel32 }, &.{ 0x0f, 0x8a }, 0, .none, .none },
+    .{ .jpe, .d, &.{ .rel32 }, &.{ 0x0f, 0x8a }, 0, .none, .none },
+    .{ .jpo, .d, &.{ .rel32 }, &.{ 0x0f, 0x8b }, 0, .none, .none },
+    .{ .js, .d, &.{ .rel32 }, &.{ 0x0f, 0x88 }, 0, .none, .none },
+    .{ .jz, .d, &.{ .rel32 }, &.{ 0x0f, 0x84 }, 0, .none, .none },
+
+    .{ .jmp, .d, &.{ .rel32 }, &.{ 0xe9 }, 0, .none, .none },
+    .{ .jmp, .m, &.{ .rm64 }, &.{ 0xff }, 4, .none, .none },
+
+    .{ .lea, .rm, &.{ .r16, .m }, &.{ 0x8d }, 0, .none, .none },
+    .{ .lea, .rm, &.{ .r32, .m }, &.{ 0x8d }, 0, .none, .none },
+    .{ .lea, .rm, &.{ .r64, .m }, &.{ 0x8d }, 0, .long, .none },
+
+    .{ .lfence, .np, &.{}, &.{ 0x0f, 0xae, 0xe8 }, 0, .none, .none },
+
+    .{ .lods, .np, &.{ .m8 }, &.{ 0xac }, 0, .none, .none },
+    .{ .lods, .np, &.{ .m16 }, &.{ 0xad }, 0, .none, .none },
+    .{ .lods, .np, &.{ .m32 }, &.{ 0xad }, 0, .none, .none },
+    .{ .lods, .np, &.{ .m64 }, &.{ 0xad }, 0, .long, .none },
+
+    .{ .lodsb, .np, &.{}, &.{ 0xac }, 0, .none, .none },
+    .{ .lodsw, .np, &.{}, &.{ 0xad }, 0, .short, .none },
+    .{ .lodsd, .np, &.{}, &.{ 0xad }, 0, .none, .none },
+    .{ .lodsq, .np, &.{}, &.{ 0xad }, 0, .long, .none },
+
+    .{ .lzcnt, .rm, &.{ .r16, .rm16 }, &.{ 0xf3, 0x0f, 0xbd }, 0, .none, .none },
+    .{ .lzcnt, .rm, &.{ .r32, .rm32 }, &.{ 0xf3, 0x0f, 0xbd }, 0, .none, .none },
+    .{ .lzcnt, .rm, &.{ .r64, .rm64 }, &.{ 0xf3, 0x0f, 0xbd }, 0, .long, .none },
+
+    .{ .mfence, .np, &.{}, &.{ 0x0f, 0xae, 0xf0 }, 0, .none, .none },
+
+    .{ .mov, .mr, &.{ .rm8, .r8 }, &.{ 0x88 }, 0, .none, .none },
+    .{ .mov, .mr, &.{ .rm8, .r8 }, &.{ 0x88 }, 0, .rex, .none },
+    .{ .mov, .mr, &.{ .rm16, .r16 }, &.{ 0x89 }, 0, .none, .none },
+    .{ .mov, .mr, &.{ .rm32, .r32 }, &.{ 0x89 }, 0, .none, .none },
+    .{ .mov, .mr, &.{ .rm64, .r64 }, &.{ 0x89 }, 0, .long, .none },
+    .{ .mov, .rm, &.{ .r8, .rm8 }, &.{ 0x8a }, 0, .none, .none },
+    .{ .mov, .rm, &.{ .r8, .rm8 }, &.{ 0x8a }, 0, .rex, .none },
+    .{ .mov, .rm, &.{ .r16, .rm16 }, &.{ 0x8b }, 0, .none, .none },
+    .{ .mov, .rm, &.{ .r32, .rm32 }, &.{ 0x8b }, 0, .none, .none },
+    .{ .mov, .rm, &.{ .r64, .rm64 }, &.{ 0x8b }, 0, .long, .none },
+    .{ .mov, .mr, &.{ .rm16, .sreg }, &.{ 0x8c }, 0, .none, .none },
+    .{ .mov, .mr, &.{ .rm64, .sreg }, &.{ 0x8c }, 0, .long, .none },
+    .{ .mov, .rm, &.{ .sreg, .rm16 }, &.{ 0x8e }, 0, .none, .none },
+    .{ .mov, .rm, &.{ .sreg, .rm64 }, &.{ 0x8e }, 0, .long, .none },
+    .{ .mov, .fd, &.{ .al, .moffs }, &.{ 0xa0 }, 0, .none, .none },
+    .{ .mov, .fd, &.{ .ax, .moffs }, &.{ 0xa1 }, 0, .none, .none },
+    .{ .mov, .fd, &.{ .eax, .moffs }, &.{ 0xa1 }, 0, .none, .none },
+    .{ .mov, .fd, &.{ .rax, .moffs }, &.{ 0xa1 }, 0, .long, .none },
+    .{ .mov, .td, &.{ .moffs, .al }, &.{ 0xa2 }, 0, .none, .none },
+    .{ .mov, .td, &.{ .moffs, .ax }, &.{ 0xa3 }, 0, .none, .none },
+    .{ .mov, .td, &.{ .moffs, .eax }, &.{ 0xa3 }, 0, .none, .none },
+    .{ .mov, .td, &.{ .moffs, .rax }, &.{ 0xa3 }, 0, .long, .none },
+    .{ .mov, .oi, &.{ .r8, .imm8 }, &.{ 0xb0 }, 0, .none, .none },
+    .{ .mov, .oi, &.{ .r8, .imm8 }, &.{ 0xb0 }, 0, .rex, .none },
+    .{ .mov, .oi, &.{ .r16, .imm16 }, &.{ 0xb8 }, 0, .none, .none },
+    .{ .mov, .oi, &.{ .r32, .imm32 }, &.{ 0xb8 }, 0, .none, .none },
+    .{ .mov, .oi, &.{ .r64, .imm64 }, &.{ 0xb8 }, 0, .long, .none },
+    .{ .mov, .mi, &.{ .rm8, .imm8 }, &.{ 0xc6 }, 0, .none, .none },
+    .{ .mov, .mi, &.{ .rm8, .imm8 }, &.{ 0xc6 }, 0, .rex, .none },
+    .{ .mov, .mi, &.{ .rm16, .imm16 }, &.{ 0xc7 }, 0, .none, .none },
+    .{ .mov, .mi, &.{ .rm32, .imm32 }, &.{ 0xc7 }, 0, .none, .none },
+    .{ .mov, .mi, &.{ .rm64, .imm32s }, &.{ 0xc7 }, 0, .long, .none },
+
+    .{ .movbe, .rm, &.{ .r16, .m16 }, &.{ 0x0f, 0x38, 0xf0 }, 0, .none, .none },
+    .{ .movbe, .rm, &.{ .r32, .m32 }, &.{ 0x0f, 0x38, 0xf0 }, 0, .none, .none },
+    .{ .movbe, .rm, &.{ .r64, .m64 }, &.{ 0x0f, 0x38, 0xf0 }, 0, .long, .none },
+    .{ .movbe, .mr, &.{ .m16, .r16 }, &.{ 0x0f, 0x38, 0xf1 }, 0, .none, .none },
+    .{ .movbe, .mr, &.{ .m32, .r32 }, &.{ 0x0f, 0x38, 0xf1 }, 0, .none, .none },
+    .{ .movbe, .mr, &.{ .m64, .r64 }, &.{ 0x0f, 0x38, 0xf1 }, 0, .long, .none },
+
+    .{ .movs, .np, &.{ .m8, .m8 }, &.{ 0xa4 }, 0, .none, .none },
+    .{ .movs, .np, &.{ .m16, .m16 }, &.{ 0xa5 }, 0, .none, .none },
+    .{ .movs, .np, &.{ .m32, .m32 }, &.{ 0xa5 }, 0, .none, .none },
+    .{ .movs, .np, &.{ .m64, .m64 }, &.{ 0xa5 }, 0, .long, .none },
+
+    .{ .movsb, .np, &.{}, &.{ 0xa4 }, 0, .none, .none },
+    .{ .movsw, .np, &.{}, &.{ 0xa5 }, 0, .short, .none },
+    .{ .movsd, .np, &.{}, &.{ 0xa5 }, 0, .none, .none },
+    .{ .movsq, .np, &.{}, &.{ 0xa5 }, 0, .long, .none },
+
+    .{ .movsx, .rm, &.{ .r16, .rm8 }, &.{ 0x0f, 0xbe }, 0, .none, .none },
+    .{ .movsx, .rm, &.{ .r16, .rm8 }, &.{ 0x0f, 0xbe }, 0, .rex, .none },
+    .{ .movsx, .rm, &.{ .r32, .rm8 }, &.{ 0x0f, 0xbe }, 0, .none, .none },
+    .{ .movsx, .rm, &.{ .r32, .rm8 }, &.{ 0x0f, 0xbe }, 0, .rex, .none },
+    .{ .movsx, .rm, &.{ .r64, .rm8 }, &.{ 0x0f, 0xbe }, 0, .long, .none },
+    .{ .movsx, .rm, &.{ .r32, .rm16 }, &.{ 0x0f, 0xbf }, 0, .none, .none },
+    .{ .movsx, .rm, &.{ .r64, .rm16 }, &.{ 0x0f, 0xbf }, 0, .long, .none },
     // This instruction is discouraged.
-    .{ .movsxd, .rm, &.{ .r32, .rm32 }, &.{ 0x63 }, 0, .none },
-    .{ .movsxd, .rm, &.{ .r64, .rm32 }, &.{ 0x63 }, 0, .long },
-
-    .{ .movzx, .rm, &.{ .r16, .rm8 }, &.{ 0x0f, 0xb6 }, 0, .none },
-    .{ .movzx, .rm, &.{ .r32, .rm8 }, &.{ 0x0f, 0xb6 }, 0, .none },
-    .{ .movzx, .rm, &.{ .r64, .rm8 }, &.{ 0x0f, 0xb6 }, 0, .long },
-    .{ .movzx, .rm, &.{ .r32, .rm16 }, &.{ 0x0f, 0xb7 }, 0, .none },
-    .{ .movzx, .rm, &.{ .r64, .rm16 }, &.{ 0x0f, 0xb7 }, 0, .long },
-
-    .{ .mul, .m, &.{ .rm8 }, &.{ 0xf6 }, 4, .none },
-    .{ .mul, .m, &.{ .rm8 }, &.{ 0xf6 }, 4, .rex },
-    .{ .mul, .m, &.{ .rm16 }, &.{ 0xf7 }, 4, .none },
-    .{ .mul, .m, &.{ .rm32 }, &.{ 0xf7 }, 4, .none },
-    .{ .mul, .m, &.{ .rm64 }, &.{ 0xf7 }, 4, .long },
-
-    .{ .neg, .m, &.{ .rm8 }, &.{ 0xf6 }, 3, .none },
-    .{ .neg, .m, &.{ .rm8 }, &.{ 0xf6 }, 3, .rex },
-    .{ .neg, .m, &.{ .rm16 }, &.{ 0xf7 }, 3, .none },
-    .{ .neg, .m, &.{ .rm32 }, &.{ 0xf7 }, 3, .none },
-    .{ .neg, .m, &.{ .rm64 }, &.{ 0xf7 }, 3, .long },
-
-    .{ .nop, .np, &.{}, &.{ 0x90 }, 0, .none },
-
-    .{ .not, .m, &.{ .rm8 }, &.{ 0xf6 }, 2, .none },
-    .{ .not, .m, &.{ .rm8 }, &.{ 0xf6 }, 2, .rex },
-    .{ .not, .m, &.{ .rm16 }, &.{ 0xf7 }, 2, .none },
-    .{ .not, .m, &.{ .rm32 }, &.{ 0xf7 }, 2, .none },
-    .{ .not, .m, &.{ .rm64 }, &.{ 0xf7 }, 2, .long },
-
-    .{ .@"or", .zi, &.{ .al, .imm8 }, &.{ 0x0c }, 0, .none },
-    .{ .@"or", .zi, &.{ .ax, .imm16 }, &.{ 0x0d }, 0, .none },
-    .{ .@"or", .zi, &.{ .eax, .imm32 }, &.{ 0x0d }, 0, .none },
-    .{ .@"or", .zi, &.{ .rax, .imm32s }, &.{ 0x0d }, 0, .long },
-    .{ .@"or", .mi, &.{ .rm8, .imm8 }, &.{ 0x80 }, 1, .none },
-    .{ .@"or", .mi, &.{ .rm8, .imm8 }, &.{ 0x80 }, 1, .rex },
-    .{ .@"or", .mi, &.{ .rm16, .imm16 }, &.{ 0x81 }, 1, .none },
-    .{ .@"or", .mi, &.{ .rm32, .imm32 }, &.{ 0x81 }, 1, .none },
-    .{ .@"or", .mi, &.{ .rm64, .imm32s }, &.{ 0x81 }, 1, .long },
-    .{ .@"or", .mi, &.{ .rm16, .imm8s }, &.{ 0x83 }, 1, .none },
-    .{ .@"or", .mi, &.{ .rm32, .imm8s }, &.{ 0x83 }, 1, .none },
-    .{ .@"or", .mi, &.{ .rm64, .imm8s }, &.{ 0x83 }, 1, .long },
-    .{ .@"or", .mr, &.{ .rm8, .r8 }, &.{ 0x08 }, 0, .none },
-    .{ .@"or", .mr, &.{ .rm8, .r8 }, &.{ 0x08 }, 0, .rex },
-    .{ .@"or", .mr, &.{ .rm16, .r16 }, &.{ 0x09 }, 0, .none },
-    .{ .@"or", .mr, &.{ .rm32, .r32 }, &.{ 0x09 }, 0, .none },
-    .{ .@"or", .mr, &.{ .rm64, .r64 }, &.{ 0x09 }, 0, .long },
-    .{ .@"or", .rm, &.{ .r8, .rm8 }, &.{ 0x0a }, 0, .none },
-    .{ .@"or", .rm, &.{ .r8, .rm8 }, &.{ 0x0a }, 0, .rex },
-    .{ .@"or", .rm, &.{ .r16, .rm16 }, &.{ 0x0b }, 0, .none },
-    .{ .@"or", .rm, &.{ .r32, .rm32 }, &.{ 0x0b }, 0, .none },
-    .{ .@"or", .rm, &.{ .r64, .rm64 }, &.{ 0x0b }, 0, .long },
-
-    .{ .pop, .o, &.{ .r16 }, &.{ 0x58 }, 0, .none },
-    .{ .pop, .o, &.{ .r64 }, &.{ 0x58 }, 0, .none },
-    .{ .pop, .m, &.{ .rm16 }, &.{ 0x8f }, 0, .none },
-    .{ .pop, .m, &.{ .rm64 }, &.{ 0x8f }, 0, .none },
-
-    .{ .popcnt, .rm, &.{ .r16, .rm16 }, &.{ 0xf3, 0x0f, 0xb8 }, 0, .none },
-    .{ .popcnt, .rm, &.{ .r32, .rm32 }, &.{ 0xf3, 0x0f, 0xb8 }, 0, .none },
-    .{ .popcnt, .rm, &.{ .r64, .rm64 }, &.{ 0xf3, 0x0f, 0xb8 }, 0, .long },
-
-    .{ .push, .o, &.{ .r16 }, &.{ 0x50 }, 0, .none },
-    .{ .push, .o, &.{ .r64 }, &.{ 0x50 }, 0, .none },
-    .{ .push, .m, &.{ .rm16 }, &.{ 0xff }, 6, .none },
-    .{ .push, .m, &.{ .rm64 }, &.{ 0xff }, 6, .none },
-    .{ .push, .i, &.{ .imm8 }, &.{ 0x6a }, 0, .none },
-    .{ .push, .i, &.{ .imm16 }, &.{ 0x68 }, 0, .none },
-    .{ .push, .i, &.{ .imm32 }, &.{ 0x68 }, 0, .none },
-
-    .{ .ret, .np, &.{}, &.{ 0xc3 }, 0, .none },
-
-    .{ .rcl, .m1, &.{ .rm8, .unity }, &.{ 0xd0 }, 2, .none },
-    .{ .rcl, .m1, &.{ .rm8, .unity }, &.{ 0xd0 }, 2, .rex },
-    .{ .rcl, .mc, &.{ .rm8, .cl }, &.{ 0xd2 }, 2, .none },
-    .{ .rcl, .mc, &.{ .rm8, .cl }, &.{ 0xd2 }, 2, .rex },
-    .{ .rcl, .mi, &.{ .rm8, .imm8 }, &.{ 0xc0 }, 2, .none },
-    .{ .rcl, .mi, &.{ .rm8, .imm8 }, &.{ 0xc0 }, 2, .rex },
-    .{ .rcl, .m1, &.{ .rm16, .unity }, &.{ 0xd1 }, 2, .none },
-    .{ .rcl, .mc, &.{ .rm16, .cl }, &.{ 0xd3 }, 2, .none },
-    .{ .rcl, .mi, &.{ .rm16, .imm8 }, &.{ 0xc1 }, 2, .none },
-    .{ .rcl, .m1, &.{ .rm32, .unity }, &.{ 0xd1 }, 2, .none },
-    .{ .rcl, .m1, &.{ .rm64, .unity }, &.{ 0xd1 }, 2, .long },
-    .{ .rcl, .mc, &.{ .rm32, .cl }, &.{ 0xd3 }, 2, .none },
-    .{ .rcl, .mc, &.{ .rm64, .cl }, &.{ 0xd3 }, 2, .long },
-    .{ .rcl, .mi, &.{ .rm32, .imm8 }, &.{ 0xc1 }, 2, .none },
-    .{ .rcl, .mi, &.{ .rm64, .imm8 }, &.{ 0xc1 }, 2, .long },
-
-    .{ .rcr, .m1, &.{ .rm8, .unity }, &.{ 0xd0 }, 3, .none },
-    .{ .rcr, .m1, &.{ .rm8, .unity }, &.{ 0xd0 }, 3, .rex },
-    .{ .rcr, .mc, &.{ .rm8, .cl }, &.{ 0xd2 }, 3, .none },
-    .{ .rcr, .mc, &.{ .rm8, .cl }, &.{ 0xd2 }, 3, .rex },
-    .{ .rcr, .mi, &.{ .rm8, .imm8 }, &.{ 0xc0 }, 3, .none },
-    .{ .rcr, .mi, &.{ .rm8, .imm8 }, &.{ 0xc0 }, 3, .rex },
-    .{ .rcr, .m1, &.{ .rm16, .unity }, &.{ 0xd1 }, 3, .none },
-    .{ .rcr, .mc, &.{ .rm16, .cl }, &.{ 0xd3 }, 3, .none },
-    .{ .rcr, .mi, &.{ .rm16, .imm8 }, &.{ 0xc1 }, 3, .none },
-    .{ .rcr, .m1, &.{ .rm32, .unity }, &.{ 0xd1 }, 3, .none },
-    .{ .rcr, .m1, &.{ .rm64, .unity }, &.{ 0xd1 }, 3, .long },
-    .{ .rcr, .mc, &.{ .rm32, .cl }, &.{ 0xd3 }, 3, .none },
-    .{ .rcr, .mc, &.{ .rm64, .cl }, &.{ 0xd3 }, 3, .long },
-    .{ .rcr, .mi, &.{ .rm32, .imm8 }, &.{ 0xc1 }, 3, .none },
-    .{ .rcr, .mi, &.{ .rm64, .imm8 }, &.{ 0xc1 }, 3, .long },
-
-    .{ .rol, .m1, &.{ .rm8, .unity }, &.{ 0xd0 }, 0, .none },
-    .{ .rol, .m1, &.{ .rm8, .unity }, &.{ 0xd0 }, 0, .rex },
-    .{ .rol, .mc, &.{ .rm8, .cl }, &.{ 0xd2 }, 0, .none },
-    .{ .rol, .mc, &.{ .rm8, .cl }, &.{ 0xd2 }, 0, .rex },
-    .{ .rol, .mi, &.{ .rm8, .imm8 }, &.{ 0xc0 }, 0, .none },
-    .{ .rol, .mi, &.{ .rm8, .imm8 }, &.{ 0xc0 }, 0, .rex },
-    .{ .rol, .m1, &.{ .rm16, .unity }, &.{ 0xd1 }, 0, .none },
-    .{ .rol, .mc, &.{ .rm16, .cl }, &.{ 0xd3 }, 0, .none },
-    .{ .rol, .mi, &.{ .rm16, .imm8 }, &.{ 0xc1 }, 0, .none },
-    .{ .rol, .m1, &.{ .rm32, .unity }, &.{ 0xd1 }, 0, .none },
-    .{ .rol, .m1, &.{ .rm64, .unity }, &.{ 0xd1 }, 0, .long },
-    .{ .rol, .mc, &.{ .rm32, .cl }, &.{ 0xd3 }, 0, .none },
-    .{ .rol, .mc, &.{ .rm64, .cl }, &.{ 0xd3 }, 0, .long },
-    .{ .rol, .mi, &.{ .rm32, .imm8 }, &.{ 0xc1 }, 0, .none },
-    .{ .rol, .mi, &.{ .rm64, .imm8 }, &.{ 0xc1 }, 0, .long },
-
-    .{ .ror, .m1, &.{ .rm8, .unity }, &.{ 0xd0 }, 1, .none },
-    .{ .ror, .m1, &.{ .rm8, .unity }, &.{ 0xd0 }, 1, .rex },
-    .{ .ror, .mc, &.{ .rm8, .cl }, &.{ 0xd2 }, 1, .none },
-    .{ .ror, .mc, &.{ .rm8, .cl }, &.{ 0xd2 }, 1, .rex },
-    .{ .ror, .mi, &.{ .rm8, .imm8 }, &.{ 0xc0 }, 1, .none },
-    .{ .ror, .mi, &.{ .rm8, .imm8 }, &.{ 0xc0 }, 1, .rex },
-    .{ .ror, .m1, &.{ .rm16, .unity }, &.{ 0xd1 }, 1, .none },
-    .{ .ror, .mc, &.{ .rm16, .cl }, &.{ 0xd3 }, 1, .none },
-    .{ .ror, .mi, &.{ .rm16, .imm8 }, &.{ 0xc1 }, 1, .none },
-    .{ .ror, .m1, &.{ .rm32, .unity }, &.{ 0xd1 }, 1, .none },
-    .{ .ror, .m1, &.{ .rm64, .unity }, &.{ 0xd1 }, 1, .long },
-    .{ .ror, .mc, &.{ .rm32, .cl }, &.{ 0xd3 }, 1, .none },
-    .{ .ror, .mc, &.{ .rm64, .cl }, &.{ 0xd3 }, 1, .long },
-    .{ .ror, .mi, &.{ .rm32, .imm8 }, &.{ 0xc1 }, 1, .none },
-    .{ .ror, .mi, &.{ .rm64, .imm8 }, &.{ 0xc1 }, 1, .long },
-
-    .{ .sal, .m1, &.{ .rm8, .unity }, &.{ 0xd0 }, 4, .none },
-    .{ .sal, .m1, &.{ .rm8, .unity }, &.{ 0xd0 }, 4, .rex },
-    .{ .sal, .m1, &.{ .rm16, .unity }, &.{ 0xd1 }, 4, .none },
-    .{ .sal, .m1, &.{ .rm32, .unity }, &.{ 0xd1 }, 4, .none },
-    .{ .sal, .m1, &.{ .rm64, .unity }, &.{ 0xd1 }, 4, .long },
-    .{ .sal, .mc, &.{ .rm8, .cl }, &.{ 0xd2 }, 4, .none },
-    .{ .sal, .mc, &.{ .rm8, .cl }, &.{ 0xd2 }, 4, .rex },
-    .{ .sal, .mc, &.{ .rm16, .cl }, &.{ 0xd3 }, 4, .none },
-    .{ .sal, .mc, &.{ .rm32, .cl }, &.{ 0xd3 }, 4, .none },
-    .{ .sal, .mc, &.{ .rm64, .cl }, &.{ 0xd3 }, 4, .long },
-    .{ .sal, .mi, &.{ .rm8, .imm8 }, &.{ 0xc0 }, 4, .none },
-    .{ .sal, .mi, &.{ .rm8, .imm8 }, &.{ 0xc0 }, 4, .rex },
-    .{ .sal, .mi, &.{ .rm16, .imm8 }, &.{ 0xc1 }, 4, .none },
-    .{ .sal, .mi, &.{ .rm32, .imm8 }, &.{ 0xc1 }, 4, .none },
-    .{ .sal, .mi, &.{ .rm64, .imm8 }, &.{ 0xc1 }, 4, .long },
-
-    .{ .sar, .m1, &.{ .rm8, .unity }, &.{ 0xd0 }, 7, .none },
-    .{ .sar, .m1, &.{ .rm8, .unity }, &.{ 0xd0 }, 7, .rex },
-    .{ .sar, .m1, &.{ .rm16, .unity }, &.{ 0xd1 }, 7, .none },
-    .{ .sar, .m1, &.{ .rm32, .unity }, &.{ 0xd1 }, 7, .none },
-    .{ .sar, .m1, &.{ .rm64, .unity }, &.{ 0xd1 }, 7, .long },
-    .{ .sar, .mc, &.{ .rm8, .cl }, &.{ 0xd2 }, 7, .none },
-    .{ .sar, .mc, &.{ .rm8, .cl }, &.{ 0xd2 }, 7, .rex },
-    .{ .sar, .mc, &.{ .rm16, .cl }, &.{ 0xd3 }, 7, .none },
-    .{ .sar, .mc, &.{ .rm32, .cl }, &.{ 0xd3 }, 7, .none },
-    .{ .sar, .mc, &.{ .rm64, .cl }, &.{ 0xd3 }, 7, .long },
-    .{ .sar, .mi, &.{ .rm8, .imm8 }, &.{ 0xc0 }, 7, .none },
-    .{ .sar, .mi, &.{ .rm8, .imm8 }, &.{ 0xc0 }, 7, .rex },
-    .{ .sar, .mi, &.{ .rm16, .imm8 }, &.{ 0xc1 }, 7, .none },
-    .{ .sar, .mi, &.{ .rm32, .imm8 }, &.{ 0xc1 }, 7, .none },
-    .{ .sar, .mi, &.{ .rm64, .imm8 }, &.{ 0xc1 }, 7, .long },
-
-    .{ .sbb, .zi, &.{ .al, .imm8 }, &.{ 0x1c }, 0, .none },
-    .{ .sbb, .zi, &.{ .ax, .imm16 }, &.{ 0x1d }, 0, .none },
-    .{ .sbb, .zi, &.{ .eax, .imm32 }, &.{ 0x1d }, 0, .none },
-    .{ .sbb, .zi, &.{ .rax, .imm32s }, &.{ 0x1d }, 0, .long },
-    .{ .sbb, .mi, &.{ .rm8, .imm8 }, &.{ 0x80 }, 3, .none },
-    .{ .sbb, .mi, &.{ .rm8, .imm8 }, &.{ 0x80 }, 3, .rex },
-    .{ .sbb, .mi, &.{ .rm16, .imm16 }, &.{ 0x81 }, 3, .none },
-    .{ .sbb, .mi, &.{ .rm32, .imm32 }, &.{ 0x81 }, 3, .none },
-    .{ .sbb, .mi, &.{ .rm64, .imm32s }, &.{ 0x81 }, 3, .long },
-    .{ .sbb, .mi, &.{ .rm16, .imm8s }, &.{ 0x83 }, 3, .none },
-    .{ .sbb, .mi, &.{ .rm32, .imm8s }, &.{ 0x83 }, 3, .none },
-    .{ .sbb, .mi, &.{ .rm64, .imm8s }, &.{ 0x83 }, 3, .long },
-    .{ .sbb, .mr, &.{ .rm8, .r8 }, &.{ 0x18 }, 0, .none },
-    .{ .sbb, .mr, &.{ .rm8, .r8 }, &.{ 0x18 }, 0, .rex },
-    .{ .sbb, .mr, &.{ .rm16, .r16 }, &.{ 0x19 }, 0, .none },
-    .{ .sbb, .mr, &.{ .rm32, .r32 }, &.{ 0x19 }, 0, .none },
-    .{ .sbb, .mr, &.{ .rm64, .r64 }, &.{ 0x19 }, 0, .long },
-    .{ .sbb, .rm, &.{ .r8, .rm8 }, &.{ 0x1a }, 0, .none },
-    .{ .sbb, .rm, &.{ .r8, .rm8 }, &.{ 0x1a }, 0, .rex },
-    .{ .sbb, .rm, &.{ .r16, .rm16 }, &.{ 0x1b }, 0, .none },
-    .{ .sbb, .rm, &.{ .r32, .rm32 }, &.{ 0x1b }, 0, .none },
-    .{ .sbb, .rm, &.{ .r64, .rm64 }, &.{ 0x1b }, 0, .long },
-
-    .{ .scas, .np, &.{ .m8 }, &.{ 0xae }, 0, .none },
-    .{ .scas, .np, &.{ .m16 }, &.{ 0xaf }, 0, .none },
-    .{ .scas, .np, &.{ .m32 }, &.{ 0xaf }, 0, .none },
-    .{ .scas, .np, &.{ .m64 }, &.{ 0xaf }, 0, .long },
-
-    .{ .scasb, .np, &.{}, &.{ 0xae }, 0, .none },
-    .{ .scasw, .np, &.{}, &.{ 0xaf }, 0, .short },
-    .{ .scasd, .np, &.{}, &.{ 0xaf }, 0, .none },
-    .{ .scasq, .np, &.{}, &.{ 0xaf }, 0, .long },
-
-    .{ .seta, .m, &.{ .rm8 }, &.{ 0x0f, 0x97 }, 0, .none },
-    .{ .seta, .m, &.{ .rm8 }, &.{ 0x0f, 0x97 }, 0, .rex },
-    .{ .setae, .m, &.{ .rm8 }, &.{ 0x0f, 0x93 }, 0, .none },
-    .{ .setae, .m, &.{ .rm8 }, &.{ 0x0f, 0x93 }, 0, .rex },
-    .{ .setb, .m, &.{ .rm8 }, &.{ 0x0f, 0x92 }, 0, .none },
-    .{ .setb, .m, &.{ .rm8 }, &.{ 0x0f, 0x92 }, 0, .rex },
-    .{ .setbe, .m, &.{ .rm8 }, &.{ 0x0f, 0x96 }, 0, .none },
-    .{ .setbe, .m, &.{ .rm8 }, &.{ 0x0f, 0x96 }, 0, .rex },
-    .{ .setc, .m, &.{ .rm8 }, &.{ 0x0f, 0x92 }, 0, .none },
-    .{ .setc, .m, &.{ .rm8 }, &.{ 0x0f, 0x92 }, 0, .rex },
-    .{ .sete, .m, &.{ .rm8 }, &.{ 0x0f, 0x94 }, 0, .none },
-    .{ .sete, .m, &.{ .rm8 }, &.{ 0x0f, 0x94 }, 0, .rex },
-    .{ .setg, .m, &.{ .rm8 }, &.{ 0x0f, 0x9f }, 0, .none },
-    .{ .setg, .m, &.{ .rm8 }, &.{ 0x0f, 0x9f }, 0, .rex },
-    .{ .setge, .m, &.{ .rm8 }, &.{ 0x0f, 0x9d }, 0, .none },
-    .{ .setge, .m, &.{ .rm8 }, &.{ 0x0f, 0x9d }, 0, .rex },
-    .{ .setl, .m, &.{ .rm8 }, &.{ 0x0f, 0x9c }, 0, .none },
-    .{ .setl, .m, &.{ .rm8 }, &.{ 0x0f, 0x9c }, 0, .rex },
-    .{ .setle, .m, &.{ .rm8 }, &.{ 0x0f, 0x9e }, 0, .none },
-    .{ .setle, .m, &.{ .rm8 }, &.{ 0x0f, 0x9e }, 0, .rex },
-    .{ .setna, .m, &.{ .rm8 }, &.{ 0x0f, 0x96 }, 0, .none },
-    .{ .setna, .m, &.{ .rm8 }, &.{ 0x0f, 0x96 }, 0, .rex },
-    .{ .setnae, .m, &.{ .rm8 }, &.{ 0x0f, 0x92 }, 0, .none },
-    .{ .setnae, .m, &.{ .rm8 }, &.{ 0x0f, 0x92 }, 0, .rex },
-    .{ .setnb, .m, &.{ .rm8 }, &.{ 0x0f, 0x93 }, 0, .none },
-    .{ .setnb, .m, &.{ .rm8 }, &.{ 0x0f, 0x93 }, 0, .rex },
-    .{ .setnbe, .m, &.{ .rm8 }, &.{ 0x0f, 0x97 }, 0, .none },
-    .{ .setnbe, .m, &.{ .rm8 }, &.{ 0x0f, 0x97 }, 0, .rex },
-    .{ .setnc, .m, &.{ .rm8 }, &.{ 0x0f, 0x93 }, 0, .none },
-    .{ .setnc, .m, &.{ .rm8 }, &.{ 0x0f, 0x93 }, 0, .rex },
-    .{ .setne, .m, &.{ .rm8 }, &.{ 0x0f, 0x95 }, 0, .none },
-    .{ .setne, .m, &.{ .rm8 }, &.{ 0x0f, 0x95 }, 0, .rex },
-    .{ .setng, .m, &.{ .rm8 }, &.{ 0x0f, 0x9e }, 0, .none },
-    .{ .setng, .m, &.{ .rm8 }, &.{ 0x0f, 0x9e }, 0, .rex },
-    .{ .setnge, .m, &.{ .rm8 }, &.{ 0x0f, 0x9c }, 0, .none },
-    .{ .setnge, .m, &.{ .rm8 }, &.{ 0x0f, 0x9c }, 0, .rex },
-    .{ .setnl, .m, &.{ .rm8 }, &.{ 0x0f, 0x9d }, 0, .none },
-    .{ .setnl, .m, &.{ .rm8 }, &.{ 0x0f, 0x9d }, 0, .rex },
-    .{ .setnle, .m, &.{ .rm8 }, &.{ 0x0f, 0x9f }, 0, .none },
-    .{ .setnle, .m, &.{ .rm8 }, &.{ 0x0f, 0x9f }, 0, .rex },
-    .{ .setno, .m, &.{ .rm8 }, &.{ 0x0f, 0x91 }, 0, .none },
-    .{ .setno, .m, &.{ .rm8 }, &.{ 0x0f, 0x91 }, 0, .rex },
-    .{ .setnp, .m, &.{ .rm8 }, &.{ 0x0f, 0x9b }, 0, .none },
-    .{ .setnp, .m, &.{ .rm8 }, &.{ 0x0f, 0x9b }, 0, .rex },
-    .{ .setns, .m, &.{ .rm8 }, &.{ 0x0f, 0x99 }, 0, .none },
-    .{ .setns, .m, &.{ .rm8 }, &.{ 0x0f, 0x99 }, 0, .rex },
-    .{ .setnz, .m, &.{ .rm8 }, &.{ 0x0f, 0x95 }, 0, .none },
-    .{ .setnz, .m, &.{ .rm8 }, &.{ 0x0f, 0x95 }, 0, .rex },
-    .{ .seto, .m, &.{ .rm8 }, &.{ 0x0f, 0x90 }, 0, .none },
-    .{ .seto, .m, &.{ .rm8 }, &.{ 0x0f, 0x90 }, 0, .rex },
-    .{ .setp, .m, &.{ .rm8 }, &.{ 0x0f, 0x9a }, 0, .none },
-    .{ .setp, .m, &.{ .rm8 }, &.{ 0x0f, 0x9a }, 0, .rex },
-    .{ .setpe, .m, &.{ .rm8 }, &.{ 0x0f, 0x9a }, 0, .none },
-    .{ .setpe, .m, &.{ .rm8 }, &.{ 0x0f, 0x9a }, 0, .rex },
-    .{ .setpo, .m, &.{ .rm8 }, &.{ 0x0f, 0x9b }, 0, .none },
-    .{ .setpo, .m, &.{ .rm8 }, &.{ 0x0f, 0x9b }, 0, .rex },
-    .{ .sets, .m, &.{ .rm8 }, &.{ 0x0f, 0x98 }, 0, .none },
-    .{ .sets, .m, &.{ .rm8 }, &.{ 0x0f, 0x98 }, 0, .rex },
-    .{ .setz, .m, &.{ .rm8 }, &.{ 0x0f, 0x94 }, 0, .none },
-    .{ .setz, .m, &.{ .rm8 }, &.{ 0x0f, 0x94 }, 0, .rex },
-
-    .{ .sfence, .np, &.{}, &.{ 0x0f, 0xae, 0xf8 }, 0, .none },
-
-    .{ .shl, .m1, &.{ .rm8, .unity }, &.{ 0xd0 }, 4, .none },
-    .{ .shl, .m1, &.{ .rm8, .unity }, &.{ 0xd0 }, 4, .rex },
-    .{ .shl, .m1, &.{ .rm16, .unity }, &.{ 0xd1 }, 4, .none },
-    .{ .shl, .m1, &.{ .rm32, .unity }, &.{ 0xd1 }, 4, .none },
-    .{ .shl, .m1, &.{ .rm64, .unity }, &.{ 0xd1 }, 4, .long },
-    .{ .shl, .mc, &.{ .rm8, .cl }, &.{ 0xd2 }, 4, .none },
-    .{ .shl, .mc, &.{ .rm8, .cl }, &.{ 0xd2 }, 4, .rex },
-    .{ .shl, .mc, &.{ .rm16, .cl }, &.{ 0xd3 }, 4, .none },
-    .{ .shl, .mc, &.{ .rm32, .cl }, &.{ 0xd3 }, 4, .none },
-    .{ .shl, .mc, &.{ .rm64, .cl }, &.{ 0xd3 }, 4, .long },
-    .{ .shl, .mi, &.{ .rm8, .imm8 }, &.{ 0xc0 }, 4, .none },
-    .{ .shl, .mi, &.{ .rm8, .imm8 }, &.{ 0xc0 }, 4, .rex },
-    .{ .shl, .mi, &.{ .rm16, .imm8 }, &.{ 0xc1 }, 4, .none },
-    .{ .shl, .mi, &.{ .rm32, .imm8 }, &.{ 0xc1 }, 4, .none },
-    .{ .shl, .mi, &.{ .rm64, .imm8 }, &.{ 0xc1 }, 4, .long },
-
-    .{ .shld, .mri, &.{ .rm16, .r16, .imm8 }, &.{ 0x0f, 0xa4 }, 0, .none },
-    .{ .shld, .mrc, &.{ .rm16, .r16, .cl }, &.{ 0x0f, 0xa5 }, 0, .none },
-    .{ .shld, .mri, &.{ .rm32, .r32, .imm8 }, &.{ 0x0f, 0xa4 }, 0, .none },
-    .{ .shld, .mri, &.{ .rm64, .r64, .imm8 }, &.{ 0x0f, 0xa4 }, 0, .long },
-    .{ .shld, .mrc, &.{ .rm32, .r32, .cl }, &.{ 0x0f, 0xa5 }, 0, .none },
-    .{ .shld, .mrc, &.{ .rm64, .r64, .cl }, &.{ 0x0f, 0xa5 }, 0, .long },
-
-    .{ .shr, .m1, &.{ .rm8, .unity }, &.{ 0xd0 }, 5, .none },
-    .{ .shr, .m1, &.{ .rm8, .unity }, &.{ 0xd0 }, 5, .rex },
-    .{ .shr, .m1, &.{ .rm16, .unity }, &.{ 0xd1 }, 5, .none },
-    .{ .shr, .m1, &.{ .rm32, .unity }, &.{ 0xd1 }, 5, .none },
-    .{ .shr, .m1, &.{ .rm64, .unity }, &.{ 0xd1 }, 5, .long },
-    .{ .shr, .mc, &.{ .rm8, .cl }, &.{ 0xd2 }, 5, .none },
-    .{ .shr, .mc, &.{ .rm8, .cl }, &.{ 0xd2 }, 5, .rex },
-    .{ .shr, .mc, &.{ .rm16, .cl }, &.{ 0xd3 }, 5, .none },
-    .{ .shr, .mc, &.{ .rm32, .cl }, &.{ 0xd3 }, 5, .none },
-    .{ .shr, .mc, &.{ .rm64, .cl }, &.{ 0xd3 }, 5, .long },
-    .{ .shr, .mi, &.{ .rm8, .imm8 }, &.{ 0xc0 }, 5, .none },
-    .{ .shr, .mi, &.{ .rm8, .imm8 }, &.{ 0xc0 }, 5, .rex },
-    .{ .shr, .mi, &.{ .rm16, .imm8 }, &.{ 0xc1 }, 5, .none },
-    .{ .shr, .mi, &.{ .rm32, .imm8 }, &.{ 0xc1 }, 5, .none },
-    .{ .shr, .mi, &.{ .rm64, .imm8 }, &.{ 0xc1 }, 5, .long },
-
-    .{ .shrd, .mri, &.{ .rm16, .r16, .imm8 }, &.{ 0x0f, 0xac }, 0, .none },
-    .{ .shrd, .mrc, &.{ .rm16, .r16, .cl }, &.{ 0x0f, 0xad }, 0, .none },
-    .{ .shrd, .mri, &.{ .rm32, .r32, .imm8 }, &.{ 0x0f, 0xac }, 0, .none },
-    .{ .shrd, .mri, &.{ .rm64, .r64, .imm8 }, &.{ 0x0f, 0xac }, 0, .long },
-    .{ .shrd, .mrc, &.{ .rm32, .r32, .cl }, &.{ 0x0f, 0xad }, 0, .none },
-    .{ .shrd, .mrc, &.{ .rm64, .r64, .cl }, &.{ 0x0f, 0xad }, 0, .long },
-
-    .{ .stos, .np, &.{ .m8 }, &.{ 0xaa }, 0, .none },
-    .{ .stos, .np, &.{ .m16 }, &.{ 0xab }, 0, .none },
-    .{ .stos, .np, &.{ .m32 }, &.{ 0xab }, 0, .none },
-    .{ .stos, .np, &.{ .m64 }, &.{ 0xab }, 0, .long },
-
-    .{ .stosb, .np, &.{}, &.{ 0xaa }, 0, .none },
-    .{ .stosw, .np, &.{}, &.{ 0xab }, 0, .short },
-    .{ .stosd, .np, &.{}, &.{ 0xab }, 0, .none },
-    .{ .stosq, .np, &.{}, &.{ 0xab }, 0, .long },
-
-    .{ .sub, .zi, &.{ .al, .imm8 }, &.{ 0x2c }, 0, .none },
-    .{ .sub, .zi, &.{ .ax, .imm16 }, &.{ 0x2d }, 0, .none },
-    .{ .sub, .zi, &.{ .eax, .imm32 }, &.{ 0x2d }, 0, .none },
-    .{ .sub, .zi, &.{ .rax, .imm32s }, &.{ 0x2d }, 0, .long },
-    .{ .sub, .mi, &.{ .rm8, .imm8 }, &.{ 0x80 }, 5, .none },
-    .{ .sub, .mi, &.{ .rm8, .imm8 }, &.{ 0x80 }, 5, .rex },
-    .{ .sub, .mi, &.{ .rm16, .imm16 }, &.{ 0x81 }, 5, .none },
-    .{ .sub, .mi, &.{ .rm32, .imm32 }, &.{ 0x81 }, 5, .none },
-    .{ .sub, .mi, &.{ .rm64, .imm32s }, &.{ 0x81 }, 5, .long },
-    .{ .sub, .mi, &.{ .rm16, .imm8s }, &.{ 0x83 }, 5, .none },
-    .{ .sub, .mi, &.{ .rm32, .imm8s }, &.{ 0x83 }, 5, .none },
-    .{ .sub, .mi, &.{ .rm64, .imm8s }, &.{ 0x83 }, 5, .long },
-    .{ .sub, .mr, &.{ .rm8, .r8 }, &.{ 0x28 }, 0, .none },
-    .{ .sub, .mr, &.{ .rm8, .r8 }, &.{ 0x28 }, 0, .rex },
-    .{ .sub, .mr, &.{ .rm16, .r16 }, &.{ 0x29 }, 0, .none },
-    .{ .sub, .mr, &.{ .rm32, .r32 }, &.{ 0x29 }, 0, .none },
-    .{ .sub, .mr, &.{ .rm64, .r64 }, &.{ 0x29 }, 0, .long },
-    .{ .sub, .rm, &.{ .r8, .rm8 }, &.{ 0x2a }, 0, .none },
-    .{ .sub, .rm, &.{ .r8, .rm8 }, &.{ 0x2a }, 0, .rex },
-    .{ .sub, .rm, &.{ .r16, .rm16 }, &.{ 0x2b }, 0, .none },
-    .{ .sub, .rm, &.{ .r32, .rm32 }, &.{ 0x2b }, 0, .none },
-    .{ .sub, .rm, &.{ .r64, .rm64 }, &.{ 0x2b }, 0, .long },
-
-    .{ .syscall, .np, &.{}, &.{ 0x0f, 0x05 }, 0, .none },
-
-    .{ .@"test", .zi, &.{ .al, .imm8 }, &.{ 0xa8 }, 0, .none },
-    .{ .@"test", .zi, &.{ .ax, .imm16 }, &.{ 0xa9 }, 0, .none },
-    .{ .@"test", .zi, &.{ .eax, .imm32 }, &.{ 0xa9 }, 0, .none },
-    .{ .@"test", .zi, &.{ .rax, .imm32s }, &.{ 0xa9 }, 0, .long },
-    .{ .@"test", .mi, &.{ .rm8, .imm8 }, &.{ 0xf6 }, 0, .none },
-    .{ .@"test", .mi, &.{ .rm8, .imm8 }, &.{ 0xf6 }, 0, .rex },
-    .{ .@"test", .mi, &.{ .rm16, .imm16 }, &.{ 0xf7 }, 0, .none },
-    .{ .@"test", .mi, &.{ .rm32, .imm32 }, &.{ 0xf7 }, 0, .none },
-    .{ .@"test", .mi, &.{ .rm64, .imm32s }, &.{ 0xf7 }, 0, .long },
-    .{ .@"test", .mr, &.{ .rm8, .r8 }, &.{ 0x84 }, 0, .none },
-    .{ .@"test", .mr, &.{ .rm8, .r8 }, &.{ 0x84 }, 0, .rex },
-    .{ .@"test", .mr, &.{ .rm16, .r16 }, &.{ 0x85 }, 0, .none },
-    .{ .@"test", .mr, &.{ .rm32, .r32 }, &.{ 0x85 }, 0, .none },
-    .{ .@"test", .mr, &.{ .rm64, .r64 }, &.{ 0x85 }, 0, .long },
-
-    .{ .tzcnt, .rm, &.{ .r16, .rm16 }, &.{ 0xf3, 0x0f, 0xbc }, 0, .none },
-    .{ .tzcnt, .rm, &.{ .r32, .rm32 }, &.{ 0xf3, 0x0f, 0xbc }, 0, .none },
-    .{ .tzcnt, .rm, &.{ .r64, .rm64 }, &.{ 0xf3, 0x0f, 0xbc }, 0, .long },
-
-    .{ .ud2, .np, &.{}, &.{ 0x0f, 0x0b }, 0, .none },
-
-    .{ .xadd, .mr, &.{ .rm8, .r8 }, &.{ 0x0f, 0xc0 }, 0, .none },
-    .{ .xadd, .mr, &.{ .rm8, .r8 }, &.{ 0x0f, 0xc0 }, 0, .rex },
-    .{ .xadd, .mr, &.{ .rm16, .r16 }, &.{ 0x0f, 0xc1 }, 0, .none },
-    .{ .xadd, .mr, &.{ .rm32, .r32 }, &.{ 0x0f, 0xc1 }, 0, .none },
-    .{ .xadd, .mr, &.{ .rm64, .r64 }, &.{ 0x0f, 0xc1 }, 0, .long },
-
-    .{ .xchg, .o, &.{ .ax, .r16 }, &.{ 0x90 }, 0, .none },
-    .{ .xchg, .o, &.{ .r16, .ax }, &.{ 0x90 }, 0, .none },
-    .{ .xchg, .o, &.{ .eax, .r32 }, &.{ 0x90 }, 0, .none },
-    .{ .xchg, .o, &.{ .rax, .r64 }, &.{ 0x90 }, 0, .long },
-    .{ .xchg, .o, &.{ .r32, .eax }, &.{ 0x90 }, 0, .none },
-    .{ .xchg, .o, &.{ .r64, .rax }, &.{ 0x90 }, 0, .long },
-    .{ .xchg, .mr, &.{ .rm8, .r8 }, &.{ 0x86 }, 0, .none },
-    .{ .xchg, .mr, &.{ .rm8, .r8 }, &.{ 0x86 }, 0, .rex },
-    .{ .xchg, .rm, &.{ .r8, .rm8 }, &.{ 0x86 }, 0, .none },
-    .{ .xchg, .rm, &.{ .r8, .rm8 }, &.{ 0x86 }, 0, .rex },
-    .{ .xchg, .mr, &.{ .rm16, .r16 }, &.{ 0x87 }, 0, .none },
-    .{ .xchg, .rm, &.{ .r16, .rm16 }, &.{ 0x87 }, 0, .none },
-    .{ .xchg, .mr, &.{ .rm32, .r32 }, &.{ 0x87 }, 0, .none },
-    .{ .xchg, .mr, &.{ .rm64, .r64 }, &.{ 0x87 }, 0, .long },
-    .{ .xchg, .rm, &.{ .r32, .rm32 }, &.{ 0x87 }, 0, .none },
-    .{ .xchg, .rm, &.{ .r64, .rm64 }, &.{ 0x87 }, 0, .long },
-
-    .{ .xor, .zi, &.{ .al, .imm8 }, &.{ 0x34 }, 0, .none },
-    .{ .xor, .zi, &.{ .ax, .imm16 }, &.{ 0x35 }, 0, .none },
-    .{ .xor, .zi, &.{ .eax, .imm32 }, &.{ 0x35 }, 0, .none },
-    .{ .xor, .zi, &.{ .rax, .imm32s }, &.{ 0x35 }, 0, .long },
-    .{ .xor, .mi, &.{ .rm8, .imm8 }, &.{ 0x80 }, 6, .none },
-    .{ .xor, .mi, &.{ .rm8, .imm8 }, &.{ 0x80 }, 6, .rex },
-    .{ .xor, .mi, &.{ .rm16, .imm16 }, &.{ 0x81 }, 6, .none },
-    .{ .xor, .mi, &.{ .rm32, .imm32 }, &.{ 0x81 }, 6, .none },
-    .{ .xor, .mi, &.{ .rm64, .imm32s }, &.{ 0x81 }, 6, .long },
-    .{ .xor, .mi, &.{ .rm16, .imm8s }, &.{ 0x83 }, 6, .none },
-    .{ .xor, .mi, &.{ .rm32, .imm8s }, &.{ 0x83 }, 6, .none },
-    .{ .xor, .mi, &.{ .rm64, .imm8s }, &.{ 0x83 }, 6, .long },
-    .{ .xor, .mr, &.{ .rm8, .r8 }, &.{ 0x30 }, 0, .none },
-    .{ .xor, .mr, &.{ .rm8, .r8 }, &.{ 0x30 }, 0, .rex },
-    .{ .xor, .mr, &.{ .rm16, .r16 }, &.{ 0x31 }, 0, .none },
-    .{ .xor, .mr, &.{ .rm32, .r32 }, &.{ 0x31 }, 0, .none },
-    .{ .xor, .mr, &.{ .rm64, .r64 }, &.{ 0x31 }, 0, .long },
-    .{ .xor, .rm, &.{ .r8, .rm8 }, &.{ 0x32 }, 0, .none },
-    .{ .xor, .rm, &.{ .r8, .rm8 }, &.{ 0x32 }, 0, .rex },
-    .{ .xor, .rm, &.{ .r16, .rm16 }, &.{ 0x33 }, 0, .none },
-    .{ .xor, .rm, &.{ .r32, .rm32 }, &.{ 0x33 }, 0, .none },
-    .{ .xor, .rm, &.{ .r64, .rm64 }, &.{ 0x33 }, 0, .long },
+    .{ .movsxd, .rm, &.{ .r32, .rm32 }, &.{ 0x63 }, 0, .none, .none },
+    .{ .movsxd, .rm, &.{ .r64, .rm32 }, &.{ 0x63 }, 0, .long, .none },
+
+    .{ .movzx, .rm, &.{ .r16, .rm8 }, &.{ 0x0f, 0xb6 }, 0, .none, .none },
+    .{ .movzx, .rm, &.{ .r32, .rm8 }, &.{ 0x0f, 0xb6 }, 0, .none, .none },
+    .{ .movzx, .rm, &.{ .r64, .rm8 }, &.{ 0x0f, 0xb6 }, 0, .long, .none },
+    .{ .movzx, .rm, &.{ .r32, .rm16 }, &.{ 0x0f, 0xb7 }, 0, .none, .none },
+    .{ .movzx, .rm, &.{ .r64, .rm16 }, &.{ 0x0f, 0xb7 }, 0, .long, .none },
+
+    .{ .mul, .m, &.{ .rm8 }, &.{ 0xf6 }, 4, .none, .none },
+    .{ .mul, .m, &.{ .rm8 }, &.{ 0xf6 }, 4, .rex, .none },
+    .{ .mul, .m, &.{ .rm16 }, &.{ 0xf7 }, 4, .none, .none },
+    .{ .mul, .m, &.{ .rm32 }, &.{ 0xf7 }, 4, .none, .none },
+    .{ .mul, .m, &.{ .rm64 }, &.{ 0xf7 }, 4, .long, .none },
+
+    .{ .neg, .m, &.{ .rm8 }, &.{ 0xf6 }, 3, .none, .none },
+    .{ .neg, .m, &.{ .rm8 }, &.{ 0xf6 }, 3, .rex, .none },
+    .{ .neg, .m, &.{ .rm16 }, &.{ 0xf7 }, 3, .none, .none },
+    .{ .neg, .m, &.{ .rm32 }, &.{ 0xf7 }, 3, .none, .none },
+    .{ .neg, .m, &.{ .rm64 }, &.{ 0xf7 }, 3, .long, .none },
+
+    .{ .nop, .np, &.{}, &.{ 0x90 }, 0, .none, .none },
+
+    .{ .not, .m, &.{ .rm8 }, &.{ 0xf6 }, 2, .none, .none },
+    .{ .not, .m, &.{ .rm8 }, &.{ 0xf6 }, 2, .rex, .none },
+    .{ .not, .m, &.{ .rm16 }, &.{ 0xf7 }, 2, .none, .none },
+    .{ .not, .m, &.{ .rm32 }, &.{ 0xf7 }, 2, .none, .none },
+    .{ .not, .m, &.{ .rm64 }, &.{ 0xf7 }, 2, .long, .none },
+
+    .{ .@"or", .zi, &.{ .al, .imm8 }, &.{ 0x0c }, 0, .none, .none },
+    .{ .@"or", .zi, &.{ .ax, .imm16 }, &.{ 0x0d }, 0, .none, .none },
+    .{ .@"or", .zi, &.{ .eax, .imm32 }, &.{ 0x0d }, 0, .none, .none },
+    .{ .@"or", .zi, &.{ .rax, .imm32s }, &.{ 0x0d }, 0, .long, .none },
+    .{ .@"or", .mi, &.{ .rm8, .imm8 }, &.{ 0x80 }, 1, .none, .none },
+    .{ .@"or", .mi, &.{ .rm8, .imm8 }, &.{ 0x80 }, 1, .rex, .none },
+    .{ .@"or", .mi, &.{ .rm16, .imm16 }, &.{ 0x81 }, 1, .none, .none },
+    .{ .@"or", .mi, &.{ .rm32, .imm32 }, &.{ 0x81 }, 1, .none, .none },
+    .{ .@"or", .mi, &.{ .rm64, .imm32s }, &.{ 0x81 }, 1, .long, .none },
+    .{ .@"or", .mi, &.{ .rm16, .imm8s }, &.{ 0x83 }, 1, .none, .none },
+    .{ .@"or", .mi, &.{ .rm32, .imm8s }, &.{ 0x83 }, 1, .none, .none },
+    .{ .@"or", .mi, &.{ .rm64, .imm8s }, &.{ 0x83 }, 1, .long, .none },
+    .{ .@"or", .mr, &.{ .rm8, .r8 }, &.{ 0x08 }, 0, .none, .none },
+    .{ .@"or", .mr, &.{ .rm8, .r8 }, &.{ 0x08 }, 0, .rex, .none },
+    .{ .@"or", .mr, &.{ .rm16, .r16 }, &.{ 0x09 }, 0, .none, .none },
+    .{ .@"or", .mr, &.{ .rm32, .r32 }, &.{ 0x09 }, 0, .none, .none },
+    .{ .@"or", .mr, &.{ .rm64, .r64 }, &.{ 0x09 }, 0, .long, .none },
+    .{ .@"or", .rm, &.{ .r8, .rm8 }, &.{ 0x0a }, 0, .none, .none },
+    .{ .@"or", .rm, &.{ .r8, .rm8 }, &.{ 0x0a }, 0, .rex, .none },
+    .{ .@"or", .rm, &.{ .r16, .rm16 }, &.{ 0x0b }, 0, .none, .none },
+    .{ .@"or", .rm, &.{ .r32, .rm32 }, &.{ 0x0b }, 0, .none, .none },
+    .{ .@"or", .rm, &.{ .r64, .rm64 }, &.{ 0x0b }, 0, .long, .none },
+
+    .{ .pop, .o, &.{ .r16 }, &.{ 0x58 }, 0, .none, .none },
+    .{ .pop, .o, &.{ .r64 }, &.{ 0x58 }, 0, .none, .none },
+    .{ .pop, .m, &.{ .rm16 }, &.{ 0x8f }, 0, .none, .none },
+    .{ .pop, .m, &.{ .rm64 }, &.{ 0x8f }, 0, .none, .none },
+
+    .{ .popcnt, .rm, &.{ .r16, .rm16 }, &.{ 0xf3, 0x0f, 0xb8 }, 0, .none, .none },
+    .{ .popcnt, .rm, &.{ .r32, .rm32 }, &.{ 0xf3, 0x0f, 0xb8 }, 0, .none, .none },
+    .{ .popcnt, .rm, &.{ .r64, .rm64 }, &.{ 0xf3, 0x0f, 0xb8 }, 0, .long, .none },
+
+    .{ .push, .o, &.{ .r16 }, &.{ 0x50 }, 0, .none, .none },
+    .{ .push, .o, &.{ .r64 }, &.{ 0x50 }, 0, .none, .none },
+    .{ .push, .m, &.{ .rm16 }, &.{ 0xff }, 6, .none, .none },
+    .{ .push, .m, &.{ .rm64 }, &.{ 0xff }, 6, .none, .none },
+    .{ .push, .i, &.{ .imm8 }, &.{ 0x6a }, 0, .none, .none },
+    .{ .push, .i, &.{ .imm16 }, &.{ 0x68 }, 0, .none, .none },
+    .{ .push, .i, &.{ .imm32 }, &.{ 0x68 }, 0, .none, .none },
+
+    .{ .ret, .np, &.{}, &.{ 0xc3 }, 0, .none, .none },
+
+    .{ .rcl, .m1, &.{ .rm8, .unity }, &.{ 0xd0 }, 2, .none, .none },
+    .{ .rcl, .m1, &.{ .rm8, .unity }, &.{ 0xd0 }, 2, .rex, .none },
+    .{ .rcl, .mc, &.{ .rm8, .cl }, &.{ 0xd2 }, 2, .none, .none },
+    .{ .rcl, .mc, &.{ .rm8, .cl }, &.{ 0xd2 }, 2, .rex, .none },
+    .{ .rcl, .mi, &.{ .rm8, .imm8 }, &.{ 0xc0 }, 2, .none, .none },
+    .{ .rcl, .mi, &.{ .rm8, .imm8 }, &.{ 0xc0 }, 2, .rex, .none },
+    .{ .rcl, .m1, &.{ .rm16, .unity }, &.{ 0xd1 }, 2, .none, .none },
+    .{ .rcl, .mc, &.{ .rm16, .cl }, &.{ 0xd3 }, 2, .none, .none },
+    .{ .rcl, .mi, &.{ .rm16, .imm8 }, &.{ 0xc1 }, 2, .none, .none },
+    .{ .rcl, .m1, &.{ .rm32, .unity }, &.{ 0xd1 }, 2, .none, .none },
+    .{ .rcl, .m1, &.{ .rm64, .unity }, &.{ 0xd1 }, 2, .long, .none },
+    .{ .rcl, .mc, &.{ .rm32, .cl }, &.{ 0xd3 }, 2, .none, .none },
+    .{ .rcl, .mc, &.{ .rm64, .cl }, &.{ 0xd3 }, 2, .long, .none },
+    .{ .rcl, .mi, &.{ .rm32, .imm8 }, &.{ 0xc1 }, 2, .none, .none },
+    .{ .rcl, .mi, &.{ .rm64, .imm8 }, &.{ 0xc1 }, 2, .long, .none },
+
+    .{ .rcr, .m1, &.{ .rm8, .unity }, &.{ 0xd0 }, 3, .none, .none },
+    .{ .rcr, .m1, &.{ .rm8, .unity }, &.{ 0xd0 }, 3, .rex, .none },
+    .{ .rcr, .mc, &.{ .rm8, .cl }, &.{ 0xd2 }, 3, .none, .none },
+    .{ .rcr, .mc, &.{ .rm8, .cl }, &.{ 0xd2 }, 3, .rex, .none },
+    .{ .rcr, .mi, &.{ .rm8, .imm8 }, &.{ 0xc0 }, 3, .none, .none },
+    .{ .rcr, .mi, &.{ .rm8, .imm8 }, &.{ 0xc0 }, 3, .rex, .none },
+    .{ .rcr, .m1, &.{ .rm16, .unity }, &.{ 0xd1 }, 3, .none, .none },
+    .{ .rcr, .mc, &.{ .rm16, .cl }, &.{ 0xd3 }, 3, .none, .none },
+    .{ .rcr, .mi, &.{ .rm16, .imm8 }, &.{ 0xc1 }, 3, .none, .none },
+    .{ .rcr, .m1, &.{ .rm32, .unity }, &.{ 0xd1 }, 3, .none, .none },
+    .{ .rcr, .m1, &.{ .rm64, .unity }, &.{ 0xd1 }, 3, .long, .none },
+    .{ .rcr, .mc, &.{ .rm32, .cl }, &.{ 0xd3 }, 3, .none, .none },
+    .{ .rcr, .mc, &.{ .rm64, .cl }, &.{ 0xd3 }, 3, .long, .none },
+    .{ .rcr, .mi, &.{ .rm32, .imm8 }, &.{ 0xc1 }, 3, .none, .none },
+    .{ .rcr, .mi, &.{ .rm64, .imm8 }, &.{ 0xc1 }, 3, .long, .none },
+
+    .{ .rol, .m1, &.{ .rm8, .unity }, &.{ 0xd0 }, 0, .none, .none },
+    .{ .rol, .m1, &.{ .rm8, .unity }, &.{ 0xd0 }, 0, .rex, .none },
+    .{ .rol, .mc, &.{ .rm8, .cl }, &.{ 0xd2 }, 0, .none, .none },
+    .{ .rol, .mc, &.{ .rm8, .cl }, &.{ 0xd2 }, 0, .rex, .none },
+    .{ .rol, .mi, &.{ .rm8, .imm8 }, &.{ 0xc0 }, 0, .none, .none },
+    .{ .rol, .mi, &.{ .rm8, .imm8 }, &.{ 0xc0 }, 0, .rex, .none },
+    .{ .rol, .m1, &.{ .rm16, .unity }, &.{ 0xd1 }, 0, .none, .none },
+    .{ .rol, .mc, &.{ .rm16, .cl }, &.{ 0xd3 }, 0, .none, .none },
+    .{ .rol, .mi, &.{ .rm16, .imm8 }, &.{ 0xc1 }, 0, .none, .none },
+    .{ .rol, .m1, &.{ .rm32, .unity }, &.{ 0xd1 }, 0, .none, .none },
+    .{ .rol, .m1, &.{ .rm64, .unity }, &.{ 0xd1 }, 0, .long, .none },
+    .{ .rol, .mc, &.{ .rm32, .cl }, &.{ 0xd3 }, 0, .none, .none },
+    .{ .rol, .mc, &.{ .rm64, .cl }, &.{ 0xd3 }, 0, .long, .none },
+    .{ .rol, .mi, &.{ .rm32, .imm8 }, &.{ 0xc1 }, 0, .none, .none },
+    .{ .rol, .mi, &.{ .rm64, .imm8 }, &.{ 0xc1 }, 0, .long, .none },
+
+    .{ .ror, .m1, &.{ .rm8, .unity }, &.{ 0xd0 }, 1, .none, .none },
+    .{ .ror, .m1, &.{ .rm8, .unity }, &.{ 0xd0 }, 1, .rex, .none },
+    .{ .ror, .mc, &.{ .rm8, .cl }, &.{ 0xd2 }, 1, .none, .none },
+    .{ .ror, .mc, &.{ .rm8, .cl }, &.{ 0xd2 }, 1, .rex, .none },
+    .{ .ror, .mi, &.{ .rm8, .imm8 }, &.{ 0xc0 }, 1, .none, .none },
+    .{ .ror, .mi, &.{ .rm8, .imm8 }, &.{ 0xc0 }, 1, .rex, .none },
+    .{ .ror, .m1, &.{ .rm16, .unity }, &.{ 0xd1 }, 1, .none, .none },
+    .{ .ror, .mc, &.{ .rm16, .cl }, &.{ 0xd3 }, 1, .none, .none },
+    .{ .ror, .mi, &.{ .rm16, .imm8 }, &.{ 0xc1 }, 1, .none, .none },
+    .{ .ror, .m1, &.{ .rm32, .unity }, &.{ 0xd1 }, 1, .none, .none },
+    .{ .ror, .m1, &.{ .rm64, .unity }, &.{ 0xd1 }, 1, .long, .none },
+    .{ .ror, .mc, &.{ .rm32, .cl }, &.{ 0xd3 }, 1, .none, .none },
+    .{ .ror, .mc, &.{ .rm64, .cl }, &.{ 0xd3 }, 1, .long, .none },
+    .{ .ror, .mi, &.{ .rm32, .imm8 }, &.{ 0xc1 }, 1, .none, .none },
+    .{ .ror, .mi, &.{ .rm64, .imm8 }, &.{ 0xc1 }, 1, .long, .none },
+
+    .{ .sal, .m1, &.{ .rm8, .unity }, &.{ 0xd0 }, 4, .none, .none },
+    .{ .sal, .m1, &.{ .rm8, .unity }, &.{ 0xd0 }, 4, .rex, .none },
+    .{ .sal, .m1, &.{ .rm16, .unity }, &.{ 0xd1 }, 4, .none, .none },
+    .{ .sal, .m1, &.{ .rm32, .unity }, &.{ 0xd1 }, 4, .none, .none },
+    .{ .sal, .m1, &.{ .rm64, .unity }, &.{ 0xd1 }, 4, .long, .none },
+    .{ .sal, .mc, &.{ .rm8, .cl }, &.{ 0xd2 }, 4, .none, .none },
+    .{ .sal, .mc, &.{ .rm8, .cl }, &.{ 0xd2 }, 4, .rex, .none },
+    .{ .sal, .mc, &.{ .rm16, .cl }, &.{ 0xd3 }, 4, .none, .none },
+    .{ .sal, .mc, &.{ .rm32, .cl }, &.{ 0xd3 }, 4, .none, .none },
+    .{ .sal, .mc, &.{ .rm64, .cl }, &.{ 0xd3 }, 4, .long, .none },
+    .{ .sal, .mi, &.{ .rm8, .imm8 }, &.{ 0xc0 }, 4, .none, .none },
+    .{ .sal, .mi, &.{ .rm8, .imm8 }, &.{ 0xc0 }, 4, .rex, .none },
+    .{ .sal, .mi, &.{ .rm16, .imm8 }, &.{ 0xc1 }, 4, .none, .none },
+    .{ .sal, .mi, &.{ .rm32, .imm8 }, &.{ 0xc1 }, 4, .none, .none },
+    .{ .sal, .mi, &.{ .rm64, .imm8 }, &.{ 0xc1 }, 4, .long, .none },
+
+    .{ .sar, .m1, &.{ .rm8, .unity }, &.{ 0xd0 }, 7, .none, .none },
+    .{ .sar, .m1, &.{ .rm8, .unity }, &.{ 0xd0 }, 7, .rex, .none },
+    .{ .sar, .m1, &.{ .rm16, .unity }, &.{ 0xd1 }, 7, .none, .none },
+    .{ .sar, .m1, &.{ .rm32, .unity }, &.{ 0xd1 }, 7, .none, .none },
+    .{ .sar, .m1, &.{ .rm64, .unity }, &.{ 0xd1 }, 7, .long, .none },
+    .{ .sar, .mc, &.{ .rm8, .cl }, &.{ 0xd2 }, 7, .none, .none },
+    .{ .sar, .mc, &.{ .rm8, .cl }, &.{ 0xd2 }, 7, .rex, .none },
+    .{ .sar, .mc, &.{ .rm16, .cl }, &.{ 0xd3 }, 7, .none, .none },
+    .{ .sar, .mc, &.{ .rm32, .cl }, &.{ 0xd3 }, 7, .none, .none },
+    .{ .sar, .mc, &.{ .rm64, .cl }, &.{ 0xd3 }, 7, .long, .none },
+    .{ .sar, .mi, &.{ .rm8, .imm8 }, &.{ 0xc0 }, 7, .none, .none },
+    .{ .sar, .mi, &.{ .rm8, .imm8 }, &.{ 0xc0 }, 7, .rex, .none },
+    .{ .sar, .mi, &.{ .rm16, .imm8 }, &.{ 0xc1 }, 7, .none, .none },
+    .{ .sar, .mi, &.{ .rm32, .imm8 }, &.{ 0xc1 }, 7, .none, .none },
+    .{ .sar, .mi, &.{ .rm64, .imm8 }, &.{ 0xc1 }, 7, .long, .none },
+
+    .{ .sbb, .zi, &.{ .al, .imm8 }, &.{ 0x1c }, 0, .none, .none },
+    .{ .sbb, .zi, &.{ .ax, .imm16 }, &.{ 0x1d }, 0, .none, .none },
+    .{ .sbb, .zi, &.{ .eax, .imm32 }, &.{ 0x1d }, 0, .none, .none },
+    .{ .sbb, .zi, &.{ .rax, .imm32s }, &.{ 0x1d }, 0, .long, .none },
+    .{ .sbb, .mi, &.{ .rm8, .imm8 }, &.{ 0x80 }, 3, .none, .none },
+    .{ .sbb, .mi, &.{ .rm8, .imm8 }, &.{ 0x80 }, 3, .rex, .none },
+    .{ .sbb, .mi, &.{ .rm16, .imm16 }, &.{ 0x81 }, 3, .none, .none },
+    .{ .sbb, .mi, &.{ .rm32, .imm32 }, &.{ 0x81 }, 3, .none, .none },
+    .{ .sbb, .mi, &.{ .rm64, .imm32s }, &.{ 0x81 }, 3, .long, .none },
+    .{ .sbb, .mi, &.{ .rm16, .imm8s }, &.{ 0x83 }, 3, .none, .none },
+    .{ .sbb, .mi, &.{ .rm32, .imm8s }, &.{ 0x83 }, 3, .none, .none },
+    .{ .sbb, .mi, &.{ .rm64, .imm8s }, &.{ 0x83 }, 3, .long, .none },
+    .{ .sbb, .mr, &.{ .rm8, .r8 }, &.{ 0x18 }, 0, .none, .none },
+    .{ .sbb, .mr, &.{ .rm8, .r8 }, &.{ 0x18 }, 0, .rex, .none },
+    .{ .sbb, .mr, &.{ .rm16, .r16 }, &.{ 0x19 }, 0, .none, .none },
+    .{ .sbb, .mr, &.{ .rm32, .r32 }, &.{ 0x19 }, 0, .none, .none },
+    .{ .sbb, .mr, &.{ .rm64, .r64 }, &.{ 0x19 }, 0, .long, .none },
+    .{ .sbb, .rm, &.{ .r8, .rm8 }, &.{ 0x1a }, 0, .none, .none },
+    .{ .sbb, .rm, &.{ .r8, .rm8 }, &.{ 0x1a }, 0, .rex, .none },
+    .{ .sbb, .rm, &.{ .r16, .rm16 }, &.{ 0x1b }, 0, .none, .none },
+    .{ .sbb, .rm, &.{ .r32, .rm32 }, &.{ 0x1b }, 0, .none, .none },
+    .{ .sbb, .rm, &.{ .r64, .rm64 }, &.{ 0x1b }, 0, .long, .none },
+
+    .{ .scas, .np, &.{ .m8 }, &.{ 0xae }, 0, .none, .none },
+    .{ .scas, .np, &.{ .m16 }, &.{ 0xaf }, 0, .none, .none },
+    .{ .scas, .np, &.{ .m32 }, &.{ 0xaf }, 0, .none, .none },
+    .{ .scas, .np, &.{ .m64 }, &.{ 0xaf }, 0, .long, .none },
+
+    .{ .scasb, .np, &.{}, &.{ 0xae }, 0, .none, .none },
+    .{ .scasw, .np, &.{}, &.{ 0xaf }, 0, .short, .none },
+    .{ .scasd, .np, &.{}, &.{ 0xaf }, 0, .none, .none },
+    .{ .scasq, .np, &.{}, &.{ 0xaf }, 0, .long, .none },
+
+    .{ .seta, .m, &.{ .rm8 }, &.{ 0x0f, 0x97 }, 0, .none, .none },
+    .{ .seta, .m, &.{ .rm8 }, &.{ 0x0f, 0x97 }, 0, .rex, .none },
+    .{ .setae, .m, &.{ .rm8 }, &.{ 0x0f, 0x93 }, 0, .none, .none },
+    .{ .setae, .m, &.{ .rm8 }, &.{ 0x0f, 0x93 }, 0, .rex, .none },
+    .{ .setb, .m, &.{ .rm8 }, &.{ 0x0f, 0x92 }, 0, .none, .none },
+    .{ .setb, .m, &.{ .rm8 }, &.{ 0x0f, 0x92 }, 0, .rex, .none },
+    .{ .setbe, .m, &.{ .rm8 }, &.{ 0x0f, 0x96 }, 0, .none, .none },
+    .{ .setbe, .m, &.{ .rm8 }, &.{ 0x0f, 0x96 }, 0, .rex, .none },
+    .{ .setc, .m, &.{ .rm8 }, &.{ 0x0f, 0x92 }, 0, .none, .none },
+    .{ .setc, .m, &.{ .rm8 }, &.{ 0x0f, 0x92 }, 0, .rex, .none },
+    .{ .sete, .m, &.{ .rm8 }, &.{ 0x0f, 0x94 }, 0, .none, .none },
+    .{ .sete, .m, &.{ .rm8 }, &.{ 0x0f, 0x94 }, 0, .rex, .none },
+    .{ .setg, .m, &.{ .rm8 }, &.{ 0x0f, 0x9f }, 0, .none, .none },
+    .{ .setg, .m, &.{ .rm8 }, &.{ 0x0f, 0x9f }, 0, .rex, .none },
+    .{ .setge, .m, &.{ .rm8 }, &.{ 0x0f, 0x9d }, 0, .none, .none },
+    .{ .setge, .m, &.{ .rm8 }, &.{ 0x0f, 0x9d }, 0, .rex, .none },
+    .{ .setl, .m, &.{ .rm8 }, &.{ 0x0f, 0x9c }, 0, .none, .none },
+    .{ .setl, .m, &.{ .rm8 }, &.{ 0x0f, 0x9c }, 0, .rex, .none },
+    .{ .setle, .m, &.{ .rm8 }, &.{ 0x0f, 0x9e }, 0, .none, .none },
+    .{ .setle, .m, &.{ .rm8 }, &.{ 0x0f, 0x9e }, 0, .rex, .none },
+    .{ .setna, .m, &.{ .rm8 }, &.{ 0x0f, 0x96 }, 0, .none, .none },
+    .{ .setna, .m, &.{ .rm8 }, &.{ 0x0f, 0x96 }, 0, .rex, .none },
+    .{ .setnae, .m, &.{ .rm8 }, &.{ 0x0f, 0x92 }, 0, .none, .none },
+    .{
.setnae, .m, &.{ .rm8 }, &.{ 0x0f, 0x92 }, 0, .rex, .none }, + .{ .setnb, .m, &.{ .rm8 }, &.{ 0x0f, 0x93 }, 0, .none, .none }, + .{ .setnb, .m, &.{ .rm8 }, &.{ 0x0f, 0x93 }, 0, .rex, .none }, + .{ .setnbe, .m, &.{ .rm8 }, &.{ 0x0f, 0x97 }, 0, .none, .none }, + .{ .setnbe, .m, &.{ .rm8 }, &.{ 0x0f, 0x97 }, 0, .rex, .none }, + .{ .setnc, .m, &.{ .rm8 }, &.{ 0x0f, 0x93 }, 0, .none, .none }, + .{ .setnc, .m, &.{ .rm8 }, &.{ 0x0f, 0x93 }, 0, .rex, .none }, + .{ .setne, .m, &.{ .rm8 }, &.{ 0x0f, 0x95 }, 0, .none, .none }, + .{ .setne, .m, &.{ .rm8 }, &.{ 0x0f, 0x95 }, 0, .rex, .none }, + .{ .setng, .m, &.{ .rm8 }, &.{ 0x0f, 0x9e }, 0, .none, .none }, + .{ .setng, .m, &.{ .rm8 }, &.{ 0x0f, 0x9e }, 0, .rex, .none }, + .{ .setnge, .m, &.{ .rm8 }, &.{ 0x0f, 0x9c }, 0, .none, .none }, + .{ .setnge, .m, &.{ .rm8 }, &.{ 0x0f, 0x9c }, 0, .rex, .none }, + .{ .setnl, .m, &.{ .rm8 }, &.{ 0x0f, 0x9d }, 0, .none, .none }, + .{ .setnl, .m, &.{ .rm8 }, &.{ 0x0f, 0x9d }, 0, .rex, .none }, + .{ .setnle, .m, &.{ .rm8 }, &.{ 0x0f, 0x9f }, 0, .none, .none }, + .{ .setnle, .m, &.{ .rm8 }, &.{ 0x0f, 0x9f }, 0, .rex, .none }, + .{ .setno, .m, &.{ .rm8 }, &.{ 0x0f, 0x91 }, 0, .none, .none }, + .{ .setno, .m, &.{ .rm8 }, &.{ 0x0f, 0x91 }, 0, .rex, .none }, + .{ .setnp, .m, &.{ .rm8 }, &.{ 0x0f, 0x9b }, 0, .none, .none }, + .{ .setnp, .m, &.{ .rm8 }, &.{ 0x0f, 0x9b }, 0, .rex, .none }, + .{ .setns, .m, &.{ .rm8 }, &.{ 0x0f, 0x99 }, 0, .none, .none }, + .{ .setns, .m, &.{ .rm8 }, &.{ 0x0f, 0x99 }, 0, .rex, .none }, + .{ .setnz, .m, &.{ .rm8 }, &.{ 0x0f, 0x95 }, 0, .none, .none }, + .{ .setnz, .m, &.{ .rm8 }, &.{ 0x0f, 0x95 }, 0, .rex, .none }, + .{ .seto, .m, &.{ .rm8 }, &.{ 0x0f, 0x90 }, 0, .none, .none }, + .{ .seto, .m, &.{ .rm8 }, &.{ 0x0f, 0x90 }, 0, .rex, .none }, + .{ .setp, .m, &.{ .rm8 }, &.{ 0x0f, 0x9a }, 0, .none, .none }, + .{ .setp, .m, &.{ .rm8 }, &.{ 0x0f, 0x9a }, 0, .rex, .none }, + .{ .setpe, .m, &.{ .rm8 }, &.{ 0x0f, 0x9a }, 0, .none, .none }, + .{ .setpe, .m, &.{ .rm8 }, &.{ 0x0f, 0x9a }, 0, .rex, .none }, + .{ .setpo, .m, &.{ .rm8 }, &.{ 0x0f, 0x9b }, 0, .none, .none }, + .{ .setpo, .m, &.{ .rm8 }, &.{ 0x0f, 0x9b }, 0, .rex, .none }, + .{ .sets, .m, &.{ .rm8 }, &.{ 0x0f, 0x98 }, 0, .none, .none }, + .{ .sets, .m, &.{ .rm8 }, &.{ 0x0f, 0x98 }, 0, .rex, .none }, + .{ .setz, .m, &.{ .rm8 }, &.{ 0x0f, 0x94 }, 0, .none, .none }, + .{ .setz, .m, &.{ .rm8 }, &.{ 0x0f, 0x94 }, 0, .rex, .none }, + + .{ .sfence, .np, &.{}, &.{ 0x0f, 0xae, 0xf8 }, 0, .none, .none }, + + .{ .shl, .m1, &.{ .rm8, .unity }, &.{ 0xd0 }, 4, .none, .none }, + .{ .shl, .m1, &.{ .rm8, .unity }, &.{ 0xd0 }, 4, .rex, .none }, + .{ .shl, .m1, &.{ .rm16, .unity }, &.{ 0xd1 }, 4, .none, .none }, + .{ .shl, .m1, &.{ .rm32, .unity }, &.{ 0xd1 }, 4, .none, .none }, + .{ .shl, .m1, &.{ .rm64, .unity }, &.{ 0xd1 }, 4, .long, .none }, + .{ .shl, .mc, &.{ .rm8, .cl }, &.{ 0xd2 }, 4, .none, .none }, + .{ .shl, .mc, &.{ .rm8, .cl }, &.{ 0xd2 }, 4, .rex, .none }, + .{ .shl, .mc, &.{ .rm16, .cl }, &.{ 0xd3 }, 4, .none, .none }, + .{ .shl, .mc, &.{ .rm32, .cl }, &.{ 0xd3 }, 4, .none, .none }, + .{ .shl, .mc, &.{ .rm64, .cl }, &.{ 0xd3 }, 4, .long, .none }, + .{ .shl, .mi, &.{ .rm8, .imm8 }, &.{ 0xc0 }, 4, .none, .none }, + .{ .shl, .mi, &.{ .rm8, .imm8 }, &.{ 0xc0 }, 4, .rex, .none }, + .{ .shl, .mi, &.{ .rm16, .imm8 }, &.{ 0xc1 }, 4, .none, .none }, + .{ .shl, .mi, &.{ .rm32, .imm8 }, &.{ 0xc1 }, 4, .none, .none }, + .{ .shl, .mi, &.{ .rm64, .imm8 }, &.{ 0xc1 }, 4, .long, .none }, + + .{ .shld, .mri, &.{ .rm16, .r16, .imm8 }, &.{ 0x0f, 0xa4 }, 0, .none, 
.none }, + .{ .shld, .mrc, &.{ .rm16, .r16, .cl }, &.{ 0x0f, 0xa5 }, 0, .none, .none }, + .{ .shld, .mri, &.{ .rm32, .r32, .imm8 }, &.{ 0x0f, 0xa4 }, 0, .none, .none }, + .{ .shld, .mri, &.{ .rm64, .r64, .imm8 }, &.{ 0x0f, 0xa4 }, 0, .long, .none }, + .{ .shld, .mrc, &.{ .rm32, .r32, .cl }, &.{ 0x0f, 0xa5 }, 0, .none, .none }, + .{ .shld, .mrc, &.{ .rm64, .r64, .cl }, &.{ 0x0f, 0xa5 }, 0, .long, .none }, + + .{ .shr, .m1, &.{ .rm8, .unity }, &.{ 0xd0 }, 5, .none, .none }, + .{ .shr, .m1, &.{ .rm8, .unity }, &.{ 0xd0 }, 5, .rex, .none }, + .{ .shr, .m1, &.{ .rm16, .unity }, &.{ 0xd1 }, 5, .none, .none }, + .{ .shr, .m1, &.{ .rm32, .unity }, &.{ 0xd1 }, 5, .none, .none }, + .{ .shr, .m1, &.{ .rm64, .unity }, &.{ 0xd1 }, 5, .long, .none }, + .{ .shr, .mc, &.{ .rm8, .cl }, &.{ 0xd2 }, 5, .none, .none }, + .{ .shr, .mc, &.{ .rm8, .cl }, &.{ 0xd2 }, 5, .rex, .none }, + .{ .shr, .mc, &.{ .rm16, .cl }, &.{ 0xd3 }, 5, .none, .none }, + .{ .shr, .mc, &.{ .rm32, .cl }, &.{ 0xd3 }, 5, .none, .none }, + .{ .shr, .mc, &.{ .rm64, .cl }, &.{ 0xd3 }, 5, .long, .none }, + .{ .shr, .mi, &.{ .rm8, .imm8 }, &.{ 0xc0 }, 5, .none, .none }, + .{ .shr, .mi, &.{ .rm8, .imm8 }, &.{ 0xc0 }, 5, .rex, .none }, + .{ .shr, .mi, &.{ .rm16, .imm8 }, &.{ 0xc1 }, 5, .none, .none }, + .{ .shr, .mi, &.{ .rm32, .imm8 }, &.{ 0xc1 }, 5, .none, .none }, + .{ .shr, .mi, &.{ .rm64, .imm8 }, &.{ 0xc1 }, 5, .long, .none }, + + .{ .shrd, .mri, &.{ .rm16, .r16, .imm8 }, &.{ 0x0f, 0xac }, 0, .none, .none }, + .{ .shrd, .mrc, &.{ .rm16, .r16, .cl }, &.{ 0x0f, 0xad }, 0, .none, .none }, + .{ .shrd, .mri, &.{ .rm32, .r32, .imm8 }, &.{ 0x0f, 0xac }, 0, .none, .none }, + .{ .shrd, .mri, &.{ .rm64, .r64, .imm8 }, &.{ 0x0f, 0xac }, 0, .long, .none }, + .{ .shrd, .mrc, &.{ .rm32, .r32, .cl }, &.{ 0x0f, 0xad }, 0, .none, .none }, + .{ .shrd, .mrc, &.{ .rm64, .r64, .cl }, &.{ 0x0f, 0xad }, 0, .long, .none }, + + .{ .stos, .np, &.{ .m8 }, &.{ 0xaa }, 0, .none, .none }, + .{ .stos, .np, &.{ .m16 }, &.{ 0xab }, 0, .none, .none }, + .{ .stos, .np, &.{ .m32 }, &.{ 0xab }, 0, .none, .none }, + .{ .stos, .np, &.{ .m64 }, &.{ 0xab }, 0, .long, .none }, + + .{ .stosb, .np, &.{}, &.{ 0xaa }, 0, .none, .none }, + .{ .stosw, .np, &.{}, &.{ 0xab }, 0, .short, .none }, + .{ .stosd, .np, &.{}, &.{ 0xab }, 0, .none, .none }, + .{ .stosq, .np, &.{}, &.{ 0xab }, 0, .long, .none }, + + .{ .sub, .zi, &.{ .al, .imm8 }, &.{ 0x2c }, 0, .none, .none }, + .{ .sub, .zi, &.{ .ax, .imm16 }, &.{ 0x2d }, 0, .none, .none }, + .{ .sub, .zi, &.{ .eax, .imm32 }, &.{ 0x2d }, 0, .none, .none }, + .{ .sub, .zi, &.{ .rax, .imm32s }, &.{ 0x2d }, 0, .long, .none }, + .{ .sub, .mi, &.{ .rm8, .imm8 }, &.{ 0x80 }, 5, .none, .none }, + .{ .sub, .mi, &.{ .rm8, .imm8 }, &.{ 0x80 }, 5, .rex, .none }, + .{ .sub, .mi, &.{ .rm16, .imm16 }, &.{ 0x81 }, 5, .none, .none }, + .{ .sub, .mi, &.{ .rm32, .imm32 }, &.{ 0x81 }, 5, .none, .none }, + .{ .sub, .mi, &.{ .rm64, .imm32s }, &.{ 0x81 }, 5, .long, .none }, + .{ .sub, .mi, &.{ .rm16, .imm8s }, &.{ 0x83 }, 5, .none, .none }, + .{ .sub, .mi, &.{ .rm32, .imm8s }, &.{ 0x83 }, 5, .none, .none }, + .{ .sub, .mi, &.{ .rm64, .imm8s }, &.{ 0x83 }, 5, .long, .none }, + .{ .sub, .mr, &.{ .rm8, .r8 }, &.{ 0x28 }, 0, .none, .none }, + .{ .sub, .mr, &.{ .rm8, .r8 }, &.{ 0x28 }, 0, .rex, .none }, + .{ .sub, .mr, &.{ .rm16, .r16 }, &.{ 0x29 }, 0, .none, .none }, + .{ .sub, .mr, &.{ .rm32, .r32 }, &.{ 0x29 }, 0, .none, .none }, + .{ .sub, .mr, &.{ .rm64, .r64 }, &.{ 0x29 }, 0, .long, .none }, + .{ .sub, .rm, &.{ .r8, .rm8 }, &.{ 0x2a }, 0, .none, .none }, + .{ .sub, 
.rm, &.{ .r8, .rm8 }, &.{ 0x2a }, 0, .rex, .none }, + .{ .sub, .rm, &.{ .r16, .rm16 }, &.{ 0x2b }, 0, .none, .none }, + .{ .sub, .rm, &.{ .r32, .rm32 }, &.{ 0x2b }, 0, .none, .none }, + .{ .sub, .rm, &.{ .r64, .rm64 }, &.{ 0x2b }, 0, .long, .none }, + + .{ .syscall, .np, &.{}, &.{ 0x0f, 0x05 }, 0, .none, .none }, + + .{ .@"test", .zi, &.{ .al, .imm8 }, &.{ 0xa8 }, 0, .none, .none }, + .{ .@"test", .zi, &.{ .ax, .imm16 }, &.{ 0xa9 }, 0, .none, .none }, + .{ .@"test", .zi, &.{ .eax, .imm32 }, &.{ 0xa9 }, 0, .none, .none }, + .{ .@"test", .zi, &.{ .rax, .imm32s }, &.{ 0xa9 }, 0, .long, .none }, + .{ .@"test", .mi, &.{ .rm8, .imm8 }, &.{ 0xf6 }, 0, .none, .none }, + .{ .@"test", .mi, &.{ .rm8, .imm8 }, &.{ 0xf6 }, 0, .rex, .none }, + .{ .@"test", .mi, &.{ .rm16, .imm16 }, &.{ 0xf7 }, 0, .none, .none }, + .{ .@"test", .mi, &.{ .rm32, .imm32 }, &.{ 0xf7 }, 0, .none, .none }, + .{ .@"test", .mi, &.{ .rm64, .imm32s }, &.{ 0xf7 }, 0, .long, .none }, + .{ .@"test", .mr, &.{ .rm8, .r8 }, &.{ 0x84 }, 0, .none, .none }, + .{ .@"test", .mr, &.{ .rm8, .r8 }, &.{ 0x84 }, 0, .rex, .none }, + .{ .@"test", .mr, &.{ .rm16, .r16 }, &.{ 0x85 }, 0, .none, .none }, + .{ .@"test", .mr, &.{ .rm32, .r32 }, &.{ 0x85 }, 0, .none, .none }, + .{ .@"test", .mr, &.{ .rm64, .r64 }, &.{ 0x85 }, 0, .long, .none }, + + .{ .tzcnt, .rm, &.{ .r16, .rm16 }, &.{ 0xf3, 0x0f, 0xbc }, 0, .none, .none }, + .{ .tzcnt, .rm, &.{ .r32, .rm32 }, &.{ 0xf3, 0x0f, 0xbc }, 0, .none, .none }, + .{ .tzcnt, .rm, &.{ .r64, .rm64 }, &.{ 0xf3, 0x0f, 0xbc }, 0, .long, .none }, + + .{ .ud2, .np, &.{}, &.{ 0x0f, 0x0b }, 0, .none, .none }, + + .{ .xadd, .mr, &.{ .rm8, .r8 }, &.{ 0x0f, 0xc0 }, 0, .none, .none }, + .{ .xadd, .mr, &.{ .rm8, .r8 }, &.{ 0x0f, 0xc0 }, 0, .rex, .none }, + .{ .xadd, .mr, &.{ .rm16, .r16 }, &.{ 0x0f, 0xc1 }, 0, .none, .none }, + .{ .xadd, .mr, &.{ .rm32, .r32 }, &.{ 0x0f, 0xc1 }, 0, .none, .none }, + .{ .xadd, .mr, &.{ .rm64, .r64 }, &.{ 0x0f, 0xc1 }, 0, .long, .none }, + + .{ .xchg, .o, &.{ .ax, .r16 }, &.{ 0x90 }, 0, .none, .none }, + .{ .xchg, .o, &.{ .r16, .ax }, &.{ 0x90 }, 0, .none, .none }, + .{ .xchg, .o, &.{ .eax, .r32 }, &.{ 0x90 }, 0, .none, .none }, + .{ .xchg, .o, &.{ .rax, .r64 }, &.{ 0x90 }, 0, .long, .none }, + .{ .xchg, .o, &.{ .r32, .eax }, &.{ 0x90 }, 0, .none, .none }, + .{ .xchg, .o, &.{ .r64, .rax }, &.{ 0x90 }, 0, .long, .none }, + .{ .xchg, .mr, &.{ .rm8, .r8 }, &.{ 0x86 }, 0, .none, .none }, + .{ .xchg, .mr, &.{ .rm8, .r8 }, &.{ 0x86 }, 0, .rex, .none }, + .{ .xchg, .rm, &.{ .r8, .rm8 }, &.{ 0x86 }, 0, .none, .none }, + .{ .xchg, .rm, &.{ .r8, .rm8 }, &.{ 0x86 }, 0, .rex, .none }, + .{ .xchg, .mr, &.{ .rm16, .r16 }, &.{ 0x87 }, 0, .none, .none }, + .{ .xchg, .rm, &.{ .r16, .rm16 }, &.{ 0x87 }, 0, .none, .none }, + .{ .xchg, .mr, &.{ .rm32, .r32 }, &.{ 0x87 }, 0, .none, .none }, + .{ .xchg, .mr, &.{ .rm64, .r64 }, &.{ 0x87 }, 0, .long, .none }, + .{ .xchg, .rm, &.{ .r32, .rm32 }, &.{ 0x87 }, 0, .none, .none }, + .{ .xchg, .rm, &.{ .r64, .rm64 }, &.{ 0x87 }, 0, .long, .none }, + + .{ .xor, .zi, &.{ .al, .imm8 }, &.{ 0x34 }, 0, .none, .none }, + .{ .xor, .zi, &.{ .ax, .imm16 }, &.{ 0x35 }, 0, .none, .none }, + .{ .xor, .zi, &.{ .eax, .imm32 }, &.{ 0x35 }, 0, .none, .none }, + .{ .xor, .zi, &.{ .rax, .imm32s }, &.{ 0x35 }, 0, .long, .none }, + .{ .xor, .mi, &.{ .rm8, .imm8 }, &.{ 0x80 }, 6, .none, .none }, + .{ .xor, .mi, &.{ .rm8, .imm8 }, &.{ 0x80 }, 6, .rex, .none }, + .{ .xor, .mi, &.{ .rm16, .imm16 }, &.{ 0x81 }, 6, .none, .none }, + .{ .xor, .mi, &.{ .rm32, .imm32 }, &.{ 0x81 }, 6, .none, .none }, + .{ 
.xor, .mi, &.{ .rm64, .imm32s }, &.{ 0x81 }, 6, .long, .none }, + .{ .xor, .mi, &.{ .rm16, .imm8s }, &.{ 0x83 }, 6, .none, .none }, + .{ .xor, .mi, &.{ .rm32, .imm8s }, &.{ 0x83 }, 6, .none, .none }, + .{ .xor, .mi, &.{ .rm64, .imm8s }, &.{ 0x83 }, 6, .long, .none }, + .{ .xor, .mr, &.{ .rm8, .r8 }, &.{ 0x30 }, 0, .none, .none }, + .{ .xor, .mr, &.{ .rm8, .r8 }, &.{ 0x30 }, 0, .rex, .none }, + .{ .xor, .mr, &.{ .rm16, .r16 }, &.{ 0x31 }, 0, .none, .none }, + .{ .xor, .mr, &.{ .rm32, .r32 }, &.{ 0x31 }, 0, .none, .none }, + .{ .xor, .mr, &.{ .rm64, .r64 }, &.{ 0x31 }, 0, .long, .none }, + .{ .xor, .rm, &.{ .r8, .rm8 }, &.{ 0x32 }, 0, .none, .none }, + .{ .xor, .rm, &.{ .r8, .rm8 }, &.{ 0x32 }, 0, .rex, .none }, + .{ .xor, .rm, &.{ .r16, .rm16 }, &.{ 0x33 }, 0, .none, .none }, + .{ .xor, .rm, &.{ .r32, .rm32 }, &.{ 0x33 }, 0, .none, .none }, + .{ .xor, .rm, &.{ .r64, .rm64 }, &.{ 0x33 }, 0, .long, .none }, // SSE - .{ .addss, .rm, &.{ .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x58 }, 0, .sse }, + .{ .addss, .rm, &.{ .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x58 }, 0, .none, .sse }, - .{ .andnps, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x55 }, 0, .sse }, + .{ .andnps, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x55 }, 0, .none, .sse }, - .{ .andps, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x54 }, 0, .sse }, + .{ .andps, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x54 }, 0, .none, .sse }, - .{ .cmpss, .rmi, &.{ .xmm, .xmm_m32, .imm8 }, &.{ 0xf3, 0x0f, 0xc2 }, 0, .sse }, + .{ .cmpss, .rmi, &.{ .xmm, .xmm_m32, .imm8 }, &.{ 0xf3, 0x0f, 0xc2 }, 0, .none, .sse }, - .{ .cvtsi2ss, .rm, &.{ .xmm, .rm32 }, &.{ 0xf3, 0x0f, 0x2a }, 0, .sse }, - .{ .cvtsi2ss, .rm, &.{ .xmm, .rm64 }, &.{ 0xf3, 0x0f, 0x2a }, 0, .sse_long }, + .{ .cvtsi2ss, .rm, &.{ .xmm, .rm32 }, &.{ 0xf3, 0x0f, 0x2a }, 0, .none, .sse }, + .{ .cvtsi2ss, .rm, &.{ .xmm, .rm64 }, &.{ 0xf3, 0x0f, 0x2a }, 0, .long, .sse }, - .{ .divss, .rm, &.{ .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x5e }, 0, .sse }, + .{ .divss, .rm, &.{ .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x5e }, 0, .none, .sse }, - .{ .maxss, .rm, &.{ .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x5f }, 0, .sse }, + .{ .maxss, .rm, &.{ .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x5f }, 0, .none, .sse }, - .{ .minss, .rm, &.{ .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x5d }, 0, .sse }, + .{ .minss, .rm, &.{ .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x5d }, 0, .none, .sse }, - .{ .movaps, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x28 }, 0, .sse }, - .{ .movaps, .mr, &.{ .xmm_m128, .xmm }, &.{ 0x0f, 0x29 }, 0, .sse }, + .{ .movaps, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x28 }, 0, .none, .sse }, + .{ .movaps, .mr, &.{ .xmm_m128, .xmm }, &.{ 0x0f, 0x29 }, 0, .none, .sse }, - .{ .movss, .rm, &.{ .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x10 }, 0, .sse }, - .{ .movss, .mr, &.{ .xmm_m32, .xmm }, &.{ 0xf3, 0x0f, 0x11 }, 0, .sse }, + .{ .movss, .rm, &.{ .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x10 }, 0, .none, .sse }, + .{ .movss, .mr, &.{ .xmm_m32, .xmm }, &.{ 0xf3, 0x0f, 0x11 }, 0, .none, .sse }, - .{ .movups, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x10 }, 0, .sse }, - .{ .movups, .mr, &.{ .xmm_m128, .xmm }, &.{ 0x0f, 0x11 }, 0, .sse }, + .{ .movups, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x10 }, 0, .none, .sse }, + .{ .movups, .mr, &.{ .xmm_m128, .xmm }, &.{ 0x0f, 0x11 }, 0, .none, .sse }, - .{ .mulss, .rm, &.{ .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x59 }, 0, .sse }, + .{ .mulss, .rm, &.{ .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x59 }, 0, .none, .sse }, - .{ .orps, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x56 }, 0, .sse }, + .{ .orps, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x56 }, 0, .none, .sse }, - 
.{ .subss, .rm, &.{ .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x5c }, 0, .sse }, + .{ .subss, .rm, &.{ .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x5c }, 0, .none, .sse }, - .{ .sqrtps, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x51 }, 0, .sse }, - .{ .sqrtss, .rm, &.{ .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x51 }, 0, .sse }, + .{ .sqrtps, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x51 }, 0, .none, .sse }, + .{ .sqrtss, .rm, &.{ .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x51 }, 0, .none, .sse }, - .{ .ucomiss, .rm, &.{ .xmm, .xmm_m32 }, &.{ 0x0f, 0x2e }, 0, .sse }, + .{ .ucomiss, .rm, &.{ .xmm, .xmm_m32 }, &.{ 0x0f, 0x2e }, 0, .none, .sse }, - .{ .xorps, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x57 }, 0, .sse }, + .{ .xorps, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x57 }, 0, .none, .sse }, // SSE2 - .{ .addsd, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x58 }, 0, .sse2 }, + .{ .addsd, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x58 }, 0, .none, .sse2 }, - .{ .andnpd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x55 }, 0, .sse2 }, + .{ .andnpd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x55 }, 0, .none, .sse2 }, - .{ .andpd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x54 }, 0, .sse2 }, + .{ .andpd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x54 }, 0, .none, .sse2 }, - .{ .cmpsd, .rmi, &.{ .xmm, .xmm_m64, .imm8 }, &.{ 0xf2, 0x0f, 0xc2 }, 0, .sse2 }, + .{ .cmpsd, .rmi, &.{ .xmm, .xmm_m64, .imm8 }, &.{ 0xf2, 0x0f, 0xc2 }, 0, .none, .sse2 }, - .{ .cvtsd2ss, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x5a }, 0, .sse2 }, + .{ .cvtsd2ss, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x5a }, 0, .none, .sse2 }, - .{ .cvtsi2sd, .rm, &.{ .xmm, .rm32 }, &.{ 0xf2, 0x0f, 0x2a }, 0, .sse2 }, - .{ .cvtsi2sd, .rm, &.{ .xmm, .rm64 }, &.{ 0xf2, 0x0f, 0x2a }, 0, .sse2_long }, + .{ .cvtsi2sd, .rm, &.{ .xmm, .rm32 }, &.{ 0xf2, 0x0f, 0x2a }, 0, .none, .sse2 }, + .{ .cvtsi2sd, .rm, &.{ .xmm, .rm64 }, &.{ 0xf2, 0x0f, 0x2a }, 0, .long, .sse2 }, - .{ .cvtss2sd, .rm, &.{ .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x5a }, 0, .sse2 }, + .{ .cvtss2sd, .rm, &.{ .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x5a }, 0, .none, .sse2 }, - .{ .divsd, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x5e }, 0, .sse2 }, + .{ .divsd, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x5e }, 0, .none, .sse2 }, - .{ .maxsd, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x5f }, 0, .sse2 }, + .{ .maxsd, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x5f }, 0, .none, .sse2 }, - .{ .minsd, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x5d }, 0, .sse2 }, + .{ .minsd, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x5d }, 0, .none, .sse2 }, - .{ .movapd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x28 }, 0, .sse2 }, - .{ .movapd, .mr, &.{ .xmm_m128, .xmm }, &.{ 0x66, 0x0f, 0x29 }, 0, .sse2 }, + .{ .movapd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x28 }, 0, .none, .sse2 }, + .{ .movapd, .mr, &.{ .xmm_m128, .xmm }, &.{ 0x66, 0x0f, 0x29 }, 0, .none, .sse2 }, - .{ .movd, .rm, &.{ .xmm, .rm32 }, &.{ 0x66, 0x0f, 0x6e }, 0, .sse2 }, - .{ .movd, .mr, &.{ .rm32, .xmm }, &.{ 0x66, 0x0f, 0x7e }, 0, .sse2 }, + .{ .movd, .rm, &.{ .xmm, .rm32 }, &.{ 0x66, 0x0f, 0x6e }, 0, .none, .sse2 }, + .{ .movd, .mr, &.{ .rm32, .xmm }, &.{ 0x66, 0x0f, 0x7e }, 0, .none, .sse2 }, - .{ .movq, .rm, &.{ .xmm, .rm64 }, &.{ 0x66, 0x0f, 0x6e }, 0, .sse2_long }, - .{ .movq, .mr, &.{ .rm64, .xmm }, &.{ 0x66, 0x0f, 0x7e }, 0, .sse2_long }, + .{ .movq, .rm, &.{ .xmm, .rm64 }, &.{ 0x66, 0x0f, 0x6e }, 0, .long, .sse2 }, + .{ .movq, .mr, &.{ .rm64, .xmm }, &.{ 0x66, 0x0f, 0x7e }, 0, .long, .sse2 }, - .{ .movq, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf3, 0x0f, 0x7e 
}, 0, .sse2 }, - .{ .movq, .mr, &.{ .xmm_m64, .xmm }, &.{ 0x66, 0x0f, 0xd6 }, 0, .sse2 }, + .{ .movq, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf3, 0x0f, 0x7e }, 0, .none, .sse2 }, + .{ .movq, .mr, &.{ .xmm_m64, .xmm }, &.{ 0x66, 0x0f, 0xd6 }, 0, .none, .sse2 }, - .{ .movupd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x10 }, 0, .sse2 }, - .{ .movupd, .mr, &.{ .xmm_m128, .xmm }, &.{ 0x66, 0x0f, 0x11 }, 0, .sse2 }, + .{ .movupd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x10 }, 0, .none, .sse2 }, + .{ .movupd, .mr, &.{ .xmm_m128, .xmm }, &.{ 0x66, 0x0f, 0x11 }, 0, .none, .sse2 }, - .{ .mulsd, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x59 }, 0, .sse2 }, + .{ .mulsd, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x59 }, 0, .none, .sse2 }, - .{ .orpd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x56 }, 0, .sse2 }, + .{ .orpd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x56 }, 0, .none, .sse2 }, - .{ .pextrw, .mri, &.{ .r16, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0xc5 }, 0, .sse2 }, + .{ .pextrw, .mri, &.{ .r16, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0xc5 }, 0, .none, .sse2 }, - .{ .pinsrw, .rmi, &.{ .xmm, .rm16, .imm8 }, &.{ 0x66, 0x0f, 0xc4 }, 0, .sse2 }, + .{ .pinsrw, .rmi, &.{ .xmm, .rm16, .imm8 }, &.{ 0x66, 0x0f, 0xc4 }, 0, .none, .sse2 }, - .{ .sqrtpd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x51 }, 0, .sse2 }, - .{ .sqrtsd, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x51 }, 0, .sse2 }, + .{ .sqrtpd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x51 }, 0, .none, .sse2 }, + .{ .sqrtsd, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x51 }, 0, .none, .sse2 }, - .{ .subsd, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x5c }, 0, .sse2 }, + .{ .subsd, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x5c }, 0, .none, .sse2 }, - .{ .movsd, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x10 }, 0, .sse2 }, - .{ .movsd, .mr, &.{ .xmm_m64, .xmm }, &.{ 0xf2, 0x0f, 0x11 }, 0, .sse2 }, + .{ .movsd, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x10 }, 0, .none, .sse2 }, + .{ .movsd, .mr, &.{ .xmm_m64, .xmm }, &.{ 0xf2, 0x0f, 0x11 }, 0, .none, .sse2 }, - .{ .ucomisd, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0x66, 0x0f, 0x2e }, 0, .sse2 }, + .{ .ucomisd, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0x66, 0x0f, 0x2e }, 0, .none, .sse2 }, - .{ .xorpd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x57 }, 0, .sse2 }, + .{ .xorpd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x57 }, 0, .none, .sse2 }, // SSE4.1 - .{ .pextrw, .mri, &.{ .rm16, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x15 }, 0, .sse4_1 }, + .{ .pextrw, .mri, &.{ .rm16, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x15 }, 0, .none, .sse4_1 }, - .{ .roundss, .rmi, &.{ .xmm, .xmm_m32, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x0a }, 0, .sse4_1 }, - .{ .roundsd, .rmi, &.{ .xmm, .xmm_m64, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x0b }, 0, .sse4_1 }, + .{ .roundss, .rmi, &.{ .xmm, .xmm_m32, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x0a }, 0, .none, .sse4_1 }, + .{ .roundsd, .rmi, &.{ .xmm, .xmm_m64, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x0b }, 0, .none, .sse4_1 }, + + // F16C + .{ .vcvtph2ps, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0x66, 0x0f, 0x38, 0x13 }, 0, .vex_128, .f16c }, + + .{ .vcvtps2ph, .mri, &.{ .xmm_m64, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x1d }, 0, .vex_128, .f16c }, }; // zig fmt: on diff --git a/test/behavior/vector.zig b/test/behavior/vector.zig index b0e717d131..41b0bfc39b 100644 --- a/test/behavior/vector.zig +++ b/test/behavior/vector.zig @@ -168,7 +168,8 @@ test "array to vector" { test "array to vector with element type coercion" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == 
.stage2_x86_64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64 and + !comptime std.Target.x86.featureSetHas(builtin.cpu.features, .f16c)) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO -- cgit v1.2.3 From ae588a09f2c2146ada0f914c7d279f69a0d79396 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Fri, 5 May 2023 22:16:13 -0400 Subject: x86_64: implement f16 cmp --- src/arch/x86_64/CodeGen.zig | 53 +- src/arch/x86_64/Encoding.zig | 163 ++--- src/arch/x86_64/Lower.zig | 42 +- src/arch/x86_64/Mir.zig | 80 ++- src/arch/x86_64/encoder.zig | 49 +- src/arch/x86_64/encodings.zig | 1361 ++++++++++++++++++++++------------------- 6 files changed, 989 insertions(+), 759 deletions(-) (limited to 'src/arch') diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index b7fd81db68..d24428467a 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -6737,26 +6737,43 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { defer if (dst_lock) |lock| self.register_manager.unlockReg(lock); const src_mcv = if (flipped) lhs_mcv else rhs_mcv; - try self.genBinOpMir(switch (ty.zigTypeTag()) { - else => .cmp, + switch (ty.zigTypeTag()) { + else => try self.genBinOpMir(.cmp, ty, dst_mcv, src_mcv), .Float => switch (ty.floatBits(self.target.*)) { - 32 => if (Target.x86.featureSetHas(self.target.cpu.features, .sse)) - .ucomiss - else - return self.fail("TODO implement airCmp for {} without sse", .{ - ty.fmt(self.bin_file.options.module.?), - }), - 64 => if (Target.x86.featureSetHas(self.target.cpu.features, .sse2)) - .ucomisd - else - return self.fail("TODO implement airCmp for {} without sse2", .{ - ty.fmt(self.bin_file.options.module.?), - }), + 16 => if (self.hasFeature(.f16c)) { + const dst_reg = dst_mcv.getReg().?.to128(); + + const tmp_reg = (try self.register_manager.allocReg(null, sse)).to128(); + const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg); + defer self.register_manager.unlockReg(tmp_lock); + + if (src_mcv.isRegister()) + try self.asmRegisterRegisterRegister( + .vpunpcklwd, + dst_reg, + dst_reg, + src_mcv.getReg().?.to128(), + ) + else + try self.asmRegisterMemoryImmediate( + .vpinsrw, + dst_reg, + src_mcv.mem(.word), + Immediate.u(1), + ); + try self.asmRegisterRegister(.vcvtph2ps, dst_reg, dst_reg); + try self.asmRegisterRegister(.vmovshdup, tmp_reg, dst_reg); + try self.genBinOpMir(.ucomiss, ty, dst_mcv, .{ .register = tmp_reg }); + } else return self.fail("TODO implement airCmp for {}", .{ + ty.fmt(self.bin_file.options.module.?), + }), + 32 => try self.genBinOpMir(.ucomiss, ty, dst_mcv, src_mcv), + 64 => try self.genBinOpMir(.ucomisd, ty, dst_mcv, src_mcv), else => return self.fail("TODO implement airCmp for {}", .{ ty.fmt(self.bin_file.options.module.?), }), }, - }, ty, dst_mcv, src_mcv); + } const signedness = if (ty.isAbiInt()) ty.intInfo(self.target.*).signedness else .unsigned; const result = MCValue{ @@ -7834,8 +7851,8 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr else switch (abi_size) { 2 => return try self.asmRegisterRegisterImmediate( if (dst_reg.class() == .floating_point) .pinsrw else .pextrw, - registerAlias(dst_reg, abi_size), - registerAlias(src_reg, abi_size), + registerAlias(dst_reg, 4), + registerAlias(src_reg, 4), 
Immediate.u(0), ), 4 => .movd, @@ -8045,7 +8062,7 @@ fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCVal try self.asmMemoryRegisterImmediate( .pextrw, dst_mem, - registerAlias(src_reg, abi_size), + src_reg.to128(), Immediate.u(0), ) else diff --git a/src/arch/x86_64/Encoding.zig b/src/arch/x86_64/Encoding.zig index 05c48ecddf..ada1e891fb 100644 --- a/src/arch/x86_64/Encoding.zig +++ b/src/arch/x86_64/Encoding.zig @@ -58,9 +58,9 @@ pub fn findByMnemonic( var shortest_len: ?usize = null; next: for (mnemonic_to_encodings_map[@enumToInt(mnemonic)]) |data| { switch (data.mode) { - .rex => if (!rex_required) continue, - .long => {}, - else => if (rex_required) continue, + .none, .short => if (rex_required) continue, + .rex, .rex_short => if (!rex_required) continue, + else => {}, } for (input_ops, data.ops) |input_op, data_op| if (!input_op.isSubset(data_op)) continue :next; @@ -90,24 +90,26 @@ pub fn findByOpcode(opc: []const u8, prefixes: struct { if (!std.mem.eql(u8, opc, enc.opcode())) continue; if (prefixes.rex.w) { switch (data.mode) { - .short, .fpu, .sse, .sse2, .sse4_1, .none => continue, - .long, .sse_long, .sse2_long, .rex => {}, + .none, .short, .rex, .rex_short, .vex_128, .vex_256 => continue, + .long, .vex_128_long, .vex_256_long => {}, } } else if (prefixes.rex.present and !prefixes.rex.isSet()) { switch (data.mode) { - .rex => {}, + .rex, .rex_short => {}, else => continue, } } else if (prefixes.legacy.prefix_66) { - switch (enc.operandBitSize()) { - 16 => {}, - else => continue, + switch (data.mode) { + .short, .rex_short => {}, + .none, .rex, .vex_128, .vex_256 => continue, + .long, .vex_128_long, .vex_256_long => continue, } } else { switch (data.mode) { - .none => switch (enc.operandBitSize()) { - 16 => continue, - else => {}, + .none => switch (data.mode) { + .short, .rex_short => continue, + .none, .rex, .vex_128, .vex_256 => {}, + .long, .vex_128_long, .vex_256_long => {}, }, else => continue, } @@ -131,28 +133,11 @@ pub fn mandatoryPrefix(encoding: *const Encoding) ?u8 { pub fn modRmExt(encoding: Encoding) u3 { return switch (encoding.data.op_en) { - .m, .mi, .m1, .mc => encoding.data.modrm_ext, + .m, .mi, .m1, .mc, .vmi => encoding.data.modrm_ext, else => unreachable, }; } -pub fn operandBitSize(encoding: Encoding) u64 { - return switch (encoding.data.mode) { - .short => 16, - .long => 64, - else => switch (encoding.data.op_en) { - .np => switch (encoding.data.ops[0]) { - .o16 => 16, - .o32 => 32, - .o64 => 64, - else => 32, - }, - .td => encoding.data.ops[1].bitSize(), - else => encoding.data.ops[0].bitSize(), - }, - }; -} - pub fn format( encoding: Encoding, comptime fmt: []const u8, @@ -220,17 +205,17 @@ pub fn format( }; try writer.print("+{s} ", .{tag}); }, - .m, .mi, .m1, .mc => try writer.print("/{d} ", .{encoding.modRmExt()}), - .mr, .rm, .rmi, .mri, .mrc, .rrm, .rrmi => try writer.writeAll("/r "), + .m, .mi, .m1, .mc, .vmi => try writer.print("/{d} ", .{encoding.modRmExt()}), + .mr, .rm, .rmi, .mri, .mrc, .rvm, .rvmi => try writer.writeAll("/r "), } switch (encoding.data.op_en) { - .i, .d, .zi, .oi, .mi, .rmi, .mri, .rrmi => { + .i, .d, .zi, .oi, .mi, .rmi, .mri, .vmi, .rvmi => { const op = switch (encoding.data.op_en) { .i, .d => encoding.data.ops[0], .zi, .oi, .mi => encoding.data.ops[1], - .rmi, .mri => encoding.data.ops[2], - .rrmi => encoding.data.ops[3], + .rmi, .mri, .vmi => encoding.data.ops[2], + .rvmi => encoding.data.ops[3], else => unreachable, }; const tag = switch (op) { @@ -245,7 +230,7 @@ pub fn format( }; try 
writer.print("{s} ", .{tag}); }, - .np, .fd, .td, .o, .m, .m1, .mc, .mr, .rm, .mrc, .rrm => {}, + .np, .fd, .td, .o, .m, .m1, .mc, .mr, .rm, .mrc, .rvm => {}, } try writer.print("{s} ", .{@tagName(encoding.mnemonic)}); @@ -315,8 +300,7 @@ pub const Mnemonic = enum { movaps, movss, movups, mulss, orps, - pextrw, - pinsrw, + pextrw, pinsrw, sqrtps, sqrtss, subss, @@ -335,14 +319,25 @@ pub const Mnemonic = enum { movupd, mulsd, orpd, - sqrtpd, - sqrtsd, + pshufhw, pshuflw, + psrld, psrlq, psrlw, + punpckhbw, punpckhdq, punpckhqdq, punpckhwd, + punpcklbw, punpckldq, punpcklqdq, punpcklwd, + sqrtpd, sqrtsd, subsd, ucomisd, xorpd, + // SSE3 + movddup, movshdup, movsldup, // SSE4.1 - roundss, - roundsd, + roundsd, roundss, + // AVX + vmovddup, vmovshdup, vmovsldup, + vpextrw, vpinsrw, + vpshufhw, vpshuflw, + vpsrld, vpsrlq, vpsrlw, + vpunpckhbw, vpunpckhdq, vpunpckhqdq, vpunpckhwd, + vpunpcklbw, vpunpckldq, vpunpcklqdq, vpunpcklwd, // F16C vcvtph2ps, vcvtps2ph, // zig fmt: on @@ -357,7 +352,7 @@ pub const OpEn = enum { fd, td, m1, mc, mi, mr, rm, rmi, mri, mrc, - rrm, rrmi, + vmi, rvm, rvmi, // zig fmt: on }; @@ -372,6 +367,7 @@ pub const Op = enum { cl, r8, r16, r32, r64, rm8, rm16, rm32, rm64, + r32_m16, r64_m16, m8, m16, m32, m64, m80, m128, rel8, rel16, rel32, m, @@ -450,16 +446,49 @@ pub const Op = enum { } } - pub fn bitSize(op: Op) u64 { + pub fn immBitSize(op: Op) u64 { return switch (op) { .none, .o16, .o32, .o64, .moffs, .m, .sreg => unreachable, + .al, .cl, .r8, .rm8 => unreachable, + .ax, .r16, .rm16 => unreachable, + .eax, .r32, .rm32, .r32_m16 => unreachable, + .rax, .r64, .rm64, .r64_m16 => unreachable, + .xmm, .xmm_m32, .xmm_m64, .xmm_m128 => unreachable, + .m8, .m16, .m32, .m64, .m80, .m128 => unreachable, .unity => 1, - .imm8, .imm8s, .al, .cl, .r8, .m8, .rm8, .rel8 => 8, - .imm16, .imm16s, .ax, .r16, .m16, .rm16, .rel16 => 16, - .imm32, .imm32s, .eax, .r32, .m32, .rm32, .rel32, .xmm_m32 => 32, - .imm64, .rax, .r64, .m64, .rm64, .xmm_m64 => 64, + .imm8, .imm8s, .rel8 => 8, + .imm16, .imm16s, .rel16 => 16, + .imm32, .imm32s, .rel32 => 32, + .imm64 => 64, + }; + } + + pub fn regBitSize(op: Op) u64 { + return switch (op) { + .none, .o16, .o32, .o64, .moffs, .m, .sreg => unreachable, + .unity, .imm8, .imm8s, .imm16, .imm16s, .imm32, .imm32s, .imm64 => unreachable, + .rel8, .rel16, .rel32 => unreachable, + .m8, .m16, .m32, .m64, .m80, .m128 => unreachable, + .al, .cl, .r8, .rm8 => 8, + .ax, .r16, .rm16 => 16, + .eax, .r32, .rm32, .r32_m16 => 32, + .rax, .r64, .rm64, .r64_m16 => 64, + .xmm, .xmm_m32, .xmm_m64, .xmm_m128 => 128, + }; + } + + pub fn memBitSize(op: Op) u64 { + return switch (op) { + .none, .o16, .o32, .o64, .moffs, .m, .sreg => unreachable, + .unity, .imm8, .imm8s, .imm16, .imm16s, .imm32, .imm32s, .imm64 => unreachable, + .rel8, .rel16, .rel32 => unreachable, + .al, .cl, .r8, .ax, .r16, .eax, .r32, .rax, .r64, .xmm => unreachable, + .m8, .rm8 => 8, + .m16, .rm16, .r32_m16, .r64_m16 => 16, + .m32, .rm32, .xmm_m32 => 32, + .m64, .rm64, .xmm_m64 => 64, .m80 => 80, - .m128, .xmm, .xmm_m128 => 128, + .m128, .xmm_m128 => 128, }; } @@ -482,6 +511,7 @@ pub const Op = enum { .al, .ax, .eax, .rax, .r8, .r16, .r32, .r64, .rm8, .rm16, .rm32, .rm64, + .r32_m16, .r64_m16, .xmm, .xmm_m32, .xmm_m64, .xmm_m128, => true, else => false, @@ -506,6 +536,7 @@ pub const Op = enum { // zig fmt: off return switch (op) { .rm8, .rm16, .rm32, .rm64, + .r32_m16, .r64_m16, .m8, .m16, .m32, .m64, .m80, .m128, .m, .xmm_m32, .xmm_m64, .xmm_m128, @@ -528,18 +559,12 @@ pub const Op = enum { .al, .ax, 
.eax, .rax, .cl => .general_purpose, .r8, .r16, .r32, .r64 => .general_purpose, .rm8, .rm16, .rm32, .rm64 => .general_purpose, + .r32_m16, .r64_m16 => .general_purpose, .sreg => .segment, .xmm, .xmm_m32, .xmm_m64, .xmm_m128 => .floating_point, }; } - pub fn isFloatingPointRegister(op: Op) bool { - return switch (op) { - .xmm, .xmm_m32, .xmm_m64, .xmm_m128 => true, - else => false, - }; - } - /// Given an operand `op` checks if `target` is a subset for the purposes of the encoding. pub fn isSubset(op: Op, target: Op) bool { switch (op) { @@ -553,30 +578,27 @@ pub const Op = enum { if (op.isRegister() and target.isRegister()) { return switch (target) { .cl, .al, .ax, .eax, .rax => op == target, - else => op.class() == target.class() and switch (target.class()) { - .floating_point => true, - else => op.bitSize() == target.bitSize(), - }, + else => op.class() == target.class() and op.regBitSize() == target.regBitSize(), }; } if (op.isMemory() and target.isMemory()) { switch (target) { .m => return true, - else => return op.bitSize() == target.bitSize(), + else => return op.memBitSize() == target.memBitSize(), } } if (op.isImmediate() and target.isImmediate()) { switch (target) { - .imm64 => if (op.bitSize() <= 64) return true, - .imm32s, .rel32 => if (op.bitSize() < 32 or (op.bitSize() == 32 and op.isSigned())) + .imm64 => if (op.immBitSize() <= 64) return true, + .imm32s, .rel32 => if (op.immBitSize() < 32 or (op.immBitSize() == 32 and op.isSigned())) return true, - .imm32 => if (op.bitSize() <= 32) return true, - .imm16s, .rel16 => if (op.bitSize() < 16 or (op.bitSize() == 16 and op.isSigned())) + .imm32 => if (op.immBitSize() <= 32) return true, + .imm16s, .rel16 => if (op.immBitSize() < 16 or (op.immBitSize() == 16 and op.isSigned())) return true, - .imm16 => if (op.bitSize() <= 16) return true, - .imm8s, .rel8 => if (op.bitSize() < 8 or (op.bitSize() == 8 and op.isSigned())) + .imm16 => if (op.immBitSize() <= 16) return true, + .imm8s, .rel8 => if (op.immBitSize() < 8 or (op.immBitSize() == 8 and op.isSigned())) return true, - .imm8 => if (op.bitSize() <= 8) return true, + .imm8 => if (op.immBitSize() <= 8) return true, else => {}, } return op == target; @@ -590,8 +612,9 @@ pub const Op = enum { pub const Mode = enum { none, short, - rex, long, + rex, + rex_short, vex_128, vex_128_long, vex_256, @@ -600,9 +623,11 @@ pub const Mode = enum { pub const Feature = enum { none, + avx, f16c, sse, sse2, + sse3, sse4_1, x87, }; diff --git a/src/arch/x86_64/Lower.zig b/src/arch/x86_64/Lower.zig index 9571f50e7c..d9482d4b39 100644 --- a/src/arch/x86_64/Lower.zig +++ b/src/arch/x86_64/Lower.zig @@ -108,12 +108,12 @@ pub fn lowerMir(lower: *Lower, inst: Mir.Inst) Error![]const Instruction { .orps, .pextrw, .pinsrw, - .roundss, .sqrtps, .sqrtss, .subss, .ucomiss, .xorps, + .addsd, .andnpd, .andpd, @@ -127,13 +127,51 @@ pub fn lowerMir(lower: *Lower, inst: Mir.Inst) Error![]const Instruction { .movsd, .mulsd, .orpd, - .roundsd, + .pshufhw, + .pshuflw, + .psrld, + .psrlq, + .psrlw, + .punpckhbw, + .punpckhdq, + .punpckhqdq, + .punpckhwd, + .punpcklbw, + .punpckldq, + .punpcklqdq, + .punpcklwd, .sqrtpd, .sqrtsd, .subsd, .ucomisd, .xorpd, + .movddup, + .movshdup, + .movsldup, + + .roundsd, + .roundss, + + .vmovddup, + .vmovshdup, + .vmovsldup, + .vpextrw, + .vpinsrw, + .vpshufhw, + .vpshuflw, + .vpsrld, + .vpsrlq, + .vpsrlw, + .vpunpckhbw, + .vpunpckhdq, + .vpunpckhqdq, + .vpunpckhwd, + .vpunpcklbw, + .vpunpckldq, + .vpunpcklqdq, + .vpunpcklwd, + .vcvtph2ps, .vcvtps2ph, => try lower.mirGeneric(inst), diff 
--git a/src/arch/x86_64/Mir.zig b/src/arch/x86_64/Mir.zig index c4e19fdc0e..9e39d23bd4 100644 --- a/src/arch/x86_64/Mir.zig +++ b/src/arch/x86_64/Mir.zig @@ -196,8 +196,6 @@ pub const Inst = struct { pextrw, /// Insert word pinsrw, - /// Round scalar single-precision floating-point values - roundss, /// Square root of scalar single precision floating-point value sqrtps, /// Subtract scalar single-precision floating-point values @@ -208,6 +206,7 @@ pub const Inst = struct { ucomiss, /// Bitwise logical xor of packed single precision floating-point values xorps, + /// Add double precision floating point values addsd, /// Bitwise logical and not of packed double precision floating-point values @@ -234,8 +233,32 @@ pub const Inst = struct { mulsd, /// Bitwise logical or of packed double precision floating-point values orpd, - /// Round scalar double-precision floating-point values - roundsd, + /// Shuffle packed high words + pshufhw, + /// Shuffle packed low words + pshuflw, + /// Shift packed data right logical + psrld, + /// Shift packed data right logical + psrlq, + /// Shift packed data right logical + psrlw, + /// Unpack high data + punpckhbw, + /// Unpack high data + punpckhdq, + /// Unpack high data + punpckhqdq, + /// Unpack high data + punpckhwd, + /// Unpack low data + punpcklbw, + /// Unpack low data + punpckldq, + /// Unpack low data + punpcklqdq, + /// Unpack low data + punpcklwd, /// Square root of double precision floating-point values sqrtpd, /// Square root of scalar double precision floating-point value @@ -247,6 +270,55 @@ pub const Inst = struct { /// Bitwise logical xor of packed double precision floating-point values xorpd, + /// Replicate double floating-point values + movddup, + /// Replicate single floating-point values + movshdup, + /// Replicate single floating-point values + movsldup, + + /// Round scalar double-precision floating-point values + roundsd, + /// Round scalar single-precision floating-point values + roundss, + + /// Replicate double floating-point values + vmovddup, + /// Replicate single floating-point values + vmovshdup, + /// Replicate single floating-point values + vmovsldup, + /// Extract word + vpextrw, + /// Insert word + vpinsrw, + /// Shuffle packed high words + vpshufhw, + /// Shuffle packed low words + vpshuflw, + /// Shift packed data right logical + vpsrld, + /// Shift packed data right logical + vpsrlq, + /// Shift packed data right logical + vpsrlw, + /// Unpack high data + vpunpckhbw, + /// Unpack high data + vpunpckhdq, + /// Unpack high data + vpunpckhqdq, + /// Unpack high data + vpunpckhwd, + /// Unpack low data + vpunpcklbw, + /// Unpack low data + vpunpckldq, + /// Unpack low data + vpunpcklqdq, + /// Unpack low data + vpunpcklwd, + /// Convert 16-bit floating-point values to single-precision floating-point values vcvtph2ps, /// Convert single-precision floating-point values to 16-bit floating-point values diff --git a/src/arch/x86_64/encoder.zig b/src/arch/x86_64/encoder.zig index 94f4eb56d5..495edb5f2a 100644 --- a/src/arch/x86_64/encoder.zig +++ b/src/arch/x86_64/encoder.zig @@ -151,15 +151,12 @@ pub const Instruction = struct { moffs.offset, }), }, - .imm => |imm| try writer.print("0x{x}", .{imm.asUnsigned(enc_op.bitSize())}), + .imm => |imm| try writer.print("0x{x}", .{imm.asUnsigned(enc_op.immBitSize())}), } } pub fn fmtPrint(op: Operand, enc_op: Encoding.Op) std.fmt.Formatter(fmt) { - return .{ .data = .{ - .op = op, - .enc_op = enc_op, - } }; + return .{ .data = .{ .op = op, .enc_op = enc_op } }; } }; @@ -210,7 +207,7 @@ 
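// Annotation (not part of the patch): a hedged sketch of how the three
// operand encodings introduced by this commit route their operands,
// inferred from the vex.v assignments and the mem_op/modRmExt switches in
// the encoder changes below; the example instructions come from the new
// mnemonic list, and the register choices are illustrative only.
//
//   .rvm  (e.g. vpunpcklwd xmm0, xmm1, xmm2):
//     ModRM.reg <- ops[0]  destination
//     VEX.vvvv  <- ops[1]  first source (stored one's-complemented)
//     ModRM.r/m <- ops[2]  second source, register or memory
//   .rvmi is .rvm plus a trailing imm8 taken from ops[3].
//
//   .vmi  (e.g. vpsrlw xmm0, xmm1, 1):
//     VEX.vvvv  <- ops[0]  destination
//     ModRM.r/m <- ops[1]  source
//     ModRM.reg <- the /digit opcode extension (modRmExt), as with .mi
//     imm8      <- ops[2]
//
// The airCmp change earlier in this commit leans on these encodings to
// compare two f16 values without scalar f16 hardware: interleave the two
// halves into one register (vpunpcklwd, or vpinsrw for a memory source),
// widen the lanes with vcvtph2ps, copy the odd lane down with vmovshdup,
// then run an ordinary ucomiss on the two resulting f32 lanes.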
pub const Instruction = struct { const data = enc.data; switch (data.mode) { - .none, .short, .rex, .long => { + .none, .short, .long, .rex, .rex_short => { try inst.encodeLegacyPrefixes(encoder); try inst.encodeMandatoryPrefix(encoder); try inst.encodeRexPrefix(encoder); @@ -232,15 +229,16 @@ pub const Instruction = struct { else => { const mem_op = switch (data.op_en) { .m, .mi, .m1, .mc, .mr, .mri, .mrc => inst.ops[0], - .rm, .rmi => inst.ops[1], + .rm, .rmi, .vmi => inst.ops[1], + .rvm, .rvmi => inst.ops[2], else => unreachable, }; switch (mem_op) { .reg => |reg| { const rm = switch (data.op_en) { - .m, .mi, .m1, .mc => enc.modRmExt(), + .m, .mi, .m1, .mc, .vmi => enc.modRmExt(), .mr, .mri, .mrc => inst.ops[1].reg.lowEnc(), - .rm, .rmi => inst.ops[0].reg.lowEnc(), + .rm, .rmi, .rvm, .rvmi => inst.ops[0].reg.lowEnc(), else => unreachable, }; try encoder.modRm_direct(rm, reg.lowEnc()); @@ -259,7 +257,8 @@ pub const Instruction = struct { switch (data.op_en) { .mi => try encodeImm(inst.ops[1].imm, data.ops[1], encoder), - .rmi, .mri => try encodeImm(inst.ops[2].imm, data.ops[2], encoder), + .rmi, .mri, .vmi => try encodeImm(inst.ops[2].imm, data.ops[2], encoder), + .rvmi => try encodeImm(inst.ops[3].imm, data.ops[3], encoder), else => {}, } }, @@ -291,11 +290,9 @@ pub const Instruction = struct { .rep, .repe, .repz => legacy.prefix_f3 = true, } - if (data.mode == .none) { - const bit_size = enc.operandBitSize(); - if (bit_size == 16) { - legacy.set16BitOverride(); - } + switch (data.mode) { + .short, .rex_short => legacy.set16BitOverride(), + else => {}, } const segment_override: ?Register = switch (op_en) { @@ -318,7 +315,7 @@ pub const Instruction = struct { } else null, - .rrm, .rrmi => unreachable, + .vmi, .rvm, .rvmi => unreachable, }; if (segment_override) |seg| { legacy.setSegmentOverride(seg); @@ -353,7 +350,7 @@ pub const Instruction = struct { rex.b = b_x_op.isBaseExtended(); rex.x = b_x_op.isIndexExtended(); }, - .rrm, .rrmi => unreachable, + .vmi, .rvm, .rvmi => unreachable, } try encoder.rex(rex); @@ -375,18 +372,19 @@ pub const Instruction = struct { switch (op_en) { .np, .i, .zi, .fd, .td, .d => {}, .o, .oi => vex.b = inst.ops[0].reg.isExtended(), - .m, .mi, .m1, .mc, .mr, .rm, .rmi, .mri, .mrc, .rrm, .rrmi => { + .m, .mi, .m1, .mc, .mr, .rm, .rmi, .mri, .mrc, .vmi, .rvm, .rvmi => { const r_op = switch (op_en) { - .rm, .rmi, .rrm, .rrmi => inst.ops[0], + .rm, .rmi, .rvm, .rvmi => inst.ops[0], .mr, .mri, .mrc => inst.ops[1], - else => .none, + .m, .mi, .m1, .mc, .vmi => .none, + else => unreachable, }; vex.r = r_op.isBaseExtended(); const b_x_op = switch (op_en) { - .rm, .rmi => inst.ops[1], + .rm, .rmi, .vmi => inst.ops[1], .m, .mi, .m1, .mc, .mr, .mri, .mrc => inst.ops[0], - .rrm, .rrmi => inst.ops[2], + .rvm, .rvmi => inst.ops[2], else => unreachable, }; vex.b = b_x_op.isBaseExtended(); @@ -417,7 +415,8 @@ pub const Instruction = struct { switch (op_en) { else => {}, - .rrm, .rrmi => vex.v = inst.ops[1].reg, + .vmi => vex.v = inst.ops[0].reg, + .rvm, .rvmi => vex.v = inst.ops[1].reg, } try encoder.vex(vex); @@ -515,8 +514,8 @@ pub const Instruction = struct { } fn encodeImm(imm: Immediate, kind: Encoding.Op, encoder: anytype) !void { - const raw = imm.asUnsigned(kind.bitSize()); - switch (kind.bitSize()) { + const raw = imm.asUnsigned(kind.immBitSize()); + switch (kind.immBitSize()) { 8 => try encoder.imm8(@intCast(u8, raw)), 16 => try encoder.imm16(@intCast(u16, raw)), 32 => try encoder.imm32(@intCast(u32, raw)), diff --git a/src/arch/x86_64/encodings.zig 
b/src/arch/x86_64/encodings.zig index 52b8cc29d6..5d2630e9a8 100644 --- a/src/arch/x86_64/encodings.zig +++ b/src/arch/x86_64/encodings.zig @@ -13,264 +13,264 @@ pub const Entry = struct { Mnemonic, OpEn, []const Op, []const u8, modrm_ext, Mo // zig fmt: off pub const table = [_]Entry{ // General-purpose - .{ .adc, .zi, &.{ .al, .imm8 }, &.{ 0x14 }, 0, .none, .none }, - .{ .adc, .zi, &.{ .ax, .imm16 }, &.{ 0x15 }, 0, .none, .none }, - .{ .adc, .zi, &.{ .eax, .imm32 }, &.{ 0x15 }, 0, .none, .none }, - .{ .adc, .zi, &.{ .rax, .imm32s }, &.{ 0x15 }, 0, .long, .none }, - .{ .adc, .mi, &.{ .rm8, .imm8 }, &.{ 0x80 }, 2, .none, .none }, - .{ .adc, .mi, &.{ .rm8, .imm8 }, &.{ 0x80 }, 2, .rex, .none }, - .{ .adc, .mi, &.{ .rm16, .imm16 }, &.{ 0x81 }, 2, .none, .none }, - .{ .adc, .mi, &.{ .rm32, .imm32 }, &.{ 0x81 }, 2, .none, .none }, - .{ .adc, .mi, &.{ .rm64, .imm32s }, &.{ 0x81 }, 2, .long, .none }, - .{ .adc, .mi, &.{ .rm16, .imm8s }, &.{ 0x83 }, 2, .none, .none }, - .{ .adc, .mi, &.{ .rm32, .imm8s }, &.{ 0x83 }, 2, .none, .none }, - .{ .adc, .mi, &.{ .rm64, .imm8s }, &.{ 0x83 }, 2, .long, .none }, - .{ .adc, .mr, &.{ .rm8, .r8 }, &.{ 0x10 }, 0, .none, .none }, - .{ .adc, .mr, &.{ .rm8, .r8 }, &.{ 0x10 }, 0, .rex, .none }, - .{ .adc, .mr, &.{ .rm16, .r16 }, &.{ 0x11 }, 0, .none, .none }, - .{ .adc, .mr, &.{ .rm32, .r32 }, &.{ 0x11 }, 0, .none, .none }, - .{ .adc, .mr, &.{ .rm64, .r64 }, &.{ 0x11 }, 0, .long, .none }, - .{ .adc, .rm, &.{ .r8, .rm8 }, &.{ 0x12 }, 0, .none, .none }, - .{ .adc, .rm, &.{ .r8, .rm8 }, &.{ 0x12 }, 0, .rex, .none }, - .{ .adc, .rm, &.{ .r16, .rm16 }, &.{ 0x13 }, 0, .none, .none }, - .{ .adc, .rm, &.{ .r32, .rm32 }, &.{ 0x13 }, 0, .none, .none }, - .{ .adc, .rm, &.{ .r64, .rm64 }, &.{ 0x13 }, 0, .long, .none }, - - .{ .add, .zi, &.{ .al, .imm8 }, &.{ 0x04 }, 0, .none, .none }, - .{ .add, .zi, &.{ .ax, .imm16 }, &.{ 0x05 }, 0, .none, .none }, - .{ .add, .zi, &.{ .eax, .imm32 }, &.{ 0x05 }, 0, .none, .none }, - .{ .add, .zi, &.{ .rax, .imm32s }, &.{ 0x05 }, 0, .long, .none }, - .{ .add, .mi, &.{ .rm8, .imm8 }, &.{ 0x80 }, 0, .none, .none }, - .{ .add, .mi, &.{ .rm8, .imm8 }, &.{ 0x80 }, 0, .rex, .none }, - .{ .add, .mi, &.{ .rm16, .imm16 }, &.{ 0x81 }, 0, .none, .none }, - .{ .add, .mi, &.{ .rm32, .imm32 }, &.{ 0x81 }, 0, .none, .none }, - .{ .add, .mi, &.{ .rm64, .imm32s }, &.{ 0x81 }, 0, .long, .none }, - .{ .add, .mi, &.{ .rm16, .imm8s }, &.{ 0x83 }, 0, .none, .none }, - .{ .add, .mi, &.{ .rm32, .imm8s }, &.{ 0x83 }, 0, .none, .none }, - .{ .add, .mi, &.{ .rm64, .imm8s }, &.{ 0x83 }, 0, .long, .none }, - .{ .add, .mr, &.{ .rm8, .r8 }, &.{ 0x00 }, 0, .none, .none }, - .{ .add, .mr, &.{ .rm8, .r8 }, &.{ 0x00 }, 0, .rex, .none }, - .{ .add, .mr, &.{ .rm16, .r16 }, &.{ 0x01 }, 0, .none, .none }, - .{ .add, .mr, &.{ .rm32, .r32 }, &.{ 0x01 }, 0, .none, .none }, - .{ .add, .mr, &.{ .rm64, .r64 }, &.{ 0x01 }, 0, .long, .none }, - .{ .add, .rm, &.{ .r8, .rm8 }, &.{ 0x02 }, 0, .none, .none }, - .{ .add, .rm, &.{ .r8, .rm8 }, &.{ 0x02 }, 0, .rex, .none }, - .{ .add, .rm, &.{ .r16, .rm16 }, &.{ 0x03 }, 0, .none, .none }, - .{ .add, .rm, &.{ .r32, .rm32 }, &.{ 0x03 }, 0, .none, .none }, - .{ .add, .rm, &.{ .r64, .rm64 }, &.{ 0x03 }, 0, .long, .none }, - - .{ .@"and", .zi, &.{ .al, .imm8 }, &.{ 0x24 }, 0, .none, .none }, - .{ .@"and", .zi, &.{ .ax, .imm16 }, &.{ 0x25 }, 0, .none, .none }, - .{ .@"and", .zi, &.{ .eax, .imm32 }, &.{ 0x25 }, 0, .none, .none }, - .{ .@"and", .zi, &.{ .rax, .imm32s }, &.{ 0x25 }, 0, .long, .none }, - .{ .@"and", .mi, &.{ .rm8, .imm8 }, &.{ 0x80 }, 4, 
.none, .none }, - .{ .@"and", .mi, &.{ .rm8, .imm8 }, &.{ 0x80 }, 4, .rex, .none }, - .{ .@"and", .mi, &.{ .rm16, .imm16 }, &.{ 0x81 }, 4, .none, .none }, - .{ .@"and", .mi, &.{ .rm32, .imm32 }, &.{ 0x81 }, 4, .none, .none }, - .{ .@"and", .mi, &.{ .rm64, .imm32s }, &.{ 0x81 }, 4, .long, .none }, - .{ .@"and", .mi, &.{ .rm16, .imm8s }, &.{ 0x83 }, 4, .none, .none }, - .{ .@"and", .mi, &.{ .rm32, .imm8s }, &.{ 0x83 }, 4, .none, .none }, - .{ .@"and", .mi, &.{ .rm64, .imm8s }, &.{ 0x83 }, 4, .long, .none }, - .{ .@"and", .mr, &.{ .rm8, .r8 }, &.{ 0x20 }, 0, .none, .none }, - .{ .@"and", .mr, &.{ .rm8, .r8 }, &.{ 0x20 }, 0, .rex, .none }, - .{ .@"and", .mr, &.{ .rm16, .r16 }, &.{ 0x21 }, 0, .none, .none }, - .{ .@"and", .mr, &.{ .rm32, .r32 }, &.{ 0x21 }, 0, .none, .none }, - .{ .@"and", .mr, &.{ .rm64, .r64 }, &.{ 0x21 }, 0, .long, .none }, - .{ .@"and", .rm, &.{ .r8, .rm8 }, &.{ 0x22 }, 0, .none, .none }, - .{ .@"and", .rm, &.{ .r8, .rm8 }, &.{ 0x22 }, 0, .rex, .none }, - .{ .@"and", .rm, &.{ .r16, .rm16 }, &.{ 0x23 }, 0, .none, .none }, - .{ .@"and", .rm, &.{ .r32, .rm32 }, &.{ 0x23 }, 0, .none, .none }, - .{ .@"and", .rm, &.{ .r64, .rm64 }, &.{ 0x23 }, 0, .long, .none }, - - .{ .bsf, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0xbc }, 0, .none, .none }, - .{ .bsf, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0xbc }, 0, .none, .none }, - .{ .bsf, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0xbc }, 0, .long, .none }, - - .{ .bsr, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0xbd }, 0, .none, .none }, - .{ .bsr, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0xbd }, 0, .none, .none }, - .{ .bsr, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0xbd }, 0, .long, .none }, + .{ .adc, .zi, &.{ .al, .imm8 }, &.{ 0x14 }, 0, .none, .none }, + .{ .adc, .zi, &.{ .ax, .imm16 }, &.{ 0x15 }, 0, .short, .none }, + .{ .adc, .zi, &.{ .eax, .imm32 }, &.{ 0x15 }, 0, .none, .none }, + .{ .adc, .zi, &.{ .rax, .imm32s }, &.{ 0x15 }, 0, .long, .none }, + .{ .adc, .mi, &.{ .rm8, .imm8 }, &.{ 0x80 }, 2, .none, .none }, + .{ .adc, .mi, &.{ .rm8, .imm8 }, &.{ 0x80 }, 2, .rex, .none }, + .{ .adc, .mi, &.{ .rm16, .imm16 }, &.{ 0x81 }, 2, .short, .none }, + .{ .adc, .mi, &.{ .rm32, .imm32 }, &.{ 0x81 }, 2, .none, .none }, + .{ .adc, .mi, &.{ .rm64, .imm32s }, &.{ 0x81 }, 2, .long, .none }, + .{ .adc, .mi, &.{ .rm16, .imm8s }, &.{ 0x83 }, 2, .short, .none }, + .{ .adc, .mi, &.{ .rm32, .imm8s }, &.{ 0x83 }, 2, .none, .none }, + .{ .adc, .mi, &.{ .rm64, .imm8s }, &.{ 0x83 }, 2, .long, .none }, + .{ .adc, .mr, &.{ .rm8, .r8 }, &.{ 0x10 }, 0, .none, .none }, + .{ .adc, .mr, &.{ .rm8, .r8 }, &.{ 0x10 }, 0, .rex, .none }, + .{ .adc, .mr, &.{ .rm16, .r16 }, &.{ 0x11 }, 0, .short, .none }, + .{ .adc, .mr, &.{ .rm32, .r32 }, &.{ 0x11 }, 0, .none, .none }, + .{ .adc, .mr, &.{ .rm64, .r64 }, &.{ 0x11 }, 0, .long, .none }, + .{ .adc, .rm, &.{ .r8, .rm8 }, &.{ 0x12 }, 0, .none, .none }, + .{ .adc, .rm, &.{ .r8, .rm8 }, &.{ 0x12 }, 0, .rex, .none }, + .{ .adc, .rm, &.{ .r16, .rm16 }, &.{ 0x13 }, 0, .short, .none }, + .{ .adc, .rm, &.{ .r32, .rm32 }, &.{ 0x13 }, 0, .none, .none }, + .{ .adc, .rm, &.{ .r64, .rm64 }, &.{ 0x13 }, 0, .long, .none }, + + .{ .add, .zi, &.{ .al, .imm8 }, &.{ 0x04 }, 0, .none, .none }, + .{ .add, .zi, &.{ .ax, .imm16 }, &.{ 0x05 }, 0, .short, .none }, + .{ .add, .zi, &.{ .eax, .imm32 }, &.{ 0x05 }, 0, .none, .none }, + .{ .add, .zi, &.{ .rax, .imm32s }, &.{ 0x05 }, 0, .long, .none }, + .{ .add, .mi, &.{ .rm8, .imm8 }, &.{ 0x80 }, 0, .none, .none }, + .{ .add, .mi, &.{ .rm8, .imm8 }, &.{ 0x80 }, 0, .rex, .none }, + .{ .add, .mi, &.{ .rm16, .imm16 }, &.{ 0x81 }, 0, .short, .none 
}, + .{ .add, .mi, &.{ .rm32, .imm32 }, &.{ 0x81 }, 0, .none, .none }, + .{ .add, .mi, &.{ .rm64, .imm32s }, &.{ 0x81 }, 0, .long, .none }, + .{ .add, .mi, &.{ .rm16, .imm8s }, &.{ 0x83 }, 0, .short, .none }, + .{ .add, .mi, &.{ .rm32, .imm8s }, &.{ 0x83 }, 0, .none, .none }, + .{ .add, .mi, &.{ .rm64, .imm8s }, &.{ 0x83 }, 0, .long, .none }, + .{ .add, .mr, &.{ .rm8, .r8 }, &.{ 0x00 }, 0, .none, .none }, + .{ .add, .mr, &.{ .rm8, .r8 }, &.{ 0x00 }, 0, .rex, .none }, + .{ .add, .mr, &.{ .rm16, .r16 }, &.{ 0x01 }, 0, .short, .none }, + .{ .add, .mr, &.{ .rm32, .r32 }, &.{ 0x01 }, 0, .none, .none }, + .{ .add, .mr, &.{ .rm64, .r64 }, &.{ 0x01 }, 0, .long, .none }, + .{ .add, .rm, &.{ .r8, .rm8 }, &.{ 0x02 }, 0, .none, .none }, + .{ .add, .rm, &.{ .r8, .rm8 }, &.{ 0x02 }, 0, .rex, .none }, + .{ .add, .rm, &.{ .r16, .rm16 }, &.{ 0x03 }, 0, .short, .none }, + .{ .add, .rm, &.{ .r32, .rm32 }, &.{ 0x03 }, 0, .none, .none }, + .{ .add, .rm, &.{ .r64, .rm64 }, &.{ 0x03 }, 0, .long, .none }, + + .{ .@"and", .zi, &.{ .al, .imm8 }, &.{ 0x24 }, 0, .none, .none }, + .{ .@"and", .zi, &.{ .ax, .imm16 }, &.{ 0x25 }, 0, .short, .none }, + .{ .@"and", .zi, &.{ .eax, .imm32 }, &.{ 0x25 }, 0, .none, .none }, + .{ .@"and", .zi, &.{ .rax, .imm32s }, &.{ 0x25 }, 0, .long, .none }, + .{ .@"and", .mi, &.{ .rm8, .imm8 }, &.{ 0x80 }, 4, .none, .none }, + .{ .@"and", .mi, &.{ .rm8, .imm8 }, &.{ 0x80 }, 4, .rex, .none }, + .{ .@"and", .mi, &.{ .rm16, .imm16 }, &.{ 0x81 }, 4, .short, .none }, + .{ .@"and", .mi, &.{ .rm32, .imm32 }, &.{ 0x81 }, 4, .none, .none }, + .{ .@"and", .mi, &.{ .rm64, .imm32s }, &.{ 0x81 }, 4, .long, .none }, + .{ .@"and", .mi, &.{ .rm16, .imm8s }, &.{ 0x83 }, 4, .short, .none }, + .{ .@"and", .mi, &.{ .rm32, .imm8s }, &.{ 0x83 }, 4, .none, .none }, + .{ .@"and", .mi, &.{ .rm64, .imm8s }, &.{ 0x83 }, 4, .long, .none }, + .{ .@"and", .mr, &.{ .rm8, .r8 }, &.{ 0x20 }, 0, .none, .none }, + .{ .@"and", .mr, &.{ .rm8, .r8 }, &.{ 0x20 }, 0, .rex, .none }, + .{ .@"and", .mr, &.{ .rm16, .r16 }, &.{ 0x21 }, 0, .short, .none }, + .{ .@"and", .mr, &.{ .rm32, .r32 }, &.{ 0x21 }, 0, .none, .none }, + .{ .@"and", .mr, &.{ .rm64, .r64 }, &.{ 0x21 }, 0, .long, .none }, + .{ .@"and", .rm, &.{ .r8, .rm8 }, &.{ 0x22 }, 0, .none, .none }, + .{ .@"and", .rm, &.{ .r8, .rm8 }, &.{ 0x22 }, 0, .rex, .none }, + .{ .@"and", .rm, &.{ .r16, .rm16 }, &.{ 0x23 }, 0, .short, .none }, + .{ .@"and", .rm, &.{ .r32, .rm32 }, &.{ 0x23 }, 0, .none, .none }, + .{ .@"and", .rm, &.{ .r64, .rm64 }, &.{ 0x23 }, 0, .long, .none }, + + .{ .bsf, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0xbc }, 0, .short, .none }, + .{ .bsf, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0xbc }, 0, .none, .none }, + .{ .bsf, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0xbc }, 0, .long, .none }, + + .{ .bsr, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0xbd }, 0, .short, .none }, + .{ .bsr, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0xbd }, 0, .none, .none }, + .{ .bsr, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0xbd }, 0, .long, .none }, .{ .bswap, .o, &.{ .r32 }, &.{ 0x0f, 0xc8 }, 0, .none, .none }, .{ .bswap, .o, &.{ .r64 }, &.{ 0x0f, 0xc8 }, 0, .long, .none }, - .{ .bt, .mr, &.{ .rm16, .r16 }, &.{ 0x0f, 0xa3 }, 0, .none, .none }, - .{ .bt, .mr, &.{ .rm32, .r32 }, &.{ 0x0f, 0xa3 }, 0, .none, .none }, - .{ .bt, .mr, &.{ .rm64, .r64 }, &.{ 0x0f, 0xa3 }, 0, .long, .none }, - .{ .bt, .mi, &.{ .rm16, .imm8 }, &.{ 0x0f, 0xba }, 4, .none, .none }, - .{ .bt, .mi, &.{ .rm32, .imm8 }, &.{ 0x0f, 0xba }, 4, .none, .none }, - .{ .bt, .mi, &.{ .rm64, .imm8 }, &.{ 0x0f, 0xba }, 4, .long, .none }, - - .{ .btc, .mr, &.{ .rm16, .r16 
}, &.{ 0x0f, 0xbb }, 0, .none, .none }, - .{ .btc, .mr, &.{ .rm32, .r32 }, &.{ 0x0f, 0xbb }, 0, .none, .none }, - .{ .btc, .mr, &.{ .rm64, .r64 }, &.{ 0x0f, 0xbb }, 0, .long, .none }, - .{ .btc, .mi, &.{ .rm16, .imm8 }, &.{ 0x0f, 0xba }, 7, .none, .none }, - .{ .btc, .mi, &.{ .rm32, .imm8 }, &.{ 0x0f, 0xba }, 7, .none, .none }, - .{ .btc, .mi, &.{ .rm64, .imm8 }, &.{ 0x0f, 0xba }, 7, .long, .none }, - - .{ .btr, .mr, &.{ .rm16, .r16 }, &.{ 0x0f, 0xb3 }, 0, .none, .none }, - .{ .btr, .mr, &.{ .rm32, .r32 }, &.{ 0x0f, 0xb3 }, 0, .none, .none }, - .{ .btr, .mr, &.{ .rm64, .r64 }, &.{ 0x0f, 0xb3 }, 0, .long, .none }, - .{ .btr, .mi, &.{ .rm16, .imm8 }, &.{ 0x0f, 0xba }, 6, .none, .none }, - .{ .btr, .mi, &.{ .rm32, .imm8 }, &.{ 0x0f, 0xba }, 6, .none, .none }, - .{ .btr, .mi, &.{ .rm64, .imm8 }, &.{ 0x0f, 0xba }, 6, .long, .none }, - - .{ .bts, .mr, &.{ .rm16, .r16 }, &.{ 0x0f, 0xab }, 0, .none, .none }, - .{ .bts, .mr, &.{ .rm32, .r32 }, &.{ 0x0f, 0xab }, 0, .none, .none }, - .{ .bts, .mr, &.{ .rm64, .r64 }, &.{ 0x0f, 0xab }, 0, .long, .none }, - .{ .bts, .mi, &.{ .rm16, .imm8 }, &.{ 0x0f, 0xba }, 5, .none, .none }, - .{ .bts, .mi, &.{ .rm32, .imm8 }, &.{ 0x0f, 0xba }, 5, .none, .none }, - .{ .bts, .mi, &.{ .rm64, .imm8 }, &.{ 0x0f, 0xba }, 5, .long, .none }, + .{ .bt, .mr, &.{ .rm16, .r16 }, &.{ 0x0f, 0xa3 }, 0, .short, .none }, + .{ .bt, .mr, &.{ .rm32, .r32 }, &.{ 0x0f, 0xa3 }, 0, .none, .none }, + .{ .bt, .mr, &.{ .rm64, .r64 }, &.{ 0x0f, 0xa3 }, 0, .long, .none }, + .{ .bt, .mi, &.{ .rm16, .imm8 }, &.{ 0x0f, 0xba }, 4, .short, .none }, + .{ .bt, .mi, &.{ .rm32, .imm8 }, &.{ 0x0f, 0xba }, 4, .none, .none }, + .{ .bt, .mi, &.{ .rm64, .imm8 }, &.{ 0x0f, 0xba }, 4, .long, .none }, + + .{ .btc, .mr, &.{ .rm16, .r16 }, &.{ 0x0f, 0xbb }, 0, .short, .none }, + .{ .btc, .mr, &.{ .rm32, .r32 }, &.{ 0x0f, 0xbb }, 0, .none, .none }, + .{ .btc, .mr, &.{ .rm64, .r64 }, &.{ 0x0f, 0xbb }, 0, .long, .none }, + .{ .btc, .mi, &.{ .rm16, .imm8 }, &.{ 0x0f, 0xba }, 7, .short, .none }, + .{ .btc, .mi, &.{ .rm32, .imm8 }, &.{ 0x0f, 0xba }, 7, .none, .none }, + .{ .btc, .mi, &.{ .rm64, .imm8 }, &.{ 0x0f, 0xba }, 7, .long, .none }, + + .{ .btr, .mr, &.{ .rm16, .r16 }, &.{ 0x0f, 0xb3 }, 0, .short, .none }, + .{ .btr, .mr, &.{ .rm32, .r32 }, &.{ 0x0f, 0xb3 }, 0, .none, .none }, + .{ .btr, .mr, &.{ .rm64, .r64 }, &.{ 0x0f, 0xb3 }, 0, .long, .none }, + .{ .btr, .mi, &.{ .rm16, .imm8 }, &.{ 0x0f, 0xba }, 6, .short, .none }, + .{ .btr, .mi, &.{ .rm32, .imm8 }, &.{ 0x0f, 0xba }, 6, .none, .none }, + .{ .btr, .mi, &.{ .rm64, .imm8 }, &.{ 0x0f, 0xba }, 6, .long, .none }, + + .{ .bts, .mr, &.{ .rm16, .r16 }, &.{ 0x0f, 0xab }, 0, .short, .none }, + .{ .bts, .mr, &.{ .rm32, .r32 }, &.{ 0x0f, 0xab }, 0, .none, .none }, + .{ .bts, .mr, &.{ .rm64, .r64 }, &.{ 0x0f, 0xab }, 0, .long, .none }, + .{ .bts, .mi, &.{ .rm16, .imm8 }, &.{ 0x0f, 0xba }, 5, .short, .none }, + .{ .bts, .mi, &.{ .rm32, .imm8 }, &.{ 0x0f, 0xba }, 5, .none, .none }, + .{ .bts, .mi, &.{ .rm64, .imm8 }, &.{ 0x0f, 0xba }, 5, .long, .none }, // This is M encoding according to Intel, but D makes more sense here. 
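// Annotation (not part of the patch): .d here means the rel32 operand is
// encoded as an immediate displacement placed directly after the opcode,
// with no ModRM byte (e.g. `call foo` -> e8 xx xx xx xx), whereas the
// FF /2 form below goes through ModRM like any other .m operand.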
     .{ .call, .d, &.{ .rel32 }, &.{ 0xe8 }, 0, .none, .none },
     .{ .call, .m, &.{ .rm64 }, &.{ 0xff }, 2, .none, .none },

-    .{ .cbw, .np, &.{ .o16 }, &.{ 0x98 }, 0, .none, .none },
-    .{ .cwde, .np, &.{ .o32 }, &.{ 0x98 }, 0, .none, .none },
-    .{ .cdqe, .np, &.{ .o64 }, &.{ 0x98 }, 0, .long, .none },
-
-    .{ .cwd, .np, &.{ .o16 }, &.{ 0x99 }, 0, .none, .none },
-    .{ .cdq, .np, &.{ .o32 }, &.{ 0x99 }, 0, .none, .none },
-    .{ .cqo, .np, &.{ .o64 }, &.{ 0x99 }, 0, .long, .none },
-
-    .{ .cmova, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x47 }, 0, .none, .none },
-    .{ .cmova, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x47 }, 0, .none, .none },
-    .{ .cmova, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x47 }, 0, .long, .none },
-    .{ .cmovae, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x43 }, 0, .none, .none },
-    .{ .cmovae, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x43 }, 0, .none, .none },
-    .{ .cmovae, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x43 }, 0, .long, .none },
-    .{ .cmovb, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x42 }, 0, .none, .none },
-    .{ .cmovb, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x42 }, 0, .none, .none },
-    .{ .cmovb, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x42 }, 0, .long, .none },
-    .{ .cmovbe, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x46 }, 0, .none, .none },
-    .{ .cmovbe, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x46 }, 0, .none, .none },
-    .{ .cmovbe, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x46 }, 0, .long, .none },
-    .{ .cmovc, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x42 }, 0, .none, .none },
-    .{ .cmovc, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x42 }, 0, .none, .none },
-    .{ .cmovc, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x42 }, 0, .long, .none },
-    .{ .cmove, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x44 }, 0, .none, .none },
-    .{ .cmove, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x44 }, 0, .none, .none },
-    .{ .cmove, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x44 }, 0, .long, .none },
-    .{ .cmovg, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x4f }, 0, .none, .none },
-    .{ .cmovg, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x4f }, 0, .none, .none },
-    .{ .cmovg, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x4f }, 0, .long, .none },
-    .{ .cmovge, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x4d }, 0, .none, .none },
-    .{ .cmovge, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x4d }, 0, .none, .none },
-    .{ .cmovge, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x4d }, 0, .long, .none },
-    .{ .cmovl, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x4c }, 0, .none, .none },
-    .{ .cmovl, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x4c }, 0, .none, .none },
-    .{ .cmovl, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x4c }, 0, .long, .none },
-    .{ .cmovle, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x4e }, 0, .none, .none },
-    .{ .cmovle, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x4e }, 0, .none, .none },
-    .{ .cmovle, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x4e }, 0, .long, .none },
-    .{ .cmovna, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x46 }, 0, .none, .none },
-    .{ .cmovna, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x46 }, 0, .none, .none },
-    .{ .cmovna, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x46 }, 0, .long, .none },
-    .{ .cmovnae, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x42 }, 0, .none, .none },
-    .{ .cmovnae, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x42 }, 0, .none, .none },
-    .{ .cmovnae, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x42 }, 0, .long, .none },
-    .{ .cmovnb, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x43 }, 0, .none, .none },
-    .{ .cmovnb, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x43 }, 0, .none, .none },
-    .{ .cmovnb, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x43 }, 0, .long, .none },
-    .{ .cmovnbe, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x47 }, 0, .none, .none },
-    .{ .cmovnbe, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x47 }, 0, .none, .none },
-    .{ .cmovnbe, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x47 }, 0, .long, .none },
-    .{ .cmovnc, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x43 }, 0, .none, .none },
-    .{ .cmovnc, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x43 }, 0, .none, .none },
-    .{ .cmovnc, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x43 }, 0, .long, .none },
-    .{ .cmovne, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x45 }, 0, .none, .none },
-    .{ .cmovne, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x45 }, 0, .none, .none },
-    .{ .cmovne, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x45 }, 0, .long, .none },
-    .{ .cmovng, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x4e }, 0, .none, .none },
-    .{ .cmovng, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x4e }, 0, .none, .none },
-    .{ .cmovng, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x4e }, 0, .long, .none },
-    .{ .cmovnge, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x4c }, 0, .none, .none },
-    .{ .cmovnge, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x4c }, 0, .none, .none },
-    .{ .cmovnge, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x4c }, 0, .long, .none },
-    .{ .cmovnl, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x4d }, 0, .none, .none },
-    .{ .cmovnl, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x4d }, 0, .none, .none },
-    .{ .cmovnl, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x4d }, 0, .long, .none },
-    .{ .cmovnle, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x4f }, 0, .none, .none },
-    .{ .cmovnle, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x4f }, 0, .none, .none },
-    .{ .cmovnle, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x4f }, 0, .long, .none },
-    .{ .cmovno, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x41 }, 0, .none, .none },
-    .{ .cmovno, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x41 }, 0, .none, .none },
-    .{ .cmovno, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x41 }, 0, .long, .none },
-    .{ .cmovnp, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x4b }, 0, .none, .none },
-    .{ .cmovnp, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x4b }, 0, .none, .none },
-    .{ .cmovnp, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x4b }, 0, .long, .none },
-    .{ .cmovns, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x49 }, 0, .none, .none },
-    .{ .cmovns, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x49 }, 0, .none, .none },
-    .{ .cmovns, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x49 }, 0, .long, .none },
-    .{ .cmovnz, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x45 }, 0, .none, .none },
-    .{ .cmovnz, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x45 }, 0, .none, .none },
-    .{ .cmovnz, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x45 }, 0, .long, .none },
-    .{ .cmovo, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x40 }, 0, .none, .none },
-    .{ .cmovo, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x40 }, 0, .none, .none },
-    .{ .cmovo, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x40 }, 0, .long, .none },
-    .{ .cmovp, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x4a }, 0, .none, .none },
-    .{ .cmovp, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x4a }, 0, .none, .none },
-    .{ .cmovp, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x4a }, 0, .long, .none },
-    .{ .cmovpe, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x4a }, 0, .none, .none },
-    .{ .cmovpe, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x4a }, 0, .none, .none },
-    .{ .cmovpe, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x4a }, 0, .long, .none },
-    .{ .cmovpo, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x4b }, 0, .none, .none },
-    .{ .cmovpo, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x4b }, 0, .none, .none },
-    .{ .cmovpo, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x4b }, 0, .long, .none },
-    .{ .cmovs, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x48 }, 0, .none, .none },
-    .{ .cmovs, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x48 }, 0, .none, .none },
-    .{ .cmovs, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x48 }, 0, .long, .none },
-    .{ .cmovz, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x44 }, 0, .none, .none },
-    .{ .cmovz, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x44 }, 0, .none, .none },
-    .{ .cmovz, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x44 }, 0, .long, .none },
-
-    .{ .cmp, .zi, &.{ .al, .imm8 }, &.{ 0x3c }, 0, .none, .none },
-    .{ .cmp, .zi, &.{ .ax, .imm16 }, &.{ 0x3d }, 0, .none, .none },
-    .{ .cmp, .zi, &.{ .eax, .imm32 }, &.{ 0x3d }, 0, .none, .none },
-    .{ .cmp, .zi, &.{ .rax, .imm32s }, &.{ 0x3d }, 0, .long, .none },
-    .{ .cmp, .mi, &.{ .rm8, .imm8 }, &.{ 0x80 }, 7, .none, .none },
-    .{ .cmp, .mi, &.{ .rm8, .imm8 }, &.{ 0x80 }, 7, .rex, .none },
-    .{ .cmp, .mi, &.{ .rm16, .imm16 }, &.{ 0x81 }, 7, .none, .none },
-    .{ .cmp, .mi, &.{ .rm32, .imm32 }, &.{ 0x81 }, 7, .none, .none },
-    .{ .cmp, .mi, &.{ .rm64, .imm32s }, &.{ 0x81 }, 7, .long, .none },
-    .{ .cmp, .mi, &.{ .rm16, .imm8s }, &.{ 0x83 }, 7, .none, .none },
-    .{ .cmp, .mi, &.{ .rm32, .imm8s }, &.{ 0x83 }, 7, .none, .none },
-    .{ .cmp, .mi, &.{ .rm64, .imm8s }, &.{ 0x83 }, 7, .long, .none },
-    .{ .cmp, .mr, &.{ .rm8, .r8 }, &.{ 0x38 }, 0, .none, .none },
-    .{ .cmp, .mr, &.{ .rm8, .r8 }, &.{ 0x38 }, 0, .rex, .none },
-    .{ .cmp, .mr, &.{ .rm16, .r16 }, &.{ 0x39 }, 0, .none, .none },
-    .{ .cmp, .mr, &.{ .rm32, .r32 }, &.{ 0x39 }, 0, .none, .none },
-    .{ .cmp, .mr, &.{ .rm64, .r64 }, &.{ 0x39 }, 0, .long, .none },
-    .{ .cmp, .rm, &.{ .r8, .rm8 }, &.{ 0x3a }, 0, .none, .none },
-    .{ .cmp, .rm, &.{ .r8, .rm8 }, &.{ 0x3a }, 0, .rex, .none },
-    .{ .cmp, .rm, &.{ .r16, .rm16 }, &.{ 0x3b }, 0, .none, .none },
-    .{ .cmp, .rm, &.{ .r32, .rm32 }, &.{ 0x3b }, 0, .none, .none },
-    .{ .cmp, .rm, &.{ .r64, .rm64 }, &.{ 0x3b }, 0, .long, .none },
-
-    .{ .cmps, .np, &.{ .m8, .m8 }, &.{ 0xa6 }, 0, .none, .none },
-    .{ .cmps, .np, &.{ .m16, .m16 }, &.{ 0xa7 }, 0, .none, .none },
-    .{ .cmps, .np, &.{ .m32, .m32 }, &.{ 0xa7 }, 0, .none, .none },
-    .{ .cmps, .np, &.{ .m64, .m64 }, &.{ 0xa7 }, 0, .long, .none },
+    .{ .cbw, .np, &.{ .o16 }, &.{ 0x98 }, 0, .short, .none },
+    .{ .cwde, .np, &.{ .o32 }, &.{ 0x98 }, 0, .none, .none },
+    .{ .cdqe, .np, &.{ .o64 }, &.{ 0x98 }, 0, .long, .none },
+
+    .{ .cwd, .np, &.{ .o16 }, &.{ 0x99 }, 0, .short, .none },
+    .{ .cdq, .np, &.{ .o32 }, &.{ 0x99 }, 0, .none, .none },
+    .{ .cqo, .np, &.{ .o64 }, &.{ 0x99 }, 0, .long, .none },
+
+    .{ .cmova, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x47 }, 0, .short, .none },
+    .{ .cmova, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x47 }, 0, .none, .none },
+    .{ .cmova, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x47 }, 0, .long, .none },
+    .{ .cmovae, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x43 }, 0, .short, .none },
+    .{ .cmovae, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x43 }, 0, .none, .none },
+    .{ .cmovae, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x43 }, 0, .long, .none },
+    .{ .cmovb, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x42 }, 0, .short, .none },
+    .{ .cmovb, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x42 }, 0, .none, .none },
+    .{ .cmovb, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x42 }, 0, .long, .none },
+    .{ .cmovbe, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x46 }, 0, .short, .none },
+    .{ .cmovbe, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x46 }, 0, .none, .none },
+    .{ .cmovbe, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x46 }, 0, .long, .none },
+    .{ .cmovc, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x42 }, 0, .short, .none },
+    .{ .cmovc, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x42 }, 0, .none, .none },
+    .{ .cmovc, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x42 }, 0, .long, .none },
+    .{ .cmove, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x44 }, 0, .short, .none },
+    .{ .cmove, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x44 }, 0, .none, .none },
+    .{ .cmove, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x44 }, 0, .long, .none },
+    .{ .cmovg, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x4f }, 0, .short, .none },
+    .{ .cmovg, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x4f }, 0, .none, .none },
+    .{ .cmovg, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x4f }, 0, .long, .none },
+    .{ .cmovge, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x4d }, 0, .short, .none },
+    .{ .cmovge, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x4d }, 0, .none, .none },
+    .{ .cmovge, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x4d }, 0, .long, .none },
+    .{ .cmovl, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x4c }, 0, .short, .none },
+    .{ .cmovl, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x4c }, 0, .none, .none },
+    .{ .cmovl, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x4c }, 0, .long, .none },
+    .{ .cmovle, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x4e }, 0, .short, .none },
+    .{ .cmovle, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x4e }, 0, .none, .none },
+    .{ .cmovle, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x4e }, 0, .long, .none },
+    .{ .cmovna, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x46 }, 0, .short, .none },
+    .{ .cmovna, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x46 }, 0, .none, .none },
+    .{ .cmovna, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x46 }, 0, .long, .none },
+    .{ .cmovnae, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x42 }, 0, .short, .none },
+    .{ .cmovnae, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x42 }, 0, .none, .none },
+    .{ .cmovnae, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x42 }, 0, .long, .none },
+    .{ .cmovnb, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x43 }, 0, .short, .none },
+    .{ .cmovnb, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x43 }, 0, .none, .none },
+    .{ .cmovnb, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x43 }, 0, .long, .none },
+    .{ .cmovnbe, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x47 }, 0, .short, .none },
+    .{ .cmovnbe, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x47 }, 0, .none, .none },
+    .{ .cmovnbe, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x47 }, 0, .long, .none },
+    .{ .cmovnc, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x43 }, 0, .short, .none },
+    .{ .cmovnc, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x43 }, 0, .none, .none },
+    .{ .cmovnc, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x43 }, 0, .long, .none },
+    .{ .cmovne, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x45 }, 0, .short, .none },
+    .{ .cmovne, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x45 }, 0, .none, .none },
+    .{ .cmovne, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x45 }, 0, .long, .none },
+    .{ .cmovng, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x4e }, 0, .short, .none },
+    .{ .cmovng, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x4e }, 0, .none, .none },
+    .{ .cmovng, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x4e }, 0, .long, .none },
+    .{ .cmovnge, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x4c }, 0, .short, .none },
+    .{ .cmovnge, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x4c }, 0, .none, .none },
+    .{ .cmovnge, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x4c }, 0, .long, .none },
+    .{ .cmovnl, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x4d }, 0, .short, .none },
+    .{ .cmovnl, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x4d }, 0, .none, .none },
+    .{ .cmovnl, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x4d }, 0, .long, .none },
+    .{ .cmovnle, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x4f }, 0, .short, .none },
+    .{ .cmovnle, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x4f }, 0, .none, .none },
+    .{ .cmovnle, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x4f }, 0, .long, .none },
+    .{ .cmovno, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x41 }, 0, .short, .none },
+    .{ .cmovno, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x41 }, 0, .none, .none },
+    .{ .cmovno, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x41 }, 0, .long, .none },
+    .{ .cmovnp, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x4b }, 0, .short, .none },
+    .{ .cmovnp, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x4b }, 0, .none, .none },
+    .{ .cmovnp, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x4b }, 0, .long, .none },
+    .{ .cmovns, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x49 }, 0, .short, .none },
+    .{ .cmovns, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x49 }, 0, .none, .none },
+    .{ .cmovns, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x49 }, 0, .long, .none },
+    .{ .cmovnz, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x45 }, 0, .short, .none },
+    .{ .cmovnz, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x45 }, 0, .none, .none },
+    .{ .cmovnz, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x45 }, 0, .long, .none },
+    .{ .cmovo, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x40 }, 0, .short, .none },
+    .{ .cmovo, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x40 }, 0, .none, .none },
+    .{ .cmovo, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x40 }, 0, .long, .none },
+    .{ .cmovp, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x4a }, 0, .short, .none },
+    .{ .cmovp, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x4a }, 0, .none, .none },
+    .{ .cmovp, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x4a }, 0, .long, .none },
+    .{ .cmovpe, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x4a }, 0, .short, .none },
+    .{ .cmovpe, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x4a }, 0, .none, .none },
+    .{ .cmovpe, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x4a }, 0, .long, .none },
+    .{ .cmovpo, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x4b }, 0, .short, .none },
+    .{ .cmovpo, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x4b }, 0, .none, .none },
+    .{ .cmovpo, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x4b }, 0, .long, .none },
+    .{ .cmovs, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x48 }, 0, .short, .none },
+    .{ .cmovs, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x48 }, 0, .none, .none },
+    .{ .cmovs, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x48 }, 0, .long, .none },
+    .{ .cmovz, .rm, &.{ .r16, .rm16 }, &.{ 0x0f, 0x44 }, 0, .short, .none },
+    .{ .cmovz, .rm, &.{ .r32, .rm32 }, &.{ 0x0f, 0x44 }, 0, .none, .none },
+    .{ .cmovz, .rm, &.{ .r64, .rm64 }, &.{ 0x0f, 0x44 }, 0, .long, .none },
+
+    .{ .cmp, .zi, &.{ .al, .imm8 }, &.{ 0x3c }, 0, .none, .none },
+    .{ .cmp, .zi, &.{ .ax, .imm16 }, &.{ 0x3d }, 0, .short, .none },
+    .{ .cmp, .zi, &.{ .eax, .imm32 }, &.{ 0x3d }, 0, .none, .none },
+    .{ .cmp, .zi, &.{ .rax, .imm32s }, &.{ 0x3d }, 0, .long, .none },
+    .{ .cmp, .mi, &.{ .rm8, .imm8 }, &.{ 0x80 }, 7, .none, .none },
+    .{ .cmp, .mi, &.{ .rm8, .imm8 }, &.{ 0x80 }, 7, .rex, .none },
+    .{ .cmp, .mi, &.{ .rm16, .imm16 }, &.{ 0x81 }, 7, .short, .none },
+    .{ .cmp, .mi, &.{ .rm32, .imm32 }, &.{ 0x81 }, 7, .none, .none },
+    .{ .cmp, .mi, &.{ .rm64, .imm32s }, &.{ 0x81 }, 7, .long, .none },
+    .{ .cmp, .mi, &.{ .rm16, .imm8s }, &.{ 0x83 }, 7, .short, .none },
+    .{ .cmp, .mi, &.{ .rm32, .imm8s }, &.{ 0x83 }, 7, .none, .none },
+    .{ .cmp, .mi, &.{ .rm64, .imm8s }, &.{ 0x83 }, 7, .long, .none },
+    .{ .cmp, .mr, &.{ .rm8, .r8 }, &.{ 0x38 }, 0, .none, .none },
+    .{ .cmp, .mr, &.{ .rm8, .r8 }, &.{ 0x38 }, 0, .rex, .none },
+    .{ .cmp, .mr, &.{ .rm16, .r16 }, &.{ 0x39 }, 0, .short, .none },
+    .{ .cmp, .mr, &.{ .rm32, .r32 }, &.{ 0x39 }, 0, .none, .none },
+    .{ .cmp, .mr, &.{ .rm64, .r64 }, &.{ 0x39 }, 0, .long, .none },
+    .{ .cmp, .rm, &.{ .r8, .rm8 }, &.{ 0x3a }, 0, .none, .none },
+    .{ .cmp, .rm, &.{ .r8, .rm8 }, &.{ 0x3a }, 0, .rex, .none },
+    .{ .cmp, .rm, &.{ .r16, .rm16 }, &.{ 0x3b }, 0, .short, .none },
+    .{ .cmp, .rm, &.{ .r32, .rm32 }, &.{ 0x3b }, 0, .none, .none },
+    .{ .cmp, .rm, &.{ .r64, .rm64 }, &.{ 0x3b }, 0, .long, .none },
+
+    .{ .cmps, .np, &.{ .m8, .m8 }, &.{ 0xa6 }, 0, .none, .none },
+    .{ .cmps, .np, &.{ .m16, .m16 }, &.{ 0xa7 }, 0, .short, .none },
+    .{ .cmps, .np, &.{ .m32, .m32 }, &.{ 0xa7 }, 0, .none, .none },
+    .{ .cmps, .np, &.{ .m64, .m64 }, &.{ 0xa7 }, 0, .long, .none },

     .{ .cmpsb, .np, &.{}, &.{ 0xa6 }, 0, .none, .none },
     .{ .cmpsw, .np, &.{}, &.{ 0xa7 }, 0, .short, .none },
     .{ .cmpsd, .np, &.{}, &.{ 0xa7 }, 0, .none, .none },
     .{ .cmpsq, .np, &.{}, &.{ 0xa7 }, 0, .long, .none },

-    .{ .cmpxchg, .mr, &.{ .rm8, .r8 }, &.{ 0x0f, 0xb0 }, 0, .none, .none },
-    .{ .cmpxchg, .mr, &.{ .rm8, .r8 }, &.{ 0x0f, 0xb0 }, 0, .rex, .none },
-    .{ .cmpxchg, .mr, &.{ .rm16, .r16 }, &.{ 0x0f, 0xb1 }, 0, .none, .none },
-    .{ .cmpxchg, .mr, &.{ .rm32, .r32 }, &.{ 0x0f, 0xb1 }, 0, .none, .none },
-    .{ .cmpxchg, .mr, &.{ .rm64, .r64 }, &.{ 0x0f, 0xb1 }, 0, .long, .none },
+    .{ .cmpxchg, .mr, &.{ .rm8, .r8 }, &.{ 0x0f, 0xb0 }, 0, .none, .none },
+    .{ .cmpxchg, .mr, &.{ .rm8, .r8 }, &.{ 0x0f, 0xb0 }, 0, .rex, .none },
+    .{ .cmpxchg, .mr, &.{ .rm16, .r16 }, &.{ 0x0f, 0xb1 }, 0, .short, .none },
+    .{ .cmpxchg, .mr, &.{ .rm32, .r32 }, &.{ 0x0f, 0xb1 }, 0, .none, .none },
+    .{ .cmpxchg, .mr, &.{ .rm64, .r64 }, &.{ 0x0f, 0xb1 }, 0, .long, .none },

     .{ .cmpxchg8b, .m, &.{ .m64 }, &.{ 0x0f, 0xc7 }, 1, .none, .none },
     .{ .cmpxchg16b, .m, &.{ .m128 }, &.{ 0x0f, 0xc7 }, 1, .long, .none },

-    .{ .div, .m, &.{ .rm8 }, &.{ 0xf6 }, 6, .none, .none },
-    .{ .div, .m, &.{ .rm8 }, &.{ 0xf6 }, 6, .rex, .none },
-    .{ .div, .m, &.{ .rm16 }, &.{ 0xf7 }, 6, .none, .none },
-    .{ .div, .m, &.{ .rm32 }, &.{ 0xf7 }, 6, .none, .none },
-    .{ .div, .m, &.{ .rm64 }, &.{ 0xf7 }, 6, .long, .none },
+    .{ .div, .m, &.{ .rm8 }, &.{ 0xf6 }, 6, .none, .none },
+    .{ .div, .m, &.{ .rm8 }, &.{ 0xf6 }, 6, .rex, .none },
+    .{ .div, .m, &.{ .rm16 }, &.{ 0xf7 }, 6, .short, .none },
+    .{ .div, .m, &.{ .rm32 }, &.{ 0xf7 }, 6, .none, .none },
+    .{ .div, .m, &.{ .rm64 }, &.{ 0xf7 }, 6, .long, .none },

     .{ .fisttp, .m, &.{ .m16 }, &.{ 0xdf }, 1, .none, .x87 },
     .{ .fisttp, .m, &.{ .m32 }, &.{ 0xdb }, 1, .none, .x87 },
@@ -280,26 +280,26 @@ pub const table = [_]Entry{
     .{ .fld, .m, &.{ .m64 }, &.{ 0xdd }, 0, .none, .x87 },
     .{ .fld, .m, &.{ .m80 }, &.{ 0xdb }, 5, .none, .x87 },

-    .{ .idiv, .m, &.{ .rm8 }, &.{ 0xf6 }, 7, .none, .none },
-    .{ .idiv, .m, &.{ .rm8 }, &.{ 0xf6 }, 7, .rex, .none },
-    .{ .idiv, .m, &.{ .rm16 }, &.{ 0xf7 }, 7, .none, .none },
-    .{ .idiv, .m, &.{ .rm32 }, &.{ 0xf7 }, 7, .none, .none },
-    .{ .idiv, .m, &.{ .rm64 }, &.{ 0xf7 }, 7, .long, .none },
-
-    .{ .imul, .m, &.{ .rm8 }, &.{ 0xf6 }, 5, .none, .none },
-    .{ .imul, .m, &.{ .rm8 }, &.{ 0xf6 }, 5, .rex, .none },
-    .{ .imul, .m, &.{ .rm16, }, &.{ 0xf7 }, 5, .none, .none },
-    .{ .imul, .m, &.{ .rm32, }, &.{ 0xf7 }, 5, .none, .none },
-    .{ .imul, .m, &.{ .rm64, }, &.{ 0xf7 }, 5, .long, .none },
-    .{ .imul, .rm, &.{ .r16, .rm16, }, &.{ 0x0f, 0xaf }, 0, .none, .none },
-    .{ .imul, .rm, &.{ .r32, .rm32, }, &.{ 0x0f, 0xaf }, 0, .none, .none },
-    .{ .imul, .rm, &.{ .r64, .rm64, }, &.{ 0x0f, 0xaf }, 0, .long, .none },
-    .{ .imul, .rmi, &.{ .r16, .rm16, .imm8s }, &.{ 0x6b }, 0, .none, .none },
-    .{ .imul, .rmi, &.{ .r32, .rm32, .imm8s }, &.{ 0x6b }, 0, .none, .none },
-    .{ .imul, .rmi, &.{ .r64, .rm64, .imm8s }, &.{ 0x6b }, 0, .long, .none },
-    .{ .imul, .rmi, &.{ .r16, .rm16, .imm16 }, &.{ 0x69 }, 0, .none, .none },
-    .{ .imul, .rmi, &.{ .r32, .rm32, .imm32 }, &.{ 0x69 }, 0, .none, .none },
-    .{ .imul, .rmi, &.{ .r64, .rm64, .imm32 }, &.{ 0x69 }, 0, .long, .none },
+    .{ .idiv, .m, &.{ .rm8 }, &.{ 0xf6 }, 7, .none, .none },
+    .{ .idiv, .m, &.{ .rm8 }, &.{ 0xf6 }, 7, .rex, .none },
+    .{ .idiv, .m, &.{ .rm16 }, &.{ 0xf7 }, 7, .short, .none },
+    .{ .idiv, .m, &.{ .rm32 }, &.{ 0xf7 }, 7, .none, .none },
+    .{ .idiv, .m, &.{ .rm64 }, &.{ 0xf7 }, 7, .long, .none },
+
+    .{ .imul, .m, &.{ .rm8 }, &.{ 0xf6 }, 5, .none, .none },
+    .{ .imul, .m, &.{ .rm8 }, &.{ 0xf6 }, 5, .rex, .none },
+    .{ .imul, .m, &.{ .rm16, }, &.{ 0xf7 }, 5, .short, .none },
+    .{ .imul, .m, &.{ .rm32, }, &.{ 0xf7 }, 5, .none, .none },
+    .{ .imul, .m, &.{ .rm64, }, &.{ 0xf7 }, 5, .long, .none },
+    .{ .imul, .rm, &.{ .r16, .rm16, }, &.{ 0x0f, 0xaf }, 0, .short, .none },
+    .{ .imul, .rm, &.{ .r32, .rm32, }, &.{ 0x0f, 0xaf }, 0, .none, .none },
+    .{ .imul, .rm, &.{ .r64, .rm64, }, &.{ 0x0f, 0xaf }, 0, .long, .none },
+    .{ .imul, .rmi, &.{ .r16, .rm16, .imm8s }, &.{ 0x6b }, 0, .short, .none },
+    .{ .imul, .rmi, &.{ .r32, .rm32, .imm8s }, &.{ 0x6b }, 0, .none, .none },
+    .{ .imul, .rmi, &.{ .r64, .rm64, .imm8s }, &.{ 0x6b }, 0, .long, .none },
+    .{ .imul, .rmi, &.{ .r16, .rm16, .imm16 }, &.{ 0x69 }, 0, .short, .none },
+    .{ .imul, .rmi, &.{ .r32, .rm32, .imm32 }, &.{ 0x69 }, 0, .none, .none },
+    .{ .imul, .rmi, &.{ .r64, .rm64, .imm32 }, &.{ 0x69 }, 0, .long, .none },

     .{ .int3, .np, &.{}, &.{ 0xcc }, 0, .none, .none },

@@ -338,281 +338,283 @@ pub const table = [_]Entry{
     .{ .jmp, .d, &.{ .rel32 }, &.{ 0xe9 }, 0, .none, .none },
     .{ .jmp, .m, &.{ .rm64 }, &.{ 0xff }, 4, .none, .none },

-    .{ .lea, .rm, &.{ .r16, .m }, &.{ 0x8d }, 0, .none, .none },
-    .{ .lea, .rm, &.{ .r32, .m }, &.{ 0x8d }, 0, .none, .none },
-    .{ .lea, .rm, &.{ .r64, .m }, &.{ 0x8d }, 0, .long, .none },
+    .{ .lea, .rm, &.{ .r16, .m }, &.{ 0x8d }, 0, .short, .none },
+    .{ .lea, .rm, &.{ .r32, .m }, &.{ 0x8d }, 0, .none, .none },
+    .{ .lea, .rm, &.{ .r64, .m }, &.{ 0x8d }, 0, .long, .none },

     .{ .lfence, .np, &.{}, &.{ 0x0f, 0xae, 0xe8 }, 0, .none, .none },

-    .{ .lods, .np, &.{ .m8 }, &.{ 0xac }, 0, .none, .none },
-    .{ .lods, .np, &.{ .m16 }, &.{ 0xad }, 0, .none, .none },
-    .{ .lods, .np, &.{ .m32 }, &.{ 0xad }, 0, .none, .none },
-    .{ .lods, .np, &.{ .m64 }, &.{ 0xad }, 0, .long, .none },
+    .{ .lods, .np, &.{ .m8 }, &.{ 0xac }, 0, .none, .none },
+    .{ .lods, .np, &.{ .m16 }, &.{ 0xad }, 0, .short, .none },
+    .{ .lods, .np, &.{ .m32 }, &.{ 0xad }, 0, .none, .none },
+    .{ .lods, .np, &.{ .m64 }, &.{ 0xad }, 0, .long, .none },

     .{ .lodsb, .np, &.{}, &.{ 0xac }, 0, .none, .none },
     .{ .lodsw, .np, &.{}, &.{ 0xad }, 0, .short, .none },
     .{ .lodsd, .np, &.{}, &.{ 0xad }, 0, .none, .none },
     .{ .lodsq, .np, &.{}, &.{ 0xad }, 0, .long, .none },

-    .{ .lzcnt, .rm, &.{ .r16, .rm16 }, &.{ 0xf3, 0x0f, 0xbd }, 0, .none, .none },
-    .{ .lzcnt, .rm, &.{ .r32, .rm32 }, &.{ 0xf3, 0x0f, 0xbd }, 0, .none, .none },
-    .{ .lzcnt, .rm, &.{ .r64, .rm64 }, &.{ 0xf3, 0x0f, 0xbd }, 0, .long, .none },
+    .{ .lzcnt, .rm, &.{ .r16, .rm16 }, &.{ 0xf3, 0x0f, 0xbd }, 0, .short, .none },
+    .{ .lzcnt, .rm, &.{ .r32, .rm32 }, &.{ 0xf3, 0x0f, 0xbd }, 0, .none, .none },
+    .{ .lzcnt, .rm, &.{ .r64, .rm64 }, &.{ 0xf3, 0x0f, 0xbd }, 0, .long, .none },

     .{ .mfence, .np, &.{}, &.{ 0x0f, 0xae, 0xf0 }, 0, .none, .none },

-    .{ .mov, .mr, &.{ .rm8, .r8 }, &.{ 0x88 }, 0, .none, .none },
-    .{ .mov, .mr, &.{ .rm8, .r8 }, &.{ 0x88 }, 0, .rex, .none },
-    .{ .mov, .mr, &.{ .rm16, .r16 }, &.{ 0x89 }, 0, .none, .none },
-    .{ .mov, .mr, &.{ .rm32, .r32 }, &.{ 0x89 }, 0, .none, .none },
-    .{ .mov, .mr, &.{ .rm64, .r64 }, &.{ 0x89 }, 0, .long, .none },
-    .{ .mov, .rm, &.{ .r8, .rm8 }, &.{ 0x8a }, 0, .none, .none },
-    .{ .mov, .rm, &.{ .r8, .rm8 }, &.{ 0x8a }, 0, .rex, .none },
-    .{ .mov, .rm, &.{ .r16, .rm16 }, &.{ 0x8b }, 0, .none, .none },
-    .{ .mov, .rm, &.{ .r32, .rm32 }, &.{ 0x8b }, 0, .none, .none },
-    .{ .mov, .rm, &.{ .r64, .rm64 }, &.{ 0x8b }, 0, .long, .none },
-    .{ .mov, .mr, &.{ .rm16, .sreg }, &.{ 0x8c }, 0, .none, .none },
-    .{ .mov, .mr, &.{ .rm64, .sreg }, &.{ 0x8c }, 0, .long, .none },
-    .{ .mov, .rm, &.{ .sreg, .rm16 }, &.{ 0x8e }, 0, .none, .none },
-    .{ .mov, .rm, &.{ .sreg, .rm64 }, &.{ 0x8e }, 0, .long, .none },
-    .{ .mov, .fd, &.{ .al, .moffs }, &.{ 0xa0 }, 0, .none, .none },
-    .{ .mov, .fd, &.{ .ax, .moffs }, &.{ 0xa1 }, 0, .none, .none },
-    .{ .mov, .fd, &.{ .eax, .moffs }, &.{ 0xa1 }, 0, .none, .none },
-    .{ .mov, .fd, &.{ .rax, .moffs }, &.{ 0xa1 }, 0, .long, .none },
-    .{ .mov, .td, &.{ .moffs, .al }, &.{ 0xa2 }, 0, .none, .none },
-    .{ .mov, .td, &.{ .moffs, .ax }, &.{ 0xa3 }, 0, .none, .none },
-    .{ .mov, .td, &.{ .moffs, .eax }, &.{ 0xa3 }, 0, .none, .none },
-    .{ .mov, .td, &.{ .moffs, .rax }, &.{ 0xa3 }, 0, .long, .none },
-    .{ .mov, .oi, &.{ .r8, .imm8 }, &.{ 0xb0 }, 0, .none, .none },
-    .{ .mov, .oi, &.{ .r8, .imm8 }, &.{ 0xb0 }, 0, .rex, .none },
-    .{ .mov, .oi, &.{ .r16, .imm16 }, &.{ 0xb8 }, 0, .none, .none },
-    .{ .mov, .oi, &.{ .r32, .imm32 }, &.{ 0xb8 }, 0, .none, .none },
-    .{ .mov, .oi, &.{ .r64, .imm64 }, &.{ 0xb8 }, 0, .long, .none },
-    .{ .mov, .mi, &.{ .rm8, .imm8 }, &.{ 0xc6 }, 0, .none, .none },
-    .{ .mov, .mi, &.{ .rm8, .imm8 }, &.{ 0xc6 }, 0, .rex, .none },
-    .{ .mov, .mi, &.{ .rm16, .imm16 }, &.{ 0xc7 }, 0, .none, .none },
-    .{ .mov, .mi, &.{ .rm32, .imm32 }, &.{ 0xc7 }, 0, .none, .none },
-    .{ .mov, .mi, &.{ .rm64, .imm32s }, &.{ 0xc7 }, 0, .long, .none },
-
-    .{ .movbe, .rm, &.{ .r16, .m16 }, &.{ 0x0f, 0x38, 0xf0 }, 0, .none, .none },
-    .{ .movbe, .rm, &.{ .r32, .m32 }, &.{ 0x0f, 0x38, 0xf0 }, 0, .none, .none },
-    .{ .movbe, .rm, &.{ .r64, .m64 }, &.{ 0x0f, 0x38, 0xf0 }, 0, .long, .none },
-    .{ .movbe, .mr, &.{ .m16, .r16 }, &.{ 0x0f, 0x38, 0xf1 }, 0, .none, .none },
-    .{ .movbe, .mr, &.{ .m32, .r32 }, &.{ 0x0f, 0x38, 0xf1 }, 0, .none, .none },
-    .{ .movbe, .mr, &.{ .m64, .r64 }, &.{ 0x0f, 0x38, 0xf1 }, 0, .long, .none },
-
-    .{ .movs, .np, &.{ .m8, .m8 }, &.{ 0xa4 }, 0, .none, .none },
-    .{ .movs, .np, &.{ .m16, .m16 }, &.{ 0xa5 }, 0, .none, .none },
-    .{ .movs, .np, &.{ .m32, .m32 }, &.{ 0xa5 }, 0, .none, .none },
-    .{ .movs, .np, &.{ .m64, .m64 }, &.{ 0xa5 }, 0, .long, .none },
+    .{ .mov, .mr, &.{ .rm8, .r8 }, &.{ 0x88 }, 0, .none, .none },
+    .{ .mov, .mr, &.{ .rm8, .r8 }, &.{ 0x88 }, 0, .rex, .none },
+    .{ .mov, .mr, &.{ .rm16, .r16 }, &.{ 0x89 }, 0, .short, .none },
+    .{ .mov, .mr, &.{ .rm32, .r32 }, &.{ 0x89 }, 0, .none, .none },
+    .{ .mov, .mr, &.{ .rm64, .r64 }, &.{ 0x89 }, 0, .long, .none },
+    .{ .mov, .rm, &.{ .r8, .rm8 }, &.{ 0x8a }, 0, .none, .none },
+    .{ .mov, .rm, &.{ .r8, .rm8 }, &.{ 0x8a }, 0, .rex, .none },
+    .{ .mov, .rm, &.{ .r16, .rm16 }, &.{ 0x8b }, 0, .short, .none },
+    .{ .mov, .rm, &.{ .r32, .rm32 }, &.{ 0x8b }, 0, .none, .none },
+    .{ .mov, .rm, &.{ .r64, .rm64 }, &.{ 0x8b }, 0, .long, .none },
+    .{ .mov, .mr, &.{ .rm16, .sreg }, &.{ 0x8c }, 0, .short, .none },
+    .{ .mov, .mr, &.{ .r32_m16, .sreg }, &.{ 0x8c }, 0, .none, .none },
+    .{ .mov, .mr, &.{ .r64_m16, .sreg }, &.{ 0x8c }, 0, .long, .none },
+    .{ .mov, .rm, &.{ .sreg, .rm16 }, &.{ 0x8e }, 0, .short, .none },
+    .{ .mov, .rm, &.{ .sreg, .r32_m16 }, &.{ 0x8e }, 0, .none, .none },
+    .{ .mov, .rm, &.{ .sreg, .r64_m16 }, &.{ 0x8e }, 0, .long, .none },
+    .{ .mov, .fd, &.{ .al, .moffs }, &.{ 0xa0 }, 0, .none, .none },
+    .{ .mov, .fd, &.{ .ax, .moffs }, &.{ 0xa1 }, 0, .none, .none },
+    .{ .mov, .fd, &.{ .eax, .moffs }, &.{ 0xa1 }, 0, .none, .none },
+    .{ .mov, .fd, &.{ .rax, .moffs }, &.{ 0xa1 }, 0, .long, .none },
+    .{ .mov, .td, &.{ .moffs, .al }, &.{ 0xa2 }, 0, .none, .none },
+    .{ .mov, .td, &.{ .moffs, .ax }, &.{ 0xa3 }, 0, .none, .none },
+    .{ .mov, .td, &.{ .moffs, .eax }, &.{ 0xa3 }, 0, .none, .none },
+    .{ .mov, .td, &.{ .moffs, .rax }, &.{ 0xa3 }, 0, .long, .none },
+    .{ .mov, .oi, &.{ .r8, .imm8 }, &.{ 0xb0 }, 0, .none, .none },
+    .{ .mov, .oi, &.{ .r8, .imm8 }, &.{ 0xb0 }, 0, .rex, .none },
+    .{ .mov, .oi, &.{ .r16, .imm16 }, &.{ 0xb8 }, 0, .short, .none },
+    .{ .mov, .oi, &.{ .r32, .imm32 }, &.{ 0xb8 }, 0, .none, .none },
+    .{ .mov, .oi, &.{ .r64, .imm64 }, &.{ 0xb8 }, 0, .long, .none },
+    .{ .mov, .mi, &.{ .rm8, .imm8 }, &.{ 0xc6 }, 0, .none, .none },
+    .{ .mov, .mi, &.{ .rm8, .imm8 }, &.{ 0xc6 }, 0, .rex, .none },
+    .{ .mov, .mi, &.{ .rm16, .imm16 }, &.{ 0xc7 }, 0, .short, .none },
+    .{ .mov, .mi, &.{ .rm32, .imm32 }, &.{ 0xc7 }, 0, .none, .none },
+    .{ .mov, .mi, &.{ .rm64, .imm32s }, &.{ 0xc7 }, 0, .long, .none },
+
+    .{ .movbe, .rm, &.{ .r16, .m16 }, &.{ 0x0f, 0x38, 0xf0 }, 0, .short, .none },
+    .{ .movbe, .rm, &.{ .r32, .m32 }, &.{ 0x0f, 0x38, 0xf0 }, 0, .none, .none },
+    .{ .movbe, .rm, &.{ .r64, .m64 }, &.{ 0x0f, 0x38, 0xf0 }, 0, .long, .none },
+    .{ .movbe, .mr, &.{ .m16, .r16 }, &.{ 0x0f, 0x38, 0xf1 }, 0, .short, .none },
+    .{ .movbe, .mr, &.{ .m32, .r32 }, &.{ 0x0f, 0x38, 0xf1 }, 0, .none, .none },
+    .{ .movbe, .mr, &.{ .m64, .r64 }, &.{ 0x0f, 0x38, 0xf1 }, 0, .long, .none },
+
+    .{ .movs, .np, &.{ .m8, .m8 }, &.{ 0xa4 }, 0, .none, .none },
+    .{ .movs, .np, &.{ .m16, .m16 }, &.{ 0xa5 }, 0, .short, .none },
+    .{ .movs, .np, &.{ .m32, .m32 }, &.{ 0xa5 }, 0, .none, .none },
+    .{ .movs, .np, &.{ .m64, .m64 }, &.{ 0xa5 }, 0, .long, .none },

     .{ .movsb, .np, &.{}, &.{ 0xa4 }, 0, .none, .none },
     .{ .movsw, .np, &.{}, &.{ 0xa5 }, 0, .short, .none },
     .{ .movsd, .np, &.{}, &.{ 0xa5 }, 0, .none, .none },
     .{ .movsq, .np, &.{}, &.{ 0xa5 }, 0, .long, .none },

-    .{ .movsx, .rm, &.{ .r16, .rm8 }, &.{ 0x0f, 0xbe }, 0, .none, .none },
-    .{ .movsx, .rm, &.{ .r16, .rm8 }, &.{ 0x0f, 0xbe }, 0, .rex, .none },
-    .{ .movsx, .rm, &.{ .r32, .rm8 }, &.{ 0x0f, 0xbe }, 0, .none, .none },
-    .{ .movsx, .rm, &.{ .r32, .rm8 }, &.{ 0x0f, 0xbe }, 0, .rex, .none },
-    .{ .movsx, .rm, &.{ .r64, .rm8 }, &.{ 0x0f, 0xbe }, 0, .long, .none },
-    .{ .movsx, .rm, &.{ .r32, .rm16 }, &.{ 0x0f, 0xbf }, 0, .none, .none },
-    .{ .movsx, .rm, &.{ .r64, .rm16 }, &.{ 0x0f, 0xbf }, 0, .long, .none },
+    .{ .movsx, .rm, &.{ .r16, .rm8 }, &.{ 0x0f, 0xbe }, 0, .short, .none },
+    .{ .movsx, .rm, &.{ .r16, .rm8 }, &.{ 0x0f, 0xbe }, 0, .rex_short, .none },
+    .{ .movsx, .rm, &.{ .r32, .rm8 }, &.{ 0x0f, 0xbe }, 0, .none, .none },
+    .{ .movsx, .rm, &.{ .r32, .rm8 }, &.{ 0x0f, 0xbe }, 0, .rex, .none },
+    .{ .movsx, .rm, &.{ .r64, .rm8 }, &.{ 0x0f, 0xbe }, 0, .long, .none },
+    .{ .movsx, .rm, &.{ .r32, .rm16 }, &.{ 0x0f, 0xbf }, 0, .none, .none },
+    .{ .movsx, .rm, &.{ .r64, .rm16 }, &.{ 0x0f, 0xbf }, 0, .long, .none },

     // This instruction is discouraged.
     .{ .movsxd, .rm, &.{ .r32, .rm32 }, &.{ 0x63 }, 0, .none, .none },
     .{ .movsxd, .rm, &.{ .r64, .rm32 }, &.{ 0x63 }, 0, .long, .none },

-    .{ .movzx, .rm, &.{ .r16, .rm8 }, &.{ 0x0f, 0xb6 }, 0, .none, .none },
-    .{ .movzx, .rm, &.{ .r32, .rm8 }, &.{ 0x0f, 0xb6 }, 0, .none, .none },
-    .{ .movzx, .rm, &.{ .r64, .rm8 }, &.{ 0x0f, 0xb6 }, 0, .long, .none },
-    .{ .movzx, .rm, &.{ .r32, .rm16 }, &.{ 0x0f, 0xb7 }, 0, .none, .none },
-    .{ .movzx, .rm, &.{ .r64, .rm16 }, &.{ 0x0f, 0xb7 }, 0, .long, .none },
+    .{ .movzx, .rm, &.{ .r16, .rm8 }, &.{ 0x0f, 0xb6 }, 0, .short, .none },
+    .{ .movzx, .rm, &.{ .r32, .rm8 }, &.{ 0x0f, 0xb6 }, 0, .none, .none },
+    .{ .movzx, .rm, &.{ .r64, .rm8 }, &.{ 0x0f, 0xb6 }, 0, .long, .none },
+    .{ .movzx, .rm, &.{ .r32, .rm16 }, &.{ 0x0f, 0xb7 }, 0, .none, .none },
+    .{ .movzx, .rm, &.{ .r64, .rm16 }, &.{ 0x0f, 0xb7 }, 0, .long, .none },

-    .{ .mul, .m, &.{ .rm8 }, &.{ 0xf6 }, 4, .none, .none },
-    .{ .mul, .m, &.{ .rm8 }, &.{ 0xf6 }, 4, .rex, .none },
-    .{ .mul, .m, &.{ .rm16 }, &.{ 0xf7 }, 4, .none, .none },
-    .{ .mul, .m, &.{ .rm32 }, &.{ 0xf7 }, 4, .none, .none },
-    .{ .mul, .m, &.{ .rm64 }, &.{ 0xf7 }, 4, .long, .none },
+    .{ .mul, .m, &.{ .rm8 }, &.{ 0xf6 }, 4, .none, .none },
+    .{ .mul, .m, &.{ .rm8 }, &.{ 0xf6 }, 4, .rex, .none },
+    .{ .mul, .m, &.{ .rm16 }, &.{ 0xf7 }, 4, .short, .none },
+    .{ .mul, .m, &.{ .rm32 }, &.{ 0xf7 }, 4, .none, .none },
+    .{ .mul, .m, &.{ .rm64 }, &.{ 0xf7 }, 4, .long, .none },

-    .{ .neg, .m, &.{ .rm8 }, &.{ 0xf6 }, 3, .none, .none },
-    .{ .neg, .m, &.{ .rm8 }, &.{ 0xf6 }, 3, .rex, .none },
-    .{ .neg, .m, &.{ .rm16 }, &.{ 0xf7 }, 3, .none, .none },
-    .{ .neg, .m, &.{ .rm32 }, &.{ 0xf7 }, 3, .none, .none },
-    .{ .neg, .m, &.{ .rm64 }, &.{ 0xf7 }, 3, .long, .none },
+    .{ .neg, .m, &.{ .rm8 }, &.{ 0xf6 }, 3, .none, .none },
+    .{ .neg, .m, &.{ .rm8 }, &.{ 0xf6 }, 3, .rex, .none },
+    .{ .neg, .m, &.{ .rm16 }, &.{ 0xf7 }, 3, .short, .none },
+    .{ .neg, .m, &.{ .rm32 }, &.{ 0xf7 }, 3, .none, .none },
+    .{ .neg, .m, &.{ .rm64 }, &.{ 0xf7 }, 3, .long, .none },

     .{ .nop, .np, &.{}, &.{ 0x90 }, 0, .none, .none },

-    .{ .not, .m, &.{ .rm8 }, &.{ 0xf6 }, 2, .none, .none },
-    .{ .not, .m, &.{ .rm8 }, &.{ 0xf6 }, 2, .rex, .none },
-    .{ .not, .m, &.{ .rm16 }, &.{ 0xf7 }, 2, .none, .none },
-    .{ .not, .m, &.{ .rm32 }, &.{ 0xf7 }, 2, .none, .none },
-    .{ .not, .m, &.{ .rm64 }, &.{ 0xf7 }, 2, .long, .none },
-
-    .{ .@"or", .zi, &.{ .al, .imm8 }, &.{ 0x0c }, 0, .none, .none },
-    .{ .@"or", .zi, &.{ .ax, .imm16 }, &.{ 0x0d }, 0, .none, .none },
-    .{ .@"or", .zi, &.{ .eax, .imm32 }, &.{ 0x0d }, 0, .none, .none },
-    .{ .@"or", .zi, &.{ .rax, .imm32s }, &.{ 0x0d }, 0, .long, .none },
-    .{ .@"or", .mi, &.{ .rm8, .imm8 }, &.{ 0x80 }, 1, .none, .none },
-    .{ .@"or", .mi, &.{ .rm8, .imm8 }, &.{ 0x80 }, 1, .rex, .none },
-    .{ .@"or", .mi, &.{ .rm16, .imm16 }, &.{ 0x81 }, 1, .none, .none },
-    .{ .@"or", .mi, &.{ .rm32, .imm32 }, &.{ 0x81 }, 1, .none, .none },
-    .{ .@"or", .mi, &.{ .rm64, .imm32s }, &.{ 0x81 }, 1, .long, .none },
-    .{ .@"or", .mi, &.{ .rm16, .imm8s }, &.{ 0x83 }, 1, .none, .none },
-    .{ .@"or", .mi, &.{ .rm32, .imm8s }, &.{ 0x83 }, 1, .none, .none },
-    .{ .@"or", .mi, &.{ .rm64, .imm8s }, &.{ 0x83 }, 1, .long, .none },
-    .{ .@"or", .mr, &.{ .rm8, .r8 }, &.{ 0x08 }, 0, .none, .none },
-    .{ .@"or", .mr, &.{ .rm8, .r8 }, &.{ 0x08 }, 0, .rex, .none },
-    .{ .@"or", .mr, &.{ .rm16, .r16 }, &.{ 0x09 }, 0, .none, .none },
-    .{ .@"or", .mr, &.{ .rm32, .r32 }, &.{ 0x09 }, 0, .none, .none },
-    .{ .@"or", .mr, &.{ .rm64, .r64 }, &.{ 0x09 }, 0, .long, .none },
-    .{ .@"or", .rm, &.{ .r8, .rm8 }, &.{ 0x0a }, 0, .none, .none },
-    .{ .@"or", .rm, &.{ .r8, .rm8 }, &.{ 0x0a }, 0, .rex, .none },
-    .{ .@"or", .rm, &.{ .r16, .rm16 }, &.{ 0x0b }, 0, .none, .none },
-    .{ .@"or", .rm, &.{ .r32, .rm32 }, &.{ 0x0b }, 0, .none, .none },
-    .{ .@"or", .rm, &.{ .r64, .rm64 }, &.{ 0x0b }, 0, .long, .none },
-
-    .{ .pop, .o, &.{ .r16 }, &.{ 0x58 }, 0, .none, .none },
-    .{ .pop, .o, &.{ .r64 }, &.{ 0x58 }, 0, .none, .none },
-    .{ .pop, .m, &.{ .rm16 }, &.{ 0x8f }, 0, .none, .none },
-    .{ .pop, .m, &.{ .rm64 }, &.{ 0x8f }, 0, .none, .none },
-
-    .{ .popcnt, .rm, &.{ .r16, .rm16 }, &.{ 0xf3, 0x0f, 0xb8 }, 0, .none, .none },
-    .{ .popcnt, .rm, &.{ .r32, .rm32 }, &.{ 0xf3, 0x0f, 0xb8 }, 0, .none, .none },
-    .{ .popcnt, .rm, &.{ .r64, .rm64 }, &.{ 0xf3, 0x0f, 0xb8 }, 0, .long, .none },
-
-    .{ .push, .o, &.{ .r16 }, &.{ 0x50 }, 0, .none, .none },
-    .{ .push, .o, &.{ .r64 }, &.{ 0x50 }, 0, .none, .none },
-    .{ .push, .m, &.{ .rm16 }, &.{ 0xff }, 6, .none, .none },
-    .{ .push, .m, &.{ .rm64 }, &.{ 0xff }, 6, .none, .none },
-    .{ .push, .i, &.{ .imm8 }, &.{ 0x6a }, 0, .none, .none },
-    .{ .push, .i, &.{ .imm16 }, &.{ 0x68 }, 0, .none, .none },
-    .{ .push, .i, &.{ .imm32 }, &.{ 0x68 }, 0, .none, .none },
+    .{ .not, .m, &.{ .rm8 }, &.{ 0xf6 }, 2, .none, .none },
+    .{ .not, .m, &.{ .rm8 }, &.{ 0xf6 }, 2, .rex, .none },
+    .{ .not, .m, &.{ .rm16 }, &.{ 0xf7 }, 2, .short, .none },
+    .{ .not, .m, &.{ .rm32 }, &.{ 0xf7 }, 2, .none, .none },
+    .{ .not, .m, &.{ .rm64 }, &.{ 0xf7 }, 2, .long, .none },
+
+    .{ .@"or", .zi, &.{ .al, .imm8 }, &.{ 0x0c }, 0, .none, .none },
+    .{ .@"or", .zi, &.{ .ax, .imm16 }, &.{ 0x0d }, 0, .short, .none },
+    .{ .@"or", .zi, &.{ .eax, .imm32 }, &.{ 0x0d }, 0, .none, .none },
+    .{ .@"or", .zi, &.{ .rax, .imm32s }, &.{ 0x0d }, 0, .long, .none },
+    .{ .@"or", .mi, &.{ .rm8, .imm8 }, &.{ 0x80 }, 1, .none, .none },
+    .{ .@"or", .mi, &.{ .rm8, .imm8 }, &.{ 0x80 }, 1, .rex, .none },
+    .{ .@"or", .mi, &.{ .rm16, .imm16 }, &.{ 0x81 }, 1, .short, .none },
+    .{ .@"or", .mi, &.{ .rm32, .imm32 }, &.{ 0x81 }, 1, .none, .none },
+    .{ .@"or", .mi, &.{ .rm64, .imm32s }, &.{ 0x81 }, 1, .long, .none },
+    .{ .@"or", .mi, &.{ .rm16, .imm8s }, &.{ 0x83 }, 1, .short, .none },
+    .{ .@"or", .mi, &.{ .rm32, .imm8s }, &.{ 0x83 }, 1, .none, .none },
+    .{ .@"or", .mi, &.{ .rm64, .imm8s }, &.{ 0x83 }, 1, .long, .none },
+    .{ .@"or", .mr, &.{ .rm8, .r8 }, &.{ 0x08 }, 0, .none, .none },
+    .{ .@"or", .mr, &.{ .rm8, .r8 }, &.{ 0x08 }, 0, .rex, .none },
+    .{ .@"or", .mr, &.{ .rm16, .r16 }, &.{ 0x09 }, 0, .short, .none },
+    .{ .@"or", .mr, &.{ .rm32, .r32 }, &.{ 0x09 }, 0, .none, .none },
+    .{ .@"or", .mr, &.{ .rm64, .r64 }, &.{ 0x09 }, 0, .long, .none },
+    .{ .@"or", .rm, &.{ .r8, .rm8 }, &.{ 0x0a }, 0, .none, .none },
+    .{ .@"or", .rm, &.{ .r8, .rm8 }, &.{ 0x0a }, 0, .rex, .none },
+    .{ .@"or", .rm, &.{ .r16, .rm16 }, &.{ 0x0b }, 0, .short, .none },
+    .{ .@"or", .rm, &.{ .r32, .rm32 }, &.{ 0x0b }, 0, .none, .none },
+    .{ .@"or", .rm, &.{ .r64, .rm64 }, &.{ 0x0b }, 0, .long, .none },
+
+    .{ .pop, .o, &.{ .r16 }, &.{ 0x58 }, 0, .short, .none },
+    .{ .pop, .o, &.{ .r64 }, &.{ 0x58 }, 0, .none, .none },
+    .{ .pop, .m, &.{ .rm16 }, &.{ 0x8f }, 0, .short, .none },
+    .{ .pop, .m, &.{ .rm64 }, &.{ 0x8f }, 0, .none, .none },
+
+    .{ .popcnt, .rm, &.{ .r16, .rm16 }, &.{ 0xf3, 0x0f, 0xb8 }, 0, .short, .none },
+    .{ .popcnt, .rm, &.{ .r32, .rm32 }, &.{ 0xf3, 0x0f, 0xb8 }, 0, .none, .none },
+    .{ .popcnt, .rm, &.{ .r64, .rm64 }, &.{ 0xf3, 0x0f, 0xb8 }, 0, .long, .none },
+
+    .{ .push, .o, &.{ .r16 }, &.{ 0x50 }, 0, .short, .none },
+    .{ .push, .o, &.{ .r64 }, &.{ 0x50 }, 0, .none, .none },
+    .{ .push, .m, &.{ .rm16 }, &.{ 0xff }, 6, .short, .none },
+    .{ .push, .m, &.{ .rm64 }, &.{ 0xff }, 6, .none, .none },
+    .{ .push, .i, &.{ .imm8 }, &.{ 0x6a }, 0, .none, .none },
+    .{ .push, .i, &.{ .imm16 }, &.{ 0x68 }, 0, .short, .none },
+    .{ .push, .i, &.{ .imm32 }, &.{ 0x68 }, 0, .none, .none },

     .{ .ret, .np, &.{}, &.{ 0xc3 }, 0, .none, .none },

-    .{ .rcl, .m1, &.{ .rm8, .unity }, &.{ 0xd0 }, 2, .none, .none },
-    .{ .rcl, .m1, &.{ .rm8, .unity }, &.{ 0xd0 }, 2, .rex, .none },
-    .{ .rcl, .mc, &.{ .rm8, .cl }, &.{ 0xd2 }, 2, .none, .none },
-    .{ .rcl, .mc, &.{ .rm8, .cl }, &.{ 0xd2 }, 2, .rex, .none },
-    .{ .rcl, .mi, &.{ .rm8, .imm8 }, &.{ 0xc0 }, 2, .none, .none },
-    .{ .rcl, .mi, &.{ .rm8, .imm8 }, &.{ 0xc0 }, 2, .rex, .none },
-    .{ .rcl, .m1, &.{ .rm16, .unity }, &.{ 0xd1 }, 2, .none, .none },
-    .{ .rcl, .mc, &.{ .rm16, .cl }, &.{ 0xd3 }, 2, .none, .none },
-    .{ .rcl, .mi, &.{ .rm16, .imm8 }, &.{ 0xc1 }, 2, .none, .none },
-    .{ .rcl, .m1, &.{ .rm32, .unity }, &.{ 0xd1 }, 2, .none, .none },
-    .{ .rcl, .m1, &.{ .rm64, .unity }, &.{ 0xd1 }, 2, .long, .none },
-    .{ .rcl, .mc, &.{ .rm32, .cl }, &.{ 0xd3 }, 2, .none, .none },
-    .{ .rcl, .mc, &.{ .rm64, .cl }, &.{ 0xd3 }, 2, .long, .none },
-    .{ .rcl, .mi, &.{ .rm32, .imm8 }, &.{ 0xc1 }, 2, .none, .none },
-    .{ .rcl, .mi, &.{ .rm64, .imm8 }, &.{ 0xc1 }, 2, .long, .none },
-
-    .{ .rcr, .m1, &.{ .rm8, .unity }, &.{ 0xd0 }, 3, .none, .none },
-    .{ .rcr, .m1, &.{ .rm8, .unity }, &.{ 0xd0 }, 3, .rex, .none },
-    .{ .rcr, .mc, &.{ .rm8, .cl }, &.{ 0xd2 }, 3, .none, .none },
-    .{ .rcr, .mc, &.{ .rm8, .cl }, &.{ 0xd2 }, 3, .rex, .none },
-    .{ .rcr, .mi, &.{ .rm8, .imm8 }, &.{ 0xc0 }, 3, .none, .none },
-    .{ .rcr, .mi, &.{ .rm8, .imm8 }, &.{ 0xc0 }, 3, .rex, .none },
-    .{ .rcr, .m1, &.{ .rm16, .unity }, &.{ 0xd1 }, 3, .none, .none },
-    .{ .rcr, .mc, &.{ .rm16, .cl }, &.{ 0xd3 }, 3, .none, .none },
-    .{ .rcr, .mi, &.{ .rm16, .imm8 }, &.{ 0xc1 }, 3, .none, .none },
-    .{ .rcr, .m1, &.{ .rm32, .unity }, &.{ 0xd1 }, 3, .none, .none },
-    .{ .rcr, .m1, &.{ .rm64, .unity }, &.{ 0xd1 }, 3, .long, .none },
-    .{ .rcr, .mc, &.{ .rm32, .cl }, &.{ 0xd3 }, 3, .none, .none },
-    .{ .rcr, .mc, &.{ .rm64, .cl }, &.{ 0xd3 }, 3, .long, .none },
-    .{ .rcr, .mi, &.{ .rm32, .imm8 }, &.{ 0xc1 }, 3, .none, .none },
-    .{ .rcr, .mi, &.{ .rm64, .imm8 }, &.{ 0xc1 }, 3, .long, .none },
-
-    .{ .rol, .m1, &.{ .rm8, .unity }, &.{ 0xd0 }, 0, .none, .none },
-    .{ .rol, .m1, &.{ .rm8, .unity }, &.{ 0xd0 }, 0, .rex, .none },
-    .{ .rol, .mc, &.{ .rm8, .cl }, &.{ 0xd2 }, 0, .none, .none },
-    .{ .rol, .mc, &.{ .rm8, .cl }, &.{ 0xd2 }, 0, .rex, .none },
-    .{ .rol, .mi, &.{ .rm8, .imm8 }, &.{ 0xc0 }, 0, .none, .none },
-    .{ .rol, .mi, &.{ .rm8, .imm8 }, &.{ 0xc0 }, 0, .rex, .none },
-    .{ .rol, .m1, &.{ .rm16, .unity }, &.{ 0xd1 }, 0, .none, .none },
-    .{ .rol, .mc, &.{ .rm16, .cl }, &.{ 0xd3 }, 0, .none, .none },
-    .{ .rol, .mi, &.{ .rm16, .imm8 }, &.{ 0xc1 }, 0, .none, .none },
-    .{ .rol, .m1, &.{ .rm32, .unity }, &.{ 0xd1 }, 0, .none, .none },
-    .{ .rol, .m1, &.{ .rm64, .unity }, &.{ 0xd1 }, 0, .long, .none },
-    .{ .rol, .mc, &.{ .rm32, .cl }, &.{ 0xd3 }, 0, .none, .none },
-    .{ .rol, .mc, &.{ .rm64, .cl }, &.{ 0xd3 }, 0, .long, .none },
-    .{ .rol, .mi, &.{ .rm32, .imm8 }, &.{ 0xc1 }, 0, .none, .none },
-    .{ .rol, .mi, &.{ .rm64, .imm8 }, &.{ 0xc1 }, 0, .long, .none },
-
-    .{ .ror, .m1, &.{ .rm8, .unity }, &.{ 0xd0 }, 1, .none, .none },
-    .{ .ror, .m1, &.{ .rm8, .unity }, &.{ 0xd0 }, 1, .rex, .none },
-    .{ .ror, .mc, &.{ .rm8, .cl }, &.{ 0xd2 }, 1, .none, .none },
-    .{ .ror, .mc, &.{ .rm8, .cl }, &.{ 0xd2 }, 1, .rex, .none },
-    .{ .ror, .mi, &.{ .rm8, .imm8 }, &.{ 0xc0 }, 1, .none, .none },
-    .{ .ror, .mi, &.{ .rm8, .imm8 }, &.{ 0xc0 }, 1, .rex, .none },
-    .{ .ror, .m1, &.{ .rm16, .unity }, &.{ 0xd1 }, 1, .none, .none },
-    .{ .ror, .mc, &.{ .rm16, .cl }, &.{ 0xd3 }, 1, .none, .none },
-    .{ .ror, .mi, &.{ .rm16, .imm8 }, &.{ 0xc1 }, 1, .none, .none },
-    .{ .ror, .m1, &.{ .rm32, .unity }, &.{ 0xd1 }, 1, .none, .none },
-    .{ .ror, .m1, &.{ .rm64, .unity }, &.{ 0xd1 }, 1, .long, .none },
-    .{ .ror, .mc, &.{ .rm32, .cl }, &.{ 0xd3 }, 1, .none, .none },
-    .{ .ror, .mc, &.{ .rm64, .cl }, &.{ 0xd3 }, 1, .long, .none },
-    .{ .ror, .mi, &.{ .rm32, .imm8 }, &.{ 0xc1 }, 1, .none, .none },
-    .{ .ror, .mi, &.{ .rm64, .imm8 }, &.{ 0xc1 }, 1, .long, .none },
-
-    .{ .sal, .m1, &.{ .rm8, .unity }, &.{ 0xd0 }, 4, .none, .none },
-    .{ .sal, .m1, &.{ .rm8, .unity }, &.{ 0xd0 }, 4, .rex, .none },
-    .{ .sal, .m1, &.{ .rm16, .unity }, &.{ 0xd1 }, 4, .none, .none },
-    .{ .sal, .m1, &.{ .rm32, .unity }, &.{ 0xd1 }, 4, .none, .none },
-    .{ .sal, .m1, &.{ .rm64, .unity }, &.{ 0xd1 }, 4, .long, .none },
-    .{ .sal, .mc, &.{ .rm8, .cl }, &.{ 0xd2 }, 4, .none, .none },
-    .{ .sal, .mc, &.{ .rm8, .cl }, &.{ 0xd2 }, 4, .rex, .none },
-    .{ .sal, .mc, &.{ .rm16, .cl }, &.{ 0xd3 }, 4, .none, .none },
-    .{ .sal, .mc, &.{ .rm32, .cl }, &.{ 0xd3 }, 4, .none, .none },
-    .{ .sal, .mc, &.{ .rm64, .cl }, &.{ 0xd3 }, 4, .long, .none },
-    .{ .sal, .mi, &.{ .rm8, .imm8 }, &.{ 0xc0 }, 4, .none, .none },
-    .{ .sal, .mi, &.{ .rm8, .imm8 }, &.{ 0xc0 }, 4, .rex, .none },
-    .{ .sal, .mi, &.{ .rm16, .imm8 }, &.{ 0xc1 }, 4, .none, .none },
-    .{ .sal, .mi, &.{ .rm32, .imm8 }, &.{ 0xc1 }, 4, .none, .none },
-    .{ .sal, .mi, &.{ .rm64, .imm8 }, &.{ 0xc1 }, 4, .long, .none },
-
-    .{ .sar, .m1, &.{ .rm8, .unity }, &.{ 0xd0 }, 7, .none, .none },
-    .{ .sar, .m1, &.{ .rm8, .unity }, &.{ 0xd0 }, 7, .rex, .none },
-    .{ .sar, .m1, &.{ .rm16, .unity }, &.{ 0xd1 }, 7, .none, .none },
-    .{ .sar, .m1, &.{ .rm32, .unity }, &.{ 0xd1 }, 7, .none, .none },
-    .{ .sar, .m1, &.{ .rm64, .unity }, &.{ 0xd1 }, 7, .long, .none },
-    .{ .sar, .mc, &.{ .rm8, .cl }, &.{ 0xd2 }, 7, .none, .none },
-    .{ .sar, .mc, &.{ .rm8, .cl }, &.{ 0xd2 }, 7, .rex, .none },
-    .{ .sar, .mc, &.{ .rm16, .cl }, &.{ 0xd3 }, 7, .none, .none },
-    .{ .sar, .mc, &.{ .rm32, .cl }, &.{ 0xd3 }, 7, .none, .none },
-    .{ .sar, .mc, &.{ .rm64, .cl }, &.{ 0xd3 }, 7, .long, .none },
-    .{ .sar, .mi, &.{ .rm8, .imm8 }, &.{ 0xc0 }, 7, .none, .none },
-    .{ .sar, .mi, &.{ .rm8, .imm8 }, &.{ 0xc0 }, 7, .rex, .none },
-    .{ .sar, .mi, &.{ .rm16, .imm8 }, &.{ 0xc1 }, 7, .none, .none },
-    .{ .sar, .mi, &.{ .rm32, .imm8 }, &.{ 0xc1 }, 7, .none, .none },
-    .{ .sar, .mi, &.{ .rm64, .imm8 }, &.{ 0xc1 }, 7, .long, .none },
-
-    .{ .sbb, .zi, &.{ .al, .imm8 }, &.{ 0x1c }, 0, .none, .none },
-    .{ .sbb, .zi, &.{ .ax, .imm16 }, &.{ 0x1d }, 0, .none, .none },
-    .{ .sbb, .zi, &.{ .eax, .imm32 }, &.{ 0x1d }, 0, .none, .none },
-    .{ .sbb, .zi, &.{ .rax, .imm32s }, &.{ 0x1d }, 0, .long, .none },
-    .{ .sbb, .mi, &.{ .rm8, .imm8 }, &.{ 0x80 }, 3, .none, .none },
-    .{ .sbb, .mi, &.{ .rm8, .imm8 }, &.{ 0x80 }, 3, .rex, .none },
-    .{ .sbb, .mi, &.{ .rm16, .imm16 }, &.{ 0x81 }, 3, .none, .none },
-    .{ .sbb, .mi, &.{ .rm32, .imm32 }, &.{ 0x81 }, 3, .none, .none },
-    .{ .sbb, .mi, &.{ .rm64, .imm32s }, &.{ 0x81 }, 3, .long, .none },
-    .{ .sbb, .mi, &.{ .rm16, .imm8s }, &.{ 0x83 }, 3, .none, .none },
-    .{ .sbb, .mi, &.{ .rm32, .imm8s }, &.{ 0x83 }, 3, .none, .none },
-    .{ .sbb, .mi, &.{ .rm64, .imm8s }, &.{ 0x83 }, 3, .long, .none },
-    .{ .sbb, .mr, &.{ .rm8, .r8 }, &.{ 0x18 }, 0, .none, .none },
-    .{ .sbb, .mr, &.{ .rm8, .r8 }, &.{ 0x18 }, 0, .rex, .none },
-    .{ .sbb, .mr, &.{ .rm16, .r16 }, &.{ 0x19 }, 0, .none, .none },
-    .{ .sbb, .mr, &.{ .rm32, .r32 }, &.{ 0x19 }, 0, .none, .none },
-    .{ .sbb, .mr, &.{ .rm64, .r64 }, &.{ 0x19 }, 0, .long, .none },
-    .{ .sbb, .rm, &.{ .r8, .rm8 }, &.{ 0x1a }, 0, .none, .none },
-    .{ .sbb, .rm, &.{ .r8, .rm8 }, &.{ 0x1a }, 0, .rex, .none },
-    .{ .sbb, .rm, &.{ .r16, .rm16 }, &.{ 0x1b }, 0, .none, .none },
-    .{ .sbb, .rm, &.{ .r32, .rm32 }, &.{ 0x1b }, 0, .none, .none },
-    .{ .sbb, .rm, &.{ .r64, .rm64 }, &.{ 0x1b }, 0, .long, .none },
-
-    .{ .scas, .np, &.{ .m8 }, &.{ 0xae }, 0, .none, .none },
-    .{ .scas, .np, &.{ .m16 }, &.{ 0xaf }, 0, .none, .none },
-    .{ .scas, .np, &.{ .m32 }, &.{ 0xaf }, 0, .none, .none },
-    .{ .scas, .np, &.{ .m64 }, &.{ 0xaf }, 0, .long, .none },
+    .{ .rcl, .m1, &.{ .rm8, .unity }, &.{ 0xd0 }, 2, .none, .none },
+    .{ .rcl, .m1, &.{ .rm8, .unity }, &.{ 0xd0 }, 2, .rex, .none },
+    .{ .rcl, .mc, &.{ .rm8, .cl }, &.{ 0xd2 }, 2, .none, .none },
+    .{ .rcl, .mc, &.{ .rm8, .cl }, &.{ 0xd2 }, 2, .rex, .none },
+    .{ .rcl, .mi, &.{ .rm8, .imm8 }, &.{ 0xc0 }, 2, .none, .none },
+    .{ .rcl, .mi, &.{ .rm8, .imm8 }, &.{ 0xc0 }, 2, .rex, .none },
+    .{ .rcl, .m1, &.{ .rm16, .unity }, &.{ 0xd1 }, 2, .short, .none },
+    .{ .rcl, .mc, &.{ .rm16, .cl }, &.{ 0xd3 }, 2, .short, .none },
+    .{ .rcl, .mi, &.{ .rm16, .imm8 }, &.{ 0xc1 }, 2, .short, .none },
+    .{ .rcl, .m1, &.{ .rm32, .unity }, &.{ 0xd1 }, 2, .none, .none },
+    .{ .rcl, .m1, &.{ .rm64, .unity }, &.{ 0xd1 }, 2, .long, .none },
+    .{ .rcl, .mc, &.{ .rm32, .cl }, &.{ 0xd3 }, 2, .none, .none },
+    .{ .rcl, .mc, &.{ .rm64, .cl }, &.{ 0xd3 }, 2, .long, .none },
+    .{ .rcl, .mi, &.{ .rm32, .imm8 }, &.{ 0xc1 }, 2, .none, .none },
+    .{ .rcl, .mi, &.{ .rm64, .imm8 }, &.{ 0xc1 }, 2, .long, .none },
+
+    .{ .rcr, .m1, &.{ .rm8, .unity }, &.{ 0xd0 }, 3, .none, .none },
+    .{ .rcr, .m1, &.{ .rm8, .unity }, &.{ 0xd0 }, 3, .rex, .none },
+    .{ .rcr, .mc, &.{ .rm8, .cl }, &.{ 0xd2 }, 3, .none, .none },
+    .{ .rcr, .mc, &.{ .rm8, .cl }, &.{ 0xd2 }, 3, .rex, .none },
+    .{ .rcr, .mi, &.{ .rm8, .imm8 }, &.{ 0xc0 }, 3, .none, .none },
+    .{ .rcr, .mi, &.{ .rm8, .imm8 }, &.{ 0xc0 }, 3, .rex, .none },
+    .{ .rcr, .m1, &.{ .rm16, .unity }, &.{ 0xd1 }, 3, .short, .none },
+    .{ .rcr, .mc, &.{ .rm16, .cl }, &.{ 0xd3 }, 3, .short, .none },
+    .{ .rcr, .mi, &.{ .rm16, .imm8 }, &.{ 0xc1 }, 3, .short, .none },
+    .{ .rcr, .m1, &.{ .rm32, .unity }, &.{ 0xd1 }, 3, .none, .none },
+    .{ .rcr, .m1, &.{ .rm64, .unity }, &.{ 0xd1 }, 3, .long, .none },
+    .{ .rcr, .mc, &.{ .rm32, .cl }, &.{ 0xd3 }, 3, .none, .none },
+    .{ .rcr, .mc, &.{ .rm64, .cl }, &.{ 0xd3 }, 3, .long, .none },
+    .{ .rcr, .mi, &.{ .rm32, .imm8 }, &.{ 0xc1 }, 3, .none, .none },
+    .{ .rcr, .mi, &.{ .rm64, .imm8 }, &.{ 0xc1 }, 3, .long, .none },
+
+    .{ .rol, .m1, &.{ .rm8, .unity }, &.{ 0xd0 }, 0, .none, .none },
+    .{ .rol, .m1, &.{ .rm8, .unity }, &.{ 0xd0 }, 0, .rex, .none },
+    .{ .rol, .mc, &.{ .rm8, .cl }, &.{ 0xd2 }, 0, .none, .none },
+    .{ .rol, .mc, &.{ .rm8, .cl }, &.{ 0xd2 }, 0, .rex, .none },
+    .{ .rol, .mi, &.{ .rm8, .imm8 }, &.{ 0xc0 }, 0, .none, .none },
+    .{ .rol, .mi, &.{ .rm8, .imm8 }, &.{ 0xc0 }, 0, .rex, .none },
+    .{ .rol, .m1, &.{ .rm16, .unity }, &.{ 0xd1 }, 0, .short, .none },
+    .{ .rol, .mc, &.{ .rm16, .cl }, &.{ 0xd3 }, 0, .short, .none },
+    .{ .rol, .mi, &.{ .rm16, .imm8 }, &.{ 0xc1 }, 0, .short, .none },
+    .{ .rol, .m1, &.{ .rm32, .unity }, &.{ 0xd1 }, 0, .none, .none },
+    .{ .rol, .m1, &.{ .rm64, .unity }, &.{ 0xd1 }, 0, .long, .none },
+    .{ .rol, .mc, &.{ .rm32, .cl }, &.{ 0xd3 }, 0, .none, .none },
+    .{ .rol, .mc, &.{ .rm64, .cl }, &.{ 0xd3 }, 0, .long, .none },
+    .{ .rol, .mi, &.{ .rm32, .imm8 }, &.{ 0xc1 }, 0, .none, .none },
+    .{ .rol, .mi, &.{ .rm64, .imm8 }, &.{ 0xc1 }, 0, .long, .none },
+
+    .{ .ror, .m1, &.{ .rm8, .unity }, &.{ 0xd0 }, 1, .none, .none },
+    .{ .ror, .m1, &.{ .rm8, .unity }, &.{ 0xd0 }, 1, .rex, .none },
+    .{ .ror, .mc, &.{ .rm8, .cl }, &.{ 0xd2 }, 1, .none, .none },
+    .{ .ror, .mc, &.{ .rm8, .cl }, &.{ 0xd2 }, 1, .rex, .none },
+    .{ .ror, .mi, &.{ .rm8, .imm8 }, &.{ 0xc0 }, 1, .none, .none },
+    .{ .ror, .mi, &.{ .rm8, .imm8 }, &.{ 0xc0 }, 1, .rex, .none },
+    .{ .ror, .m1, &.{ .rm16, .unity }, &.{ 0xd1 }, 1, .short, .none },
+    .{ .ror, .mc, &.{ .rm16, .cl }, &.{ 0xd3 }, 1, .short, .none },
+    .{ .ror, .mi, &.{ .rm16, .imm8 }, &.{ 0xc1 }, 1, .short, .none },
+    .{ .ror, .m1, &.{ .rm32, .unity }, &.{ 0xd1 }, 1, .none, .none },
+    .{ .ror, .m1, &.{ .rm64, .unity }, &.{ 0xd1 }, 1, .long, .none },
+    .{ .ror, .mc, &.{ .rm32, .cl }, &.{ 0xd3 }, 1, .none, .none },
+    .{ .ror, .mc, &.{ .rm64, .cl }, &.{ 0xd3 }, 1, .long, .none },
+    .{ .ror, .mi, &.{ .rm32, .imm8 }, &.{ 0xc1 }, 1, .none, .none },
+    .{ .ror, .mi, &.{ .rm64, .imm8 }, &.{ 0xc1 }, 1, .long, .none },
+
+    .{ .sal, .m1, &.{ .rm8, .unity }, &.{ 0xd0 }, 4, .none, .none },
+    .{ .sal, .m1, &.{ .rm8, .unity }, &.{ 0xd0 }, 4, .rex, .none },
+    .{ .sal, .m1, &.{ .rm16, .unity }, &.{ 0xd1 }, 4, .short, .none },
+    .{ .sal, .m1, &.{ .rm32, .unity }, &.{ 0xd1 }, 4, .none, .none },
+    .{ .sal, .m1, &.{ .rm64, .unity }, &.{ 0xd1 }, 4, .long, .none },
+    .{ .sal, .mc, &.{ .rm8, .cl }, &.{ 0xd2 }, 4, .none, .none },
+    .{ .sal, .mc, &.{ .rm8, .cl }, &.{ 0xd2 }, 4, .rex, .none },
+    .{ .sal, .mc, &.{ .rm16, .cl }, &.{ 0xd3 }, 4, .short, .none },
+    .{ .sal, .mc, &.{ .rm32, .cl }, &.{ 0xd3 }, 4, .none, .none },
+    .{ .sal, .mc, &.{ .rm64, .cl }, &.{ 0xd3 }, 4, .long, .none },
+    .{ .sal, .mi, &.{ .rm8, .imm8 }, &.{ 0xc0 }, 4, .none, .none },
+    .{ .sal, .mi, &.{ .rm8, .imm8 }, &.{ 0xc0 }, 4, .rex, .none },
+    .{ .sal, .mi, &.{ .rm16, .imm8 }, &.{ 0xc1 }, 4, .short, .none },
+    .{ .sal, .mi, &.{ .rm32, .imm8 }, &.{ 0xc1 }, 4, .none, .none },
+    .{ .sal, .mi, &.{ .rm64, .imm8 }, &.{ 0xc1 }, 4, .long, .none },
+
+    .{ .sar, .m1, &.{ .rm8, .unity }, &.{ 0xd0 }, 7, .none, .none },
+    .{ .sar, .m1, &.{ .rm8, .unity }, &.{ 0xd0 }, 7, .rex, .none },
+    .{ .sar, .m1, &.{ .rm16, .unity }, &.{ 0xd1 }, 7, .short, .none },
+    .{ .sar, .m1, &.{ .rm32, .unity }, &.{ 0xd1 }, 7, .none, .none },
+    .{ .sar, .m1, &.{ .rm64, .unity }, &.{ 0xd1 }, 7, .long, .none },
+    .{ .sar, .mc, &.{ .rm8, .cl }, &.{ 0xd2 }, 7, .none, .none },
+    .{ .sar, .mc, &.{ .rm8, .cl }, &.{ 0xd2 }, 7, .rex, .none },
+    .{ .sar, .mc, &.{ .rm16, .cl }, &.{ 0xd3 }, 7, .short, .none },
+    .{ .sar, .mc, &.{ .rm32, .cl }, &.{ 0xd3 }, 7, .none, .none },
+    .{ .sar, .mc, &.{ .rm64, .cl }, &.{ 0xd3 }, 7, .long, .none },
+    .{ .sar, .mi, &.{ .rm8, .imm8 }, &.{ 0xc0 }, 7, .none, .none },
+    .{ .sar, .mi, &.{ .rm8, .imm8 }, &.{ 0xc0 }, 7, .rex, .none },
+    .{ .sar, .mi, &.{ .rm16, .imm8 }, &.{ 0xc1 }, 7, .short, .none },
+    .{ .sar, .mi, &.{ .rm32, .imm8 }, &.{ 0xc1 }, 7, .none, .none },
+    .{ .sar, .mi, &.{ .rm64, .imm8 }, &.{ 0xc1 }, 7, .long, .none },
+
+    .{ .sbb, .zi, &.{ .al, .imm8 }, &.{ 0x1c }, 0, .none, .none },
+    .{ .sbb, .zi, &.{ .ax, .imm16 }, &.{ 0x1d }, 0, .short, .none },
+    .{ .sbb, .zi, &.{ .eax, .imm32 }, &.{ 0x1d }, 0, .none, .none },
+    .{ .sbb, .zi, &.{ .rax, .imm32s }, &.{ 0x1d }, 0, .long, .none },
+    .{ .sbb, .mi, &.{ .rm8, .imm8 }, &.{ 0x80 }, 3, .none, .none },
+    .{ .sbb, .mi, &.{ .rm8, .imm8 }, &.{ 0x80 }, 3, .rex, .none },
+    .{ .sbb, .mi, &.{ .rm16, .imm16 }, &.{ 0x81 }, 3, .short, .none },
+    .{ .sbb, .mi, &.{ .rm32, .imm32 }, &.{ 0x81 }, 3, .none, .none },
+    .{ .sbb, .mi, &.{ .rm64, .imm32s }, &.{ 0x81 }, 3, .long, .none },
+    .{ .sbb, .mi, &.{ .rm16, .imm8s }, &.{ 0x83 }, 3, .short, .none },
+    .{ .sbb, .mi, &.{ .rm32, .imm8s }, &.{ 0x83 }, 3, .none, .none },
+    .{ .sbb, .mi, &.{ .rm64, .imm8s }, &.{ 0x83 }, 3, .long, .none },
+    .{ .sbb, .mr, &.{ .rm8, .r8 }, &.{ 0x18 }, 0, .none, .none },
+    .{ .sbb, .mr, &.{ .rm8, .r8 }, &.{ 0x18 }, 0, .rex, .none },
+    .{ .sbb, .mr, &.{ .rm16, .r16 }, &.{ 0x19 }, 0, .short, .none },
+    .{ .sbb, .mr, &.{ .rm32, .r32 }, &.{ 0x19 }, 0, .none, .none },
+    .{ .sbb, .mr, &.{ .rm64, .r64 }, &.{ 0x19 }, 0, .long, .none },
+    .{ .sbb, .rm, &.{ .r8, .rm8 }, &.{ 0x1a }, 0, .none, .none },
+    .{ .sbb, .rm, &.{ .r8, .rm8 }, &.{ 0x1a }, 0, .rex, .none },
+    .{ .sbb, .rm, &.{ .r16, .rm16 }, &.{ 0x1b }, 0, .short, .none },
+    .{ .sbb, .rm, &.{ .r32, .rm32 }, &.{ 0x1b }, 0, .none, .none },
+    .{ .sbb, .rm, &.{ .r64, .rm64 }, &.{ 0x1b }, 0, .long, .none },
+
+    .{ .scas, .np, &.{ .m8 }, &.{ 0xae }, 0, .none, .none },
+    .{ .scas, .np, &.{ .m16 }, &.{ 0xaf }, 0, .short, .none },
+    .{ .scas, .np, &.{ .m32 }, &.{ 0xaf }, 0, .none, .none },
+    .{ .scas, .np, &.{ .m64 }, &.{ 0xaf }, 0, .long, .none },

     .{ .scasb, .np, &.{}, &.{ 0xae }, 0, .none, .none },
     .{ .scasw, .np, &.{}, &.{ 0xaf }, 0, .short, .none },
@@ -682,153 +684,153 @@ pub const table = [_]Entry{
     .{ .sfence, .np, &.{}, &.{ 0x0f, 0xae, 0xf8 }, 0, .none, .none },

-    .{ .shl, .m1, &.{ .rm8, .unity }, &.{ 0xd0 }, 4, .none, .none },
-    .{ .shl, .m1, &.{ .rm8, .unity }, &.{ 0xd0 }, 4, .rex, .none },
-    .{ .shl, .m1, &.{ .rm16, .unity }, &.{ 0xd1 }, 4, .none, .none },
-    .{ .shl, .m1, &.{ .rm32, .unity }, &.{ 0xd1 }, 4, .none, .none },
-    .{ .shl, .m1, &.{ .rm64, .unity }, &.{ 0xd1 }, 4, .long, .none },
-    .{ .shl, .mc, &.{ .rm8, .cl }, &.{ 0xd2 }, 4, .none, .none },
-    .{ .shl, .mc, &.{ .rm8, .cl }, &.{ 0xd2 }, 4, .rex, .none },
-    .{ .shl, .mc, &.{ .rm16, .cl }, &.{ 0xd3 }, 4, .none, .none },
-    .{ .shl, .mc, &.{ .rm32, .cl }, &.{ 0xd3 }, 4, .none, .none },
-    .{ .shl, .mc, &.{ .rm64, .cl }, &.{ 0xd3 }, 4, .long, .none },
-    .{ .shl, .mi, &.{ .rm8, .imm8 }, &.{ 0xc0 }, 4, .none, .none },
-    .{ .shl, .mi, &.{ .rm8, .imm8 }, &.{ 0xc0 }, 4, .rex, .none },
-    .{ .shl, .mi, &.{ .rm16, .imm8 }, &.{ 0xc1 }, 4, .none, .none },
-    .{ .shl, .mi, &.{ .rm32, .imm8 }, &.{ 0xc1 }, 4, .none, .none },
-    .{ .shl, .mi, &.{ .rm64, .imm8 }, &.{ 0xc1 }, 4, .long, .none },
-
-    .{ .shld, .mri, &.{ .rm16, .r16, .imm8 }, &.{ 0x0f, 0xa4 }, 0, .none, .none },
-    .{ .shld, .mrc, &.{ .rm16, .r16, .cl }, &.{ 0x0f, 0xa5 }, 0, .none, .none },
-    .{ .shld, .mri, &.{ .rm32, .r32, .imm8 }, &.{ 0x0f, 0xa4 }, 0, .none, .none },
-    .{ .shld, .mri, &.{ .rm64, .r64, .imm8 }, &.{ 0x0f, 0xa4 }, 0, .long, .none },
-    .{ .shld, .mrc, &.{ .rm32, .r32, .cl }, &.{ 0x0f, 0xa5 }, 0, .none, .none },
-    .{ .shld, .mrc, &.{ .rm64, .r64, .cl }, &.{ 0x0f, 0xa5 }, 0, .long, .none },
-
-    .{ .shr, .m1, &.{ .rm8, .unity }, &.{ 0xd0 }, 5, .none, .none },
-    .{ .shr, .m1, &.{ .rm8, .unity }, &.{ 0xd0 }, 5, .rex, .none },
-    .{ .shr, .m1, &.{ .rm16, .unity }, &.{ 0xd1 }, 5, .none, .none },
-    .{ .shr, .m1, &.{ .rm32, .unity }, &.{ 0xd1 }, 5, .none, .none },
-    .{ .shr, .m1, &.{ .rm64, .unity }, &.{ 0xd1 }, 5, .long, .none },
-    .{ .shr, .mc, &.{ .rm8, .cl }, &.{ 0xd2 }, 5, .none, .none },
-    .{ .shr, .mc, &.{ .rm8, .cl }, &.{ 0xd2 }, 5, .rex, .none },
-    .{ .shr, .mc, &.{ .rm16, .cl }, &.{ 0xd3 }, 5, .none, .none },
-    .{ .shr, .mc, &.{ .rm32, .cl }, &.{ 0xd3 }, 5, .none, .none },
-    .{ .shr, .mc, &.{ .rm64, .cl }, &.{ 0xd3 }, 5, .long, .none },
-    .{ .shr, .mi, &.{ .rm8, .imm8 }, &.{ 0xc0 }, 5, .none, .none },
-    .{ .shr, .mi, &.{ .rm8, .imm8 }, &.{ 0xc0 }, 5, .rex, .none },
-    .{ .shr, .mi, &.{ .rm16, .imm8 }, &.{ 0xc1 }, 5, .none, .none },
-    .{ .shr, .mi, &.{ .rm32, .imm8 }, &.{ 0xc1 }, 5, .none, .none },
-    .{ .shr, .mi, &.{ .rm64, .imm8 }, &.{ 0xc1 }, 5, .long, .none },
-
-    .{ .shrd, .mri, &.{ .rm16, .r16, .imm8 }, &.{ 0x0f, 0xac }, 0, .none, .none },
-    .{ .shrd, .mrc, &.{ .rm16, .r16, .cl }, &.{ 0x0f, 0xad }, 0, .none, .none },
-    .{ .shrd, .mri, &.{ .rm32, .r32, .imm8 }, &.{ 0x0f, 0xac }, 0, .none, .none },
-    .{ .shrd, .mri, &.{ .rm64, .r64, .imm8 }, &.{ 0x0f, 0xac }, 0, .long, .none },
-    .{ .shrd, .mrc, &.{ .rm32, .r32, .cl }, &.{ 0x0f, 0xad }, 0, .none, .none },
-    .{ .shrd, .mrc, &.{ .rm64, .r64, .cl }, &.{ 0x0f, 0xad }, 0, .long, .none },
-
-    .{ .stos, .np, &.{ .m8 }, &.{ 0xaa }, 0, .none, .none },
-    .{ .stos, .np, &.{ .m16 }, &.{ 0xab }, 0, .none, .none },
-    .{ .stos, .np, &.{ .m32 }, &.{ 0xab }, 0, .none, .none },
-    .{ .stos, .np, &.{ .m64 }, &.{ 0xab }, 0, .long, .none },
+    .{ .shl, .m1, &.{ .rm8, .unity }, &.{ 0xd0 }, 4, .none, .none },
+    .{ .shl, .m1, &.{ .rm8, .unity }, &.{ 0xd0 }, 4, .rex, .none },
+    .{ .shl, .m1, &.{ .rm16, .unity }, &.{ 0xd1 }, 4, .short, .none },
+    .{ .shl, .m1, &.{ .rm32, .unity }, &.{ 0xd1 }, 4, .none, .none },
+    .{ .shl, .m1, &.{ .rm64, .unity }, &.{ 0xd1 }, 4, .long, .none },
+    .{ .shl, .mc, &.{ .rm8, .cl }, &.{ 0xd2 }, 4, .none, .none },
+    .{ .shl, .mc, &.{ .rm8, .cl }, &.{ 0xd2 }, 4, .rex, .none },
+    .{ .shl, .mc, &.{ .rm16, .cl }, &.{ 0xd3 }, 4, .short, .none },
+    .{ .shl, .mc, &.{ .rm32, .cl }, &.{ 0xd3 }, 4, .none, .none },
+    .{ .shl, .mc, &.{ .rm64, .cl }, &.{ 0xd3 }, 4, .long, .none },
+    .{ .shl, .mi, &.{ .rm8, .imm8 }, &.{ 0xc0 }, 4, .none, .none },
+    .{ .shl, .mi, &.{ .rm8, .imm8 }, &.{ 0xc0 }, 4, .rex, .none },
+    .{ .shl, .mi, &.{ .rm16, .imm8 }, &.{ 0xc1 }, 4, .short, .none },
+    .{ .shl, .mi, &.{ .rm32, .imm8 }, &.{ 0xc1 }, 4, .none, .none },
+    .{ .shl, .mi, &.{ .rm64, .imm8 }, &.{ 0xc1 }, 4, .long, .none },
+
+    .{ .shld, .mri, &.{ .rm16, .r16, .imm8 }, &.{ 0x0f, 0xa4 }, 0, .short, .none },
+    .{ .shld, .mrc, &.{ .rm16, .r16, .cl }, &.{ 0x0f, 0xa5 }, 0, .short, .none },
+    .{ .shld, .mri, &.{ .rm32, .r32, .imm8 }, &.{ 0x0f, 0xa4 }, 0, .none, .none },
+    .{ .shld, .mri, &.{ .rm64, .r64, .imm8 }, &.{ 0x0f, 0xa4 }, 0, .long, .none },
+    .{ .shld, .mrc, &.{ .rm32, .r32, .cl }, &.{ 0x0f, 0xa5 }, 0, .none, .none },
+    .{ .shld, .mrc, &.{ .rm64, .r64, .cl }, &.{ 0x0f, 0xa5 }, 0, .long, .none },
+
+    .{ .shr, .m1, &.{ .rm8, .unity }, &.{ 0xd0 }, 5, .none, .none },
+    .{ .shr, .m1, &.{ .rm8, .unity }, &.{ 0xd0 }, 5, .rex, .none },
+    .{ .shr, .m1, &.{ .rm16, .unity }, &.{ 0xd1 }, 5, .short, .none },
+    .{ .shr, .m1, &.{ .rm32, .unity }, &.{ 0xd1 }, 5, .none, .none },
+    .{ .shr, .m1, &.{ .rm64, .unity }, &.{ 0xd1 }, 5, .long, .none },
+    .{ .shr, .mc, &.{ .rm8, .cl }, &.{ 0xd2 }, 5, .none, .none },
+    .{ .shr, .mc, &.{ .rm8, .cl }, &.{ 0xd2 }, 5, .rex, .none },
+    .{ .shr, .mc, &.{ .rm16, .cl }, &.{ 0xd3 }, 5, .short, .none },
+    .{ .shr, .mc, &.{ .rm32, .cl }, &.{ 0xd3 }, 5, .none, .none },
+    .{ .shr, .mc, &.{ .rm64, .cl }, &.{ 0xd3 }, 5, .long, .none },
+    .{ .shr, .mi, &.{ .rm8, .imm8 }, &.{ 0xc0 }, 5, .none, .none },
+    .{ .shr, .mi, &.{ .rm8, .imm8 }, &.{ 0xc0 }, 5, .rex, .none },
+    .{ .shr, .mi, &.{ .rm16, .imm8 }, &.{ 0xc1 }, 5, .short, .none },
+    .{ .shr, .mi, &.{ .rm32, .imm8 }, &.{ 0xc1 }, 5, .none, .none },
+    .{ .shr, .mi, &.{ .rm64, .imm8 }, &.{ 0xc1 }, 5, .long, .none },
+
+    .{ .shrd, .mri, &.{ .rm16, .r16, .imm8 }, &.{ 0x0f, 0xac }, 0, .short, .none },
+    .{ .shrd, .mrc, &.{ .rm16, .r16, .cl }, &.{ 0x0f, 0xad }, 0, .short, .none },
+    .{ .shrd, .mri, &.{ .rm32, .r32, .imm8 }, &.{ 0x0f, 0xac }, 0, .none, .none },
+    .{ .shrd, .mri, &.{ .rm64, .r64, .imm8 }, &.{ 0x0f, 0xac }, 0, .long, .none },
+    .{ .shrd, .mrc, &.{ .rm32, .r32, .cl }, &.{ 0x0f, 0xad }, 0, .none, .none },
+    .{ .shrd, .mrc, &.{ .rm64, .r64, .cl }, &.{ 0x0f, 0xad }, 0, .long, .none },
+
+    .{ .stos, .np, &.{ .m8 }, &.{ 0xaa }, 0, .none, .none },
+    .{ .stos, .np, &.{ .m16 }, &.{ 0xab }, 0, .short, .none },
+    .{ .stos, .np, &.{ .m32 }, &.{ 0xab }, 0, .none, .none },
+    .{ .stos, .np, &.{ .m64 }, &.{ 0xab }, 0, .long, .none },

     .{ .stosb, .np, &.{}, &.{ 0xaa }, 0, .none, .none },
     .{ .stosw, .np, &.{}, &.{ 0xab }, 0, .short, .none },
     .{ .stosd, .np, &.{}, &.{ 0xab }, 0, .none, .none },
     .{ .stosq, .np, &.{}, &.{ 0xab }, 0, .long, .none },

-    .{ .sub, .zi, &.{ .al, .imm8 }, &.{ 0x2c }, 0, .none, .none },
-    .{ .sub, .zi, &.{ .ax, .imm16 }, &.{ 0x2d }, 0, .none, .none },
-    .{ .sub, .zi, &.{ .eax, .imm32 }, &.{ 0x2d }, 0, .none, .none },
-    .{ .sub, .zi, &.{ .rax, .imm32s }, &.{ 0x2d }, 0, .long, .none },
-    .{ .sub, .mi, &.{ .rm8, .imm8 }, &.{ 0x80 }, 5, .none, .none },
-    .{ .sub, .mi, &.{ .rm8, .imm8 }, &.{ 0x80 }, 5, .rex, .none },
-    .{ .sub, .mi, &.{ .rm16, .imm16 }, &.{ 0x81 }, 5, .none, .none },
-    .{ .sub, .mi, &.{ .rm32, .imm32 }, &.{ 0x81 }, 5, .none, .none },
-    .{ .sub, .mi, &.{ .rm64, .imm32s }, &.{ 0x81 }, 5, .long, .none },
-    .{ .sub, .mi, &.{ .rm16, .imm8s }, &.{ 0x83 }, 5, .none, .none },
-    .{ .sub, .mi, &.{ .rm32, .imm8s }, &.{ 0x83 }, 5, .none, .none },
-    .{ .sub, .mi, &.{ .rm64, .imm8s }, &.{ 0x83 }, 5, .long, .none },
-    .{ .sub, .mr, &.{ .rm8, .r8 }, &.{ 0x28 }, 0, .none, .none },
-    .{ .sub, .mr, &.{ .rm8, .r8 }, &.{ 0x28 }, 0, .rex, .none },
-    .{ .sub, .mr, &.{ .rm16, .r16 }, &.{ 0x29 }, 0, .none, .none },
-    .{ .sub, .mr, &.{ .rm32, .r32 }, &.{ 0x29 }, 0, .none, .none },
-    .{ .sub, .mr, &.{ .rm64, .r64 }, &.{ 0x29 }, 0, .long, .none },
-    .{ .sub, .rm, &.{ .r8, .rm8 }, &.{ 0x2a }, 0, .none, .none },
-    .{ .sub, .rm, &.{ .r8, .rm8 }, &.{ 0x2a }, 0, .rex, .none },
-    .{ .sub, .rm, &.{ .r16, .rm16 }, &.{ 0x2b }, 0, .none, .none },
-    .{ .sub, .rm, &.{ .r32, .rm32 }, &.{ 0x2b }, 0, .none, .none },
-    .{ .sub, .rm, &.{ .r64, .rm64 }, &.{ 0x2b }, 0, .long, .none },
+    .{ .sub, .zi, &.{ .al, .imm8 }, &.{ 0x2c }, 0, .none, .none },
+    .{ .sub, .zi, &.{ .ax, .imm16 }, &.{ 0x2d }, 0, .short, .none },
+    .{ .sub, .zi, &.{ .eax, .imm32 }, &.{ 0x2d }, 0, .none, .none },
+    .{ .sub, .zi, &.{ .rax, .imm32s }, &.{ 0x2d }, 0, .long, .none },
+    .{ .sub, .mi, &.{ .rm8, .imm8 }, &.{ 0x80 }, 5, .none, .none },
+    .{ .sub, .mi, &.{ .rm8, .imm8 }, &.{ 0x80 }, 5, .rex, .none },
+    .{ .sub, .mi, &.{ .rm16, .imm16 }, &.{ 0x81 }, 5, .short, .none },
+    .{ .sub, .mi, &.{ .rm32, .imm32 }, &.{ 0x81 }, 5, .none, .none },
+    .{ .sub, .mi, &.{ .rm64, .imm32s }, &.{ 0x81 }, 5, .long, .none },
+    .{ .sub, .mi, &.{ .rm16, .imm8s }, &.{ 0x83 }, 5, .short, .none },
+    .{ .sub, .mi, &.{ .rm32, .imm8s }, &.{ 0x83 }, 5, .none, .none },
+    .{ .sub, .mi, &.{ .rm64, .imm8s }, &.{ 0x83 }, 5, .long, .none },
+
.{ .sub, .mr, &.{ .rm8, .r8 }, &.{ 0x28 }, 0, .none, .none }, + .{ .sub, .mr, &.{ .rm8, .r8 }, &.{ 0x28 }, 0, .rex, .none }, + .{ .sub, .mr, &.{ .rm16, .r16 }, &.{ 0x29 }, 0, .short, .none }, + .{ .sub, .mr, &.{ .rm32, .r32 }, &.{ 0x29 }, 0, .none, .none }, + .{ .sub, .mr, &.{ .rm64, .r64 }, &.{ 0x29 }, 0, .long, .none }, + .{ .sub, .rm, &.{ .r8, .rm8 }, &.{ 0x2a }, 0, .none, .none }, + .{ .sub, .rm, &.{ .r8, .rm8 }, &.{ 0x2a }, 0, .rex, .none }, + .{ .sub, .rm, &.{ .r16, .rm16 }, &.{ 0x2b }, 0, .short, .none }, + .{ .sub, .rm, &.{ .r32, .rm32 }, &.{ 0x2b }, 0, .none, .none }, + .{ .sub, .rm, &.{ .r64, .rm64 }, &.{ 0x2b }, 0, .long, .none }, .{ .syscall, .np, &.{}, &.{ 0x0f, 0x05 }, 0, .none, .none }, - .{ .@"test", .zi, &.{ .al, .imm8 }, &.{ 0xa8 }, 0, .none, .none }, - .{ .@"test", .zi, &.{ .ax, .imm16 }, &.{ 0xa9 }, 0, .none, .none }, - .{ .@"test", .zi, &.{ .eax, .imm32 }, &.{ 0xa9 }, 0, .none, .none }, - .{ .@"test", .zi, &.{ .rax, .imm32s }, &.{ 0xa9 }, 0, .long, .none }, - .{ .@"test", .mi, &.{ .rm8, .imm8 }, &.{ 0xf6 }, 0, .none, .none }, - .{ .@"test", .mi, &.{ .rm8, .imm8 }, &.{ 0xf6 }, 0, .rex, .none }, - .{ .@"test", .mi, &.{ .rm16, .imm16 }, &.{ 0xf7 }, 0, .none, .none }, - .{ .@"test", .mi, &.{ .rm32, .imm32 }, &.{ 0xf7 }, 0, .none, .none }, - .{ .@"test", .mi, &.{ .rm64, .imm32s }, &.{ 0xf7 }, 0, .long, .none }, - .{ .@"test", .mr, &.{ .rm8, .r8 }, &.{ 0x84 }, 0, .none, .none }, - .{ .@"test", .mr, &.{ .rm8, .r8 }, &.{ 0x84 }, 0, .rex, .none }, - .{ .@"test", .mr, &.{ .rm16, .r16 }, &.{ 0x85 }, 0, .none, .none }, - .{ .@"test", .mr, &.{ .rm32, .r32 }, &.{ 0x85 }, 0, .none, .none }, - .{ .@"test", .mr, &.{ .rm64, .r64 }, &.{ 0x85 }, 0, .long, .none }, - - .{ .tzcnt, .rm, &.{ .r16, .rm16 }, &.{ 0xf3, 0x0f, 0xbc }, 0, .none, .none }, - .{ .tzcnt, .rm, &.{ .r32, .rm32 }, &.{ 0xf3, 0x0f, 0xbc }, 0, .none, .none }, - .{ .tzcnt, .rm, &.{ .r64, .rm64 }, &.{ 0xf3, 0x0f, 0xbc }, 0, .long, .none }, + .{ .@"test", .zi, &.{ .al, .imm8 }, &.{ 0xa8 }, 0, .none, .none }, + .{ .@"test", .zi, &.{ .ax, .imm16 }, &.{ 0xa9 }, 0, .short, .none }, + .{ .@"test", .zi, &.{ .eax, .imm32 }, &.{ 0xa9 }, 0, .none, .none }, + .{ .@"test", .zi, &.{ .rax, .imm32s }, &.{ 0xa9 }, 0, .long, .none }, + .{ .@"test", .mi, &.{ .rm8, .imm8 }, &.{ 0xf6 }, 0, .none, .none }, + .{ .@"test", .mi, &.{ .rm8, .imm8 }, &.{ 0xf6 }, 0, .rex, .none }, + .{ .@"test", .mi, &.{ .rm16, .imm16 }, &.{ 0xf7 }, 0, .short, .none }, + .{ .@"test", .mi, &.{ .rm32, .imm32 }, &.{ 0xf7 }, 0, .none, .none }, + .{ .@"test", .mi, &.{ .rm64, .imm32s }, &.{ 0xf7 }, 0, .long, .none }, + .{ .@"test", .mr, &.{ .rm8, .r8 }, &.{ 0x84 }, 0, .none, .none }, + .{ .@"test", .mr, &.{ .rm8, .r8 }, &.{ 0x84 }, 0, .rex, .none }, + .{ .@"test", .mr, &.{ .rm16, .r16 }, &.{ 0x85 }, 0, .short, .none }, + .{ .@"test", .mr, &.{ .rm32, .r32 }, &.{ 0x85 }, 0, .none, .none }, + .{ .@"test", .mr, &.{ .rm64, .r64 }, &.{ 0x85 }, 0, .long, .none }, + + .{ .tzcnt, .rm, &.{ .r16, .rm16 }, &.{ 0xf3, 0x0f, 0xbc }, 0, .short, .none }, + .{ .tzcnt, .rm, &.{ .r32, .rm32 }, &.{ 0xf3, 0x0f, 0xbc }, 0, .none, .none }, + .{ .tzcnt, .rm, &.{ .r64, .rm64 }, &.{ 0xf3, 0x0f, 0xbc }, 0, .long, .none }, .{ .ud2, .np, &.{}, &.{ 0x0f, 0x0b }, 0, .none, .none }, - .{ .xadd, .mr, &.{ .rm8, .r8 }, &.{ 0x0f, 0xc0 }, 0, .none, .none }, - .{ .xadd, .mr, &.{ .rm8, .r8 }, &.{ 0x0f, 0xc0 }, 0, .rex, .none }, - .{ .xadd, .mr, &.{ .rm16, .r16 }, &.{ 0x0f, 0xc1 }, 0, .none, .none }, - .{ .xadd, .mr, &.{ .rm32, .r32 }, &.{ 0x0f, 0xc1 }, 0, .none, .none }, - .{ .xadd, .mr, &.{ .rm64, .r64 }, &.{ 0x0f, 
0xc1 }, 0, .long, .none }, - - .{ .xchg, .o, &.{ .ax, .r16 }, &.{ 0x90 }, 0, .none, .none }, - .{ .xchg, .o, &.{ .r16, .ax }, &.{ 0x90 }, 0, .none, .none }, - .{ .xchg, .o, &.{ .eax, .r32 }, &.{ 0x90 }, 0, .none, .none }, - .{ .xchg, .o, &.{ .rax, .r64 }, &.{ 0x90 }, 0, .long, .none }, - .{ .xchg, .o, &.{ .r32, .eax }, &.{ 0x90 }, 0, .none, .none }, - .{ .xchg, .o, &.{ .r64, .rax }, &.{ 0x90 }, 0, .long, .none }, - .{ .xchg, .mr, &.{ .rm8, .r8 }, &.{ 0x86 }, 0, .none, .none }, - .{ .xchg, .mr, &.{ .rm8, .r8 }, &.{ 0x86 }, 0, .rex, .none }, - .{ .xchg, .rm, &.{ .r8, .rm8 }, &.{ 0x86 }, 0, .none, .none }, - .{ .xchg, .rm, &.{ .r8, .rm8 }, &.{ 0x86 }, 0, .rex, .none }, - .{ .xchg, .mr, &.{ .rm16, .r16 }, &.{ 0x87 }, 0, .none, .none }, - .{ .xchg, .rm, &.{ .r16, .rm16 }, &.{ 0x87 }, 0, .none, .none }, - .{ .xchg, .mr, &.{ .rm32, .r32 }, &.{ 0x87 }, 0, .none, .none }, - .{ .xchg, .mr, &.{ .rm64, .r64 }, &.{ 0x87 }, 0, .long, .none }, - .{ .xchg, .rm, &.{ .r32, .rm32 }, &.{ 0x87 }, 0, .none, .none }, - .{ .xchg, .rm, &.{ .r64, .rm64 }, &.{ 0x87 }, 0, .long, .none }, - - .{ .xor, .zi, &.{ .al, .imm8 }, &.{ 0x34 }, 0, .none, .none }, - .{ .xor, .zi, &.{ .ax, .imm16 }, &.{ 0x35 }, 0, .none, .none }, - .{ .xor, .zi, &.{ .eax, .imm32 }, &.{ 0x35 }, 0, .none, .none }, - .{ .xor, .zi, &.{ .rax, .imm32s }, &.{ 0x35 }, 0, .long, .none }, - .{ .xor, .mi, &.{ .rm8, .imm8 }, &.{ 0x80 }, 6, .none, .none }, - .{ .xor, .mi, &.{ .rm8, .imm8 }, &.{ 0x80 }, 6, .rex, .none }, - .{ .xor, .mi, &.{ .rm16, .imm16 }, &.{ 0x81 }, 6, .none, .none }, - .{ .xor, .mi, &.{ .rm32, .imm32 }, &.{ 0x81 }, 6, .none, .none }, - .{ .xor, .mi, &.{ .rm64, .imm32s }, &.{ 0x81 }, 6, .long, .none }, - .{ .xor, .mi, &.{ .rm16, .imm8s }, &.{ 0x83 }, 6, .none, .none }, - .{ .xor, .mi, &.{ .rm32, .imm8s }, &.{ 0x83 }, 6, .none, .none }, - .{ .xor, .mi, &.{ .rm64, .imm8s }, &.{ 0x83 }, 6, .long, .none }, - .{ .xor, .mr, &.{ .rm8, .r8 }, &.{ 0x30 }, 0, .none, .none }, - .{ .xor, .mr, &.{ .rm8, .r8 }, &.{ 0x30 }, 0, .rex, .none }, - .{ .xor, .mr, &.{ .rm16, .r16 }, &.{ 0x31 }, 0, .none, .none }, - .{ .xor, .mr, &.{ .rm32, .r32 }, &.{ 0x31 }, 0, .none, .none }, - .{ .xor, .mr, &.{ .rm64, .r64 }, &.{ 0x31 }, 0, .long, .none }, - .{ .xor, .rm, &.{ .r8, .rm8 }, &.{ 0x32 }, 0, .none, .none }, - .{ .xor, .rm, &.{ .r8, .rm8 }, &.{ 0x32 }, 0, .rex, .none }, - .{ .xor, .rm, &.{ .r16, .rm16 }, &.{ 0x33 }, 0, .none, .none }, - .{ .xor, .rm, &.{ .r32, .rm32 }, &.{ 0x33 }, 0, .none, .none }, - .{ .xor, .rm, &.{ .r64, .rm64 }, &.{ 0x33 }, 0, .long, .none }, + .{ .xadd, .mr, &.{ .rm8, .r8 }, &.{ 0x0f, 0xc0 }, 0, .none, .none }, + .{ .xadd, .mr, &.{ .rm8, .r8 }, &.{ 0x0f, 0xc0 }, 0, .rex, .none }, + .{ .xadd, .mr, &.{ .rm16, .r16 }, &.{ 0x0f, 0xc1 }, 0, .short, .none }, + .{ .xadd, .mr, &.{ .rm32, .r32 }, &.{ 0x0f, 0xc1 }, 0, .none, .none }, + .{ .xadd, .mr, &.{ .rm64, .r64 }, &.{ 0x0f, 0xc1 }, 0, .long, .none }, + + .{ .xchg, .o, &.{ .ax, .r16 }, &.{ 0x90 }, 0, .short, .none }, + .{ .xchg, .o, &.{ .r16, .ax }, &.{ 0x90 }, 0, .short, .none }, + .{ .xchg, .o, &.{ .eax, .r32 }, &.{ 0x90 }, 0, .none, .none }, + .{ .xchg, .o, &.{ .rax, .r64 }, &.{ 0x90 }, 0, .long, .none }, + .{ .xchg, .o, &.{ .r32, .eax }, &.{ 0x90 }, 0, .none, .none }, + .{ .xchg, .o, &.{ .r64, .rax }, &.{ 0x90 }, 0, .long, .none }, + .{ .xchg, .mr, &.{ .rm8, .r8 }, &.{ 0x86 }, 0, .none, .none }, + .{ .xchg, .mr, &.{ .rm8, .r8 }, &.{ 0x86 }, 0, .rex, .none }, + .{ .xchg, .rm, &.{ .r8, .rm8 }, &.{ 0x86 }, 0, .none, .none }, + .{ .xchg, .rm, &.{ .r8, .rm8 }, &.{ 0x86 }, 0, .rex, .none }, + .{ 
.xchg, .mr, &.{ .rm16, .r16 }, &.{ 0x87 }, 0, .short, .none }, + .{ .xchg, .rm, &.{ .r16, .rm16 }, &.{ 0x87 }, 0, .short, .none }, + .{ .xchg, .mr, &.{ .rm32, .r32 }, &.{ 0x87 }, 0, .none, .none }, + .{ .xchg, .mr, &.{ .rm64, .r64 }, &.{ 0x87 }, 0, .long, .none }, + .{ .xchg, .rm, &.{ .r32, .rm32 }, &.{ 0x87 }, 0, .none, .none }, + .{ .xchg, .rm, &.{ .r64, .rm64 }, &.{ 0x87 }, 0, .long, .none }, + + .{ .xor, .zi, &.{ .al, .imm8 }, &.{ 0x34 }, 0, .none, .none }, + .{ .xor, .zi, &.{ .ax, .imm16 }, &.{ 0x35 }, 0, .short, .none }, + .{ .xor, .zi, &.{ .eax, .imm32 }, &.{ 0x35 }, 0, .none, .none }, + .{ .xor, .zi, &.{ .rax, .imm32s }, &.{ 0x35 }, 0, .long, .none }, + .{ .xor, .mi, &.{ .rm8, .imm8 }, &.{ 0x80 }, 6, .none, .none }, + .{ .xor, .mi, &.{ .rm8, .imm8 }, &.{ 0x80 }, 6, .rex, .none }, + .{ .xor, .mi, &.{ .rm16, .imm16 }, &.{ 0x81 }, 6, .short, .none }, + .{ .xor, .mi, &.{ .rm32, .imm32 }, &.{ 0x81 }, 6, .none, .none }, + .{ .xor, .mi, &.{ .rm64, .imm32s }, &.{ 0x81 }, 6, .long, .none }, + .{ .xor, .mi, &.{ .rm16, .imm8s }, &.{ 0x83 }, 6, .short, .none }, + .{ .xor, .mi, &.{ .rm32, .imm8s }, &.{ 0x83 }, 6, .none, .none }, + .{ .xor, .mi, &.{ .rm64, .imm8s }, &.{ 0x83 }, 6, .long, .none }, + .{ .xor, .mr, &.{ .rm8, .r8 }, &.{ 0x30 }, 0, .none, .none }, + .{ .xor, .mr, &.{ .rm8, .r8 }, &.{ 0x30 }, 0, .rex, .none }, + .{ .xor, .mr, &.{ .rm16, .r16 }, &.{ 0x31 }, 0, .short, .none }, + .{ .xor, .mr, &.{ .rm32, .r32 }, &.{ 0x31 }, 0, .none, .none }, + .{ .xor, .mr, &.{ .rm64, .r64 }, &.{ 0x31 }, 0, .long, .none }, + .{ .xor, .rm, &.{ .r8, .rm8 }, &.{ 0x32 }, 0, .none, .none }, + .{ .xor, .rm, &.{ .r8, .rm8 }, &.{ 0x32 }, 0, .rex, .none }, + .{ .xor, .rm, &.{ .r16, .rm16 }, &.{ 0x33 }, 0, .short, .none }, + .{ .xor, .rm, &.{ .r32, .rm32 }, &.{ 0x33 }, 0, .none, .none }, + .{ .xor, .rm, &.{ .r64, .rm64 }, &.{ 0x33 }, 0, .long, .none }, // SSE .{ .addss, .rm, &.{ .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x58 }, 0, .none, .sse }, @@ -911,9 +913,39 @@ pub const table = [_]Entry{ .{ .orpd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x56 }, 0, .none, .sse2 }, - .{ .pextrw, .mri, &.{ .r16, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0xc5 }, 0, .none, .sse2 }, + .{ .pextrw, .rmi, &.{ .r32, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0xc5 }, 0, .none, .sse2 }, + .{ .pextrw, .rmi, &.{ .r64, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0xc5 }, 0, .long, .sse2 }, - .{ .pinsrw, .rmi, &.{ .xmm, .rm16, .imm8 }, &.{ 0x66, 0x0f, 0xc4 }, 0, .none, .sse2 }, + .{ .pinsrw, .rmi, &.{ .xmm, .r32_m16, .imm8 }, &.{ 0x66, 0x0f, 0xc4 }, 0, .none, .sse2 }, + + .{ .pshufhw, .rmi, &.{ .xmm, .xmm_m128, .imm8 }, &.{ 0xf3, 0x0f, 0x70 }, 0, .none, .sse2 }, + + .{ .pshuflw, .rmi, &.{ .xmm, .xmm_m128, .imm8 }, &.{ 0xf2, 0x0f, 0x70 }, 0, .none, .sse2 }, + + .{ .psrld, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xd2 }, 0, .none, .sse2 }, + .{ .psrld, .mi, &.{ .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x72 }, 2, .none, .sse2 }, + + .{ .psrlq, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xd3 }, 0, .none, .sse2 }, + .{ .psrlq, .mi, &.{ .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x73 }, 2, .none, .sse2 }, + + .{ .psrlw, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xd1 }, 0, .none, .sse2 }, + .{ .psrlw, .mi, &.{ .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x71 }, 2, .none, .sse2 }, + + .{ .punpckhbw, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x68 }, 0, .none, .sse2 }, + + .{ .punpckhdq, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x6a }, 0, .none, .sse2 }, + + .{ .punpckhqdq, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x6d }, 0, .none, .sse2 }, + + .{ .punpckhwd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x69 
}, 0, .none, .sse2 }, + + .{ .punpcklbw, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x60 }, 0, .none, .sse2 }, + + .{ .punpckldq, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x62 }, 0, .none, .sse2 }, + + .{ .punpcklqdq, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x6c }, 0, .none, .sse2 }, + + .{ .punpcklwd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x61 }, 0, .none, .sse2 }, .{ .sqrtpd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x51 }, 0, .none, .sse2 }, .{ .sqrtsd, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x51 }, 0, .none, .sse2 }, @@ -927,12 +959,59 @@ pub const table = [_]Entry{ .{ .xorpd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x57 }, 0, .none, .sse2 }, + // SSE3 + .{ .movddup, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x12 }, 0, .none, .sse3 }, + + .{ .movshdup, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0xf3, 0x0f, 0x16 }, 0, .none, .sse3 }, + + .{ .movsldup, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0xf3, 0x0f, 0x12 }, 0, .none, .sse3 }, + // SSE4.1 - .{ .pextrw, .mri, &.{ .rm16, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x15 }, 0, .none, .sse4_1 }, + .{ .pextrw, .mri, &.{ .r32_m16, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x15 }, 0, .none, .sse4_1 }, + .{ .pextrw, .mri, &.{ .r64_m16, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x15 }, 0, .long, .sse4_1 }, .{ .roundss, .rmi, &.{ .xmm, .xmm_m32, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x0a }, 0, .none, .sse4_1 }, .{ .roundsd, .rmi, &.{ .xmm, .xmm_m64, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x0b }, 0, .none, .sse4_1 }, + // AVX + .{ .vmovddup, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x12 }, 0, .vex_128, .avx }, + + .{ .vmovshdup, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0xf3, 0x0f, 0x16 }, 0, .vex_128, .avx }, + + .{ .vmovsldup, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0xf3, 0x0f, 0x12 }, 0, .vex_128, .avx }, + + .{ .vpextrw, .mri, &.{ .r32, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x15 }, 0, .vex_128, .avx }, + .{ .vpextrw, .mri, &.{ .r64, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x15 }, 0, .vex_128_long, .avx }, + .{ .vpextrw, .mri, &.{ .r32_m16, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x15 }, 0, .vex_128, .avx }, + .{ .vpextrw, .mri, &.{ .r64_m16, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x15 }, 0, .vex_128_long, .avx }, + + .{ .vpinsrw, .rvmi, &.{ .xmm, .xmm, .r32_m16, .imm8 }, &.{ 0x66, 0x0f, 0xc4 }, 0, .vex_128, .avx }, + + .{ .vpsrld, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xd2 }, 0, .vex_128, .avx }, + .{ .vpsrld, .vmi, &.{ .xmm, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x72 }, 2, .vex_128, .avx }, + + .{ .vpsrlq, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xd3 }, 0, .vex_128, .avx }, + .{ .vpsrlq, .vmi, &.{ .xmm, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x73 }, 2, .vex_128, .avx }, + + .{ .vpsrlw, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xd1 }, 0, .vex_128, .avx }, + .{ .vpsrlw, .vmi, &.{ .xmm, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x71 }, 2, .vex_128, .avx }, + + .{ .vpunpckhbw, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x68 }, 0, .vex_128, .avx }, + + .{ .vpunpckhdq, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x6a }, 0, .vex_128, .avx }, + + .{ .vpunpckhqdq, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x6d }, 0, .vex_128, .avx }, + + .{ .vpunpckhwd, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x69 }, 0, .vex_128, .avx }, + + .{ .vpunpcklbw, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x60 }, 0, .vex_128, .avx }, + + .{ .vpunpckldq, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x62 }, 0, .vex_128, .avx }, + + .{ .vpunpcklqdq, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x6c }, 0, .vex_128, .avx }, + + .{ .vpunpcklwd, .rvm, &.{ .xmm, .xmm, .xmm_m128 
}, &.{ 0x66, 0x0f, 0x61 }, 0, .vex_128, .avx }, + // F16C .{ .vcvtph2ps, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0x66, 0x0f, 0x38, 0x13 }, 0, .vex_128, .f16c }, -- cgit v1.2.3 From 5d4288c5f6c69bdd4cbd9b3580016828e38f087d Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Sat, 6 May 2023 06:00:22 -0400 Subject: x86_64: fix unordered float equality --- src/arch/x86_64/CodeGen.zig | 415 +++++++++++++++++++++++-------------- src/arch/x86_64/Emit.zig | 273 ++++++++++++------------ src/arch/x86_64/Lower.zig | 220 +++++++++++++++----- src/arch/x86_64/Mir.zig | 34 +-- src/arch/x86_64/bits.zig | 9 + src/arch/x86_64/encoder.zig | 4 +- test/behavior/bugs/12891.zig | 6 - test/behavior/field_parent_ptr.zig | 1 - 8 files changed, 599 insertions(+), 363 deletions(-) (limited to 'src/arch') diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index d24428467a..87eceec347 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -205,16 +205,7 @@ pub const MCValue = union(enum) { fn isMemory(mcv: MCValue) bool { return switch (mcv) { - .memory, - .load_direct, - .lea_direct, - .load_got, - .lea_got, - .load_tlv, - .lea_tlv, - .load_frame, - .lea_frame, - => true, + .memory, .indirect, .load_frame => true, else => false, }; } @@ -937,7 +928,7 @@ fn formatWipMir( .target = data.self.target, .src_loc = data.self.src_loc, }; - for (lower.lowerMir(data.self.mir_instructions.get(data.inst)) catch |err| switch (err) { + for ((lower.lowerMir(data.inst) catch |err| switch (err) { error.LowerFail => { defer { lower.err_msg.?.deinit(data.self.gpa); @@ -955,7 +946,7 @@ fn formatWipMir( return; }, else => |e| return e, - }) |lower_inst| try writer.print(" | {}", .{lower_inst}); + }).insts) |lowered_inst| try writer.print(" | {}", .{lowered_inst}); } fn fmtWipMir(self: *Self, inst: Mir.Inst.Index) std.fmt.Formatter(formatWipMir) { return .{ .data = .{ .self = self, .inst = inst } }; @@ -1016,7 +1007,14 @@ fn asmSetccRegister(self: *Self, reg: Register, cc: bits.Condition) !void { _ = try self.addInst(.{ .tag = .setcc, .ops = .r_cc, - .data = .{ .r_cc = .{ .r = reg, .cc = cc } }, + .data = .{ .r_cc = .{ + .r = reg, + .scratch = if (cc == .z_and_np or cc == .nz_or_p) + (try self.register_manager.allocReg(null, gp)).to8() + else + .none, + .cc = cc, + } }, }); } @@ -1028,23 +1026,36 @@ fn asmSetccMemory(self: *Self, m: Memory, cc: bits.Condition) !void { .rip => .m_rip_cc, else => unreachable, }, - .data = .{ .x_cc = .{ .cc = cc, .payload = switch (m) { - .sib => try self.addExtra(Mir.MemorySib.encode(m)), - .rip => try self.addExtra(Mir.MemoryRip.encode(m)), - else => unreachable, - } } }, + .data = .{ .x_cc = .{ + .scratch = if (cc == .z_and_np or cc == .nz_or_p) + (try self.register_manager.allocReg(null, gp)).to8() + else + .none, + .cc = cc, + .payload = switch (m) { + .sib => try self.addExtra(Mir.MemorySib.encode(m)), + .rip => try self.addExtra(Mir.MemoryRip.encode(m)), + else => unreachable, + }, + } }, }); } +/// A `cc` of `.z_and_np` clobbers `reg2`! 
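+/// (Rationale, from `mirCmovcc` in Lower.zig below: `.z_and_np` is lowered to
+/// `cmovnz reg2, reg1` followed by `cmovnp reg1, reg2`, so `reg2` doubles as
+/// scratch storage for the intermediate result.)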
fn asmCmovccRegisterRegister(self: *Self, reg1: Register, reg2: Register, cc: bits.Condition) !void { _ = try self.addInst(.{ .tag = .cmovcc, .ops = .rr_cc, - .data = .{ .rr_cc = .{ .r1 = reg1, .r2 = reg2, .cc = cc } }, + .data = .{ .rr_cc = .{ + .r1 = reg1, + .r2 = reg2, + .cc = cc, + } }, }); } fn asmCmovccRegisterMemory(self: *Self, reg: Register, m: Memory, cc: bits.Condition) !void { + assert(cc != .z_and_np); // not supported _ = try self.addInst(.{ .tag = .cmovcc, .ops = switch (m) { @@ -1052,11 +1063,15 @@ fn asmCmovccRegisterMemory(self: *Self, reg: Register, m: Memory, cc: bits.Condi .rip => .rm_rip_cc, else => unreachable, }, - .data = .{ .rx_cc = .{ .r = reg, .cc = cc, .payload = switch (m) { - .sib => try self.addExtra(Mir.MemorySib.encode(m)), - .rip => try self.addExtra(Mir.MemoryRip.encode(m)), - else => unreachable, - } } }, + .data = .{ .rx_cc = .{ + .r = reg, + .cc = cc, + .payload = switch (m) { + .sib => try self.addExtra(Mir.MemorySib.encode(m)), + .rip => try self.addExtra(Mir.MemoryRip.encode(m)), + else => unreachable, + }, + } }, }); } @@ -1131,10 +1146,13 @@ fn asmRegisterImmediate(self: *Self, tag: Mir.Inst.Tag, reg: Register, imm: Imme .tag = tag, .ops = ops, .data = switch (ops) { - .ri_s, .ri_u => .{ .ri = .{ .r = reg, .i = switch (imm) { - .signed => |s| @bitCast(u32, s), - .unsigned => |u| @intCast(u32, u), - } } }, + .ri_s, .ri_u => .{ .ri = .{ + .r = reg, + .i = switch (imm) { + .signed => |s| @bitCast(u32, s), + .unsigned => |u| @intCast(u32, u), + }, + } }, .ri64 => .{ .rx = .{ .r = reg, .payload = try self.addExtra(Mir.Imm64.encode(imm.unsigned)), @@ -1171,10 +1189,14 @@ fn asmRegisterRegisterImmediate( .signed => .rri_s, .unsigned => .rri_u, }, - .data = .{ .rri = .{ .r1 = reg1, .r2 = reg2, .i = switch (imm) { - .signed => |s| @bitCast(u32, s), - .unsigned => |u| @intCast(u32, u), - } } }, + .data = .{ .rri = .{ + .r1 = reg1, + .r2 = reg2, + .i = switch (imm) { + .signed => |s| @bitCast(u32, s), + .unsigned => |u| @intCast(u32, u), + }, + } }, }); } @@ -1202,11 +1224,14 @@ fn asmRegisterMemory(self: *Self, tag: Mir.Inst.Tag, reg: Register, m: Memory) ! 
.rip => .rm_rip, else => unreachable, }, - .data = .{ .rx = .{ .r = reg, .payload = switch (m) { - .sib => try self.addExtra(Mir.MemorySib.encode(m)), - .rip => try self.addExtra(Mir.MemoryRip.encode(m)), - else => unreachable, - } } }, + .data = .{ .rx = .{ + .r = reg, + .payload = switch (m) { + .sib => try self.addExtra(Mir.MemorySib.encode(m)), + .rip => try self.addExtra(Mir.MemoryRip.encode(m)), + else => unreachable, + }, + } }, }); } @@ -1224,11 +1249,43 @@ fn asmRegisterMemoryImmediate( .rip => .rmi_rip, else => unreachable, }, - .data = .{ .rix = .{ .r = reg, .i = @intCast(u8, imm.unsigned), .payload = switch (m) { - .sib => try self.addExtra(Mir.MemorySib.encode(m)), - .rip => try self.addExtra(Mir.MemoryRip.encode(m)), + .data = .{ .rix = .{ + .r = reg, + .i = @intCast(u8, imm.unsigned), + .payload = switch (m) { + .sib => try self.addExtra(Mir.MemorySib.encode(m)), + .rip => try self.addExtra(Mir.MemoryRip.encode(m)), + else => unreachable, + }, + } }, + }); +} + +fn asmRegisterRegisterMemoryImmediate( + self: *Self, + tag: Mir.Inst.Tag, + reg1: Register, + reg2: Register, + m: Memory, + imm: Immediate, +) !void { + _ = try self.addInst(.{ + .tag = tag, + .ops = switch (m) { + .sib => .rrmi_sib, + .rip => .rrmi_rip, else => unreachable, - } } }, + }, + .data = .{ .rrix = .{ + .r1 = reg1, + .r2 = reg2, + .i = @intCast(u8, imm.unsigned), + .payload = switch (m) { + .sib => try self.addExtra(Mir.MemorySib.encode(m)), + .rip => try self.addExtra(Mir.MemoryRip.encode(m)), + else => unreachable, + }, + } }, }); } @@ -1240,11 +1297,14 @@ fn asmMemoryRegister(self: *Self, tag: Mir.Inst.Tag, m: Memory, reg: Register) ! .rip => .mr_rip, else => unreachable, }, - .data = .{ .rx = .{ .r = reg, .payload = switch (m) { - .sib => try self.addExtra(Mir.MemorySib.encode(m)), - .rip => try self.addExtra(Mir.MemoryRip.encode(m)), - else => unreachable, - } } }, + .data = .{ .rx = .{ + .r = reg, + .payload = switch (m) { + .sib => try self.addExtra(Mir.MemorySib.encode(m)), + .rip => try self.addExtra(Mir.MemoryRip.encode(m)), + else => unreachable, + }, + } }, }); } @@ -1262,14 +1322,17 @@ fn asmMemoryImmediate(self: *Self, tag: Mir.Inst.Tag, m: Memory, imm: Immediate) }, else => unreachable, }, - .data = .{ .ix = .{ .i = switch (imm) { - .signed => |s| @bitCast(u32, s), - .unsigned => |u| @intCast(u32, u), - }, .payload = switch (m) { - .sib => try self.addExtra(Mir.MemorySib.encode(m)), - .rip => try self.addExtra(Mir.MemoryRip.encode(m)), - else => unreachable, - } } }, + .data = .{ .ix = .{ + .i = switch (imm) { + .signed => |s| @bitCast(u32, s), + .unsigned => |u| @intCast(u32, u), + }, + .payload = switch (m) { + .sib => try self.addExtra(Mir.MemorySib.encode(m)), + .rip => try self.addExtra(Mir.MemoryRip.encode(m)), + else => unreachable, + }, + } }, }); } @@ -6612,11 +6675,13 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier _ = try self.addInst(.{ .tag = .mov_linker, .ops = .import_reloc, - .data = .{ .payload = try self.addExtra(Mir.LeaRegisterReloc{ - .reg = @enumToInt(Register.rax), - .atom_index = atom_index, - .sym_index = sym_index, - }) }, + .data = .{ .rx = .{ + .r = .rax, + .payload = try self.addExtra(Mir.Reloc{ + .atom_index = atom_index, + .sym_index = sym_index, + }), + } }, }); try self.asmRegister(.call, .rax); } else if (self.bin_file.cast(link.File.MachO)) |macho_file| { @@ -6695,8 +6760,6 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void { fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { const 
bin_op = self.air.instructions.items(.data)[inst].bin_op; const ty = self.air.typeOf(bin_op.lhs); - const ty_abi_size = ty.abiSize(self.target.*); - const can_reuse = ty_abi_size <= 8; try self.spillEflagsIfOccupied(); self.eflags_inst = inst; @@ -6715,69 +6778,93 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { }; defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock); - const dst_mem_ok = !ty.isRuntimeFloat(); - var flipped = false; - const dst_mcv: MCValue = if (can_reuse and !lhs_mcv.isImmediate() and - (dst_mem_ok or lhs_mcv.isRegister()) and self.liveness.operandDies(inst, 0)) - lhs_mcv - else if (can_reuse and !rhs_mcv.isImmediate() and - (dst_mem_ok or rhs_mcv.isRegister()) and self.liveness.operandDies(inst, 1)) - dst: { - flipped = true; - break :dst rhs_mcv; - } else if (dst_mem_ok) dst: { - const dst_mcv = try self.allocTempRegOrMem(ty, true); - try self.genCopy(ty, dst_mcv, lhs_mcv); - break :dst dst_mcv; - } else .{ .register = try self.copyToTmpRegister(ty, lhs_mcv) }; - const dst_lock = switch (dst_mcv) { - .register => |reg| self.register_manager.lockReg(reg), - else => null, - }; - defer if (dst_lock) |lock| self.register_manager.unlockReg(lock); - - const src_mcv = if (flipped) lhs_mcv else rhs_mcv; - switch (ty.zigTypeTag()) { - else => try self.genBinOpMir(.cmp, ty, dst_mcv, src_mcv), - .Float => switch (ty.floatBits(self.target.*)) { - 16 => if (self.hasFeature(.f16c)) { - const dst_reg = dst_mcv.getReg().?.to128(); + const result = MCValue{ + .eflags = switch (ty.zigTypeTag()) { + else => result: { + var flipped = false; + const dst_mcv: MCValue = if (lhs_mcv.isRegister() or lhs_mcv.isMemory()) + lhs_mcv + else if (rhs_mcv.isRegister() or rhs_mcv.isMemory()) dst: { + flipped = true; + break :dst rhs_mcv; + } else .{ .register = try self.copyToTmpRegister(ty, lhs_mcv) }; + const dst_lock = switch (dst_mcv) { + .register => |reg| self.register_manager.lockReg(reg), + else => null, + }; + defer if (dst_lock) |lock| self.register_manager.unlockReg(lock); + const src_mcv = if (flipped) lhs_mcv else rhs_mcv; - const tmp_reg = (try self.register_manager.allocReg(null, sse)).to128(); - const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg); - defer self.register_manager.unlockReg(tmp_lock); + try self.genBinOpMir(.cmp, ty, dst_mcv, src_mcv); + break :result Condition.fromCompareOperator( + if (ty.isAbiInt()) ty.intInfo(self.target.*).signedness else .unsigned, + if (flipped) op.reverse() else op, + ); + }, + .Float => result: { + const flipped = switch (op) { + .lt, .lte => true, + .eq, .gte, .gt, .neq => false, + }; - if (src_mcv.isRegister()) - try self.asmRegisterRegisterRegister( - .vpunpcklwd, - dst_reg, - dst_reg, - src_mcv.getReg().?.to128(), - ) + const dst_mcv = if (flipped) rhs_mcv else lhs_mcv; + const dst_reg = if (dst_mcv.isRegister()) + dst_mcv.getReg().? 
else - try self.asmRegisterMemoryImmediate( - .vpinsrw, - dst_reg, - src_mcv.mem(.word), - Immediate.u(1), - ); - try self.asmRegisterRegister(.vcvtph2ps, dst_reg, dst_reg); - try self.asmRegisterRegister(.vmovshdup, tmp_reg, dst_reg); - try self.genBinOpMir(.ucomiss, ty, dst_mcv, .{ .register = tmp_reg }); - } else return self.fail("TODO implement airCmp for {}", .{ - ty.fmt(self.bin_file.options.module.?), - }), - 32 => try self.genBinOpMir(.ucomiss, ty, dst_mcv, src_mcv), - 64 => try self.genBinOpMir(.ucomisd, ty, dst_mcv, src_mcv), - else => return self.fail("TODO implement airCmp for {}", .{ - ty.fmt(self.bin_file.options.module.?), - }), - }, - } + try self.copyToTmpRegister(ty, dst_mcv); + const dst_lock = self.register_manager.lockReg(dst_reg); + defer if (dst_lock) |lock| self.register_manager.unlockReg(lock); + const src_mcv = if (flipped) lhs_mcv else rhs_mcv; + + switch (ty.floatBits(self.target.*)) { + 16 => if (self.hasFeature(.f16c)) { + const tmp1_reg = (try self.register_manager.allocReg(null, sse)).to128(); + const tmp1_mcv = MCValue{ .register = tmp1_reg }; + const tmp1_lock = self.register_manager.lockRegAssumeUnused(tmp1_reg); + defer self.register_manager.unlockReg(tmp1_lock); + + const tmp2_reg = (try self.register_manager.allocReg(null, sse)).to128(); + const tmp2_mcv = MCValue{ .register = tmp2_reg }; + const tmp2_lock = self.register_manager.lockRegAssumeUnused(tmp2_reg); + defer self.register_manager.unlockReg(tmp2_lock); + + if (src_mcv.isRegister()) + try self.asmRegisterRegisterRegister( + .vpunpcklwd, + tmp1_reg, + dst_reg.to128(), + src_mcv.getReg().?.to128(), + ) + else + try self.asmRegisterRegisterMemoryImmediate( + .vpinsrw, + tmp1_reg, + dst_reg.to128(), + src_mcv.mem(.word), + Immediate.u(1), + ); + try self.asmRegisterRegister(.vcvtph2ps, tmp1_reg, tmp1_reg); + try self.asmRegisterRegister(.vmovshdup, tmp2_reg, tmp1_reg); + try self.genBinOpMir(.ucomiss, ty, tmp1_mcv, tmp2_mcv); + } else return self.fail("TODO implement airCmp for {}", .{ + ty.fmt(self.bin_file.options.module.?), + }), + 32 => try self.genBinOpMir(.ucomiss, ty, .{ .register = dst_reg }, src_mcv), + 64 => try self.genBinOpMir(.ucomisd, ty, .{ .register = dst_reg }, src_mcv), + else => return self.fail("TODO implement airCmp for {}", .{ + ty.fmt(self.bin_file.options.module.?), + }), + } - const signedness = if (ty.isAbiInt()) ty.intInfo(self.target.*).signedness else .unsigned; - const result = MCValue{ - .eflags = Condition.fromCompareOperator(signedness, if (flipped) op.reverse() else op), + break :result switch (if (flipped) op.reverse() else op) { + .lt, .lte => unreachable, // required to have been canonicalized to gt(e) + .gt => .a, + .gte => .ae, + .eq => .z_and_np, + .neq => .nz_or_p, + }; + }, + }, }; return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } @@ -7929,11 +8016,13 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr _ = try self.addInst(.{ .tag = .mov_linker, .ops = .direct_reloc, - .data = .{ .payload = try self.addExtra(Mir.LeaRegisterReloc{ - .reg = @enumToInt(dst_reg.to64()), - .atom_index = atom_index, - .sym_index = sym_index, - }) }, + .data = .{ .rx = .{ + .r = dst_reg.to64(), + .payload = try self.addExtra(Mir.Reloc{ + .atom_index = atom_index, + .sym_index = sym_index, + }), + } }, }); return; }, @@ -7975,11 +8064,13 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr .lea_got => .got_reloc, else => unreachable, }, - .data = .{ .payload = try 
self.addExtra(Mir.LeaRegisterReloc{ - .reg = @enumToInt(dst_reg.to64()), - .atom_index = atom_index, - .sym_index = sym_index, - }) }, + .data = .{ .rx = .{ + .r = dst_reg.to64(), + .payload = try self.addExtra(Mir.Reloc{ + .atom_index = atom_index, + .sym_index = sym_index, + }), + } }, }); }, .lea_tlv => |sym_index| { @@ -7988,11 +8079,13 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr _ = try self.addInst(.{ .tag = .lea_linker, .ops = .tlv_reloc, - .data = .{ .payload = try self.addExtra(Mir.LeaRegisterReloc{ - .reg = @enumToInt(Register.rdi), - .atom_index = atom_index, - .sym_index = sym_index, - }) }, + .data = .{ .rx = .{ + .r = .rdi, + .payload = try self.addExtra(Mir.Reloc{ + .atom_index = atom_index, + .sym_index = sym_index, + }), + } }, }); // TODO: spill registers before calling try self.asmMemory(.call, Memory.sib(.qword, .{ .base = .{ .reg = .rdi } })); @@ -8463,14 +8556,20 @@ fn airCmpxchg(self: *Self, inst: Air.Inst.Index) !void { try self.spillEflagsIfOccupied(); if (val_abi_size <= 8) { - _ = try self.addInst(.{ .tag = .cmpxchg, .ops = .lock_mr_sib, .data = .{ .rx = .{ - .r = registerAlias(new_reg.?, val_abi_size), - .payload = try self.addExtra(Mir.MemorySib.encode(ptr_mem)), - } } }); + _ = try self.addInst(.{ + .tag = .cmpxchg, + .ops = .lock_mr_sib, + .data = .{ .rx = .{ + .r = registerAlias(new_reg.?, val_abi_size), + .payload = try self.addExtra(Mir.MemorySib.encode(ptr_mem)), + } }, + }); } else { - _ = try self.addInst(.{ .tag = .cmpxchgb, .ops = .lock_m_sib, .data = .{ - .payload = try self.addExtra(Mir.MemorySib.encode(ptr_mem)), - } }); + _ = try self.addInst(.{ + .tag = .cmpxchgb, + .ops = .lock_m_sib, + .data = .{ .payload = try self.addExtra(Mir.MemorySib.encode(ptr_mem)) }, + }); } const result: MCValue = result: { @@ -8571,14 +8670,18 @@ fn atomicOp( if (rmw_op == std.builtin.AtomicRmwOp.Sub and tag == .xadd) { try self.genUnOpMir(.neg, val_ty, dst_mcv); } - _ = try self.addInst(.{ .tag = tag, .ops = switch (tag) { - .mov, .xchg => .mr_sib, - .xadd, .add, .sub, .@"and", .@"or", .xor => .lock_mr_sib, - else => unreachable, - }, .data = .{ .rx = .{ - .r = registerAlias(dst_reg, val_abi_size), - .payload = try self.addExtra(Mir.MemorySib.encode(ptr_mem)), - } } }); + _ = try self.addInst(.{ + .tag = tag, + .ops = switch (tag) { + .mov, .xchg => .mr_sib, + .xadd, .add, .sub, .@"and", .@"or", .xor => .lock_mr_sib, + else => unreachable, + }, + .data = .{ .rx = .{ + .r = registerAlias(dst_reg, val_abi_size), + .payload = try self.addExtra(Mir.MemorySib.encode(ptr_mem)), + } }, + }); return if (unused) .unreach else dst_mcv; }, @@ -8645,10 +8748,14 @@ fn atomicOp( } }, }; - _ = try self.addInst(.{ .tag = .cmpxchg, .ops = .lock_mr_sib, .data = .{ .rx = .{ - .r = registerAlias(tmp_reg, val_abi_size), - .payload = try self.addExtra(Mir.MemorySib.encode(ptr_mem)), - } } }); + _ = try self.addInst(.{ + .tag = .cmpxchg, + .ops = .lock_mr_sib, + .data = .{ .rx = .{ + .r = registerAlias(tmp_reg, val_abi_size), + .payload = try self.addExtra(Mir.MemorySib.encode(ptr_mem)), + } }, + }); _ = try self.asmJccReloc(loop, .ne); return if (unused) .unreach else .{ .register = .rax }; } else { diff --git a/src/arch/x86_64/Emit.zig b/src/arch/x86_64/Emit.zig index c6c8f7995c..3574d52878 100644 --- a/src/arch/x86_64/Emit.zig +++ b/src/arch/x86_64/Emit.zig @@ -18,142 +18,149 @@ pub const Error = Lower.Error || error{ }; pub fn emitMir(emit: *Emit) Error!void { - for (0..emit.lower.mir.instructions.len) |i| { - const index = 
@intCast(Mir.Inst.Index, i); - const inst = emit.lower.mir.instructions.get(index); - - const start_offset = @intCast(u32, emit.code.items.len); - try emit.code_offset_mapping.putNoClobber(emit.lower.allocator, index, start_offset); - for (try emit.lower.lowerMir(inst)) |lower_inst| try lower_inst.encode(emit.code.writer(), .{}); - const end_offset = @intCast(u32, emit.code.items.len); - - switch (inst.tag) { - else => {}, - - .jmp_reloc => try emit.relocs.append(emit.lower.allocator, .{ - .source = start_offset, - .target = inst.data.inst, - .offset = end_offset - 4, - .length = 5, - }), - - .call_extern => if (emit.bin_file.cast(link.File.MachO)) |macho_file| { - // Add relocation to the decl. - const atom_index = macho_file.getAtomIndexForSymbol( - .{ .sym_index = inst.data.relocation.atom_index, .file = null }, - ).?; - const target = macho_file.getGlobalByIndex(inst.data.relocation.sym_index); - try link.File.MachO.Atom.addRelocation(macho_file, atom_index, .{ - .type = .branch, + for (0..emit.lower.mir.instructions.len) |mir_i| { + const mir_index = @intCast(Mir.Inst.Index, mir_i); + try emit.code_offset_mapping.putNoClobber( + emit.lower.allocator, + mir_index, + @intCast(u32, emit.code.items.len), + ); + const lowered = try emit.lower.lowerMir(mir_index); + var lowered_relocs = lowered.relocs; + for (lowered.insts, 0..) |lowered_inst, lowered_index| { + const start_offset = @intCast(u32, emit.code.items.len); + try lowered_inst.encode(emit.code.writer(), .{}); + const end_offset = @intCast(u32, emit.code.items.len); + while (lowered_relocs.len > 0 and + lowered_relocs[0].lowered_inst_index == lowered_index) : ({ + lowered_relocs = lowered_relocs[1..]; + }) switch (lowered_relocs[0].target) { + .inst => |target| try emit.relocs.append(emit.lower.allocator, .{ + .source = start_offset, .target = target, .offset = end_offset - 4, - .addend = 0, - .pcrel = true, - .length = 2, - }); - } else if (emit.bin_file.cast(link.File.Coff)) |coff_file| { - // Add relocation to the decl. 
- const atom_index = coff_file.getAtomIndexForSymbol( - .{ .sym_index = inst.data.relocation.atom_index, .file = null }, - ).?; - const target = coff_file.getGlobalByIndex(inst.data.relocation.sym_index); - try link.File.Coff.Atom.addRelocation(coff_file, atom_index, .{ - .type = .direct, - .target = target, - .offset = end_offset - 4, - .addend = 0, - .pcrel = true, - .length = 2, - }); - } else return emit.fail("TODO implement {} for {}", .{ inst.tag, emit.bin_file.tag }), - - .mov_linker, .lea_linker => if (emit.bin_file.cast(link.File.MachO)) |macho_file| { - const metadata = - emit.lower.mir.extraData(Mir.LeaRegisterReloc, inst.data.payload).data; - const atom_index = macho_file.getAtomIndexForSymbol(.{ - .sym_index = metadata.atom_index, - .file = null, - }).?; - try link.File.MachO.Atom.addRelocation(macho_file, atom_index, .{ - .type = switch (inst.ops) { - .got_reloc => .got, - .direct_reloc => .signed, - .tlv_reloc => .tlv, - else => unreachable, - }, - .target = .{ .sym_index = metadata.sym_index, .file = null }, - .offset = @intCast(u32, end_offset - 4), - .addend = 0, - .pcrel = true, - .length = 2, - }); - } else if (emit.bin_file.cast(link.File.Coff)) |coff_file| { - const metadata = - emit.lower.mir.extraData(Mir.LeaRegisterReloc, inst.data.payload).data; - const atom_index = coff_file.getAtomIndexForSymbol(.{ - .sym_index = metadata.atom_index, - .file = null, - }).?; - try link.File.Coff.Atom.addRelocation(coff_file, atom_index, .{ - .type = switch (inst.ops) { - .got_reloc => .got, - .direct_reloc => .direct, - .import_reloc => .import, - else => unreachable, - }, - .target = switch (inst.ops) { - .got_reloc, - .direct_reloc, - => .{ .sym_index = metadata.sym_index, .file = null }, - .import_reloc => coff_file.getGlobalByIndex(metadata.sym_index), - else => unreachable, - }, - .offset = @intCast(u32, end_offset - 4), - .addend = 0, - .pcrel = true, - .length = 2, - }); - } else return emit.fail("TODO implement {} for {}", .{ inst.tag, emit.bin_file.tag }), - - .jcc => try emit.relocs.append(emit.lower.allocator, .{ - .source = start_offset, - .target = inst.data.inst_cc.inst, - .offset = end_offset - 4, - .length = 6, - }), - - .dbg_line => try emit.dbgAdvancePCAndLine( - inst.data.line_column.line, - inst.data.line_column.column, - ), - - .dbg_prologue_end => { - switch (emit.debug_output) { - .dwarf => |dw| { - try dw.setPrologueEnd(); - log.debug("mirDbgPrologueEnd (line={d}, col={d})", .{ - emit.prev_di_line, emit.prev_di_column, - }); - try emit.dbgAdvancePCAndLine(emit.prev_di_line, emit.prev_di_column); - }, - .plan9 => {}, - .none => {}, - } - }, + .length = @intCast(u5, end_offset - start_offset), + }), + .@"extern" => |symbol| if (emit.bin_file.cast(link.File.MachO)) |macho_file| { + // Add relocation to the decl. + const atom_index = macho_file.getAtomIndexForSymbol( + .{ .sym_index = symbol.atom_index, .file = null }, + ).?; + const target = macho_file.getGlobalByIndex(symbol.sym_index); + try link.File.MachO.Atom.addRelocation(macho_file, atom_index, .{ + .type = .branch, + .target = target, + .offset = end_offset - 4, + .addend = 0, + .pcrel = true, + .length = 2, + }); + } else if (emit.bin_file.cast(link.File.Coff)) |coff_file| { + // Add relocation to the decl. 
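+ // (The lowered `call` ends with a 4-byte pc-relative displacement,
+ // which is why the relocation `offset` below is `end_offset - 4`.)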
+ const atom_index = coff_file.getAtomIndexForSymbol( + .{ .sym_index = symbol.atom_index, .file = null }, + ).?; + const target = coff_file.getGlobalByIndex(symbol.sym_index); + try link.File.Coff.Atom.addRelocation(coff_file, atom_index, .{ + .type = .direct, + .target = target, + .offset = end_offset - 4, + .addend = 0, + .pcrel = true, + .length = 2, + }); + } else return emit.fail("TODO implement extern reloc for {s}", .{ + @tagName(emit.bin_file.tag), + }), + .linker_got, + .linker_direct, + .linker_import, + .linker_tlv, + => |symbol| if (emit.bin_file.cast(link.File.MachO)) |macho_file| { + const atom_index = macho_file.getAtomIndexForSymbol(.{ + .sym_index = symbol.atom_index, + .file = null, + }).?; + try link.File.MachO.Atom.addRelocation(macho_file, atom_index, .{ + .type = switch (lowered_relocs[0].target) { + .linker_got => .got, + .linker_direct => .signed, + .linker_tlv => .tlv, + else => unreachable, + }, + .target = .{ .sym_index = symbol.sym_index, .file = null }, + .offset = @intCast(u32, end_offset - 4), + .addend = 0, + .pcrel = true, + .length = 2, + }); + } else if (emit.bin_file.cast(link.File.Coff)) |coff_file| { + const atom_index = coff_file.getAtomIndexForSymbol(.{ + .sym_index = symbol.atom_index, + .file = null, + }).?; + try link.File.Coff.Atom.addRelocation(coff_file, atom_index, .{ + .type = switch (lowered_relocs[0].target) { + .linker_got => .got, + .linker_direct => .direct, + .linker_import => .import, + else => unreachable, + }, + .target = switch (lowered_relocs[0].target) { + .linker_got, + .linker_direct, + => .{ .sym_index = symbol.sym_index, .file = null }, + .linker_import => coff_file.getGlobalByIndex(symbol.sym_index), + else => unreachable, + }, + .offset = @intCast(u32, end_offset - 4), + .addend = 0, + .pcrel = true, + .length = 2, + }); + } else return emit.fail("TODO implement linker reloc for {s}", .{ + @tagName(emit.bin_file.tag), + }), + }; + } + std.debug.assert(lowered_relocs.len == 0); - .dbg_epilogue_begin => { - switch (emit.debug_output) { - .dwarf => |dw| { - try dw.setEpilogueBegin(); - log.debug("mirDbgEpilogueBegin (line={d}, col={d})", .{ - emit.prev_di_line, emit.prev_di_column, - }); - try emit.dbgAdvancePCAndLine(emit.prev_di_line, emit.prev_di_column); - }, - .plan9 => {}, - .none => {}, - } - }, + if (lowered.insts.len == 0) { + const mir_inst = emit.lower.mir.instructions.get(mir_index); + switch (mir_inst.tag) { + else => unreachable, + .dead => {}, + .dbg_line => try emit.dbgAdvancePCAndLine( + mir_inst.data.line_column.line, + mir_inst.data.line_column.column, + ), + .dbg_prologue_end => { + switch (emit.debug_output) { + .dwarf => |dw| { + try dw.setPrologueEnd(); + log.debug("mirDbgPrologueEnd (line={d}, col={d})", .{ + emit.prev_di_line, emit.prev_di_column, + }); + try emit.dbgAdvancePCAndLine(emit.prev_di_line, emit.prev_di_column); + }, + .plan9 => {}, + .none => {}, + } + }, + .dbg_epilogue_begin => { + switch (emit.debug_output) { + .dwarf => |dw| { + try dw.setEpilogueBegin(); + log.debug("mirDbgEpilogueBegin (line={d}, col={d})", .{ + emit.prev_di_line, emit.prev_di_column, + }); + try emit.dbgAdvancePCAndLine(emit.prev_di_line, emit.prev_di_column); + }, + .plan9 => {}, + .none => {}, + } + }, + } } } try emit.fixupRelocs(); diff --git a/src/arch/x86_64/Lower.zig b/src/arch/x86_64/Lower.zig index d9482d4b39..d82d5ec300 100644 --- a/src/arch/x86_64/Lower.zig +++ b/src/arch/x86_64/Lower.zig @@ -5,13 +5,22 @@ mir: Mir, target: *const std.Target, err_msg: ?*ErrorMsg = null, src_loc: Module.SrcLoc, -result: [ 
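+// Counts of valid entries in the `result_insts`/`result_relocs` scratch
+// buffers below; the slices returned by `lowerMir` point into them.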
+result_insts_len: u8 = undefined, +result_relocs_len: u8 = undefined, +result_insts: [ std.mem.max(usize, &.{ - abi.Win64.callee_preserved_regs.len, - abi.SysV.callee_preserved_regs.len, + 2, // cmovcc: cmovcc \ cmovcc + 3, // setcc: setcc \ setcc \ logicop + 2, // jcc: jcc \ jcc + abi.Win64.callee_preserved_regs.len, // push_regs/pop_regs + abi.SysV.callee_preserved_regs.len, // push_regs/pop_regs }) ]Instruction = undefined, -result_len: usize = undefined, +result_relocs: [ + std.mem.max(usize, &.{ + 2, // jcc: jcc \ jcc + }) +]Reloc = undefined, pub const Error = error{ OutOfMemory, @@ -20,13 +29,35 @@ pub const Error = error{ CannotEncode, }; -/// The returned slice is overwritten by the next call to lowerMir. -pub fn lowerMir(lower: *Lower, inst: Mir.Inst) Error![]const Instruction { - lower.result = undefined; - errdefer lower.result = undefined; - lower.result_len = 0; - defer lower.result_len = undefined; +pub const Reloc = struct { + lowered_inst_index: u8, + target: Target, + + const Target = union(enum) { + inst: Mir.Inst.Index, + @"extern": Mir.Reloc, + linker_got: Mir.Reloc, + linker_direct: Mir.Reloc, + linker_import: Mir.Reloc, + linker_tlv: Mir.Reloc, + }; +}; +/// The returned slice is overwritten by the next call to lowerMir. +pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct { + insts: []const Instruction, + relocs: []const Reloc, +} { + lower.result_insts = undefined; + lower.result_relocs = undefined; + errdefer lower.result_insts = undefined; + errdefer lower.result_relocs = undefined; + lower.result_insts_len = 0; + lower.result_relocs_len = 0; + defer lower.result_insts_len = undefined; + defer lower.result_relocs_len = undefined; + + const inst = lower.mir.instructions.get(index); switch (inst.tag) { .adc, .add, @@ -185,22 +216,26 @@ pub fn lowerMir(lower: *Lower, inst: Mir.Inst) Error![]const Instruction { .cmpxchgb => try lower.mirCmpxchgBytes(inst), - .jmp_reloc => try lower.emit(.none, .jmp, &.{.{ .imm = Immediate.s(0) }}), + .jmp_reloc => try lower.emitInstWithReloc(.none, .jmp, &.{ + .{ .imm = Immediate.s(0) }, + }, .{ .inst = inst.data.inst }), - .call_extern => try lower.emit(.none, .call, &.{.{ .imm = Immediate.s(0) }}), + .call_extern => try lower.emitInstWithReloc(.none, .call, &.{ + .{ .imm = Immediate.s(0) }, + }, .{ .@"extern" = inst.data.relocation }), - .lea_linker => try lower.mirLeaLinker(inst), - .mov_linker => try lower.mirMovLinker(inst), + .lea_linker => try lower.mirLinker(.lea, inst), + .mov_linker => try lower.mirLinker(.mov, inst), .mov_moffs => try lower.mirMovMoffs(inst), .movsx => try lower.mirMovsx(inst), .cmovcc => try lower.mirCmovcc(inst), .setcc => try lower.mirSetcc(inst), - .jcc => try lower.emit(.none, mnem_cc(.j, inst.data.inst_cc.cc), &.{.{ .imm = Immediate.s(0) }}), + .jcc => try lower.mirJcc(index, inst), - .push_regs => try lower.mirPushPopRegisterList(inst, .push), - .pop_regs => try lower.mirPushPopRegisterList(inst, .pop), + .push_regs => try lower.mirRegisterList(.push, inst), + .pop_regs => try lower.mirRegisterList(.pop, inst), .dbg_line, .dbg_prologue_end, @@ -209,7 +244,10 @@ pub fn lowerMir(lower: *Lower, inst: Mir.Inst) Error![]const Instruction { => {}, } - return lower.result[0..lower.result_len]; + return .{ + .insts = lower.result_insts[0..lower.result_insts_len], + .relocs = lower.result_relocs[0..lower.result_relocs_len], + }; } pub fn fail(lower: *Lower, comptime format: []const u8, args: anytype) Error { @@ -221,7 +259,10 @@ pub fn fail(lower: *Lower, comptime format: []const u8, args: 
anytype) Error { fn mnem_cc(comptime base: @Type(.EnumLiteral), cc: bits.Condition) Mnemonic { return switch (cc) { - inline else => |c| @field(Mnemonic, @tagName(base) ++ @tagName(c)), + inline else => |c| if (@hasField(Mnemonic, @tagName(base) ++ @tagName(c))) + @field(Mnemonic, @tagName(base) ++ @tagName(c)) + else + unreachable, }; } @@ -247,6 +288,8 @@ fn imm(lower: Lower, ops: Mir.Inst.Ops, i: u32) Immediate { .rmi_rip, .mri_sib, .mri_rip, + .rrmi_sib, + .rrmi_rip, => Immediate.u(i), .ri64 => Immediate.u(lower.mir.extraData(Mir.Imm64, i).data.decode()), @@ -267,6 +310,7 @@ fn mem(lower: Lower, ops: Mir.Inst.Ops, payload: u32) Memory { .mr_sib, .mrr_sib, .mri_sib, + .rrmi_sib, .lock_m_sib, .lock_mi_sib_u, .lock_mi_sib_s, @@ -283,6 +327,7 @@ fn mem(lower: Lower, ops: Mir.Inst.Ops, payload: u32) Memory { .mr_rip, .mrr_rip, .mri_rip, + .rrmi_rip, .lock_m_rip, .lock_mi_rip_u, .lock_mi_rip_s, @@ -298,13 +343,28 @@ fn mem(lower: Lower, ops: Mir.Inst.Ops, payload: u32) Memory { }); } -fn emit(lower: *Lower, prefix: Prefix, mnemonic: Mnemonic, ops: []const Operand) Error!void { - lower.result[lower.result_len] = try Instruction.new(prefix, mnemonic, ops); - lower.result_len += 1; +fn emitInst(lower: *Lower, prefix: Prefix, mnemonic: Mnemonic, ops: []const Operand) Error!void { + lower.result_insts[lower.result_insts_len] = try Instruction.new(prefix, mnemonic, ops); + lower.result_insts_len += 1; +} + +fn emitInstWithReloc( + lower: *Lower, + prefix: Prefix, + mnemonic: Mnemonic, + ops: []const Operand, + target: Reloc.Target, +) Error!void { + lower.result_relocs[lower.result_relocs_len] = .{ + .lowered_inst_index = lower.result_insts_len, + .target = target, + }; + lower.result_relocs_len += 1; + try lower.emitInst(prefix, mnemonic, ops); } fn mirGeneric(lower: *Lower, inst: Mir.Inst) Error!void { - try lower.emit(switch (inst.ops) { + try lower.emitInst(switch (inst.ops) { else => .none, .lock_m_sib, .lock_m_rip, @@ -389,13 +449,19 @@ fn mirGeneric(lower: *Lower, inst: Mir.Inst) Error!void { .{ .reg = inst.data.rix.r }, .{ .imm = lower.imm(inst.ops, inst.data.rix.i) }, }, + .rrmi_sib, .rrmi_rip => &.{ + .{ .reg = inst.data.rrix.r1 }, + .{ .reg = inst.data.rrix.r2 }, + .{ .mem = lower.mem(inst.ops, inst.data.rrix.payload) }, + .{ .imm = lower.imm(inst.ops, inst.data.rrix.i) }, + }, else => return lower.fail("TODO lower {s} {s}", .{ @tagName(inst.tag), @tagName(inst.ops) }), }); } fn mirString(lower: *Lower, inst: Mir.Inst) Error!void { switch (inst.ops) { - .string => try lower.emit(switch (inst.data.string.repeat) { + .string => try lower.emitInst(switch (inst.data.string.repeat) { inline else => |repeat| @field(Prefix, @tagName(repeat)), }, switch (inst.tag) { inline .cmps, .lods, .movs, .scas, .stos => |tag| switch (inst.data.string.width) { @@ -414,7 +480,7 @@ fn mirCmpxchgBytes(lower: *Lower, inst: Mir.Inst) Error!void { }, else => return lower.fail("TODO lower {s} {s}", .{ @tagName(inst.tag), @tagName(inst.ops) }), }; - try lower.emit(switch (inst.ops) { + try lower.emitInst(switch (inst.ops) { .m_sib, .m_rip => .none, .lock_m_sib, .lock_m_rip => .lock, else => unreachable, @@ -426,7 +492,7 @@ fn mirCmpxchgBytes(lower: *Lower, inst: Mir.Inst) Error!void { } fn mirMovMoffs(lower: *Lower, inst: Mir.Inst) Error!void { - try lower.emit(switch (inst.ops) { + try lower.emitInst(switch (inst.ops) { .rax_moffs, .moffs_rax => .none, .lock_moffs_rax => .lock, else => return lower.fail("TODO lower {s} {s}", .{ @tagName(inst.tag), @tagName(inst.ops) }), @@ -455,7 +521,7 @@ fn mirMovsx(lower: 
*Lower, inst: Mir.Inst) Error!void { }, else => return lower.fail("TODO lower {s} {s}", .{ @tagName(inst.tag), @tagName(inst.ops) }), }; - try lower.emit(.none, switch (ops[0].bitSize()) { + try lower.emitInst(.none, switch (ops[0].bitSize()) { 32, 64 => switch (ops[1].bitSize()) { 32 => .movsxd, else => .movsx, @@ -465,32 +531,82 @@ fn mirMovsx(lower: *Lower, inst: Mir.Inst) Error!void { } fn mirCmovcc(lower: *Lower, inst: Mir.Inst) Error!void { - switch (inst.ops) { - .rr_cc => try lower.emit(.none, mnem_cc(.cmov, inst.data.rr_cc.cc), &.{ + const data: struct { cc: bits.Condition, ops: [2]Operand } = switch (inst.ops) { + .rr_cc => .{ .cc = inst.data.rr_cc.cc, .ops = .{ .{ .reg = inst.data.rr_cc.r1 }, .{ .reg = inst.data.rr_cc.r2 }, - }), - .rm_sib_cc, .rm_rip_cc => try lower.emit(.none, mnem_cc(.cmov, inst.data.rx_cc.cc), &.{ + } }, + .rm_sib_cc, .rm_rip_cc => .{ .cc = inst.data.rx_cc.cc, .ops = .{ .{ .reg = inst.data.rx_cc.r }, .{ .mem = lower.mem(inst.ops, inst.data.rx_cc.payload) }, - }), + } }, else => return lower.fail("TODO lower {s} {s}", .{ @tagName(inst.tag), @tagName(inst.ops) }), + }; + switch (data.cc) { + else => |cc| try lower.emitInst(.none, mnem_cc(.cmov, cc), &data.ops), + .z_and_np => { + try lower.emitInst(.none, mnem_cc(.cmov, .nz), &.{ data.ops[1], data.ops[0] }); + try lower.emitInst(.none, mnem_cc(.cmov, .np), &data.ops); + }, + .nz_or_p => { + try lower.emitInst(.none, mnem_cc(.cmov, .nz), &data.ops); + try lower.emitInst(.none, mnem_cc(.cmov, .p), &data.ops); + }, } } fn mirSetcc(lower: *Lower, inst: Mir.Inst) Error!void { - switch (inst.ops) { - .r_cc => try lower.emit(.none, mnem_cc(.set, inst.data.r_cc.cc), &.{ + const data: struct { cc: bits.Condition, ops: [2]Operand } = switch (inst.ops) { + .r_cc => .{ .cc = inst.data.r_cc.cc, .ops = .{ .{ .reg = inst.data.r_cc.r }, - }), - .m_sib_cc, .m_rip_cc => try lower.emit(.none, mnem_cc(.set, inst.data.x_cc.cc), &.{ + .{ .reg = inst.data.r_cc.scratch }, + } }, + .m_sib_cc, .m_rip_cc => .{ .cc = inst.data.x_cc.cc, .ops = .{ .{ .mem = lower.mem(inst.ops, inst.data.x_cc.payload) }, - }), + .{ .reg = inst.data.x_cc.scratch }, + } }, else => return lower.fail("TODO lower {s} {s}", .{ @tagName(inst.tag), @tagName(inst.ops) }), + }; + switch (data.cc) { + else => |cc| try lower.emitInst(.none, mnem_cc(.set, cc), data.ops[0..1]), + .z_and_np => { + try lower.emitInst(.none, mnem_cc(.set, .z), data.ops[0..1]); + try lower.emitInst(.none, mnem_cc(.set, .np), data.ops[1..2]); + try lower.emitInst(.none, .@"and", data.ops[0..2]); + }, + .nz_or_p => { + try lower.emitInst(.none, mnem_cc(.set, .nz), data.ops[0..1]); + try lower.emitInst(.none, mnem_cc(.set, .p), data.ops[1..2]); + try lower.emitInst(.none, .@"or", data.ops[0..2]); + }, } } -fn mirPushPopRegisterList(lower: *Lower, inst: Mir.Inst, comptime mnemonic: Mnemonic) Error!void { +fn mirJcc(lower: *Lower, index: Mir.Inst.Index, inst: Mir.Inst) Error!void { + switch (inst.data.inst_cc.cc) { + else => |cc| try lower.emitInstWithReloc(.none, mnem_cc(.j, cc), &.{ + .{ .imm = Immediate.s(0) }, + }, .{ .inst = inst.data.inst_cc.inst }), + .z_and_np => { + try lower.emitInstWithReloc(.none, mnem_cc(.j, .nz), &.{ + .{ .imm = Immediate.s(0) }, + }, .{ .inst = index + 1 }); + try lower.emitInstWithReloc(.none, mnem_cc(.j, .np), &.{ + .{ .imm = Immediate.s(0) }, + }, .{ .inst = inst.data.inst_cc.inst }); + }, + .nz_or_p => { + try lower.emitInstWithReloc(.none, mnem_cc(.j, .nz), &.{ + .{ .imm = Immediate.s(0) }, + }, .{ .inst = inst.data.inst_cc.inst }); + try 
lower.emitInstWithReloc(.none, mnem_cc(.j, .p), &.{ + .{ .imm = Immediate.s(0) }, + }, .{ .inst = inst.data.inst_cc.inst }); + }, + } +} + +fn mirRegisterList(lower: *Lower, comptime mnemonic: Mnemonic, inst: Mir.Inst) Error!void { const reg_list = Mir.RegisterList.fromInt(inst.data.payload); const callee_preserved_regs = abi.getCalleePreservedRegs(lower.target.*); var it = reg_list.iterator(.{ .direction = switch (mnemonic) { @@ -498,24 +614,20 @@ fn mirPushPopRegisterList(lower: *Lower, inst: Mir.Inst, comptime mnemonic: Mnem .pop => .forward, else => unreachable, } }); - while (it.next()) |i| try lower.emit(.none, mnemonic, &.{.{ .reg = callee_preserved_regs[i] }}); + while (it.next()) |i| try lower.emitInst(.none, mnemonic, &.{.{ .reg = callee_preserved_regs[i] }}); } -fn mirLeaLinker(lower: *Lower, inst: Mir.Inst) Error!void { - const metadata = lower.mir.extraData(Mir.LeaRegisterReloc, inst.data.payload).data; - const reg = @intToEnum(Register, metadata.reg); - try lower.emit(.none, .lea, &.{ - .{ .reg = reg }, - .{ .mem = Memory.rip(Memory.PtrSize.fromBitSize(reg.bitSize()), 0) }, - }); -} - -fn mirMovLinker(lower: *Lower, inst: Mir.Inst) Error!void { - const metadata = lower.mir.extraData(Mir.LeaRegisterReloc, inst.data.payload).data; - const reg = @intToEnum(Register, metadata.reg); - try lower.emit(.none, .mov, &.{ - .{ .reg = reg }, - .{ .mem = Memory.rip(Memory.PtrSize.fromBitSize(reg.bitSize()), 0) }, +fn mirLinker(lower: *Lower, mnemonic: Mnemonic, inst: Mir.Inst) Error!void { + const reloc = lower.mir.extraData(Mir.Reloc, inst.data.rx.payload).data; + try lower.emitInstWithReloc(.none, mnemonic, &.{ + .{ .reg = inst.data.rx.r }, + .{ .mem = Memory.rip(Memory.PtrSize.fromBitSize(inst.data.rx.r.bitSize()), 0) }, + }, switch (inst.ops) { + .got_reloc => .{ .linker_got = reloc }, + .direct_reloc => .{ .linker_direct = reloc }, + .import_reloc => .{ .linker_import = reloc }, + .tlv_reloc => .{ .linker_tlv = reloc }, + else => unreachable, }); } diff --git a/src/arch/x86_64/Mir.zig b/src/arch/x86_64/Mir.zig index 9e39d23bd4..e261f6dc38 100644 --- a/src/arch/x86_64/Mir.zig +++ b/src/arch/x86_64/Mir.zig @@ -434,6 +434,12 @@ pub const Inst = struct { /// Register, memory (SIB), immediate (byte) operands. /// Uses `rix` payload with extra data of type `MemorySib`. rmi_sib, + /// Register, register, memory (RIP), immediate (byte) operands. + /// Uses `rrix` payload with extra data of type `MemoryRip`. + rrmi_rip, + /// Register, register, memory (SIB), immediate (byte) operands. + /// Uses `rrix` payload with extra data of type `MemorySib`. + rrmi_sib, /// Register, memory (RIP), immediate (byte) operands. /// Uses `rix` payload with extra data of type `MemoryRip`. rmi_rip, @@ -524,16 +530,16 @@ pub const Inst = struct { /// Uses `reloc` payload. reloc, /// Linker relocation - GOT indirection. - /// Uses `payload` payload with extra data of type `LeaRegisterReloc`. + /// Uses `rx` payload with extra data of type `Reloc`. got_reloc, /// Linker relocation - direct reference. - /// Uses `payload` payload with extra data of type `LeaRegisterReloc`. + /// Uses `rx` payload with extra data of type `Reloc`. direct_reloc, /// Linker relocation - imports table indirection (binding). - /// Uses `payload` payload with extra data of type `LeaRegisterReloc`. + /// Uses `rx` payload with extra data of type `Reloc`. import_reloc, /// Linker relocation - threadlocal variable via GOT indirection. - /// Uses `payload` payload with extra data of type `LeaRegisterReloc`. 
+ /// Uses `rx` payload with extra data of type `Reloc`. tlv_reloc, }; @@ -567,12 +573,14 @@ pub const Inst = struct { }, /// Condition code (CC), followed by custom payload found in extra. x_cc: struct { + scratch: Register, cc: bits.Condition, payload: u32, }, /// Register with condition code (CC). r_cc: struct { r: Register, + scratch: Register, cc: bits.Condition, }, /// Register, register with condition code (CC). @@ -614,6 +622,13 @@ pub const Inst = struct { i: u8, payload: u32, }, + /// Register, register, byte immediate, followed by Custom payload found in extra. + rrix: struct { + r1: Register, + r2: Register, + i: u8, + payload: u32, + }, /// String instruction prefix and width. string: struct { repeat: bits.StringRepeat, @@ -622,12 +637,7 @@ pub const Inst = struct { /// Relocation for the linker where: /// * `atom_index` is the index of the source /// * `sym_index` is the index of the target - relocation: struct { - /// Index of the containing atom. - atom_index: u32, - /// Index into the linker's symbol table. - sym_index: u32, - }, + relocation: Reloc, /// Debug line and column position line_column: struct { line: u32, @@ -646,9 +656,7 @@ pub const Inst = struct { } }; -pub const LeaRegisterReloc = struct { - /// Destination register. - reg: u32, +pub const Reloc = struct { /// Index of the containing atom. atom_index: u32, /// Index into the linker's symbol table. diff --git a/src/arch/x86_64/bits.zig b/src/arch/x86_64/bits.zig index 5d06865566..77dc0cfb7c 100644 --- a/src/arch/x86_64/bits.zig +++ b/src/arch/x86_64/bits.zig @@ -72,6 +72,12 @@ pub const Condition = enum(u5) { /// zero z, + // Pseudo conditions + /// zero and not parity + z_and_np, + /// not zero or parity + nz_or_p, + /// Converts a std.math.CompareOperator into a condition flag, /// i.e. returns the condition that is true iff the result of the /// comparison is true. 
Assumes signed comparison @@ -143,6 +149,9 @@ pub const Condition = enum(u5) { .po => .pe, .s => .ns, .z => .nz, + + .z_and_np => .nz_or_p, + .nz_or_p => .z_and_np, }; } }; diff --git a/src/arch/x86_64/encoder.zig b/src/arch/x86_64/encoder.zig index 495edb5f2a..47211591ec 100644 --- a/src/arch/x86_64/encoder.zig +++ b/src/arch/x86_64/encoder.zig @@ -245,9 +245,9 @@ pub const Instruction = struct { }, .mem => |mem| { const op = switch (data.op_en) { - .m, .mi, .m1, .mc => .none, + .m, .mi, .m1, .mc, .vmi => .none, .mr, .mri, .mrc => inst.ops[1], - .rm, .rmi => inst.ops[0], + .rm, .rmi, .rvm, .rvmi => inst.ops[0], else => unreachable, }; try encodeMemory(enc, mem, op, encoder); diff --git a/test/behavior/bugs/12891.zig b/test/behavior/bugs/12891.zig index e558783705..354d9e856e 100644 --- a/test/behavior/bugs/12891.zig +++ b/test/behavior/bugs/12891.zig @@ -29,7 +29,6 @@ test "inf >= 1" { test "isNan(nan * 1)" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO const nan_times_one = comptime std.math.nan(f64) * 1; try std.testing.expect(std.math.isNan(nan_times_one)); @@ -37,7 +36,6 @@ test "isNan(nan * 1)" { test "runtime isNan(nan * 1)" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO const nan_times_one = std.math.nan(f64) * 1; try std.testing.expect(std.math.isNan(nan_times_one)); @@ -45,7 +43,6 @@ test "runtime isNan(nan * 1)" { test "isNan(nan * 0)" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO const nan_times_zero = comptime std.math.nan(f64) * 0; try std.testing.expect(std.math.isNan(nan_times_zero)); @@ -55,7 +52,6 @@ test "isNan(nan * 0)" { test "isNan(inf * 0)" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO const inf_times_zero = comptime std.math.inf(f64) * 0; try std.testing.expect(std.math.isNan(inf_times_zero)); @@ -65,7 +61,6 @@ test "isNan(inf * 0)" { test "runtime isNan(nan * 0)" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO const nan_times_zero = std.math.nan(f64) * 0; try std.testing.expect(std.math.isNan(nan_times_zero)); @@ -75,7 +70,6 @@ test "runtime isNan(nan * 0)" { test "runtime isNan(inf * 0)" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO const inf_times_zero = std.math.inf(f64) * 0; try std.testing.expect(std.math.isNan(inf_times_zero)); diff --git a/test/behavior/field_parent_ptr.zig b/test/behavior/field_parent_ptr.zig index bf99fd1795..c56bcad0d2 100644 --- a/test/behavior/field_parent_ptr.zig +++ b/test/behavior/field_parent_ptr.zig @@ -2,7 +2,6 @@ const expect = 
@import("std").testing.expect; const builtin = @import("builtin"); test "@fieldParentPtr non-first field" { - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO try testParentFieldPtr(&foo.c); -- cgit v1.2.3 From 4b75352c78731f76cfeac0b5c78c03f232022096 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Sat, 6 May 2023 16:05:52 -0400 Subject: x86_64: implement packed floating point fields --- src/arch/x86_64/CodeGen.zig | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) (limited to 'src/arch') diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 87eceec347..befd5be0fd 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -4922,7 +4922,14 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { } if (field_extra_bits > 0) try self.truncateRegister(field_ty, dst_reg); - break :result .{ .register = dst_reg }; + + const dst_mcv = MCValue{ .register = dst_reg }; + const dst_rc = regClassForType(field_ty); + if (dst_rc.eql(gp)) break :result dst_mcv; + + const result_reg = try self.register_manager.allocReg(inst, dst_rc); + try self.genSetReg(result_reg, field_ty, dst_mcv); + break :result .{ .register = result_reg }; }, .register => |reg| { const reg_lock = self.register_manager.lockRegAssumeUnused(reg); @@ -7896,7 +7903,8 @@ fn genCopy(self: *Self, ty: Type, dst_mcv: MCValue, src_mcv: MCValue) InnerError fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerError!void { const abi_size = @intCast(u32, ty.abiSize(self.target.*)); - if (abi_size > 8) return self.fail("genSetReg called with a value larger than one register", .{}); + if (abi_size * 8 > dst_reg.bitSize()) + return self.fail("genSetReg called with a value larger than dst_reg", .{}); switch (src_mcv) { .none, .unreach, -- cgit v1.2.3 From 3a5e3c52e0f09112989a2a40345305bfe9508431 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Sat, 6 May 2023 20:31:48 -0400 Subject: x86_64: implement `@mulAdd` --- src/arch/x86_64/CodeGen.zig | 169 +++++++++++++++++++++++++++++++++++++++++- src/arch/x86_64/Encoding.zig | 24 ++++-- src/arch/x86_64/Lower.zig | 22 ++++++ src/arch/x86_64/Mir.zig | 31 ++++++++ src/arch/x86_64/bits.zig | 16 +++- src/arch/x86_64/encodings.zig | 23 ++++++ test/behavior/muladd.zig | 6 +- 7 files changed, 277 insertions(+), 14 deletions(-) (limited to 'src/arch') diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index befd5be0fd..fffb814d7f 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -1200,6 +1200,32 @@ fn asmRegisterRegisterImmediate( }); } +fn asmRegisterRegisterMemory( + self: *Self, + tag: Mir.Inst.Tag, + reg1: Register, + reg2: Register, + m: Memory, +) !void { + _ = try self.addInst(.{ + .tag = tag, + .ops = switch (m) { + .sib => .rrm_sib, + .rip => .rrm_rip, + else => unreachable, + }, + .data = .{ .rrx = .{ + .r1 = reg1, + .r2 = reg2, + .payload = switch (m) { + .sib => try self.addExtra(Mir.MemorySib.encode(m)), + .rip => try self.addExtra(Mir.MemoryRip.encode(m)), + else => unreachable, + }, + } }, + }); +} + fn asmMemory(self: *Self, tag: Mir.Inst.Tag, m: Memory) !void { _ = try self.addInst(.{ .tag = tag, @@ -9369,9 +9395,146 @@ fn airPrefetch(self: *Self, inst: Air.Inst.Index) !void { fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void { const pl_op = self.air.instructions.items(.data)[inst].pl_op; const 
extra = self.air.extraData(Air.Bin, pl_op.payload).data; - _ = extra; - return self.fail("TODO implement airMulAdd for x86_64", .{}); - //return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, pl_op.operand }); + const ty = self.air.typeOfIndex(inst); + + if (!self.hasFeature(.fma)) return self.fail("TODO implement airMulAdd for {}", .{ + ty.fmt(self.bin_file.options.module.?), + }); + + const ops = [3]Air.Inst.Ref{ extra.lhs, extra.rhs, pl_op.operand }; + var mcvs: [3]MCValue = undefined; + var locks = [1]?RegisterManager.RegisterLock{null} ** 3; + defer for (locks) |reg_lock| if (reg_lock) |lock| self.register_manager.unlockReg(lock); + var order = [1]u2{0} ** 3; + var unused = std.StaticBitSet(3).initFull(); + for (ops, &mcvs, &locks, 0..) |op, *mcv, *lock, op_i| { + const op_index = @intCast(u2, op_i); + mcv.* = try self.resolveInst(op); + if (unused.isSet(0) and mcv.isRegister() and self.reuseOperand(inst, op, op_index, mcv.*)) { + order[op_index] = 1; + unused.unset(0); + } else if (unused.isSet(2) and mcv.isMemory()) { + order[op_index] = 3; + unused.unset(2); + } + switch (mcv.*) { + .register => |reg| lock.* = self.register_manager.lockReg(reg), + else => {}, + } + } + for (&order, &mcvs, &locks) |*mop_index, *mcv, *lock| { + if (mop_index.* != 0) continue; + mop_index.* = 1 + @intCast(u2, unused.toggleFirstSet().?); + if (mop_index.* > 1 and mcv.isRegister()) continue; + const reg = try self.copyToTmpRegister(ty, mcv.*); + mcv.* = .{ .register = reg }; + if (lock.*) |old_lock| self.register_manager.unlockReg(old_lock); + lock.* = self.register_manager.lockRegAssumeUnused(reg); + } + + const tag: ?Mir.Inst.Tag = + if (mem.eql(u2, &order, &.{ 1, 3, 2 }) or mem.eql(u2, &order, &.{ 3, 1, 2 })) + switch (ty.zigTypeTag()) { + .Float => switch (ty.floatBits(self.target.*)) { + 32 => .vfmadd132ss, + 64 => .vfmadd132sd, + else => null, + }, + .Vector => switch (ty.childType().zigTypeTag()) { + .Float => switch (ty.childType().floatBits(self.target.*)) { + 32 => switch (ty.vectorLen()) { + 1 => .vfmadd132ss, + 2...8 => .vfmadd132ps, + else => null, + }, + 64 => switch (ty.vectorLen()) { + 1 => .vfmadd132sd, + 2...4 => .vfmadd132pd, + else => null, + }, + else => null, + }, + else => null, + }, + else => unreachable, + } + else if (mem.eql(u2, &order, &.{ 2, 1, 3 }) or mem.eql(u2, &order, &.{ 1, 2, 3 })) + switch (ty.zigTypeTag()) { + .Float => switch (ty.floatBits(self.target.*)) { + 32 => .vfmadd213ss, + 64 => .vfmadd213sd, + else => null, + }, + .Vector => switch (ty.childType().zigTypeTag()) { + .Float => switch (ty.childType().floatBits(self.target.*)) { + 32 => switch (ty.vectorLen()) { + 1 => .vfmadd213ss, + 2...8 => .vfmadd213ps, + else => null, + }, + 64 => switch (ty.vectorLen()) { + 1 => .vfmadd213sd, + 2...4 => .vfmadd213pd, + else => null, + }, + else => null, + }, + else => null, + }, + else => unreachable, + } + else if (mem.eql(u2, &order, &.{ 2, 3, 1 }) or mem.eql(u2, &order, &.{ 3, 2, 1 })) + switch (ty.zigTypeTag()) { + .Float => switch (ty.floatBits(self.target.*)) { + 32 => .vfmadd231ss, + 64 => .vfmadd231sd, + else => null, + }, + .Vector => switch (ty.childType().zigTypeTag()) { + .Float => switch (ty.childType().floatBits(self.target.*)) { + 32 => switch (ty.vectorLen()) { + 1 => .vfmadd231ss, + 2...8 => .vfmadd231ps, + else => null, + }, + 64 => switch (ty.vectorLen()) { + 1 => .vfmadd231sd, + 2...4 => .vfmadd231pd, + else => null, + }, + else => null, + }, + else => null, + }, + else => null, + } + else + unreachable; + if (tag == null) return self.fail("TODO 
implement airMulAdd for {}", .{ + ty.fmt(self.bin_file.options.module.?), + }); + + var mops: [3]MCValue = undefined; + for (order, mcvs) |mop_index, mcv| mops[mop_index - 1] = mcv; + + const abi_size = @intCast(u32, ty.abiSize(self.target.*)); + const mop1_reg = registerAlias(mops[0].getReg().?, abi_size); + const mop2_reg = registerAlias(mops[1].getReg().?, abi_size); + if (mops[2].isRegister()) + try self.asmRegisterRegisterRegister( + tag.?, + mop1_reg, + mop2_reg, + registerAlias(mops[2].getReg().?, abi_size), + ) + else + try self.asmRegisterRegisterMemory( + tag.?, + mop1_reg, + mop2_reg, + mops[2].mem(Memory.PtrSize.fromSize(abi_size)), + ); + return self.finishAir(inst, mops[0], ops); } fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue { diff --git a/src/arch/x86_64/Encoding.zig b/src/arch/x86_64/Encoding.zig index ada1e891fb..94bfa63999 100644 --- a/src/arch/x86_64/Encoding.zig +++ b/src/arch/x86_64/Encoding.zig @@ -340,6 +340,11 @@ pub const Mnemonic = enum { vpunpcklbw, vpunpckldq, vpunpcklqdq, vpunpcklwd, // F16C vcvtph2ps, vcvtps2ph, + // FMA + vfmadd132pd, vfmadd213pd, vfmadd231pd, + vfmadd132ps, vfmadd213ps, vfmadd231ps, + vfmadd132sd, vfmadd213sd, vfmadd231sd, + vfmadd132ss, vfmadd213ss, vfmadd231ss, // zig fmt: on }; @@ -368,12 +373,13 @@ pub const Op = enum { r8, r16, r32, r64, rm8, rm16, rm32, rm64, r32_m16, r64_m16, - m8, m16, m32, m64, m80, m128, + m8, m16, m32, m64, m80, m128, m256, rel8, rel16, rel32, m, moffs, sreg, xmm, xmm_m32, xmm_m64, xmm_m128, + ymm, ymm_m256, // zig fmt: on pub fn fromOperand(operand: Instruction.Operand) Op { @@ -385,6 +391,7 @@ pub const Op = enum { .segment => return .sreg, .floating_point => return switch (reg.bitSize()) { 128 => .xmm, + 256 => .ymm, else => unreachable, }, .general_purpose => { @@ -418,6 +425,7 @@ pub const Op = enum { 64 => .m64, 80 => .m80, 128 => .m128, + 256 => .m256, else => unreachable, }; }, @@ -454,7 +462,8 @@ pub const Op = enum { .eax, .r32, .rm32, .r32_m16 => unreachable, .rax, .r64, .rm64, .r64_m16 => unreachable, .xmm, .xmm_m32, .xmm_m64, .xmm_m128 => unreachable, - .m8, .m16, .m32, .m64, .m80, .m128 => unreachable, + .ymm, .ymm_m256 => unreachable, + .m8, .m16, .m32, .m64, .m80, .m128, .m256 => unreachable, .unity => 1, .imm8, .imm8s, .rel8 => 8, .imm16, .imm16s, .rel16 => 16, @@ -468,12 +477,13 @@ pub const Op = enum { .none, .o16, .o32, .o64, .moffs, .m, .sreg => unreachable, .unity, .imm8, .imm8s, .imm16, .imm16s, .imm32, .imm32s, .imm64 => unreachable, .rel8, .rel16, .rel32 => unreachable, - .m8, .m16, .m32, .m64, .m80, .m128 => unreachable, + .m8, .m16, .m32, .m64, .m80, .m128, .m256 => unreachable, .al, .cl, .r8, .rm8 => 8, .ax, .r16, .rm16 => 16, .eax, .r32, .rm32, .r32_m16 => 32, .rax, .r64, .rm64, .r64_m16 => 64, .xmm, .xmm_m32, .xmm_m64, .xmm_m128 => 128, + .ymm, .ymm_m256 => 256, }; } @@ -482,13 +492,14 @@ pub const Op = enum { .none, .o16, .o32, .o64, .moffs, .m, .sreg => unreachable, .unity, .imm8, .imm8s, .imm16, .imm16s, .imm32, .imm32s, .imm64 => unreachable, .rel8, .rel16, .rel32 => unreachable, - .al, .cl, .r8, .ax, .r16, .eax, .r32, .rax, .r64, .xmm => unreachable, + .al, .cl, .r8, .ax, .r16, .eax, .r32, .rax, .r64, .xmm, .ymm => unreachable, .m8, .rm8 => 8, .m16, .rm16, .r32_m16, .r64_m16 => 16, .m32, .rm32, .xmm_m32 => 32, .m64, .rm64, .xmm_m64 => 64, .m80 => 80, .m128, .xmm_m128 => 128, + .m256, .ymm_m256 => 256, }; } @@ -513,6 +524,7 @@ pub const Op = enum { .rm8, .rm16, .rm32, .rm64, .r32_m16, .r64_m16, .xmm, .xmm_m32, .xmm_m64, .xmm_m128, + .ymm, .ymm_m256, => true, 
else => false, }; @@ -539,7 +551,7 @@ pub const Op = enum { .r32_m16, .r64_m16, .m8, .m16, .m32, .m64, .m80, .m128, .m, - .xmm_m32, .xmm_m64, .xmm_m128, + .xmm_m32, .xmm_m64, .xmm_m128, .ymm_m256, => true, else => false, }; @@ -562,6 +574,7 @@ pub const Op = enum { .r32_m16, .r64_m16 => .general_purpose, .sreg => .segment, .xmm, .xmm_m32, .xmm_m64, .xmm_m128 => .floating_point, + .ymm, .ymm_m256 => .floating_point, }; } @@ -625,6 +638,7 @@ pub const Feature = enum { none, avx, f16c, + fma, sse, sse2, sse3, diff --git a/src/arch/x86_64/Lower.zig b/src/arch/x86_64/Lower.zig index d82d5ec300..a37f28c0c3 100644 --- a/src/arch/x86_64/Lower.zig +++ b/src/arch/x86_64/Lower.zig @@ -205,6 +205,19 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct { .vcvtph2ps, .vcvtps2ph, + + .vfmadd132pd, + .vfmadd213pd, + .vfmadd231pd, + .vfmadd132ps, + .vfmadd213ps, + .vfmadd231ps, + .vfmadd132sd, + .vfmadd213sd, + .vfmadd231sd, + .vfmadd132ss, + .vfmadd213ss, + .vfmadd231ss, => try lower.mirGeneric(inst), .cmps, @@ -288,6 +301,8 @@ fn imm(lower: Lower, ops: Mir.Inst.Ops, i: u32) Immediate { .rmi_rip, .mri_sib, .mri_rip, + .rrm_sib, + .rrm_rip, .rrmi_sib, .rrmi_rip, => Immediate.u(i), @@ -310,6 +325,7 @@ fn mem(lower: Lower, ops: Mir.Inst.Ops, payload: u32) Memory { .mr_sib, .mrr_sib, .mri_sib, + .rrm_sib, .rrmi_sib, .lock_m_sib, .lock_mi_sib_u, @@ -327,6 +343,7 @@ fn mem(lower: Lower, ops: Mir.Inst.Ops, payload: u32) Memory { .mr_rip, .mrr_rip, .mri_rip, + .rrm_rip, .rrmi_rip, .lock_m_rip, .lock_mi_rip_u, @@ -449,6 +466,11 @@ fn mirGeneric(lower: *Lower, inst: Mir.Inst) Error!void { .{ .reg = inst.data.rix.r }, .{ .imm = lower.imm(inst.ops, inst.data.rix.i) }, }, + .rrm_sib, .rrm_rip => &.{ + .{ .reg = inst.data.rrx.r1 }, + .{ .reg = inst.data.rrx.r2 }, + .{ .mem = lower.mem(inst.ops, inst.data.rrx.payload) }, + }, .rrmi_sib, .rrmi_rip => &.{ .{ .reg = inst.data.rrix.r1 }, .{ .reg = inst.data.rrix.r2 }, diff --git a/src/arch/x86_64/Mir.zig b/src/arch/x86_64/Mir.zig index e261f6dc38..92a9a74fbb 100644 --- a/src/arch/x86_64/Mir.zig +++ b/src/arch/x86_64/Mir.zig @@ -324,6 +324,31 @@ pub const Inst = struct { /// Convert single-precision floating-point values to 16-bit floating-point values vcvtps2ph, + /// Fused multiply-add of packed double-precision floating-point values + vfmadd132pd, + /// Fused multiply-add of packed double-precision floating-point values + vfmadd213pd, + /// Fused multiply-add of packed double-precision floating-point values + vfmadd231pd, + /// Fused multiply-add of packed single-precision floating-point values + vfmadd132ps, + /// Fused multiply-add of packed single-precision floating-point values + vfmadd213ps, + /// Fused multiply-add of packed single-precision floating-point values + vfmadd231ps, + /// Fused multiply-add of scalar double-precision floating-point values + vfmadd132sd, + /// Fused multiply-add of scalar double-precision floating-point values + vfmadd213sd, + /// Fused multiply-add of scalar double-precision floating-point values + vfmadd231sd, + /// Fused multiply-add of scalar single-precision floating-point values + vfmadd132ss, + /// Fused multiply-add of scalar single-precision floating-point values + vfmadd213ss, + /// Fused multiply-add of scalar single-precision floating-point values + vfmadd231ss, + /// Compare string operands cmps, /// Load string @@ -434,6 +459,12 @@ pub const Inst = struct { /// Register, memory (SIB), immediate (byte) operands. /// Uses `rix` payload with extra data of type `MemorySib`. 
rmi_sib, + /// Register, register, memory (RIP). + /// Uses `rrix` payload with extra data of type `MemoryRip`. + rrm_rip, + /// Register, register, memory (SIB). + /// Uses `rrix` payload with extra data of type `MemorySib`. + rrm_sib, /// Register, register, memory (RIP), immediate (byte) operands. /// Uses `rrix` payload with extra data of type `MemoryRip`. rrmi_rip, diff --git a/src/arch/x86_64/bits.zig b/src/arch/x86_64/bits.zig index 77dc0cfb7c..b73a37d6cb 100644 --- a/src/arch/x86_64/bits.zig +++ b/src/arch/x86_64/bits.zig @@ -485,7 +485,9 @@ pub const Memory = union(enum) { dword, qword, tbyte, - dqword, + xword, + yword, + zword, pub fn fromSize(size: u32) PtrSize { return switch (size) { @@ -493,7 +495,9 @@ pub const Memory = union(enum) { 2...2 => .word, 3...4 => .dword, 5...8 => .qword, - 9...16 => .dqword, + 9...16 => .xword, + 17...32 => .yword, + 33...64 => .zword, else => unreachable, }; } @@ -505,7 +509,9 @@ pub const Memory = union(enum) { 32 => .dword, 64 => .qword, 80 => .tbyte, - 128 => .dqword, + 128 => .xword, + 256 => .yword, + 512 => .zword, else => unreachable, }; } @@ -517,7 +523,9 @@ pub const Memory = union(enum) { .dword => 32, .qword => 64, .tbyte => 80, - .dqword => 128, + .xword => 128, + .yword => 256, + .zword => 512, }; } }; diff --git a/src/arch/x86_64/encodings.zig b/src/arch/x86_64/encodings.zig index 5d2630e9a8..dd05728e24 100644 --- a/src/arch/x86_64/encodings.zig +++ b/src/arch/x86_64/encodings.zig @@ -1016,5 +1016,28 @@ pub const table = [_]Entry{ .{ .vcvtph2ps, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0x66, 0x0f, 0x38, 0x13 }, 0, .vex_128, .f16c }, .{ .vcvtps2ph, .mri, &.{ .xmm_m64, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x1d }, 0, .vex_128, .f16c }, + + // FMA + .{ .vfmadd132pd, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x98 }, 0, .vex_128_long, .fma }, + .{ .vfmadd132pd, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0x98 }, 0, .vex_256_long, .fma }, + .{ .vfmadd213pd, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0xa8 }, 0, .vex_128_long, .fma }, + .{ .vfmadd213pd, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0xa8 }, 0, .vex_256_long, .fma }, + .{ .vfmadd231pd, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0xb8 }, 0, .vex_128_long, .fma }, + .{ .vfmadd231pd, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0xb8 }, 0, .vex_256_long, .fma }, + + .{ .vfmadd132ps, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x98 }, 0, .vex_128, .fma }, + .{ .vfmadd132ps, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0x98 }, 0, .vex_256, .fma }, + .{ .vfmadd213ps, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0xa8 }, 0, .vex_128, .fma }, + .{ .vfmadd213ps, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0xa8 }, 0, .vex_256, .fma }, + .{ .vfmadd231ps, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0xb8 }, 0, .vex_128, .fma }, + .{ .vfmadd231ps, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0xb8 }, 0, .vex_256, .fma }, + + .{ .vfmadd132sd, .rvm, &.{ .xmm, .xmm, .xmm_m64 }, &.{ 0x66, 0x0f, 0x38, 0x99 }, 0, .vex_128_long, .fma }, + .{ .vfmadd213sd, .rvm, &.{ .xmm, .xmm, .xmm_m64 }, &.{ 0x66, 0x0f, 0x38, 0xa9 }, 0, .vex_128_long, .fma }, + .{ .vfmadd231sd, .rvm, &.{ .xmm, .xmm, .xmm_m64 }, &.{ 0x66, 0x0f, 0x38, 0xb9 }, 0, .vex_128_long, .fma }, + + .{ .vfmadd132ss, .rvm, &.{ .xmm, .xmm, .xmm_m32 }, &.{ 0x66, 0x0f, 0x38, 0x99 }, 0, .vex_128, .fma }, + .{ .vfmadd213ss, .rvm, &.{ .xmm, .xmm, .xmm_m32 }, &.{ 0x66, 0x0f, 0x38, 0xa9 }, 0, .vex_128, .fma }, + 
.{ .vfmadd231ss, .rvm, &.{ .xmm, .xmm, .xmm_m32 }, &.{ 0x66, 0x0f, 0x38, 0xb9 }, 0, .vex_128, .fma }, }; // zig fmt: on diff --git a/test/behavior/muladd.zig b/test/behavior/muladd.zig index aa36c99784..8656dc4f45 100644 --- a/test/behavior/muladd.zig +++ b/test/behavior/muladd.zig @@ -1,8 +1,10 @@ +const std = @import("std"); const builtin = @import("builtin"); -const expect = @import("std").testing.expect; +const expect = std.testing.expect; test "@mulAdd" { - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64 and + !comptime std.Target.x86.featureSetHas(builtin.cpu.features, .fma)) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO -- cgit v1.2.3 From cba195c1170fff77c5210f023e019d72f13b9614 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Sat, 6 May 2023 22:27:39 -0400 Subject: x86_64: implement some float and float vector movement This allows actually storing value of these supported types in registers, and not restricting them to stack slots. --- src/arch/x86_64/CodeGen.zig | 127 +++++++++++++++++++++++++++++++++--------- src/arch/x86_64/Encoding.zig | 18 ++++-- src/arch/x86_64/Lower.zig | 6 ++ src/arch/x86_64/Mir.zig | 12 ++++ src/arch/x86_64/encoder.zig | 13 +++-- src/arch/x86_64/encodings.zig | 30 ++++++++++ test/behavior/math.zig | 3 +- test/behavior/muladd.zig | 10 ++-- 8 files changed, 176 insertions(+), 43 deletions(-) (limited to 'src/arch') diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index fffb814d7f..3e47ef63f6 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -2008,6 +2008,11 @@ fn computeFrameLayout(self: *Self) !FrameLayout { }; } +fn getFrameAddrAlignment(self: *Self, frame_addr: FrameAddr) u32 { + const alloc_align = @as(u32, 1) << self.frame_allocs.get(@enumToInt(frame_addr.index)).abi_align; + return @min(alloc_align, @bitCast(u32, frame_addr.off) & (alloc_align - 1)); +} + fn allocFrameIndex(self: *Self, alloc: FrameAlloc) !FrameIndex { const frame_allocs_slice = self.frame_allocs.slice(); const frame_size = frame_allocs_slice.items(.abi_size); @@ -2051,24 +2056,36 @@ fn allocTempRegOrMem(self: *Self, elem_ty: Type, reg_ok: bool) !MCValue { return self.allocRegOrMemAdvanced(elem_ty, null, reg_ok); } -fn allocRegOrMemAdvanced(self: *Self, elem_ty: Type, inst: ?Air.Inst.Index, reg_ok: bool) !MCValue { - const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) orelse { +fn allocRegOrMemAdvanced(self: *Self, ty: Type, inst: ?Air.Inst.Index, reg_ok: bool) !MCValue { + const abi_size = math.cast(u32, ty.abiSize(self.target.*)) orelse { const mod = self.bin_file.options.module.?; - return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)}); + return self.fail("type '{}' too big to fit into stack frame", .{ty.fmt(mod)}); }; - if (reg_ok) { - // Make sure the type can fit in a register before we try to allocate one. 
- const ptr_bits = self.target.cpu.arch.ptrBitWidth(); - const ptr_bytes: u64 = @divExact(ptr_bits, 8); - if (abi_size <= ptr_bytes) { - if (self.register_manager.tryAllocReg(inst, regClassForType(elem_ty))) |reg| { + if (reg_ok) need_mem: { + if (abi_size <= @as(u32, switch (ty.zigTypeTag()) { + .Float => switch (ty.floatBits(self.target.*)) { + 16, 32, 64, 128 => 16, + 80 => break :need_mem, + else => unreachable, + }, + .Vector => switch (ty.childType().zigTypeTag()) { + .Float => switch (ty.childType().floatBits(self.target.*)) { + 16, 32, 64 => if (self.hasFeature(.avx)) 32 else 16, + 80, 128 => break :need_mem, + else => unreachable, + }, + else => break :need_mem, + }, + else => 8, + })) { + if (self.register_manager.tryAllocReg(inst, regClassForType(ty))) |reg| { return MCValue{ .register = registerAlias(reg, abi_size) }; } } } - const frame_index = try self.allocFrameIndex(FrameAlloc.initType(elem_ty, self.target.*)); + const frame_index = try self.allocFrameIndex(FrameAlloc.initType(ty, self.target.*)); return .{ .load_frame = .{ .index = frame_index } }; } @@ -4442,12 +4459,19 @@ fn airRound(self: *Self, inst: Air.Inst.Index, mode: Immediate) !void { }), }; assert(dst_mcv.isRegister()); + const abi_size = @intCast(u32, ty.abiSize(self.target.*)); + const dst_reg = registerAlias(dst_mcv.getReg().?, abi_size); if (src_mcv.isRegister()) - try self.asmRegisterRegisterImmediate(mir_tag, dst_mcv.getReg().?, src_mcv.getReg().?, mode) + try self.asmRegisterRegisterImmediate( + mir_tag, + dst_reg, + registerAlias(src_mcv.getReg().?, abi_size), + mode, + ) else try self.asmRegisterMemoryImmediate( mir_tag, - dst_mcv.getReg().?, + dst_reg, src_mcv.mem(Memory.PtrSize.fromSize(@intCast(u32, ty.abiSize(self.target.*)))), mode, ); @@ -7847,19 +7871,43 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void { return self.finishAirResult(inst, result); } -fn movMirTag(self: *Self, ty: Type) !Mir.Inst.Tag { - return switch (ty.zigTypeTag()) { - else => .mov, +fn movMirTag(self: *Self, ty: Type, aligned: bool) !Mir.Inst.Tag { + switch (ty.zigTypeTag()) { + else => return .mov, .Float => switch (ty.floatBits(self.target.*)) { 16 => unreachable, // needs special handling - 32 => .movss, - 64 => .movsd, - 128 => .movaps, - else => return self.fail("TODO movMirTag from {}", .{ - ty.fmt(self.bin_file.options.module.?), - }), + 32 => return if (self.hasFeature(.avx)) .vmovss else .movss, + 64 => return if (self.hasFeature(.avx)) .vmovsd else .movsd, + 128 => return if (self.hasFeature(.avx)) + if (aligned) .vmovaps else .vmovups + else if (aligned) .movaps else .movups, + else => {}, }, - }; + .Vector => switch (ty.childType().zigTypeTag()) { + .Float => switch (ty.childType().floatBits(self.target.*)) { + 16 => unreachable, // needs special handling + 32 => switch (ty.vectorLen()) { + 1 => return if (self.hasFeature(.avx)) .vmovss else .movss, + 2...4 => return if (self.hasFeature(.avx)) + if (aligned) .vmovaps else .vmovups + else if (aligned) .movaps else .movups, + 5...8 => if (self.hasFeature(.avx)) return if (aligned) .vmovaps else .vmovups, + else => {}, + }, + 64 => switch (ty.vectorLen()) { + 1 => return if (self.hasFeature(.avx)) .vmovsd else .movsd, + 2 => return if (self.hasFeature(.avx)) + if (aligned) .vmovaps else .vmovups + else if (aligned) .movaps else .movups, + 3...4 => if (self.hasFeature(.avx)) return if (aligned) .vmovaps else .vmovups, + else => {}, + }, + else => {}, + }, + else => {}, + }, + } + return self.fail("TODO movMirTag for {}", .{ty.fmt(self.bin_file.options.module.?)}); 
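// In summary, a sketch of the cases above for an AVX-capable target:
//
//   f32 / f64 scalar             -> vmovss / vmovsd
//   @Vector(4, f32), aligned     -> vmovaps (XMM)
//   @Vector(4, f32), unaligned   -> vmovups (XMM)
//   @Vector(8, f32)              -> vmovaps / vmovups (YMM, AVX only)
//
// Without AVX the corresponding non-v forms are chosen, and f16 is handled
// specially by the callers.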
} fn genCopy(self: *Self, ty: Type, dst_mcv: MCValue, src_mcv: MCValue) InnerError!void { @@ -8016,7 +8064,11 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr 0 => return self.genSetReg(dst_reg, ty, .{ .register = reg_off.reg }), else => .lea, }, - .indirect, .load_frame => try self.movMirTag(ty), + .indirect => try self.movMirTag(ty, false), + .load_frame => |frame_addr| try self.movMirTag( + ty, + self.getFrameAddrAlignment(frame_addr) >= ty.abiAlignment(self.target.*), + ), .lea_frame => .lea, else => unreachable, }, @@ -8040,7 +8092,11 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr ) else self.asmRegisterMemory( - try self.movMirTag(ty), + try self.movMirTag(ty, mem.isAlignedGeneric( + u32, + @bitCast(u32, small_addr), + ty.abiAlignment(self.target.*), + )), registerAlias(dst_reg, abi_size), src_mem, ); @@ -8080,7 +8136,7 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr ) else try self.asmRegisterMemory( - try self.movMirTag(ty), + try self.movMirTag(ty, false), registerAlias(dst_reg, abi_size), src_mem, ); @@ -8194,7 +8250,24 @@ fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCVal ) else try self.asmMemoryRegister( - try self.movMirTag(ty), + try self.movMirTag(ty, switch (base) { + .none => mem.isAlignedGeneric( + u32, + @bitCast(u32, disp), + ty.abiAlignment(self.target.*), + ), + .reg => |reg| switch (reg) { + .es, .cs, .ss, .ds => mem.isAlignedGeneric( + u32, + @bitCast(u32, disp), + ty.abiAlignment(self.target.*), + ), + else => false, + }, + .frame => |frame_index| self.getFrameAddrAlignment( + .{ .index = frame_index, .off = disp }, + ) >= ty.abiAlignment(self.target.*), + }), dst_mem, registerAlias(src_reg, abi_size), ); @@ -8415,7 +8488,7 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void { defer if (operand_lock) |lock| self.register_manager.unlockReg(lock); const dest = try self.allocRegOrMem(inst, true); - try self.genCopy(self.air.typeOfIndex(inst), dest, operand); + try self.genCopy(if (!dest.isMemory() or operand.isMemory()) dst_ty else src_ty, dest, operand); break :result dest; }; return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); diff --git a/src/arch/x86_64/Encoding.zig b/src/arch/x86_64/Encoding.zig index 94bfa63999..1fd1112aaf 100644 --- a/src/arch/x86_64/Encoding.zig +++ b/src/arch/x86_64/Encoding.zig @@ -206,7 +206,7 @@ pub fn format( try writer.print("+{s} ", .{tag}); }, .m, .mi, .m1, .mc, .vmi => try writer.print("/{d} ", .{encoding.modRmExt()}), - .mr, .rm, .rmi, .mri, .mrc, .rvm, .rvmi => try writer.writeAll("/r "), + .mr, .rm, .rmi, .mri, .mrc, .rvm, .rvmi, .mvr => try writer.writeAll("/r "), } switch (encoding.data.op_en) { @@ -230,7 +230,7 @@ pub fn format( }; try writer.print("{s} ", .{tag}); }, - .np, .fd, .td, .o, .m, .m1, .mc, .mr, .rm, .mrc, .rvm => {}, + .np, .fd, .td, .o, .m, .m1, .mc, .mr, .rm, .mrc, .rvm, .mvr => {}, } try writer.print("{s} ", .{@tagName(encoding.mnemonic)}); @@ -332,7 +332,12 @@ pub const Mnemonic = enum { // SSE4.1 roundsd, roundss, // AVX - vmovddup, vmovshdup, vmovsldup, + vmovapd, vmovaps, + vmovddup, + vmovsd, + vmovshdup, vmovsldup, + vmovss, + vmovupd, vmovups, vpextrw, vpinsrw, vpshufhw, vpshuflw, vpsrld, vpsrlq, vpsrlw, @@ -357,7 +362,7 @@ pub const OpEn = enum { fd, td, m1, mc, mi, mr, rm, rmi, mri, mrc, - vmi, rvm, rvmi, + vmi, rvm, rvmi, mvr, // zig fmt: on }; @@ -549,9 +554,10 @@ pub const Op = enum { return switch (op) { .rm8, .rm16, .rm32, .rm64, 
.r32_m16, .r64_m16, - .m8, .m16, .m32, .m64, .m80, .m128, + .m8, .m16, .m32, .m64, .m80, .m128, .m256, .m, - .xmm_m32, .xmm_m64, .xmm_m128, .ymm_m256, + .xmm_m32, .xmm_m64, .xmm_m128, + .ymm_m256, => true, else => false, }; diff --git a/src/arch/x86_64/Lower.zig b/src/arch/x86_64/Lower.zig index a37f28c0c3..a246a97d4b 100644 --- a/src/arch/x86_64/Lower.zig +++ b/src/arch/x86_64/Lower.zig @@ -184,9 +184,15 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct { .roundsd, .roundss, + .vmovapd, + .vmovaps, .vmovddup, + .vmovsd, .vmovshdup, .vmovsldup, + .vmovss, + .vmovupd, + .vmovups, .vpextrw, .vpinsrw, .vpshufhw, diff --git a/src/arch/x86_64/Mir.zig b/src/arch/x86_64/Mir.zig index 92a9a74fbb..de7f2cff53 100644 --- a/src/arch/x86_64/Mir.zig +++ b/src/arch/x86_64/Mir.zig @@ -282,12 +282,24 @@ pub const Inst = struct { /// Round scalar single-precision floating-point values roundss, + /// Move aligned packed double-precision floating-point values + vmovapd, + /// Move aligned packed single-precision floating-point values + vmovaps, /// Replicate double floating-point values vmovddup, + /// Move or merge scalar double-precision floating-point value + vmovsd, /// Replicate single floating-point values vmovshdup, /// Replicate single floating-point values vmovsldup, + /// Move or merge scalar single-precision floating-point value + vmovss, + /// Move unaligned packed double-precision floating-point values + vmovupd, + /// Move unaligned packed single-precision floating-point values + vmovups, /// Extract word vpextrw, /// Insert word diff --git a/src/arch/x86_64/encoder.zig b/src/arch/x86_64/encoder.zig index 47211591ec..fa6ce676cb 100644 --- a/src/arch/x86_64/encoder.zig +++ b/src/arch/x86_64/encoder.zig @@ -228,7 +228,7 @@ pub const Instruction = struct { .td => try encoder.imm64(inst.ops[0].mem.moffs.offset), else => { const mem_op = switch (data.op_en) { - .m, .mi, .m1, .mc, .mr, .mri, .mrc => inst.ops[0], + .m, .mi, .m1, .mc, .mr, .mri, .mrc, .mvr => inst.ops[0], .rm, .rmi, .vmi => inst.ops[1], .rvm, .rvmi => inst.ops[2], else => unreachable, @@ -239,6 +239,7 @@ pub const Instruction = struct { .m, .mi, .m1, .mc, .vmi => enc.modRmExt(), .mr, .mri, .mrc => inst.ops[1].reg.lowEnc(), .rm, .rmi, .rvm, .rvmi => inst.ops[0].reg.lowEnc(), + .mvr => inst.ops[2].reg.lowEnc(), else => unreachable, }; try encoder.modRm_direct(rm, reg.lowEnc()); @@ -248,6 +249,7 @@ pub const Instruction = struct { .m, .mi, .m1, .mc, .vmi => .none, .mr, .mri, .mrc => inst.ops[1], .rm, .rmi, .rvm, .rvmi => inst.ops[0], + .mvr => inst.ops[2], else => unreachable, }; try encodeMemory(enc, mem, op, encoder); @@ -315,7 +317,7 @@ pub const Instruction = struct { } else null, - .vmi, .rvm, .rvmi => unreachable, + .vmi, .rvm, .rvmi, .mvr => unreachable, }; if (segment_override) |seg| { legacy.setSegmentOverride(seg); @@ -350,7 +352,7 @@ pub const Instruction = struct { rex.b = b_x_op.isBaseExtended(); rex.x = b_x_op.isIndexExtended(); }, - .vmi, .rvm, .rvmi => unreachable, + .vmi, .rvm, .rvmi, .mvr => unreachable, } try encoder.rex(rex); @@ -372,10 +374,11 @@ pub const Instruction = struct { switch (op_en) { .np, .i, .zi, .fd, .td, .d => {}, .o, .oi => vex.b = inst.ops[0].reg.isExtended(), - .m, .mi, .m1, .mc, .mr, .rm, .rmi, .mri, .mrc, .vmi, .rvm, .rvmi => { + .m, .mi, .m1, .mc, .mr, .rm, .rmi, .mri, .mrc, .vmi, .rvm, .rvmi, .mvr => { const r_op = switch (op_en) { .rm, .rmi, .rvm, .rvmi => inst.ops[0], .mr, .mri, .mrc => inst.ops[1], + .mvr => inst.ops[2], .m, .mi, .m1, .mc, .vmi => .none, else => unreachable, 
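// This mapping follows the VEX operand-encoding naming, an assumption
// spelled out here for reference: each letter says where an operand lives,
// so for .rvm (e.g. vfmadd213ss xmm1, xmm2, xmm3/m32) ops[0] is ModRM.reg,
// ops[1] is VEX.vvvv, and ops[2] is ModRM.r/m, while .mvr reverses it for
// the store-like forms (ops[0] in ModRM.r/m, ops[2] in ModRM.reg).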
}; @@ -383,7 +386,7 @@ pub const Instruction = struct { const b_x_op = switch (op_en) { .rm, .rmi, .vmi => inst.ops[1], - .m, .mi, .m1, .mc, .mr, .mri, .mrc => inst.ops[0], + .m, .mi, .m1, .mc, .mr, .mri, .mrc, .mvr => inst.ops[0], .rvm, .rvmi => inst.ops[2], else => unreachable, }; diff --git a/src/arch/x86_64/encodings.zig b/src/arch/x86_64/encodings.zig index dd05728e24..607a87b8d9 100644 --- a/src/arch/x86_64/encodings.zig +++ b/src/arch/x86_64/encodings.zig @@ -974,12 +974,42 @@ pub const table = [_]Entry{ .{ .roundsd, .rmi, &.{ .xmm, .xmm_m64, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x0b }, 0, .none, .sse4_1 }, // AVX + .{ .vmovapd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x28 }, 0, .vex_128, .avx }, + .{ .vmovapd, .mr, &.{ .xmm_m128, .xmm }, &.{ 0x66, 0x0f, 0x29 }, 0, .vex_128, .avx }, + .{ .vmovapd, .rm, &.{ .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x28 }, 0, .vex_256, .avx }, + .{ .vmovapd, .mr, &.{ .ymm_m256, .ymm }, &.{ 0x66, 0x0f, 0x29 }, 0, .vex_256, .avx }, + + .{ .vmovaps, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x28 }, 0, .vex_128, .avx }, + .{ .vmovaps, .mr, &.{ .xmm_m128, .xmm }, &.{ 0x0f, 0x29 }, 0, .vex_128, .avx }, + .{ .vmovaps, .rm, &.{ .ymm, .ymm_m256 }, &.{ 0x0f, 0x28 }, 0, .vex_256, .avx }, + .{ .vmovaps, .mr, &.{ .ymm_m256, .ymm }, &.{ 0x0f, 0x29 }, 0, .vex_256, .avx }, + .{ .vmovddup, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x12 }, 0, .vex_128, .avx }, + .{ .vmovsd, .rvm, &.{ .xmm, .xmm, .xmm }, &.{ 0xf2, 0x0f, 0x10 }, 0, .vex_128, .avx }, + .{ .vmovsd, .rm, &.{ .xmm, .m64 }, &.{ 0xf2, 0x0f, 0x10 }, 0, .vex_128, .avx }, + .{ .vmovsd, .mvr, &.{ .xmm, .xmm, .xmm }, &.{ 0xf2, 0x0f, 0x11 }, 0, .vex_128, .avx }, + .{ .vmovsd, .mr, &.{ .m64, .xmm }, &.{ 0xf2, 0x0f, 0x11 }, 0, .vex_128, .avx }, + .{ .vmovshdup, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0xf3, 0x0f, 0x16 }, 0, .vex_128, .avx }, .{ .vmovsldup, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0xf3, 0x0f, 0x12 }, 0, .vex_128, .avx }, + .{ .vmovss, .rvm, &.{ .xmm, .xmm, .xmm }, &.{ 0xf3, 0x0f, 0x10 }, 0, .vex_128, .avx }, + .{ .vmovss, .rm, &.{ .xmm, .m32 }, &.{ 0xf3, 0x0f, 0x10 }, 0, .vex_128, .avx }, + .{ .vmovss, .mvr, &.{ .xmm, .xmm, .xmm }, &.{ 0xf3, 0x0f, 0x11 }, 0, .vex_128, .avx }, + .{ .vmovss, .mr, &.{ .m32, .xmm }, &.{ 0xf3, 0x0f, 0x11 }, 0, .vex_128, .avx }, + + .{ .vmovupd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x10 }, 0, .vex_128, .avx }, + .{ .vmovupd, .mr, &.{ .xmm_m128, .xmm }, &.{ 0x66, 0x0f, 0x11 }, 0, .vex_128, .avx }, + .{ .vmovupd, .rm, &.{ .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x10 }, 0, .vex_256, .avx }, + .{ .vmovupd, .mr, &.{ .ymm_m256, .ymm }, &.{ 0x66, 0x0f, 0x11 }, 0, .vex_256, .avx }, + + .{ .vmovups, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x10 }, 0, .vex_128, .avx }, + .{ .vmovups, .mr, &.{ .xmm_m128, .xmm }, &.{ 0x0f, 0x11 }, 0, .vex_128, .avx }, + .{ .vmovups, .rm, &.{ .ymm, .ymm_m256 }, &.{ 0x0f, 0x10 }, 0, .vex_256, .avx }, + .{ .vmovups, .mr, &.{ .ymm_m256, .ymm }, &.{ 0x0f, 0x11 }, 0, .vex_256, .avx }, + .{ .vpextrw, .mri, &.{ .r32, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x15 }, 0, .vex_128, .avx }, .{ .vpextrw, .mri, &.{ .r64, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x15 }, 0, .vex_128_long, .avx }, .{ .vpextrw, .mri, &.{ .r32_m16, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x15 }, 0, .vex_128, .avx }, diff --git a/test/behavior/math.zig b/test/behavior/math.zig index 0362bd3a2b..7e16111059 100644 --- a/test/behavior/math.zig +++ b/test/behavior/math.zig @@ -399,7 +399,8 @@ fn testBinaryNot128(comptime Type: type, x: Type) !void { test "division" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if 
(builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64 and + !comptime std.Target.x86.featureSetHas(builtin.cpu.features, .sse4_1)) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO diff --git a/test/behavior/muladd.zig b/test/behavior/muladd.zig index 8656dc4f45..bfb94de270 100644 --- a/test/behavior/muladd.zig +++ b/test/behavior/muladd.zig @@ -2,9 +2,11 @@ const std = @import("std"); const builtin = @import("builtin"); const expect = std.testing.expect; +const stage2_x86_64_without_hardware_fma_support = builtin.zig_backend == .stage2_x86_64 and + !std.Target.x86.featureSetHas(builtin.cpu.features, .fma); + test "@mulAdd" { - if (builtin.zig_backend == .stage2_x86_64 and - !comptime std.Target.x86.featureSetHas(builtin.cpu.features, .fma)) return error.SkipZigTest; // TODO + if (stage2_x86_64_without_hardware_fma_support) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -118,7 +120,7 @@ fn vector32() !void { test "vector f32" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (stage2_x86_64_without_hardware_fma_support) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -141,7 +143,7 @@ fn vector64() !void { test "vector f64" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (stage2_x86_64_without_hardware_fma_support) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO -- cgit v1.2.3 From 406c4035435657aaefe6f8e96642d0db326c7989 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Sat, 6 May 2023 23:45:36 -0400 Subject: x86_64: add missing `movsx` and `movzx` encodings --- src/arch/x86_64/encodings.zig | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) (limited to 'src/arch') diff --git a/src/arch/x86_64/encodings.zig b/src/arch/x86_64/encodings.zig index 607a87b8d9..5096ca5627 100644 --- a/src/arch/x86_64/encodings.zig +++ b/src/arch/x86_64/encodings.zig @@ -418,17 +418,21 @@ pub const table = [_]Entry{ .{ .movsx, .rm, &.{ .r32, .rm8 }, &.{ 0x0f, 0xbe }, 0, .rex, .none }, .{ .movsx, .rm, &.{ .r64, .rm8 }, &.{ 0x0f, 0xbe }, 0, .long, .none }, .{ .movsx, .rm, &.{ .r32, .rm16 }, &.{ 0x0f, 0xbf }, 0, .none, .none }, + .{ .movsx, .rm, &.{ .r32, .rm16 }, &.{ 0x0f, 0xbf }, 0, .rex, .none }, .{ .movsx, .rm, &.{ .r64, .rm16 }, &.{ 0x0f, 0xbf }, 0, .long, .none }, // This instruction is discouraged. 
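// (Presumably because with a 32-bit destination and no REX.W prefix,
// opcode 0x63 performs no sign extension at all and degenerates into a
// plain 32-bit mov; the REX.W (.long) variant below is the useful form.)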
.{ .movsxd, .rm, &.{ .r32, .rm32 }, &.{ 0x63 }, 0, .none, .none }, .{ .movsxd, .rm, &.{ .r64, .rm32 }, &.{ 0x63 }, 0, .long, .none }, - .{ .movzx, .rm, &.{ .r16, .rm8 }, &.{ 0x0f, 0xb6 }, 0, .short, .none }, - .{ .movzx, .rm, &.{ .r32, .rm8 }, &.{ 0x0f, 0xb6 }, 0, .none, .none }, - .{ .movzx, .rm, &.{ .r64, .rm8 }, &.{ 0x0f, 0xb6 }, 0, .long, .none }, - .{ .movzx, .rm, &.{ .r32, .rm16 }, &.{ 0x0f, 0xb7 }, 0, .none, .none }, - .{ .movzx, .rm, &.{ .r64, .rm16 }, &.{ 0x0f, 0xb7 }, 0, .long, .none }, + .{ .movzx, .rm, &.{ .r16, .rm8 }, &.{ 0x0f, 0xb6 }, 0, .short, .none }, + .{ .movzx, .rm, &.{ .r16, .rm8 }, &.{ 0x0f, 0xb6 }, 0, .rex_short, .none }, + .{ .movzx, .rm, &.{ .r32, .rm8 }, &.{ 0x0f, 0xb6 }, 0, .none, .none }, + .{ .movzx, .rm, &.{ .r32, .rm8 }, &.{ 0x0f, 0xb6 }, 0, .rex, .none }, + .{ .movzx, .rm, &.{ .r64, .rm8 }, &.{ 0x0f, 0xb6 }, 0, .long, .none }, + .{ .movzx, .rm, &.{ .r32, .rm16 }, &.{ 0x0f, 0xb7 }, 0, .none, .none }, + .{ .movzx, .rm, &.{ .r32, .rm16 }, &.{ 0x0f, 0xb7 }, 0, .rex, .none }, + .{ .movzx, .rm, &.{ .r64, .rm16 }, &.{ 0x0f, 0xb7 }, 0, .long, .none }, .{ .mul, .m, &.{ .rm8 }, &.{ 0xf6 }, 4, .none, .none }, .{ .mul, .m, &.{ .rm8 }, &.{ 0xf6 }, 4, .rex, .none }, -- cgit v1.2.3 From 05580b9453e4ae2d9b62fe4178651937d8b73989 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Sun, 7 May 2023 03:14:31 -0400 Subject: x86_64: implement float cast from `f16` to `f64` --- src/arch/x86_64/CodeGen.zig | 95 ++++++++++++++------ src/arch/x86_64/Encoding.zig | 165 ++++++++++++++++++++--------------- src/arch/x86_64/Lower.zig | 4 + src/arch/x86_64/Mir.zig | 8 ++ src/arch/x86_64/encoder.zig | 33 +++---- src/arch/x86_64/encodings.zig | 195 ++++++++++++++++++++++-------------------- test/behavior/floatop.zig | 3 +- 7 files changed, 292 insertions(+), 211 deletions(-) (limited to 'src/arch') diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 3e47ef63f6..38497400f2 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -2287,26 +2287,46 @@ fn airFptrunc(self: *Self, inst: Air.Inst.Index) !void { src_mcv else try self.copyToRegisterWithInstTracking(inst, dst_ty, src_mcv); - const dst_lock = self.register_manager.lockReg(dst_mcv.register); + const dst_reg = dst_mcv.getReg().?.to128(); + const dst_lock = self.register_manager.lockReg(dst_reg); defer if (dst_lock) |lock| self.register_manager.unlockReg(lock); - if (src_bits == 32 and dst_bits == 16 and self.hasFeature(.f16c)) - try self.asmRegisterRegisterImmediate( - .vcvtps2ph, - dst_mcv.register, - if (src_mcv.isRegister()) src_mcv.getReg().? else src_reg: { - const src_reg = dst_mcv.register; - try self.genSetReg(src_reg, src_ty, src_mcv); - break :src_reg src_reg; + if (dst_bits == 16 and self.hasFeature(.f16c)) { + switch (src_bits) { + 32 => { + const mat_src_reg = if (src_mcv.isRegister()) + src_mcv.getReg().? 
+ else + try self.copyToTmpRegister(src_ty, src_mcv); + try self.asmRegisterRegisterImmediate( + .vcvtps2ph, + dst_reg, + mat_src_reg.to128(), + Immediate.u(0b1_00), + ); }, - Immediate.u(0b1_00), - ) - else if (src_bits == 64 and dst_bits == 32) - try self.genBinOpMir(.cvtsd2ss, src_ty, dst_mcv, src_mcv) - else - return self.fail("TODO implement airFptrunc from {} to {}", .{ - src_ty.fmt(self.bin_file.options.module.?), dst_ty.fmt(self.bin_file.options.module.?), - }); + else => return self.fail("TODO implement airFptrunc from {} to {}", .{ + src_ty.fmt(self.bin_file.options.module.?), dst_ty.fmt(self.bin_file.options.module.?), + }), + } + } else if (src_bits == 64 and dst_bits == 32) { + if (self.hasFeature(.avx)) if (src_mcv.isRegister()) try self.asmRegisterRegisterRegister( + .vcvtsd2ss, + dst_reg, + dst_reg, + src_mcv.getReg().?.to128(), + ) else try self.asmRegisterRegisterMemory( + .vcvtsd2ss, + dst_reg, + dst_reg, + src_mcv.mem(.qword), + ) else if (src_mcv.isRegister()) + try self.asmRegisterRegister(.cvtsd2ss, dst_reg, src_mcv.getReg().?.to128()) + else + try self.asmRegisterMemory(.cvtsd2ss, dst_reg, src_mcv.mem(.qword)); + } else return self.fail("TODO implement airFptrunc from {} to {}", .{ + src_ty.fmt(self.bin_file.options.module.?), dst_ty.fmt(self.bin_file.options.module.?), + }); return self.finishAir(inst, dst_mcv, .{ ty_op.operand, .none, .none }); } @@ -2322,22 +2342,41 @@ fn airFpext(self: *Self, inst: Air.Inst.Index) !void { src_mcv else try self.copyToRegisterWithInstTracking(inst, dst_ty, src_mcv); - const dst_lock = self.register_manager.lockReg(dst_mcv.register); + const dst_reg = dst_mcv.getReg().?.to128(); + const dst_lock = self.register_manager.lockReg(dst_reg); defer if (dst_lock) |lock| self.register_manager.unlockReg(lock); - try self.genBinOpMir( - if (src_bits == 16 and dst_bits == 32 and self.hasFeature(.f16c)) - .vcvtph2ps - else if (src_bits == 32 and dst_bits == 64) - .cvtss2sd + if (src_bits == 16 and self.hasFeature(.f16c)) { + const mat_src_reg = if (src_mcv.isRegister()) + src_mcv.getReg().? 
else - return self.fail("TODO implement airFpext from {} to {}", .{ + try self.copyToTmpRegister(src_ty, src_mcv); + try self.asmRegisterRegister(.vcvtph2ps, dst_reg, mat_src_reg.to128()); + switch (dst_bits) { + 32 => {}, + 64 => try self.asmRegisterRegisterRegister(.vcvtss2sd, dst_reg, dst_reg, dst_reg), + else => return self.fail("TODO implement airFpext from {} to {}", .{ src_ty.fmt(self.bin_file.options.module.?), dst_ty.fmt(self.bin_file.options.module.?), }), - src_ty, - dst_mcv, - src_mcv, - ); + } + } else if (src_bits == 32 and dst_bits == 64) { + if (self.hasFeature(.avx)) if (src_mcv.isRegister()) try self.asmRegisterRegisterRegister( + .vcvtss2sd, + dst_reg, + dst_reg, + src_mcv.getReg().?.to128(), + ) else try self.asmRegisterRegisterMemory( + .vcvtss2sd, + dst_reg, + dst_reg, + src_mcv.mem(.dword), + ) else if (src_mcv.isRegister()) + try self.asmRegisterRegister(.cvtss2sd, dst_reg, src_mcv.getReg().?.to128()) + else + try self.asmRegisterMemory(.cvtss2sd, dst_reg, src_mcv.mem(.dword)); + } else return self.fail("TODO implement airFpext from {} to {}", .{ + src_ty.fmt(self.bin_file.options.module.?), dst_ty.fmt(self.bin_file.options.module.?), + }); return self.finishAir(inst, dst_mcv, .{ ty_op.operand, .none, .none }); } diff --git a/src/arch/x86_64/Encoding.zig b/src/arch/x86_64/Encoding.zig index 1fd1112aaf..bd6e70c975 100644 --- a/src/arch/x86_64/Encoding.zig +++ b/src/arch/x86_64/Encoding.zig @@ -89,30 +89,13 @@ pub fn findByOpcode(opc: []const u8, prefixes: struct { if (modrm_ext) |ext| if (ext != data.modrm_ext) continue; if (!std.mem.eql(u8, opc, enc.opcode())) continue; if (prefixes.rex.w) { - switch (data.mode) { - .none, .short, .rex, .rex_short, .vex_128, .vex_256 => continue, - .long, .vex_128_long, .vex_256_long => {}, - } + if (!data.mode.isLong()) continue; } else if (prefixes.rex.present and !prefixes.rex.isSet()) { - switch (data.mode) { - .rex, .rex_short => {}, - else => continue, - } + if (!data.mode.isRex()) continue; } else if (prefixes.legacy.prefix_66) { - switch (data.mode) { - .short, .rex_short => {}, - .none, .rex, .vex_128, .vex_256 => continue, - .long, .vex_128_long, .vex_256_long => continue, - } + if (!data.mode.isShort()) continue; } else { - switch (data.mode) { - .none => switch (data.mode) { - .short, .rex_short => continue, - .none, .rex, .vex_128, .vex_256 => {}, - .long, .vex_128_long, .vex_256_long => {}, - }, - else => continue, - } + if (data.mode.isShort()) continue; } return enc; }; @@ -148,50 +131,39 @@ pub fn format( _ = fmt; var opc = encoding.opcode(); - switch (encoding.data.mode) { - else => {}, - .long => try writer.writeAll("REX.W + "), - .vex_128, .vex_128_long, .vex_256, .vex_256_long => { - try writer.writeAll("VEX."); - - switch (encoding.data.mode) { - .vex_128, .vex_128_long => try writer.writeAll("128"), - .vex_256, .vex_256_long => try writer.writeAll("256"), - else => unreachable, - } - - switch (opc[0]) { - else => {}, - 0x66, 0xf3, 0xf2 => { - try writer.print(".{X:0>2}", .{opc[0]}); - opc = opc[1..]; - }, - } + if (encoding.data.mode.isVex()) { + try writer.writeAll("VEX."); + + try writer.writeAll(switch (encoding.data.mode) { + .vex_128_w0, .vex_128_w1, .vex_128_wig => "128", + .vex_256_w0, .vex_256_w1, .vex_256_wig => "256", + .vex_lig_w0, .vex_lig_w1, .vex_lig_wig => "LIG", + .vex_lz_w0, .vex_lz_w1, .vex_lz_wig => "LZ", + else => unreachable, + }); - try writer.print(".{X:0>2}", .{opc[0]}); - opc = opc[1..]; + switch (opc[0]) { + else => {}, + 0x66, 0xf3, 0xf2 => { + try writer.print(".{X:0>2}", 
.{opc[0]}); + opc = opc[1..]; + }, + } - switch (opc[0]) { - else => {}, - 0x38, 0x3A => { - try writer.print("{X:0>2}", .{opc[0]}); - opc = opc[1..]; - }, - } + try writer.print(".{}", .{std.fmt.fmtSliceHexUpper(opc[0 .. opc.len - 1])}); + opc = opc[opc.len - 1 ..]; - try writer.writeByte('.'); - try writer.writeAll(switch (encoding.data.mode) { - .vex_128, .vex_256 => "W0", - .vex_128_long, .vex_256_long => "W1", - else => unreachable, - }); - try writer.writeByte(' '); - }, - } + try writer.writeAll(".W"); + try writer.writeAll(switch (encoding.data.mode) { + .vex_128_w0, .vex_256_w0, .vex_lig_w0, .vex_lz_w0 => "0", + .vex_128_w1, .vex_256_w1, .vex_lig_w1, .vex_lz_w1 => "1", + .vex_128_wig, .vex_256_wig, .vex_lig_wig, .vex_lz_wig => "IG", + else => unreachable, + }); - for (opc) |byte| { - try writer.print("{x:0>2} ", .{byte}); - } + try writer.writeByte(' '); + } else if (encoding.data.mode.isLong()) try writer.writeAll("REX.W + "); + for (opc) |byte| try writer.print("{x:0>2} ", .{byte}); switch (encoding.data.op_en) { .np, .fd, .td, .i, .zi, .d => {}, @@ -332,6 +304,7 @@ pub const Mnemonic = enum { // SSE4.1 roundsd, roundss, // AVX + vcvtsd2ss, vcvtsi2sd, vcvtsi2ss, vcvtss2sd, vmovapd, vmovaps, vmovddup, vmovsd, @@ -629,20 +602,74 @@ pub const Op = enum { }; pub const Mode = enum { + // zig fmt: off none, - short, - long, - rex, - rex_short, - vex_128, - vex_128_long, - vex_256, - vex_256_long, + short, long, + rex, rex_short, + vex_128_w0, vex_128_w1, vex_128_wig, + vex_256_w0, vex_256_w1, vex_256_wig, + vex_lig_w0, vex_lig_w1, vex_lig_wig, + vex_lz_w0, vex_lz_w1, vex_lz_wig, + // zig fmt: on + + pub fn isShort(mode: Mode) bool { + return switch (mode) { + .short, .rex_short => true, + else => false, + }; + } + + pub fn isLong(mode: Mode) bool { + return switch (mode) { + .long, + .vex_128_w1, + .vex_256_w1, + .vex_lig_w1, + .vex_lz_w1, + => true, + else => false, + }; + } + + pub fn isRex(mode: Mode) bool { + return switch (mode) { + else => false, + .rex, .rex_short => true, + }; + } + + pub fn isVex(mode: Mode) bool { + return switch (mode) { + // zig fmt: off + else => false, + .vex_128_w0, .vex_128_w1, .vex_128_wig, + .vex_256_w0, .vex_256_w1, .vex_256_wig, + .vex_lig_w0, .vex_lig_w1, .vex_lig_wig, + .vex_lz_w0, .vex_lz_w1, .vex_lz_wig, + => true, + // zig fmt: on + }; + } + + pub fn isVecLong(mode: Mode) bool { + return switch (mode) { + // zig fmt: off + else => unreachable, + .vex_128_w0, .vex_128_w1, .vex_128_wig, + .vex_lig_w0, .vex_lig_w1, .vex_lig_wig, + .vex_lz_w0, .vex_lz_w1, .vex_lz_wig, + => false, + .vex_256_w0, .vex_256_w1, .vex_256_wig, + => true, + // zig fmt: on + }; + } }; pub const Feature = enum { none, avx, + avx2, f16c, fma, sse, diff --git a/src/arch/x86_64/Lower.zig b/src/arch/x86_64/Lower.zig index a246a97d4b..40a5ccdb10 100644 --- a/src/arch/x86_64/Lower.zig +++ b/src/arch/x86_64/Lower.zig @@ -184,6 +184,10 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct { .roundsd, .roundss, + .vcvtsd2ss, + .vcvtsi2sd, + .vcvtsi2ss, + .vcvtss2sd, .vmovapd, .vmovaps, .vmovddup, diff --git a/src/arch/x86_64/Mir.zig b/src/arch/x86_64/Mir.zig index de7f2cff53..cb1a578bb6 100644 --- a/src/arch/x86_64/Mir.zig +++ b/src/arch/x86_64/Mir.zig @@ -282,6 +282,14 @@ pub const Inst = struct { /// Round scalar single-precision floating-point values roundss, + /// Convert scalar double-precision floating-point value to scalar single-precision floating-point value + vcvtsd2ss, + /// Convert doubleword integer to scalar double-precision floating-point value + 
vcvtsi2sd, + /// Convert doubleword integer to scalar single-precision floating-point value + vcvtsi2ss, + /// Convert scalar single-precision floating-point value to scalar double-precision floating-point value + vcvtss2sd, /// Move aligned packed double-precision floating-point values vmovapd, /// Move aligned packed single-precision floating-point values diff --git a/src/arch/x86_64/encoder.zig b/src/arch/x86_64/encoder.zig index fa6ce676cb..0ce875240d 100644 --- a/src/arch/x86_64/encoder.zig +++ b/src/arch/x86_64/encoder.zig @@ -206,18 +206,15 @@ pub const Instruction = struct { const enc = inst.encoding; const data = enc.data; - switch (data.mode) { - .none, .short, .long, .rex, .rex_short => { - try inst.encodeLegacyPrefixes(encoder); - try inst.encodeMandatoryPrefix(encoder); - try inst.encodeRexPrefix(encoder); - try inst.encodeOpcode(encoder); - }, - .vex_128, .vex_128_long, .vex_256, .vex_256_long => { - try inst.encodeVexPrefix(encoder); - const opc = inst.encoding.opcode(); - try encoder.opcode_1byte(opc[opc.len - 1]); - }, + if (data.mode.isVex()) { + try inst.encodeVexPrefix(encoder); + const opc = inst.encoding.opcode(); + try encoder.opcode_1byte(opc[opc.len - 1]); + } else { + try inst.encodeLegacyPrefixes(encoder); + try inst.encodeMandatoryPrefix(encoder); + try inst.encodeRexPrefix(encoder); + try inst.encodeOpcode(encoder); } switch (data.op_en) { @@ -365,11 +362,7 @@ pub const Instruction = struct { var vex = Vex{}; - vex.w = switch (inst.encoding.data.mode) { - .vex_128, .vex_256 => false, - .vex_128_long, .vex_256_long => true, - else => unreachable, - }; + vex.w = inst.encoding.data.mode.isLong(); switch (op_en) { .np, .i, .zi, .fd, .td, .d => {}, @@ -395,11 +388,7 @@ pub const Instruction = struct { }, } - vex.l = switch (inst.encoding.data.mode) { - .vex_128, .vex_128_long => false, - .vex_256, .vex_256_long => true, - else => unreachable, - }; + vex.l = inst.encoding.data.mode.isVecLong(); vex.p = if (mand_pre) |mand| switch (mand) { 0x66 => .@"66", diff --git a/src/arch/x86_64/encodings.zig b/src/arch/x86_64/encodings.zig index 5096ca5627..5e4dc2f04b 100644 --- a/src/arch/x86_64/encodings.zig +++ b/src/arch/x86_64/encodings.zig @@ -918,7 +918,6 @@ pub const table = [_]Entry{ .{ .orpd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x56 }, 0, .none, .sse2 }, .{ .pextrw, .rmi, &.{ .r32, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0xc5 }, 0, .none, .sse2 }, - .{ .pextrw, .rmi, &.{ .r64, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0xc5 }, 0, .long, .sse2 }, .{ .pinsrw, .rmi, &.{ .xmm, .r32_m16, .imm8 }, &.{ 0x66, 0x0f, 0xc4 }, 0, .none, .sse2 }, @@ -926,31 +925,23 @@ pub const table = [_]Entry{ .{ .pshuflw, .rmi, &.{ .xmm, .xmm_m128, .imm8 }, &.{ 0xf2, 0x0f, 0x70 }, 0, .none, .sse2 }, + .{ .psrlw, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xd1 }, 0, .none, .sse2 }, + .{ .psrlw, .mi, &.{ .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x71 }, 2, .none, .sse2 }, .{ .psrld, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xd2 }, 0, .none, .sse2 }, .{ .psrld, .mi, &.{ .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x72 }, 2, .none, .sse2 }, - .{ .psrlq, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xd3 }, 0, .none, .sse2 }, .{ .psrlq, .mi, &.{ .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x73 }, 2, .none, .sse2 }, - .{ .psrlw, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xd1 }, 0, .none, .sse2 }, - .{ .psrlw, .mi, &.{ .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x71 }, 2, .none, .sse2 }, - - .{ .punpckhbw, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x68 }, 0, .none, .sse2 }, - - .{ .punpckhdq, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x6a }, 0, .none, 
.sse2 }, - + .{ .punpckhbw, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x68 }, 0, .none, .sse2 }, + .{ .punpckhwd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x69 }, 0, .none, .sse2 }, + .{ .punpckhdq, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x6a }, 0, .none, .sse2 }, .{ .punpckhqdq, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x6d }, 0, .none, .sse2 }, - .{ .punpckhwd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x69 }, 0, .none, .sse2 }, - - .{ .punpcklbw, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x60 }, 0, .none, .sse2 }, - - .{ .punpckldq, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x62 }, 0, .none, .sse2 }, - + .{ .punpcklbw, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x60 }, 0, .none, .sse2 }, + .{ .punpcklwd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x61 }, 0, .none, .sse2 }, + .{ .punpckldq, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x62 }, 0, .none, .sse2 }, .{ .punpcklqdq, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x6c }, 0, .none, .sse2 }, - .{ .punpcklwd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x61 }, 0, .none, .sse2 }, - .{ .sqrtpd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x51 }, 0, .none, .sse2 }, .{ .sqrtsd, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x51 }, 0, .none, .sse2 }, @@ -972,106 +963,128 @@ pub const table = [_]Entry{ // SSE4.1 .{ .pextrw, .mri, &.{ .r32_m16, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x15 }, 0, .none, .sse4_1 }, - .{ .pextrw, .mri, &.{ .r64_m16, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x15 }, 0, .long, .sse4_1 }, .{ .roundss, .rmi, &.{ .xmm, .xmm_m32, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x0a }, 0, .none, .sse4_1 }, .{ .roundsd, .rmi, &.{ .xmm, .xmm_m64, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x0b }, 0, .none, .sse4_1 }, // AVX - .{ .vmovapd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x28 }, 0, .vex_128, .avx }, - .{ .vmovapd, .mr, &.{ .xmm_m128, .xmm }, &.{ 0x66, 0x0f, 0x29 }, 0, .vex_128, .avx }, - .{ .vmovapd, .rm, &.{ .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x28 }, 0, .vex_256, .avx }, - .{ .vmovapd, .mr, &.{ .ymm_m256, .ymm }, &.{ 0x66, 0x0f, 0x29 }, 0, .vex_256, .avx }, - - .{ .vmovaps, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x28 }, 0, .vex_128, .avx }, - .{ .vmovaps, .mr, &.{ .xmm_m128, .xmm }, &.{ 0x0f, 0x29 }, 0, .vex_128, .avx }, - .{ .vmovaps, .rm, &.{ .ymm, .ymm_m256 }, &.{ 0x0f, 0x28 }, 0, .vex_256, .avx }, - .{ .vmovaps, .mr, &.{ .ymm_m256, .ymm }, &.{ 0x0f, 0x29 }, 0, .vex_256, .avx }, - - .{ .vmovddup, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x12 }, 0, .vex_128, .avx }, - - .{ .vmovsd, .rvm, &.{ .xmm, .xmm, .xmm }, &.{ 0xf2, 0x0f, 0x10 }, 0, .vex_128, .avx }, - .{ .vmovsd, .rm, &.{ .xmm, .m64 }, &.{ 0xf2, 0x0f, 0x10 }, 0, .vex_128, .avx }, - .{ .vmovsd, .mvr, &.{ .xmm, .xmm, .xmm }, &.{ 0xf2, 0x0f, 0x11 }, 0, .vex_128, .avx }, - .{ .vmovsd, .mr, &.{ .m64, .xmm }, &.{ 0xf2, 0x0f, 0x11 }, 0, .vex_128, .avx }, - - .{ .vmovshdup, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0xf3, 0x0f, 0x16 }, 0, .vex_128, .avx }, + .{ .vcvtsd2ss, .rvm, &.{ .xmm, .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x5a }, 0, .vex_lig_wig, .avx }, - .{ .vmovsldup, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0xf3, 0x0f, 0x12 }, 0, .vex_128, .avx }, + .{ .vcvtsi2sd, .rvm, &.{ .xmm, .xmm, .rm32 }, &.{ 0xf2, 0x0f, 0x2a }, 0, .vex_lig_w0, .avx }, + .{ .vcvtsi2sd, .rvm, &.{ .xmm, .xmm, .rm64 }, &.{ 0xf2, 0x0f, 0x2a }, 0, .vex_lig_w1, .avx }, - .{ .vmovss, .rvm, &.{ .xmm, .xmm, .xmm }, &.{ 0xf3, 0x0f, 0x10 }, 0, .vex_128, .avx }, - .{ .vmovss, .rm, &.{ .xmm, .m32 }, &.{ 0xf3, 0x0f, 0x10 }, 0, .vex_128, .avx }, - .{ .vmovss, .mvr, &.{ .xmm, .xmm, .xmm }, &.{ 0xf3, 0x0f, 0x11 }, 0, .vex_128, .avx }, - 
.{ .vmovss, .mr, &.{ .m32, .xmm }, &.{ 0xf3, 0x0f, 0x11 }, 0, .vex_128, .avx }, + .{ .vcvtsi2ss, .rvm, &.{ .xmm, .xmm, .rm32 }, &.{ 0xf2, 0x0f, 0x2a }, 0, .vex_lig_w0, .avx }, + .{ .vcvtsi2ss, .rvm, &.{ .xmm, .xmm, .rm64 }, &.{ 0xf2, 0x0f, 0x2a }, 0, .vex_lig_w1, .avx }, - .{ .vmovupd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x10 }, 0, .vex_128, .avx }, - .{ .vmovupd, .mr, &.{ .xmm_m128, .xmm }, &.{ 0x66, 0x0f, 0x11 }, 0, .vex_128, .avx }, - .{ .vmovupd, .rm, &.{ .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x10 }, 0, .vex_256, .avx }, - .{ .vmovupd, .mr, &.{ .ymm_m256, .ymm }, &.{ 0x66, 0x0f, 0x11 }, 0, .vex_256, .avx }, + .{ .vcvtss2sd, .rvm, &.{ .xmm, .xmm, .xmm_m32 }, &.{ 0xf2, 0x0f, 0x5a }, 0, .vex_lig_wig, .avx }, - .{ .vmovups, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x10 }, 0, .vex_128, .avx }, - .{ .vmovups, .mr, &.{ .xmm_m128, .xmm }, &.{ 0x0f, 0x11 }, 0, .vex_128, .avx }, - .{ .vmovups, .rm, &.{ .ymm, .ymm_m256 }, &.{ 0x0f, 0x10 }, 0, .vex_256, .avx }, - .{ .vmovups, .mr, &.{ .ymm_m256, .ymm }, &.{ 0x0f, 0x11 }, 0, .vex_256, .avx }, + .{ .vmovapd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x28 }, 0, .vex_128_wig, .avx }, + .{ .vmovapd, .mr, &.{ .xmm_m128, .xmm }, &.{ 0x66, 0x0f, 0x29 }, 0, .vex_128_wig, .avx }, + .{ .vmovapd, .rm, &.{ .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x28 }, 0, .vex_256_wig, .avx }, + .{ .vmovapd, .mr, &.{ .ymm_m256, .ymm }, &.{ 0x66, 0x0f, 0x29 }, 0, .vex_256_wig, .avx }, - .{ .vpextrw, .mri, &.{ .r32, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x15 }, 0, .vex_128, .avx }, - .{ .vpextrw, .mri, &.{ .r64, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x15 }, 0, .vex_128_long, .avx }, - .{ .vpextrw, .mri, &.{ .r32_m16, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x15 }, 0, .vex_128, .avx }, - .{ .vpextrw, .mri, &.{ .r64_m16, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x15 }, 0, .vex_128_long, .avx }, + .{ .vmovaps, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x28 }, 0, .vex_128_wig, .avx }, + .{ .vmovaps, .mr, &.{ .xmm_m128, .xmm }, &.{ 0x0f, 0x29 }, 0, .vex_128_wig, .avx }, + .{ .vmovaps, .rm, &.{ .ymm, .ymm_m256 }, &.{ 0x0f, 0x28 }, 0, .vex_256_wig, .avx }, + .{ .vmovaps, .mr, &.{ .ymm_m256, .ymm }, &.{ 0x0f, 0x29 }, 0, .vex_256_wig, .avx }, - .{ .vpinsrw, .rvmi, &.{ .xmm, .xmm, .r32_m16, .imm8 }, &.{ 0x66, 0x0f, 0xc4 }, 0, .vex_128, .avx }, + .{ .vmovddup, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x12 }, 0, .vex_128_wig, .avx }, + .{ .vmovddup, .rm, &.{ .ymm, .ymm_m256 }, &.{ 0xf2, 0x0f, 0x12 }, 0, .vex_256_wig, .avx }, - .{ .vpsrld, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xd2 }, 0, .vex_128, .avx }, - .{ .vpsrld, .vmi, &.{ .xmm, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x72 }, 2, .vex_128, .avx }, + .{ .vmovsd, .rvm, &.{ .xmm, .xmm, .xmm }, &.{ 0xf2, 0x0f, 0x10 }, 0, .vex_lig_wig, .avx }, + .{ .vmovsd, .rm, &.{ .xmm, .m64 }, &.{ 0xf2, 0x0f, 0x10 }, 0, .vex_lig_wig, .avx }, + .{ .vmovsd, .mvr, &.{ .xmm, .xmm, .xmm }, &.{ 0xf2, 0x0f, 0x11 }, 0, .vex_lig_wig, .avx }, + .{ .vmovsd, .mr, &.{ .m64, .xmm }, &.{ 0xf2, 0x0f, 0x11 }, 0, .vex_lig_wig, .avx }, - .{ .vpsrlq, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xd3 }, 0, .vex_128, .avx }, - .{ .vpsrlq, .vmi, &.{ .xmm, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x73 }, 2, .vex_128, .avx }, + .{ .vmovshdup, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0xf3, 0x0f, 0x16 }, 0, .vex_128_wig, .avx }, + .{ .vmovshdup, .rm, &.{ .ymm, .ymm_m256 }, &.{ 0xf3, 0x0f, 0x16 }, 0, .vex_256_wig, .avx }, - .{ .vpsrlw, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xd1 }, 0, .vex_128, .avx }, - .{ .vpsrlw, .vmi, &.{ .xmm, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x71 }, 2, .vex_128, .avx }, + .{ 
.vmovsldup, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0xf3, 0x0f, 0x12 }, 0, .vex_128_wig, .avx }, + .{ .vmovsldup, .rm, &.{ .ymm, .ymm_m256 }, &.{ 0xf3, 0x0f, 0x12 }, 0, .vex_256_wig, .avx }, - .{ .vpunpckhbw, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x68 }, 0, .vex_128, .avx }, + .{ .vmovss, .rvm, &.{ .xmm, .xmm, .xmm }, &.{ 0xf3, 0x0f, 0x10 }, 0, .vex_lig_wig, .avx }, + .{ .vmovss, .rm, &.{ .xmm, .m32 }, &.{ 0xf3, 0x0f, 0x10 }, 0, .vex_lig_wig, .avx }, + .{ .vmovss, .mvr, &.{ .xmm, .xmm, .xmm }, &.{ 0xf3, 0x0f, 0x11 }, 0, .vex_lig_wig, .avx }, + .{ .vmovss, .mr, &.{ .m32, .xmm }, &.{ 0xf3, 0x0f, 0x11 }, 0, .vex_lig_wig, .avx }, - .{ .vpunpckhdq, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x6a }, 0, .vex_128, .avx }, + .{ .vmovupd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x10 }, 0, .vex_128_wig, .avx }, + .{ .vmovupd, .mr, &.{ .xmm_m128, .xmm }, &.{ 0x66, 0x0f, 0x11 }, 0, .vex_128_wig, .avx }, + .{ .vmovupd, .rm, &.{ .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x10 }, 0, .vex_256_wig, .avx }, + .{ .vmovupd, .mr, &.{ .ymm_m256, .ymm }, &.{ 0x66, 0x0f, 0x11 }, 0, .vex_256_wig, .avx }, - .{ .vpunpckhqdq, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x6d }, 0, .vex_128, .avx }, + .{ .vmovups, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x10 }, 0, .vex_128_wig, .avx }, + .{ .vmovups, .mr, &.{ .xmm_m128, .xmm }, &.{ 0x0f, 0x11 }, 0, .vex_128_wig, .avx }, + .{ .vmovups, .rm, &.{ .ymm, .ymm_m256 }, &.{ 0x0f, 0x10 }, 0, .vex_256_wig, .avx }, + .{ .vmovups, .mr, &.{ .ymm_m256, .ymm }, &.{ 0x0f, 0x11 }, 0, .vex_256_wig, .avx }, - .{ .vpunpckhwd, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x69 }, 0, .vex_128, .avx }, + .{ .vpextrw, .rmi, &.{ .r32, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x15 }, 0, .vex_128_wig, .avx }, + .{ .vpextrw, .mri, &.{ .r32_m16, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x15 }, 0, .vex_128_wig, .avx }, - .{ .vpunpcklbw, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x60 }, 0, .vex_128, .avx }, + .{ .vpinsrw, .rvmi, &.{ .xmm, .xmm, .r32_m16, .imm8 }, &.{ 0x66, 0x0f, 0xc4 }, 0, .vex_128_wig, .avx }, - .{ .vpunpckldq, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x62 }, 0, .vex_128, .avx }, + .{ .vpsrlw, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xd1 }, 0, .vex_128_wig, .avx }, + .{ .vpsrlw, .vmi, &.{ .xmm, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x71 }, 2, .vex_128_wig, .avx }, + .{ .vpsrld, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xd2 }, 0, .vex_128_wig, .avx }, + .{ .vpsrld, .vmi, &.{ .xmm, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x72 }, 2, .vex_128_wig, .avx }, + .{ .vpsrlq, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xd3 }, 0, .vex_128_wig, .avx }, + .{ .vpsrlq, .vmi, &.{ .xmm, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x73 }, 2, .vex_128_wig, .avx }, - .{ .vpunpcklqdq, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x6c }, 0, .vex_128, .avx }, + .{ .vpunpckhbw, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x68 }, 0, .vex_128_wig, .avx }, + .{ .vpunpckhwd, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x69 }, 0, .vex_128_wig, .avx }, + .{ .vpunpckhdq, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x6a }, 0, .vex_128_wig, .avx }, + .{ .vpunpckhqdq, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x6d }, 0, .vex_128_wig, .avx }, - .{ .vpunpcklwd, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x61 }, 0, .vex_128, .avx }, + .{ .vpunpcklbw, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x60 }, 0, .vex_128_wig, .avx }, + .{ .vpunpcklwd, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x61 }, 0, .vex_128_wig, .avx }, + .{ .vpunpckldq, 
.rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x62 }, 0, .vex_128_wig, .avx }, + .{ .vpunpcklqdq, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x6c }, 0, .vex_128_wig, .avx }, // F16C - .{ .vcvtph2ps, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0x66, 0x0f, 0x38, 0x13 }, 0, .vex_128, .f16c }, + .{ .vcvtph2ps, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0x66, 0x0f, 0x38, 0x13 }, 0, .vex_128_w0, .f16c }, + .{ .vcvtph2ps, .rm, &.{ .ymm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x13 }, 0, .vex_256_w0, .f16c }, - .{ .vcvtps2ph, .mri, &.{ .xmm_m64, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x1d }, 0, .vex_128, .f16c }, + .{ .vcvtps2ph, .mri, &.{ .xmm_m64, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x1d }, 0, .vex_128_w0, .f16c }, + .{ .vcvtps2ph, .mri, &.{ .xmm_m128, .ymm, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x1d }, 0, .vex_256_w0, .f16c }, // FMA - .{ .vfmadd132pd, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x98 }, 0, .vex_128_long, .fma }, - .{ .vfmadd132pd, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0x98 }, 0, .vex_256_long, .fma }, - .{ .vfmadd213pd, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0xa8 }, 0, .vex_128_long, .fma }, - .{ .vfmadd213pd, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0xa8 }, 0, .vex_256_long, .fma }, - .{ .vfmadd231pd, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0xb8 }, 0, .vex_128_long, .fma }, - .{ .vfmadd231pd, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0xb8 }, 0, .vex_256_long, .fma }, - - .{ .vfmadd132ps, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x98 }, 0, .vex_128, .fma }, - .{ .vfmadd132ps, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0x98 }, 0, .vex_256, .fma }, - .{ .vfmadd213ps, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0xa8 }, 0, .vex_128, .fma }, - .{ .vfmadd213ps, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0xa8 }, 0, .vex_256, .fma }, - .{ .vfmadd231ps, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0xb8 }, 0, .vex_128, .fma }, - .{ .vfmadd231ps, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0xb8 }, 0, .vex_256, .fma }, - - .{ .vfmadd132sd, .rvm, &.{ .xmm, .xmm, .xmm_m64 }, &.{ 0x66, 0x0f, 0x38, 0x99 }, 0, .vex_128_long, .fma }, - .{ .vfmadd213sd, .rvm, &.{ .xmm, .xmm, .xmm_m64 }, &.{ 0x66, 0x0f, 0x38, 0xa9 }, 0, .vex_128_long, .fma }, - .{ .vfmadd231sd, .rvm, &.{ .xmm, .xmm, .xmm_m64 }, &.{ 0x66, 0x0f, 0x38, 0xb9 }, 0, .vex_128_long, .fma }, - - .{ .vfmadd132ss, .rvm, &.{ .xmm, .xmm, .xmm_m32 }, &.{ 0x66, 0x0f, 0x38, 0x99 }, 0, .vex_128, .fma }, - .{ .vfmadd213ss, .rvm, &.{ .xmm, .xmm, .xmm_m32 }, &.{ 0x66, 0x0f, 0x38, 0xa9 }, 0, .vex_128, .fma }, - .{ .vfmadd231ss, .rvm, &.{ .xmm, .xmm, .xmm_m32 }, &.{ 0x66, 0x0f, 0x38, 0xb9 }, 0, .vex_128, .fma }, + .{ .vfmadd132pd, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x98 }, 0, .vex_128_w1, .fma }, + .{ .vfmadd213pd, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0xa8 }, 0, .vex_128_w1, .fma }, + .{ .vfmadd231pd, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0xb8 }, 0, .vex_128_w1, .fma }, + .{ .vfmadd132pd, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0x98 }, 0, .vex_256_w1, .fma }, + .{ .vfmadd213pd, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0xa8 }, 0, .vex_256_w1, .fma }, + .{ .vfmadd231pd, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0xb8 }, 0, .vex_256_w1, .fma }, + + .{ .vfmadd132ps, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x98 }, 0, .vex_128_w0, .fma }, + .{ .vfmadd213ps, .rvm, &.{ .xmm, .xmm, 
.xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0xa8 }, 0, .vex_128_w0, .fma }, + .{ .vfmadd231ps, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0xb8 }, 0, .vex_128_w0, .fma }, + .{ .vfmadd132ps, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0x98 }, 0, .vex_256_w0, .fma }, + .{ .vfmadd213ps, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0xa8 }, 0, .vex_256_w0, .fma }, + .{ .vfmadd231ps, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0xb8 }, 0, .vex_256_w0, .fma }, + + .{ .vfmadd132sd, .rvm, &.{ .xmm, .xmm, .xmm_m64 }, &.{ 0x66, 0x0f, 0x38, 0x99 }, 0, .vex_lig_w1, .fma }, + .{ .vfmadd213sd, .rvm, &.{ .xmm, .xmm, .xmm_m64 }, &.{ 0x66, 0x0f, 0x38, 0xa9 }, 0, .vex_lig_w1, .fma }, + .{ .vfmadd231sd, .rvm, &.{ .xmm, .xmm, .xmm_m64 }, &.{ 0x66, 0x0f, 0x38, 0xb9 }, 0, .vex_lig_w1, .fma }, + + .{ .vfmadd132ss, .rvm, &.{ .xmm, .xmm, .xmm_m32 }, &.{ 0x66, 0x0f, 0x38, 0x99 }, 0, .vex_lig_w0, .fma }, + .{ .vfmadd213ss, .rvm, &.{ .xmm, .xmm, .xmm_m32 }, &.{ 0x66, 0x0f, 0x38, 0xa9 }, 0, .vex_lig_w0, .fma }, + .{ .vfmadd231ss, .rvm, &.{ .xmm, .xmm, .xmm_m32 }, &.{ 0x66, 0x0f, 0x38, 0xb9 }, 0, .vex_lig_w0, .fma }, + + // AVX2 + .{ .vpsrlw, .rvm, &.{ .ymm, .ymm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xd1 }, 0, .vex_256_wig, .avx2 }, + .{ .vpsrlw, .vmi, &.{ .ymm, .ymm, .imm8 }, &.{ 0x66, 0x0f, 0x71 }, 2, .vex_256_wig, .avx2 }, + .{ .vpsrld, .rvm, &.{ .ymm, .ymm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xd2 }, 0, .vex_256_wig, .avx2 }, + .{ .vpsrld, .vmi, &.{ .ymm, .ymm, .imm8 }, &.{ 0x66, 0x0f, 0x72 }, 2, .vex_256_wig, .avx2 }, + .{ .vpsrlq, .rvm, &.{ .ymm, .ymm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xd3 }, 0, .vex_256_wig, .avx2 }, + .{ .vpsrlq, .vmi, &.{ .ymm, .ymm, .imm8 }, &.{ 0x66, 0x0f, 0x73 }, 2, .vex_256_wig, .avx2 }, + + .{ .vpunpckhbw, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x68 }, 0, .vex_256_wig, .avx2 }, + .{ .vpunpckhwd, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x69 }, 0, .vex_256_wig, .avx2 }, + .{ .vpunpckhdq, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x6a }, 0, .vex_256_wig, .avx2 }, + .{ .vpunpckhqdq, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x6d }, 0, .vex_256_wig, .avx2 }, + + .{ .vpunpcklbw, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x60 }, 0, .vex_256_wig, .avx2 }, + .{ .vpunpcklwd, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x61 }, 0, .vex_256_wig, .avx2 }, + .{ .vpunpckldq, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x62 }, 0, .vex_256_wig, .avx2 }, + .{ .vpunpcklqdq, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x6c }, 0, .vex_256_wig, .avx2 }, }; // zig fmt: on diff --git a/test/behavior/floatop.zig b/test/behavior/floatop.zig index b98d782da1..ec24407d9f 100644 --- a/test/behavior/floatop.zig +++ b/test/behavior/floatop.zig @@ -52,7 +52,8 @@ fn testFloatComparisons() !void { } test "different sized float comparisons" { - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64 and + !comptime std.Target.x86.featureSetHas(builtin.cpu.features, .f16c)) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO -- cgit v1.2.3 From 5c5da179fb930c9d8be9366a851eb4a36f4044f1 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Sun, 7 May 2023 03:47:56 -0400 Subject: x86_64: implement `@sqrt` for vectors --- src/arch/x86_64/CodeGen.zig | 221 
++++++++++++++++++++++++++---------------- src/arch/x86_64/Encoding.zig | 1 + src/arch/x86_64/Lower.zig | 4 + src/arch/x86_64/Mir.zig | 8 ++ src/arch/x86_64/encodings.zig | 18 +++- 5 files changed, 164 insertions(+), 88 deletions(-) (limited to 'src/arch') diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 38497400f2..19878bae17 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -4520,25 +4520,69 @@ fn airRound(self: *Self, inst: Air.Inst.Index, mode: Immediate) !void { fn airSqrt(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const ty = self.air.typeOf(un_op); + const abi_size = @intCast(u32, ty.abiSize(self.target.*)); const src_mcv = try self.resolveInst(un_op); const dst_mcv = if (src_mcv.isRegister() and self.reuseOperand(inst, un_op, 0, src_mcv)) src_mcv else try self.copyToRegisterWithInstTracking(inst, ty, src_mcv); + const dst_reg = registerAlias(dst_mcv.getReg().?, abi_size); + const dst_lock = self.register_manager.lockReg(dst_reg); + defer if (dst_lock) |lock| self.register_manager.unlockReg(lock); - try self.genBinOpMir(switch (ty.zigTypeTag()) { - .Float => switch (ty.floatBits(self.target.*)) { - 32 => .sqrtss, - 64 => .sqrtsd, - else => return self.fail("TODO implement airSqrt for {}", .{ - ty.fmt(self.bin_file.options.module.?), - }), + const tag = if (@as(?Mir.Inst.Tag, switch (ty.zigTypeTag()) { + .Float => switch (ty.childType().floatBits(self.target.*)) { + 32 => if (self.hasFeature(.avx)) .vsqrtss else .sqrtss, + 64 => if (self.hasFeature(.avx)) .vsqrtsd else .sqrtsd, + 16, 80, 128 => null, + else => unreachable, }, - else => return self.fail("TODO implement airSqrt for {}", .{ - ty.fmt(self.bin_file.options.module.?), - }), - }, ty, dst_mcv, src_mcv); + .Vector => switch (ty.childType().zigTypeTag()) { + .Float => switch (ty.childType().floatBits(self.target.*)) { + 32 => switch (ty.vectorLen()) { + 1 => if (self.hasFeature(.avx)) .vsqrtss else .sqrtss, + 2...4 => if (self.hasFeature(.avx)) .vsqrtps else .sqrtps, + 5...8 => if (self.hasFeature(.avx)) .vsqrtps else null, + else => null, + }, + 64 => switch (ty.vectorLen()) { + 1 => if (self.hasFeature(.avx)) .vsqrtsd else .sqrtsd, + 2 => if (self.hasFeature(.avx)) .vsqrtpd else .sqrtpd, + 3...4 => if (self.hasFeature(.avx)) .vsqrtpd else null, + else => null, + }, + 16, 80, 128 => null, + else => unreachable, + }, + else => unreachable, + }, + else => unreachable, + })) |tag| tag else return self.fail("TODO implement airSqrt for {}", .{ + ty.fmt(self.bin_file.options.module.?), + }); + switch (tag) { + .vsqrtss, .vsqrtsd => if (src_mcv.isRegister()) try self.asmRegisterRegisterRegister( + tag, + dst_reg, + dst_reg, + registerAlias(src_mcv.getReg().?, abi_size), + ) else try self.asmRegisterRegisterMemory( + tag, + dst_reg, + dst_reg, + src_mcv.mem(Memory.PtrSize.fromSize(abi_size)), + ), + else => if (src_mcv.isRegister()) try self.asmRegisterRegister( + tag, + dst_reg, + registerAlias(src_mcv.getReg().?, abi_size), + ) else try self.asmRegisterMemory( + tag, + dst_reg, + src_mcv.mem(Memory.PtrSize.fromSize(abi_size)), + ), + } return self.finishAir(inst, dst_mcv, .{ un_op, .none, .none }); } @@ -9544,85 +9588,92 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void { lock.* = self.register_manager.lockRegAssumeUnused(reg); } - const tag: ?Mir.Inst.Tag = + const tag = if (@as( + ?Mir.Inst.Tag, if (mem.eql(u2, &order, &.{ 1, 3, 2 }) or mem.eql(u2, &order, &.{ 3, 1, 2 })) - switch (ty.zigTypeTag()) { - 
.Float => switch (ty.floatBits(self.target.*)) { - 32 => .vfmadd132ss, - 64 => .vfmadd132sd, - else => null, - }, - .Vector => switch (ty.childType().zigTypeTag()) { - .Float => switch (ty.childType().floatBits(self.target.*)) { - 32 => switch (ty.vectorLen()) { - 1 => .vfmadd132ss, - 2...8 => .vfmadd132ps, - else => null, - }, - 64 => switch (ty.vectorLen()) { - 1 => .vfmadd132sd, - 2...4 => .vfmadd132pd, - else => null, - }, - else => null, + switch (ty.zigTypeTag()) { + .Float => switch (ty.floatBits(self.target.*)) { + 32 => .vfmadd132ss, + 64 => .vfmadd132sd, + 16, 80, 128 => null, + else => unreachable, }, - else => null, - }, - else => unreachable, - } - else if (mem.eql(u2, &order, &.{ 2, 1, 3 }) or mem.eql(u2, &order, &.{ 1, 2, 3 })) - switch (ty.zigTypeTag()) { - .Float => switch (ty.floatBits(self.target.*)) { - 32 => .vfmadd213ss, - 64 => .vfmadd213sd, - else => null, - }, - .Vector => switch (ty.childType().zigTypeTag()) { - .Float => switch (ty.childType().floatBits(self.target.*)) { - 32 => switch (ty.vectorLen()) { - 1 => .vfmadd213ss, - 2...8 => .vfmadd213ps, - else => null, - }, - 64 => switch (ty.vectorLen()) { - 1 => .vfmadd213sd, - 2...4 => .vfmadd213pd, - else => null, + .Vector => switch (ty.childType().zigTypeTag()) { + .Float => switch (ty.childType().floatBits(self.target.*)) { + 32 => switch (ty.vectorLen()) { + 1 => .vfmadd132ss, + 2...8 => .vfmadd132ps, + else => null, + }, + 64 => switch (ty.vectorLen()) { + 1 => .vfmadd132sd, + 2...4 => .vfmadd132pd, + else => null, + }, + 16, 80, 128 => null, + else => unreachable, }, - else => null, + else => unreachable, }, - else => null, - }, - else => unreachable, - } - else if (mem.eql(u2, &order, &.{ 2, 3, 1 }) or mem.eql(u2, &order, &.{ 3, 2, 1 })) - switch (ty.zigTypeTag()) { - .Float => switch (ty.floatBits(self.target.*)) { - 32 => .vfmadd231ss, - 64 => .vfmadd231sd, - else => null, - }, - .Vector => switch (ty.childType().zigTypeTag()) { - .Float => switch (ty.childType().floatBits(self.target.*)) { - 32 => switch (ty.vectorLen()) { - 1 => .vfmadd231ss, - 2...8 => .vfmadd231ps, - else => null, + else => unreachable, + } + else if (mem.eql(u2, &order, &.{ 2, 1, 3 }) or mem.eql(u2, &order, &.{ 1, 2, 3 })) + switch (ty.zigTypeTag()) { + .Float => switch (ty.floatBits(self.target.*)) { + 32 => .vfmadd213ss, + 64 => .vfmadd213sd, + 16, 80, 128 => null, + else => unreachable, + }, + .Vector => switch (ty.childType().zigTypeTag()) { + .Float => switch (ty.childType().floatBits(self.target.*)) { + 32 => switch (ty.vectorLen()) { + 1 => .vfmadd213ss, + 2...8 => .vfmadd213ps, + else => null, + }, + 64 => switch (ty.vectorLen()) { + 1 => .vfmadd213sd, + 2...4 => .vfmadd213pd, + else => null, + }, + 16, 80, 128 => null, + else => unreachable, }, - 64 => switch (ty.vectorLen()) { - 1 => .vfmadd231sd, - 2...4 => .vfmadd231pd, - else => null, + else => unreachable, + }, + else => unreachable, + } + else if (mem.eql(u2, &order, &.{ 2, 3, 1 }) or mem.eql(u2, &order, &.{ 3, 2, 1 })) + switch (ty.zigTypeTag()) { + .Float => switch (ty.floatBits(self.target.*)) { + 32 => .vfmadd231ss, + 64 => .vfmadd231sd, + 16, 80, 128 => null, + else => unreachable, + }, + .Vector => switch (ty.childType().zigTypeTag()) { + .Float => switch (ty.childType().floatBits(self.target.*)) { + 32 => switch (ty.vectorLen()) { + 1 => .vfmadd231ss, + 2...8 => .vfmadd231ps, + else => null, + }, + 64 => switch (ty.vectorLen()) { + 1 => .vfmadd231sd, + 2...4 => .vfmadd231pd, + else => null, + }, + 16, 80, 128 => null, + else => unreachable, }, - else => null, + 
else => unreachable, }, - else => null, - }, - else => null, - } - else - unreachable; - if (tag == null) return self.fail("TODO implement airMulAdd for {}", .{ + else => unreachable, + } + else + unreachable, + )) |tag| tag else return self.fail("TODO implement airMulAdd for {}", .{ ty.fmt(self.bin_file.options.module.?), }); @@ -9634,14 +9685,14 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void { const mop2_reg = registerAlias(mops[1].getReg().?, abi_size); if (mops[2].isRegister()) try self.asmRegisterRegisterRegister( - tag.?, + tag, mop1_reg, mop2_reg, registerAlias(mops[2].getReg().?, abi_size), ) else try self.asmRegisterRegisterMemory( - tag.?, + tag, mop1_reg, mop2_reg, mops[2].mem(Memory.PtrSize.fromSize(abi_size)), diff --git a/src/arch/x86_64/Encoding.zig b/src/arch/x86_64/Encoding.zig index bd6e70c975..b242c98bdc 100644 --- a/src/arch/x86_64/Encoding.zig +++ b/src/arch/x86_64/Encoding.zig @@ -316,6 +316,7 @@ pub const Mnemonic = enum { vpsrld, vpsrlq, vpsrlw, vpunpckhbw, vpunpckhdq, vpunpckhqdq, vpunpckhwd, vpunpcklbw, vpunpckldq, vpunpcklqdq, vpunpcklwd, + vsqrtpd, vsqrtps, vsqrtsd, vsqrtss, // F16C vcvtph2ps, vcvtps2ph, // FMA diff --git a/src/arch/x86_64/Lower.zig b/src/arch/x86_64/Lower.zig index 40a5ccdb10..39ad2313e7 100644 --- a/src/arch/x86_64/Lower.zig +++ b/src/arch/x86_64/Lower.zig @@ -212,6 +212,10 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct { .vpunpckldq, .vpunpcklqdq, .vpunpcklwd, + .vsqrtpd, + .vsqrtps, + .vsqrtsd, + .vsqrtss, .vcvtph2ps, .vcvtps2ph, diff --git a/src/arch/x86_64/Mir.zig b/src/arch/x86_64/Mir.zig index cb1a578bb6..b6df0fff09 100644 --- a/src/arch/x86_64/Mir.zig +++ b/src/arch/x86_64/Mir.zig @@ -338,6 +338,14 @@ pub const Inst = struct { vpunpcklqdq, /// Unpack low data vpunpcklwd, + /// Square root of packed double-precision floating-point value + vsqrtpd, + /// Square root of packed single-precision floating-point value + vsqrtps, + /// Square root of scalar double-precision floating-point value + vsqrtsd, + /// Square root of scalar single-precision floating-point value + vsqrtss, /// Convert 16-bit floating-point values to single-precision floating-point values vcvtph2ps, diff --git a/src/arch/x86_64/encodings.zig b/src/arch/x86_64/encodings.zig index 5e4dc2f04b..49ebc344fd 100644 --- a/src/arch/x86_64/encodings.zig +++ b/src/arch/x86_64/encodings.zig @@ -869,8 +869,9 @@ pub const table = [_]Entry{ .{ .subss, .rm, &.{ .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x5c }, 0, .none, .sse }, - .{ .sqrtps, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x51 }, 0, .none, .sse }, - .{ .sqrtss, .rm, &.{ .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x51 }, 0, .none, .sse }, + .{ .sqrtps, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x51 }, 0, .none, .sse }, + + .{ .sqrtss, .rm, &.{ .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x51 }, 0, .none, .sse }, .{ .ucomiss, .rm, &.{ .xmm, .xmm_m32 }, &.{ 0x0f, 0x2e }, 0, .none, .sse }, @@ -943,7 +944,8 @@ pub const table = [_]Entry{ .{ .punpcklqdq, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x6c }, 0, .none, .sse2 }, .{ .sqrtpd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x51 }, 0, .none, .sse2 }, - .{ .sqrtsd, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x51 }, 0, .none, .sse2 }, + + .{ .sqrtsd, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x51 }, 0, .none, .sse2 }, .{ .subsd, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x5c }, 0, .none, .sse2 }, @@ -1039,6 +1041,16 @@ pub const table = [_]Entry{ .{ .vpunpckldq, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x62 }, 0, .vex_128_wig, .avx }, .{ .vpunpcklqdq, .rvm, &.{ .xmm, 
.xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x6c }, 0, .vex_128_wig, .avx }, + .{ .vsqrtpd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x51 }, 0, .vex_128_wig, .avx }, + .{ .vsqrtpd, .rm, &.{ .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x51 }, 0, .vex_256_wig, .avx }, + + .{ .vsqrtps, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x51 }, 0, .vex_128_wig, .avx }, + .{ .vsqrtps, .rm, &.{ .ymm, .ymm_m256 }, &.{ 0x0f, 0x51 }, 0, .vex_256_wig, .avx }, + + .{ .vsqrtsd, .rvm, &.{ .xmm, .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f }, 0, .vex_lig_wig, .avx }, + + .{ .vsqrtss, .rvm, &.{ .xmm, .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f }, 0, .vex_lig_wig, .avx }, + // F16C .{ .vcvtph2ps, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0x66, 0x0f, 0x38, 0x13 }, 0, .vex_128_w0, .f16c }, .{ .vcvtph2ps, .rm, &.{ .ymm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x13 }, 0, .vex_256_w0, .f16c }, -- cgit v1.2.3 From ea957c4cff77f045108863cb5552b3511cb455c1 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Sun, 7 May 2023 05:01:37 -0400 Subject: x86_64: implement `@sqrt` for `f16` scalars and vectors --- src/arch/x86_64/CodeGen.zig | 156 +++++++++++++++++++++++++++++------------- src/arch/x86_64/encodings.zig | 4 +- test/behavior/floatop.zig | 1 - 3 files changed, 109 insertions(+), 52 deletions(-) (limited to 'src/arch') diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 19878bae17..6337ad23f5 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -4531,59 +4531,117 @@ fn airSqrt(self: *Self, inst: Air.Inst.Index) !void { const dst_lock = self.register_manager.lockReg(dst_reg); defer if (dst_lock) |lock| self.register_manager.unlockReg(lock); - const tag = if (@as(?Mir.Inst.Tag, switch (ty.zigTypeTag()) { - .Float => switch (ty.childType().floatBits(self.target.*)) { - 32 => if (self.hasFeature(.avx)) .vsqrtss else .sqrtss, - 64 => if (self.hasFeature(.avx)) .vsqrtsd else .sqrtsd, - 16, 80, 128 => null, - else => unreachable, - }, - .Vector => switch (ty.childType().zigTypeTag()) { - .Float => switch (ty.childType().floatBits(self.target.*)) { - 32 => switch (ty.vectorLen()) { - 1 => if (self.hasFeature(.avx)) .vsqrtss else .sqrtss, - 2...4 => if (self.hasFeature(.avx)) .vsqrtps else .sqrtps, - 5...8 => if (self.hasFeature(.avx)) .vsqrtps else null, - else => null, - }, - 64 => switch (ty.vectorLen()) { - 1 => if (self.hasFeature(.avx)) .vsqrtsd else .sqrtsd, - 2 => if (self.hasFeature(.avx)) .vsqrtpd else .sqrtpd, - 3...4 => if (self.hasFeature(.avx)) .vsqrtpd else null, - else => null, + const result: MCValue = result: { + const tag = if (@as(?Mir.Inst.Tag, switch (ty.zigTypeTag()) { + .Float => switch (ty.floatBits(self.target.*)) { + 16 => if (self.hasFeature(.f16c)) { + const mat_src_reg = if (src_mcv.isRegister()) + src_mcv.getReg().? + else + try self.copyToTmpRegister(ty, src_mcv); + try self.asmRegisterRegister(.vcvtph2ps, dst_reg, mat_src_reg.to128()); + try self.asmRegisterRegisterRegister(.vsqrtss, dst_reg, dst_reg, dst_reg); + try self.asmRegisterRegisterImmediate( + .vcvtps2ph, + dst_reg, + dst_reg, + Immediate.u(0b1_00), + ); + break :result dst_mcv; + } else null, + 32 => if (self.hasFeature(.avx)) .vsqrtss else .sqrtss, + 64 => if (self.hasFeature(.avx)) .vsqrtsd else .sqrtsd, + 80, 128 => null, + else => unreachable, + }, + .Vector => switch (ty.childType().zigTypeTag()) { + .Float => switch (ty.childType().floatBits(self.target.*)) { + 16 => if (self.hasFeature(.f16c)) switch (ty.vectorLen()) { + 1 => { + const mat_src_reg = if (src_mcv.isRegister()) + src_mcv.getReg().? 
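The f16 case above leans on F16C rather than native half-precision arithmetic: widen to f32 with vcvtph2ps, take the square root there, and narrow back with vcvtps2ph (the 0b1_00 immediate sets bit 2, so the narrowing rounds per MXCSR rather than an immediate rounding mode). A minimal sketch of the same widen-compute-narrow strategy in portable Zig, using the two-argument builtin style of this era; sqrtF16ViaF32 is an illustrative name, not a function from the patch:

    const std = @import("std");

    // Mirrors the vcvtph2ps -> vsqrtss -> vcvtps2ph sequence emitted above.
    fn sqrtF16ViaF32(x: f16) f16 {
        const wide: f32 = x; // lossless widening, as in vcvtph2ps
        return @floatCast(f16, @sqrt(wide)); // narrow back, as in vcvtps2ph
    }

    test "f16 sqrt via f32 widening" {
        try std.testing.expectEqual(@as(f16, 2.0), sqrtF16ViaF32(4.0));
    }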
+ else + try self.copyToTmpRegister(ty, src_mcv); + try self.asmRegisterRegister(.vcvtph2ps, dst_reg, mat_src_reg.to128()); + try self.asmRegisterRegisterRegister(.vsqrtss, dst_reg, dst_reg, dst_reg); + try self.asmRegisterRegisterImmediate( + .vcvtps2ph, + dst_reg, + dst_reg, + Immediate.u(0b1_00), + ); + break :result dst_mcv; + }, + 2...8 => { + const wide_reg = registerAlias(dst_reg, abi_size * 2); + if (src_mcv.isRegister()) try self.asmRegisterRegister( + .vcvtph2ps, + wide_reg, + src_mcv.getReg().?.to128(), + ) else try self.asmRegisterMemory( + .vcvtph2ps, + wide_reg, + src_mcv.mem(Memory.PtrSize.fromSize( + @intCast(u32, @divExact(wide_reg.bitSize(), 16)), + )), + ); + try self.asmRegisterRegister(.vsqrtps, wide_reg, wide_reg); + try self.asmRegisterRegisterImmediate( + .vcvtps2ph, + dst_reg, + wide_reg, + Immediate.u(0b1_00), + ); + break :result dst_mcv; + }, + else => null, + } else null, + 32 => switch (ty.vectorLen()) { + 1 => if (self.hasFeature(.avx)) .vsqrtss else .sqrtss, + 2...4 => if (self.hasFeature(.avx)) .vsqrtps else .sqrtps, + 5...8 => if (self.hasFeature(.avx)) .vsqrtps else null, + else => null, + }, + 64 => switch (ty.vectorLen()) { + 1 => if (self.hasFeature(.avx)) .vsqrtsd else .sqrtsd, + 2 => if (self.hasFeature(.avx)) .vsqrtpd else .sqrtpd, + 3...4 => if (self.hasFeature(.avx)) .vsqrtpd else null, + else => null, + }, + 80, 128 => null, + else => unreachable, }, - 16, 80, 128 => null, else => unreachable, }, else => unreachable, - }, - else => unreachable, - })) |tag| tag else return self.fail("TODO implement airSqrt for {}", .{ - ty.fmt(self.bin_file.options.module.?), - }); - switch (tag) { - .vsqrtss, .vsqrtsd => if (src_mcv.isRegister()) try self.asmRegisterRegisterRegister( - tag, - dst_reg, - dst_reg, - registerAlias(src_mcv.getReg().?, abi_size), - ) else try self.asmRegisterRegisterMemory( - tag, - dst_reg, - dst_reg, - src_mcv.mem(Memory.PtrSize.fromSize(abi_size)), - ), - else => if (src_mcv.isRegister()) try self.asmRegisterRegister( - tag, - dst_reg, - registerAlias(src_mcv.getReg().?, abi_size), - ) else try self.asmRegisterMemory( - tag, - dst_reg, - src_mcv.mem(Memory.PtrSize.fromSize(abi_size)), - ), - } - return self.finishAir(inst, dst_mcv, .{ un_op, .none, .none }); + })) |tag| tag else return self.fail("TODO implement airSqrt for {}", .{ + ty.fmt(self.bin_file.options.module.?), + }); + switch (tag) { + .vsqrtss, .vsqrtsd => if (src_mcv.isRegister()) try self.asmRegisterRegisterRegister( + tag, + dst_reg, + dst_reg, + registerAlias(src_mcv.getReg().?, abi_size), + ) else try self.asmRegisterRegisterMemory( + tag, + dst_reg, + dst_reg, + src_mcv.mem(Memory.PtrSize.fromSize(abi_size)), + ), + else => if (src_mcv.isRegister()) try self.asmRegisterRegister( + tag, + dst_reg, + registerAlias(src_mcv.getReg().?, abi_size), + ) else try self.asmRegisterMemory( + tag, + dst_reg, + src_mcv.mem(Memory.PtrSize.fromSize(abi_size)), + ), + } + break :result dst_mcv; + }; + return self.finishAir(inst, result, .{ un_op, .none, .none }); } fn airUnaryMath(self: *Self, inst: Air.Inst.Index) !void { diff --git a/src/arch/x86_64/encodings.zig b/src/arch/x86_64/encodings.zig index 49ebc344fd..78bda4fc76 100644 --- a/src/arch/x86_64/encodings.zig +++ b/src/arch/x86_64/encodings.zig @@ -1047,9 +1047,9 @@ pub const table = [_]Entry{ .{ .vsqrtps, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x51 }, 0, .vex_128_wig, .avx }, .{ .vsqrtps, .rm, &.{ .ymm, .ymm_m256 }, &.{ 0x0f, 0x51 }, 0, .vex_256_wig, .avx }, - .{ .vsqrtsd, .rvm, &.{ .xmm, .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f 
}, 0, .vex_lig_wig, .avx }, + .{ .vsqrtsd, .rvm, &.{ .xmm, .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x51 }, 0, .vex_lig_wig, .avx }, - .{ .vsqrtss, .rvm, &.{ .xmm, .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f }, 0, .vex_lig_wig, .avx }, + .{ .vsqrtss, .rvm, &.{ .xmm, .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x51 }, 0, .vex_lig_wig, .avx }, // F16C .{ .vcvtph2ps, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0x66, 0x0f, 0x38, 0x13 }, 0, .vex_128_w0, .f16c }, diff --git a/test/behavior/floatop.zig b/test/behavior/floatop.zig index ec24407d9f..3f407061f4 100644 --- a/test/behavior/floatop.zig +++ b/test/behavior/floatop.zig @@ -135,7 +135,6 @@ fn testSqrt() !void { test "@sqrt with vectors" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO -- cgit v1.2.3 From 057139fda575e0e6038b821256a45669cd70a073 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Sun, 7 May 2023 09:06:12 -0400 Subject: x86_64: implement binary operations for float vectors --- src/arch/x86_64/CodeGen.zig | 642 +++++++++++++++++++++++++----------------- src/arch/x86_64/Encoding.zig | 34 ++- src/arch/x86_64/Lower.zig | 49 ++++ src/arch/x86_64/Mir.zig | 115 +++++++- src/arch/x86_64/encodings.zig | 101 ++++++- 5 files changed, 651 insertions(+), 290 deletions(-) (limited to 'src/arch') diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 6337ad23f5..8c6f14ec3a 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -1176,6 +1176,21 @@ fn asmRegisterRegisterRegister( }); } +fn asmRegisterRegisterRegisterImmediate( + self: *Self, + tag: Mir.Inst.Tag, + reg1: Register, + reg2: Register, + reg3: Register, + imm: Immediate, +) !void { + _ = try self.addInst(.{ + .tag = tag, + .ops = .rrri, + .data = .{ .rrri = .{ .r1 = reg1, .r2 = reg2, .r3 = reg3, .i = @intCast(u8, imm.unsigned) } }, + }); +} + fn asmRegisterRegisterImmediate( self: *Self, tag: Mir.Inst.Tag, @@ -2310,20 +2325,31 @@ fn airFptrunc(self: *Self, inst: Air.Inst.Index) !void { }), } } else if (src_bits == 64 and dst_bits == 32) { - if (self.hasFeature(.avx)) if (src_mcv.isRegister()) try self.asmRegisterRegisterRegister( + if (self.hasFeature(.avx)) if (src_mcv.isMemory()) try self.asmRegisterRegisterMemory( .vcvtsd2ss, dst_reg, dst_reg, - src_mcv.getReg().?.to128(), - ) else try self.asmRegisterRegisterMemory( + src_mcv.mem(.qword), + ) else try self.asmRegisterRegisterRegister( .vcvtsd2ss, dst_reg, dst_reg, + (if (src_mcv.isRegister()) + src_mcv.getReg().? + else + try self.copyToTmpRegister(src_ty, src_mcv)).to128(), + ) else if (src_mcv.isMemory()) try self.asmRegisterMemory( + .cvtsd2ss, + dst_reg, src_mcv.mem(.qword), - ) else if (src_mcv.isRegister()) - try self.asmRegisterRegister(.cvtsd2ss, dst_reg, src_mcv.getReg().?.to128()) - else - try self.asmRegisterMemory(.cvtsd2ss, dst_reg, src_mcv.mem(.qword)); + ) else try self.asmRegisterRegister( + .cvtsd2ss, + dst_reg, + (if (src_mcv.isRegister()) + src_mcv.getReg().? 
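For orientation, the operation being lowered in this hunk corresponds to Zig's @floatCast from f64 to f32: cvtsd2ss in the SSE form, or the three-operand vcvtsd2ss under AVX, whose extra source operand makes the write non-destructive (the backend simply passes dst_reg for both the destination and the first source). A hypothetical minimal case; truncToF32 is an illustrative name, not from the patch:

    const std = @import("std");

    fn truncToF32(x: f64) f32 {
        return @floatCast(f32, x); // lowers to cvtsd2ss, or vcvtsd2ss when AVX is available
    }

    test "f64 to f32 truncation" {
        try std.testing.expectEqual(@as(f32, 1.5), truncToF32(1.5));
    }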
+ else + try self.copyToTmpRegister(src_ty, src_mcv)).to128(), + ); } else return self.fail("TODO implement airFptrunc from {} to {}", .{ src_ty.fmt(self.bin_file.options.module.?), dst_ty.fmt(self.bin_file.options.module.?), }); @@ -2360,20 +2386,31 @@ fn airFpext(self: *Self, inst: Air.Inst.Index) !void { }), } } else if (src_bits == 32 and dst_bits == 64) { - if (self.hasFeature(.avx)) if (src_mcv.isRegister()) try self.asmRegisterRegisterRegister( + if (self.hasFeature(.avx)) if (src_mcv.isMemory()) try self.asmRegisterRegisterMemory( .vcvtss2sd, dst_reg, dst_reg, - src_mcv.getReg().?.to128(), - ) else try self.asmRegisterRegisterMemory( + src_mcv.mem(.dword), + ) else try self.asmRegisterRegisterRegister( .vcvtss2sd, dst_reg, dst_reg, + (if (src_mcv.isRegister()) + src_mcv.getReg().? + else + try self.copyToTmpRegister(src_ty, src_mcv)).to128(), + ) else if (src_mcv.isMemory()) try self.asmRegisterMemory( + .cvtss2sd, + dst_reg, src_mcv.mem(.dword), - ) else if (src_mcv.isRegister()) - try self.asmRegisterRegister(.cvtss2sd, dst_reg, src_mcv.getReg().?.to128()) - else - try self.asmRegisterMemory(.cvtss2sd, dst_reg, src_mcv.mem(.dword)); + ) else try self.asmRegisterRegister( + .cvtss2sd, + dst_reg, + (if (src_mcv.isRegister()) + src_mcv.getReg().? + else + try self.copyToTmpRegister(src_ty, src_mcv)).to128(), + ); } else return self.fail("TODO implement airFpext from {} to {}", .{ src_ty.fmt(self.bin_file.options.module.?), dst_ty.fmt(self.bin_file.options.module.?), }); @@ -4532,7 +4569,7 @@ fn airSqrt(self: *Self, inst: Air.Inst.Index) !void { defer if (dst_lock) |lock| self.register_manager.unlockReg(lock); const result: MCValue = result: { - const tag = if (@as(?Mir.Inst.Tag, switch (ty.zigTypeTag()) { + const mir_tag = if (@as(?Mir.Inst.Tag, switch (ty.zigTypeTag()) { .Float => switch (ty.floatBits(self.target.*)) { 16 => if (self.hasFeature(.f16c)) { const mat_src_reg = if (src_mcv.isRegister()) @@ -4558,11 +4595,14 @@ fn airSqrt(self: *Self, inst: Air.Inst.Index) !void { .Float => switch (ty.childType().floatBits(self.target.*)) { 16 => if (self.hasFeature(.f16c)) switch (ty.vectorLen()) { 1 => { - const mat_src_reg = if (src_mcv.isRegister()) - src_mcv.getReg().? - else - try self.copyToTmpRegister(ty, src_mcv); - try self.asmRegisterRegister(.vcvtph2ps, dst_reg, mat_src_reg.to128()); + try self.asmRegisterRegister( + .vcvtph2ps, + dst_reg, + (if (src_mcv.isRegister()) + src_mcv.getReg().? + else + try self.copyToTmpRegister(ty, src_mcv)).to128(), + ); try self.asmRegisterRegisterRegister(.vsqrtss, dst_reg, dst_reg, dst_reg); try self.asmRegisterRegisterImmediate( .vcvtps2ph, @@ -4574,16 +4614,19 @@ fn airSqrt(self: *Self, inst: Air.Inst.Index) !void { }, 2...8 => { const wide_reg = registerAlias(dst_reg, abi_size * 2); - if (src_mcv.isRegister()) try self.asmRegisterRegister( - .vcvtph2ps, - wide_reg, - src_mcv.getReg().?.to128(), - ) else try self.asmRegisterMemory( + if (src_mcv.isMemory()) try self.asmRegisterMemory( .vcvtph2ps, wide_reg, src_mcv.mem(Memory.PtrSize.fromSize( @intCast(u32, @divExact(wide_reg.bitSize(), 16)), )), + ) else try self.asmRegisterRegister( + .vcvtph2ps, + wide_reg, + (if (src_mcv.isRegister()) + src_mcv.getReg().? 
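The vector f16 path follows the same shape as the scalar one: vcvtph2ps widens up to eight f16 lanes into a register of twice the byte size, the square root runs in f32 via vsqrtps, and vcvtps2ph packs the lanes back down. Distilled to a behavior-level example in the spirit of the test this commit enables (a hypothetical minimal case, not copied from the test file):

    const std = @import("std");

    test "@sqrt on an f16 vector" {
        const v: @Vector(4, f16) = .{ 1.0, 4.0, 9.0, 16.0 };
        const r = @sqrt(v); // one vcvtph2ps/vsqrtps/vcvtps2ph round trip under F16C
        try std.testing.expectEqual(@as(f16, 3.0), r[2]);
    }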
+ else + try self.copyToTmpRegister(ty, src_mcv)).to128(), ); try self.asmRegisterRegister(.vsqrtps, wide_reg, wide_reg); try self.asmRegisterRegisterImmediate( @@ -4617,26 +4660,32 @@ fn airSqrt(self: *Self, inst: Air.Inst.Index) !void { })) |tag| tag else return self.fail("TODO implement airSqrt for {}", .{ ty.fmt(self.bin_file.options.module.?), }); - switch (tag) { - .vsqrtss, .vsqrtsd => if (src_mcv.isRegister()) try self.asmRegisterRegisterRegister( - tag, + switch (mir_tag) { + .vsqrtss, .vsqrtsd => if (src_mcv.isMemory()) try self.asmRegisterRegisterMemory( + mir_tag, dst_reg, dst_reg, - registerAlias(src_mcv.getReg().?, abi_size), - ) else try self.asmRegisterRegisterMemory( - tag, + src_mcv.mem(Memory.PtrSize.fromSize(abi_size)), + ) else try self.asmRegisterRegisterRegister( + mir_tag, dst_reg, dst_reg, - src_mcv.mem(Memory.PtrSize.fromSize(abi_size)), + registerAlias(if (src_mcv.isRegister()) + src_mcv.getReg().? + else + try self.copyToTmpRegister(ty, src_mcv), abi_size), ), - else => if (src_mcv.isRegister()) try self.asmRegisterRegister( - tag, - dst_reg, - registerAlias(src_mcv.getReg().?, abi_size), - ) else try self.asmRegisterMemory( - tag, + else => if (src_mcv.isMemory()) try self.asmRegisterMemory( + mir_tag, dst_reg, src_mcv.mem(Memory.PtrSize.fromSize(abi_size)), + ) else try self.asmRegisterRegister( + mir_tag, + dst_reg, + registerAlias(if (src_mcv.isRegister()) + src_mcv.getReg().? + else + try self.copyToTmpRegister(ty, src_mcv), abi_size), ), } break :result dst_mcv; @@ -5800,25 +5849,22 @@ fn genMulDivBinOp( } } -/// Result is always a register. fn genBinOp( self: *Self, maybe_inst: ?Air.Inst.Index, - tag: Air.Inst.Tag, + air_tag: Air.Inst.Tag, lhs_air: Air.Inst.Ref, rhs_air: Air.Inst.Ref, ) !MCValue { - const lhs = try self.resolveInst(lhs_air); - const rhs = try self.resolveInst(rhs_air); + const lhs_mcv = try self.resolveInst(lhs_air); + const rhs_mcv = try self.resolveInst(rhs_air); const lhs_ty = self.air.typeOf(lhs_air); const rhs_ty = self.air.typeOf(rhs_air); - if (lhs_ty.zigTypeTag() == .Vector) { - return self.fail("TODO implement genBinOp for {}", .{lhs_ty.fmt(self.bin_file.options.module.?)}); - } + const abi_size = @intCast(u32, lhs_ty.abiSize(self.target.*)); - switch (lhs) { + switch (lhs_mcv) { .immediate => |imm| switch (imm) { - 0 => switch (tag) { + 0 => switch (air_tag) { .sub, .subwrap => return self.genUnOp(maybe_inst, .neg, rhs_air), else => {}, }, @@ -5827,9 +5873,10 @@ fn genBinOp( else => {}, } - const is_commutative = switch (tag) { + const is_commutative = switch (air_tag) { .add, .addwrap, + .mul, .bool_or, .bit_or, .bool_and, @@ -5841,48 +5888,42 @@ fn genBinOp( else => false, }; - const dst_mem_ok = switch (tag) { - .add, - .addwrap, - .sub, - .subwrap, - .mul, - .div_float, - .div_exact, - .div_trunc, - .div_floor, - => !lhs_ty.isRuntimeFloat(), - - else => true, + const vec_op = switch (lhs_ty.zigTypeTag()) { + else => false, + .Float, .Vector => true, }; - const lhs_lock: ?RegisterLock = switch (lhs) { + const lhs_lock: ?RegisterLock = switch (lhs_mcv) { .register => |reg| self.register_manager.lockRegAssumeUnused(reg), else => null, }; defer if (lhs_lock) |lock| self.register_manager.unlockReg(lock); - const rhs_lock: ?RegisterLock = switch (rhs) { + const rhs_lock: ?RegisterLock = switch (rhs_mcv) { .register => |reg| self.register_manager.lockReg(reg), else => null, }; defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock); - var flipped: bool = false; + var flipped = false; + var copied_to_dst = true; const 
dst_mcv: MCValue = dst: { if (maybe_inst) |inst| { - if ((dst_mem_ok or lhs.isRegister()) and self.reuseOperand(inst, lhs_air, 0, lhs)) { - break :dst lhs; + if ((!vec_op or lhs_mcv.isRegister()) and self.reuseOperand(inst, lhs_air, 0, lhs_mcv)) { + break :dst lhs_mcv; } - if (is_commutative and (dst_mem_ok or rhs.isRegister()) and - self.reuseOperand(inst, rhs_air, 1, rhs)) + if (is_commutative and (!vec_op or rhs_mcv.isRegister()) and + self.reuseOperand(inst, rhs_air, 1, rhs_mcv)) { flipped = true; - break :dst rhs; + break :dst rhs_mcv; } } const dst_mcv = try self.allocRegOrMemAdvanced(lhs_ty, maybe_inst, true); - try self.genCopy(lhs_ty, dst_mcv, lhs); + if (vec_op and lhs_mcv.isRegister() and self.hasFeature(.avx)) + copied_to_dst = false + else + try self.genCopy(lhs_ty, dst_mcv, lhs_mcv); break :dst dst_mcv; }; const dst_lock: ?RegisterLock = switch (dst_mcv) { @@ -5891,160 +5932,47 @@ fn genBinOp( }; defer if (dst_lock) |lock| self.register_manager.unlockReg(lock); - const src_mcv = if (flipped) lhs else rhs; - switch (tag) { - .add, - .addwrap, - => try self.genBinOpMir(switch (lhs_ty.zigTypeTag()) { - else => .add, - .Float => switch (lhs_ty.floatBits(self.target.*)) { - 32 => if (Target.x86.featureSetHas(self.target.cpu.features, .sse)) - .addss - else - return self.fail("TODO implement genBinOp for {s} {} without sse", .{ - @tagName(tag), lhs_ty.fmt(self.bin_file.options.module.?), - }), - 64 => if (Target.x86.featureSetHas(self.target.cpu.features, .sse2)) - .addsd - else - return self.fail("TODO implement genBinOp for {s} {} without sse2", .{ - @tagName(tag), lhs_ty.fmt(self.bin_file.options.module.?), - }), - else => return self.fail("TODO implement genBinOp for {s} {}", .{ - @tagName(tag), lhs_ty.fmt(self.bin_file.options.module.?), - }), - }, - }, lhs_ty, dst_mcv, src_mcv), - - .sub, - .subwrap, - => try self.genBinOpMir(switch (lhs_ty.zigTypeTag()) { - else => .sub, - .Float => switch (lhs_ty.floatBits(self.target.*)) { - 32 => if (Target.x86.featureSetHas(self.target.cpu.features, .sse)) - .subss - else - return self.fail("TODO implement genBinOp for {s} {} without sse", .{ - @tagName(tag), lhs_ty.fmt(self.bin_file.options.module.?), - }), - 64 => if (Target.x86.featureSetHas(self.target.cpu.features, .sse2)) - .subsd - else - return self.fail("TODO implement genBinOp for {s} {} without sse2", .{ - @tagName(tag), lhs_ty.fmt(self.bin_file.options.module.?), - }), - else => return self.fail("TODO implement genBinOp for {s} {}", .{ - @tagName(tag), lhs_ty.fmt(self.bin_file.options.module.?), - }), - }, - }, lhs_ty, dst_mcv, src_mcv), - - .mul => try self.genBinOpMir(switch (lhs_ty.zigTypeTag()) { - else => return self.fail("TODO implement genBinOp for {s} {}", .{ - @tagName(tag), lhs_ty.fmt(self.bin_file.options.module.?), - }), - .Float => switch (lhs_ty.floatBits(self.target.*)) { - 32 => if (Target.x86.featureSetHas(self.target.cpu.features, .sse)) - .mulss - else - return self.fail("TODO implement genBinOp for {s} {} without sse", .{ - @tagName(tag), lhs_ty.fmt(self.bin_file.options.module.?), - }), - 64 => if (Target.x86.featureSetHas(self.target.cpu.features, .sse2)) - .mulsd - else - return self.fail("TODO implement genBinOp for {s} {} without sse2", .{ - @tagName(tag), lhs_ty.fmt(self.bin_file.options.module.?), - }), - else => return self.fail("TODO implement genBinOp for {s} {}", .{ - @tagName(tag), lhs_ty.fmt(self.bin_file.options.module.?), - }), - }, - }, lhs_ty, dst_mcv, src_mcv), + const src_mcv = if (flipped) lhs_mcv else rhs_mcv; + if (!vec_op) { + switch 
(air_tag) { + .add, + .addwrap, + => try self.genBinOpMir(.add, lhs_ty, dst_mcv, src_mcv), - .div_float, - .div_exact, - .div_trunc, - .div_floor, - => { - try self.genBinOpMir(switch (lhs_ty.zigTypeTag()) { - else => return self.fail("TODO implement genBinOp for {s} {}", .{ - @tagName(tag), lhs_ty.fmt(self.bin_file.options.module.?), - }), - .Float => switch (lhs_ty.floatBits(self.target.*)) { - 32 => if (Target.x86.featureSetHas(self.target.cpu.features, .sse)) - .divss - else - return self.fail("TODO implement genBinOp for {s} {} without sse", .{ - @tagName(tag), lhs_ty.fmt(self.bin_file.options.module.?), - }), - 64 => if (Target.x86.featureSetHas(self.target.cpu.features, .sse2)) - .divsd - else - return self.fail("TODO implement genBinOp for {s} {} without sse2", .{ - @tagName(tag), lhs_ty.fmt(self.bin_file.options.module.?), - }), - else => return self.fail("TODO implement genBinOp for {s} {}", .{ - @tagName(tag), lhs_ty.fmt(self.bin_file.options.module.?), - }), - }, - }, lhs_ty, dst_mcv, src_mcv); - switch (tag) { - .div_float, - .div_exact, - => {}, - .div_trunc, - .div_floor, - => if (self.hasFeature(.sse4_1)) { - const abi_size = @intCast(u32, lhs_ty.abiSize(self.target.*)); - const dst_alias = registerAlias(dst_mcv.register, abi_size); - try self.asmRegisterRegisterImmediate(switch (lhs_ty.floatBits(self.target.*)) { - 32 => .roundss, - 64 => .roundsd, - else => unreachable, - }, dst_alias, dst_alias, Immediate.u(switch (tag) { - .div_trunc => 0b1_0_11, - .div_floor => 0b1_0_01, - else => unreachable, - })); - } else return self.fail("TODO implement genBinOp for {s} {} without sse4_1", .{ - @tagName(tag), lhs_ty.fmt(self.bin_file.options.module.?), - }), - else => unreachable, - } - }, + .sub, + .subwrap, + => try self.genBinOpMir(.sub, lhs_ty, dst_mcv, src_mcv), - .ptr_add, - .ptr_sub, - => { - const tmp_reg = try self.copyToTmpRegister(rhs_ty, src_mcv); - const tmp_mcv = MCValue{ .register = tmp_reg }; - const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg); - defer self.register_manager.unlockReg(tmp_lock); + .ptr_add, + .ptr_sub, + => { + const tmp_reg = try self.copyToTmpRegister(rhs_ty, src_mcv); + const tmp_mcv = MCValue{ .register = tmp_reg }; + const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg); + defer self.register_manager.unlockReg(tmp_lock); - const elem_size = lhs_ty.elemType2().abiSize(self.target.*); - try self.genIntMulComplexOpMir(rhs_ty, tmp_mcv, .{ .immediate = elem_size }); - try self.genBinOpMir(switch (tag) { - .ptr_add => .add, - .ptr_sub => .sub, - else => unreachable, - }, lhs_ty, dst_mcv, tmp_mcv); - }, + const elem_size = lhs_ty.elemType2().abiSize(self.target.*); + try self.genIntMulComplexOpMir(rhs_ty, tmp_mcv, .{ .immediate = elem_size }); + try self.genBinOpMir(switch (air_tag) { + .ptr_add => .add, + .ptr_sub => .sub, + else => unreachable, + }, lhs_ty, dst_mcv, tmp_mcv); + }, - .bool_or, - .bit_or, - => try self.genBinOpMir(.@"or", lhs_ty, dst_mcv, src_mcv), + .bool_or, + .bit_or, + => try self.genBinOpMir(.@"or", lhs_ty, dst_mcv, src_mcv), - .bool_and, - .bit_and, - => try self.genBinOpMir(.@"and", lhs_ty, dst_mcv, src_mcv), + .bool_and, + .bit_and, + => try self.genBinOpMir(.@"and", lhs_ty, dst_mcv, src_mcv), - .xor => try self.genBinOpMir(.xor, lhs_ty, dst_mcv, src_mcv), + .xor => try self.genBinOpMir(.xor, lhs_ty, dst_mcv, src_mcv), - .min, - .max, - => switch (lhs_ty.zigTypeTag()) { - .Int => { + .min, + .max, + => { const mat_src_mcv: MCValue = if (switch (src_mcv) { .immediate, .eflags, @@ -6070,12 
+5998,12 @@ fn genBinOp( const int_info = lhs_ty.intInfo(self.target.*); const cc: Condition = switch (int_info.signedness) { - .unsigned => switch (tag) { + .unsigned => switch (air_tag) { .min => .a, .max => .b, else => unreachable, }, - .signed => switch (tag) { + .signed => switch (air_tag) { .min => .g, .max => .l, else => unreachable, @@ -6134,26 +6062,222 @@ fn genBinOp( } try self.genCopy(lhs_ty, dst_mcv, .{ .register = tmp_reg }); }, - .Float => try self.genBinOpMir(switch (lhs_ty.floatBits(self.target.*)) { - 32 => switch (tag) { - .min => .minss, - .max => .maxss, - else => unreachable, - }, - 64 => switch (tag) { - .min => .minsd, - .max => .maxsd, - else => unreachable, - }, - else => return self.fail("TODO implement genBinOp for {s} {}", .{ - @tagName(tag), lhs_ty.fmt(self.bin_file.options.module.?), - }), - }, lhs_ty, dst_mcv, src_mcv), + else => return self.fail("TODO implement genBinOp for {s} {}", .{ - @tagName(tag), lhs_ty.fmt(self.bin_file.options.module.?), + @tagName(air_tag), lhs_ty.fmt(self.bin_file.options.module.?), }), - }, + } + return dst_mcv; + } + const mir_tag = if (@as(?Mir.Inst.Tag, switch (lhs_ty.zigTypeTag()) { + else => unreachable, + .Float => switch (lhs_ty.floatBits(self.target.*)) { + 32 => switch (air_tag) { + .add => if (self.hasFeature(.avx)) .vaddss else .addss, + .sub => if (self.hasFeature(.avx)) .vsubss else .subss, + .mul => if (self.hasFeature(.avx)) .vmulss else .mulss, + .div_float, + .div_trunc, + .div_floor, + .div_exact, + => if (self.hasFeature(.avx)) .vdivss else .divss, + .max => if (self.hasFeature(.avx)) .vmaxss else .maxss, + .min => if (self.hasFeature(.avx)) .vminss else .minss, + else => unreachable, + }, + 64 => switch (air_tag) { + .add => if (self.hasFeature(.avx)) .vaddsd else .addsd, + .sub => if (self.hasFeature(.avx)) .vsubsd else .subsd, + .mul => if (self.hasFeature(.avx)) .vmulsd else .mulsd, + .div_float, + .div_trunc, + .div_floor, + .div_exact, + => if (self.hasFeature(.avx)) .vdivsd else .divsd, + .max => if (self.hasFeature(.avx)) .vmaxsd else .maxsd, + .min => if (self.hasFeature(.avx)) .vminsd else .minsd, + else => unreachable, + }, + 16, 80, 128 => null, + else => unreachable, + }, + .Vector => switch (lhs_ty.childType().zigTypeTag()) { + else => null, + .Float => switch (lhs_ty.childType().floatBits(self.target.*)) { + 32 => switch (lhs_ty.vectorLen()) { + 1 => switch (air_tag) { + .add => if (self.hasFeature(.avx)) .vaddss else .addss, + .sub => if (self.hasFeature(.avx)) .vsubss else .subss, + .mul => if (self.hasFeature(.avx)) .vmulss else .mulss, + .div_float, + .div_trunc, + .div_floor, + .div_exact, + => if (self.hasFeature(.avx)) .vdivss else .divss, + .max => if (self.hasFeature(.avx)) .vmaxss else .maxss, + .min => if (self.hasFeature(.avx)) .vminss else .minss, + else => unreachable, + }, + 2...4 => switch (air_tag) { + .add => if (self.hasFeature(.avx)) .vaddps else .addps, + .sub => if (self.hasFeature(.avx)) .vsubps else .subps, + .mul => if (self.hasFeature(.avx)) .vmulps else .mulps, + .div_float, + .div_trunc, + .div_floor, + .div_exact, + => if (self.hasFeature(.avx)) .vdivps else .divps, + .max => if (self.hasFeature(.avx)) .vmaxps else .maxps, + .min => if (self.hasFeature(.avx)) .vminps else .minps, + else => unreachable, + }, + 5...8 => if (self.hasFeature(.avx)) switch (air_tag) { + .add => .vaddps, + .sub => .vsubps, + .mul => .vmulps, + .div_float, .div_trunc, .div_floor, .div_exact => .vdivps, + .max => .vmaxps, + .min => .vminps, + else => unreachable, + } else null, + else => 
null, + }, + 64 => switch (lhs_ty.vectorLen()) { + 1 => switch (air_tag) { + .add => if (self.hasFeature(.avx)) .vaddsd else .addsd, + .sub => if (self.hasFeature(.avx)) .vsubsd else .subsd, + .mul => if (self.hasFeature(.avx)) .vmulsd else .mulsd, + .div_float, + .div_trunc, + .div_floor, + .div_exact, + => if (self.hasFeature(.avx)) .vdivsd else .divsd, + .max => if (self.hasFeature(.avx)) .vmaxsd else .maxsd, + .min => if (self.hasFeature(.avx)) .vminsd else .minsd, + else => unreachable, + }, + 2 => switch (air_tag) { + .add => if (self.hasFeature(.avx)) .vaddpd else .addpd, + .sub => if (self.hasFeature(.avx)) .vsubpd else .subpd, + .mul => if (self.hasFeature(.avx)) .vmulpd else .mulpd, + .div_float, + .div_trunc, + .div_floor, + .div_exact, + => if (self.hasFeature(.avx)) .vdivpd else .divpd, + .max => if (self.hasFeature(.avx)) .vmaxpd else .maxpd, + .min => if (self.hasFeature(.avx)) .vminpd else .minpd, + else => unreachable, + }, + 3...4 => if (self.hasFeature(.avx)) switch (air_tag) { + .add => .vaddpd, + .sub => .vsubpd, + .mul => .vmulpd, + .div_float, .div_trunc, .div_floor, .div_exact => .vdivpd, + .max => .vmaxpd, + .min => .vminpd, + else => unreachable, + } else null, + else => null, + }, + 16, 80, 128 => null, + else => unreachable, + }, + }, + })) |tag| tag else return self.fail("TODO implement genBinOp for {s} {}", .{ + @tagName(air_tag), lhs_ty.fmt(self.bin_file.options.module.?), + }); + const dst_alias = registerAlias(dst_mcv.getReg().?, abi_size); + if (self.hasFeature(.avx)) { + const src1_alias = + if (copied_to_dst) dst_alias else registerAlias(lhs_mcv.getReg().?, abi_size); + if (src_mcv.isMemory()) try self.asmRegisterRegisterMemory( + mir_tag, + dst_alias, + src1_alias, + src_mcv.mem(Memory.PtrSize.fromSize(abi_size)), + ) else try self.asmRegisterRegisterRegister( + mir_tag, + dst_alias, + src1_alias, + registerAlias(if (src_mcv.isRegister()) + src_mcv.getReg().? + else + try self.copyToTmpRegister(rhs_ty, src_mcv), abi_size), + ); + } else { + assert(copied_to_dst); + if (src_mcv.isMemory()) try self.asmRegisterMemory( + mir_tag, + dst_alias, + src_mcv.mem(Memory.PtrSize.fromSize(abi_size)), + ) else try self.asmRegisterRegister( + mir_tag, + dst_alias, + registerAlias(if (src_mcv.isRegister()) + src_mcv.getReg().? 
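The mnemonic table above picks a scalar instruction for single-lane vectors (addss/addsd and friends), the 128-bit packed form where all lanes fit in an xmm register, and the AVX-only 256-bit packed form beyond that; div_trunc and div_floor additionally get a rounding fixup further down, using immediate 0b1_0_11 (truncate) or 0b1_0_01 (floor). At the language level this covers ordinary vector arithmetic, for example (a hypothetical minimal case):

    const std = @import("std");

    test "float vector binary op" {
        const a: @Vector(4, f32) = .{ 1.0, 2.0, 3.0, 4.0 };
        const b: @Vector(4, f32) = .{ 4.0, 3.0, 2.0, 1.0 };
        const sum = a + b; // selects addps, or vaddps when AVX is available
        try std.testing.expectEqual(@as(f32, 5.0), sum[0]);
    }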
+ else + try self.copyToTmpRegister(rhs_ty, src_mcv), abi_size), + ); + } + switch (air_tag) { + .add, .sub, .mul, .div_float, .div_exact => {}, + .div_trunc, .div_floor => if (self.hasFeature(.sse4_1)) { + const round_tag = if (@as(?Mir.Inst.Tag, switch (lhs_ty.zigTypeTag()) { + .Float => switch (lhs_ty.floatBits(self.target.*)) { + 32 => if (self.hasFeature(.avx)) .vroundss else .roundss, + 64 => if (self.hasFeature(.avx)) .vroundsd else .roundsd, + 16, 80, 128 => null, + else => unreachable, + }, + .Vector => switch (lhs_ty.childType().zigTypeTag()) { + .Float => switch (lhs_ty.childType().floatBits(self.target.*)) { + 32 => switch (lhs_ty.vectorLen()) { + 1 => if (self.hasFeature(.avx)) .vroundss else .roundss, + 2...4 => if (self.hasFeature(.avx)) .vroundps else .roundps, + 5...8 => if (self.hasFeature(.avx)) .vroundps else null, + else => null, + }, + 64 => switch (lhs_ty.vectorLen()) { + 1 => if (self.hasFeature(.avx)) .vroundsd else .roundsd, + 2 => if (self.hasFeature(.avx)) .vroundpd else .roundpd, + 3...4 => if (self.hasFeature(.avx)) .vroundpd else null, + else => null, + }, + 16, 80, 128 => null, + else => unreachable, + }, + else => null, + }, + else => unreachable, + })) |tag| tag else return self.fail("TODO implement genBinOp for {s} {}", .{ + @tagName(air_tag), lhs_ty.fmt(self.bin_file.options.module.?), + }); + const round_mode = Immediate.u(switch (air_tag) { + .div_trunc => 0b1_0_11, + .div_floor => 0b1_0_01, + else => unreachable, + }); + switch (round_tag) { + .vroundss, .vroundsd => try self.asmRegisterRegisterRegisterImmediate( + round_tag, + dst_alias, + dst_alias, + dst_alias, + round_mode, + ), + else => try self.asmRegisterRegisterImmediate( + round_tag, + dst_alias, + dst_alias, + round_mode, + ), + } + } else return self.fail("TODO implement genBinOp for {s} {} without sse4_1", .{ + @tagName(air_tag), lhs_ty.fmt(self.bin_file.options.module.?), + }), + .max, .min => {}, // TODO: unordered select else => unreachable, } return dst_mcv; @@ -6186,20 +6310,11 @@ fn genBinOpMir(self: *Self, mir_tag: Mir.Inst.Tag, ty: Type, dst_mcv: MCValue, s .register_overflow, .reserved_frame, => unreachable, - .register => |src_reg| switch (ty.zigTypeTag()) { - .Float => { - if (!Target.x86.featureSetHas(self.target.cpu.features, .sse)) - return self.fail("TODO genBinOpMir for {s} {} without sse", .{ - @tagName(mir_tag), ty.fmt(self.bin_file.options.module.?), - }); - return self.asmRegisterRegister(mir_tag, dst_reg.to128(), src_reg.to128()); - }, - else => try self.asmRegisterRegister( - mir_tag, - dst_alias, - registerAlias(src_reg, abi_size), - ), - }, + .register => |src_reg| try self.asmRegisterRegister( + mir_tag, + dst_alias, + registerAlias(src_reg, abi_size), + ), .immediate => |imm| switch (self.regBitSize(ty)) { 8 => try self.asmRegisterImmediate( mir_tag, @@ -9646,7 +9761,7 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void { lock.* = self.register_manager.lockRegAssumeUnused(reg); } - const tag = if (@as( + const mir_tag = if (@as( ?Mir.Inst.Tag, if (mem.eql(u2, &order, &.{ 1, 3, 2 }) or mem.eql(u2, &order, &.{ 3, 1, 2 })) switch (ty.zigTypeTag()) { @@ -9741,20 +9856,17 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void { const abi_size = @intCast(u32, ty.abiSize(self.target.*)); const mop1_reg = registerAlias(mops[0].getReg().?, abi_size); const mop2_reg = registerAlias(mops[1].getReg().?, abi_size); - if (mops[2].isRegister()) - try self.asmRegisterRegisterRegister( - tag, - mop1_reg, - mop2_reg, - registerAlias(mops[2].getReg().?, abi_size), - ) - else 
- try self.asmRegisterRegisterMemory( - tag, - mop1_reg, - mop2_reg, - mops[2].mem(Memory.PtrSize.fromSize(abi_size)), - ); + if (mops[2].isRegister()) try self.asmRegisterRegisterRegister( + mir_tag, + mop1_reg, + mop2_reg, + registerAlias(mops[2].getReg().?, abi_size), + ) else try self.asmRegisterRegisterMemory( + mir_tag, + mop1_reg, + mop2_reg, + mops[2].mem(Memory.PtrSize.fromSize(abi_size)), + ); return self.finishAir(inst, mops[0], ops); } diff --git a/src/arch/x86_64/Encoding.zig b/src/arch/x86_64/Encoding.zig index b242c98bdc..b8ccc9efba 100644 --- a/src/arch/x86_64/Encoding.zig +++ b/src/arch/x86_64/Encoding.zig @@ -262,61 +262,69 @@ pub const Mnemonic = enum { // MMX movd, // SSE - addss, + addps, addss, andps, andnps, cmpss, cvtsi2ss, - divss, - maxss, minss, + divps, divss, + maxps, maxss, + minps, minss, movaps, movss, movups, - mulss, + mulps, mulss, orps, pextrw, pinsrw, - sqrtps, - sqrtss, - subss, + sqrtps, sqrtss, + subps, subss, ucomiss, xorps, // SSE2 - addsd, + addpd, addsd, andpd, andnpd, //cmpsd, cvtsd2ss, cvtsi2sd, cvtss2sd, - divsd, - maxsd, minsd, + divpd, divsd, + maxpd, maxsd, + minpd, minsd, movapd, movq, //movd, movsd, movupd, - mulsd, + mulpd, mulsd, orpd, pshufhw, pshuflw, psrld, psrlq, psrlw, punpckhbw, punpckhdq, punpckhqdq, punpckhwd, punpcklbw, punpckldq, punpcklqdq, punpcklwd, sqrtpd, sqrtsd, - subsd, + subpd, subsd, ucomisd, xorpd, // SSE3 movddup, movshdup, movsldup, // SSE4.1 - roundsd, roundss, + roundpd, roundps, roundsd, roundss, // AVX + vaddpd, vaddps, vaddsd, vaddss, vcvtsd2ss, vcvtsi2sd, vcvtsi2ss, vcvtss2sd, + vdivpd, vdivps, vdivsd, vdivss, + vmaxpd, vmaxps, vmaxsd, vmaxss, + vminpd, vminps, vminsd, vminss, vmovapd, vmovaps, vmovddup, vmovsd, vmovshdup, vmovsldup, vmovss, vmovupd, vmovups, + vmulpd, vmulps, vmulsd, vmulss, vpextrw, vpinsrw, vpshufhw, vpshuflw, vpsrld, vpsrlq, vpsrlw, vpunpckhbw, vpunpckhdq, vpunpckhqdq, vpunpckhwd, vpunpcklbw, vpunpckldq, vpunpcklqdq, vpunpcklwd, + vroundpd, vroundps, vroundsd, vroundss, vsqrtpd, vsqrtps, vsqrtsd, vsqrtss, + vsubpd, vsubps, vsubsd, vsubss, // F16C vcvtph2ps, vcvtps2ph, // FMA diff --git a/src/arch/x86_64/Lower.zig b/src/arch/x86_64/Lower.zig index 39ad2313e7..2cfa25ac84 100644 --- a/src/arch/x86_64/Lower.zig +++ b/src/arch/x86_64/Lower.zig @@ -124,27 +124,34 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct { .xchg, .xor, + .addps, .addss, .andnps, .andps, .cmpss, .cvtsi2ss, + .divps, .divss, + .maxps, .maxss, + .minps, .minss, .movaps, .movss, .movups, + .mulps, .mulss, .orps, .pextrw, .pinsrw, .sqrtps, .sqrtss, + .subps, .subss, .ucomiss, .xorps, + .addpd, .addsd, .andnpd, .andpd, @@ -152,10 +159,14 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct { .cvtsd2ss, .cvtsi2sd, .cvtss2sd, + .divpd, .divsd, + .maxpd, .maxsd, + .minpd, .minsd, .movsd, + .mulpd, .mulsd, .orpd, .pshufhw, @@ -173,6 +184,7 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct { .punpcklwd, .sqrtpd, .sqrtsd, + .subpd, .subsd, .ucomisd, .xorpd, @@ -181,13 +193,31 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct { .movshdup, .movsldup, + .roundpd, + .roundps, .roundsd, .roundss, + .vaddpd, + .vaddps, + .vaddsd, + .vaddss, .vcvtsd2ss, .vcvtsi2sd, .vcvtsi2ss, .vcvtss2sd, + .vdivpd, + .vdivps, + .vdivsd, + .vdivss, + .vmaxpd, + .vmaxps, + .vmaxsd, + .vmaxss, + .vminpd, + .vminps, + .vminsd, + .vminss, .vmovapd, .vmovaps, .vmovddup, @@ -197,6 +227,10 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct { .vmovss, .vmovupd, 
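 // A minimal sketch of how one mnemonic in this prong flows through the
 // backend (names taken from this patch series, not invented): CodeGen emits
 // a `Mir.Inst.Tag` such as `.vmulps`, this list routes it through the shared
 // generic lowering below, and the encoder then resolves it against its
 // table row in encodings.zig, e.g.:
 //   .{ .vmulps, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x0f, 0x59 }, 0, .vex_128_wig, .avx },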
.vmovups, + .vmulpd, + .vmulps, + .vmulsd, + .vmulss, .vpextrw, .vpinsrw, .vpshufhw, @@ -212,10 +246,18 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct { .vpunpckldq, .vpunpcklqdq, .vpunpcklwd, + .vroundpd, + .vroundps, + .vroundsd, + .vroundss, .vsqrtpd, .vsqrtps, .vsqrtsd, .vsqrtss, + .vsubpd, + .vsubps, + .vsubsd, + .vsubss, .vcvtph2ps, .vcvtps2ph, @@ -304,6 +346,7 @@ fn imm(lower: Lower, ops: Mir.Inst.Ops, i: u32) Immediate { .lock_mi_rip_s, => Immediate.s(@bitCast(i32, i)), + .rrri, .rri_u, .ri_u, .i_u, @@ -429,6 +472,12 @@ fn mirGeneric(lower: *Lower, inst: Mir.Inst) Error!void { .{ .reg = inst.data.rrr.r2 }, .{ .reg = inst.data.rrr.r3 }, }, + .rrri => &.{ + .{ .reg = inst.data.rrri.r1 }, + .{ .reg = inst.data.rrri.r2 }, + .{ .reg = inst.data.rrri.r3 }, + .{ .imm = lower.imm(inst.ops, inst.data.rrri.i) }, + }, .ri_s, .ri_u => &.{ .{ .reg = inst.data.ri.r }, .{ .imm = lower.imm(inst.ops, inst.data.ri.i) }, diff --git a/src/arch/x86_64/Mir.zig b/src/arch/x86_64/Mir.zig index b6df0fff09..c0450406cf 100644 --- a/src/arch/x86_64/Mir.zig +++ b/src/arch/x86_64/Mir.zig @@ -166,7 +166,9 @@ pub const Inst = struct { /// Logical exclusive-or xor, - /// Add single precision floating point values + /// Add packed single-precision floating-point values + addps, + /// Add scalar single-precision floating-point values addss, /// Bitwise logical and of packed single precision floating-point values andps, @@ -176,11 +178,17 @@ pub const Inst = struct { cmpss, /// Convert doubleword integer to scalar single-precision floating-point value cvtsi2ss, + /// Divide packed single-precision floating-point values + divps, /// Divide scalar single-precision floating-point values divss, - /// Return maximum single-precision floating-point value + /// Maximum of packed single-precision floating-point values + maxps, + /// Maximum of scalar single-precision floating-point values maxss, - /// Return minimum single-precision floating-point value + /// Minimum of packed single-precision floating-point values + minps, + /// Minimum of scalar single-precision floating-point values minss, /// Move aligned packed single-precision floating-point values movaps, @@ -188,6 +196,8 @@ pub const Inst = struct { movss, /// Move unaligned packed single-precision floating-point values movups, + /// Multiply packed single-precision floating-point values + mulps, /// Multiply scalar single-precision floating-point values mulss, /// Bitwise logical or of packed single precision floating-point values @@ -196,18 +206,22 @@ pub const Inst = struct { pextrw, /// Insert word pinsrw, - /// Square root of scalar single precision floating-point value + /// Square root of packed single-precision floating-point values sqrtps, - /// Subtract scalar single-precision floating-point values + /// Square root of scalar single-precision floating-point value sqrtss, - /// Square root of single precision floating-point values + /// Subtract packed single-precision floating-point values + subps, + /// Subtract scalar single-precision floating-point values subss, /// Unordered compare scalar single-precision floating-point values ucomiss, /// Bitwise logical xor of packed single precision floating-point values xorps, - /// Add double precision floating point values + /// Add packed double-precision floating-point values + addpd, + /// Add scalar double-precision floating-point values addsd, /// Bitwise logical and not of packed double precision floating-point values andnpd, @@ -221,14 +235,22 @@ pub const Inst = struct { cvtsi2sd, 
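 // Reading aid for the mnemonic suffixes documented in this enum (standard
 // SSE naming, not specific to this patch): `ss`/`sd` are scalar single- and
 // double-precision, `ps`/`pd` are packed; e.g. `addps xmm0, xmm1` adds four
 // f32 lanes at once, while `addss xmm0, xmm1` only touches lane 0.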
/// Convert scalar single-precision floating-point value to scalar double-precision floating-point value cvtss2sd, + /// Divide packed double-precision floating-point values + divpd, /// Divide scalar double-precision floating-point values divsd, - /// Return maximum double-precision floating-point value + /// Maximum of packed double-precision floating-point values + maxpd, + /// Maximum of scalar double-precision floating-point values maxsd, - /// Return minimum double-precision floating-point value + /// Minimum of packed double-precision floating-point values + minpd, + /// Minimum of scalar double-precision floating-point values minsd, /// Move scalar double-precision floating-point value movsd, + /// Multiply packed double-precision floating-point values + mulpd, /// Multiply scalar double-precision floating-point values mulsd, /// Bitwise logical or of packed double precision floating-point values @@ -263,6 +285,8 @@ pub const Inst = struct { sqrtpd, /// Square root of scalar double precision floating-point value sqrtsd, + /// Subtract packed double-precision floating-point values + subpd, /// Subtract scalar double-precision floating-point values subsd, /// Unordered compare scalar double-precision floating-point values @@ -277,11 +301,23 @@ pub const Inst = struct { /// Replicate single floating-point values movsldup, - /// Round scalar double-precision floating-point values + /// Round packed double-precision floating-point values + roundpd, + /// Round packed single-precision floating-point values + roundps, + /// Round scalar double-precision floating-point value roundsd, - /// Round scalar single-precision floating-point values + /// Round scalar single-precision floating-point value roundss, + /// Add packed double-precision floating-point values + vaddpd, + /// Add packed single-precision floating-point values + vaddps, + /// Add scalar double-precision floating-point values + vaddsd, + /// Add scalar single-precision floating-point values + vaddss, /// Convert scalar double-precision floating-point value to scalar single-precision floating-point value vcvtsd2ss, /// Convert doubleword integer to scalar double-precision floating-point value @@ -290,6 +326,30 @@ pub const Inst = struct { vcvtsi2ss, /// Convert scalar single-precision floating-point value to scalar double-precision floating-point value vcvtss2sd, + /// Divide packed double-precision floating-point values + vdivpd, + /// Divide packed single-precision floating-point values + vdivps, + /// Divide scalar double-precision floating-point values + vdivsd, + /// Divide scalar single-precision floating-point values + vdivss, + /// Maximum of packed double-precision floating-point values + vmaxpd, + /// Maximum of packed single-precision floating-point values + vmaxps, + /// Maximum of scalar double-precision floating-point values + vmaxsd, + /// Maximum of scalar single-precision floating-point values + vmaxss, + /// Minimum of packed double-precision floating-point values + vminpd, + /// Minimum of packed single-precision floating-point values + vminps, + /// Minimum of scalar double-precision floating-point values + vminsd, + /// Minimum of scalar single-precision floating-point values + vminss, /// Move aligned packed double-precision floating-point values vmovapd, /// Move aligned packed single-precision floating-point values @@ -308,6 +368,14 @@ pub const Inst = struct { vmovupd, /// Move unaligned packed single-precision floating-point values vmovups, + /// Multiply packed double-precision floating-point values 
+ vmulpd, + /// Multiply packed single-precision floating-point values + vmulps, + /// Multiply scalar double-precision floating-point values + vmulsd, + /// Multiply scalar single-precision floating-point values + vmulss, /// Extract word vpextrw, /// Insert word @@ -338,6 +406,14 @@ pub const Inst = struct { vpunpcklqdq, /// Unpack low data vpunpcklwd, + /// Round packed double-precision floating-point values + vroundpd, + /// Round packed single-precision floating-point values + vroundps, + /// Round scalar double-precision floating-point value + vroundsd, + /// Round scalar single-precision floating-point value + vroundss, /// Square root of packed double-precision floating-point value vsqrtpd, /// Square root of packed single-precision floating-point value @@ -346,6 +422,14 @@ pub const Inst = struct { vsqrtsd, /// Square root of scalar single-precision floating-point value vsqrtss, + /// Subtract packed double-precision floating-point values + vsubpd, + /// Subtract packed single-precision floating-point values + vsubps, + /// Subtract scalar double-precision floating-point values + vsubsd, + /// Subtract scalar single-precision floating-point values + vsubss, /// Convert 16-bit floating-point values to single-precision floating-point values vcvtph2ps, @@ -442,6 +526,9 @@ pub const Inst = struct { /// Register, register, register operands. /// Uses `rrr` payload. rrr, + /// Register, register, register, immediate (byte) operands. + /// Uses `rrri` payload. + rrri, /// Register, register, immediate (sign-extended) operands. /// Uses `rri` payload. rri_s, @@ -625,6 +712,12 @@ pub const Inst = struct { r2: Register, r3: Register, }, + rrri: struct { + r1: Register, + r2: Register, + r3: Register, + i: u8, + }, rri: struct { r1: Register, r2: Register, diff --git a/src/arch/x86_64/encodings.zig b/src/arch/x86_64/encodings.zig index 78bda4fc76..c41f0ea4e7 100644 --- a/src/arch/x86_64/encodings.zig +++ b/src/arch/x86_64/encodings.zig @@ -837,6 +837,8 @@ pub const table = [_]Entry{ .{ .xor, .rm, &.{ .r64, .rm64 }, &.{ 0x33 }, 0, .long, .none }, // SSE + .{ .addps, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x58 }, 0, .none, .sse }, + .{ .addss, .rm, &.{ .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x58 }, 0, .none, .sse }, .{ .andnps, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x55 }, 0, .none, .sse }, @@ -848,10 +850,16 @@ pub const table = [_]Entry{ .{ .cvtsi2ss, .rm, &.{ .xmm, .rm32 }, &.{ 0xf3, 0x0f, 0x2a }, 0, .none, .sse }, .{ .cvtsi2ss, .rm, &.{ .xmm, .rm64 }, &.{ 0xf3, 0x0f, 0x2a }, 0, .long, .sse }, + .{ .divps, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x5e }, 0, .none, .sse }, + .{ .divss, .rm, &.{ .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x5e }, 0, .none, .sse }, + .{ .maxps, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x5f }, 0, .none, .sse }, + .{ .maxss, .rm, &.{ .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x5f }, 0, .none, .sse }, + .{ .minps, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x5d }, 0, .none, .sse }, + .{ .minss, .rm, &.{ .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x5d }, 0, .none, .sse }, .{ .movaps, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x28 }, 0, .none, .sse }, @@ -863,10 +871,14 @@ pub const table = [_]Entry{ .{ .movups, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x10 }, 0, .none, .sse }, .{ .movups, .mr, &.{ .xmm_m128, .xmm }, &.{ 0x0f, 0x11 }, 0, .none, .sse }, + .{ .mulps, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x59 }, 0, .none, .sse }, + .{ .mulss, .rm, &.{ .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x59 }, 0, .none, .sse }, .{ .orps, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x56 }, 0, .none, .sse }, + .{ .subps, .rm, &.{ .xmm, 
.xmm_m128 }, &.{ 0x0f, 0x5c }, 0, .none, .sse }, + .{ .subss, .rm, &.{ .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x5c }, 0, .none, .sse }, .{ .sqrtps, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x51 }, 0, .none, .sse }, @@ -878,6 +890,8 @@ pub const table = [_]Entry{ .{ .xorps, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x57 }, 0, .none, .sse }, // SSE2 + .{ .addpd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x58 }, 0, .none, .sse2 }, + .{ .addsd, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x58 }, 0, .none, .sse2 }, .{ .andnpd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x55 }, 0, .none, .sse2 }, @@ -893,10 +907,16 @@ pub const table = [_]Entry{ .{ .cvtss2sd, .rm, &.{ .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x5a }, 0, .none, .sse2 }, + .{ .divpd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x5e }, 0, .none, .sse2 }, + .{ .divsd, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x5e }, 0, .none, .sse2 }, + .{ .maxpd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x5f }, 0, .none, .sse2 }, + .{ .maxsd, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x5f }, 0, .none, .sse2 }, + .{ .minpd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x5d }, 0, .none, .sse2 }, + .{ .minsd, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x5d }, 0, .none, .sse2 }, .{ .movapd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x28 }, 0, .none, .sse2 }, @@ -914,6 +934,8 @@ pub const table = [_]Entry{ .{ .movupd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x10 }, 0, .none, .sse2 }, .{ .movupd, .mr, &.{ .xmm_m128, .xmm }, &.{ 0x66, 0x0f, 0x11 }, 0, .none, .sse2 }, + .{ .mulpd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x59 }, 0, .none, .sse2 }, + .{ .mulsd, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x59 }, 0, .none, .sse2 }, .{ .orpd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x56 }, 0, .none, .sse2 }, @@ -947,6 +969,8 @@ pub const table = [_]Entry{ .{ .sqrtsd, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x51 }, 0, .none, .sse2 }, + .{ .subpd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x5c }, 0, .none, .sse2 }, + .{ .subsd, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x5c }, 0, .none, .sse2 }, .{ .movsd, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x10 }, 0, .none, .sse2 }, @@ -966,10 +990,25 @@ pub const table = [_]Entry{ // SSE4.1 .{ .pextrw, .mri, &.{ .r32_m16, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x15 }, 0, .none, .sse4_1 }, - .{ .roundss, .rmi, &.{ .xmm, .xmm_m32, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x0a }, 0, .none, .sse4_1 }, + .{ .roundpd, .rmi, &.{ .xmm, .xmm_m128, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x09 }, 0, .none, .sse4_1 }, + + .{ .roundps, .rmi, &.{ .xmm, .xmm_m128, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x08 }, 0, .none, .sse4_1 }, + .{ .roundsd, .rmi, &.{ .xmm, .xmm_m64, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x0b }, 0, .none, .sse4_1 }, + .{ .roundss, .rmi, &.{ .xmm, .xmm_m32, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x0a }, 0, .none, .sse4_1 }, + // AVX + .{ .vaddpd, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x58 }, 0, .vex_128_wig, .avx }, + .{ .vaddpd, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x58 }, 0, .vex_256_wig, .avx }, + + .{ .vaddps, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x0f, 0x58 }, 0, .vex_128_wig, .avx }, + .{ .vaddps, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x0f, 0x58 }, 0, .vex_256_wig, .avx }, + + .{ .vaddsd, .rvm, &.{ .xmm, .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x58 }, 0, .vex_lig_wig, .avx }, + + .{ .vaddss, .rvm, &.{ .xmm, .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x58 }, 0, .vex_lig_wig, .avx }, + .{ .vcvtsd2ss, .rvm, &.{ .xmm, .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x5a }, 0, .vex_lig_wig, .avx }, .{ .vcvtsi2sd, .rvm, &.{ .xmm, .xmm, .rm32 }, &.{ 
0xf2, 0x0f, 0x2a }, 0, .vex_lig_w0, .avx }, @@ -980,6 +1019,36 @@ pub const table = [_]Entry{ .{ .vcvtss2sd, .rvm, &.{ .xmm, .xmm, .xmm_m32 }, &.{ 0xf2, 0x0f, 0x5a }, 0, .vex_lig_wig, .avx }, + .{ .vdivpd, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x5e }, 0, .vex_128_wig, .avx }, + .{ .vdivpd, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x5e }, 0, .vex_256_wig, .avx }, + + .{ .vdivps, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x0f, 0x5e }, 0, .vex_128_wig, .avx }, + .{ .vdivps, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x0f, 0x5e }, 0, .vex_256_wig, .avx }, + + .{ .vdivsd, .rvm, &.{ .xmm, .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x5e }, 0, .vex_lig_wig, .avx }, + + .{ .vdivss, .rvm, &.{ .xmm, .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x5e }, 0, .vex_lig_wig, .avx }, + + .{ .vmaxpd, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x5f }, 0, .vex_128_wig, .avx }, + .{ .vmaxpd, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x5f }, 0, .vex_256_wig, .avx }, + + .{ .vmaxps, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x0f, 0x5f }, 0, .vex_128_wig, .avx }, + .{ .vmaxps, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x0f, 0x5f }, 0, .vex_256_wig, .avx }, + + .{ .vmaxsd, .rvm, &.{ .xmm, .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x5f }, 0, .vex_lig_wig, .avx }, + + .{ .vmaxss, .rvm, &.{ .xmm, .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x5f }, 0, .vex_lig_wig, .avx }, + + .{ .vminpd, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x5d }, 0, .vex_128_wig, .avx }, + .{ .vminpd, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x5d }, 0, .vex_256_wig, .avx }, + + .{ .vminps, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x0f, 0x5d }, 0, .vex_128_wig, .avx }, + .{ .vminps, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x0f, 0x5d }, 0, .vex_256_wig, .avx }, + + .{ .vminsd, .rvm, &.{ .xmm, .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x5d }, 0, .vex_lig_wig, .avx }, + + .{ .vminss, .rvm, &.{ .xmm, .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x5d }, 0, .vex_lig_wig, .avx }, + .{ .vmovapd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x28 }, 0, .vex_128_wig, .avx }, .{ .vmovapd, .mr, &.{ .xmm_m128, .xmm }, &.{ 0x66, 0x0f, 0x29 }, 0, .vex_128_wig, .avx }, .{ .vmovapd, .rm, &.{ .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x28 }, 0, .vex_256_wig, .avx }, @@ -1019,6 +1088,16 @@ pub const table = [_]Entry{ .{ .vmovups, .rm, &.{ .ymm, .ymm_m256 }, &.{ 0x0f, 0x10 }, 0, .vex_256_wig, .avx }, .{ .vmovups, .mr, &.{ .ymm_m256, .ymm }, &.{ 0x0f, 0x11 }, 0, .vex_256_wig, .avx }, + .{ .vmulpd, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x59 }, 0, .vex_128_wig, .avx }, + .{ .vmulpd, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x59 }, 0, .vex_256_wig, .avx }, + + .{ .vmulps, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x0f, 0x59 }, 0, .vex_128_wig, .avx }, + .{ .vmulps, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x0f, 0x59 }, 0, .vex_256_wig, .avx }, + + .{ .vmulsd, .rvm, &.{ .xmm, .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x59 }, 0, .vex_lig_wig, .avx }, + + .{ .vmulss, .rvm, &.{ .xmm, .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x59 }, 0, .vex_lig_wig, .avx }, + .{ .vpextrw, .rmi, &.{ .r32, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x15 }, 0, .vex_128_wig, .avx }, .{ .vpextrw, .mri, &.{ .r32_m16, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x15 }, 0, .vex_128_wig, .avx }, @@ -1041,6 +1120,16 @@ pub const table = [_]Entry{ .{ .vpunpckldq, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x62 }, 0, .vex_128_wig, .avx }, .{ .vpunpcklqdq, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x6c }, 0, .vex_128_wig, .avx }, + .{ .vroundpd, .rmi, &.{ .xmm, .xmm_m128, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x09 }, 0, 
.vex_128_wig, .avx }, + .{ .vroundpd, .rmi, &.{ .ymm, .ymm_m256, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x09 }, 0, .vex_256_wig, .avx }, + + .{ .vroundps, .rmi, &.{ .xmm, .xmm_m128, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x08 }, 0, .vex_128_wig, .avx }, + .{ .vroundps, .rmi, &.{ .ymm, .ymm_m256, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x08 }, 0, .vex_256_wig, .avx }, + + .{ .vroundsd, .rvmi, &.{ .xmm, .xmm, .xmm_m64, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x0b }, 0, .vex_lig_wig, .avx }, + + .{ .vroundss, .rvmi, &.{ .xmm, .xmm, .xmm_m32, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x0a }, 0, .vex_lig_wig, .avx }, + .{ .vsqrtpd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x51 }, 0, .vex_128_wig, .avx }, .{ .vsqrtpd, .rm, &.{ .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x51 }, 0, .vex_256_wig, .avx }, @@ -1051,6 +1140,16 @@ pub const table = [_]Entry{ .{ .vsqrtss, .rvm, &.{ .xmm, .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x51 }, 0, .vex_lig_wig, .avx }, + .{ .vsubpd, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x5c }, 0, .vex_128_wig, .avx }, + .{ .vsubpd, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x5c }, 0, .vex_256_wig, .avx }, + + .{ .vsubps, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x0f, 0x5c }, 0, .vex_128_wig, .avx }, + .{ .vsubps, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x0f, 0x5c }, 0, .vex_256_wig, .avx }, + + .{ .vsubsd, .rvm, &.{ .xmm, .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x5c }, 0, .vex_lig_wig, .avx }, + + .{ .vsubss, .rvm, &.{ .xmm, .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x5c }, 0, .vex_lig_wig, .avx }, + // F16C .{ .vcvtph2ps, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0x66, 0x0f, 0x38, 0x13 }, 0, .vex_128_w0, .f16c }, .{ .vcvtph2ps, .rm, &.{ .ymm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x13 }, 0, .vex_256_w0, .f16c }, -- cgit v1.2.3 From f8708e2c4d93eece5b3e131fd2d1b5b210806cd6 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Sun, 7 May 2023 10:04:56 -0400 Subject: x86_64: implement `@floor`, `@ceil`, and `@trunc` for float vectors --- src/arch/x86_64/CodeGen.zig | 176 ++++++++++++++++++++++---------------------- test/behavior/floatop.zig | 9 ++- 2 files changed, 93 insertions(+), 92 deletions(-) (limited to 'src/arch') diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 8c6f14ec3a..3e2d418105 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -1587,9 +1587,9 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .round, => try self.airUnaryMath(inst), - .floor => try self.airRound(inst, Immediate.u(0b1_0_01)), - .ceil => try self.airRound(inst, Immediate.u(0b1_0_10)), - .trunc_float => try self.airRound(inst, Immediate.u(0b1_0_11)), + .floor => try self.airRound(inst, 0b1_0_01), + .ceil => try self.airRound(inst, 0b1_0_10), + .trunc_float => try self.airRound(inst, 0b1_0_11), .sqrt => try self.airSqrt(inst), .neg, .fabs => try self.airFloatSign(inst), @@ -4509,49 +4509,91 @@ fn airFloatSign(self: *Self, inst: Air.Inst.Index) !void { return self.finishAir(inst, dst_mcv, .{ un_op, .none, .none }); } -fn airRound(self: *Self, inst: Air.Inst.Index, mode: Immediate) !void { +fn airRound(self: *Self, inst: Air.Inst.Index, mode: u4) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const ty = self.air.typeOf(un_op); - if (!self.hasFeature(.sse4_1)) - return self.fail("TODO implement airRound without sse4_1 feature", .{}); - const src_mcv = try self.resolveInst(un_op); const dst_mcv = if (src_mcv.isRegister() and self.reuseOperand(inst, un_op, 0, src_mcv)) src_mcv else try self.copyToRegisterWithInstTracking(inst, ty, src_mcv); + const dst_reg = 
dst_mcv.getReg().?; + const dst_lock = self.register_manager.lockReg(dst_reg); + defer if (dst_lock) |lock| self.register_manager.unlockReg(lock); + try self.genRound(ty, dst_reg, src_mcv, mode); + return self.finishAir(inst, dst_mcv, .{ un_op, .none, .none }); +} - const mir_tag: Mir.Inst.Tag = switch (ty.zigTypeTag()) { +fn genRound(self: *Self, ty: Type, dst_reg: Register, src_mcv: MCValue, mode: u4) !void { + if (!self.hasFeature(.sse4_1)) + return self.fail("TODO implement genRound without sse4_1 feature", .{}); + + const mir_tag = if (@as(?Mir.Inst.Tag, switch (ty.zigTypeTag()) { .Float => switch (ty.floatBits(self.target.*)) { - 32 => .roundss, - 64 => .roundsd, - else => return self.fail("TODO implement airRound for {}", .{ - ty.fmt(self.bin_file.options.module.?), - }), + 32 => if (self.hasFeature(.avx)) .vroundss else .roundss, + 64 => if (self.hasFeature(.avx)) .vroundsd else .roundsd, + 16, 80, 128 => null, + else => unreachable, }, - else => return self.fail("TODO implement airRound for {}", .{ - ty.fmt(self.bin_file.options.module.?), - }), - }; - assert(dst_mcv.isRegister()); + .Vector => switch (ty.childType().zigTypeTag()) { + .Float => switch (ty.childType().floatBits(self.target.*)) { + 32 => switch (ty.vectorLen()) { + 1 => if (self.hasFeature(.avx)) .vroundss else .roundss, + 2...4 => if (self.hasFeature(.avx)) .vroundps else .roundps, + 5...8 => if (self.hasFeature(.avx)) .vroundps else null, + else => null, + }, + 64 => switch (ty.vectorLen()) { + 1 => if (self.hasFeature(.avx)) .vroundsd else .roundsd, + 2 => if (self.hasFeature(.avx)) .vroundpd else .roundpd, + 3...4 => if (self.hasFeature(.avx)) .vroundpd else null, + else => null, + }, + 16, 80, 128 => null, + else => unreachable, + }, + else => null, + }, + else => unreachable, + })) |tag| tag else return self.fail("TODO implement genRound for {}", .{ + ty.fmt(self.bin_file.options.module.?), + }); + const abi_size = @intCast(u32, ty.abiSize(self.target.*)); - const dst_reg = registerAlias(dst_mcv.getReg().?, abi_size); - if (src_mcv.isRegister()) - try self.asmRegisterRegisterImmediate( + const dst_alias = registerAlias(dst_reg, abi_size); + switch (mir_tag) { + .vroundss, .vroundsd => if (src_mcv.isMemory()) try self.asmRegisterRegisterMemoryImmediate( mir_tag, - dst_reg, - registerAlias(src_mcv.getReg().?, abi_size), - mode, - ) - else - try self.asmRegisterMemoryImmediate( + dst_alias, + dst_alias, + src_mcv.mem(Memory.PtrSize.fromSize(abi_size)), + Immediate.u(mode), + ) else try self.asmRegisterRegisterRegisterImmediate( mir_tag, - dst_reg, - src_mcv.mem(Memory.PtrSize.fromSize(@intCast(u32, ty.abiSize(self.target.*)))), - mode, - ); - return self.finishAir(inst, dst_mcv, .{ un_op, .none, .none }); + dst_alias, + dst_alias, + registerAlias(if (src_mcv.isRegister()) + src_mcv.getReg().? + else + try self.copyToTmpRegister(ty, src_mcv), abi_size), + Immediate.u(mode), + ), + else => if (src_mcv.isMemory()) try self.asmRegisterMemoryImmediate( + mir_tag, + dst_alias, + src_mcv.mem(Memory.PtrSize.fromSize(abi_size)), + Immediate.u(mode), + ) else try self.asmRegisterRegisterImmediate( + mir_tag, + dst_alias, + registerAlias(if (src_mcv.isRegister()) + src_mcv.getReg().? 
+ else + try self.copyToTmpRegister(ty, src_mcv), abi_size), + Immediate.u(mode), + ), + } } fn airSqrt(self: *Self, inst: Air.Inst.Index) !void { @@ -6188,18 +6230,18 @@ fn genBinOp( })) |tag| tag else return self.fail("TODO implement genBinOp for {s} {}", .{ @tagName(air_tag), lhs_ty.fmt(self.bin_file.options.module.?), }); - const dst_alias = registerAlias(dst_mcv.getReg().?, abi_size); + const dst_reg = registerAlias(dst_mcv.getReg().?, abi_size); if (self.hasFeature(.avx)) { const src1_alias = - if (copied_to_dst) dst_alias else registerAlias(lhs_mcv.getReg().?, abi_size); + if (copied_to_dst) dst_reg else registerAlias(lhs_mcv.getReg().?, abi_size); if (src_mcv.isMemory()) try self.asmRegisterRegisterMemory( mir_tag, - dst_alias, + dst_reg, src1_alias, src_mcv.mem(Memory.PtrSize.fromSize(abi_size)), ) else try self.asmRegisterRegisterRegister( mir_tag, - dst_alias, + dst_reg, src1_alias, registerAlias(if (src_mcv.isRegister()) src_mcv.getReg().? @@ -6210,11 +6252,11 @@ fn genBinOp( assert(copied_to_dst); if (src_mcv.isMemory()) try self.asmRegisterMemory( mir_tag, - dst_alias, + dst_reg, src_mcv.mem(Memory.PtrSize.fromSize(abi_size)), ) else try self.asmRegisterRegister( mir_tag, - dst_alias, + dst_reg, registerAlias(if (src_mcv.isRegister()) src_mcv.getReg().? else @@ -6223,60 +6265,16 @@ fn genBinOp( } switch (air_tag) { .add, .sub, .mul, .div_float, .div_exact => {}, - .div_trunc, .div_floor => if (self.hasFeature(.sse4_1)) { - const round_tag = if (@as(?Mir.Inst.Tag, switch (lhs_ty.zigTypeTag()) { - .Float => switch (lhs_ty.floatBits(self.target.*)) { - 32 => if (self.hasFeature(.avx)) .vroundss else .roundss, - 64 => if (self.hasFeature(.avx)) .vroundsd else .roundsd, - 16, 80, 128 => null, - else => unreachable, - }, - .Vector => switch (lhs_ty.childType().zigTypeTag()) { - .Float => switch (lhs_ty.childType().floatBits(self.target.*)) { - 32 => switch (lhs_ty.vectorLen()) { - 1 => if (self.hasFeature(.avx)) .vroundss else .roundss, - 2...4 => if (self.hasFeature(.avx)) .vroundps else .roundps, - 5...8 => if (self.hasFeature(.avx)) .vroundps else null, - else => null, - }, - 64 => switch (lhs_ty.vectorLen()) { - 1 => if (self.hasFeature(.avx)) .vroundsd else .roundsd, - 2 => if (self.hasFeature(.avx)) .vroundpd else .roundpd, - 3...4 => if (self.hasFeature(.avx)) .vroundpd else null, - else => null, - }, - 16, 80, 128 => null, - else => unreachable, - }, - else => null, - }, - else => unreachable, - })) |tag| tag else return self.fail("TODO implement genBinOp for {s} {}", .{ - @tagName(air_tag), lhs_ty.fmt(self.bin_file.options.module.?), - }); - const round_mode = Immediate.u(switch (air_tag) { + .div_trunc, .div_floor => try self.genRound( + lhs_ty, + dst_reg, + .{ .register = dst_reg }, + switch (air_tag) { .div_trunc => 0b1_0_11, .div_floor => 0b1_0_01, else => unreachable, - }); - switch (round_tag) { - .vroundss, .vroundsd => try self.asmRegisterRegisterRegisterImmediate( - round_tag, - dst_alias, - dst_alias, - dst_alias, - round_mode, - ), - else => try self.asmRegisterRegisterImmediate( - round_tag, - dst_alias, - dst_alias, - round_mode, - ), - } - } else return self.fail("TODO implement genBinOp for {s} {} without sse4_1", .{ - @tagName(air_tag), lhs_ty.fmt(self.bin_file.options.module.?), - }), + }, + ), .max, .min => {}, // TODO: unordered select else => unreachable, } diff --git a/test/behavior/floatop.zig b/test/behavior/floatop.zig index 3f407061f4..3d46c267d3 100644 --- a/test/behavior/floatop.zig +++ b/test/behavior/floatop.zig @@ -617,7 +617,8 @@ fn 
testFloor() !void { test "@floor with vectors" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64 and + !comptime std.Target.x86.featureSetHas(builtin.cpu.features, .sse4_1)) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO @@ -707,7 +708,8 @@ fn testCeil() !void { test "@ceil with vectors" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64 and + !comptime std.Target.x86.featureSetHas(builtin.cpu.features, .sse4_1)) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO @@ -797,7 +799,8 @@ fn testTrunc() !void { test "@trunc with vectors" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64 and + !comptime std.Target.x86.featureSetHas(builtin.cpu.features, .sse4_1)) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO -- cgit v1.2.3 From 6778da4516e68c271cb50fe9c252ab4084daf16b Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Sun, 7 May 2023 20:42:46 -0400 Subject: x86_64: implement binary operations for `f16` and `f16` vectors --- src/arch/x86_64/CodeGen.zig | 261 ++++++++++++++++++++++++++++++++++++++---- src/arch/x86_64/Encoding.zig | 23 ++-- src/arch/x86_64/Lower.zig | 22 ++++ src/arch/x86_64/Mir.zig | 44 +++++++ src/arch/x86_64/encodings.zig | 20 ++++ test/behavior/floatop.zig | 22 ++-- test/behavior/muladd.zig | 8 +- 7 files changed, 354 insertions(+), 46 deletions(-) (limited to 'src/arch') diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 3e2d418105..154b909a21 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -4497,14 +4497,15 @@ fn airFloatSign(self: *Self, inst: Air.Inst.Index) !void { const tag = self.air.instructions.items(.tag)[inst]; try self.genBinOpMir(switch (ty_bits) { // No point using an extra prefix byte for *pd which performs the same operation. 
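 // Both operations are pure bit manipulation, which is why one
 // single-precision opcode covers every lane width here; conceptually (the
 // sign masks come from `sign_mcv`, and the exact operand layout is this
 // function's detail):
 //   neg(x)  == x ^ sign_mask  -> xorps
 //   fabs(x) == x & ~sign_mask -> andnps, whose built-in NOT of its first
 //              operand supplies the required complement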
- 32, 64 => switch (tag) {
+ 16, 32, 64, 128 => switch (tag) {
 .neg => .xorps,
 .fabs => .andnps,
 else => unreachable,
 },
- else => return self.fail("TODO implement airFloatSign for {}", .{
+ 80 => return self.fail("TODO implement airFloatSign for {}", .{
 ty.fmt(self.bin_file.options.module.?),
 }),
+ else => unreachable,
 }, vec_ty, dst_mcv, sign_mcv);
 return self.finishAir(inst, dst_mcv, .{ un_op, .none, .none });
 }
@@ -6112,9 +6113,53 @@ fn genBinOp(
 return dst_mcv;
 }

+ const dst_reg = registerAlias(dst_mcv.getReg().?, abi_size);
 const mir_tag = if (@as(?Mir.Inst.Tag, switch (lhs_ty.zigTypeTag()) {
 else => unreachable,
 .Float => switch (lhs_ty.floatBits(self.target.*)) {
+ 16 => if (self.hasFeature(.f16c)) {
+ const tmp_reg = (try self.register_manager.allocReg(null, sse)).to128();
+ const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
+ defer self.register_manager.unlockReg(tmp_lock);
+
+ if (src_mcv.isMemory()) try self.asmRegisterRegisterMemoryImmediate(
+ .vpinsrw,
+ dst_reg,
+ dst_reg,
+ src_mcv.mem(.word),
+ Immediate.u(1),
+ ) else try self.asmRegisterRegisterRegister(
+ .vpunpcklwd,
+ dst_reg,
+ dst_reg,
+ (if (src_mcv.isRegister())
+ src_mcv.getReg().?
+ else
+ try self.copyToTmpRegister(rhs_ty, src_mcv)).to128(),
+ );
+ try self.asmRegisterRegister(.vcvtph2ps, dst_reg, dst_reg);
+ try self.asmRegisterRegister(.vmovshdup, tmp_reg, dst_reg);
+ try self.asmRegisterRegisterRegister(
+ switch (air_tag) {
+ .add => .vaddss,
+ .sub => .vsubss,
+ .div_float, .div_trunc, .div_floor, .div_exact => .vdivss,
+ .max => .vmaxss,
+ .min => .vminss,
+ else => unreachable,
+ },
+ dst_reg,
+ dst_reg,
+ tmp_reg,
+ );
+ try self.asmRegisterRegisterImmediate(
+ .vcvtps2ph,
+ dst_reg,
+ dst_reg,
+ Immediate.u(0b1_00),
+ );
+ return dst_mcv;
+ } else null,
 32 => switch (air_tag) {
 .add => if (self.hasFeature(.avx)) .vaddss else .addss,
 .sub => if (self.hasFeature(.avx)) .vsubss else .subss,
@@ -6141,12 +6186,178 @@ fn genBinOp(
 .min => if (self.hasFeature(.avx)) .vminsd else .minsd,
 else => unreachable,
 },
- 16, 80, 128 => null,
+ 80, 128 => null,
 else => unreachable,
 },
 .Vector => switch (lhs_ty.childType().zigTypeTag()) {
 else => null,
 .Float => switch (lhs_ty.childType().floatBits(self.target.*)) {
+ 16 => if (self.hasFeature(.f16c)) switch (lhs_ty.vectorLen()) {
+ 1 => {
+ const tmp_reg = (try self.register_manager.allocReg(null, sse)).to128();
+ const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
+ defer self.register_manager.unlockReg(tmp_lock);
+
+ if (src_mcv.isMemory()) try self.asmRegisterRegisterMemoryImmediate(
+ .vpinsrw,
+ dst_reg,
+ dst_reg,
+ src_mcv.mem(.word),
+ Immediate.u(1),
+ ) else try self.asmRegisterRegisterRegister(
+ .vpunpcklwd,
+ dst_reg,
+ dst_reg,
+ (if (src_mcv.isRegister())
+ src_mcv.getReg().?
+ else
+ try self.copyToTmpRegister(rhs_ty, src_mcv)).to128(),
+ );
+ try self.asmRegisterRegister(.vcvtph2ps, dst_reg, dst_reg);
+ try self.asmRegisterRegister(.vmovshdup, tmp_reg, dst_reg);
+ try self.asmRegisterRegisterRegister(
+ switch (air_tag) {
+ .add => .vaddss,
+ .sub => .vsubss,
+ .div_float, .div_trunc, .div_floor, .div_exact => .vdivss,
+ .max => .vmaxss,
+ .min => .vminss,
+ else => unreachable,
+ },
+ dst_reg,
+ dst_reg,
+ tmp_reg,
+ );
+ try self.asmRegisterRegisterImmediate(
+ .vcvtps2ph,
+ dst_reg,
+ dst_reg,
+ Immediate.u(0b1_00),
+ );
+ return dst_mcv;
+ },
+ 2 => {
+ const tmp_reg = (try self.register_manager.allocReg(null, sse)).to128();
+ const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
+ defer self.register_manager.unlockReg(tmp_lock);
+
+ if (src_mcv.isMemory()) try self.asmRegisterMemoryImmediate(
+ .vpinsrd,
+ dst_reg,
+ src_mcv.mem(.dword),
+ Immediate.u(1),
+ ) else try self.asmRegisterRegisterRegister(
+ .vunpcklps,
+ dst_reg,
+ dst_reg,
+ (if (src_mcv.isRegister())
+ src_mcv.getReg().?
+ else
+ try self.copyToTmpRegister(rhs_ty, src_mcv)).to128(),
+ );
+ try self.asmRegisterRegister(.vcvtph2ps, dst_reg, dst_reg);
+ try self.asmRegisterRegisterRegister(.vmovhlps, tmp_reg, dst_reg, dst_reg);
+ try self.asmRegisterRegisterRegister(
+ switch (air_tag) {
+ .add => .vaddps,
+ .sub => .vsubps,
+ .div_float, .div_trunc, .div_floor, .div_exact => .vdivps,
+ .max => .vmaxps,
+ .min => .vminps,
+ else => unreachable,
+ },
+ dst_reg,
+ dst_reg,
+ tmp_reg,
+ );
+ try self.asmRegisterRegisterImmediate(
+ .vcvtps2ph,
+ dst_reg,
+ dst_reg,
+ Immediate.u(0b1_00),
+ );
+ return dst_mcv;
+ },
+ 3...4 => {
+ const tmp_reg = (try self.register_manager.allocReg(null, sse)).to128();
+ const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
+ defer self.register_manager.unlockReg(tmp_lock);
+
+ try self.asmRegisterRegister(.vcvtph2ps, dst_reg, dst_reg);
+ if (src_mcv.isMemory()) try self.asmRegisterMemory(
+ .vcvtph2ps,
+ tmp_reg,
+ src_mcv.mem(.qword),
+ ) else try self.asmRegisterRegister(
+ .vcvtph2ps,
+ tmp_reg,
+ (if (src_mcv.isRegister())
+ src_mcv.getReg().?
+ else
+ try self.copyToTmpRegister(rhs_ty, src_mcv)).to128(),
+ );
+ try self.asmRegisterRegisterRegister(
+ switch (air_tag) {
+ .add => .vaddps,
+ .sub => .vsubps,
+ .div_float, .div_trunc, .div_floor, .div_exact => .vdivps,
+ .max => .vmaxps,
+ .min => .vminps,
+ else => unreachable,
+ },
+ dst_reg,
+ dst_reg,
+ tmp_reg,
+ );
+ try self.asmRegisterRegisterImmediate(
+ .vcvtps2ph,
+ dst_reg,
+ dst_reg,
+ Immediate.u(0b1_00),
+ );
+ return dst_mcv;
+ },
+ 5...8 => {
+ const tmp_reg = (try self.register_manager.allocReg(null, sse)).to256();
+ const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
+ defer self.register_manager.unlockReg(tmp_lock);
+
+ try self.asmRegisterRegister(.vcvtph2ps, dst_reg.to256(), dst_reg);
+ if (src_mcv.isMemory()) try self.asmRegisterMemory(
+ .vcvtph2ps,
+ tmp_reg,
+ src_mcv.mem(.xword),
+ ) else try self.asmRegisterRegister(
+ .vcvtph2ps,
+ tmp_reg,
+ (if (src_mcv.isRegister())
+ src_mcv.getReg().?
+ else
+ try self.copyToTmpRegister(rhs_ty, src_mcv)).to128(),
+ );
+ try self.asmRegisterRegisterRegister(
+ switch (air_tag) {
+ .add => .vaddps,
+ .sub => .vsubps,
+ .div_float, .div_trunc, .div_floor, .div_exact => .vdivps,
+ .max => .vmaxps,
+ .min => .vminps,
+ else => unreachable,
+ },
+ dst_reg.to256(),
+ dst_reg.to256(),
+ tmp_reg,
+ );
+ try self.asmRegisterRegisterImmediate(
+ .vcvtps2ph,
+ dst_reg,
+ dst_reg.to256(),
+ Immediate.u(0b1_00),
+ );
+ return dst_mcv;
+ },
+ else => null,
+ } else null,
 32 => switch (lhs_ty.vectorLen()) {
 1 => switch (air_tag) {
 .add => if (self.hasFeature(.avx)) .vaddss else .addss,
@@ -6223,14 +6434,13 @@ fn genBinOp(
 } else null,
 else => null,
 },
- 16, 80, 128 => null,
+ 80, 128 => null,
 else => unreachable,
 },
 },
 })) |tag| tag else return self.fail("TODO implement genBinOp for {s} {}", .{
 @tagName(air_tag), lhs_ty.fmt(self.bin_file.options.module.?),
 });
- const dst_reg = registerAlias(dst_mcv.getReg().?, abi_size);
 if (self.hasFeature(.avx)) {
 const src1_alias =
 if (copied_to_dst) dst_reg else registerAlias(lhs_mcv.getReg().?, abi_size);
@@ -7139,21 +7349,21 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
 const tmp2_lock = self.register_manager.lockRegAssumeUnused(tmp2_reg);
 defer self.register_manager.unlockReg(tmp2_lock);

- if (src_mcv.isRegister())
- try self.asmRegisterRegisterRegister(
- .vpunpcklwd,
- tmp1_reg,
- dst_reg.to128(),
- src_mcv.getReg().?.to128(),
- )
- else
- try self.asmRegisterRegisterMemoryImmediate(
- .vpinsrw,
- tmp1_reg,
- dst_reg.to128(),
- src_mcv.mem(.word),
- Immediate.u(1),
- );
+ if (src_mcv.isMemory()) try self.asmRegisterRegisterMemoryImmediate(
+ .vpinsrw,
+ tmp1_reg,
+ dst_reg.to128(),
+ src_mcv.mem(.word),
+ Immediate.u(1),
+ ) else try self.asmRegisterRegisterRegister(
+ .vpunpcklwd,
+ tmp1_reg,
+ dst_reg.to128(),
+ (if (src_mcv.isRegister())
+ src_mcv.getReg().?
+ else + try self.copyToTmpRegister(ty, src_mcv)).to128(), + ); try self.asmRegisterRegister(.vcvtph2ps, tmp1_reg, tmp1_reg); try self.asmRegisterRegister(.vmovshdup, tmp2_reg, tmp1_reg); try self.genBinOpMir(.ucomiss, ty, tmp1_mcv, tmp2_mcv); @@ -8139,7 +8349,16 @@ fn movMirTag(self: *Self, ty: Type, aligned: bool) !Mir.Inst.Tag { }, .Vector => switch (ty.childType().zigTypeTag()) { .Float => switch (ty.childType().floatBits(self.target.*)) { - 16 => unreachable, // needs special handling + 16 => switch (ty.vectorLen()) { + 1 => unreachable, // needs special handling + 2 => return if (self.hasFeature(.avx)) .vmovss else .movss, + 3...4 => return if (self.hasFeature(.avx)) .vmovsd else .movsd, + 5...8 => return if (self.hasFeature(.avx)) + if (aligned) .vmovaps else .vmovups + else if (aligned) .movaps else .movups, + 9...16 => if (self.hasFeature(.avx)) return if (aligned) .vmovaps else .vmovups, + else => {}, + }, 32 => switch (ty.vectorLen()) { 1 => return if (self.hasFeature(.avx)) .vmovss else .movss, 2...4 => return if (self.hasFeature(.avx)) diff --git a/src/arch/x86_64/Encoding.zig b/src/arch/x86_64/Encoding.zig index b8ccc9efba..3235b29358 100644 --- a/src/arch/x86_64/Encoding.zig +++ b/src/arch/x86_64/Encoding.zig @@ -270,7 +270,7 @@ pub const Mnemonic = enum { divps, divss, maxps, maxss, minps, minss, - movaps, movss, movups, + movaps, movhlps, movss, movups, mulps, mulss, orps, pextrw, pinsrw, @@ -303,6 +303,8 @@ pub const Mnemonic = enum { // SSE3 movddup, movshdup, movsldup, // SSE4.1 + pextrb, pextrd, pextrq, + pinsrb, pinsrd, pinsrq, roundpd, roundps, roundsd, roundss, // AVX vaddpd, vaddps, vaddsd, vaddss, @@ -311,13 +313,14 @@ pub const Mnemonic = enum { vmaxpd, vmaxps, vmaxsd, vmaxss, vminpd, vminps, vminsd, vminss, vmovapd, vmovaps, - vmovddup, + vmovddup, vmovhlps, vmovsd, vmovshdup, vmovsldup, vmovss, vmovupd, vmovups, vmulpd, vmulps, vmulsd, vmulss, - vpextrw, vpinsrw, + vpextrb, vpextrd, vpextrq, vpextrw, + vpinsrb, vpinsrd, vpinsrq, vpinsrw, vpshufhw, vpshuflw, vpsrld, vpsrlq, vpsrlw, vpunpckhbw, vpunpckhdq, vpunpckhqdq, vpunpckhwd, @@ -359,7 +362,7 @@ pub const Op = enum { cl, r8, r16, r32, r64, rm8, rm16, rm32, rm64, - r32_m16, r64_m16, + r32_m8, r32_m16, r64_m16, m8, m16, m32, m64, m80, m128, m256, rel8, rel16, rel32, m, @@ -444,7 +447,7 @@ pub const Op = enum { pub fn immBitSize(op: Op) u64 { return switch (op) { .none, .o16, .o32, .o64, .moffs, .m, .sreg => unreachable, - .al, .cl, .r8, .rm8 => unreachable, + .al, .cl, .r8, .rm8, .r32_m8 => unreachable, .ax, .r16, .rm16 => unreachable, .eax, .r32, .rm32, .r32_m16 => unreachable, .rax, .r64, .rm64, .r64_m16 => unreachable, @@ -467,7 +470,7 @@ pub const Op = enum { .m8, .m16, .m32, .m64, .m80, .m128, .m256 => unreachable, .al, .cl, .r8, .rm8 => 8, .ax, .r16, .rm16 => 16, - .eax, .r32, .rm32, .r32_m16 => 32, + .eax, .r32, .rm32, .r32_m8, .r32_m16 => 32, .rax, .r64, .rm64, .r64_m16 => 64, .xmm, .xmm_m32, .xmm_m64, .xmm_m128 => 128, .ymm, .ymm_m256 => 256, @@ -480,7 +483,7 @@ pub const Op = enum { .unity, .imm8, .imm8s, .imm16, .imm16s, .imm32, .imm32s, .imm64 => unreachable, .rel8, .rel16, .rel32 => unreachable, .al, .cl, .r8, .ax, .r16, .eax, .r32, .rax, .r64, .xmm, .ymm => unreachable, - .m8, .rm8 => 8, + .m8, .rm8, .r32_m8 => 8, .m16, .rm16, .r32_m16, .r64_m16 => 16, .m32, .rm32, .xmm_m32 => 32, .m64, .rm64, .xmm_m64 => 64, @@ -509,7 +512,7 @@ pub const Op = enum { .al, .ax, .eax, .rax, .r8, .r16, .r32, .r64, .rm8, .rm16, .rm32, .rm64, - .r32_m16, .r64_m16, + .r32_m8, .r32_m16, .r64_m16, .xmm, .xmm_m32, 
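 // `.r32_m8` above mirrors the manual's "r32/m8" operand class needed by
 // pextrb/pinsrb: the operand is a full 32-bit register when it is a
 // register, but only a byte when it is memory, so both of these forms match
 // a single table entry:
 //   pextrb eax, xmm1, 0
 //   pextrb byte ptr [rdi], xmm1, 0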
.xmm_m64, .xmm_m128, .ymm, .ymm_m256, => true, @@ -535,7 +538,7 @@ pub const Op = enum { // zig fmt: off return switch (op) { .rm8, .rm16, .rm32, .rm64, - .r32_m16, .r64_m16, + .r32_m8, .r32_m16, .r64_m16, .m8, .m16, .m32, .m64, .m80, .m128, .m256, .m, .xmm_m32, .xmm_m64, .xmm_m128, @@ -559,7 +562,7 @@ pub const Op = enum { .al, .ax, .eax, .rax, .cl => .general_purpose, .r8, .r16, .r32, .r64 => .general_purpose, .rm8, .rm16, .rm32, .rm64 => .general_purpose, - .r32_m16, .r64_m16 => .general_purpose, + .r32_m8, .r32_m16, .r64_m16 => .general_purpose, .sreg => .segment, .xmm, .xmm_m32, .xmm_m64, .xmm_m128 => .floating_point, .ymm, .ymm_m256 => .floating_point, diff --git a/src/arch/x86_64/Lower.zig b/src/arch/x86_64/Lower.zig index 2cfa25ac84..5c079f4768 100644 --- a/src/arch/x86_64/Lower.zig +++ b/src/arch/x86_64/Lower.zig @@ -137,6 +137,7 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct { .minps, .minss, .movaps, + .movhlps, .movss, .movups, .mulps, @@ -149,6 +150,8 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct { .subps, .subss, .ucomiss, + .unpckhps, + .unpcklps, .xorps, .addpd, @@ -187,12 +190,20 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct { .subpd, .subsd, .ucomisd, + .unpckhpd, + .unpcklpd, .xorpd, .movddup, .movshdup, .movsldup, + .pextrb, + .pextrd, + .pextrq, + .pinsrb, + .pinsrd, + .pinsrq, .roundpd, .roundps, .roundsd, @@ -221,6 +232,7 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct { .vmovapd, .vmovaps, .vmovddup, + .vmovhlps, .vmovsd, .vmovshdup, .vmovsldup, @@ -231,7 +243,13 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct { .vmulps, .vmulsd, .vmulss, + .vpextrb, + .vpextrd, + .vpextrq, .vpextrw, + .vpinsrb, + .vpinsrd, + .vpinsrq, .vpinsrw, .vpshufhw, .vpshuflw, @@ -258,6 +276,10 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct { .vsubps, .vsubsd, .vsubss, + .vunpckhpd, + .vunpckhps, + .vunpcklpd, + .vunpcklps, .vcvtph2ps, .vcvtps2ph, diff --git a/src/arch/x86_64/Mir.zig b/src/arch/x86_64/Mir.zig index c0450406cf..442cfabebb 100644 --- a/src/arch/x86_64/Mir.zig +++ b/src/arch/x86_64/Mir.zig @@ -192,6 +192,8 @@ pub const Inst = struct { minss, /// Move aligned packed single-precision floating-point values movaps, + /// Move packed single-precision floating-point values high to low + movhlps, /// Move scalar single-precision floating-point value movss, /// Move unaligned packed single-precision floating-point values @@ -216,6 +218,10 @@ pub const Inst = struct { subss, /// Unordered compare scalar single-precision floating-point values ucomiss, + /// Unpack and interleave high packed single-precision floating-point values + unpckhps, + /// Unpack and interleave low packed single-precision floating-point values + unpcklps, /// Bitwise logical xor of packed single precision floating-point values xorps, @@ -291,6 +297,10 @@ pub const Inst = struct { subsd, /// Unordered compare scalar double-precision floating-point values ucomisd, + /// Unpack and interleave high packed double-precision floating-point values + unpckhpd, + /// Unpack and interleave low packed double-precision floating-point values + unpcklpd, /// Bitwise logical xor of packed double precision floating-point values xorpd, @@ -301,6 +311,18 @@ pub const Inst = struct { /// Replicate single floating-point values movsldup, + /// Extract Byte + pextrb, + /// Extract Doubleword + pextrd, + /// Extract Quadword + pextrq, + /// Insert Byte + pinsrb, + /// Insert Doubleword + pinsrd, + /// 
Insert Quadword + pinsrq, /// Round packed double-precision floating-point values roundpd, /// Round packed single-precision floating-point values @@ -354,6 +376,8 @@ pub const Inst = struct { vmovapd, /// Move aligned packed single-precision floating-point values vmovaps, + /// Move packed single-precision floating-point values high to low + vmovhlps, /// Replicate double floating-point values vmovddup, /// Move or merge scalar double-precision floating-point value @@ -376,8 +400,20 @@ pub const Inst = struct { vmulsd, /// Multiply scalar single-precision floating-point values vmulss, + /// Extract Byte + vpextrb, + /// Extract Doubleword + vpextrd, + /// Extract Quadword + vpextrq, /// Extract word vpextrw, + /// Insert Byte + vpinsrb, + /// Insert Doubleword + vpinsrd, + /// Insert Quadword + vpinsrq, /// Insert word vpinsrw, /// Shuffle packed high words @@ -430,6 +466,14 @@ pub const Inst = struct { vsubsd, /// Subtract scalar single-precision floating-point values vsubss, + /// Unpack and interleave high packed double-precision floating-point values + vunpckhpd, + /// Unpack and interleave high packed single-precision floating-point values + vunpckhps, + /// Unpack and interleave low packed double-precision floating-point values + vunpcklpd, + /// Unpack and interleave low packed single-precision floating-point values + vunpcklps, /// Convert 16-bit floating-point values to single-precision floating-point values vcvtph2ps, diff --git a/src/arch/x86_64/encodings.zig b/src/arch/x86_64/encodings.zig index c41f0ea4e7..2b9d530c1e 100644 --- a/src/arch/x86_64/encodings.zig +++ b/src/arch/x86_64/encodings.zig @@ -865,6 +865,8 @@ pub const table = [_]Entry{ .{ .movaps, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x28 }, 0, .none, .sse }, .{ .movaps, .mr, &.{ .xmm_m128, .xmm }, &.{ 0x0f, 0x29 }, 0, .none, .sse }, + .{ .movhlps, .rm, &.{ .xmm, .xmm }, &.{ 0x0f, 0x12 }, 0, .none, .sse }, + .{ .movss, .rm, &.{ .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x10 }, 0, .none, .sse }, .{ .movss, .mr, &.{ .xmm_m32, .xmm }, &.{ 0xf3, 0x0f, 0x11 }, 0, .none, .sse }, @@ -988,8 +990,16 @@ pub const table = [_]Entry{ .{ .movsldup, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0xf3, 0x0f, 0x12 }, 0, .none, .sse3 }, // SSE4.1 + .{ .pextrb, .mri, &.{ .r32_m8, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x14 }, 0, .none, .sse4_1 }, + .{ .pextrd, .mri, &.{ .rm32, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x16 }, 0, .none, .sse4_1 }, + .{ .pextrq, .mri, &.{ .rm64, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x16 }, 0, .long, .sse4_1 }, + .{ .pextrw, .mri, &.{ .r32_m16, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x15 }, 0, .none, .sse4_1 }, + .{ .pinsrb, .rmi, &.{ .xmm, .r32_m8, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x20 }, 0, .none, .sse4_1 }, + .{ .pinsrd, .rmi, &.{ .xmm, .rm32, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x22 }, 0, .none, .sse4_1 }, + .{ .pinsrq, .rmi, &.{ .xmm, .rm64, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x22 }, 0, .long, .sse4_1 }, + .{ .roundpd, .rmi, &.{ .xmm, .xmm_m128, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x09 }, 0, .none, .sse4_1 }, .{ .roundps, .rmi, &.{ .xmm, .xmm_m128, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x08 }, 0, .none, .sse4_1 }, @@ -1062,6 +1072,8 @@ pub const table = [_]Entry{ .{ .vmovddup, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x12 }, 0, .vex_128_wig, .avx }, .{ .vmovddup, .rm, &.{ .ymm, .ymm_m256 }, &.{ 0xf2, 0x0f, 0x12 }, 0, .vex_256_wig, .avx }, + .{ .vmovhlps, .rvm, &.{ .xmm, .xmm, .xmm }, &.{ 0x0f, 0x12 }, 0, .vex_128_wig, .avx }, + .{ .vmovsd, .rvm, &.{ .xmm, .xmm, .xmm }, &.{ 0xf2, 0x0f, 0x10 }, 0, .vex_lig_wig, .avx }, .{ .vmovsd, .rm, &.{ .xmm, .m64 }, 
&.{ 0xf2, 0x0f, 0x10 }, 0, .vex_lig_wig, .avx }, .{ .vmovsd, .mvr, &.{ .xmm, .xmm, .xmm }, &.{ 0xf2, 0x0f, 0x11 }, 0, .vex_lig_wig, .avx }, @@ -1098,9 +1110,17 @@ pub const table = [_]Entry{ .{ .vmulss, .rvm, &.{ .xmm, .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x59 }, 0, .vex_lig_wig, .avx }, + .{ .vpextrb, .mri, &.{ .r32_m8, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x14 }, 0, .vex_128_w0, .avx }, + .{ .vpextrd, .mri, &.{ .rm32, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x16 }, 0, .vex_128_w0, .avx }, + .{ .vpextrq, .mri, &.{ .rm64, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x16 }, 0, .vex_128_w1, .avx }, + .{ .vpextrw, .rmi, &.{ .r32, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x15 }, 0, .vex_128_wig, .avx }, .{ .vpextrw, .mri, &.{ .r32_m16, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x15 }, 0, .vex_128_wig, .avx }, + .{ .vpinsrb, .rmi, &.{ .xmm, .r32_m8, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x20 }, 0, .vex_128_w0, .avx }, + .{ .vpinsrd, .rmi, &.{ .xmm, .rm32, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x22 }, 0, .vex_128_w0, .avx }, + .{ .vpinsrq, .rmi, &.{ .xmm, .rm64, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x22 }, 0, .vex_128_w1, .avx }, + .{ .vpinsrw, .rvmi, &.{ .xmm, .xmm, .r32_m16, .imm8 }, &.{ 0x66, 0x0f, 0xc4 }, 0, .vex_128_wig, .avx }, .{ .vpsrlw, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xd1 }, 0, .vex_128_wig, .avx }, diff --git a/test/behavior/floatop.zig b/test/behavior/floatop.zig index 3d46c267d3..242c8dabe5 100644 --- a/test/behavior/floatop.zig +++ b/test/behavior/floatop.zig @@ -8,6 +8,8 @@ const has_f80_rt = switch (builtin.cpu.arch) { .x86_64, .x86 => true, else => false, }; +const no_x86_64_hardware_f16_support = builtin.zig_backend == .stage2_x86_64 and + !std.Target.x86.featureSetHas(builtin.cpu.features, .f16c); const epsilon_16 = 0.001; const epsilon = 0.000001; @@ -52,8 +54,7 @@ fn testFloatComparisons() !void { } test "different sized float comparisons" { - if (builtin.zig_backend == .stage2_x86_64 and - !comptime std.Target.x86.featureSetHas(builtin.cpu.features, .f16c)) return error.SkipZigTest; // TODO + if (no_x86_64_hardware_f16_support) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -152,7 +153,7 @@ fn testSqrtWithVectors() !void { } test "more @sqrt f16 tests" { - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (no_x86_64_hardware_f16_support) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -202,7 +203,7 @@ fn testSqrtLegacy(comptime T: type, x: T) !void { } test "@sin" { - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (no_x86_64_hardware_f16_support) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -241,7 +242,7 @@ fn testSinWithVectors() !void { } test "@cos" { - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (no_x86_64_hardware_f16_support) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if 
(builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -280,7 +281,7 @@ fn testCosWithVectors() !void { } test "@exp" { - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (no_x86_64_hardware_f16_support) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -318,7 +319,7 @@ fn testExpWithVectors() !void { } test "@exp2" { - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (no_x86_64_hardware_f16_support) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -403,7 +404,7 @@ test "@log with @vectors" { } test "@log2" { - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (no_x86_64_hardware_f16_support) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -445,7 +446,7 @@ fn testLog2WithVectors() !void { } test "@log10" { - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (no_x86_64_hardware_f16_support) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -881,7 +882,7 @@ fn testTruncLegacy(comptime T: type, x: T) !void { } test "negation f16" { - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (no_x86_64_hardware_f16_support) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -1040,7 +1041,6 @@ test "comptime_float zero divided by zero produces zero" { } test "nan negation f16" { - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO diff --git a/test/behavior/muladd.zig b/test/behavior/muladd.zig index bfb94de270..199f117e7b 100644 --- a/test/behavior/muladd.zig +++ b/test/behavior/muladd.zig @@ -2,11 +2,11 @@ const std = @import("std"); const builtin = @import("builtin"); const expect = std.testing.expect; -const stage2_x86_64_without_hardware_fma_support = builtin.zig_backend == .stage2_x86_64 and +const no_x86_64_hardware_fma_support = builtin.zig_backend == .stage2_x86_64 and !std.Target.x86.featureSetHas(builtin.cpu.features, .fma); test "@mulAdd" { - if (stage2_x86_64_without_hardware_fma_support) return error.SkipZigTest; // TODO + if (no_x86_64_hardware_fma_support) return error.SkipZigTest; // TODO if 
(builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -120,7 +120,7 @@ fn vector32() !void { test "vector f32" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if (stage2_x86_64_without_hardware_fma_support) return error.SkipZigTest; // TODO + if (no_x86_64_hardware_fma_support) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -143,7 +143,7 @@ fn vector64() !void { test "vector f64" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if (stage2_x86_64_without_hardware_fma_support) return error.SkipZigTest; // TODO + if (no_x86_64_hardware_fma_support) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO -- cgit v1.2.3 From 6c14eb2863c7c00f809c5e447ceb8186b55f2eef Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Mon, 8 May 2023 06:50:18 -0400 Subject: x86_64: optimize mir tag usage This moves all pseudo-instructions to a single `Mir.Inst.Tag` tag and prepares to start coalescing similar mnemonics. 239 tags left in use. --- src/arch/x86_64/CodeGen.zig | 403 +++++++++++++++--------- src/arch/x86_64/Emit.zig | 65 ++-- src/arch/x86_64/Encoding.zig | 2 +- src/arch/x86_64/Lower.zig | 713 +++++++++++++------------------------------ src/arch/x86_64/Mir.zig | 517 +++++++++++++++++++++---------- src/arch/x86_64/bits.zig | 3 - 6 files changed, 839 insertions(+), 864 deletions(-) (limited to 'src/arch') diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 154b909a21..3ac05c95ac 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -973,14 +973,14 @@ fn addInst(self: *Self, inst: Mir.Inst) error{OutOfMemory}!Mir.Inst.Index { try self.mir_instructions.ensureUnusedCapacity(gpa, 1); const result_index = @intCast(Mir.Inst.Index, self.mir_instructions.len); self.mir_instructions.appendAssumeCapacity(inst); - switch (inst.tag) { - else => wip_mir_log.debug("{}", .{self.fmtWipMir(result_index)}), - .dbg_line, - .dbg_prologue_end, - .dbg_epilogue_begin, - .dead, - => {}, - } + if (inst.tag != .pseudo or switch (inst.ops) { + else => true, + .pseudo_dbg_prologue_end_none, + .pseudo_dbg_line_line_column, + .pseudo_dbg_epilogue_begin_none, + .pseudo_dead_none, + => false, + }) wip_mir_log.debug("{}", .{self.fmtWipMir(result_index)}); return result_index; } @@ -1003,35 +1003,57 @@ fn addExtraAssumeCapacity(self: *Self, extra: anytype) u32 { return result; } -fn asmSetccRegister(self: *Self, reg: Register, cc: bits.Condition) !void { +/// A `cc` of `.z_and_np` clobbers `reg2`! 
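+// The pair of real instructions this lowers to (see the pseudo_cmov_z_and_np_rr
+// case added to Lower.zig later in this commit), as a sketch:
+//   cmovnz r2, r1  ; ZF == 0 means the condition already failed, so preload
+//                  ; r2 with r1 to make the next move a no-op (clobbers r2)
+//   cmovnp r1, r2  ; PF == 0: commit r2 into r1
+// so r1 receives r2 exactly when ZF == 1 and PF == 0 (an ordered "equal").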
+fn asmCmovccRegisterRegister(self: *Self, reg1: Register, reg2: Register, cc: bits.Condition) !void { _ = try self.addInst(.{ - .tag = .setcc, - .ops = .r_cc, - .data = .{ .r_cc = .{ - .r = reg, - .scratch = if (cc == .z_and_np or cc == .nz_or_p) - (try self.register_manager.allocReg(null, gp)).to8() - else - .none, - .cc = cc, + .tag = switch (cc) { + else => .cmov, + .z_and_np, .nz_or_p => .pseudo, + }, + .ops = switch (cc) { + else => .rr, + .z_and_np => .pseudo_cmov_z_and_np_rr, + .nz_or_p => .pseudo_cmov_nz_or_p_rr, + }, + .data = .{ .rr = .{ + .fixes = switch (cc) { + else => Mir.Inst.Fixes.fromCondition(cc), + .z_and_np, .nz_or_p => ._, + }, + .r1 = reg1, + .r2 = reg2, } }, }); } -fn asmSetccMemory(self: *Self, m: Memory, cc: bits.Condition) !void { +/// A `cc` of `.z_and_np` is not supported by this encoding! +fn asmCmovccRegisterMemory(self: *Self, reg: Register, m: Memory, cc: bits.Condition) !void { _ = try self.addInst(.{ - .tag = .setcc, - .ops = switch (m) { - .sib => .m_sib_cc, - .rip => .m_rip_cc, - else => unreachable, + .tag = switch (cc) { + else => .cmov, + .z_and_np => unreachable, + .nz_or_p => .pseudo, }, - .data = .{ .x_cc = .{ - .scratch = if (cc == .z_and_np or cc == .nz_or_p) - (try self.register_manager.allocReg(null, gp)).to8() - else - .none, - .cc = cc, + .ops = switch (cc) { + else => switch (m) { + .sib => .rm_sib, + .rip => .rm_rip, + else => unreachable, + }, + .z_and_np => unreachable, + .nz_or_p => switch (m) { + .sib => .pseudo_cmov_nz_or_p_rm_sib, + .rip => .pseudo_cmov_nz_or_p_rm_rip, + else => unreachable, + }, + }, + .data = .{ .rx = .{ + .fixes = switch (cc) { + else => Mir.Inst.Fixes.fromCondition(cc), + .z_and_np => unreachable, + .nz_or_p => ._, + }, + .r1 = reg, .payload = switch (m) { .sib => try self.addExtra(Mir.MemorySib.encode(m)), .rip => try self.addExtra(Mir.MemoryRip.encode(m)), @@ -1041,60 +1063,106 @@ fn asmSetccMemory(self: *Self, m: Memory, cc: bits.Condition) !void { }); } -/// A `cc` of `.z_and_np` clobbers `reg2`! 
-fn asmCmovccRegisterRegister(self: *Self, reg1: Register, reg2: Register, cc: bits.Condition) !void { +fn asmSetccRegister(self: *Self, reg: Register, cc: bits.Condition) !void { _ = try self.addInst(.{ - .tag = .cmovcc, - .ops = .rr_cc, - .data = .{ .rr_cc = .{ - .r1 = reg1, - .r2 = reg2, - .cc = cc, - } }, + .tag = switch (cc) { + else => .set, + .z_and_np, .nz_or_p => .pseudo, + }, + .ops = switch (cc) { + else => .r, + .z_and_np => .pseudo_set_z_and_np_r, + .nz_or_p => .pseudo_set_nz_or_p_r, + }, + .data = switch (cc) { + else => .{ .r = .{ + .fixes = Mir.Inst.Fixes.fromCondition(cc), + .r1 = reg, + } }, + .z_and_np, .nz_or_p => .{ .r_scratch = .{ + .r1 = reg, + .scratch_reg = (try self.register_manager.allocReg(null, gp)).to8(), + } }, + }, }); } -fn asmCmovccRegisterMemory(self: *Self, reg: Register, m: Memory, cc: bits.Condition) !void { - assert(cc != .z_and_np); // not supported +fn asmSetccMemory(self: *Self, m: Memory, cc: bits.Condition) !void { + const payload = switch (m) { + .sib => try self.addExtra(Mir.MemorySib.encode(m)), + .rip => try self.addExtra(Mir.MemoryRip.encode(m)), + else => unreachable, + }; _ = try self.addInst(.{ - .tag = .cmovcc, - .ops = switch (m) { - .sib => .rm_sib_cc, - .rip => .rm_rip_cc, - else => unreachable, + .tag = switch (cc) { + else => .set, + .z_and_np, .nz_or_p => .pseudo, }, - .data = .{ .rx_cc = .{ - .r = reg, - .cc = cc, - .payload = switch (m) { - .sib => try self.addExtra(Mir.MemorySib.encode(m)), - .rip => try self.addExtra(Mir.MemoryRip.encode(m)), + .ops = switch (cc) { + else => switch (m) { + .sib => .m_sib, + .rip => .m_rip, else => unreachable, }, - } }, + .z_and_np => switch (m) { + .sib => .pseudo_set_z_and_np_m_sib, + .rip => .pseudo_set_z_and_np_m_rip, + else => unreachable, + }, + .nz_or_p => switch (m) { + .sib => .pseudo_set_nz_or_p_m_sib, + .rip => .pseudo_set_nz_or_p_m_rip, + else => unreachable, + }, + }, + .data = switch (cc) { + else => .{ .x = .{ + .fixes = Mir.Inst.Fixes.fromCondition(cc), + .payload = payload, + } }, + .z_and_np, .nz_or_p => .{ .x_scratch = .{ + .scratch_reg = (try self.register_manager.allocReg(null, gp)).to8(), + .payload = payload, + } }, + }, }); } fn asmJmpReloc(self: *Self, target: Mir.Inst.Index) !Mir.Inst.Index { return self.addInst(.{ - .tag = .jmp_reloc, - .ops = undefined, - .data = .{ .inst = target }, + .tag = .jmp, + .ops = .inst, + .data = .{ .inst = .{ + .inst = target, + } }, }); } fn asmJccReloc(self: *Self, target: Mir.Inst.Index, cc: bits.Condition) !Mir.Inst.Index { return self.addInst(.{ - .tag = .jcc, - .ops = .inst_cc, - .data = .{ .inst_cc = .{ .inst = target, .cc = cc } }, + .tag = switch (cc) { + else => .j, + .z_and_np, .nz_or_p => .pseudo, + }, + .ops = switch (cc) { + else => .inst, + .z_and_np => .pseudo_j_z_and_np_inst, + .nz_or_p => .pseudo_j_nz_or_p_inst, + }, + .data = .{ .inst = .{ + .fixes = switch (cc) { + else => Mir.Inst.Fixes.fromCondition(cc), + .z_and_np, .nz_or_p => ._, + }, + .inst = target, + } }, }); } fn asmPlaceholder(self: *Self) !Mir.Inst.Index { return self.addInst(.{ - .tag = .dead, - .ops = undefined, + .tag = .pseudo, + .ops = .pseudo_dead_none, .data = undefined, }); } @@ -1107,11 +1175,19 @@ fn asmOpOnly(self: *Self, tag: Mir.Inst.Tag) !void { }); } +fn asmPseudo(self: *Self, ops: Mir.Inst.Ops) !void { + _ = try self.addInst(.{ + .tag = .pseudo, + .ops = ops, + .data = undefined, + }); +} + fn asmRegister(self: *Self, tag: Mir.Inst.Tag, reg: Register) !void { _ = try self.addInst(.{ .tag = tag, .ops = .r, - .data = .{ .r = reg }, + .data = 
.{ .r = .{ .r1 = reg } }, }); } @@ -1122,9 +1198,11 @@ fn asmImmediate(self: *Self, tag: Mir.Inst.Tag, imm: Immediate) !void { .signed => .i_s, .unsigned => .i_u, }, - .data = .{ .i = switch (imm) { - .signed => |s| @bitCast(u32, s), - .unsigned => |u| @intCast(u32, u), + .data = .{ .i = .{ + .i = switch (imm) { + .signed => |s| @bitCast(u32, s), + .unsigned => |u| @intCast(u32, u), + }, } }, }); } @@ -1147,14 +1225,14 @@ fn asmRegisterImmediate(self: *Self, tag: Mir.Inst.Tag, reg: Register, imm: Imme .ops = ops, .data = switch (ops) { .ri_s, .ri_u => .{ .ri = .{ - .r = reg, + .r1 = reg, .i = switch (imm) { .signed => |s| @bitCast(u32, s), .unsigned => |u| @intCast(u32, u), }, } }, .ri64 => .{ .rx = .{ - .r = reg, + .r1 = reg, .payload = try self.addExtra(Mir.Imm64.encode(imm.unsigned)), } }, else => unreachable, @@ -1249,10 +1327,12 @@ fn asmMemory(self: *Self, tag: Mir.Inst.Tag, m: Memory) !void { .rip => .m_rip, else => unreachable, }, - .data = .{ .payload = switch (m) { - .sib => try self.addExtra(Mir.MemorySib.encode(m)), - .rip => try self.addExtra(Mir.MemoryRip.encode(m)), - else => unreachable, + .data = .{ .x = .{ + .payload = switch (m) { + .sib => try self.addExtra(Mir.MemorySib.encode(m)), + .rip => try self.addExtra(Mir.MemoryRip.encode(m)), + else => unreachable, + }, } }, }); } @@ -1266,7 +1346,7 @@ fn asmRegisterMemory(self: *Self, tag: Mir.Inst.Tag, reg: Register, m: Memory) ! else => unreachable, }, .data = .{ .rx = .{ - .r = reg, + .r1 = reg, .payload = switch (m) { .sib => try self.addExtra(Mir.MemorySib.encode(m)), .rip => try self.addExtra(Mir.MemoryRip.encode(m)), @@ -1291,7 +1371,7 @@ fn asmRegisterMemoryImmediate( else => unreachable, }, .data = .{ .rix = .{ - .r = reg, + .r1 = reg, .i = @intCast(u8, imm.unsigned), .payload = switch (m) { .sib => try self.addExtra(Mir.MemorySib.encode(m)), @@ -1339,7 +1419,7 @@ fn asmMemoryRegister(self: *Self, tag: Mir.Inst.Tag, m: Memory, reg: Register) ! else => unreachable, }, .data = .{ .rx = .{ - .r = reg, + .r1 = reg, .payload = switch (m) { .sib => try self.addExtra(Mir.MemorySib.encode(m)), .rip => try self.addExtra(Mir.MemoryRip.encode(m)), @@ -1413,11 +1493,15 @@ fn asmMemoryRegisterImmediate( .rip => .mri_rip, else => unreachable, }, - .data = .{ .rix = .{ .r = reg, .i = @intCast(u8, imm.unsigned), .payload = switch (m) { - .sib => try self.addExtra(Mir.MemorySib.encode(m)), - .rip => try self.addExtra(Mir.MemoryRip.encode(m)), - else => unreachable, - } } }, + .data = .{ .rix = .{ + .r1 = reg, + .i = @intCast(u8, imm.unsigned), + .payload = switch (m) { + .sib => try self.addExtra(Mir.MemorySib.encode(m)), + .rip => try self.addExtra(Mir.MemoryRip.encode(m)), + else => unreachable, + }, + } }, }); } @@ -1450,7 +1534,7 @@ fn gen(self: *Self) InnerError!void { else => unreachable, } - try self.asmOpOnly(.dbg_prologue_end); + try self.asmPseudo(.pseudo_dbg_prologue_end_none); try self.genBody(self.air.getMainBody()); @@ -1462,11 +1546,11 @@ fn gen(self: *Self) InnerError!void { // } // Eliding the reloc will cause a miscompilation in this case. 
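    // With the payload rework, a branch target now lives one level deeper, at
    // data.inst.inst instead of data.inst, so this backpatch writes through the
    // new `inst` struct. The emit-then-patch pattern, as a minimal sketch
    // (asmJmpReloc is given a placeholder target that the loop below overwrites):
    //   const jmp_reloc = try self.asmJmpReloc(undefined);
    //   try self.exitlude_jump_relocs.append(self.gpa, jmp_reloc);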
for (self.exitlude_jump_relocs.items) |jmp_reloc| { - self.mir_instructions.items(.data)[jmp_reloc].inst = + self.mir_instructions.items(.data)[jmp_reloc].inst.inst = @intCast(u32, self.mir_instructions.len); } - try self.asmOpOnly(.dbg_epilogue_begin); + try self.asmPseudo(.pseudo_dbg_epilogue_begin_none); const backpatch_stack_dealloc = try self.asmPlaceholder(); const backpatch_pop_callee_preserved_regs = try self.asmPlaceholder(); try self.asmRegister(.pop, .rbp); @@ -1480,46 +1564,54 @@ fn gen(self: *Self) InnerError!void { self.mir_instructions.set(backpatch_frame_align, .{ .tag = .@"and", .ops = .ri_s, - .data = .{ .ri = .{ .r = .rsp, .i = frame_layout.stack_mask } }, + .data = .{ .ri = .{ + .r1 = .rsp, + .i = frame_layout.stack_mask, + } }, }); } if (need_stack_adjust) { self.mir_instructions.set(backpatch_stack_alloc, .{ .tag = .sub, .ops = .ri_s, - .data = .{ .ri = .{ .r = .rsp, .i = frame_layout.stack_adjust } }, + .data = .{ .ri = .{ + .r1 = .rsp, + .i = frame_layout.stack_adjust, + } }, }); } if (need_frame_align or need_stack_adjust) { self.mir_instructions.set(backpatch_stack_dealloc, .{ .tag = .mov, .ops = .rr, - .data = .{ .rr = .{ .r1 = .rsp, .r2 = .rbp } }, + .data = .{ .rr = .{ + .r1 = .rsp, + .r2 = .rbp, + } }, }); } if (need_save_reg) { - const save_reg_list = frame_layout.save_reg_list.asInt(); self.mir_instructions.set(backpatch_push_callee_preserved_regs, .{ - .tag = .push_regs, - .ops = undefined, - .data = .{ .payload = save_reg_list }, + .tag = .pseudo, + .ops = .pseudo_push_reg_list, + .data = .{ .reg_list = frame_layout.save_reg_list }, }); self.mir_instructions.set(backpatch_pop_callee_preserved_regs, .{ - .tag = .pop_regs, - .ops = undefined, - .data = .{ .payload = save_reg_list }, + .tag = .pseudo, + .ops = .pseudo_pop_reg_list, + .data = .{ .reg_list = frame_layout.save_reg_list }, }); } } else { - try self.asmOpOnly(.dbg_prologue_end); + try self.asmPseudo(.pseudo_dbg_prologue_end_none); try self.genBody(self.air.getMainBody()); - try self.asmOpOnly(.dbg_epilogue_begin); + try self.asmPseudo(.pseudo_dbg_epilogue_begin_none); } // Drop them off at the rbrace. 
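    // The debug-line marker below shows the new pseudo-instruction convention:
    // a single `.pseudo` tag, with `ops` naming both the operation and its
    // payload shape as pseudo_<operation>_<payload>, e.g.
    //   .pseudo_dbg_line_line_column  =>  operation "dbg_line", `line_column` payload
    //   .pseudo_dead_none             =>  operation "dead", no payload
    // Emit.zig dispatches on `ops` for these instead of on `tag`.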
_ = try self.addInst(.{ - .tag = .dbg_line, - .ops = undefined, + .tag = .pseudo, + .ops = .pseudo_dbg_line_line_column, .data = .{ .line_column = .{ .line = self.end_di_line, .column = self.end_di_column, @@ -2446,11 +2538,11 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void { .register => |dst_reg| { const min_abi_size = @min(dst_abi_size, src_abi_size); const tag: Mir.Inst.Tag = switch (signedness) { - .signed => .movsx, - .unsigned => if (min_abi_size > 2) .mov else .movzx, + .signed => if (min_abi_size >= 4) .movsxd else .movsx, + .unsigned => if (min_abi_size >= 4) .mov else .movzx, }; const dst_alias = switch (tag) { - .movsx => dst_reg.to64(), + .movsx, .movsxd => dst_reg.to64(), .mov, .movzx => if (min_abi_size > 4) dst_reg.to64() else dst_reg.to32(), else => unreachable, }; @@ -5247,7 +5339,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { const field_byte_size = @intCast(u32, field_ty.abiSize(self.target.*)); if (signedness == .signed and field_byte_size < 8) { try self.asmRegisterRegister( - .movsx, + if (field_byte_size >= 4) .movsxd else .movsx, dst_mcv.register, registerAlias(dst_mcv.register, field_byte_size), ); @@ -7194,10 +7286,10 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const atom_index = try self.owner.getSymbolIndex(self); const sym_index = try coff_file.getGlobalSymbol(decl_name, lib_name); _ = try self.addInst(.{ - .tag = .mov_linker, + .tag = .mov, .ops = .import_reloc, .data = .{ .rx = .{ - .r = .rax, + .r1 = .rax, .payload = try self.addExtra(Mir.Reloc{ .atom_index = atom_index, .sym_index = sym_index, @@ -7209,9 +7301,9 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const atom_index = try self.owner.getSymbolIndex(self); const sym_index = try macho_file.getGlobalSymbol(decl_name, lib_name); _ = try self.addInst(.{ - .tag = .call_extern, - .ops = undefined, - .data = .{ .relocation = .{ + .tag = .call, + .ops = .extern_fn_reloc, + .data = .{ .reloc = .{ .atom_index = atom_index, .sym_index = sym_index, } }, @@ -7489,8 +7581,8 @@ fn genTry( fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void { const dbg_stmt = self.air.instructions.items(.data)[inst].dbg_stmt; _ = try self.addInst(.{ - .tag = .dbg_line, - .ops = undefined, + .tag = .pseudo, + .ops = .pseudo_dbg_line_line_column, .data = .{ .line_column = .{ .line = dbg_stmt.line, .column = dbg_stmt.column, @@ -8021,14 +8113,14 @@ fn airSwitchBr(self: *Self, inst: Air.Inst.Index) !void { fn performReloc(self: *Self, reloc: Mir.Inst.Index) !void { const next_inst = @intCast(u32, self.mir_instructions.len); switch (self.mir_instructions.items(.tag)[reloc]) { - .jcc => { - self.mir_instructions.items(.data)[reloc].inst_cc.inst = next_inst; - }, - .jmp_reloc => { - self.mir_instructions.items(.data)[reloc].inst = next_inst; + .j, .jmp => {}, + .pseudo => switch (self.mir_instructions.items(.ops)[reloc]) { + .pseudo_j_z_and_np_inst, .pseudo_j_nz_or_p_inst => {}, + else => unreachable, }, else => unreachable, } + self.mir_instructions.items(.data)[reloc].inst.inst = next_inst; } fn airBr(self: *Self, inst: Air.Inst.Index) !void { @@ -8577,10 +8669,10 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr .load_direct => |sym_index| if (!ty.isRuntimeFloat()) { const atom_index = try self.owner.getSymbolIndex(self); _ = try self.addInst(.{ - .tag = .mov_linker, + .tag = .mov, .ops = .direct_reloc, .data = .{ .rx = .{ - .r = dst_reg.to64(), + .r1 = dst_reg.to64(), .payload = try 
self.addExtra(Mir.Reloc{ .atom_index = atom_index, .sym_index = sym_index, @@ -8618,8 +8710,8 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr const atom_index = try self.owner.getSymbolIndex(self); _ = try self.addInst(.{ .tag = switch (src_mcv) { - .lea_direct => .lea_linker, - .lea_got => .mov_linker, + .lea_direct => .lea, + .lea_got => .mov, else => unreachable, }, .ops = switch (src_mcv) { @@ -8628,7 +8720,7 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr else => unreachable, }, .data = .{ .rx = .{ - .r = dst_reg.to64(), + .r1 = dst_reg.to64(), .payload = try self.addExtra(Mir.Reloc{ .atom_index = atom_index, .sym_index = sym_index, @@ -8640,10 +8732,10 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr const atom_index = try self.owner.getSymbolIndex(self); if (self.bin_file.cast(link.File.MachO)) |_| { _ = try self.addInst(.{ - .tag = .lea_linker, + .tag = .lea, .ops = .tlv_reloc, .data = .{ .rx = .{ - .r = .rdi, + .r1 = .rdi, .payload = try self.addExtra(Mir.Reloc{ .atom_index = atom_index, .sym_index = sym_index, @@ -8847,9 +8939,9 @@ fn genInlineMemcpy(self: *Self, dst_ptr: MCValue, src_ptr: MCValue, len: MCValue try self.genSetReg(.rsi, Type.usize, src_ptr); try self.genSetReg(.rcx, Type.usize, len); _ = try self.addInst(.{ - .tag = .movs, - .ops = .string, - .data = .{ .string = .{ .repeat = .rep, .width = .b } }, + .tag = .mov, + .ops = .none, + .data = .{ .none = .{ .fixes = .@"rep _sb" } }, }); } @@ -8859,9 +8951,9 @@ fn genInlineMemset(self: *Self, dst_ptr: MCValue, value: MCValue, len: MCValue) try self.genSetReg(.al, Type.u8, value); try self.genSetReg(.rcx, Type.usize, len); _ = try self.addInst(.{ - .tag = .stos, - .ops = .string, - .data = .{ .string = .{ .repeat = .rep, .width = .b } }, + .tag = .sto, + .ops = .none, + .data = .{ .none = .{ .fixes = .@"rep _sb" } }, }); } @@ -9135,22 +9227,22 @@ fn airCmpxchg(self: *Self, inst: Air.Inst.Index) !void { defer if (ptr_lock) |lock| self.register_manager.unlockReg(lock); try self.spillEflagsIfOccupied(); - if (val_abi_size <= 8) { - _ = try self.addInst(.{ - .tag = .cmpxchg, - .ops = .lock_mr_sib, - .data = .{ .rx = .{ - .r = registerAlias(new_reg.?, val_abi_size), - .payload = try self.addExtra(Mir.MemorySib.encode(ptr_mem)), - } }, - }); - } else { - _ = try self.addInst(.{ - .tag = .cmpxchgb, - .ops = .lock_m_sib, - .data = .{ .payload = try self.addExtra(Mir.MemorySib.encode(ptr_mem)) }, - }); - } + _ = try self.addInst(if (val_abi_size <= 8) .{ + .tag = .cmpxchg, + .ops = .mr_sib, + .data = .{ .rx = .{ + .fixes = .@"lock _", + .r1 = registerAlias(new_reg.?, val_abi_size), + .payload = try self.addExtra(Mir.MemorySib.encode(ptr_mem)), + } }, + } else .{ + .tag = .cmpxchg, + .ops = .m_sib, + .data = .{ .x = .{ + .fixes = .@"lock _16b", + .payload = try self.addExtra(Mir.MemorySib.encode(ptr_mem)), + } }, + }); const result: MCValue = result: { if (self.liveness.isUnused(inst)) break :result .unreach; @@ -9252,13 +9344,14 @@ fn atomicOp( } _ = try self.addInst(.{ .tag = tag, - .ops = switch (tag) { - .mov, .xchg => .mr_sib, - .xadd, .add, .sub, .@"and", .@"or", .xor => .lock_mr_sib, - else => unreachable, - }, + .ops = .mr_sib, .data = .{ .rx = .{ - .r = registerAlias(dst_reg, val_abi_size), + .fixes = switch (tag) { + .mov, .xchg => ._, + .xadd, .add, .sub, .@"and", .@"or", .xor => .@"lock _", + else => unreachable, + }, + .r1 = registerAlias(dst_reg, val_abi_size), .payload = try 
self.addExtra(Mir.MemorySib.encode(ptr_mem)), } }, }); @@ -9330,9 +9423,10 @@ fn atomicOp( }; _ = try self.addInst(.{ .tag = .cmpxchg, - .ops = .lock_mr_sib, + .ops = .mr_sib, .data = .{ .rx = .{ - .r = registerAlias(tmp_reg, val_abi_size), + .fixes = .@"lock _", + .r1 = registerAlias(tmp_reg, val_abi_size), .payload = try self.addExtra(Mir.MemorySib.encode(ptr_mem)), } }, }); @@ -9397,9 +9491,14 @@ fn atomicOp( val_ty.fmt(self.bin_file.options.module.?), @tagName(op), }), }; - _ = try self.addInst(.{ .tag = .cmpxchgb, .ops = .lock_m_sib, .data = .{ - .payload = try self.addExtra(Mir.MemorySib.encode(ptr_mem)), - } }); + _ = try self.addInst(.{ + .tag = .cmpxchg, + .ops = .m_sib, + .data = .{ .x = .{ + .fixes = .@"lock _16b", + .payload = try self.addExtra(Mir.MemorySib.encode(ptr_mem)), + } }, + }); _ = try self.asmJccReloc(loop, .ne); if (unused) return .unreach; diff --git a/src/arch/x86_64/Emit.zig b/src/arch/x86_64/Emit.zig index 3574d52878..506092ff17 100644 --- a/src/arch/x86_64/Emit.zig +++ b/src/arch/x86_64/Emit.zig @@ -41,7 +41,7 @@ pub fn emitMir(emit: *Emit) Error!void { .offset = end_offset - 4, .length = @intCast(u5, end_offset - start_offset), }), - .@"extern" => |symbol| if (emit.bin_file.cast(link.File.MachO)) |macho_file| { + .linker_extern_fn => |symbol| if (emit.bin_file.cast(link.File.MachO)) |macho_file| { // Add relocation to the decl. const atom_index = macho_file.getAtomIndexForSymbol( .{ .sym_index = symbol.atom_index, .file = null }, @@ -129,36 +129,39 @@ pub fn emitMir(emit: *Emit) Error!void { const mir_inst = emit.lower.mir.instructions.get(mir_index); switch (mir_inst.tag) { else => unreachable, - .dead => {}, - .dbg_line => try emit.dbgAdvancePCAndLine( - mir_inst.data.line_column.line, - mir_inst.data.line_column.column, - ), - .dbg_prologue_end => { - switch (emit.debug_output) { - .dwarf => |dw| { - try dw.setPrologueEnd(); - log.debug("mirDbgPrologueEnd (line={d}, col={d})", .{ - emit.prev_di_line, emit.prev_di_column, - }); - try emit.dbgAdvancePCAndLine(emit.prev_di_line, emit.prev_di_column); - }, - .plan9 => {}, - .none => {}, - } - }, - .dbg_epilogue_begin => { - switch (emit.debug_output) { - .dwarf => |dw| { - try dw.setEpilogueBegin(); - log.debug("mirDbgEpilogueBegin (line={d}, col={d})", .{ - emit.prev_di_line, emit.prev_di_column, - }); - try emit.dbgAdvancePCAndLine(emit.prev_di_line, emit.prev_di_column); - }, - .plan9 => {}, - .none => {}, - } + .pseudo => switch (mir_inst.ops) { + else => unreachable, + .pseudo_dbg_prologue_end_none => { + switch (emit.debug_output) { + .dwarf => |dw| { + try dw.setPrologueEnd(); + log.debug("mirDbgPrologueEnd (line={d}, col={d})", .{ + emit.prev_di_line, emit.prev_di_column, + }); + try emit.dbgAdvancePCAndLine(emit.prev_di_line, emit.prev_di_column); + }, + .plan9 => {}, + .none => {}, + } + }, + .pseudo_dbg_line_line_column => try emit.dbgAdvancePCAndLine( + mir_inst.data.line_column.line, + mir_inst.data.line_column.column, + ), + .pseudo_dbg_epilogue_begin_none => { + switch (emit.debug_output) { + .dwarf => |dw| { + try dw.setEpilogueBegin(); + log.debug("mirDbgEpilogueBegin (line={d}, col={d})", .{ + emit.prev_di_line, emit.prev_di_column, + }); + try emit.dbgAdvancePCAndLine(emit.prev_di_line, emit.prev_di_column); + }, + .plan9 => {}, + .none => {}, + } + }, + .pseudo_dead_none => {}, }, } } diff --git a/src/arch/x86_64/Encoding.zig b/src/arch/x86_64/Encoding.zig index 3235b29358..b6b49e8939 100644 --- a/src/arch/x86_64/Encoding.zig +++ b/src/arch/x86_64/Encoding.zig @@ -705,7 +705,7 @@ fn 
estimateInstructionLength(prefix: Prefix, encoding: Encoding, ops: []const Op } const mnemonic_to_encodings_map = init: { - @setEvalBranchQuota(100_000); + @setEvalBranchQuota(20_000); const encodings = @import("encodings.zig"); var entries = encodings.table; std.sort.sort(encodings.Entry, &entries, {}, struct { diff --git a/src/arch/x86_64/Lower.zig b/src/arch/x86_64/Lower.zig index 5c079f4768..2d7fa4b4fd 100644 --- a/src/arch/x86_64/Lower.zig +++ b/src/arch/x86_64/Lower.zig @@ -35,7 +35,7 @@ pub const Reloc = struct { const Target = union(enum) { inst: Mir.Inst.Index, - @"extern": Mir.Reloc, + linker_extern_fn: Mir.Reloc, linker_got: Mir.Reloc, linker_direct: Mir.Reloc, linker_import: Mir.Reloc, @@ -59,280 +59,119 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct { const inst = lower.mir.instructions.get(index); switch (inst.tag) { - .adc, - .add, - .@"and", - .bsf, - .bsr, - .bswap, - .bt, - .btc, - .btr, - .bts, - .call, - .cbw, - .cwde, - .cdqe, - .cwd, - .cdq, - .cqo, - .cmp, - .cmpxchg, - .div, - .fisttp, - .fld, - .idiv, - .imul, - .int3, - .jmp, - .lea, - .lfence, - .lzcnt, - .mfence, - .mov, - .movbe, - .movd, - .movq, - .movzx, - .mul, - .neg, - .nop, - .not, - .@"or", - .pop, - .popcnt, - .push, - .rcl, - .rcr, - .ret, - .rol, - .ror, - .sal, - .sar, - .sbb, - .sfence, - .shl, - .shld, - .shr, - .shrd, - .sub, - .syscall, - .@"test", - .tzcnt, - .ud2, - .xadd, - .xchg, - .xor, - - .addps, - .addss, - .andnps, - .andps, - .cmpss, - .cvtsi2ss, - .divps, - .divss, - .maxps, - .maxss, - .minps, - .minss, - .movaps, - .movhlps, - .movss, - .movups, - .mulps, - .mulss, - .orps, - .pextrw, - .pinsrw, - .sqrtps, - .sqrtss, - .subps, - .subss, - .ucomiss, - .unpckhps, - .unpcklps, - .xorps, - - .addpd, - .addsd, - .andnpd, - .andpd, - .cmpsd, - .cvtsd2ss, - .cvtsi2sd, - .cvtss2sd, - .divpd, - .divsd, - .maxpd, - .maxsd, - .minpd, - .minsd, - .movsd, - .mulpd, - .mulsd, - .orpd, - .pshufhw, - .pshuflw, - .psrld, - .psrlq, - .psrlw, - .punpckhbw, - .punpckhdq, - .punpckhqdq, - .punpckhwd, - .punpcklbw, - .punpckldq, - .punpcklqdq, - .punpcklwd, - .sqrtpd, - .sqrtsd, - .subpd, - .subsd, - .ucomisd, - .unpckhpd, - .unpcklpd, - .xorpd, - - .movddup, - .movshdup, - .movsldup, - - .pextrb, - .pextrd, - .pextrq, - .pinsrb, - .pinsrd, - .pinsrq, - .roundpd, - .roundps, - .roundsd, - .roundss, - - .vaddpd, - .vaddps, - .vaddsd, - .vaddss, - .vcvtsd2ss, - .vcvtsi2sd, - .vcvtsi2ss, - .vcvtss2sd, - .vdivpd, - .vdivps, - .vdivsd, - .vdivss, - .vmaxpd, - .vmaxps, - .vmaxsd, - .vmaxss, - .vminpd, - .vminps, - .vminsd, - .vminss, - .vmovapd, - .vmovaps, - .vmovddup, - .vmovhlps, - .vmovsd, - .vmovshdup, - .vmovsldup, - .vmovss, - .vmovupd, - .vmovups, - .vmulpd, - .vmulps, - .vmulsd, - .vmulss, - .vpextrb, - .vpextrd, - .vpextrq, - .vpextrw, - .vpinsrb, - .vpinsrd, - .vpinsrq, - .vpinsrw, - .vpshufhw, - .vpshuflw, - .vpsrld, - .vpsrlq, - .vpsrlw, - .vpunpckhbw, - .vpunpckhdq, - .vpunpckhqdq, - .vpunpckhwd, - .vpunpcklbw, - .vpunpckldq, - .vpunpcklqdq, - .vpunpcklwd, - .vroundpd, - .vroundps, - .vroundsd, - .vroundss, - .vsqrtpd, - .vsqrtps, - .vsqrtsd, - .vsqrtss, - .vsubpd, - .vsubps, - .vsubsd, - .vsubss, - .vunpckhpd, - .vunpckhps, - .vunpcklpd, - .vunpcklps, - - .vcvtph2ps, - .vcvtps2ph, - - .vfmadd132pd, - .vfmadd213pd, - .vfmadd231pd, - .vfmadd132ps, - .vfmadd213ps, - .vfmadd231ps, - .vfmadd132sd, - .vfmadd213sd, - .vfmadd231sd, - .vfmadd132ss, - .vfmadd213ss, - .vfmadd231ss, - => try lower.mirGeneric(inst), - - .cmps, - .lods, - .movs, - .scas, - .stos, - => try 
lower.mirString(inst), - - .cmpxchgb => try lower.mirCmpxchgBytes(inst), - - .jmp_reloc => try lower.emitInstWithReloc(.none, .jmp, &.{ - .{ .imm = Immediate.s(0) }, - }, .{ .inst = inst.data.inst }), - - .call_extern => try lower.emitInstWithReloc(.none, .call, &.{ - .{ .imm = Immediate.s(0) }, - }, .{ .@"extern" = inst.data.relocation }), - - .lea_linker => try lower.mirLinker(.lea, inst), - .mov_linker => try lower.mirLinker(.mov, inst), - - .mov_moffs => try lower.mirMovMoffs(inst), - - .movsx => try lower.mirMovsx(inst), - .cmovcc => try lower.mirCmovcc(inst), - .setcc => try lower.mirSetcc(inst), - .jcc => try lower.mirJcc(index, inst), + else => try lower.generic(inst), + .pseudo => switch (inst.ops) { + .pseudo_cmov_z_and_np_rr => { + try lower.emit(.none, .cmovnz, &.{ + .{ .reg = inst.data.rr.r2 }, + .{ .reg = inst.data.rr.r1 }, + }); + try lower.emit(.none, .cmovnp, &.{ + .{ .reg = inst.data.rr.r1 }, + .{ .reg = inst.data.rr.r2 }, + }); + }, + .pseudo_cmov_nz_or_p_rr => { + try lower.emit(.none, .cmovnz, &.{ + .{ .reg = inst.data.rr.r1 }, + .{ .reg = inst.data.rr.r2 }, + }); + try lower.emit(.none, .cmovp, &.{ + .{ .reg = inst.data.rr.r1 }, + .{ .reg = inst.data.rr.r2 }, + }); + }, + .pseudo_cmov_nz_or_p_rm_sib, + .pseudo_cmov_nz_or_p_rm_rip, + => { + try lower.emit(.none, .cmovnz, &.{ + .{ .reg = inst.data.rx.r1 }, + .{ .mem = lower.mem(inst.ops, inst.data.rx.payload) }, + }); + try lower.emit(.none, .cmovp, &.{ + .{ .reg = inst.data.rx.r1 }, + .{ .mem = lower.mem(inst.ops, inst.data.rx.payload) }, + }); + }, + .pseudo_set_z_and_np_r => { + try lower.emit(.none, .setz, &.{ + .{ .reg = inst.data.r_scratch.r1 }, + }); + try lower.emit(.none, .setnp, &.{ + .{ .reg = inst.data.r_scratch.scratch_reg }, + }); + try lower.emit(.none, .@"and", &.{ + .{ .reg = inst.data.r_scratch.r1 }, + .{ .reg = inst.data.r_scratch.scratch_reg }, + }); + }, + .pseudo_set_z_and_np_m_sib, + .pseudo_set_z_and_np_m_rip, + => { + try lower.emit(.none, .setz, &.{ + .{ .mem = lower.mem(inst.ops, inst.data.x_scratch.payload) }, + }); + try lower.emit(.none, .setnp, &.{ + .{ .reg = inst.data.x_scratch.scratch_reg }, + }); + try lower.emit(.none, .@"and", &.{ + .{ .mem = lower.mem(inst.ops, inst.data.x_scratch.payload) }, + .{ .reg = inst.data.x_scratch.scratch_reg }, + }); + }, + .pseudo_set_nz_or_p_r => { + try lower.emit(.none, .setnz, &.{ + .{ .reg = inst.data.r_scratch.r1 }, + }); + try lower.emit(.none, .setp, &.{ + .{ .reg = inst.data.r_scratch.scratch_reg }, + }); + try lower.emit(.none, .@"or", &.{ + .{ .reg = inst.data.r_scratch.r1 }, + .{ .reg = inst.data.r_scratch.scratch_reg }, + }); + }, + .pseudo_set_nz_or_p_m_sib, + .pseudo_set_nz_or_p_m_rip, + => { + try lower.emit(.none, .setnz, &.{ + .{ .mem = lower.mem(inst.ops, inst.data.x_scratch.payload) }, + }); + try lower.emit(.none, .setp, &.{ + .{ .reg = inst.data.x_scratch.scratch_reg }, + }); + try lower.emit(.none, .@"or", &.{ + .{ .mem = lower.mem(inst.ops, inst.data.x_scratch.payload) }, + .{ .reg = inst.data.x_scratch.scratch_reg }, + }); + }, + .pseudo_j_z_and_np_inst => { + try lower.emit(.none, .jnz, &.{ + .{ .imm = lower.reloc(.{ .inst = index + 1 }) }, + }); + try lower.emit(.none, .jnp, &.{ + .{ .imm = lower.reloc(.{ .inst = inst.data.inst.inst }) }, + }); + }, + .pseudo_j_nz_or_p_inst => { + try lower.emit(.none, .jnz, &.{ + .{ .imm = lower.reloc(.{ .inst = inst.data.inst.inst }) }, + }); + try lower.emit(.none, .jp, &.{ + .{ .imm = lower.reloc(.{ .inst = inst.data.inst.inst }) }, + }); + }, - .push_regs => try 
lower.mirRegisterList(.push, inst), - .pop_regs => try lower.mirRegisterList(.pop, inst), + .pseudo_push_reg_list => try lower.pushPopRegList(.push, inst), + .pseudo_pop_reg_list => try lower.pushPopRegList(.pop, inst), - .dbg_line, - .dbg_prologue_end, - .dbg_epilogue_begin, - .dead, - => {}, + .pseudo_dbg_prologue_end_none, + .pseudo_dbg_line_line_column, + .pseudo_dbg_epilogue_begin_none, + .pseudo_dead_none, + => {}, + else => unreachable, + }, } return .{ @@ -348,15 +187,6 @@ pub fn fail(lower: *Lower, comptime format: []const u8, args: anytype) Error { return error.LowerFail; } -fn mnem_cc(comptime base: @Type(.EnumLiteral), cc: bits.Condition) Mnemonic { - return switch (cc) { - inline else => |c| if (@hasField(Mnemonic, @tagName(base) ++ @tagName(c))) - @field(Mnemonic, @tagName(base) ++ @tagName(c)) - else - unreachable, - }; -} - fn imm(lower: Lower, ops: Mir.Inst.Ops, i: u32) Immediate { return switch (ops) { .rri_s, @@ -364,8 +194,6 @@ fn imm(lower: Lower, ops: Mir.Inst.Ops, i: u32) Immediate { .i_s, .mi_sib_s, .mi_rip_s, - .lock_mi_sib_s, - .lock_mi_rip_s, => Immediate.s(@bitCast(i32, i)), .rrri, @@ -374,8 +202,6 @@ fn imm(lower: Lower, ops: Mir.Inst.Ops, i: u32) Immediate { .i_u, .mi_sib_u, .mi_rip_u, - .lock_mi_sib_u, - .lock_mi_rip_u, .rmi_sib, .rmi_rip, .mri_sib, @@ -395,10 +221,8 @@ fn imm(lower: Lower, ops: Mir.Inst.Ops, i: u32) Immediate { fn mem(lower: Lower, ops: Mir.Inst.Ops, payload: u32) Memory { return lower.mir.resolveFrameLoc(switch (ops) { .rm_sib, - .rm_sib_cc, .rmi_sib, .m_sib, - .m_sib_cc, .mi_sib_u, .mi_sib_s, .mr_sib, @@ -406,17 +230,15 @@ fn mem(lower: Lower, ops: Mir.Inst.Ops, payload: u32) Memory { .mri_sib, .rrm_sib, .rrmi_sib, - .lock_m_sib, - .lock_mi_sib_u, - .lock_mi_sib_s, - .lock_mr_sib, + + .pseudo_cmov_nz_or_p_rm_sib, + .pseudo_set_z_and_np_m_sib, + .pseudo_set_nz_or_p_m_sib, => lower.mir.extraData(Mir.MemorySib, payload).data.decode(), .rm_rip, - .rm_rip_cc, .rmi_rip, .m_rip, - .m_rip_cc, .mi_rip_u, .mi_rip_s, .mr_rip, @@ -424,66 +246,83 @@ fn mem(lower: Lower, ops: Mir.Inst.Ops, payload: u32) Memory { .mri_rip, .rrm_rip, .rrmi_rip, - .lock_m_rip, - .lock_mi_rip_u, - .lock_mi_rip_s, - .lock_mr_rip, + + .pseudo_cmov_nz_or_p_rm_rip, + .pseudo_set_z_and_np_m_rip, + .pseudo_set_nz_or_p_m_rip, => lower.mir.extraData(Mir.MemoryRip, payload).data.decode(), .rax_moffs, .moffs_rax, - .lock_moffs_rax, => lower.mir.extraData(Mir.MemoryMoffs, payload).data.decode(), else => unreachable, }); } -fn emitInst(lower: *Lower, prefix: Prefix, mnemonic: Mnemonic, ops: []const Operand) Error!void { - lower.result_insts[lower.result_insts_len] = try Instruction.new(prefix, mnemonic, ops); - lower.result_insts_len += 1; -} - -fn emitInstWithReloc( - lower: *Lower, - prefix: Prefix, - mnemonic: Mnemonic, - ops: []const Operand, - target: Reloc.Target, -) Error!void { +fn reloc(lower: *Lower, target: Reloc.Target) Immediate { lower.result_relocs[lower.result_relocs_len] = .{ .lowered_inst_index = lower.result_insts_len, .target = target, }; lower.result_relocs_len += 1; - try lower.emitInst(prefix, mnemonic, ops); + return Immediate.s(0); +} + +fn emit(lower: *Lower, prefix: Prefix, mnemonic: Mnemonic, ops: []const Operand) Error!void { + lower.result_insts[lower.result_insts_len] = try Instruction.new(prefix, mnemonic, ops); + lower.result_insts_len += 1; } -fn mirGeneric(lower: *Lower, inst: Mir.Inst) Error!void { - try lower.emitInst(switch (inst.ops) { - else => .none, - .lock_m_sib, - .lock_m_rip, - .lock_mi_sib_u, - .lock_mi_rip_u, - .lock_mi_sib_s, - 
.lock_mi_rip_s, - .lock_mr_sib, - .lock_mr_rip, - .lock_moffs_rax, - => .lock, - }, switch (inst.tag) { - inline else => |tag| if (@hasField(Mnemonic, @tagName(tag))) - @field(Mnemonic, @tagName(tag)) +fn generic(lower: *Lower, inst: Mir.Inst) Error!void { + const fixes = switch (inst.ops) { + .none => inst.data.none.fixes, + .inst => inst.data.inst.fixes, + .i_s, .i_u => inst.data.i.fixes, + .r => inst.data.r.fixes, + .rr => inst.data.rr.fixes, + .rrr => inst.data.rrr.fixes, + .rrri => inst.data.rrri.fixes, + .rri_s, .rri_u => inst.data.rri.fixes, + .ri_s, .ri_u => inst.data.ri.fixes, + .ri64, .rm_sib, .rm_rip, .mr_sib, .mr_rip => inst.data.rx.fixes, + .mi_sib_u, .mi_rip_u, .mi_sib_s, .mi_rip_s => ._, + .mrr_sib, .mrr_rip, .rrm_sib, .rrm_rip => inst.data.rrx.fixes, + .rmi_sib, .rmi_rip, .mri_sib, .mri_rip => inst.data.rix.fixes, + .rrmi_sib, .rrmi_rip => inst.data.rrix.fixes, + .m_sib, .m_rip, .rax_moffs, .moffs_rax => inst.data.x.fixes, + .extern_fn_reloc, .got_reloc, .direct_reloc, .import_reloc, .tlv_reloc => ._, + else => return lower.fail("TODO lower .{s}", .{@tagName(inst.ops)}), + }; + try lower.emit(switch (fixes) { + inline else => |tag| comptime if (std.mem.indexOfScalar(u8, @tagName(tag), ' ')) |space| + @field(Prefix, @tagName(tag)[0..space]) else - unreachable, + .none, + }, mnemonic: { + comptime var max_len = 0; + inline for (@typeInfo(Mnemonic).Enum.fields) |field| max_len = @max(field.name.len, max_len); + var buf: [max_len]u8 = undefined; + + const fixes_name = @tagName(fixes); + const pattern = fixes_name[if (std.mem.indexOfScalar(u8, fixes_name, ' ')) |i| i + 1 else 0..]; + const wildcard_i = std.mem.indexOfScalar(u8, pattern, '_').?; + const parts = .{ pattern[0..wildcard_i], @tagName(inst.tag), pattern[wildcard_i + 1 ..] }; + const err_msg = "unsupported mnemonic: "; + const mnemonic = std.fmt.bufPrint(&buf, "{s}{s}{s}", parts) catch + return lower.fail(err_msg ++ "'{s}{s}{s}'", parts); + break :mnemonic std.meta.stringToEnum(Mnemonic, mnemonic) orelse + return lower.fail(err_msg ++ "'{s}'", .{mnemonic}); }, switch (inst.ops) { .none => &.{}, + .inst => &.{ + .{ .imm = lower.reloc(.{ .inst = inst.data.inst.inst }) }, + }, .i_s, .i_u => &.{ - .{ .imm = lower.imm(inst.ops, inst.data.i) }, + .{ .imm = lower.imm(inst.ops, inst.data.i.i) }, }, .r => &.{ - .{ .reg = inst.data.r }, + .{ .reg = inst.data.r.r1 }, }, .rr => &.{ .{ .reg = inst.data.rr.r1 }, @@ -501,11 +340,11 @@ fn mirGeneric(lower: *Lower, inst: Mir.Inst) Error!void { .{ .imm = lower.imm(inst.ops, inst.data.rrri.i) }, }, .ri_s, .ri_u => &.{ - .{ .reg = inst.data.ri.r }, + .{ .reg = inst.data.ri.r1 }, .{ .imm = lower.imm(inst.ops, inst.data.ri.i) }, }, .ri64 => &.{ - .{ .reg = inst.data.rx.r }, + .{ .reg = inst.data.rx.r1 }, .{ .imm = lower.imm(inst.ops, inst.data.rx.payload) }, }, .rri_s, .rri_u => &.{ @@ -513,33 +352,25 @@ fn mirGeneric(lower: *Lower, inst: Mir.Inst) Error!void { .{ .reg = inst.data.rri.r2 }, .{ .imm = lower.imm(inst.ops, inst.data.rri.i) }, }, - .m_sib, .lock_m_sib, .m_rip, .lock_m_rip => &.{ - .{ .mem = lower.mem(inst.ops, inst.data.payload) }, + .m_sib, .m_rip => &.{ + .{ .mem = lower.mem(inst.ops, inst.data.x.payload) }, }, - .mi_sib_s, - .lock_mi_sib_s, - .mi_sib_u, - .lock_mi_sib_u, - .mi_rip_u, - .lock_mi_rip_u, - .mi_rip_s, - .lock_mi_rip_s, - => &.{ + .mi_sib_s, .mi_sib_u, .mi_rip_u, .mi_rip_s => &.{ .{ .mem = lower.mem(inst.ops, inst.data.ix.payload) }, .{ .imm = lower.imm(inst.ops, inst.data.ix.i) }, }, .rm_sib, .rm_rip => &.{ - .{ .reg = inst.data.rx.r }, + .{ .reg = 
inst.data.rx.r1 }, .{ .mem = lower.mem(inst.ops, inst.data.rx.payload) }, }, .rmi_sib, .rmi_rip => &.{ - .{ .reg = inst.data.rix.r }, + .{ .reg = inst.data.rix.r1 }, .{ .mem = lower.mem(inst.ops, inst.data.rix.payload) }, .{ .imm = lower.imm(inst.ops, inst.data.rix.i) }, }, - .mr_sib, .lock_mr_sib, .mr_rip, .lock_mr_rip => &.{ + .mr_sib, .mr_rip => &.{ .{ .mem = lower.mem(inst.ops, inst.data.rx.payload) }, - .{ .reg = inst.data.rx.r }, + .{ .reg = inst.data.rx.r1 }, }, .mrr_sib, .mrr_rip => &.{ .{ .mem = lower.mem(inst.ops, inst.data.rrx.payload) }, @@ -548,7 +379,7 @@ fn mirGeneric(lower: *Lower, inst: Mir.Inst) Error!void { }, .mri_sib, .mri_rip => &.{ .{ .mem = lower.mem(inst.ops, inst.data.rix.payload) }, - .{ .reg = inst.data.rix.r }, + .{ .reg = inst.data.rix.r1 }, .{ .imm = lower.imm(inst.ops, inst.data.rix.i) }, }, .rrm_sib, .rrm_rip => &.{ @@ -562,180 +393,46 @@ fn mirGeneric(lower: *Lower, inst: Mir.Inst) Error!void { .{ .mem = lower.mem(inst.ops, inst.data.rrix.payload) }, .{ .imm = lower.imm(inst.ops, inst.data.rrix.i) }, }, - else => return lower.fail("TODO lower {s} {s}", .{ @tagName(inst.tag), @tagName(inst.ops) }), - }); -} - -fn mirString(lower: *Lower, inst: Mir.Inst) Error!void { - switch (inst.ops) { - .string => try lower.emitInst(switch (inst.data.string.repeat) { - inline else => |repeat| @field(Prefix, @tagName(repeat)), - }, switch (inst.tag) { - inline .cmps, .lods, .movs, .scas, .stos => |tag| switch (inst.data.string.width) { - inline else => |width| @field(Mnemonic, @tagName(tag) ++ @tagName(width)), - }, - else => unreachable, - }, &.{}), - else => return lower.fail("TODO lower {s} {s}", .{ @tagName(inst.tag), @tagName(inst.ops) }), - } -} - -fn mirCmpxchgBytes(lower: *Lower, inst: Mir.Inst) Error!void { - const ops: [1]Operand = switch (inst.ops) { - .m_sib, .lock_m_sib, .m_rip, .lock_m_rip => .{ - .{ .mem = lower.mem(inst.ops, inst.data.payload) }, - }, - else => return lower.fail("TODO lower {s} {s}", .{ @tagName(inst.tag), @tagName(inst.ops) }), - }; - try lower.emitInst(switch (inst.ops) { - .m_sib, .m_rip => .none, - .lock_m_sib, .lock_m_rip => .lock, - else => unreachable, - }, switch (@divExact(ops[0].bitSize(), 8)) { - 8 => .cmpxchg8b, - 16 => .cmpxchg16b, - else => return lower.fail("invalid operand for {s}", .{@tagName(inst.tag)}), - }, &ops); -} - -fn mirMovMoffs(lower: *Lower, inst: Mir.Inst) Error!void { - try lower.emitInst(switch (inst.ops) { - .rax_moffs, .moffs_rax => .none, - .lock_moffs_rax => .lock, - else => return lower.fail("TODO lower {s} {s}", .{ @tagName(inst.tag), @tagName(inst.ops) }), - }, .mov, switch (inst.ops) { .rax_moffs => &.{ .{ .reg = .rax }, - .{ .mem = lower.mem(inst.ops, inst.data.payload) }, + .{ .mem = lower.mem(inst.ops, inst.data.x.payload) }, }, - .moffs_rax, .lock_moffs_rax => &.{ - .{ .mem = lower.mem(inst.ops, inst.data.payload) }, + .moffs_rax => &.{ + .{ .mem = lower.mem(inst.ops, inst.data.x.payload) }, .{ .reg = .rax }, }, - else => unreachable, - }); -} - -fn mirMovsx(lower: *Lower, inst: Mir.Inst) Error!void { - const ops: [2]Operand = switch (inst.ops) { - .rr => .{ - .{ .reg = inst.data.rr.r1 }, - .{ .reg = inst.data.rr.r2 }, - }, - .rm_sib, .rm_rip => .{ - .{ .reg = inst.data.rx.r }, - .{ .mem = lower.mem(inst.ops, inst.data.rx.payload) }, - }, - else => return lower.fail("TODO lower {s} {s}", .{ @tagName(inst.tag), @tagName(inst.ops) }), - }; - try lower.emitInst(.none, switch (ops[0].bitSize()) { - 32, 64 => switch (ops[1].bitSize()) { - 32 => .movsxd, - else => .movsx, - }, - else => .movsx, - }, 
&ops); -} - -fn mirCmovcc(lower: *Lower, inst: Mir.Inst) Error!void { - const data: struct { cc: bits.Condition, ops: [2]Operand } = switch (inst.ops) { - .rr_cc => .{ .cc = inst.data.rr_cc.cc, .ops = .{ - .{ .reg = inst.data.rr_cc.r1 }, - .{ .reg = inst.data.rr_cc.r2 }, - } }, - .rm_sib_cc, .rm_rip_cc => .{ .cc = inst.data.rx_cc.cc, .ops = .{ - .{ .reg = inst.data.rx_cc.r }, - .{ .mem = lower.mem(inst.ops, inst.data.rx_cc.payload) }, - } }, - else => return lower.fail("TODO lower {s} {s}", .{ @tagName(inst.tag), @tagName(inst.ops) }), - }; - switch (data.cc) { - else => |cc| try lower.emitInst(.none, mnem_cc(.cmov, cc), &data.ops), - .z_and_np => { - try lower.emitInst(.none, mnem_cc(.cmov, .nz), &.{ data.ops[1], data.ops[0] }); - try lower.emitInst(.none, mnem_cc(.cmov, .np), &data.ops); + .extern_fn_reloc => &.{ + .{ .imm = lower.reloc(.{ .linker_extern_fn = inst.data.reloc }) }, }, - .nz_or_p => { - try lower.emitInst(.none, mnem_cc(.cmov, .nz), &data.ops); - try lower.emitInst(.none, mnem_cc(.cmov, .p), &data.ops); + .got_reloc, .direct_reloc, .import_reloc, .tlv_reloc => ops: { + const reg = inst.data.rx.r1; + const extra = lower.mir.extraData(Mir.Reloc, inst.data.rx.payload).data; + _ = lower.reloc(switch (inst.ops) { + .got_reloc => .{ .linker_got = extra }, + .direct_reloc => .{ .linker_direct = extra }, + .import_reloc => .{ .linker_import = extra }, + .tlv_reloc => .{ .linker_tlv = extra }, + else => unreachable, + }); + break :ops &.{ + .{ .reg = reg }, + .{ .mem = Memory.rip(Memory.PtrSize.fromBitSize(reg.bitSize()), 0) }, + }; }, - } -} - -fn mirSetcc(lower: *Lower, inst: Mir.Inst) Error!void { - const data: struct { cc: bits.Condition, ops: [2]Operand } = switch (inst.ops) { - .r_cc => .{ .cc = inst.data.r_cc.cc, .ops = .{ - .{ .reg = inst.data.r_cc.r }, - .{ .reg = inst.data.r_cc.scratch }, - } }, - .m_sib_cc, .m_rip_cc => .{ .cc = inst.data.x_cc.cc, .ops = .{ - .{ .mem = lower.mem(inst.ops, inst.data.x_cc.payload) }, - .{ .reg = inst.data.x_cc.scratch }, - } }, else => return lower.fail("TODO lower {s} {s}", .{ @tagName(inst.tag), @tagName(inst.ops) }), - }; - switch (data.cc) { - else => |cc| try lower.emitInst(.none, mnem_cc(.set, cc), data.ops[0..1]), - .z_and_np => { - try lower.emitInst(.none, mnem_cc(.set, .z), data.ops[0..1]); - try lower.emitInst(.none, mnem_cc(.set, .np), data.ops[1..2]); - try lower.emitInst(.none, .@"and", data.ops[0..2]); - }, - .nz_or_p => { - try lower.emitInst(.none, mnem_cc(.set, .nz), data.ops[0..1]); - try lower.emitInst(.none, mnem_cc(.set, .p), data.ops[1..2]); - try lower.emitInst(.none, .@"or", data.ops[0..2]); - }, - } -} - -fn mirJcc(lower: *Lower, index: Mir.Inst.Index, inst: Mir.Inst) Error!void { - switch (inst.data.inst_cc.cc) { - else => |cc| try lower.emitInstWithReloc(.none, mnem_cc(.j, cc), &.{ - .{ .imm = Immediate.s(0) }, - }, .{ .inst = inst.data.inst_cc.inst }), - .z_and_np => { - try lower.emitInstWithReloc(.none, mnem_cc(.j, .nz), &.{ - .{ .imm = Immediate.s(0) }, - }, .{ .inst = index + 1 }); - try lower.emitInstWithReloc(.none, mnem_cc(.j, .np), &.{ - .{ .imm = Immediate.s(0) }, - }, .{ .inst = inst.data.inst_cc.inst }); - }, - .nz_or_p => { - try lower.emitInstWithReloc(.none, mnem_cc(.j, .nz), &.{ - .{ .imm = Immediate.s(0) }, - }, .{ .inst = inst.data.inst_cc.inst }); - try lower.emitInstWithReloc(.none, mnem_cc(.j, .p), &.{ - .{ .imm = Immediate.s(0) }, - }, .{ .inst = inst.data.inst_cc.inst }); - }, - } + }); } -fn mirRegisterList(lower: *Lower, comptime mnemonic: Mnemonic, inst: Mir.Inst) Error!void { - const 
reg_list = Mir.RegisterList.fromInt(inst.data.payload); +fn pushPopRegList(lower: *Lower, comptime mnemonic: Mnemonic, inst: Mir.Inst) Error!void { const callee_preserved_regs = abi.getCalleePreservedRegs(lower.target.*); - var it = reg_list.iterator(.{ .direction = switch (mnemonic) { + var it = inst.data.reg_list.iterator(.{ .direction = switch (mnemonic) { .push => .reverse, .pop => .forward, else => unreachable, } }); - while (it.next()) |i| try lower.emitInst(.none, mnemonic, &.{.{ .reg = callee_preserved_regs[i] }}); -} - -fn mirLinker(lower: *Lower, mnemonic: Mnemonic, inst: Mir.Inst) Error!void { - const reloc = lower.mir.extraData(Mir.Reloc, inst.data.rx.payload).data; - try lower.emitInstWithReloc(.none, mnemonic, &.{ - .{ .reg = inst.data.rx.r }, - .{ .mem = Memory.rip(Memory.PtrSize.fromBitSize(inst.data.rx.r.bitSize()), 0) }, - }, switch (inst.ops) { - .got_reloc => .{ .linker_got = reloc }, - .direct_reloc => .{ .linker_direct = reloc }, - .import_reloc => .{ .linker_import = reloc }, - .tlv_reloc => .{ .linker_tlv = reloc }, - else => unreachable, - }); + while (it.next()) |i| try lower.emit(.none, mnemonic, &.{.{ + .reg = callee_preserved_regs[i], + }}); } const abi = @import("abi.zig"); diff --git a/src/arch/x86_64/Mir.zig b/src/arch/x86_64/Mir.zig index 442cfabebb..951a0c5d4d 100644 --- a/src/arch/x86_64/Mir.zig +++ b/src/arch/x86_64/Mir.zig @@ -32,6 +32,210 @@ pub const Inst = struct { pub const Index = u32; + pub const Fixes = enum(u8) { + /// ___ + @"_", + + /// ___ Above + _a, + /// ___ Above Or Equal + _ae, + /// ___ Below + _b, + /// ___ Below Or Equal + _be, + /// ___ Carry + _c, + /// ___ Equal + _e, + /// ___ Greater + _g, + /// ___ Greater Or Equal + _ge, + /// ___ Less + _l, + /// ___ Less Or Equal + _le, + /// ___ Not Above + _na, + /// ___ Not Above Or Equal + _nae, + /// ___ Not Below + _nb, + /// ___ Not Below Or Equal + _nbe, + /// ___ Not Carry + _nc, + /// ___ Not Equal + _ne, + /// ___ Not Greater + _ng, + /// ___ Not Greater Or Equal + _nge, + /// ___ Not Less + _nl, + /// ___ Not Less Or Equal + _nle, + /// ___ Not Overflow + _no, + /// ___ Not Parity + _np, + /// ___ Not Sign + _ns, + /// ___ Not Zero + _nz, + /// ___ Overflow + _o, + /// ___ Parity + _p, + /// ___ Parity Even + _pe, + /// ___ Parity Odd + _po, + /// ___ Sign + _s, + /// ___ Zero + _z, + + /// ___ String + //_s, + /// ___ String Byte + _sb, + /// ___ String Word + _sw, + /// ___ String Doubleword + _sd, + /// ___ String Quadword + _sq, + + /// Repeat ___ String + @"rep _s", + /// Repeat ___ String Byte + @"rep _sb", + /// Repeat ___ String Word + @"rep _sw", + /// Repeat ___ String Doubleword + @"rep _sd", + /// Repeat ___ String Quadword + @"rep _sq", + + /// Repeat Equal ___ String + @"repe _s", + /// Repeat Equal ___ String Byte + @"repe _sb", + /// Repeat Equal ___ String Word + @"repe _sw", + /// Repeat Equal ___ String Doubleword + @"repe _sd", + /// Repeat Equal ___ String Quadword + @"repe _sq", + + /// Repeat Not Equal ___ String + @"repne _s", + /// Repeat Not Equal ___ String Byte + @"repne _sb", + /// Repeat Not Equal ___ String Word + @"repne _sw", + /// Repeat Not Equal ___ String Doubleword + @"repne _sd", + /// Repeat Not Equal ___ String Quadword + @"repne _sq", + + /// Repeat Not Zero ___ String + @"repnz _s", + /// Repeat Not Zero ___ String Byte + @"repnz _sb", + /// Repeat Not Zero ___ String Word + @"repnz _sw", + /// Repeat Not Zero ___ String Doubleword + @"repnz _sd", + /// Repeat Not Zero ___ String Quadword + @"repnz _sq", + + /// Repeat Zero ___ String + 
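+        // How these tags become mnemonics: Lower.generic splits a Fixes name at
+        // the space to get the instruction prefix, then substitutes the Tag name
+        // for the `_` wildcard. A standalone model of that splice (spliceExample
+        // is purely illustrative and not part of this patch; the real code also
+        // maps the prefix text through the Prefix enum):
+        fn spliceExample(buf: []u8, fixes_name: []const u8, tag_name: []const u8) ![]const u8 {
+            const pattern = if (std.mem.indexOfScalar(u8, fixes_name, ' ')) |i| fixes_name[i + 1 ..] else fixes_name;
+            const wildcard = std.mem.indexOfScalar(u8, pattern, '_').?;
+            return std.fmt.bufPrint(buf, "{s}{s}{s}", .{ pattern[0..wildcard], tag_name, pattern[wildcard + 1 ..] });
+        }
+        // spliceExample(&buf, "rep _sb", "mov")       => "movsb"      (prefix "rep")
+        // spliceExample(&buf, "_nz", "cmov")          => "cmovnz"
+        // spliceExample(&buf, "lock _16b", "cmpxchg") => "cmpxchg16b" (prefix "lock")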
@"repz _s", + /// Repeat Zero ___ String Byte + @"repz _sb", + /// Repeat Zero ___ String Word + @"repz _sw", + /// Repeat Zero ___ String Doubleword + @"repz _sd", + /// Repeat Zero ___ String Quadword + @"repz _sq", + + /// Locked ___ + @"lock _", + /// ___ 8 Bytes + _8b, + /// Locked ___ 8 Bytes + @"lock _8b", + /// ___ 16 Bytes + _16b, + /// Locked ___ 16 Bytes + @"lock _16b", + + /// Packed ___ + p_, + /// Packed ___ Byte + p_b, + /// Packed ___ Word + p_w, + /// Packed ___ Doubleword + p_d, + /// Packed ___ Quadword + p_q, + /// Packed ___ Double Quadword + p_dq, + + /// ___ Scalar Single-Precision Values + _ss, + /// ___ Packed Single-Precision Values + _ps, + /// ___ Scalar Double-Precision Values + //_sd, + /// ___ Packed Double-Precision Values + _pd, + + /// VEX-Encoded ___ + v_, + /// VEX-Encoded Packed ___ + vp_, + /// VEX-Encoded Packed ___ Byte + vp_b, + /// VEX-Encoded Packed ___ Word + vp_w, + /// VEX-Encoded Packed ___ Doubleword + vp_d, + /// VEX-Encoded Packed ___ Quadword + vp_q, + /// VEX-Encoded Packed ___ Double Quadword + vp_dq, + /// VEX-Encoded ___ Scalar Single-Precision Values + v_ss, + /// VEX-Encoded ___ Packed Single-Precision Values + v_ps, + /// VEX-Encoded ___ Scalar Double-Precision Values + v_sd, + /// VEX-Encoded ___ Packed Double-Precision Values + v_pd, + + /// Mask ___ Byte + k_b, + /// Mask ___ Word + k_w, + /// Mask ___ Doubleword + k_d, + /// Mask ___ Quadword + k_q, + + pub fn fromCondition(cc: bits.Condition) Fixes { + return switch (cc) { + inline else => |cc_tag| @field(Fixes, "_" ++ @tagName(cc_tag)), + .z_and_np, .nz_or_p => unreachable, + }; + } + }; + pub const Tag = enum(u8) { /// Add with carry adc, @@ -57,22 +261,24 @@ pub const Inst = struct { call, /// Convert byte to word cbw, - /// Convert word to doubleword - cwde, - /// Convert doubleword to quadword - cdqe, - /// Convert word to doubleword - cwd, /// Convert doubleword to quadword cdq, /// Convert doubleword to quadword - cqo, + cdqe, + /// Conditional move + cmov, /// Logical compare + /// Compare string cmp, /// Compare and exchange - cmpxchg, /// Compare and exchange bytes - cmpxchgb, + cmpxchg, + /// Convert doubleword to quadword + cqo, + /// Convert word to doubleword + cwd, + /// Convert word to doubleword + cwde, /// Unsigned division div, /// Store integer with truncation @@ -85,10 +291,14 @@ pub const Inst = struct { imul, /// int3, + /// Conditional jump + j, /// Jump jmp, /// Load effective address lea, + /// Load string + lod, /// Load fence lfence, /// Count the number of leading zero bits @@ -96,6 +306,7 @@ pub const Inst = struct { /// Memory fence mfence, /// Move + /// Move data from string to string mov, /// Move data after swapping bytes movbe, @@ -105,6 +316,8 @@ pub const Inst = struct { movq, /// Move with sign extension movsx, + /// Move with sign extension + movsxd, /// Move with zero extension movzx, /// Multiply @@ -139,6 +352,10 @@ pub const Inst = struct { sar, /// Integer subtraction with borrow sbb, + /// Scan string + sca, + /// Set byte on condition + set, /// Store fence sfence, /// Logical shift left @@ -151,6 +368,8 @@ pub const Inst = struct { shrd, /// Subtract sub, + /// Store string + sto, /// Syscall syscall, /// Test condition @@ -505,57 +724,10 @@ pub const Inst = struct { /// Fused multiply-add of scalar single-precision floating-point values vfmadd231ss, - /// Compare string operands - cmps, - /// Load string - lods, - /// Move data from string to string - movs, - /// Scan string - scas, - /// Store string - stos, - - /// Conditional 
move - cmovcc, - /// Conditional jump - jcc, - /// Set byte on condition - setcc, - - /// Mov absolute to/from memory wrt segment register to/from rax - mov_moffs, - - /// Jump with relocation to another local MIR instruction - /// Uses `inst` payload. - jmp_reloc, - - /// Call to an extern symbol via linker relocation. - /// Uses `relocation` payload. - call_extern, - - /// Load effective address of a symbol not yet allocated in VM. - lea_linker, - /// Move address of a symbol not yet allocated in VM. - mov_linker, - - /// End of prologue - dbg_prologue_end, - /// Start of epilogue - dbg_epilogue_begin, - /// Update debug line - /// Uses `line_column` payload containing the line and column. - dbg_line, - /// Push registers - /// Uses `payload` payload containing `RegisterList.asInt` directly. - push_regs, - /// Pop registers - /// Uses `payload` payload containing `RegisterList.asInt` directly. - pop_regs, - - /// Tombstone - /// Emitter should skip this instruction. - dead, + /// A pseudo instruction that requires special lowering. + /// This should be the only tag in this enum that doesn't + /// directly correspond to one or more instruction mnemonics. + pseudo, }; pub const Ops = enum(u8) { @@ -579,12 +751,6 @@ pub const Inst = struct { /// Register, register, immediate (unsigned) operands. /// Uses `rri` payload. rri_u, - /// Register with condition code (CC). - /// Uses `r_cc` payload. - r_cc, - /// Register, register with condition code (CC). - /// Uses `rr_cc` payload. - rr_cc, /// Register, immediate (sign-extended) operands. /// Uses `ri` payload. ri_s, @@ -609,12 +775,6 @@ pub const Inst = struct { /// Register, memory (RIP) operands. /// Uses `rx` payload. rm_rip, - /// Register, memory (SIB) operands with condition code (CC). - /// Uses `rx_cc` payload. - rm_sib_cc, - /// Register, memory (RIP) operands with condition code (CC). - /// Uses `rx_cc` payload. - rm_rip_cc, /// Register, memory (SIB), immediate (byte) operands. /// Uses `rix` payload with extra data of type `MemorySib`. rmi_sib, @@ -634,17 +794,11 @@ pub const Inst = struct { /// Uses `rix` payload with extra data of type `MemoryRip`. rmi_rip, /// Single memory (SIB) operand. - /// Uses `payload` with extra data of type `MemorySib`. + /// Uses `x` with extra data of type `MemorySib`. m_sib, /// Single memory (RIP) operand. - /// Uses `payload` with extra data of type `MemoryRip`. + /// Uses `x` with extra data of type `MemoryRip`. m_rip, - /// Single memory (SIB) operand with condition code (CC). - /// Uses `x_cc` with extra data of type `MemorySib`. - m_sib_cc, - /// Single memory (RIP) operand with condition code (CC). - /// Uses `x_cc` with extra data of type `MemoryRip`. - m_rip_cc, /// Memory (SIB), immediate (unsigned) operands. /// Uses `ix` payload with extra data of type `MemorySib`. mi_sib_u, @@ -676,49 +830,17 @@ pub const Inst = struct { /// Uses `rix` payload with extra data of type `MemoryRip`. mri_rip, /// Rax, Memory moffs. - /// Uses `payload` with extra data of type `MemoryMoffs`. + /// Uses `x` with extra data of type `MemoryMoffs`. rax_moffs, /// Memory moffs, rax. - /// Uses `payload` with extra data of type `MemoryMoffs`. + /// Uses `x` with extra data of type `MemoryMoffs`. moffs_rax, - /// Single memory (SIB) operand with lock prefix. - /// Uses `payload` with extra data of type `MemorySib`. - lock_m_sib, - /// Single memory (RIP) operand with lock prefix. - /// Uses `payload` with extra data of type `MemoryRip`. 
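        // All of these lock_* variants are superseded by the "lock" prefix fixes:
        // the plain memory/register Ops encodings are reused and the prefix travels
        // in the payload instead, as in this sketch distilled from atomicOp in
        // CodeGen.zig above:
        //   _ = try self.addInst(.{
        //       .tag = .xadd,
        //       .ops = .mr_sib,
        //       .data = .{ .rx = .{
        //           .fixes = .@"lock _", // lowered as "lock xadd"
        //           .r1 = registerAlias(dst_reg, val_abi_size),
        //           .payload = try self.addExtra(Mir.MemorySib.encode(ptr_mem)),
        //       } },
        //   });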
- lock_m_rip, - /// Memory (SIB), immediate (unsigned) operands with lock prefix. - /// Uses `xi` payload with extra data of type `MemorySib`. - lock_mi_sib_u, - /// Memory (RIP), immediate (unsigned) operands with lock prefix. - /// Uses `xi` payload with extra data of type `MemoryRip`. - lock_mi_rip_u, - /// Memory (SIB), immediate (sign-extend) operands with lock prefix. - /// Uses `xi` payload with extra data of type `MemorySib`. - lock_mi_sib_s, - /// Memory (RIP), immediate (sign-extend) operands with lock prefix. - /// Uses `xi` payload with extra data of type `MemoryRip`. - lock_mi_rip_s, - /// Memory (SIB), register operands with lock prefix. - /// Uses `rx` payload with extra data of type `MemorySib`. - lock_mr_sib, - /// Memory (RIP), register operands with lock prefix. - /// Uses `rx` payload with extra data of type `MemoryRip`. - lock_mr_rip, - /// Memory moffs, rax with lock prefix. - /// Uses `payload` with extra data of type `MemoryMoffs`. - lock_moffs_rax, /// References another Mir instruction directly. /// Uses `inst` payload. inst, - /// References another Mir instruction directly with condition code (CC). - /// Uses `inst_cc` payload. - inst_cc, - /// String repeat and width - /// Uses `string` payload. - string, + /// Linker relocation - external function. /// Uses `reloc` payload. - reloc, + extern_fn_reloc, /// Linker relocation - GOT indirection. /// Uses `rx` payload with extra data of type `Reloc`. got_reloc, @@ -731,74 +853,125 @@ pub const Inst = struct { /// Linker relocation - threadlocal variable via GOT indirection. /// Uses `rx` payload with extra data of type `Reloc`. tlv_reloc, + + // Pseudo instructions: + + /// Conditional move if zero flag set and parity flag not set + /// Clobbers the source operand! + /// Uses `rr` payload. + pseudo_cmov_z_and_np_rr, + /// Conditional move if zero flag not set or parity flag set + /// Uses `rr` payload. + pseudo_cmov_nz_or_p_rr, + /// Conditional move if zero flag not set or parity flag set + /// Uses `rx` payload. + pseudo_cmov_nz_or_p_rm_sib, + /// Conditional move if zero flag not set or parity flag set + /// Uses `rx` payload. + pseudo_cmov_nz_or_p_rm_rip, + /// Set byte if zero flag set and parity flag not set + /// Requires a scratch register! + /// Uses `r_scratch` payload. + pseudo_set_z_and_np_r, + /// Set byte if zero flag set and parity flag not set + /// Requires a scratch register! + /// Uses `x_scratch` payload. + pseudo_set_z_and_np_m_sib, + /// Set byte if zero flag set and parity flag not set + /// Requires a scratch register! + /// Uses `x_scratch` payload. + pseudo_set_z_and_np_m_rip, + /// Set byte if zero flag not set or parity flag set + /// Requires a scratch register! + /// Uses `r_scratch` payload. + pseudo_set_nz_or_p_r, + /// Set byte if zero flag not set or parity flag set + /// Requires a scratch register! + /// Uses `x_scratch` payload. + pseudo_set_nz_or_p_m_sib, + /// Set byte if zero flag not set or parity flag set + /// Requires a scratch register! + /// Uses `x_scratch` payload. + pseudo_set_nz_or_p_m_rip, + /// Jump if zero flag set and parity flag not set + /// Uses `inst` payload. + pseudo_j_z_and_np_inst, + /// Jump if zero flag not set or parity flag set + /// Uses `inst` payload. + pseudo_j_nz_or_p_inst, + + /// Push registers + /// Uses `reg_list` payload. + pseudo_push_reg_list, + /// Pop registers + /// Uses `reg_list` payload. + pseudo_pop_reg_list, + + /// End of prologue + pseudo_dbg_prologue_end_none, + /// Update debug line + /// Uses `line_column` payload. 
+ pseudo_dbg_line_line_column, + /// Start of epilogue + pseudo_dbg_epilogue_begin_none, + + /// Tombstone + /// Emitter should skip this instruction. + pseudo_dead_none, }; pub const Data = union { + none: struct { + fixes: Fixes = ._, + }, /// References another Mir instruction. - inst: Index, - /// Another instruction with condition code (CC). - /// Used by `jcc`. - inst_cc: struct { - /// Another instruction. + inst: struct { + fixes: Fixes = ._, inst: Index, - /// A condition code for use with EFLAGS register. - cc: bits.Condition, }, /// A 32-bit immediate value. - i: u32, - r: Register, + i: struct { + fixes: Fixes = ._, + i: u32, + }, + r: struct { + fixes: Fixes = ._, + r1: Register, + }, rr: struct { + fixes: Fixes = ._, r1: Register, r2: Register, }, rrr: struct { + fixes: Fixes = ._, r1: Register, r2: Register, r3: Register, }, rrri: struct { + fixes: Fixes = ._, r1: Register, r2: Register, r3: Register, i: u8, }, rri: struct { + fixes: Fixes = ._, r1: Register, r2: Register, i: u32, }, - /// Condition code (CC), followed by custom payload found in extra. - x_cc: struct { - scratch: Register, - cc: bits.Condition, - payload: u32, - }, - /// Register with condition code (CC). - r_cc: struct { - r: Register, - scratch: Register, - cc: bits.Condition, - }, - /// Register, register with condition code (CC). - rr_cc: struct { - r1: Register, - r2: Register, - cc: bits.Condition, - }, /// Register, immediate. ri: struct { - r: Register, + fixes: Fixes = ._, + r1: Register, i: u32, }, /// Register, followed by custom payload found in extra. rx: struct { - r: Register, - payload: u32, - }, - /// Register with condition code (CC), followed by custom payload found in extra. - rx_cc: struct { - r: Register, - cc: bits.Condition, + fixes: Fixes = ._, + r1: Register, payload: u32, }, /// Immediate, followed by Custom payload found in extra. @@ -808,39 +981,54 @@ pub const Inst = struct { }, /// Register, register, followed by Custom payload found in extra. rrx: struct { + fixes: Fixes = ._, r1: Register, r2: Register, payload: u32, }, /// Register, byte immediate, followed by Custom payload found in extra. rix: struct { - r: Register, + fixes: Fixes = ._, + r1: Register, i: u8, payload: u32, }, /// Register, register, byte immediate, followed by Custom payload found in extra. rrix: struct { + fixes: Fixes = ._, r1: Register, r2: Register, i: u8, payload: u32, }, - /// String instruction prefix and width. - string: struct { - repeat: bits.StringRepeat, - width: bits.StringWidth, + /// Register, scratch register + r_scratch: struct { + fixes: Fixes = ._, + r1: Register, + scratch_reg: Register, + }, + /// Scratch register, followed by Custom payload found in extra. + x_scratch: struct { + fixes: Fixes = ._, + scratch_reg: Register, + payload: u32, + }, + /// Custom payload found in extra. + x: struct { + fixes: Fixes = ._, + payload: u32, }, /// Relocation for the linker where: /// * `atom_index` is the index of the source /// * `sym_index` is the index of the target - relocation: Reloc, + reloc: Reloc, /// Debug line and column position line_column: struct { line: u32, column: u32, }, - /// Index into `extra`. Meaning of what can be found there is context-dependent. - payload: u32, + /// Register list + reg_list: RegisterList, }; // Make sure we don't accidentally make instructions bigger than expected. @@ -852,6 +1040,7 @@ pub const Inst = struct { } }; +/// A linker symbol not yet allocated in VM. pub const Reloc = struct { /// Index of the containing atom. 
atom_index: u32, @@ -887,16 +1076,6 @@ pub const RegisterList = struct { return self.bitset.iterator(options); } - pub fn asInt(self: Self) u32 { - return self.bitset.mask; - } - - pub fn fromInt(mask: u32) Self { - return .{ - .bitset = BitSet{ .mask = @intCast(BitSet.MaskInt, mask) }, - }; - } - pub fn count(self: Self) u32 { return @intCast(u32, self.bitset.count()); } diff --git a/src/arch/x86_64/bits.zig b/src/arch/x86_64/bits.zig index b73a37d6cb..3343f280b9 100644 --- a/src/arch/x86_64/bits.zig +++ b/src/arch/x86_64/bits.zig @@ -6,9 +6,6 @@ const Allocator = std.mem.Allocator; const ArrayList = std.ArrayList; const DW = std.dwarf; -pub const StringRepeat = enum(u3) { none, rep, repe, repz, repne, repnz }; -pub const StringWidth = enum(u2) { b, w, d, q }; - /// EFLAGS condition codes pub const Condition = enum(u5) { /// above -- cgit v1.2.3 From ecb5feaf94bf49dc4c180f09c170223d6c1898b3 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Mon, 8 May 2023 06:51:05 -0400 Subject: x86_64: continue to optimize mir tag usage Migrate mnemonic literals to tuples that represent the compressed storage. 225 tags left in use, many tags left to compress. --- src/arch/x86_64/CodeGen.zig | 1469 +++++++++++++++++++++++-------------------- src/arch/x86_64/Lower.zig | 9 +- src/arch/x86_64/Mir.zig | 100 +-- 3 files changed, 870 insertions(+), 708 deletions(-) (limited to 'src/arch') diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 3ac05c95ac..147be62e28 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -1167,11 +1167,13 @@ fn asmPlaceholder(self: *Self) !Mir.Inst.Index { }); } -fn asmOpOnly(self: *Self, tag: Mir.Inst.Tag) !void { +fn asmOpOnly(self: *Self, tag: Mir.Inst.FixedTag) !void { _ = try self.addInst(.{ - .tag = tag, + .tag = tag[1], .ops = .none, - .data = undefined, + .data = .{ .none = .{ + .fixes = tag[0], + } }, }); } @@ -1183,22 +1185,26 @@ fn asmPseudo(self: *Self, ops: Mir.Inst.Ops) !void { }); } -fn asmRegister(self: *Self, tag: Mir.Inst.Tag, reg: Register) !void { +fn asmRegister(self: *Self, tag: Mir.Inst.FixedTag, reg: Register) !void { _ = try self.addInst(.{ - .tag = tag, + .tag = tag[1], .ops = .r, - .data = .{ .r = .{ .r1 = reg } }, + .data = .{ .r = .{ + .fixes = tag[0], + .r1 = reg, + } }, }); } -fn asmImmediate(self: *Self, tag: Mir.Inst.Tag, imm: Immediate) !void { +fn asmImmediate(self: *Self, tag: Mir.Inst.FixedTag, imm: Immediate) !void { _ = try self.addInst(.{ - .tag = tag, + .tag = tag[1], .ops = switch (imm) { .signed => .i_s, .unsigned => .i_u, }, .data = .{ .i = .{ + .fixes = tag[0], .i = switch (imm) { .signed => |s| @bitCast(u32, s), .unsigned => |u| @intCast(u32, u), @@ -1207,24 +1213,29 @@ fn asmImmediate(self: *Self, tag: Mir.Inst.Tag, imm: Immediate) !void { }); } -fn asmRegisterRegister(self: *Self, tag: Mir.Inst.Tag, reg1: Register, reg2: Register) !void { +fn asmRegisterRegister(self: *Self, tag: Mir.Inst.FixedTag, reg1: Register, reg2: Register) !void { _ = try self.addInst(.{ - .tag = tag, + .tag = tag[1], .ops = .rr, - .data = .{ .rr = .{ .r1 = reg1, .r2 = reg2 } }, + .data = .{ .rr = .{ + .fixes = tag[0], + .r1 = reg1, + .r2 = reg2, + } }, }); } -fn asmRegisterImmediate(self: *Self, tag: Mir.Inst.Tag, reg: Register, imm: Immediate) !void { +fn asmRegisterImmediate(self: *Self, tag: Mir.Inst.FixedTag, reg: Register, imm: Immediate) !void { const ops: Mir.Inst.Ops = switch (imm) { .signed => .ri_s, .unsigned => |u| if (math.cast(u32, u)) |_| .ri_u else .ri64, }; _ = try self.addInst(.{ - .tag = tag, + 
.tag = tag[1], .ops = ops, .data = switch (ops) { .ri_s, .ri_u => .{ .ri = .{ + .fixes = tag[0], .r1 = reg, .i = switch (imm) { .signed => |s| @bitCast(u32, s), @@ -1232,6 +1243,7 @@ fn asmRegisterImmediate(self: *Self, tag: Mir.Inst.Tag, reg: Register, imm: Imme }, } }, .ri64 => .{ .rx = .{ + .fixes = tag[0], .r1 = reg, .payload = try self.addExtra(Mir.Imm64.encode(imm.unsigned)), } }, @@ -1242,47 +1254,59 @@ fn asmRegisterImmediate(self: *Self, tag: Mir.Inst.Tag, reg: Register, imm: Imme fn asmRegisterRegisterRegister( self: *Self, - tag: Mir.Inst.Tag, + tag: Mir.Inst.FixedTag, reg1: Register, reg2: Register, reg3: Register, ) !void { _ = try self.addInst(.{ - .tag = tag, + .tag = tag[1], .ops = .rrr, - .data = .{ .rrr = .{ .r1 = reg1, .r2 = reg2, .r3 = reg3 } }, + .data = .{ .rrr = .{ + .fixes = tag[0], + .r1 = reg1, + .r2 = reg2, + .r3 = reg3, + } }, }); } fn asmRegisterRegisterRegisterImmediate( self: *Self, - tag: Mir.Inst.Tag, + tag: Mir.Inst.FixedTag, reg1: Register, reg2: Register, reg3: Register, imm: Immediate, ) !void { _ = try self.addInst(.{ - .tag = tag, + .tag = tag[1], .ops = .rrri, - .data = .{ .rrri = .{ .r1 = reg1, .r2 = reg2, .r3 = reg3, .i = @intCast(u8, imm.unsigned) } }, + .data = .{ .rrri = .{ + .fixes = tag[0], + .r1 = reg1, + .r2 = reg2, + .r3 = reg3, + .i = @intCast(u8, imm.unsigned), + } }, }); } fn asmRegisterRegisterImmediate( self: *Self, - tag: Mir.Inst.Tag, + tag: Mir.Inst.FixedTag, reg1: Register, reg2: Register, imm: Immediate, ) !void { _ = try self.addInst(.{ - .tag = tag, + .tag = tag[1], .ops = switch (imm) { .signed => .rri_s, .unsigned => .rri_u, }, .data = .{ .rri = .{ + .fixes = tag[0], .r1 = reg1, .r2 = reg2, .i = switch (imm) { @@ -1295,19 +1319,20 @@ fn asmRegisterRegisterImmediate( fn asmRegisterRegisterMemory( self: *Self, - tag: Mir.Inst.Tag, + tag: Mir.Inst.FixedTag, reg1: Register, reg2: Register, m: Memory, ) !void { _ = try self.addInst(.{ - .tag = tag, + .tag = tag[1], .ops = switch (m) { .sib => .rrm_sib, .rip => .rrm_rip, else => unreachable, }, .data = .{ .rrx = .{ + .fixes = tag[0], .r1 = reg1, .r2 = reg2, .payload = switch (m) { @@ -1319,15 +1344,16 @@ fn asmRegisterRegisterMemory( }); } -fn asmMemory(self: *Self, tag: Mir.Inst.Tag, m: Memory) !void { +fn asmMemory(self: *Self, tag: Mir.Inst.FixedTag, m: Memory) !void { _ = try self.addInst(.{ - .tag = tag, + .tag = tag[1], .ops = switch (m) { .sib => .m_sib, .rip => .m_rip, else => unreachable, }, .data = .{ .x = .{ + .fixes = tag[0], .payload = switch (m) { .sib => try self.addExtra(Mir.MemorySib.encode(m)), .rip => try self.addExtra(Mir.MemoryRip.encode(m)), @@ -1337,15 +1363,16 @@ fn asmMemory(self: *Self, tag: Mir.Inst.Tag, m: Memory) !void { }); } -fn asmRegisterMemory(self: *Self, tag: Mir.Inst.Tag, reg: Register, m: Memory) !void { +fn asmRegisterMemory(self: *Self, tag: Mir.Inst.FixedTag, reg: Register, m: Memory) !void { _ = try self.addInst(.{ - .tag = tag, + .tag = tag[1], .ops = switch (m) { .sib => .rm_sib, .rip => .rm_rip, else => unreachable, }, .data = .{ .rx = .{ + .fixes = tag[0], .r1 = reg, .payload = switch (m) { .sib => try self.addExtra(Mir.MemorySib.encode(m)), @@ -1358,19 +1385,20 @@ fn asmRegisterMemory(self: *Self, tag: Mir.Inst.Tag, reg: Register, m: Memory) ! 
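// Each asm* helper below takes a Mir.Inst.FixedTag, a (fixes, mnemonic) tuple:
// tag[0] is the mnemonic affix stored in the payload's `fixes` field and tag[1]
// is the base tag, so .{ ._r, .sh } encodes what was previously the single .shr tag.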
fn asmRegisterMemoryImmediate( self: *Self, - tag: Mir.Inst.Tag, + tag: Mir.Inst.FixedTag, reg: Register, m: Memory, imm: Immediate, ) !void { _ = try self.addInst(.{ - .tag = tag, + .tag = tag[1], .ops = switch (m) { .sib => .rmi_sib, .rip => .rmi_rip, else => unreachable, }, .data = .{ .rix = .{ + .fixes = tag[0], .r1 = reg, .i = @intCast(u8, imm.unsigned), .payload = switch (m) { @@ -1384,20 +1412,21 @@ fn asmRegisterMemoryImmediate( fn asmRegisterRegisterMemoryImmediate( self: *Self, - tag: Mir.Inst.Tag, + tag: Mir.Inst.FixedTag, reg1: Register, reg2: Register, m: Memory, imm: Immediate, ) !void { _ = try self.addInst(.{ - .tag = tag, + .tag = tag[1], .ops = switch (m) { .sib => .rrmi_sib, .rip => .rrmi_rip, else => unreachable, }, .data = .{ .rrix = .{ + .fixes = tag[0], .r1 = reg1, .r2 = reg2, .i = @intCast(u8, imm.unsigned), @@ -1410,15 +1439,16 @@ fn asmRegisterRegisterMemoryImmediate( }); } -fn asmMemoryRegister(self: *Self, tag: Mir.Inst.Tag, m: Memory, reg: Register) !void { +fn asmMemoryRegister(self: *Self, tag: Mir.Inst.FixedTag, m: Memory, reg: Register) !void { _ = try self.addInst(.{ - .tag = tag, + .tag = tag[1], .ops = switch (m) { .sib => .mr_sib, .rip => .mr_rip, else => unreachable, }, .data = .{ .rx = .{ + .fixes = tag[0], .r1 = reg, .payload = switch (m) { .sib => try self.addExtra(Mir.MemorySib.encode(m)), @@ -1429,9 +1459,9 @@ fn asmMemoryRegister(self: *Self, tag: Mir.Inst.Tag, m: Memory, reg: Register) ! }); } -fn asmMemoryImmediate(self: *Self, tag: Mir.Inst.Tag, m: Memory, imm: Immediate) !void { +fn asmMemoryImmediate(self: *Self, tag: Mir.Inst.FixedTag, m: Memory, imm: Immediate) !void { _ = try self.addInst(.{ - .tag = tag, + .tag = tag[1], .ops = switch (m) { .sib => switch (imm) { .signed => .mi_sib_s, @@ -1443,57 +1473,64 @@ fn asmMemoryImmediate(self: *Self, tag: Mir.Inst.Tag, m: Memory, imm: Immediate) }, else => unreachable, }, - .data = .{ .ix = .{ - .i = switch (imm) { + .data = .{ .x = .{ + .fixes = tag[0], + .payload = try self.addExtra(Mir.Imm32{ .imm = switch (imm) { .signed => |s| @bitCast(u32, s), .unsigned => |u| @intCast(u32, u), - }, - .payload = switch (m) { - .sib => try self.addExtra(Mir.MemorySib.encode(m)), - .rip => try self.addExtra(Mir.MemoryRip.encode(m)), - else => unreachable, - }, + } }), } }, }); + _ = switch (m) { + .sib => try self.addExtra(Mir.MemorySib.encode(m)), + .rip => try self.addExtra(Mir.MemoryRip.encode(m)), + else => unreachable, + }; } fn asmMemoryRegisterRegister( self: *Self, - tag: Mir.Inst.Tag, + tag: Mir.Inst.FixedTag, m: Memory, reg1: Register, reg2: Register, ) !void { _ = try self.addInst(.{ - .tag = tag, + .tag = tag[1], .ops = switch (m) { .sib => .mrr_sib, .rip => .mrr_rip, else => unreachable, }, - .data = .{ .rrx = .{ .r1 = reg1, .r2 = reg2, .payload = switch (m) { - .sib => try self.addExtra(Mir.MemorySib.encode(m)), - .rip => try self.addExtra(Mir.MemoryRip.encode(m)), - else => unreachable, - } } }, + .data = .{ .rrx = .{ + .fixes = tag[0], + .r1 = reg1, + .r2 = reg2, + .payload = switch (m) { + .sib => try self.addExtra(Mir.MemorySib.encode(m)), + .rip => try self.addExtra(Mir.MemoryRip.encode(m)), + else => unreachable, + }, + } }, }); } fn asmMemoryRegisterImmediate( self: *Self, - tag: Mir.Inst.Tag, + tag: Mir.Inst.FixedTag, m: Memory, reg: Register, imm: Immediate, ) !void { _ = try self.addInst(.{ - .tag = tag, + .tag = tag[1], .ops = switch (m) { .sib => .mri_sib, .rip => .mri_rip, else => unreachable, }, .data = .{ .rix = .{ + .fixes = tag[0], .r1 = reg, .i = @intCast(u8, imm.unsigned), 
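        // The memory operand itself goes out of line: `payload` is its index into
        // the `extra` array, written via addExtra in the switch below.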
.payload = switch (m) { @@ -1508,9 +1545,9 @@ fn asmMemoryRegisterImmediate( fn gen(self: *Self) InnerError!void { const cc = self.fn_type.fnCallingConvention(); if (cc != .Naked) { - try self.asmRegister(.push, .rbp); + try self.asmRegister(.{ ._, .push }, .rbp); const backpatch_push_callee_preserved_regs = try self.asmPlaceholder(); - try self.asmRegisterRegister(.mov, .rbp, .rsp); + try self.asmRegisterRegister(.{ ._, .mov }, .rbp, .rsp); const backpatch_frame_align = try self.asmPlaceholder(); const backpatch_stack_alloc = try self.asmPlaceholder(); @@ -1553,8 +1590,8 @@ fn gen(self: *Self) InnerError!void { try self.asmPseudo(.pseudo_dbg_epilogue_begin_none); const backpatch_stack_dealloc = try self.asmPlaceholder(); const backpatch_pop_callee_preserved_regs = try self.asmPlaceholder(); - try self.asmRegister(.pop, .rbp); - try self.asmOpOnly(.ret); + try self.asmRegister(.{ ._, .pop }, .rbp); + try self.asmOpOnly(.{ ._, .ret }); const frame_layout = try self.computeFrameLayout(); const need_frame_align = frame_layout.stack_mask != math.maxInt(u32); @@ -1927,7 +1964,7 @@ fn genLazy(self: *Self, lazy_sym: link.File.LazySymbol) InnerError!void { }; const tag_val = Value.initPayload(&tag_pl.base); const tag_mcv = try self.genTypedValue(.{ .ty = enum_ty, .val = tag_val }); - try self.genBinOpMir(.cmp, enum_ty, enum_mcv, tag_mcv); + try self.genBinOpMir(.{ ._, .cmp }, enum_ty, enum_mcv, tag_mcv); const skip_reloc = try self.asmJccReloc(undefined, .ne); try self.genSetMem( @@ -1947,7 +1984,7 @@ fn genLazy(self: *Self, lazy_sym: link.File.LazySymbol) InnerError!void { try self.airTrap(); for (exitlude_jump_relocs) |reloc| try self.performReloc(reloc); - try self.asmOpOnly(.ret); + try self.asmOpOnly(.{ ._, .ret }); }, else => return self.fail( "TODO implement {s} for {}", @@ -2406,7 +2443,7 @@ fn airFptrunc(self: *Self, inst: Air.Inst.Index) !void { else try self.copyToTmpRegister(src_ty, src_mcv); try self.asmRegisterRegisterImmediate( - .vcvtps2ph, + .{ ._, .vcvtps2ph }, dst_reg, mat_src_reg.to128(), Immediate.u(0b1_00), @@ -2418,12 +2455,12 @@ fn airFptrunc(self: *Self, inst: Air.Inst.Index) !void { } } else if (src_bits == 64 and dst_bits == 32) { if (self.hasFeature(.avx)) if (src_mcv.isMemory()) try self.asmRegisterRegisterMemory( - .vcvtsd2ss, + .{ ._, .vcvtsd2ss }, dst_reg, dst_reg, src_mcv.mem(.qword), ) else try self.asmRegisterRegisterRegister( - .vcvtsd2ss, + .{ ._, .vcvtsd2ss }, dst_reg, dst_reg, (if (src_mcv.isRegister()) @@ -2431,11 +2468,11 @@ fn airFptrunc(self: *Self, inst: Air.Inst.Index) !void { else try self.copyToTmpRegister(src_ty, src_mcv)).to128(), ) else if (src_mcv.isMemory()) try self.asmRegisterMemory( - .cvtsd2ss, + .{ ._, .cvtsd2ss }, dst_reg, src_mcv.mem(.qword), ) else try self.asmRegisterRegister( - .cvtsd2ss, + .{ ._, .cvtsd2ss }, dst_reg, (if (src_mcv.isRegister()) src_mcv.getReg().? @@ -2469,22 +2506,22 @@ fn airFpext(self: *Self, inst: Air.Inst.Index) !void { src_mcv.getReg().? 
else try self.copyToTmpRegister(src_ty, src_mcv); - try self.asmRegisterRegister(.vcvtph2ps, dst_reg, mat_src_reg.to128()); + try self.asmRegisterRegister(.{ ._, .vcvtph2ps }, dst_reg, mat_src_reg.to128()); switch (dst_bits) { 32 => {}, - 64 => try self.asmRegisterRegisterRegister(.vcvtss2sd, dst_reg, dst_reg, dst_reg), + 64 => try self.asmRegisterRegisterRegister(.{ ._, .vcvtss2sd }, dst_reg, dst_reg, dst_reg), else => return self.fail("TODO implement airFpext from {} to {}", .{ src_ty.fmt(self.bin_file.options.module.?), dst_ty.fmt(self.bin_file.options.module.?), }), } } else if (src_bits == 32 and dst_bits == 64) { if (self.hasFeature(.avx)) if (src_mcv.isMemory()) try self.asmRegisterRegisterMemory( - .vcvtss2sd, + .{ ._, .vcvtss2sd }, dst_reg, dst_reg, src_mcv.mem(.dword), ) else try self.asmRegisterRegisterRegister( - .vcvtss2sd, + .{ ._, .vcvtss2sd }, dst_reg, dst_reg, (if (src_mcv.isRegister()) @@ -2492,11 +2529,11 @@ fn airFpext(self: *Self, inst: Air.Inst.Index) !void { else try self.copyToTmpRegister(src_ty, src_mcv)).to128(), ) else if (src_mcv.isMemory()) try self.asmRegisterMemory( - .cvtss2sd, + .{ ._, .cvtss2sd }, dst_reg, src_mcv.mem(.dword), ) else try self.asmRegisterRegister( - .cvtss2sd, + .{ ._, .cvtss2sd }, dst_reg, (if (src_mcv.isRegister()) src_mcv.getReg().? @@ -2537,12 +2574,12 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void { switch (dst_mcv) { .register => |dst_reg| { const min_abi_size = @min(dst_abi_size, src_abi_size); - const tag: Mir.Inst.Tag = switch (signedness) { - .signed => if (min_abi_size >= 4) .movsxd else .movsx, - .unsigned => if (min_abi_size >= 4) .mov else .movzx, + const tag: Mir.Inst.FixedTag = switch (signedness) { + .signed => if (min_abi_size >= 4) .{ ._d, .movsx } else .{ ._, .movsx }, + .unsigned => if (min_abi_size >= 4) .{ ._, .mov } else .{ ._, .movzx }, }; - const dst_alias = switch (tag) { - .movsx, .movsxd => dst_reg.to64(), + const dst_alias = switch (tag[1]) { + .movsx => dst_reg.to64(), .mov, .movzx => if (min_abi_size > 4) dst_reg.to64() else dst_reg.to32(), else => unreachable, }; @@ -2570,14 +2607,24 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void { try self.genCopy(min_ty, dst_mcv, src_mcv); const extra = dst_abi_size * 8 - dst_int_info.bits; if (extra > 0) { - try self.genShiftBinOpMir(switch (signedness) { - .signed => .sal, - .unsigned => .shl, - }, dst_ty, dst_mcv, .{ .immediate = extra }); - try self.genShiftBinOpMir(switch (signedness) { - .signed => .sar, - .unsigned => .shr, - }, dst_ty, dst_mcv, .{ .immediate = extra }); + try self.genShiftBinOpMir( + switch (signedness) { + .signed => .{ ._l, .sa }, + .unsigned => .{ ._l, .sh }, + }, + dst_ty, + dst_mcv, + .{ .immediate = extra }, + ); + try self.genShiftBinOpMir( + switch (signedness) { + .signed => .{ ._r, .sa }, + .unsigned => .{ ._r, .sh }, + }, + dst_ty, + dst_mcv, + .{ .immediate = extra }, + ); } }, } @@ -2762,8 +2809,8 @@ fn airAddSat(self: *Self, inst: Air.Inst.Index) !void { const reg_bits = self.regBitSize(ty); const cc: Condition = if (ty.isSignedInt()) cc: { try self.genSetReg(limit_reg, ty, dst_mcv); - try self.genShiftBinOpMir(.sar, ty, limit_mcv, .{ .immediate = reg_bits - 1 }); - try self.genBinOpMir(.xor, ty, limit_mcv, .{ + try self.genShiftBinOpMir(.{ ._r, .sa }, ty, limit_mcv, .{ .immediate = reg_bits - 1 }); + try self.genBinOpMir(.{ ._, .xor }, ty, limit_mcv, .{ .immediate = (@as(u64, 1) << @intCast(u6, reg_bits - 1)) - 1, }); break :cc .o; @@ -2773,7 +2820,7 @@ fn airAddSat(self: *Self, inst: Air.Inst.Index) !void { }); 
break :cc .c; }; - try self.genBinOpMir(.add, ty, dst_mcv, rhs_mcv); + try self.genBinOpMir(.{ ._, .add }, ty, dst_mcv, rhs_mcv); const cmov_abi_size = @max(@intCast(u32, ty.abiSize(self.target.*)), 2); try self.asmCmovccRegisterRegister( @@ -2813,8 +2860,8 @@ fn airSubSat(self: *Self, inst: Air.Inst.Index) !void { const reg_bits = self.regBitSize(ty); const cc: Condition = if (ty.isSignedInt()) cc: { try self.genSetReg(limit_reg, ty, dst_mcv); - try self.genShiftBinOpMir(.sar, ty, limit_mcv, .{ .immediate = reg_bits - 1 }); - try self.genBinOpMir(.xor, ty, limit_mcv, .{ + try self.genShiftBinOpMir(.{ ._r, .sa }, ty, limit_mcv, .{ .immediate = reg_bits - 1 }); + try self.genBinOpMir(.{ ._, .xor }, ty, limit_mcv, .{ .immediate = (@as(u64, 1) << @intCast(u6, reg_bits - 1)) - 1, }); break :cc .o; @@ -2822,7 +2869,7 @@ fn airSubSat(self: *Self, inst: Air.Inst.Index) !void { try self.genSetReg(limit_reg, ty, .{ .immediate = 0 }); break :cc .c; }; - try self.genBinOpMir(.sub, ty, dst_mcv, rhs_mcv); + try self.genBinOpMir(.{ ._, .sub }, ty, dst_mcv, rhs_mcv); const cmov_abi_size = @max(@intCast(u32, ty.abiSize(self.target.*)), 2); try self.asmCmovccRegisterRegister( @@ -2864,9 +2911,9 @@ fn airMulSat(self: *Self, inst: Air.Inst.Index) !void { const reg_bits = self.regBitSize(ty); const cc: Condition = if (ty.isSignedInt()) cc: { try self.genSetReg(limit_reg, ty, lhs_mcv); - try self.genBinOpMir(.xor, ty, limit_mcv, rhs_mcv); - try self.genShiftBinOpMir(.sar, ty, limit_mcv, .{ .immediate = reg_bits - 1 }); - try self.genBinOpMir(.xor, ty, limit_mcv, .{ + try self.genBinOpMir(.{ ._, .xor }, ty, limit_mcv, rhs_mcv); + try self.genShiftBinOpMir(.{ ._, .sa }, ty, limit_mcv, .{ .immediate = reg_bits - 1 }); + try self.genBinOpMir(.{ ._, .xor }, ty, limit_mcv, .{ .immediate = (@as(u64, 1) << @intCast(u6, reg_bits - 1)) - 1, }); break :cc .o; @@ -2979,7 +3026,7 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { }; defer if (tmp_lock) |lock| self.register_manager.unlockReg(lock); - try self.genBinOpMir(.cmp, lhs_ty, tmp_mcv, lhs); + try self.genBinOpMir(.{ ._, .cmp }, lhs_ty, tmp_mcv, lhs); const cc = Condition.ne; const tuple_ty = self.air.typeOfIndex(inst); @@ -3066,12 +3113,17 @@ fn genSetFrameTruncatedOverflowCompare( src_mcv; try self.genSetReg(scratch_reg, hi_limb_ty, hi_limb_mcv); try self.truncateRegister(hi_limb_ty, scratch_reg); - try self.genBinOpMir(.cmp, hi_limb_ty, .{ .register = scratch_reg }, hi_limb_mcv); + try self.genBinOpMir(.{ ._, .cmp }, hi_limb_ty, .{ .register = scratch_reg }, hi_limb_mcv); const eq_reg = temp_regs[2]; if (overflow_cc) |_| { try self.asmSetccRegister(eq_reg.to8(), .ne); - try self.genBinOpMir(.@"or", Type.u8, .{ .register = overflow_reg }, .{ .register = eq_reg }); + try self.genBinOpMir( + .{ ._, .@"or" }, + Type.u8, + .{ .register = overflow_reg }, + .{ .register = eq_reg }, + ); } const payload_off = @intCast(i32, tuple_ty.structFieldOffset(0, self.target.*)); @@ -3200,28 +3252,25 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { /// Generates signed or unsigned integer multiplication/division. /// Clobbers .rax and .rdx registers. /// Quotient is saved in .rax and remainder in .rdx. 
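/// For signed division the dividend is first sign-extended into rdx:rax
/// (cbw/cwd/cdq/cqo); for unsigned division rdx is zeroed instead.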
-fn genIntMulDivOpMir( - self: *Self, - tag: Mir.Inst.Tag, - ty: Type, - lhs: MCValue, - rhs: MCValue, -) !void { +fn genIntMulDivOpMir(self: *Self, tag: Mir.Inst.FixedTag, ty: Type, lhs: MCValue, rhs: MCValue) !void { const abi_size = @intCast(u32, ty.abiSize(self.target.*)); if (abi_size > 8) { return self.fail("TODO implement genIntMulDivOpMir for ABI size larger than 8", .{}); } try self.genSetReg(.rax, ty, lhs); - switch (tag) { + switch (tag[1]) { else => unreachable, - .mul, .imul => {}, - .div => try self.asmRegisterRegister(.xor, .edx, .edx), - .idiv => switch (self.regBitSize(ty)) { - 8 => try self.asmOpOnly(.cbw), - 16 => try self.asmOpOnly(.cwd), - 32 => try self.asmOpOnly(.cdq), - 64 => try self.asmOpOnly(.cqo), + .mul => {}, + .div => switch (tag[0]) { + ._ => try self.asmRegisterRegister(.{ ._, .xor }, .edx, .edx), + .i_ => switch (self.regBitSize(ty)) { + 8 => try self.asmOpOnly(.{ ._, .cbw }), + 16 => try self.asmOpOnly(.{ ._, .cwd }), + 32 => try self.asmOpOnly(.{ ._, .cdq }), + 64 => try self.asmOpOnly(.{ ._, .cqo }), + else => unreachable, + }, else => unreachable, }, } @@ -3259,23 +3308,28 @@ fn genInlineIntDivFloor(self: *Self, ty: Type, lhs: MCValue, rhs: MCValue) !MCVa const divisor_lock = self.register_manager.lockReg(divisor); defer if (divisor_lock) |lock| self.register_manager.unlockReg(lock); - try self.genIntMulDivOpMir(switch (int_info.signedness) { - .signed => .idiv, - .unsigned => .div, - }, ty, .{ .register = dividend }, .{ .register = divisor }); + try self.genIntMulDivOpMir( + switch (int_info.signedness) { + .signed => .{ .i_, .div }, + .unsigned => .{ ._, .div }, + }, + ty, + .{ .register = dividend }, + .{ .register = divisor }, + ); try self.asmRegisterRegister( - .xor, + .{ ._, .xor }, registerAlias(divisor, abi_size), registerAlias(dividend, abi_size), ); try self.asmRegisterImmediate( - .sar, + .{ ._r, .sa }, registerAlias(divisor, abi_size), Immediate.u(int_info.bits - 1), ); try self.asmRegisterRegister( - .@"test", + .{ ._, .@"test" }, registerAlias(.rdx, abi_size), registerAlias(.rdx, abi_size), ); @@ -3284,7 +3338,7 @@ fn genInlineIntDivFloor(self: *Self, ty: Type, lhs: MCValue, rhs: MCValue) !MCVa registerAlias(.rdx, abi_size), .z, ); - try self.genBinOpMir(.add, ty, .{ .register = divisor }, .{ .register = .rax }); + try self.genBinOpMir(.{ ._, .add }, ty, .{ .register = divisor }, .{ .register = .rax }); return MCValue{ .register = divisor }; } @@ -3406,7 +3460,12 @@ fn airUnwrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void { const result = try self.copyToRegisterWithInstTracking(inst, err_union_ty, operand); if (err_off > 0) { const shift = @intCast(u6, err_off * 8); - try self.genShiftBinOpMir(.shr, err_union_ty, result, .{ .immediate = shift }); + try self.genShiftBinOpMir( + .{ ._r, .sh }, + err_union_ty, + result, + .{ .immediate = shift }, + ); } else { try self.truncateRegister(Type.anyerror, result.register); } @@ -3458,7 +3517,12 @@ fn genUnwrapErrorUnionPayloadMir( .{ .register = try self.copyToTmpRegister(err_union_ty, err_union) }; if (payload_off > 0) { const shift = @intCast(u6, payload_off * 8); - try self.genShiftBinOpMir(.shr, err_union_ty, result_mcv, .{ .immediate = shift }); + try self.genShiftBinOpMir( + .{ ._r, .sh }, + err_union_ty, + result_mcv, + .{ .immediate = shift }, + ); } else { try self.truncateRegister(payload_ty, result_mcv.register); } @@ -3495,7 +3559,7 @@ fn airUnwrapErrUnionErrPtr(self: *Self, inst: Air.Inst.Index) !void { const err_off = @intCast(i32, errUnionErrorOffset(pl_ty, 
self.target.*)); const err_abi_size = @intCast(u32, err_ty.abiSize(self.target.*)); try self.asmRegisterMemory( - .mov, + .{ ._, .mov }, registerAlias(dst_reg, err_abi_size), Memory.sib(Memory.PtrSize.fromSize(err_abi_size), .{ .base = .{ .reg = src_reg }, @@ -3533,7 +3597,7 @@ fn airUnwrapErrUnionPayloadPtr(self: *Self, inst: Air.Inst.Index) !void { const pl_off = @intCast(i32, errUnionPayloadOffset(pl_ty, self.target.*)); const dst_abi_size = @intCast(u32, dst_ty.abiSize(self.target.*)); try self.asmRegisterMemory( - .lea, + .{ ._, .lea }, registerAlias(dst_reg, dst_abi_size), Memory.sib(.qword, .{ .base = .{ .reg = src_reg }, .disp = pl_off }), ); @@ -3559,7 +3623,7 @@ fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void { const err_off = @intCast(i32, errUnionErrorOffset(pl_ty, self.target.*)); const err_abi_size = @intCast(u32, err_ty.abiSize(self.target.*)); try self.asmMemoryImmediate( - .mov, + .{ ._, .mov }, Memory.sib(Memory.PtrSize.fromSize(err_abi_size), .{ .base = .{ .reg = src_reg }, .disp = err_off, @@ -3580,7 +3644,7 @@ fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void { const pl_off = @intCast(i32, errUnionPayloadOffset(pl_ty, self.target.*)); const dst_abi_size = @intCast(u32, dst_ty.abiSize(self.target.*)); try self.asmRegisterMemory( - .lea, + .{ ._, .lea }, registerAlias(dst_reg, dst_abi_size), Memory.sib(.qword, .{ .base = .{ .reg = src_reg }, .disp = pl_off }), ); @@ -3631,13 +3695,13 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void { else => unreachable, .register => |opt_reg| try self.asmRegisterImmediate( - .bts, + .{ ._s, .bt }, opt_reg, Immediate.u(@intCast(u6, pl_abi_size * 8)), ), .load_frame => |frame_addr| try self.asmMemoryImmediate( - .mov, + .{ ._, .mov }, Memory.sib(.byte, .{ .base = .{ .frame = frame_addr.index }, .disp = frame_addr.off + pl_abi_size, @@ -3749,7 +3813,7 @@ fn airPtrSliceLenPtr(self: *Self, inst: Air.Inst.Index) !void { const dst_abi_size = @intCast(u32, dst_ty.abiSize(self.target.*)); try self.asmRegisterMemory( - .lea, + .{ ._, .lea }, registerAlias(dst_reg, dst_abi_size), Memory.sib(.qword, .{ .base = .{ .reg = src_reg }, @@ -3823,7 +3887,7 @@ fn genSliceElemPtr(self: *Self, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref) !MCValue { try self.genSetReg(addr_reg, Type.usize, slice_mcv); // TODO we could allocate register here, but need to expect addr register and potentially // offset register. - try self.genBinOpMir(.add, slice_ptr_field_type, .{ .register = addr_reg }, .{ + try self.genBinOpMir(.{ ._, .add }, slice_ptr_field_type, .{ .register = addr_reg }, .{ .register = offset_reg, }); return MCValue{ .register = addr_reg.to64() }; @@ -3881,13 +3945,13 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void { const frame_index = try self.allocFrameIndex(FrameAlloc.initType(array_ty, self.target.*)); try self.genSetMem(.{ .frame = frame_index }, 0, array_ty, array); try self.asmRegisterMemory( - .lea, + .{ ._, .lea }, addr_reg, Memory.sib(.qword, .{ .base = .{ .frame = frame_index } }), ); }, .load_frame => |frame_addr| try self.asmRegisterMemory( - .lea, + .{ ._, .lea }, addr_reg, Memory.sib(.qword, .{ .base = .{ .frame = frame_addr.index }, .disp = frame_addr.off }), ), @@ -3903,7 +3967,12 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void { // TODO we could allocate register here, but need to expect addr register and potentially // offset register. 
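    // addr_reg now holds the array base and offset_reg the scaled element offset;
    // adding them below yields the element address, which genCopy then loads through.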
const dst_mcv = try self.allocRegOrMem(inst, false); - try self.genBinOpMir(.add, Type.usize, .{ .register = addr_reg }, .{ .register = offset_reg }); + try self.genBinOpMir( + .{ ._, .add }, + Type.usize, + .{ .register = addr_reg }, + .{ .register = offset_reg }, + ); try self.genCopy(elem_ty, dst_mcv, .{ .indirect = .{ .reg = addr_reg } }); return self.finishAir(inst, dst_mcv, .{ bin_op.lhs, bin_op.rhs, .none }); @@ -3937,7 +4006,11 @@ fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void { try self.copyToTmpRegister(ptr_ty, ptr_mcv); const elem_ptr_lock = self.register_manager.lockRegAssumeUnused(elem_ptr_reg); defer self.register_manager.unlockReg(elem_ptr_lock); - try self.asmRegisterRegister(.add, elem_ptr_reg, offset_reg); + try self.asmRegisterRegister( + .{ ._, .add }, + elem_ptr_reg, + offset_reg, + ); const dst_mcv = try self.allocRegOrMem(inst, true); const dst_lock = switch (dst_mcv) { @@ -3977,7 +4050,7 @@ fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void { defer self.register_manager.unlockReg(offset_reg_lock); const dst_mcv = try self.copyToRegisterWithInstTracking(inst, ptr_ty, ptr); - try self.genBinOpMir(.add, ptr_ty, dst_mcv, .{ .register = offset_reg }); + try self.genBinOpMir(.{ ._, .add }, ptr_ty, dst_mcv, .{ .register = offset_reg }); return self.finishAir(inst, dst_mcv, .{ extra.lhs, extra.rhs, .none }); } @@ -4010,7 +4083,12 @@ fn airSetUnionTag(self: *Self, inst: Air.Inst.Index) !void { const adjusted_ptr: MCValue = if (layout.payload_size > 0 and layout.tag_align < layout.payload_align) blk: { // TODO reusing the operand const reg = try self.copyToTmpRegister(ptr_union_ty, ptr); - try self.genBinOpMir(.add, ptr_union_ty, .{ .register = reg }, .{ .immediate = layout.payload_size }); + try self.genBinOpMir( + .{ ._, .add }, + ptr_union_ty, + .{ .register = reg }, + .{ .immediate = layout.payload_size }, + ); break :blk MCValue{ .register = reg }; } else ptr; @@ -4063,7 +4141,7 @@ fn airGetUnionTag(self: *Self, inst: Air.Inst.Index) !void { else 0; const result = try self.copyToRegisterWithInstTracking(inst, union_ty, operand); - try self.genShiftBinOpMir(.shr, Type.usize, result, .{ .immediate = shift }); + try self.genShiftBinOpMir(.{ ._r, .sh }, Type.usize, result, .{ .immediate = shift }); break :blk MCValue{ .register = registerAlias(result.register, @intCast(u32, layout.tag_size)), }; @@ -4100,11 +4178,11 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void { const src_bits = src_ty.bitSize(self.target.*); if (self.hasFeature(.lzcnt)) { if (src_bits <= 64) { - try self.genBinOpMir(.lzcnt, src_ty, dst_mcv, mat_src_mcv); + try self.genBinOpMir(.{ ._, .lzcnt }, src_ty, dst_mcv, mat_src_mcv); const extra_bits = self.regExtraBits(src_ty); if (extra_bits > 0) { - try self.genBinOpMir(.sub, dst_ty, dst_mcv, .{ .immediate = extra_bits }); + try self.genBinOpMir(.{ ._, .sub }, dst_ty, dst_mcv, .{ .immediate = extra_bits }); } } else if (src_bits <= 128) { const tmp_reg = try self.register_manager.allocReg(null, gp); @@ -4112,13 +4190,23 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void { const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg); defer self.register_manager.unlockReg(tmp_lock); - try self.genBinOpMir(.lzcnt, Type.u64, dst_mcv, mat_src_mcv); - try self.genBinOpMir(.add, dst_ty, dst_mcv, .{ .immediate = 64 }); - try self.genBinOpMir(.lzcnt, Type.u64, tmp_mcv, mat_src_mcv.address().offset(8).deref()); + try self.genBinOpMir(.{ ._, .lzcnt }, Type.u64, dst_mcv, mat_src_mcv); + try self.genBinOpMir(.{ ._, .add }, dst_ty, dst_mcv, 
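                // bias the low limb's count by 64; the cmovnc below replaces it with the
                // high limb's count whenever that limb is nonzero (lzcnt sets CF only for
                // a zero source)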
.{ .immediate = 64 }); + try self.genBinOpMir( + .{ ._, .lzcnt }, + Type.u64, + tmp_mcv, + mat_src_mcv.address().offset(8).deref(), + ); try self.asmCmovccRegisterRegister(dst_reg.to32(), tmp_reg.to32(), .nc); if (src_bits < 128) { - try self.genBinOpMir(.sub, dst_ty, dst_mcv, .{ .immediate = 128 - src_bits }); + try self.genBinOpMir( + .{ ._, .sub }, + dst_ty, + dst_mcv, + .{ .immediate = 128 - src_bits }, + ); } } else return self.fail("TODO airClz of {}", .{src_ty.fmt(self.bin_file.options.module.?)}); break :result dst_mcv; @@ -4130,7 +4218,7 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void { const imm_reg = try self.copyToTmpRegister(dst_ty, .{ .immediate = src_bits ^ (src_bits - 1), }); - try self.genBinOpMir(.bsr, src_ty, dst_mcv, mat_src_mcv); + try self.genBinOpMir(.{ ._, .bsr }, src_ty, dst_mcv, mat_src_mcv); const cmov_abi_size = @max(@intCast(u32, dst_ty.abiSize(self.target.*)), 2); try self.asmCmovccRegisterRegister( @@ -4139,12 +4227,12 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void { .z, ); - try self.genBinOpMir(.xor, dst_ty, dst_mcv, .{ .immediate = src_bits - 1 }); + try self.genBinOpMir(.{ ._, .xor }, dst_ty, dst_mcv, .{ .immediate = src_bits - 1 }); } else { const imm_reg = try self.copyToTmpRegister(dst_ty, .{ .immediate = @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - self.regBitSize(dst_ty)), }); - try self.genBinOpMir(.bsr, src_ty, dst_mcv, mat_src_mcv); + try self.genBinOpMir(.{ ._, .bsr }, src_ty, dst_mcv, mat_src_mcv); const cmov_abi_size = @max(@intCast(u32, dst_ty.abiSize(self.target.*)), 2); try self.asmCmovccRegisterRegister( @@ -4154,7 +4242,7 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void { ); try self.genSetReg(dst_reg, dst_ty, .{ .immediate = src_bits - 1 }); - try self.genBinOpMir(.sub, dst_ty, dst_mcv, .{ .register = imm_reg }); + try self.genBinOpMir(.{ ._, .sub }, dst_ty, dst_mcv, .{ .register = imm_reg }); } break :result dst_mcv; }; @@ -4195,7 +4283,7 @@ fn airCtz(self: *Self, inst: Air.Inst.Index) !void { break :tmp dst_mcv; }; try self.genBinOpMir( - .@"or", + .{ ._, .@"or" }, src_ty, tmp_mcv, .{ .immediate = (@as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - extra_bits)) << @@ -4203,7 +4291,7 @@ fn airCtz(self: *Self, inst: Air.Inst.Index) !void { ); break :masked tmp_mcv; } else mat_src_mcv; - try self.genBinOpMir(.tzcnt, src_ty, dst_mcv, masked_mcv); + try self.genBinOpMir(.{ ._, .tzcnt }, src_ty, dst_mcv, masked_mcv); } else if (src_bits <= 128) { const tmp_reg = try self.register_manager.allocReg(null, gp); const tmp_mcv = MCValue{ .register = tmp_reg }; @@ -4213,16 +4301,16 @@ fn airCtz(self: *Self, inst: Air.Inst.Index) !void { const masked_mcv = if (src_bits < 128) masked: { try self.genCopy(Type.u64, dst_mcv, mat_src_mcv.address().offset(8).deref()); try self.genBinOpMir( - .@"or", + .{ ._, .@"or" }, Type.u64, dst_mcv, .{ .immediate = @as(u64, math.maxInt(u64)) << @intCast(u6, src_bits - 64) }, ); break :masked dst_mcv; } else mat_src_mcv.address().offset(8).deref(); - try self.genBinOpMir(.tzcnt, Type.u64, dst_mcv, masked_mcv); - try self.genBinOpMir(.add, dst_ty, dst_mcv, .{ .immediate = 64 }); - try self.genBinOpMir(.tzcnt, Type.u64, tmp_mcv, mat_src_mcv); + try self.genBinOpMir(.{ ._, .tzcnt }, Type.u64, dst_mcv, masked_mcv); + try self.genBinOpMir(.{ ._, .add }, dst_ty, dst_mcv, .{ .immediate = 64 }); + try self.genBinOpMir(.{ ._, .tzcnt }, Type.u64, tmp_mcv, mat_src_mcv); try self.asmCmovccRegisterRegister(dst_reg.to32(), tmp_reg.to32(), .nc); } else return self.fail("TODO airCtz of {}", 
.{src_ty.fmt(self.bin_file.options.module.?)}); break :result dst_mcv; @@ -4232,7 +4320,7 @@ fn airCtz(self: *Self, inst: Air.Inst.Index) !void { return self.fail("TODO airCtz of {}", .{src_ty.fmt(self.bin_file.options.module.?)}); const width_reg = try self.copyToTmpRegister(dst_ty, .{ .immediate = src_bits }); - try self.genBinOpMir(.bsf, src_ty, dst_mcv, mat_src_mcv); + try self.genBinOpMir(.{ ._, .bsf }, src_ty, dst_mcv, mat_src_mcv); const cmov_abi_size = @max(@intCast(u32, dst_ty.abiSize(self.target.*)), 2); try self.asmCmovccRegisterRegister( @@ -4270,7 +4358,7 @@ fn airPopcount(self: *Self, inst: Air.Inst.Index) !void { .{ .register = try self.register_manager.allocReg(inst, gp) }; const popcnt_ty = if (src_abi_size > 1) src_ty else Type.u16; - try self.genBinOpMir(.popcnt, popcnt_ty, dst_mcv, mat_src_mcv); + try self.genBinOpMir(.{ ._, .popcnt }, popcnt_ty, dst_mcv, mat_src_mcv); break :result dst_mcv; } @@ -4301,54 +4389,54 @@ fn airPopcount(self: *Self, inst: Air.Inst.Index) !void { undefined; // dst = operand - try self.asmRegisterRegister(.mov, tmp, dst); + try self.asmRegisterRegister(.{ ._, .mov }, tmp, dst); // tmp = operand - try self.asmRegisterImmediate(.shr, tmp, Immediate.u(1)); + try self.asmRegisterImmediate(.{ ._r, .sh }, tmp, Immediate.u(1)); // tmp = operand >> 1 if (src_abi_size > 4) { - try self.asmRegisterImmediate(.mov, imm, imm_0_1); - try self.asmRegisterRegister(.@"and", tmp, imm); - } else try self.asmRegisterImmediate(.@"and", tmp, imm_0_1); + try self.asmRegisterImmediate(.{ ._, .mov }, imm, imm_0_1); + try self.asmRegisterRegister(.{ ._, .@"and" }, tmp, imm); + } else try self.asmRegisterImmediate(.{ ._, .@"and" }, tmp, imm_0_1); // tmp = (operand >> 1) & 0x55...55 - try self.asmRegisterRegister(.sub, dst, tmp); + try self.asmRegisterRegister(.{ ._, .sub }, dst, tmp); // dst = temp1 = operand - ((operand >> 1) & 0x55...55) - try self.asmRegisterRegister(.mov, tmp, dst); + try self.asmRegisterRegister(.{ ._, .mov }, tmp, dst); // tmp = temp1 - try self.asmRegisterImmediate(.shr, dst, Immediate.u(2)); + try self.asmRegisterImmediate(.{ ._r, .sh }, dst, Immediate.u(2)); // dst = temp1 >> 2 if (src_abi_size > 4) { - try self.asmRegisterImmediate(.mov, imm, imm_00_11); - try self.asmRegisterRegister(.@"and", tmp, imm); - try self.asmRegisterRegister(.@"and", dst, imm); + try self.asmRegisterImmediate(.{ ._, .mov }, imm, imm_00_11); + try self.asmRegisterRegister(.{ ._, .@"and" }, tmp, imm); + try self.asmRegisterRegister(.{ ._, .@"and" }, dst, imm); } else { - try self.asmRegisterImmediate(.@"and", tmp, imm_00_11); - try self.asmRegisterImmediate(.@"and", dst, imm_00_11); + try self.asmRegisterImmediate(.{ ._, .@"and" }, tmp, imm_00_11); + try self.asmRegisterImmediate(.{ ._, .@"and" }, dst, imm_00_11); } // tmp = temp1 & 0x33...33 // dst = (temp1 >> 2) & 0x33...33 - try self.asmRegisterRegister(.add, tmp, dst); + try self.asmRegisterRegister(.{ ._, .add }, tmp, dst); // tmp = temp2 = (temp1 & 0x33...33) + ((temp1 >> 2) & 0x33...33) - try self.asmRegisterRegister(.mov, dst, tmp); + try self.asmRegisterRegister(.{ ._, .mov }, dst, tmp); // dst = temp2 - try self.asmRegisterImmediate(.shr, tmp, Immediate.u(4)); + try self.asmRegisterImmediate(.{ ._r, .sh }, tmp, Immediate.u(4)); // tmp = temp2 >> 4 - try self.asmRegisterRegister(.add, dst, tmp); + try self.asmRegisterRegister(.{ ._, .add }, dst, tmp); // dst = temp2 + (temp2 >> 4) if (src_abi_size > 4) { - try self.asmRegisterImmediate(.mov, imm, imm_0000_1111); - try self.asmRegisterImmediate(.mov, tmp, 
imm_0000_0001); - try self.asmRegisterRegister(.@"and", dst, imm); - try self.asmRegisterRegister(.imul, dst, tmp); + try self.asmRegisterImmediate(.{ ._, .mov }, imm, imm_0000_1111); + try self.asmRegisterImmediate(.{ ._, .mov }, tmp, imm_0000_0001); + try self.asmRegisterRegister(.{ ._, .@"and" }, dst, imm); + try self.asmRegisterRegister(.{ .i_, .mul }, dst, tmp); } else { - try self.asmRegisterImmediate(.@"and", dst, imm_0000_1111); + try self.asmRegisterImmediate(.{ ._, .@"and" }, dst, imm_0000_1111); if (src_abi_size > 1) { - try self.asmRegisterRegisterImmediate(.imul, dst, dst, imm_0000_0001); + try self.asmRegisterRegisterImmediate(.{ .i_, .mul }, dst, dst, imm_0000_0001); } } // dst = temp3 = (temp2 + (temp2 >> 4)) & 0x0f...0f // dst = temp3 * 0x01...01 if (src_abi_size > 1) { - try self.asmRegisterImmediate(.shr, dst, Immediate.u((src_abi_size - 1) * 8)); + try self.asmRegisterImmediate(.{ ._r, .sh }, dst, Immediate.u((src_abi_size - 1) * 8)); } // dst = (temp3 * 0x01...01) >> (bits - 8) } @@ -4377,11 +4465,11 @@ fn byteSwap(self: *Self, inst: Air.Inst.Index, src_ty: Type, src_mcv: MCValue, m 16 => if ((mem_ok or src_mcv.isRegister()) and self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) { - try self.genBinOpMir(.rol, src_ty, src_mcv, .{ .immediate = 8 }); + try self.genBinOpMir(.{ ._l, .ro }, src_ty, src_mcv, .{ .immediate = 8 }); return src_mcv; }, 32, 64 => if (src_mcv.isRegister() and self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) { - try self.genUnOpMir(.bswap, src_ty, src_mcv); + try self.genUnOpMir(.{ ._, .bswap }, src_ty, src_mcv); return src_mcv; }, } @@ -4398,10 +4486,10 @@ fn byteSwap(self: *Self, inst: Air.Inst.Index, src_ty: Type, src_mcv: MCValue, m try self.genSetReg(dst_mcv.register, src_ty, src_mcv); switch (src_bits) { else => unreachable, - 16 => try self.genBinOpMir(.rol, src_ty, dst_mcv, .{ .immediate = 8 }), - 32, 64 => try self.genUnOpMir(.bswap, src_ty, dst_mcv), + 16 => try self.genBinOpMir(.{ ._l, .ro }, src_ty, dst_mcv, .{ .immediate = 8 }), + 32, 64 => try self.genUnOpMir(.{ ._, .bswap }, src_ty, dst_mcv), } - } else try self.genBinOpMir(.movbe, src_ty, dst_mcv, src_mcv); + } else try self.genBinOpMir(.{ ._, .movbe }, src_ty, dst_mcv, src_mcv); return dst_mcv; } @@ -4410,7 +4498,7 @@ fn byteSwap(self: *Self, inst: Air.Inst.Index, src_ty: Type, src_mcv: MCValue, m const dst_lock = self.register_manager.lockRegAssumeUnused(dst_reg); defer self.register_manager.unlockReg(dst_lock); - try self.genBinOpMir(.movbe, src_ty, dst_mcv, src_mcv); + try self.genBinOpMir(.{ ._, .movbe }, src_ty, dst_mcv, src_mcv); return dst_mcv; } @@ -4424,7 +4512,7 @@ fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void { switch (self.regExtraBits(src_ty)) { 0 => {}, else => |extra| try self.genBinOpMir( - if (src_ty.isSignedInt()) .sar else .shr, + if (src_ty.isSignedInt()) .{ ._r, .sa } else .{ ._r, .sh }, src_ty, dst_mcv, .{ .immediate = extra }, @@ -4464,40 +4552,40 @@ fn airBitReverse(self: *Self, inst: Air.Inst.Index) !void { const imm_0_1 = Immediate.u(mask / 0b1_1); // dst = temp1 = bswap(operand) - try self.asmRegisterRegister(.mov, tmp, dst); + try self.asmRegisterRegister(.{ ._, .mov }, tmp, dst); // tmp = temp1 - try self.asmRegisterImmediate(.shr, dst, Immediate.u(4)); + try self.asmRegisterImmediate(.{ ._r, .sh }, dst, Immediate.u(4)); // dst = temp1 >> 4 if (src_abi_size > 4) { - try self.asmRegisterImmediate(.mov, imm, imm_0000_1111); - try self.asmRegisterRegister(.@"and", tmp, imm); - try self.asmRegisterRegister(.@"and", dst, imm); + try 
self.asmRegisterImmediate(.{ ._, .mov }, imm, imm_0000_1111); + try self.asmRegisterRegister(.{ ._, .@"and" }, tmp, imm); + try self.asmRegisterRegister(.{ ._, .@"and" }, dst, imm); } else { - try self.asmRegisterImmediate(.@"and", tmp, imm_0000_1111); - try self.asmRegisterImmediate(.@"and", dst, imm_0000_1111); + try self.asmRegisterImmediate(.{ ._, .@"and" }, tmp, imm_0000_1111); + try self.asmRegisterImmediate(.{ ._, .@"and" }, dst, imm_0000_1111); } // tmp = temp1 & 0x0F...0F // dst = (temp1 >> 4) & 0x0F...0F - try self.asmRegisterImmediate(.shl, tmp, Immediate.u(4)); + try self.asmRegisterImmediate(.{ ._l, .sh }, tmp, Immediate.u(4)); // tmp = (temp1 & 0x0F...0F) << 4 - try self.asmRegisterRegister(.@"or", dst, tmp); + try self.asmRegisterRegister(.{ ._, .@"or" }, dst, tmp); // dst = temp2 = ((temp1 >> 4) & 0x0F...0F) | ((temp1 & 0x0F...0F) << 4) - try self.asmRegisterRegister(.mov, tmp, dst); + try self.asmRegisterRegister(.{ ._, .mov }, tmp, dst); // tmp = temp2 - try self.asmRegisterImmediate(.shr, dst, Immediate.u(2)); + try self.asmRegisterImmediate(.{ ._r, .sh }, dst, Immediate.u(2)); // dst = temp2 >> 2 if (src_abi_size > 4) { - try self.asmRegisterImmediate(.mov, imm, imm_00_11); - try self.asmRegisterRegister(.@"and", tmp, imm); - try self.asmRegisterRegister(.@"and", dst, imm); + try self.asmRegisterImmediate(.{ ._, .mov }, imm, imm_00_11); + try self.asmRegisterRegister(.{ ._, .@"and" }, tmp, imm); + try self.asmRegisterRegister(.{ ._, .@"and" }, dst, imm); } else { - try self.asmRegisterImmediate(.@"and", tmp, imm_00_11); - try self.asmRegisterImmediate(.@"and", dst, imm_00_11); + try self.asmRegisterImmediate(.{ ._, .@"and" }, tmp, imm_00_11); + try self.asmRegisterImmediate(.{ ._, .@"and" }, dst, imm_00_11); } // tmp = temp2 & 0x33...33 // dst = (temp2 >> 2) & 0x33...33 try self.asmRegisterMemory( - .lea, + .{ ._, .lea }, if (src_abi_size > 4) tmp.to64() else tmp.to32(), Memory.sib(.qword, .{ .base = .{ .reg = dst.to64() }, @@ -4505,22 +4593,22 @@ fn airBitReverse(self: *Self, inst: Air.Inst.Index) !void { }), ); // tmp = temp3 = ((temp2 >> 2) & 0x33...33) + ((temp2 & 0x33...33) << 2) - try self.asmRegisterRegister(.mov, dst, tmp); + try self.asmRegisterRegister(.{ ._, .mov }, dst, tmp); // dst = temp3 - try self.asmRegisterImmediate(.shr, tmp, Immediate.u(1)); + try self.asmRegisterImmediate(.{ ._r, .sh }, tmp, Immediate.u(1)); // tmp = temp3 >> 1 if (src_abi_size > 4) { - try self.asmRegisterImmediate(.mov, imm, imm_0_1); - try self.asmRegisterRegister(.@"and", dst, imm); - try self.asmRegisterRegister(.@"and", tmp, imm); + try self.asmRegisterImmediate(.{ ._, .mov }, imm, imm_0_1); + try self.asmRegisterRegister(.{ ._, .@"and" }, dst, imm); + try self.asmRegisterRegister(.{ ._, .@"and" }, tmp, imm); } else { - try self.asmRegisterImmediate(.@"and", dst, imm_0_1); - try self.asmRegisterImmediate(.@"and", tmp, imm_0_1); + try self.asmRegisterImmediate(.{ ._, .@"and" }, dst, imm_0_1); + try self.asmRegisterImmediate(.{ ._, .@"and" }, tmp, imm_0_1); } // dst = temp3 & 0x55...55 // tmp = (temp3 >> 1) & 0x55...55 try self.asmRegisterMemory( - .lea, + .{ ._, .lea }, if (src_abi_size > 4) dst.to64() else dst.to32(), Memory.sib(.qword, .{ .base = .{ .reg = tmp.to64() }, @@ -4533,7 +4621,7 @@ fn airBitReverse(self: *Self, inst: Air.Inst.Index) !void { switch (self.regExtraBits(src_ty)) { 0 => {}, else => |extra| try self.genBinOpMir( - if (src_ty.isSignedInt()) .sar else .shr, + if (src_ty.isSignedInt()) .{ ._r, .sa } else .{ ._r, .sh }, src_ty, dst_mcv, .{ .immediate = extra 
}, @@ -4590,8 +4678,8 @@ fn airFloatSign(self: *Self, inst: Air.Inst.Index) !void { try self.genBinOpMir(switch (ty_bits) { // No point using an extra prefix byte for *pd which performs the same operation. 16, 32, 64, 128 => switch (tag) { - .neg => .xorps, - .fabs => .andnps, + .neg => .{ ._, .xorps }, + .fabs => .{ ._, .andnps }, else => unreachable, }, 80 => return self.fail("TODO implement airFloatSign for {}", .{ @@ -4622,25 +4710,25 @@ fn genRound(self: *Self, ty: Type, dst_reg: Register, src_mcv: MCValue, mode: u4 if (!self.hasFeature(.sse4_1)) return self.fail("TODO implement genRound without sse4_1 feature", .{}); - const mir_tag = if (@as(?Mir.Inst.Tag, switch (ty.zigTypeTag()) { + const mir_tag = if (@as(?Mir.Inst.FixedTag, switch (ty.zigTypeTag()) { .Float => switch (ty.floatBits(self.target.*)) { - 32 => if (self.hasFeature(.avx)) .vroundss else .roundss, - 64 => if (self.hasFeature(.avx)) .vroundsd else .roundsd, + 32 => if (self.hasFeature(.avx)) .{ ._, .vroundss } else .{ ._, .roundss }, + 64 => if (self.hasFeature(.avx)) .{ ._, .vroundsd } else .{ ._, .roundsd }, 16, 80, 128 => null, else => unreachable, }, .Vector => switch (ty.childType().zigTypeTag()) { .Float => switch (ty.childType().floatBits(self.target.*)) { 32 => switch (ty.vectorLen()) { - 1 => if (self.hasFeature(.avx)) .vroundss else .roundss, - 2...4 => if (self.hasFeature(.avx)) .vroundps else .roundps, - 5...8 => if (self.hasFeature(.avx)) .vroundps else null, + 1 => if (self.hasFeature(.avx)) .{ ._, .vroundss } else .{ ._, .roundss }, + 2...4 => if (self.hasFeature(.avx)) .{ ._, .vroundps } else .{ ._, .roundps }, + 5...8 => if (self.hasFeature(.avx)) .{ ._, .vroundps } else null, else => null, }, 64 => switch (ty.vectorLen()) { - 1 => if (self.hasFeature(.avx)) .vroundsd else .roundsd, - 2 => if (self.hasFeature(.avx)) .vroundpd else .roundpd, - 3...4 => if (self.hasFeature(.avx)) .vroundpd else null, + 1 => if (self.hasFeature(.avx)) .{ ._, .vroundsd } else .{ ._, .roundsd }, + 2 => if (self.hasFeature(.avx)) .{ ._, .vroundpd } else .{ ._, .roundpd }, + 3...4 => if (self.hasFeature(.avx)) .{ ._, .vroundpd } else null, else => null, }, 16, 80, 128 => null, @@ -4655,7 +4743,7 @@ fn genRound(self: *Self, ty: Type, dst_reg: Register, src_mcv: MCValue, mode: u4 const abi_size = @intCast(u32, ty.abiSize(self.target.*)); const dst_alias = registerAlias(dst_reg, abi_size); - switch (mir_tag) { + switch (mir_tag[1]) { .vroundss, .vroundsd => if (src_mcv.isMemory()) try self.asmRegisterRegisterMemoryImmediate( mir_tag, dst_alias, @@ -4704,25 +4792,25 @@ fn airSqrt(self: *Self, inst: Air.Inst.Index) !void { defer if (dst_lock) |lock| self.register_manager.unlockReg(lock); const result: MCValue = result: { - const mir_tag = if (@as(?Mir.Inst.Tag, switch (ty.zigTypeTag()) { + const mir_tag = if (@as(?Mir.Inst.FixedTag, switch (ty.zigTypeTag()) { .Float => switch (ty.floatBits(self.target.*)) { 16 => if (self.hasFeature(.f16c)) { const mat_src_reg = if (src_mcv.isRegister()) src_mcv.getReg().? 
else try self.copyToTmpRegister(ty, src_mcv); - try self.asmRegisterRegister(.vcvtph2ps, dst_reg, mat_src_reg.to128()); - try self.asmRegisterRegisterRegister(.vsqrtss, dst_reg, dst_reg, dst_reg); + try self.asmRegisterRegister(.{ ._, .vcvtph2ps }, dst_reg, mat_src_reg.to128()); + try self.asmRegisterRegisterRegister(.{ ._, .vsqrtss }, dst_reg, dst_reg, dst_reg); try self.asmRegisterRegisterImmediate( - .vcvtps2ph, + .{ ._, .vcvtps2ph }, dst_reg, dst_reg, Immediate.u(0b1_00), ); break :result dst_mcv; } else null, - 32 => if (self.hasFeature(.avx)) .vsqrtss else .sqrtss, - 64 => if (self.hasFeature(.avx)) .vsqrtsd else .sqrtsd, + 32 => if (self.hasFeature(.avx)) .{ ._, .vsqrtss } else .{ ._, .sqrtss }, + 64 => if (self.hasFeature(.avx)) .{ ._, .vsqrtsd } else .{ ._, .sqrtsd }, 80, 128 => null, else => unreachable, }, @@ -4731,16 +4819,21 @@ fn airSqrt(self: *Self, inst: Air.Inst.Index) !void { 16 => if (self.hasFeature(.f16c)) switch (ty.vectorLen()) { 1 => { try self.asmRegisterRegister( - .vcvtph2ps, + .{ ._, .vcvtph2ps }, dst_reg, (if (src_mcv.isRegister()) src_mcv.getReg().? else try self.copyToTmpRegister(ty, src_mcv)).to128(), ); - try self.asmRegisterRegisterRegister(.vsqrtss, dst_reg, dst_reg, dst_reg); + try self.asmRegisterRegisterRegister( + .{ ._, .vsqrtss }, + dst_reg, + dst_reg, + dst_reg, + ); try self.asmRegisterRegisterImmediate( - .vcvtps2ph, + .{ ._, .vcvtps2ph }, dst_reg, dst_reg, Immediate.u(0b1_00), @@ -4750,22 +4843,22 @@ fn airSqrt(self: *Self, inst: Air.Inst.Index) !void { 2...8 => { const wide_reg = registerAlias(dst_reg, abi_size * 2); if (src_mcv.isMemory()) try self.asmRegisterMemory( - .vcvtph2ps, + .{ ._, .vcvtph2ps }, wide_reg, src_mcv.mem(Memory.PtrSize.fromSize( @intCast(u32, @divExact(wide_reg.bitSize(), 16)), )), ) else try self.asmRegisterRegister( - .vcvtph2ps, + .{ ._, .vcvtph2ps }, wide_reg, (if (src_mcv.isRegister()) src_mcv.getReg().? 
else try self.copyToTmpRegister(ty, src_mcv)).to128(), ); - try self.asmRegisterRegister(.vsqrtps, wide_reg, wide_reg); + try self.asmRegisterRegister(.{ ._, .vsqrtps }, wide_reg, wide_reg); try self.asmRegisterRegisterImmediate( - .vcvtps2ph, + .{ ._, .vcvtps2ph }, dst_reg, wide_reg, Immediate.u(0b1_00), @@ -4775,15 +4868,15 @@ fn airSqrt(self: *Self, inst: Air.Inst.Index) !void { else => null, } else null, 32 => switch (ty.vectorLen()) { - 1 => if (self.hasFeature(.avx)) .vsqrtss else .sqrtss, - 2...4 => if (self.hasFeature(.avx)) .vsqrtps else .sqrtps, - 5...8 => if (self.hasFeature(.avx)) .vsqrtps else null, + 1 => if (self.hasFeature(.avx)) .{ ._, .vsqrtss } else .{ ._, .sqrtss }, + 2...4 => if (self.hasFeature(.avx)) .{ ._, .vsqrtps } else .{ ._, .sqrtps }, + 5...8 => if (self.hasFeature(.avx)) .{ ._, .vsqrtps } else null, else => null, }, 64 => switch (ty.vectorLen()) { - 1 => if (self.hasFeature(.avx)) .vsqrtsd else .sqrtsd, - 2 => if (self.hasFeature(.avx)) .vsqrtpd else .sqrtpd, - 3...4 => if (self.hasFeature(.avx)) .vsqrtpd else null, + 1 => if (self.hasFeature(.avx)) .{ ._, .vsqrtsd } else .{ ._, .sqrtsd }, + 2 => if (self.hasFeature(.avx)) .{ ._, .vsqrtpd } else .{ ._, .sqrtpd }, + 3...4 => if (self.hasFeature(.avx)) .{ ._, .vsqrtpd } else null, else => null, }, 80, 128 => null, @@ -4795,7 +4888,7 @@ fn airSqrt(self: *Self, inst: Air.Inst.Index) !void { })) |tag| tag else return self.fail("TODO implement airSqrt for {}", .{ ty.fmt(self.bin_file.options.module.?), }); - switch (mir_tag) { + switch (mir_tag[1]) { .vsqrtss, .vsqrtsd => if (src_mcv.isMemory()) try self.asmRegisterRegisterMemory( mir_tag, dst_reg, @@ -4911,14 +5004,14 @@ fn packedLoad(self: *Self, dst_mcv: MCValue, ptr_ty: Type, ptr_mcv: MCValue) Inn if (load_abi_size <= 8) { const load_reg = registerAlias(dst_reg, load_abi_size); try self.asmRegisterMemory( - .mov, + .{ ._, .mov }, load_reg, Memory.sib(Memory.PtrSize.fromSize(load_abi_size), .{ .base = .{ .reg = ptr_reg }, .disp = val_byte_off, }), ); - try self.asmRegisterImmediate(.shr, load_reg, Immediate.u(val_bit_off)); + try self.asmRegisterImmediate(.{ ._r, .sh }, load_reg, Immediate.u(val_bit_off)); } else { const tmp_reg = registerAlias(try self.register_manager.allocReg(null, gp), val_abi_size); const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg); @@ -4926,7 +5019,7 @@ fn packedLoad(self: *Self, dst_mcv: MCValue, ptr_ty: Type, ptr_mcv: MCValue) Inn const dst_alias = registerAlias(dst_reg, val_abi_size); try self.asmRegisterMemory( - .mov, + .{ ._, .mov }, dst_alias, Memory.sib(Memory.PtrSize.fromSize(val_abi_size), .{ .base = .{ .reg = ptr_reg }, @@ -4934,14 +5027,19 @@ fn packedLoad(self: *Self, dst_mcv: MCValue, ptr_ty: Type, ptr_mcv: MCValue) Inn }), ); try self.asmRegisterMemory( - .mov, + .{ ._, .mov }, tmp_reg, Memory.sib(Memory.PtrSize.fromSize(val_abi_size), .{ .base = .{ .reg = ptr_reg }, .disp = val_byte_off + 1, }), ); - try self.asmRegisterRegisterImmediate(.shrd, dst_alias, tmp_reg, Immediate.u(val_bit_off)); + try self.asmRegisterRegisterImmediate( + .{ ._rd, .sh }, + dst_alias, + tmp_reg, + Immediate.u(val_bit_off), + ); } if (val_extra_bits > 0) try self.truncateRegister(val_ty, dst_reg); @@ -5047,13 +5145,13 @@ fn packedStore(self: *Self, ptr_ty: Type, ptr_mcv: MCValue, src_mcv: MCValue) In const part_mask_not = part_mask ^ (@as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - limb_abi_bits)); if (limb_abi_size <= 4) { - try self.asmMemoryImmediate(.@"and", limb_mem, Immediate.u(part_mask_not)); + try 
self.asmMemoryImmediate(.{ ._, .@"and" }, limb_mem, Immediate.u(part_mask_not)); } else if (math.cast(i32, @bitCast(i64, part_mask_not))) |small| { - try self.asmMemoryImmediate(.@"and", limb_mem, Immediate.s(small)); + try self.asmMemoryImmediate(.{ ._, .@"and" }, limb_mem, Immediate.s(small)); } else { const part_mask_reg = try self.register_manager.allocReg(null, gp); - try self.asmRegisterImmediate(.mov, part_mask_reg, Immediate.u(part_mask_not)); - try self.asmMemoryRegister(.@"and", limb_mem, part_mask_reg); + try self.asmRegisterImmediate(.{ ._, .mov }, part_mask_reg, Immediate.u(part_mask_not)); + try self.asmMemoryRegister(.{ ._, .@"and" }, limb_mem, part_mask_reg); } if (src_bit_size <= 64) { @@ -5064,14 +5162,26 @@ fn packedStore(self: *Self, ptr_ty: Type, ptr_mcv: MCValue, src_mcv: MCValue) In try self.genSetReg(tmp_reg, src_ty, src_mcv); switch (limb_i) { - 0 => try self.genShiftBinOpMir(.shl, src_ty, tmp_mcv, .{ .immediate = src_bit_off }), - 1 => try self.genShiftBinOpMir(.shr, src_ty, tmp_mcv, .{ - .immediate = limb_abi_bits - src_bit_off, - }), + 0 => try self.genShiftBinOpMir( + .{ ._l, .sh }, + src_ty, + tmp_mcv, + .{ .immediate = src_bit_off }, + ), + 1 => try self.genShiftBinOpMir( + .{ ._r, .sh }, + src_ty, + tmp_mcv, + .{ .immediate = limb_abi_bits - src_bit_off }, + ), else => unreachable, } - try self.genBinOpMir(.@"and", src_ty, tmp_mcv, .{ .immediate = part_mask }); - try self.asmMemoryRegister(.@"or", limb_mem, registerAlias(tmp_reg, limb_abi_size)); + try self.genBinOpMir(.{ ._, .@"and" }, src_ty, tmp_mcv, .{ .immediate = part_mask }); + try self.asmMemoryRegister( + .{ ._, .@"or" }, + limb_mem, + registerAlias(tmp_reg, limb_abi_size), + ); } else return self.fail("TODO: implement packed store of {}", .{ src_ty.fmt(self.bin_file.options.module.?), }); @@ -5171,7 +5281,7 @@ fn fieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32 .load_tlv => |sym_index| .{ .lea_tlv = sym_index }, else => mcv, }); - try self.genBinOpMir(.add, Type.usize, dst_mcv, .{ .register = offset_reg }); + try self.genBinOpMir(.{ ._, .add }, Type.usize, dst_mcv, .{ .register = offset_reg }); break :result dst_mcv; }, .indirect => |reg_off| break :result .{ .indirect = .{ @@ -5255,14 +5365,14 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { if (load_abi_size <= 8) { const load_reg = registerAlias(dst_reg, load_abi_size); try self.asmRegisterMemory( - .mov, + .{ ._, .mov }, load_reg, Memory.sib(Memory.PtrSize.fromSize(load_abi_size), .{ .base = .{ .frame = frame_addr.index }, .disp = frame_addr.off + field_byte_off, }), ); - try self.asmRegisterImmediate(.shr, load_reg, Immediate.u(field_bit_off)); + try self.asmRegisterImmediate(.{ ._r, .sh }, load_reg, Immediate.u(field_bit_off)); } else { const tmp_reg = registerAlias( try self.register_manager.allocReg(null, gp), @@ -5273,7 +5383,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { const dst_alias = registerAlias(dst_reg, field_abi_size); try self.asmRegisterMemory( - .mov, + .{ ._, .mov }, dst_alias, Memory.sib(Memory.PtrSize.fromSize(field_abi_size), .{ .base = .{ .frame = frame_addr.index }, @@ -5281,7 +5391,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { }), ); try self.asmRegisterMemory( - .mov, + .{ ._, .mov }, tmp_reg, Memory.sib(Memory.PtrSize.fromSize(field_abi_size), .{ .base = .{ .frame = frame_addr.index }, @@ -5289,7 +5399,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { }), ); try self.asmRegisterRegisterImmediate( - .shrd, + 
.{ ._rd, .sh }, dst_alias, tmp_reg, Immediate.u(field_bit_off), @@ -5325,21 +5435,26 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { defer if (dst_mcv_lock) |lock| self.register_manager.unlockReg(lock); // Shift by struct_field_offset. - try self.genShiftBinOpMir(.shr, Type.usize, dst_mcv, .{ .immediate = field_off }); + try self.genShiftBinOpMir( + .{ ._r, .sh }, + Type.usize, + dst_mcv, + .{ .immediate = field_off }, + ); // Mask to field_bit_size bits const field_bit_size = field_ty.bitSize(self.target.*); const mask = ~@as(u64, 0) >> @intCast(u6, 64 - field_bit_size); const tmp_reg = try self.copyToTmpRegister(Type.usize, .{ .immediate = mask }); - try self.genBinOpMir(.@"and", Type.usize, dst_mcv, .{ .register = tmp_reg }); + try self.genBinOpMir(.{ ._, .@"and" }, Type.usize, dst_mcv, .{ .register = tmp_reg }); const signedness = if (field_ty.isAbiInt()) field_ty.intInfo(self.target.*).signedness else .unsigned; const field_byte_size = @intCast(u32, field_ty.abiSize(self.target.*)); if (signedness == .signed and field_byte_size < 8) { try self.asmRegisterRegister( - if (field_byte_size >= 4) .movsxd else .movsx, + if (field_byte_size >= 4) .{ ._d, .movsx } else .{ ._, .movsx }, dst_mcv.register, registerAlias(dst_mcv.register, field_byte_size), ); @@ -5451,17 +5566,17 @@ fn genUnOp(self: *Self, maybe_inst: ?Air.Inst.Index, tag: Air.Inst.Tag, src_air: if (limb_pl.base.tag == .int_unsigned and self.regExtraBits(limb_ty) > 0) { const mask = @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - limb_pl.data); - try self.genBinOpMir(.xor, limb_ty, limb_mcv, .{ .immediate = mask }); - } else try self.genUnOpMir(.not, limb_ty, limb_mcv); + try self.genBinOpMir(.{ ._, .xor }, limb_ty, limb_mcv, .{ .immediate = mask }); + } else try self.genUnOpMir(.{ ._, .not }, limb_ty, limb_mcv); } }, - .neg => try self.genUnOpMir(.neg, src_ty, dst_mcv), + .neg => try self.genUnOpMir(.{ ._, .neg }, src_ty, dst_mcv), else => unreachable, } return dst_mcv; } -fn genUnOpMir(self: *Self, mir_tag: Mir.Inst.Tag, dst_ty: Type, dst_mcv: MCValue) !void { +fn genUnOpMir(self: *Self, mir_tag: Mir.Inst.FixedTag, dst_ty: Type, dst_mcv: MCValue) !void { const abi_size = @intCast(u32, dst_ty.abiSize(self.target.*)); if (abi_size > 8) return self.fail("TODO implement {} for {}", .{ mir_tag, @@ -5504,7 +5619,7 @@ fn genUnOpMir(self: *Self, mir_tag: Mir.Inst.Tag, dst_ty: Type, dst_mcv: MCValue /// Clobbers .rcx for non-immediate shift value. 
fn genShiftBinOpMir( self: *Self, - tag: Mir.Inst.Tag, + tag: Mir.Inst.FixedTag, ty: Type, lhs_mcv: MCValue, shift_mcv: MCValue, @@ -5589,16 +5704,16 @@ fn genShiftBinOpMir( const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg); defer self.register_manager.unlockReg(tmp_lock); - const info: struct { offsets: [2]i32, double_tag: Mir.Inst.Tag } = switch (tag) { - .shl, .sal => .{ .offsets = .{ 0, 8 }, .double_tag = .shld }, - .shr, .sar => .{ .offsets = .{ 8, 0 }, .double_tag = .shrd }, + const info: struct { offsets: [2]i32, double_tag: Mir.Inst.FixedTag } = switch (tag[0]) { + ._l => .{ .offsets = .{ 0, 8 }, .double_tag = .{ ._ld, .sh } }, + ._r => .{ .offsets = .{ 8, 0 }, .double_tag = .{ ._rd, .sh } }, else => unreachable, }; switch (lhs_mcv) { .load_frame => |dst_frame_addr| switch (rhs_mcv) { .immediate => |rhs_imm| if (rhs_imm == 0) {} else if (rhs_imm < 64) { try self.asmRegisterMemory( - .mov, + .{ ._, .mov }, tmp_reg, Memory.sib(.qword, .{ .base = .{ .frame = dst_frame_addr.index }, @@ -5625,7 +5740,7 @@ fn genShiftBinOpMir( } else { assert(rhs_imm < 128); try self.asmRegisterMemory( - .mov, + .{ ._, .mov }, tmp_reg, Memory.sib(.qword, .{ .base = .{ .frame = dst_frame_addr.index }, @@ -5636,34 +5751,30 @@ fn genShiftBinOpMir( try self.asmRegisterImmediate(tag, tmp_reg, Immediate.u(rhs_imm - 64)); } try self.asmMemoryRegister( - .mov, + .{ ._, .mov }, Memory.sib(.qword, .{ .base = .{ .frame = dst_frame_addr.index }, .disp = dst_frame_addr.off + info.offsets[1], }), tmp_reg, ); - switch (tag) { - .shl, .sal, .shr => { - try self.asmRegisterRegister(.xor, tmp_reg.to32(), tmp_reg.to32()); - try self.asmMemoryRegister( - .mov, - Memory.sib(.qword, .{ - .base = .{ .frame = dst_frame_addr.index }, - .disp = dst_frame_addr.off + info.offsets[0], - }), - tmp_reg, - ); - }, - .sar => try self.asmMemoryImmediate( - tag, + if (tag[0] == ._r and tag[1] == .sa) try self.asmMemoryImmediate( + tag, + Memory.sib(.qword, .{ + .base = .{ .frame = dst_frame_addr.index }, + .disp = dst_frame_addr.off + info.offsets[0], + }), + Immediate.u(63), + ) else { + try self.asmRegisterRegister(.{ ._, .xor }, tmp_reg.to32(), tmp_reg.to32()); + try self.asmMemoryRegister( + .{ ._, .mov }, Memory.sib(.qword, .{ .base = .{ .frame = dst_frame_addr.index }, .disp = dst_frame_addr.off + info.offsets[0], }), - Immediate.u(63), - ), - else => unreachable, + tmp_reg, + ); } }, else => { @@ -5677,7 +5788,7 @@ fn genShiftBinOpMir( try self.genSetReg(.cl, Type.u8, rhs_mcv); try self.asmRegisterMemory( - .mov, + .{ ._, .mov }, first_reg, Memory.sib(.qword, .{ .base = .{ .frame = dst_frame_addr.index }, @@ -5685,32 +5796,28 @@ fn genShiftBinOpMir( }), ); try self.asmRegisterMemory( - .mov, + .{ ._, .mov }, second_reg, Memory.sib(.qword, .{ .base = .{ .frame = dst_frame_addr.index }, .disp = dst_frame_addr.off + info.offsets[1], }), ); - switch (tag) { - .shl, .sal, .shr => try self.asmRegisterRegister( - .xor, - tmp_reg.to32(), - tmp_reg.to32(), - ), - .sar => { - try self.asmRegisterRegister(.mov, tmp_reg, first_reg); - try self.asmRegisterImmediate(tag, tmp_reg, Immediate.u(63)); - }, - else => unreachable, - } + if (tag[0] == ._r and tag[1] == .sa) { + try self.asmRegisterRegister(.{ ._, .mov }, tmp_reg, first_reg); + try self.asmRegisterImmediate(tag, tmp_reg, Immediate.u(63)); + } else try self.asmRegisterRegister( + .{ ._, .xor }, + tmp_reg.to32(), + tmp_reg.to32(), + ); try self.asmRegisterRegisterRegister(info.double_tag, second_reg, first_reg, .cl); try self.asmRegisterRegister(tag, first_reg, .cl); - 
try self.asmRegisterImmediate(.cmp, .cl, Immediate.u(64)); + try self.asmRegisterImmediate(.{ ._, .cmp }, .cl, Immediate.u(64)); try self.asmCmovccRegisterRegister(second_reg, first_reg, .ae); try self.asmCmovccRegisterRegister(first_reg, tmp_reg, .ae); try self.asmMemoryRegister( - .mov, + .{ ._, .mov }, Memory.sib(.qword, .{ .base = .{ .frame = dst_frame_addr.index }, .disp = dst_frame_addr.off + info.offsets[1], @@ -5718,7 +5825,7 @@ fn genShiftBinOpMir( second_reg, ); try self.asmMemoryRegister( - .mov, + .{ ._, .mov }, Memory.sib(.qword, .{ .base = .{ .frame = dst_frame_addr.index }, .disp = dst_frame_addr.off + info.offsets[0], @@ -5743,7 +5850,7 @@ fn genShiftBinOpMir( /// Asserts .rcx is free. fn genShiftBinOp( self: *Self, - tag: Air.Inst.Tag, + air_tag: Air.Inst.Tag, maybe_inst: ?Air.Inst.Index, lhs_mcv: MCValue, rhs_mcv: MCValue, @@ -5788,14 +5895,14 @@ fn genShiftBinOp( }; const signedness = lhs_ty.intInfo(self.target.*).signedness; - try self.genShiftBinOpMir(switch (tag) { + try self.genShiftBinOpMir(switch (air_tag) { .shl, .shl_exact => switch (signedness) { - .signed => .sal, - .unsigned => .shl, + .signed => .{ ._l, .sa }, + .unsigned => .{ ._l, .sh }, }, .shr, .shr_exact => switch (signedness) { - .signed => .sar, - .unsigned => .shr, + .signed => .{ ._r, .sa }, + .unsigned => .{ ._r, .sh }, }, else => unreachable, }, lhs_ty, dst_mcv, rhs_mcv); @@ -5855,20 +5962,18 @@ fn genMulDivBinOp( try self.register_manager.getReg(.rax, track_inst_rax); try self.register_manager.getReg(.rdx, track_inst_rdx); - const mir_tag: Mir.Inst.Tag = switch (signedness) { + try self.genIntMulDivOpMir(switch (signedness) { .signed => switch (tag) { - .mul, .mulwrap => .imul, - .div_trunc, .div_exact, .rem => .idiv, + .mul, .mulwrap => .{ .i_, .mul }, + .div_trunc, .div_exact, .rem => .{ .i_, .div }, else => unreachable, }, .unsigned => switch (tag) { - .mul, .mulwrap => .mul, - .div_trunc, .div_exact, .rem => .div, + .mul, .mulwrap => .{ ._, .mul }, + .div_trunc, .div_exact, .rem => .{ ._, .div }, else => unreachable, }, - }; - - try self.genIntMulDivOpMir(mir_tag, ty, lhs, rhs); + }, ty, lhs, rhs); if (dst_abi_size <= 8) return .{ .register = registerAlias(switch (tag) { .mul, .mulwrap, .div_trunc, .div_exact => .rax, @@ -5878,7 +5983,7 @@ fn genMulDivBinOp( const dst_mcv = try self.allocRegOrMemAdvanced(dst_ty, maybe_inst, false); try self.asmMemoryRegister( - .mov, + .{ ._, .mov }, Memory.sib(.qword, .{ .base = .{ .frame = dst_mcv.load_frame.index }, .disp = dst_mcv.load_frame.off, @@ -5886,7 +5991,7 @@ fn genMulDivBinOp( .rax, ); try self.asmMemoryRegister( - .mov, + .{ ._, .mov }, Memory.sib(.qword, .{ .base = .{ .frame = dst_mcv.load_frame.index }, .disp = dst_mcv.load_frame.off + 8, @@ -5927,12 +6032,12 @@ fn genMulDivBinOp( try self.copyToRegisterWithInstTracking(inst, ty, lhs) else .{ .register = try self.copyToTmpRegister(ty, lhs) }; - try self.genBinOpMir(.sub, ty, result, div_floor); + try self.genBinOpMir(.{ ._, .sub }, ty, result, div_floor); return result; }, .unsigned => { - try self.genIntMulDivOpMir(.div, ty, lhs, rhs); + try self.genIntMulDivOpMir(.{ ._, .div }, ty, lhs, rhs); return .{ .register = registerAlias(.rdx, abi_size) }; }, } @@ -5974,7 +6079,7 @@ fn genMulDivBinOp( switch (signedness) { .signed => return try self.genInlineIntDivFloor(ty, lhs, actual_rhs), .unsigned => { - try self.genIntMulDivOpMir(.div, ty, lhs, actual_rhs); + try self.genIntMulDivOpMir(.{ ._, .div }, ty, lhs, actual_rhs); return .{ .register = registerAlias(.rax, abi_size) }; }, } @@ -6072,11 
+6177,11 @@ fn genBinOp( switch (air_tag) { .add, .addwrap, - => try self.genBinOpMir(.add, lhs_ty, dst_mcv, src_mcv), + => try self.genBinOpMir(.{ ._, .add }, lhs_ty, dst_mcv, src_mcv), .sub, .subwrap, - => try self.genBinOpMir(.sub, lhs_ty, dst_mcv, src_mcv), + => try self.genBinOpMir(.{ ._, .sub }, lhs_ty, dst_mcv, src_mcv), .ptr_add, .ptr_sub, @@ -6088,22 +6193,27 @@ fn genBinOp( const elem_size = lhs_ty.elemType2().abiSize(self.target.*); try self.genIntMulComplexOpMir(rhs_ty, tmp_mcv, .{ .immediate = elem_size }); - try self.genBinOpMir(switch (air_tag) { - .ptr_add => .add, - .ptr_sub => .sub, - else => unreachable, - }, lhs_ty, dst_mcv, tmp_mcv); + try self.genBinOpMir( + switch (air_tag) { + .ptr_add => .{ ._, .add }, + .ptr_sub => .{ ._, .sub }, + else => unreachable, + }, + lhs_ty, + dst_mcv, + tmp_mcv, + ); }, .bool_or, .bit_or, - => try self.genBinOpMir(.@"or", lhs_ty, dst_mcv, src_mcv), + => try self.genBinOpMir(.{ ._, .@"or" }, lhs_ty, dst_mcv, src_mcv), .bool_and, .bit_and, - => try self.genBinOpMir(.@"and", lhs_ty, dst_mcv, src_mcv), + => try self.genBinOpMir(.{ ._, .@"and" }, lhs_ty, dst_mcv, src_mcv), - .xor => try self.genBinOpMir(.xor, lhs_ty, dst_mcv, src_mcv), + .xor => try self.genBinOpMir(.{ ._, .xor }, lhs_ty, dst_mcv, src_mcv), .min, .max, @@ -6129,7 +6239,7 @@ fn genBinOp( }; defer if (mat_mcv_lock) |lock| self.register_manager.unlockReg(lock); - try self.genBinOpMir(.cmp, lhs_ty, dst_mcv, mat_src_mcv); + try self.genBinOpMir(.{ ._, .cmp }, lhs_ty, dst_mcv, mat_src_mcv); const int_info = lhs_ty.intInfo(self.target.*); const cc: Condition = switch (int_info.signedness) { @@ -6206,7 +6316,7 @@ fn genBinOp( } const dst_reg = registerAlias(dst_mcv.getReg().?, abi_size); - const mir_tag = if (@as(?Mir.Inst.Tag, switch (lhs_ty.zigTypeTag()) { + const mir_tag = if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag()) { else => unreachable, .Float => switch (lhs_ty.floatBits(self.target.*)) { 16 => if (self.hasFeature(.f16c)) { @@ -6215,13 +6325,13 @@ fn genBinOp( defer self.register_manager.unlockReg(tmp_lock); if (src_mcv.isMemory()) try self.asmRegisterRegisterMemoryImmediate( - .vpinsrw, + .{ ._, .vpinsrw }, dst_reg, dst_reg, src_mcv.mem(.word), Immediate.u(1), ) else try self.asmRegisterRegisterRegister( - .vpunpcklwd, + .{ ._, .vpunpcklwd }, dst_reg, dst_reg, (if (src_mcv.isRegister()) @@ -6229,15 +6339,15 @@ fn genBinOp( else try self.copyToTmpRegister(rhs_ty, src_mcv)).to128(), ); - try self.asmRegisterRegister(.vcvtph2ps, dst_reg, dst_reg); - try self.asmRegisterRegister(.vmovshdup, tmp_reg, dst_reg); + try self.asmRegisterRegister(.{ ._, .vcvtph2ps }, dst_reg, dst_reg); + try self.asmRegisterRegister(.{ ._, .vmovshdup }, tmp_reg, dst_reg); try self.asmRegisterRegisterRegister( switch (air_tag) { - .add => .vaddss, - .sub => .vsubss, - .div_float, .div_trunc, .div_floor, .div_exact => .vdivss, - .max => .vmaxss, - .min => .vmaxss, + .add => .{ ._, .vaddss }, + .sub => .{ ._, .vsubss }, + .div_float, .div_trunc, .div_floor, .div_exact => .{ ._, .vdivss }, + .max => .{ ._, .vmaxss }, + .min => .{ ._, .vmaxss }, else => unreachable, }, dst_reg, @@ -6245,7 +6355,7 @@ fn genBinOp( tmp_reg, ); try self.asmRegisterRegisterImmediate( - .vcvtps2ph, + .{ ._, .vcvtps2ph }, dst_reg, dst_reg, Immediate.u(0b1_00), @@ -6253,29 +6363,29 @@ fn genBinOp( return dst_mcv; } else null, 32 => switch (air_tag) { - .add => if (self.hasFeature(.avx)) .vaddss else .addss, - .sub => if (self.hasFeature(.avx)) .vsubss else .subss, - .mul => if (self.hasFeature(.avx)) .vmulss else .mulss, 
+ .add => if (self.hasFeature(.avx)) .{ ._, .vaddss } else .{ ._, .addss }, + .sub => if (self.hasFeature(.avx)) .{ ._, .vsubss } else .{ ._, .subss }, + .mul => if (self.hasFeature(.avx)) .{ ._, .vmulss } else .{ ._, .mulss }, .div_float, .div_trunc, .div_floor, .div_exact, - => if (self.hasFeature(.avx)) .vdivss else .divss, - .max => if (self.hasFeature(.avx)) .vmaxss else .maxss, - .min => if (self.hasFeature(.avx)) .vminss else .minss, + => if (self.hasFeature(.avx)) .{ ._, .vdivss } else .{ ._, .divss }, + .max => if (self.hasFeature(.avx)) .{ ._, .vmaxss } else .{ ._, .maxss }, + .min => if (self.hasFeature(.avx)) .{ ._, .vminss } else .{ ._, .minss }, else => unreachable, }, 64 => switch (air_tag) { - .add => if (self.hasFeature(.avx)) .vaddsd else .addsd, - .sub => if (self.hasFeature(.avx)) .vsubsd else .subsd, - .mul => if (self.hasFeature(.avx)) .vmulsd else .mulsd, + .add => if (self.hasFeature(.avx)) .{ ._, .vaddsd } else .{ ._, .addsd }, + .sub => if (self.hasFeature(.avx)) .{ ._, .vsubsd } else .{ ._, .subsd }, + .mul => if (self.hasFeature(.avx)) .{ ._, .vmulsd } else .{ ._, .mulsd }, .div_float, .div_trunc, .div_floor, .div_exact, - => if (self.hasFeature(.avx)) .vdivsd else .divsd, - .max => if (self.hasFeature(.avx)) .vmaxsd else .maxsd, - .min => if (self.hasFeature(.avx)) .vminsd else .minsd, + => if (self.hasFeature(.avx)) .{ ._, .vdivsd } else .{ ._, .divsd }, + .max => if (self.hasFeature(.avx)) .{ ._, .vmaxsd } else .{ ._, .maxsd }, + .min => if (self.hasFeature(.avx)) .{ ._, .vminsd } else .{ ._, .minsd }, else => unreachable, }, 80, 128 => null, @@ -6291,13 +6401,13 @@ fn genBinOp( defer self.register_manager.unlockReg(tmp_lock); if (src_mcv.isMemory()) try self.asmRegisterRegisterMemoryImmediate( - .vpinsrw, + .{ ._, .vpinsrw }, dst_reg, dst_reg, src_mcv.mem(.word), Immediate.u(1), ) else try self.asmRegisterRegisterRegister( - .vpunpcklwd, + .{ ._, .vpunpcklwd }, dst_reg, dst_reg, (if (src_mcv.isRegister()) @@ -6305,15 +6415,15 @@ fn genBinOp( else try self.copyToTmpRegister(rhs_ty, src_mcv)).to128(), ); - try self.asmRegisterRegister(.vcvtph2ps, dst_reg, dst_reg); - try self.asmRegisterRegister(.vmovshdup, tmp_reg, dst_reg); + try self.asmRegisterRegister(.{ ._, .vcvtph2ps }, dst_reg, dst_reg); + try self.asmRegisterRegister(.{ ._, .vmovshdup }, tmp_reg, dst_reg); try self.asmRegisterRegisterRegister( switch (air_tag) { - .add => .vaddss, - .sub => .vsubss, - .div_float, .div_trunc, .div_floor, .div_exact => .vdivss, - .max => .vmaxss, - .min => .vmaxss, + .add => .{ ._, .vaddss }, + .sub => .{ ._, .vsubss }, + .div_float, .div_trunc, .div_floor, .div_exact => .{ ._, .vdivss }, + .max => .{ ._, .vmaxss }, + .min => .{ ._, .vmaxss }, else => unreachable, }, dst_reg, @@ -6321,7 +6431,7 @@ fn genBinOp( tmp_reg, ); try self.asmRegisterRegisterImmediate( - .vcvtps2ph, + .{ ._, .vcvtps2ph }, dst_reg, dst_reg, Immediate.u(0b1_00), @@ -6334,12 +6444,12 @@ fn genBinOp( defer self.register_manager.unlockReg(tmp_lock); if (src_mcv.isMemory()) try self.asmRegisterMemoryImmediate( - .vpinsrd, + .{ ._, .vpinsrd }, dst_reg, src_mcv.mem(.dword), Immediate.u(1), ) else try self.asmRegisterRegisterRegister( - .vunpcklps, + .{ ._, .vunpcklps }, dst_reg, dst_reg, (if (src_mcv.isRegister()) @@ -6347,15 +6457,20 @@ fn genBinOp( else try self.copyToTmpRegister(rhs_ty, src_mcv)).to128(), ); - try self.asmRegisterRegister(.vcvtph2ps, dst_reg, dst_reg); - try self.asmRegisterRegisterRegister(.vmovhlps, tmp_reg, dst_reg, dst_reg); + try self.asmRegisterRegister(.{ ._, .vcvtph2ps }, 
dst_reg, dst_reg); + try self.asmRegisterRegisterRegister( + .{ ._, .vmovhlps }, + tmp_reg, + dst_reg, + dst_reg, + ); try self.asmRegisterRegisterRegister( switch (air_tag) { - .add => .vaddps, - .sub => .vsubps, - .div_float, .div_trunc, .div_floor, .div_exact => .vdivps, - .max => .vmaxps, - .min => .vmaxps, + .add => .{ ._, .vaddps }, + .sub => .{ ._, .vsubps }, + .div_float, .div_trunc, .div_floor, .div_exact => .{ ._, .vdivps }, + .max => .{ ._, .vmaxps }, + .min => .{ ._, .vmaxps }, else => unreachable, }, dst_reg, @@ -6363,7 +6478,7 @@ fn genBinOp( tmp_reg, ); try self.asmRegisterRegisterImmediate( - .vcvtps2ph, + .{ ._, .vcvtps2ph }, dst_reg, dst_reg, Immediate.u(0b1_00), @@ -6375,13 +6490,13 @@ fn genBinOp( const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg); defer self.register_manager.unlockReg(tmp_lock); - try self.asmRegisterRegister(.vcvtph2ps, dst_reg, dst_reg); + try self.asmRegisterRegister(.{ ._, .vcvtph2ps }, dst_reg, dst_reg); if (src_mcv.isMemory()) try self.asmRegisterMemory( - .vcvtph2ps, + .{ ._, .vcvtph2ps }, tmp_reg, src_mcv.mem(.qword), ) else try self.asmRegisterRegister( - .vcvtph2ps, + .{ ._, .vcvtph2ps }, tmp_reg, (if (src_mcv.isRegister()) src_mcv.getReg().? @@ -6390,11 +6505,11 @@ fn genBinOp( ); try self.asmRegisterRegisterRegister( switch (air_tag) { - .add => .vaddps, - .sub => .vsubps, - .div_float, .div_trunc, .div_floor, .div_exact => .vdivps, - .max => .vmaxps, - .min => .vmaxps, + .add => .{ ._, .vaddps }, + .sub => .{ ._, .vsubps }, + .div_float, .div_trunc, .div_floor, .div_exact => .{ ._, .vdivps }, + .max => .{ ._, .vmaxps }, + .min => .{ ._, .vmaxps }, else => unreachable, }, dst_reg, @@ -6402,7 +6517,7 @@ fn genBinOp( tmp_reg, ); try self.asmRegisterRegisterImmediate( - .vcvtps2ph, + .{ ._, .vcvtps2ph }, dst_reg, dst_reg, Immediate.u(0b1_00), @@ -6414,13 +6529,13 @@ fn genBinOp( const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg); defer self.register_manager.unlockReg(tmp_lock); - try self.asmRegisterRegister(.vcvtph2ps, dst_reg.to256(), dst_reg); + try self.asmRegisterRegister(.{ ._, .vcvtph2ps }, dst_reg.to256(), dst_reg); if (src_mcv.isMemory()) try self.asmRegisterMemory( - .vcvtph2ps, + .{ ._, .vcvtph2ps }, tmp_reg, src_mcv.mem(.xword), ) else try self.asmRegisterRegister( - .vcvtph2ps, + .{ ._, .vcvtph2ps }, tmp_reg, (if (src_mcv.isRegister()) src_mcv.getReg().? 
@@ -6429,11 +6544,11 @@ fn genBinOp( ); try self.asmRegisterRegisterRegister( switch (air_tag) { - .add => .vaddps, - .sub => .vsubps, - .div_float, .div_trunc, .div_floor, .div_exact => .vdivps, - .max => .vmaxps, - .min => .vmaxps, + .add => .{ ._, .vaddps }, + .sub => .{ ._, .vsubps }, + .div_float, .div_trunc, .div_floor, .div_exact => .{ ._, .vdivps }, + .max => .{ ._, .vmaxps }, + .min => .{ ._, .vmaxps }, else => unreachable, }, dst_reg.to256(), @@ -6441,7 +6556,7 @@ fn genBinOp( tmp_reg, ); try self.asmRegisterRegisterImmediate( - .vcvtps2ph, + .{ ._, .vcvtps2ph }, dst_reg, dst_reg.to256(), Immediate.u(0b1_00), @@ -6452,76 +6567,76 @@ fn genBinOp( } else null, 32 => switch (lhs_ty.vectorLen()) { 1 => switch (air_tag) { - .add => if (self.hasFeature(.avx)) .vaddss else .addss, - .sub => if (self.hasFeature(.avx)) .vsubss else .subss, - .mul => if (self.hasFeature(.avx)) .vmulss else .mulss, + .add => if (self.hasFeature(.avx)) .{ ._, .vaddss } else .{ ._, .addss }, + .sub => if (self.hasFeature(.avx)) .{ ._, .vsubss } else .{ ._, .subss }, + .mul => if (self.hasFeature(.avx)) .{ ._, .vmulss } else .{ ._, .mulss }, .div_float, .div_trunc, .div_floor, .div_exact, - => if (self.hasFeature(.avx)) .vdivss else .divss, - .max => if (self.hasFeature(.avx)) .vmaxss else .maxss, - .min => if (self.hasFeature(.avx)) .vminss else .minss, + => if (self.hasFeature(.avx)) .{ ._, .vdivss } else .{ ._, .divss }, + .max => if (self.hasFeature(.avx)) .{ ._, .vmaxss } else .{ ._, .maxss }, + .min => if (self.hasFeature(.avx)) .{ ._, .vminss } else .{ ._, .minss }, else => unreachable, }, 2...4 => switch (air_tag) { - .add => if (self.hasFeature(.avx)) .vaddps else .addps, - .sub => if (self.hasFeature(.avx)) .vsubps else .subps, - .mul => if (self.hasFeature(.avx)) .vmulps else .mulps, + .add => if (self.hasFeature(.avx)) .{ ._, .vaddps } else .{ ._, .addps }, + .sub => if (self.hasFeature(.avx)) .{ ._, .vsubps } else .{ ._, .subps }, + .mul => if (self.hasFeature(.avx)) .{ ._, .vmulps } else .{ ._, .mulps }, .div_float, .div_trunc, .div_floor, .div_exact, - => if (self.hasFeature(.avx)) .vdivps else .divps, - .max => if (self.hasFeature(.avx)) .vmaxps else .maxps, - .min => if (self.hasFeature(.avx)) .vminps else .minps, + => if (self.hasFeature(.avx)) .{ ._, .vdivps } else .{ ._, .divps }, + .max => if (self.hasFeature(.avx)) .{ ._, .vmaxps } else .{ ._, .maxps }, + .min => if (self.hasFeature(.avx)) .{ ._, .vminps } else .{ ._, .minps }, else => unreachable, }, 5...8 => if (self.hasFeature(.avx)) switch (air_tag) { - .add => .vaddps, - .sub => .vsubps, - .mul => .vmulps, - .div_float, .div_trunc, .div_floor, .div_exact => .vdivps, - .max => .vmaxps, - .min => .vminps, + .add => .{ ._, .vaddps }, + .sub => .{ ._, .vsubps }, + .mul => .{ ._, .vmulps }, + .div_float, .div_trunc, .div_floor, .div_exact => .{ ._, .vdivps }, + .max => .{ ._, .vmaxps }, + .min => .{ ._, .vminps }, else => unreachable, } else null, else => null, }, 64 => switch (lhs_ty.vectorLen()) { 1 => switch (air_tag) { - .add => if (self.hasFeature(.avx)) .vaddsd else .addsd, - .sub => if (self.hasFeature(.avx)) .vsubsd else .subsd, - .mul => if (self.hasFeature(.avx)) .vmulsd else .mulsd, + .add => if (self.hasFeature(.avx)) .{ ._, .vaddsd } else .{ ._, .addsd }, + .sub => if (self.hasFeature(.avx)) .{ ._, .vsubsd } else .{ ._, .subsd }, + .mul => if (self.hasFeature(.avx)) .{ ._, .vmulsd } else .{ ._, .mulsd }, .div_float, .div_trunc, .div_floor, .div_exact, - => if (self.hasFeature(.avx)) .vdivsd else .divsd, - .max => if 
(self.hasFeature(.avx)) .vmaxsd else .maxsd, - .min => if (self.hasFeature(.avx)) .vminsd else .minsd, + => if (self.hasFeature(.avx)) .{ ._, .vdivsd } else .{ ._, .divsd }, + .max => if (self.hasFeature(.avx)) .{ ._, .vmaxsd } else .{ ._, .maxsd }, + .min => if (self.hasFeature(.avx)) .{ ._, .vminsd } else .{ ._, .minsd }, else => unreachable, }, 2 => switch (air_tag) { - .add => if (self.hasFeature(.avx)) .vaddpd else .addpd, - .sub => if (self.hasFeature(.avx)) .vsubpd else .subpd, - .mul => if (self.hasFeature(.avx)) .vmulpd else .mulpd, + .add => if (self.hasFeature(.avx)) .{ ._, .vaddpd } else .{ ._, .addpd }, + .sub => if (self.hasFeature(.avx)) .{ ._, .vsubpd } else .{ ._, .subpd }, + .mul => if (self.hasFeature(.avx)) .{ ._, .vmulpd } else .{ ._, .mulpd }, .div_float, .div_trunc, .div_floor, .div_exact, - => if (self.hasFeature(.avx)) .vdivpd else .divpd, - .max => if (self.hasFeature(.avx)) .vmaxpd else .maxpd, - .min => if (self.hasFeature(.avx)) .vminpd else .minpd, + => if (self.hasFeature(.avx)) .{ ._, .vdivpd } else .{ ._, .divpd }, + .max => if (self.hasFeature(.avx)) .{ ._, .vmaxpd } else .{ ._, .maxpd }, + .min => if (self.hasFeature(.avx)) .{ ._, .vminpd } else .{ ._, .minpd }, else => unreachable, }, 3...4 => if (self.hasFeature(.avx)) switch (air_tag) { - .add => .vaddpd, - .sub => .vsubpd, - .mul => .vmulpd, - .div_float, .div_trunc, .div_floor, .div_exact => .vdivpd, - .max => .vmaxpd, - .min => .vminpd, + .add => .{ ._, .vaddpd }, + .sub => .{ ._, .vsubpd }, + .mul => .{ ._, .vmulpd }, + .div_float, .div_trunc, .div_floor, .div_exact => .{ ._, .vdivpd }, + .max => .{ ._, .vmaxpd }, + .min => .{ ._, .vminpd }, else => unreachable, } else null, else => null, @@ -6583,7 +6698,13 @@ fn genBinOp( return dst_mcv; } -fn genBinOpMir(self: *Self, mir_tag: Mir.Inst.Tag, ty: Type, dst_mcv: MCValue, src_mcv: MCValue) !void { +fn genBinOpMir( + self: *Self, + mir_tag: Mir.Inst.FixedTag, + ty: Type, + dst_mcv: MCValue, + src_mcv: MCValue, +) !void { const abi_size = @intCast(u32, ty.abiSize(self.target.*)); switch (dst_mcv) { .none, @@ -6788,14 +6909,14 @@ fn genBinOpMir(self: *Self, mir_tag: Mir.Inst.Tag, ty: Type, dst_mcv: MCValue, s }; var off: i32 = 0; while (off < abi_size) : (off += 8) { - const mir_limb_tag = switch (off) { + const mir_limb_tag: Mir.Inst.FixedTag = switch (off) { 0 => mir_tag, - else => switch (mir_tag) { - .add => .adc, - .sub, .cmp => .sbb, + else => switch (mir_tag[1]) { + .add => .{ ._, .adc }, + .sub, .cmp => .{ ._, .sbb }, .@"or", .@"and", .xor => mir_tag, else => return self.fail("TODO genBinOpMir implement large ABI for {s}", .{ - @tagName(mir_tag), + @tagName(mir_tag[1]), }), }, }; @@ -6967,14 +7088,14 @@ fn genIntMulComplexOpMir(self: *Self, dst_ty: Type, dst_mcv: MCValue, src_mcv: M .reserved_frame, => unreachable, .register => |src_reg| try self.asmRegisterRegister( - .imul, + .{ .i_, .mul }, dst_alias, registerAlias(src_reg, abi_size), ), .immediate => |imm| { if (math.cast(i32, imm)) |small| { try self.asmRegisterRegisterImmediate( - .imul, + .{ .i_, .mul }, dst_alias, dst_alias, Immediate.s(small), @@ -6994,19 +7115,19 @@ fn genIntMulComplexOpMir(self: *Self, dst_ty: Type, dst_mcv: MCValue, src_mcv: M .lea_tlv, .lea_frame, => try self.asmRegisterRegister( - .imul, + .{ .i_, .mul }, dst_alias, registerAlias(try self.copyToTmpRegister(dst_ty, src_mcv), abi_size), ), .memory, .indirect, .load_frame => try self.asmRegisterMemory( - .imul, + .{ .i_, .mul }, dst_alias, Memory.sib(Memory.PtrSize.fromSize(abi_size), switch (src_mcv) { .memory => 
|addr| .{ .base = .{ .reg = .ds }, .disp = math.cast(i32, @bitCast(i64, addr)) orelse return self.asmRegisterRegister( - .imul, + .{ .i_, .mul }, dst_alias, registerAlias(try self.copyToTmpRegister(dst_ty, src_mcv), abi_size), ), @@ -7131,12 +7252,12 @@ fn genVarDbgInfo( } fn airTrap(self: *Self) !void { - try self.asmOpOnly(.ud2); + try self.asmOpOnly(.{ ._, .ud2 }); return self.finishAirBookkeeping(); } fn airBreakpoint(self: *Self) !void { - try self.asmOpOnly(.int3); + try self.asmOpOnly(.{ ._, .int3 }); return self.finishAirBookkeeping(); } @@ -7157,7 +7278,7 @@ fn airFence(self: *Self, inst: Air.Inst.Index) !void { switch (order) { .Unordered, .Monotonic => unreachable, .Acquire, .Release, .AcqRel => {}, - .SeqCst => try self.asmOpOnly(.mfence), + .SeqCst => try self.asmOpOnly(.{ ._, .mfence }), } return self.finishAirBookkeeping(); } @@ -7251,7 +7372,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const atom = elf_file.getAtom(atom_index); _ = try atom.getOrCreateOffsetTableEntry(elf_file); const got_addr = atom.getOffsetTableAddress(elf_file); - try self.asmMemory(.call, Memory.sib(.qword, .{ + try self.asmMemory(.{ ._, .call }, Memory.sib(.qword, .{ .base = .{ .reg = .ds }, .disp = @intCast(i32, got_addr), })); @@ -7259,12 +7380,12 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const atom = try coff_file.getOrCreateAtomForDecl(func.owner_decl); const sym_index = coff_file.getAtom(atom).getSymbolIndex().?; try self.genSetReg(.rax, Type.usize, .{ .lea_got = sym_index }); - try self.asmRegister(.call, .rax); + try self.asmRegister(.{ ._, .call }, .rax); } else if (self.bin_file.cast(link.File.MachO)) |macho_file| { const atom = try macho_file.getOrCreateAtomForDecl(func.owner_decl); const sym_index = macho_file.getAtom(atom).getSymbolIndex().?; try self.genSetReg(.rax, Type.usize, .{ .lea_got = sym_index }); - try self.asmRegister(.call, .rax); + try self.asmRegister(.{ ._, .call }, .rax); } else if (self.bin_file.cast(link.File.Plan9)) |p9| { const decl_block_index = try p9.seeDecl(func.owner_decl); const decl_block = p9.getDeclBlock(decl_block_index); @@ -7273,7 +7394,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const got_addr = p9.bases.data; const got_index = decl_block.got_index.?; const fn_got_addr = got_addr + got_index * ptr_bytes; - try self.asmMemory(.call, Memory.sib(.qword, .{ + try self.asmMemory(.{ ._, .call }, Memory.sib(.qword, .{ .base = .{ .reg = .ds }, .disp = @intCast(i32, fn_got_addr), })); @@ -7296,7 +7417,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier }), } }, }); - try self.asmRegister(.call, .rax); + try self.asmRegister(.{ ._, .call }, .rax); } else if (self.bin_file.cast(link.File.MachO)) |macho_file| { const atom_index = try self.owner.getSymbolIndex(self); const sym_index = try macho_file.getGlobalSymbol(decl_name, lib_name); @@ -7318,7 +7439,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier assert(ty.zigTypeTag() == .Pointer); const mcv = try self.resolveInst(callee); try self.genSetReg(.rax, Type.usize, mcv); - try self.asmRegister(.call, .rax); + try self.asmRegister(.{ ._, .call }, .rax); } var bt = self.liveness.iterateBigTomb(inst); @@ -7408,7 +7529,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { defer if (dst_lock) |lock| self.register_manager.unlockReg(lock); const src_mcv = if (flipped) lhs_mcv else rhs_mcv; - try 
self.genBinOpMir(.cmp, ty, dst_mcv, src_mcv); + try self.genBinOpMir(.{ ._, .cmp }, ty, dst_mcv, src_mcv); break :result Condition.fromCompareOperator( if (ty.isAbiInt()) ty.intInfo(self.target.*).signedness else .unsigned, if (flipped) op.reverse() else op, @@ -7442,13 +7563,13 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { defer self.register_manager.unlockReg(tmp2_lock); if (src_mcv.isMemory()) try self.asmRegisterRegisterMemoryImmediate( - .vpinsrw, + .{ ._, .vpinsrw }, tmp1_reg, dst_reg.to128(), src_mcv.mem(.word), Immediate.u(1), ) else try self.asmRegisterRegisterRegister( - .vpunpcklwd, + .{ ._, .vpunpcklwd }, tmp1_reg, dst_reg.to128(), (if (src_mcv.isRegister()) @@ -7456,14 +7577,24 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { else try self.copyToTmpRegister(ty, src_mcv)).to128(), ); - try self.asmRegisterRegister(.vcvtph2ps, tmp1_reg, tmp1_reg); - try self.asmRegisterRegister(.vmovshdup, tmp2_reg, tmp1_reg); - try self.genBinOpMir(.ucomiss, ty, tmp1_mcv, tmp2_mcv); + try self.asmRegisterRegister(.{ ._, .vcvtph2ps }, tmp1_reg, tmp1_reg); + try self.asmRegisterRegister(.{ ._, .vmovshdup }, tmp2_reg, tmp1_reg); + try self.genBinOpMir(.{ ._, .ucomiss }, ty, tmp1_mcv, tmp2_mcv); } else return self.fail("TODO implement airCmp for {}", .{ ty.fmt(self.bin_file.options.module.?), }), - 32 => try self.genBinOpMir(.ucomiss, ty, .{ .register = dst_reg }, src_mcv), - 64 => try self.genBinOpMir(.ucomisd, ty, .{ .register = dst_reg }, src_mcv), + 32 => try self.genBinOpMir( + .{ ._, .ucomiss }, + ty, + .{ .register = dst_reg }, + src_mcv, + ), + 64 => try self.genBinOpMir( + .{ ._, .ucomisd }, + ty, + .{ .register = dst_reg }, + src_mcv, + ), else => return self.fail("TODO implement airCmp for {}", .{ ty.fmt(self.bin_file.options.module.?), }), @@ -7507,7 +7638,7 @@ fn airCmpLtErrorsLen(self: *Self, inst: Air.Inst.Index) !void { else => try self.copyToTmpRegister(op_ty, op_mcv), }; try self.asmRegisterMemory( - .cmp, + .{ ._, .cmp }, registerAlias(dst_reg, op_abi_size), Memory.sib(Memory.PtrSize.fromSize(op_abi_size), .{ .base = .{ .reg = addr_reg } }), ); @@ -7627,7 +7758,7 @@ fn genCondBrMir(self: *Self, ty: Type, mcv: MCValue) !u32 { }, .register => |reg| { try self.spillEflagsIfOccupied(); - try self.asmRegisterImmediate(.@"test", reg, Immediate.u(1)); + try self.asmRegisterImmediate(.{ ._, .@"test" }, reg, Immediate.u(1)); return self.asmJccReloc(undefined, .e); }, .immediate, @@ -7730,13 +7861,13 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC const some_abi_size = @intCast(u32, some_info.ty.abiSize(self.target.*)); const alias_reg = registerAlias(opt_reg, some_abi_size); assert(some_abi_size * 8 == alias_reg.bitSize()); - try self.asmRegisterRegister(.@"test", alias_reg, alias_reg); + try self.asmRegisterRegister(.{ ._, .@"test" }, alias_reg, alias_reg); return .{ .eflags = .z }; } assert(some_info.ty.tag() == .bool); const opt_abi_size = @intCast(u32, opt_ty.abiSize(self.target.*)); try self.asmRegisterImmediate( - .bt, + .{ ._, .bt }, registerAlias(opt_reg, opt_abi_size), Immediate.u(@intCast(u6, some_info.off * 8)), ); @@ -7755,7 +7886,7 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC try self.genSetReg(addr_reg, Type.usize, opt_mcv.address()); const some_abi_size = @intCast(u32, some_info.ty.abiSize(self.target.*)); try self.asmMemoryImmediate( - .cmp, + .{ ._, .cmp }, Memory.sib(Memory.PtrSize.fromSize(some_abi_size), .{ .base = .{ .reg = 
addr_reg }, .disp = some_info.off, @@ -7768,7 +7899,7 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC .indirect, .load_frame => { const some_abi_size = @intCast(u32, some_info.ty.abiSize(self.target.*)); try self.asmMemoryImmediate( - .cmp, + .{ ._, .cmp }, Memory.sib(Memory.PtrSize.fromSize(some_abi_size), switch (opt_mcv) { .indirect => |reg_off| .{ .base = .{ .reg = reg_off.reg }, @@ -7810,7 +7941,7 @@ fn isNullPtr(self: *Self, inst: Air.Inst.Index, ptr_ty: Type, ptr_mcv: MCValue) const some_abi_size = @intCast(u32, some_info.ty.abiSize(self.target.*)); try self.asmMemoryImmediate( - .cmp, + .{ ._, .cmp }, Memory.sib(Memory.PtrSize.fromSize(some_abi_size), .{ .base = .{ .reg = ptr_reg }, .disp = some_info.off, @@ -7841,14 +7972,24 @@ fn isErr(self: *Self, maybe_inst: ?Air.Inst.Index, ty: Type, operand: MCValue) ! const tmp_reg = try self.copyToTmpRegister(ty, operand); if (err_off > 0) { const shift = @intCast(u6, err_off * 8); - try self.genShiftBinOpMir(.shr, ty, .{ .register = tmp_reg }, .{ .immediate = shift }); + try self.genShiftBinOpMir( + .{ ._r, .sh }, + ty, + .{ .register = tmp_reg }, + .{ .immediate = shift }, + ); } else { try self.truncateRegister(Type.anyerror, tmp_reg); } - try self.genBinOpMir(.cmp, Type.anyerror, .{ .register = tmp_reg }, .{ .immediate = 0 }); + try self.genBinOpMir( + .{ ._, .cmp }, + Type.anyerror, + .{ .register = tmp_reg }, + .{ .immediate = 0 }, + ); }, .load_frame => |frame_addr| try self.genBinOpMir( - .cmp, + .{ ._, .cmp }, Type.anyerror, .{ .load_frame = .{ .index = frame_addr.index, @@ -8073,7 +8214,7 @@ fn airSwitchBr(self: *Self, inst: Air.Inst.Index) !void { try self.spillEflagsIfOccupied(); for (items, relocs, 0..) |item, *reloc, i| { const item_mcv = try self.resolveInst(item); - try self.genBinOpMir(.cmp, condition_ty, condition, item_mcv); + try self.genBinOpMir(.{ ._, .cmp }, condition_ty, condition, item_mcv); reloc.* = try self.asmJccReloc(undefined, if (i < relocs.len - 1) .e else .ne); } @@ -8284,7 +8425,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void { .qword else null; - const mnem = mnem: { + const mnem_tag = Mir.Inst.FixedTag{ ._, mnem: { if (mnem_size) |_| { if (std.meta.stringToEnum(Mir.Inst.Tag, mnem_str[0 .. 
mnem_str.len - 1])) |mnem| { break :mnem mnem; @@ -8292,7 +8433,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void { } break :mnem std.meta.stringToEnum(Mir.Inst.Tag, mnem_str) orelse return self.fail("Invalid mnemonic: '{s}'", .{mnem_str}); - }; + } }; var op_it = mem.tokenize(u8, mnem_it.rest(), ","); var ops = [1]encoder.Instruction.Operand{.none} ** 4; @@ -8343,51 +8484,51 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void { } else if (op_it.next()) |op_str| return self.fail("Extra operand: '{s}'", .{op_str}); (switch (ops[0]) { - .none => self.asmOpOnly(mnem), + .none => self.asmOpOnly(mnem_tag), .reg => |reg0| switch (ops[1]) { - .none => self.asmRegister(mnem, reg0), + .none => self.asmRegister(mnem_tag, reg0), .reg => |reg1| switch (ops[2]) { - .none => self.asmRegisterRegister(mnem, reg1, reg0), + .none => self.asmRegisterRegister(mnem_tag, reg1, reg0), .reg => |reg2| switch (ops[3]) { - .none => self.asmRegisterRegisterRegister(mnem, reg2, reg1, reg0), + .none => self.asmRegisterRegisterRegister(mnem_tag, reg2, reg1, reg0), else => error.InvalidInstruction, }, .mem => |mem2| switch (ops[3]) { - .none => self.asmMemoryRegisterRegister(mnem, mem2, reg1, reg0), + .none => self.asmMemoryRegisterRegister(mnem_tag, mem2, reg1, reg0), else => error.InvalidInstruction, }, else => error.InvalidInstruction, }, .mem => |mem1| switch (ops[2]) { - .none => self.asmMemoryRegister(mnem, mem1, reg0), + .none => self.asmMemoryRegister(mnem_tag, mem1, reg0), else => error.InvalidInstruction, }, else => error.InvalidInstruction, }, .mem => |mem0| switch (ops[1]) { - .none => self.asmMemory(mnem, mem0), + .none => self.asmMemory(mnem_tag, mem0), .reg => |reg1| switch (ops[2]) { - .none => self.asmRegisterMemory(mnem, reg1, mem0), + .none => self.asmRegisterMemory(mnem_tag, reg1, mem0), else => error.InvalidInstruction, }, else => error.InvalidInstruction, }, .imm => |imm0| switch (ops[1]) { - .none => self.asmImmediate(mnem, imm0), + .none => self.asmImmediate(mnem_tag, imm0), .reg => |reg1| switch (ops[2]) { - .none => self.asmRegisterImmediate(mnem, reg1, imm0), + .none => self.asmRegisterImmediate(mnem_tag, reg1, imm0), .reg => |reg2| switch (ops[3]) { - .none => self.asmRegisterRegisterImmediate(mnem, reg2, reg1, imm0), + .none => self.asmRegisterRegisterImmediate(mnem_tag, reg2, reg1, imm0), else => error.InvalidInstruction, }, .mem => |mem2| switch (ops[3]) { - .none => self.asmMemoryRegisterImmediate(mnem, mem2, reg1, imm0), + .none => self.asmMemoryRegisterImmediate(mnem_tag, mem2, reg1, imm0), else => error.InvalidInstruction, }, else => error.InvalidInstruction, }, .mem => |mem1| switch (ops[2]) { - .none => self.asmMemoryImmediate(mnem, mem1, imm0), + .none => self.asmMemoryImmediate(mnem_tag, mem1, imm0), else => error.InvalidInstruction, }, else => error.InvalidInstruction, @@ -8396,7 +8537,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void { error.InvalidInstruction => return self.fail( "Invalid instruction: '{s} {s} {s} {s} {s}'", .{ - @tagName(mnem), + @tagName(mnem_tag[1]), @tagName(ops[0]), @tagName(ops[1]), @tagName(ops[2]), @@ -8427,44 +8568,47 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void { return self.finishAirResult(inst, result); } -fn movMirTag(self: *Self, ty: Type, aligned: bool) !Mir.Inst.Tag { +fn movMirTag(self: *Self, ty: Type, aligned: bool) !Mir.Inst.FixedTag { switch (ty.zigTypeTag()) { - else => return .mov, + else => return .{ ._, .mov }, .Float => switch (ty.floatBits(self.target.*)) { 16 => unreachable, // needs special handling - 32 => 
return if (self.hasFeature(.avx)) .vmovss else .movss, - 64 => return if (self.hasFeature(.avx)) .vmovsd else .movsd, + 32 => return if (self.hasFeature(.avx)) .{ ._, .vmovss } else .{ ._, .movss }, + 64 => return if (self.hasFeature(.avx)) .{ ._, .vmovsd } else .{ ._, .movsd }, 128 => return if (self.hasFeature(.avx)) - if (aligned) .vmovaps else .vmovups - else if (aligned) .movaps else .movups, + if (aligned) .{ ._, .vmovaps } else .{ ._, .vmovups } + else if (aligned) .{ ._, .movaps } else .{ ._, .movups }, else => {}, }, .Vector => switch (ty.childType().zigTypeTag()) { .Float => switch (ty.childType().floatBits(self.target.*)) { 16 => switch (ty.vectorLen()) { 1 => unreachable, // needs special handling - 2 => return if (self.hasFeature(.avx)) .vmovss else .movss, - 3...4 => return if (self.hasFeature(.avx)) .vmovsd else .movsd, + 2 => return if (self.hasFeature(.avx)) .{ ._, .vmovss } else .{ ._, .movss }, + 3...4 => return if (self.hasFeature(.avx)) .{ ._, .vmovsd } else .{ ._, .movsd }, 5...8 => return if (self.hasFeature(.avx)) - if (aligned) .vmovaps else .vmovups - else if (aligned) .movaps else .movups, - 9...16 => if (self.hasFeature(.avx)) return if (aligned) .vmovaps else .vmovups, + if (aligned) .{ ._, .vmovaps } else .{ ._, .vmovups } + else if (aligned) .{ ._, .movaps } else .{ ._, .movups }, + 9...16 => if (self.hasFeature(.avx)) + return if (aligned) .{ ._, .vmovaps } else .{ ._, .vmovups }, else => {}, }, 32 => switch (ty.vectorLen()) { - 1 => return if (self.hasFeature(.avx)) .vmovss else .movss, + 1 => return if (self.hasFeature(.avx)) .{ ._, .vmovss } else .{ ._, .movss }, 2...4 => return if (self.hasFeature(.avx)) - if (aligned) .vmovaps else .vmovups - else if (aligned) .movaps else .movups, - 5...8 => if (self.hasFeature(.avx)) return if (aligned) .vmovaps else .vmovups, + if (aligned) .{ ._, .vmovaps } else .{ ._, .vmovups } + else if (aligned) .{ ._, .movaps } else .{ ._, .movups }, + 5...8 => if (self.hasFeature(.avx)) + return if (aligned) .{ ._, .vmovaps } else .{ ._, .vmovups }, else => {}, }, 64 => switch (ty.vectorLen()) { - 1 => return if (self.hasFeature(.avx)) .vmovsd else .movsd, + 1 => return if (self.hasFeature(.avx)) .{ ._, .vmovsd } else .{ ._, .movsd }, 2 => return if (self.hasFeature(.avx)) - if (aligned) .vmovaps else .vmovups - else if (aligned) .movaps else .movups, - 3...4 => if (self.hasFeature(.avx)) return if (aligned) .vmovaps else .vmovups, + if (aligned) .{ ._, .vmovaps } else .{ ._, .vmovups } + else if (aligned) .{ ._, .movaps } else .{ ._, .movups }, + 3...4 => if (self.hasFeature(.avx)) + return if (aligned) .{ ._, .vmovaps } else .{ ._, .vmovups }, else => {}, }, else => {}, @@ -8558,19 +8702,19 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr if (imm == 0) { // 32-bit moves zero-extend to 64-bit, so xoring the 32-bit // register is the fastest way to zero a register. - try self.asmRegisterRegister(.xor, dst_reg.to32(), dst_reg.to32()); + try self.asmRegisterRegister(.{ ._, .xor }, dst_reg.to32(), dst_reg.to32()); } else if (abi_size > 4 and math.cast(u32, imm) != null) { // 32-bit moves zero-extend to 64-bit. 
- try self.asmRegisterImmediate(.mov, dst_reg.to32(), Immediate.u(imm)); + try self.asmRegisterImmediate(.{ ._, .mov }, dst_reg.to32(), Immediate.u(imm)); } else if (abi_size <= 4 and @bitCast(i64, imm) < 0) { try self.asmRegisterImmediate( - .mov, + .{ ._, .mov }, registerAlias(dst_reg, abi_size), Immediate.s(@intCast(i32, @bitCast(i64, imm))), ); } else { try self.asmRegisterImmediate( - .mov, + .{ ._, .mov }, registerAlias(dst_reg, abi_size), Immediate.u(imm), ); @@ -8579,18 +8723,18 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr .register => |src_reg| if (dst_reg.id() != src_reg.id()) try self.asmRegisterRegister( if ((dst_reg.class() == .floating_point) == (src_reg.class() == .floating_point)) switch (ty.zigTypeTag()) { - else => .mov, - .Float, .Vector => .movaps, + else => .{ ._, .mov }, + .Float, .Vector => .{ ._, .movaps }, } else switch (abi_size) { 2 => return try self.asmRegisterRegisterImmediate( - if (dst_reg.class() == .floating_point) .pinsrw else .pextrw, + if (dst_reg.class() == .floating_point) .{ ._, .pinsrw } else .{ ._, .pextrw }, registerAlias(dst_reg, 4), registerAlias(src_reg, 4), Immediate.u(0), ), - 4 => .movd, - 8 => .movq, + 4 => .{ ._d, .mov }, + 8 => .{ ._q, .mov }, else => return self.fail( "unsupported register copy from {s} to {s}", .{ @tagName(src_reg), @tagName(dst_reg) }, @@ -8617,7 +8761,7 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr }); if (ty.isRuntimeFloat() and ty.floatBits(self.target.*) == 16) try self.asmRegisterMemoryImmediate( - .pinsrw, + .{ ._, .pinsrw }, registerAlias(dst_reg, abi_size), src_mem, Immediate.u(0), @@ -8627,14 +8771,14 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr switch (src_mcv) { .register_offset => |reg_off| switch (reg_off.off) { 0 => return self.genSetReg(dst_reg, ty, .{ .register = reg_off.reg }), - else => .lea, + else => .{ ._, .lea }, }, .indirect => try self.movMirTag(ty, false), .load_frame => |frame_addr| try self.movMirTag( ty, self.getFrameAddrAlignment(frame_addr) >= ty.abiAlignment(self.target.*), ), - .lea_frame => .lea, + .lea_frame => .{ ._, .lea }, else => unreachable, }, registerAlias(dst_reg, abi_size), @@ -8650,7 +8794,7 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr }); return if (ty.isRuntimeFloat() and ty.floatBits(self.target.*) == 16) self.asmRegisterMemoryImmediate( - .pinsrw, + .{ ._, .pinsrw }, registerAlias(dst_reg, abi_size), src_mem, Immediate.u(0), @@ -8694,7 +8838,7 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr }); if (ty.isRuntimeFloat() and ty.floatBits(self.target.*) == 16) try self.asmRegisterMemoryImmediate( - .pinsrw, + .{ ._, .pinsrw }, registerAlias(dst_reg, abi_size), src_mem, Immediate.u(0), @@ -8743,7 +8887,7 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr } }, }); // TODO: spill registers before calling - try self.asmMemory(.call, Memory.sib(.qword, .{ .base = .{ .reg = .rdi } })); + try self.asmMemory(.{ ._, .call }, Memory.sib(.qword, .{ .base = .{ .reg = .rdi } })); try self.genSetReg(dst_reg.to64(), Type.usize, .{ .register = .rax }); } else return self.fail("TODO emit ptr to TLV sequence on {s}", .{ @tagName(self.bin_file.tag), @@ -8770,7 +8914,7 @@ fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCVal else Immediate.u(@intCast(u32, imm)); try self.asmMemoryImmediate( - .mov, + .{ ._, .mov }, 
Memory.sib(Memory.PtrSize.fromSize(abi_size), .{ .base = base, .disp = disp }), immediate, ); @@ -8778,14 +8922,14 @@ fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCVal 3, 5...7 => unreachable, else => if (math.cast(i32, @bitCast(i64, imm))) |small| { try self.asmMemoryImmediate( - .mov, + .{ ._, .mov }, Memory.sib(Memory.PtrSize.fromSize(abi_size), .{ .base = base, .disp = disp }), Immediate.s(small), ); } else { var offset: i32 = 0; while (offset < abi_size) : (offset += 4) try self.asmMemoryImmediate( - .mov, + .{ ._, .mov }, Memory.sib(.dword, .{ .base = base, .disp = disp + offset }), if (ty.isSignedInt()) Immediate.s(@truncate( @@ -8808,7 +8952,7 @@ fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCVal ); if (ty.isRuntimeFloat() and ty.floatBits(self.target.*) == 16) try self.asmMemoryRegisterImmediate( - .pextrw, + .{ ._, .pextrw }, dst_mem, src_reg.to128(), Immediate.u(0), @@ -8904,7 +9048,7 @@ fn genInlineMemcpyRegisterRegister( while (remainder > 0) { const nearest_power_of_two = @as(u6, 1) << math.log2_int(u3, @intCast(u3, remainder)); try self.asmMemoryRegister( - .mov, + .{ ._, .mov }, Memory.sib(Memory.PtrSize.fromSize(nearest_power_of_two), .{ .base = dst_reg, .disp = -next_offset, @@ -8913,7 +9057,7 @@ fn genInlineMemcpyRegisterRegister( ); if (nearest_power_of_two > 1) { - try self.genShiftBinOpMir(.shr, ty, .{ .register = tmp_reg }, .{ + try self.genShiftBinOpMir(.{ ._r, .sh }, ty, .{ .register = tmp_reg }, .{ .immediate = nearest_power_of_two * 8, }); } @@ -8924,8 +9068,8 @@ fn genInlineMemcpyRegisterRegister( } else { try self.asmMemoryRegister( switch (src_reg.class()) { - .general_purpose, .segment => .mov, - .floating_point => .movss, + .general_purpose, .segment => .{ ._, .mov }, + .floating_point => .{ ._, .movss }, }, Memory.sib(Memory.PtrSize.fromSize(abi_size), .{ .base = dst_reg, .disp = -offset }), registerAlias(src_reg, abi_size), @@ -8938,11 +9082,7 @@ fn genInlineMemcpy(self: *Self, dst_ptr: MCValue, src_ptr: MCValue, len: MCValue try self.genSetReg(.rdi, Type.usize, dst_ptr); try self.genSetReg(.rsi, Type.usize, src_ptr); try self.genSetReg(.rcx, Type.usize, len); - _ = try self.addInst(.{ - .tag = .mov, - .ops = .none, - .data = .{ .none = .{ .fixes = .@"rep _sb" } }, - }); + try self.asmOpOnly(.{ .@"rep _sb", .mov }); } fn genInlineMemset(self: *Self, dst_ptr: MCValue, value: MCValue, len: MCValue) InnerError!void { @@ -8950,11 +9090,7 @@ fn genInlineMemset(self: *Self, dst_ptr: MCValue, value: MCValue, len: MCValue) try self.genSetReg(.rdi, Type.usize, dst_ptr); try self.genSetReg(.al, Type.u8, value); try self.genSetReg(.rcx, Type.usize, len); - _ = try self.addInst(.{ - .tag = .sto, - .ops = .none, - .data = .{ .none = .{ .fixes = .@"rep _sb" } }, - }); + try self.asmOpOnly(.{ .@"rep _sb", .sto }); } fn genLazySymbolRef( @@ -8972,14 +9108,14 @@ fn genLazySymbolRef( const got_mem = Memory.sib(.qword, .{ .base = .{ .reg = .ds }, .disp = @intCast(i32, got_addr) }); switch (tag) { - .lea, .mov => try self.asmRegisterMemory(.mov, reg.to64(), got_mem), - .call => try self.asmMemory(.call, got_mem), + .lea, .mov => try self.asmRegisterMemory(.{ ._, .mov }, reg.to64(), got_mem), + .call => try self.asmMemory(.{ ._, .call }, got_mem), else => unreachable, } switch (tag) { .lea, .call => {}, .mov => try self.asmRegisterMemory( - tag, + .{ ._, tag }, reg.to64(), Memory.sib(.qword, .{ .base = .{ .reg = reg.to64() } }), ), @@ -8996,7 +9132,7 @@ fn genLazySymbolRef( } switch (tag) { .lea, .mov => {}, - .call 
=> try self.asmRegister(.call, reg), + .call => try self.asmRegister(.{ ._, .call }, reg), else => unreachable, } } else if (self.bin_file.cast(link.File.MachO)) |macho_file| { @@ -9010,7 +9146,7 @@ fn genLazySymbolRef( } switch (tag) { .lea, .mov => {}, - .call => try self.asmRegister(.call, reg), + .call => try self.asmRegister(.{ ._, .call }, reg), else => unreachable, } } else { @@ -9115,13 +9251,13 @@ fn airIntToFloat(self: *Self, inst: Air.Inst.Index) !void { try self.asmRegisterRegister(switch (dst_ty.floatBits(self.target.*)) { 32 => if (Target.x86.featureSetHas(self.target.cpu.features, .sse)) - .cvtsi2ss + .{ ._, .cvtsi2ss } else return self.fail("TODO implement airIntToFloat from {} to {} without sse", .{ src_ty.fmt(self.bin_file.options.module.?), dst_ty.fmt(self.bin_file.options.module.?), }), 64 => if (Target.x86.featureSetHas(self.target.cpu.features, .sse2)) - .cvtsi2sd + .{ ._, .cvtsi2sd } else return self.fail("TODO implement airIntToFloat from {} to {} without sse2", .{ src_ty.fmt(self.bin_file.options.module.?), dst_ty.fmt(self.bin_file.options.module.?), @@ -9161,7 +9297,7 @@ fn airFloatToInt(self: *Self, inst: Air.Inst.Index) !void { }, }; try self.asmMemory( - .fld, + .{ .f_, .ld }, Memory.sib(Memory.PtrSize.fromSize(src_abi_size), .{ .base = .{ .frame = frame_addr.index }, .disp = frame_addr.off, @@ -9171,7 +9307,7 @@ fn airFloatToInt(self: *Self, inst: Air.Inst.Index) !void { // convert const stack_dst = try self.allocRegOrMem(inst, false); try self.asmMemory( - .fisttp, + .{ .f_p, .istt }, Memory.sib(Memory.PtrSize.fromSize(dst_abi_size), .{ .base = .{ .frame = stack_dst.load_frame.index }, .disp = stack_dst.load_frame.off, @@ -9227,22 +9363,11 @@ fn airCmpxchg(self: *Self, inst: Air.Inst.Index) !void { defer if (ptr_lock) |lock| self.register_manager.unlockReg(lock); try self.spillEflagsIfOccupied(); - _ = try self.addInst(if (val_abi_size <= 8) .{ - .tag = .cmpxchg, - .ops = .mr_sib, - .data = .{ .rx = .{ - .fixes = .@"lock _", - .r1 = registerAlias(new_reg.?, val_abi_size), - .payload = try self.addExtra(Mir.MemorySib.encode(ptr_mem)), - } }, - } else .{ - .tag = .cmpxchg, - .ops = .m_sib, - .data = .{ .x = .{ - .fixes = .@"lock _16b", - .payload = try self.addExtra(Mir.MemorySib.encode(ptr_mem)), - } }, - }); + if (val_abi_size <= 8) try self.asmMemoryRegister( + .{ .@"lock _", .cmpxchg }, + ptr_mem, + registerAlias(new_reg.?, val_abi_size), + ) else try self.asmMemory(.{ .@"lock _16b", .cmpxchg }, ptr_mem); const result: MCValue = result: { if (self.liveness.isUnused(inst)) break :result .unreach; @@ -9340,21 +9465,17 @@ fn atomicOp( try self.genSetReg(dst_reg, val_ty, val_mcv); if (rmw_op == std.builtin.AtomicRmwOp.Sub and tag == .xadd) { - try self.genUnOpMir(.neg, val_ty, dst_mcv); + try self.genUnOpMir(.{ ._, .neg }, val_ty, dst_mcv); } - _ = try self.addInst(.{ - .tag = tag, - .ops = .mr_sib, - .data = .{ .rx = .{ - .fixes = switch (tag) { - .mov, .xchg => ._, - .xadd, .add, .sub, .@"and", .@"or", .xor => .@"lock _", - else => unreachable, - }, - .r1 = registerAlias(dst_reg, val_abi_size), - .payload = try self.addExtra(Mir.MemorySib.encode(ptr_mem)), - } }, - }); + try self.asmMemoryRegister( + switch (tag) { + .mov, .xchg => .{ ._, tag }, + .xadd, .add, .sub, .@"and", .@"or", .xor => .{ .@"lock _", tag }, + else => unreachable, + }, + ptr_mem, + registerAlias(dst_reg, val_abi_size), + ); return if (unused) .unreach else dst_mcv; }, @@ -9364,22 +9485,22 @@ fn atomicOp( const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg); defer 
self.register_manager.unlockReg(tmp_lock); - try self.asmRegisterMemory(.mov, registerAlias(.rax, val_abi_size), ptr_mem); + try self.asmRegisterMemory(.{ ._, .mov }, registerAlias(.rax, val_abi_size), ptr_mem); const loop = @intCast(u32, self.mir_instructions.len); if (rmw_op != std.builtin.AtomicRmwOp.Xchg) { try self.genSetReg(tmp_reg, val_ty, .{ .register = .rax }); } if (rmw_op) |op| switch (op) { .Xchg => try self.genSetReg(tmp_reg, val_ty, val_mcv), - .Add => try self.genBinOpMir(.add, val_ty, tmp_mcv, val_mcv), - .Sub => try self.genBinOpMir(.sub, val_ty, tmp_mcv, val_mcv), - .And => try self.genBinOpMir(.@"and", val_ty, tmp_mcv, val_mcv), + .Add => try self.genBinOpMir(.{ ._, .add }, val_ty, tmp_mcv, val_mcv), + .Sub => try self.genBinOpMir(.{ ._, .sub }, val_ty, tmp_mcv, val_mcv), + .And => try self.genBinOpMir(.{ ._, .@"and" }, val_ty, tmp_mcv, val_mcv), .Nand => { - try self.genBinOpMir(.@"and", val_ty, tmp_mcv, val_mcv); - try self.genUnOpMir(.not, val_ty, tmp_mcv); + try self.genBinOpMir(.{ ._, .@"and" }, val_ty, tmp_mcv, val_mcv); + try self.genUnOpMir(.{ ._, .not }, val_ty, tmp_mcv); }, - .Or => try self.genBinOpMir(.@"or", val_ty, tmp_mcv, val_mcv), - .Xor => try self.genBinOpMir(.xor, val_ty, tmp_mcv, val_mcv), + .Or => try self.genBinOpMir(.{ ._, .@"or" }, val_ty, tmp_mcv, val_mcv), + .Xor => try self.genBinOpMir(.{ ._, .xor }, val_ty, tmp_mcv, val_mcv), .Min, .Max => { const cc: Condition = switch (if (val_ty.isAbiInt()) val_ty.intInfo(self.target.*).signedness @@ -9397,7 +9518,7 @@ fn atomicOp( }, }; - try self.genBinOpMir(.cmp, val_ty, tmp_mcv, val_mcv); + try self.genBinOpMir(.{ ._, .cmp }, val_ty, tmp_mcv, val_mcv); const cmov_abi_size = @max(val_abi_size, 2); switch (val_mcv) { .register => |val_reg| try self.asmCmovccRegisterRegister( @@ -9421,24 +9542,20 @@ fn atomicOp( } }, }; - _ = try self.addInst(.{ - .tag = .cmpxchg, - .ops = .mr_sib, - .data = .{ .rx = .{ - .fixes = .@"lock _", - .r1 = registerAlias(tmp_reg, val_abi_size), - .payload = try self.addExtra(Mir.MemorySib.encode(ptr_mem)), - } }, - }); + try self.asmMemoryRegister( + .{ .@"lock _", .cmpxchg }, + ptr_mem, + registerAlias(tmp_reg, val_abi_size), + ); _ = try self.asmJccReloc(loop, .ne); return if (unused) .unreach else .{ .register = .rax }; } else { - try self.asmRegisterMemory(.mov, .rax, Memory.sib(.qword, .{ + try self.asmRegisterMemory(.{ ._, .mov }, .rax, Memory.sib(.qword, .{ .base = ptr_mem.sib.base, .scale_index = ptr_mem.scaleIndex(), .disp = ptr_mem.sib.disp + 0, })); - try self.asmRegisterMemory(.mov, .rdx, Memory.sib(.qword, .{ + try self.asmRegisterMemory(.{ ._, .mov }, .rdx, Memory.sib(.qword, .{ .base = ptr_mem.sib.base, .scale_index = ptr_mem.scaleIndex(), .disp = ptr_mem.sib.disp + 8, @@ -9453,58 +9570,51 @@ fn atomicOp( const val_lo_mem = val_mem_mcv.mem(.qword); const val_hi_mem = val_mem_mcv.address().offset(8).deref().mem(.qword); if (rmw_op != std.builtin.AtomicRmwOp.Xchg) { - try self.asmRegisterRegister(.mov, .rbx, .rax); - try self.asmRegisterRegister(.mov, .rcx, .rdx); + try self.asmRegisterRegister(.{ ._, .mov }, .rbx, .rax); + try self.asmRegisterRegister(.{ ._, .mov }, .rcx, .rdx); } if (rmw_op) |op| switch (op) { .Xchg => { - try self.asmRegisterMemory(.mov, .rbx, val_lo_mem); - try self.asmRegisterMemory(.mov, .rcx, val_hi_mem); + try self.asmRegisterMemory(.{ ._, .mov }, .rbx, val_lo_mem); + try self.asmRegisterMemory(.{ ._, .mov }, .rcx, val_hi_mem); }, .Add => { - try self.asmRegisterMemory(.add, .rbx, val_lo_mem); - try self.asmRegisterMemory(.adc, .rcx, 
val_hi_mem); + try self.asmRegisterMemory(.{ ._, .add }, .rbx, val_lo_mem); + try self.asmRegisterMemory(.{ ._, .adc }, .rcx, val_hi_mem); }, .Sub => { - try self.asmRegisterMemory(.sub, .rbx, val_lo_mem); - try self.asmRegisterMemory(.sbb, .rcx, val_hi_mem); + try self.asmRegisterMemory(.{ ._, .sub }, .rbx, val_lo_mem); + try self.asmRegisterMemory(.{ ._, .sbb }, .rcx, val_hi_mem); }, .And => { - try self.asmRegisterMemory(.@"and", .rbx, val_lo_mem); - try self.asmRegisterMemory(.@"and", .rcx, val_hi_mem); + try self.asmRegisterMemory(.{ ._, .@"and" }, .rbx, val_lo_mem); + try self.asmRegisterMemory(.{ ._, .@"and" }, .rcx, val_hi_mem); }, .Nand => { - try self.asmRegisterMemory(.@"and", .rbx, val_lo_mem); - try self.asmRegisterMemory(.@"and", .rcx, val_hi_mem); - try self.asmRegister(.not, .rbx); - try self.asmRegister(.not, .rcx); + try self.asmRegisterMemory(.{ ._, .@"and" }, .rbx, val_lo_mem); + try self.asmRegisterMemory(.{ ._, .@"and" }, .rcx, val_hi_mem); + try self.asmRegister(.{ ._, .not }, .rbx); + try self.asmRegister(.{ ._, .not }, .rcx); }, .Or => { - try self.asmRegisterMemory(.@"or", .rbx, val_lo_mem); - try self.asmRegisterMemory(.@"or", .rcx, val_hi_mem); + try self.asmRegisterMemory(.{ ._, .@"or" }, .rbx, val_lo_mem); + try self.asmRegisterMemory(.{ ._, .@"or" }, .rcx, val_hi_mem); }, .Xor => { - try self.asmRegisterMemory(.xor, .rbx, val_lo_mem); - try self.asmRegisterMemory(.xor, .rcx, val_hi_mem); + try self.asmRegisterMemory(.{ ._, .xor }, .rbx, val_lo_mem); + try self.asmRegisterMemory(.{ ._, .xor }, .rcx, val_hi_mem); }, else => return self.fail("TODO implement x86 atomic loop for {} {s}", .{ val_ty.fmt(self.bin_file.options.module.?), @tagName(op), }), }; - _ = try self.addInst(.{ - .tag = .cmpxchg, - .ops = .m_sib, - .data = .{ .x = .{ - .fixes = .@"lock _16b", - .payload = try self.addExtra(Mir.MemorySib.encode(ptr_mem)), - } }, - }); + try self.asmMemory(.{ .@"lock _16b", .cmpxchg }, ptr_mem); _ = try self.asmJccReloc(loop, .ne); if (unused) return .unreach; const dst_mcv = try self.allocTempRegOrMem(val_ty, false); try self.asmMemoryRegister( - .mov, + .{ ._, .mov }, Memory.sib(.qword, .{ .base = .{ .frame = dst_mcv.load_frame.index }, .disp = dst_mcv.load_frame.off + 0, @@ -9512,7 +9622,7 @@ fn atomicOp( .rax, ); try self.asmMemoryRegister( - .mov, + .{ ._, .mov }, Memory.sib(.qword, .{ .base = .{ .frame = dst_mcv.load_frame.index }, .disp = dst_mcv.load_frame.off + 8, @@ -9664,8 +9774,13 @@ fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void { .off = elem_abi_size, } }); - try self.genBinOpMir(.sub, Type.usize, len_mcv, .{ .immediate = 1 }); - try self.asmRegisterRegisterImmediate(.imul, len_reg, len_reg, Immediate.u(elem_abi_size)); + try self.genBinOpMir(.{ ._, .sub }, Type.usize, len_mcv, .{ .immediate = 1 }); + try self.asmRegisterRegisterImmediate( + .{ .i_, .mul }, + len_reg, + len_reg, + Immediate.u(elem_abi_size), + ); try self.genInlineMemcpy(second_elem_ptr_mcv, ptr, len_mcv); try self.performReloc(skip_reloc); @@ -9803,7 +9918,7 @@ fn airErrorName(self: *Self, inst: Air.Inst.Index) !void { try self.truncateRegister(err_ty, err_reg.to32()); try self.asmRegisterMemory( - .mov, + .{ ._, .mov }, start_reg.to32(), Memory.sib(.dword, .{ .base = .{ .reg = addr_reg.to64() }, @@ -9812,7 +9927,7 @@ fn airErrorName(self: *Self, inst: Air.Inst.Index) !void { }), ); try self.asmRegisterMemory( - .mov, + .{ ._, .mov }, end_reg.to32(), Memory.sib(.dword, .{ .base = .{ .reg = addr_reg.to64() }, @@ -9820,9 +9935,9 @@ fn airErrorName(self: *Self, 
inst: Air.Inst.Index) !void { .disp = 8, }), ); - try self.asmRegisterRegister(.sub, end_reg.to32(), start_reg.to32()); + try self.asmRegisterRegister(.{ ._, .sub }, end_reg.to32(), start_reg.to32()); try self.asmRegisterMemory( - .lea, + .{ ._, .lea }, start_reg.to64(), Memory.sib(.byte, .{ .base = .{ .reg = addr_reg.to64() }, @@ -9831,7 +9946,7 @@ fn airErrorName(self: *Self, inst: Air.Inst.Index) !void { }), ); try self.asmRegisterMemory( - .lea, + .{ ._, .lea }, end_reg.to32(), Memory.sib(.byte, .{ .base = .{ .reg = end_reg.to64() }, @@ -9841,7 +9956,7 @@ fn airErrorName(self: *Self, inst: Air.Inst.Index) !void { const dst_mcv = try self.allocRegOrMem(inst, false); try self.asmMemoryRegister( - .mov, + .{ ._, .mov }, Memory.sib(.qword, .{ .base = .{ .frame = dst_mcv.load_frame.index }, .disp = dst_mcv.load_frame.off, @@ -9849,7 +9964,7 @@ fn airErrorName(self: *Self, inst: Air.Inst.Index) !void { start_reg.to64(), ); try self.asmMemoryRegister( - .mov, + .{ ._, .mov }, Memory.sib(.qword, .{ .base = .{ .frame = dst_mcv.load_frame.index }, .disp = dst_mcv.load_frame.off + 8, @@ -9945,13 +10060,13 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { try self.truncateRegister(elem_ty, elem_reg); } if (elem_bit_off > 0) try self.genShiftBinOpMir( - .shl, + .{ ._l, .sh }, elem_ty, .{ .register = elem_reg }, .{ .immediate = elem_bit_off }, ); try self.genBinOpMir( - .@"or", + .{ ._, .@"or" }, elem_ty, .{ .load_frame = .{ .index = frame_index, .off = elem_byte_off } }, .{ .register = elem_reg }, @@ -9962,13 +10077,13 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { try self.truncateRegister(elem_ty, registerAlias(reg, elem_abi_size)); } try self.genShiftBinOpMir( - .shr, + .{ ._r, .sh }, elem_ty, .{ .register = reg }, .{ .immediate = elem_abi_bits - elem_bit_off }, ); try self.genBinOpMir( - .@"or", + .{ ._, .@"or" }, elem_ty, .{ .load_frame = .{ .index = frame_index, @@ -10078,25 +10193,25 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void { } const mir_tag = if (@as( - ?Mir.Inst.Tag, + ?Mir.Inst.FixedTag, if (mem.eql(u2, &order, &.{ 1, 3, 2 }) or mem.eql(u2, &order, &.{ 3, 1, 2 })) switch (ty.zigTypeTag()) { .Float => switch (ty.floatBits(self.target.*)) { - 32 => .vfmadd132ss, - 64 => .vfmadd132sd, + 32 => .{ ._, .vfmadd132ss }, + 64 => .{ ._, .vfmadd132sd }, 16, 80, 128 => null, else => unreachable, }, .Vector => switch (ty.childType().zigTypeTag()) { .Float => switch (ty.childType().floatBits(self.target.*)) { 32 => switch (ty.vectorLen()) { - 1 => .vfmadd132ss, - 2...8 => .vfmadd132ps, + 1 => .{ ._, .vfmadd132ss }, + 2...8 => .{ ._, .vfmadd132ps }, else => null, }, 64 => switch (ty.vectorLen()) { - 1 => .vfmadd132sd, - 2...4 => .vfmadd132pd, + 1 => .{ ._, .vfmadd132sd }, + 2...4 => .{ ._, .vfmadd132pd }, else => null, }, 16, 80, 128 => null, @@ -10109,21 +10224,21 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void { else if (mem.eql(u2, &order, &.{ 2, 1, 3 }) or mem.eql(u2, &order, &.{ 1, 2, 3 })) switch (ty.zigTypeTag()) { .Float => switch (ty.floatBits(self.target.*)) { - 32 => .vfmadd213ss, - 64 => .vfmadd213sd, + 32 => .{ ._, .vfmadd213ss }, + 64 => .{ ._, .vfmadd213sd }, 16, 80, 128 => null, else => unreachable, }, .Vector => switch (ty.childType().zigTypeTag()) { .Float => switch (ty.childType().floatBits(self.target.*)) { 32 => switch (ty.vectorLen()) { - 1 => .vfmadd213ss, - 2...8 => .vfmadd213ps, + 1 => .{ ._, .vfmadd213ss }, + 2...8 => .{ ._, .vfmadd213ps }, else => null, }, 64 => switch (ty.vectorLen()) { - 1 => .vfmadd213sd, - 2...4 
=> .vfmadd213pd, + 1 => .{ ._, .vfmadd213sd }, + 2...4 => .{ ._, .vfmadd213pd }, else => null, }, 16, 80, 128 => null, @@ -10136,21 +10251,21 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void { else if (mem.eql(u2, &order, &.{ 2, 3, 1 }) or mem.eql(u2, &order, &.{ 3, 2, 1 })) switch (ty.zigTypeTag()) { .Float => switch (ty.floatBits(self.target.*)) { - 32 => .vfmadd231ss, - 64 => .vfmadd231sd, + 32 => .{ ._, .vfmadd231ss }, + 64 => .{ ._, .vfmadd231sd }, 16, 80, 128 => null, else => unreachable, }, .Vector => switch (ty.childType().zigTypeTag()) { .Float => switch (ty.childType().floatBits(self.target.*)) { 32 => switch (ty.vectorLen()) { - 1 => .vfmadd231ss, - 2...8 => .vfmadd231ps, + 1 => .{ ._, .vfmadd231ss }, + 2...8 => .{ ._, .vfmadd231ps }, else => null, }, 64 => switch (ty.vectorLen()) { - 1 => .vfmadd231sd, - 2...4 => .vfmadd231pd, + 1 => .{ ._, .vfmadd231sd }, + 2...4 => .{ ._, .vfmadd231pd }, else => null, }, 16, 80, 128 => null, @@ -10522,17 +10637,37 @@ fn truncateRegister(self: *Self, ty: Type, reg: Register) !void { switch (int_info.signedness) { .signed => { const shift = @intCast(u6, max_reg_bit_width - int_info.bits); - try self.genShiftBinOpMir(.sal, Type.isize, .{ .register = reg }, .{ .immediate = shift }); - try self.genShiftBinOpMir(.sar, Type.isize, .{ .register = reg }, .{ .immediate = shift }); + try self.genShiftBinOpMir( + .{ ._l, .sa }, + Type.isize, + .{ .register = reg }, + .{ .immediate = shift }, + ); + try self.genShiftBinOpMir( + .{ ._r, .sa }, + Type.isize, + .{ .register = reg }, + .{ .immediate = shift }, + ); }, .unsigned => { const shift = @intCast(u6, max_reg_bit_width - int_info.bits); const mask = (~@as(u64, 0)) >> shift; if (int_info.bits <= 32) { - try self.genBinOpMir(.@"and", Type.u32, .{ .register = reg }, .{ .immediate = mask }); + try self.genBinOpMir( + .{ ._, .@"and" }, + Type.u32, + .{ .register = reg }, + .{ .immediate = mask }, + ); } else { const tmp_reg = try self.copyToTmpRegister(Type.usize, .{ .immediate = mask }); - try self.genBinOpMir(.@"and", Type.usize, .{ .register = reg }, .{ .register = tmp_reg }); + try self.genBinOpMir( + .{ ._, .@"and" }, + Type.usize, + .{ .register = reg }, + .{ .register = tmp_reg }, + ); } }, } diff --git a/src/arch/x86_64/Lower.zig b/src/arch/x86_64/Lower.zig index 2d7fa4b4fd..c32e7fc974 100644 --- a/src/arch/x86_64/Lower.zig +++ b/src/arch/x86_64/Lower.zig @@ -286,10 +286,10 @@ fn generic(lower: *Lower, inst: Mir.Inst) Error!void { .rri_s, .rri_u => inst.data.rri.fixes, .ri_s, .ri_u => inst.data.ri.fixes, .ri64, .rm_sib, .rm_rip, .mr_sib, .mr_rip => inst.data.rx.fixes, - .mi_sib_u, .mi_rip_u, .mi_sib_s, .mi_rip_s => ._, .mrr_sib, .mrr_rip, .rrm_sib, .rrm_rip => inst.data.rrx.fixes, .rmi_sib, .rmi_rip, .mri_sib, .mri_rip => inst.data.rix.fixes, .rrmi_sib, .rrmi_rip => inst.data.rrix.fixes, + .mi_sib_u, .mi_rip_u, .mi_sib_s, .mi_rip_s => inst.data.x.fixes, .m_sib, .m_rip, .rax_moffs, .moffs_rax => inst.data.x.fixes, .extern_fn_reloc, .got_reloc, .direct_reloc, .import_reloc, .tlv_reloc => ._, else => return lower.fail("TODO lower .{s}", .{@tagName(inst.ops)}), @@ -356,8 +356,11 @@ fn generic(lower: *Lower, inst: Mir.Inst) Error!void { .{ .mem = lower.mem(inst.ops, inst.data.x.payload) }, }, .mi_sib_s, .mi_sib_u, .mi_rip_u, .mi_rip_s => &.{ - .{ .mem = lower.mem(inst.ops, inst.data.ix.payload) }, - .{ .imm = lower.imm(inst.ops, inst.data.ix.i) }, + .{ .mem = lower.mem(inst.ops, inst.data.x.payload + 1) }, + .{ .imm = lower.imm( + inst.ops, + lower.mir.extraData(Mir.Imm32, 
inst.data.x.payload).data.imm, + ) }, }, .rm_sib, .rm_rip => &.{ .{ .reg = inst.data.rx.r1 }, diff --git a/src/arch/x86_64/Mir.zig b/src/arch/x86_64/Mir.zig index 951a0c5d4d..6b5e2bded7 100644 --- a/src/arch/x86_64/Mir.zig +++ b/src/arch/x86_64/Mir.zig @@ -36,6 +36,18 @@ pub const Inst = struct { /// ___ @"_", + /// Integer __ + i_, + + /// ___ Left + _l, + /// ___ Left Double + _ld, + /// ___ Right + _r, + /// ___ Right Double + _rd, + /// ___ Above _a, /// ___ Above Or Equal @@ -53,7 +65,7 @@ pub const Inst = struct { /// ___ Greater Or Equal _ge, /// ___ Less - _l, + //_l, /// ___ Less Or Equal _le, /// ___ Not Above @@ -97,6 +109,15 @@ pub const Inst = struct { /// ___ Zero _z, + /// ___ Byte + //_b, + /// ___ Word + _w, + /// ___ Doubleword + _d, + /// ___ QuadWord + _q, + /// ___ String //_s, /// ___ String Byte @@ -165,6 +186,18 @@ pub const Inst = struct { /// Locked ___ @"lock _", + /// ___ And Complement + //_c, + /// Locked ___ And Complement + @"lock _c", + /// ___ And Reset + //_r, + /// Locked ___ And Reset + @"lock _r", + /// ___ And Set + //_s, + /// Locked ___ And Set + @"lock _s", /// ___ 8 Bytes _8b, /// Locked ___ 8 Bytes @@ -174,6 +207,11 @@ pub const Inst = struct { /// Locked ___ 16 Bytes @"lock _16b", + /// Float ___ + f_, + /// Float ___ Pop + f_p, + /// Packed ___ p_, /// Packed ___ Byte @@ -250,13 +288,10 @@ pub const Inst = struct { /// Byte swap bswap, /// Bit test - bt, /// Bit test and complement - btc, /// Bit test and reset - btr, /// Bit test and set - bts, + bt, /// Call call, /// Convert byte to word @@ -280,21 +315,18 @@ pub const Inst = struct { /// Convert word to doubleword cwde, /// Unsigned division - div, - /// Store integer with truncation - fisttp, - /// Load floating-point value - fld, /// Signed division - idiv, - /// Signed multiplication - imul, + div, /// int3, + /// Store integer with truncation + istt, /// Conditional jump j, /// Jump jmp, + /// Load floating-point value + ld, /// Load effective address lea, /// Load string @@ -307,20 +339,17 @@ pub const Inst = struct { mfence, /// Move /// Move data from string to string + /// Move doubleword + /// Move quadword mov, /// Move data after swapping bytes movbe, - /// Move doubleword - movd, - /// Move quadword - movq, /// Move with sign extension movsx, - /// Move with sign extension - movsxd, /// Move with zero extension movzx, /// Multiply + /// Signed multiplication mul, /// Two's complement negation neg, @@ -337,19 +366,16 @@ pub const Inst = struct { /// Push push, /// Rotate left through carry - rcl, /// Rotate right through carry - rcr, + rc, /// Return ret, /// Rotate left - rol, /// Rotate right - ror, + ro, /// Arithmetic shift left - sal, /// Arithmetic shift right - sar, + sa, /// Integer subtraction with borrow sbb, /// Scan string @@ -359,13 +385,10 @@ pub const Inst = struct { /// Store fence sfence, /// Logical shift left - shl, /// Double precision shift left - shld, /// Logical shift right - shr, /// Double precision shift right - shrd, + sh, /// Subtract sub, /// Store string @@ -730,6 +753,8 @@ pub const Inst = struct { pseudo, }; + pub const FixedTag = struct { Fixes, Tag }; + pub const Ops = enum(u8) { /// No data associated with this instruction (only mnemonic is used). none, @@ -800,16 +825,16 @@ pub const Inst = struct { /// Uses `x` with extra data of type `MemoryRip`. m_rip, /// Memory (SIB), immediate (unsigned) operands. - /// Uses `ix` payload with extra data of type `MemorySib`. + /// Uses `x` payload with extra data of type `Imm32` followed by `MemorySib`. 
mi_sib_u, /// Memory (RIP), immediate (unsigned) operands. - /// Uses `ix` payload with extra data of type `MemoryRip`. + /// Uses `x` payload with extra data of type `Imm32` followed by `MemoryRip`. mi_rip_u, /// Memory (SIB), immediate (sign-extend) operands. - /// Uses `ix` payload with extra data of type `MemorySib`. + /// Uses `x` payload with extra data of type `Imm32` followed by `MemorySib`. mi_sib_s, /// Memory (RIP), immediate (sign-extend) operands. - /// Uses `ix` payload with extra data of type `MemoryRip`. + /// Uses `x` payload with extra data of type `Imm32` followed by `MemoryRip`. mi_rip_s, /// Memory (SIB), register operands. /// Uses `rx` payload with extra data of type `MemorySib`. @@ -974,11 +999,6 @@ pub const Inst = struct { r1: Register, payload: u32, }, - /// Immediate, followed by Custom payload found in extra. - ix: struct { - i: u32, - payload: u32, - }, /// Register, register, followed by Custom payload found in extra. rrx: struct { fixes: Fixes = ._, @@ -1081,6 +1101,10 @@ pub const RegisterList = struct { } }; +pub const Imm32 = struct { + imm: u32, +}; + pub const Imm64 = struct { msb: u32, lsb: u32, -- cgit v1.2.3 From 1f5aa7747f5710e281cd2190508ce562a4bfd35f Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Mon, 8 May 2023 07:35:31 -0400 Subject: x86_64: finish optimizing mir tag usage Final tag count is 95. --- src/arch/x86_64/CodeGen.zig | 368 ++++++++++++++++++++++---------------------- src/arch/x86_64/Mir.zig | 364 ++++++++++--------------------------------- 2 files changed, 266 insertions(+), 466 deletions(-) (limited to 'src/arch') diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 147be62e28..2dc1cc8ee4 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -2443,7 +2443,7 @@ fn airFptrunc(self: *Self, inst: Air.Inst.Index) !void { else try self.copyToTmpRegister(src_ty, src_mcv); try self.asmRegisterRegisterImmediate( - .{ ._, .vcvtps2ph }, + .{ .v_, .cvtps2ph }, dst_reg, mat_src_reg.to128(), Immediate.u(0b1_00), @@ -2455,12 +2455,12 @@ fn airFptrunc(self: *Self, inst: Air.Inst.Index) !void { } } else if (src_bits == 64 and dst_bits == 32) { if (self.hasFeature(.avx)) if (src_mcv.isMemory()) try self.asmRegisterRegisterMemory( - .{ ._, .vcvtsd2ss }, + .{ .v_, .cvtsd2ss }, dst_reg, dst_reg, src_mcv.mem(.qword), ) else try self.asmRegisterRegisterRegister( - .{ ._, .vcvtsd2ss }, + .{ .v_, .cvtsd2ss }, dst_reg, dst_reg, (if (src_mcv.isRegister()) @@ -2506,22 +2506,22 @@ fn airFpext(self: *Self, inst: Air.Inst.Index) !void { src_mcv.getReg().? 
else try self.copyToTmpRegister(src_ty, src_mcv); - try self.asmRegisterRegister(.{ ._, .vcvtph2ps }, dst_reg, mat_src_reg.to128()); + try self.asmRegisterRegister(.{ .v_, .cvtph2ps }, dst_reg, mat_src_reg.to128()); switch (dst_bits) { 32 => {}, - 64 => try self.asmRegisterRegisterRegister(.{ ._, .vcvtss2sd }, dst_reg, dst_reg, dst_reg), + 64 => try self.asmRegisterRegisterRegister(.{ .v_, .cvtss2sd }, dst_reg, dst_reg, dst_reg), else => return self.fail("TODO implement airFpext from {} to {}", .{ src_ty.fmt(self.bin_file.options.module.?), dst_ty.fmt(self.bin_file.options.module.?), }), } } else if (src_bits == 32 and dst_bits == 64) { if (self.hasFeature(.avx)) if (src_mcv.isMemory()) try self.asmRegisterRegisterMemory( - .{ ._, .vcvtss2sd }, + .{ .v_, .cvtss2sd }, dst_reg, dst_reg, src_mcv.mem(.dword), ) else try self.asmRegisterRegisterRegister( - .{ ._, .vcvtss2sd }, + .{ .v_, .cvtss2sd }, dst_reg, dst_reg, (if (src_mcv.isRegister()) @@ -4678,8 +4678,8 @@ fn airFloatSign(self: *Self, inst: Air.Inst.Index) !void { try self.genBinOpMir(switch (ty_bits) { // No point using an extra prefix byte for *pd which performs the same operation. 16, 32, 64, 128 => switch (tag) { - .neg => .{ ._, .xorps }, - .fabs => .{ ._, .andnps }, + .neg => .{ ._ps, .xor }, + .fabs => .{ ._ps, .andn }, else => unreachable, }, 80 => return self.fail("TODO implement airFloatSign for {}", .{ @@ -4712,23 +4712,23 @@ fn genRound(self: *Self, ty: Type, dst_reg: Register, src_mcv: MCValue, mode: u4 const mir_tag = if (@as(?Mir.Inst.FixedTag, switch (ty.zigTypeTag()) { .Float => switch (ty.floatBits(self.target.*)) { - 32 => if (self.hasFeature(.avx)) .{ ._, .vroundss } else .{ ._, .roundss }, - 64 => if (self.hasFeature(.avx)) .{ ._, .vroundsd } else .{ ._, .roundsd }, + 32 => if (self.hasFeature(.avx)) .{ .v_ss, .round } else .{ ._ss, .round }, + 64 => if (self.hasFeature(.avx)) .{ .v_sd, .round } else .{ ._sd, .round }, 16, 80, 128 => null, else => unreachable, }, .Vector => switch (ty.childType().zigTypeTag()) { .Float => switch (ty.childType().floatBits(self.target.*)) { 32 => switch (ty.vectorLen()) { - 1 => if (self.hasFeature(.avx)) .{ ._, .vroundss } else .{ ._, .roundss }, - 2...4 => if (self.hasFeature(.avx)) .{ ._, .vroundps } else .{ ._, .roundps }, - 5...8 => if (self.hasFeature(.avx)) .{ ._, .vroundps } else null, + 1 => if (self.hasFeature(.avx)) .{ .v_ss, .round } else .{ ._ss, .round }, + 2...4 => if (self.hasFeature(.avx)) .{ .v_ps, .round } else .{ ._ps, .round }, + 5...8 => if (self.hasFeature(.avx)) .{ .v_ps, .round } else null, else => null, }, 64 => switch (ty.vectorLen()) { - 1 => if (self.hasFeature(.avx)) .{ ._, .vroundsd } else .{ ._, .roundsd }, - 2 => if (self.hasFeature(.avx)) .{ ._, .vroundpd } else .{ ._, .roundpd }, - 3...4 => if (self.hasFeature(.avx)) .{ ._, .vroundpd } else null, + 1 => if (self.hasFeature(.avx)) .{ .v_sd, .round } else .{ ._sd, .round }, + 2 => if (self.hasFeature(.avx)) .{ .v_pd, .round } else .{ ._pd, .round }, + 3...4 => if (self.hasFeature(.avx)) .{ .v_pd, .round } else null, else => null, }, 16, 80, 128 => null, @@ -4743,8 +4743,8 @@ fn genRound(self: *Self, ty: Type, dst_reg: Register, src_mcv: MCValue, mode: u4 const abi_size = @intCast(u32, ty.abiSize(self.target.*)); const dst_alias = registerAlias(dst_reg, abi_size); - switch (mir_tag[1]) { - .vroundss, .vroundsd => if (src_mcv.isMemory()) try self.asmRegisterRegisterMemoryImmediate( + switch (mir_tag[0]) { + .v_ss, .v_sd => if (src_mcv.isMemory()) try self.asmRegisterRegisterMemoryImmediate( mir_tag, 
dst_alias, dst_alias, @@ -4799,18 +4799,18 @@ fn airSqrt(self: *Self, inst: Air.Inst.Index) !void { src_mcv.getReg().? else try self.copyToTmpRegister(ty, src_mcv); - try self.asmRegisterRegister(.{ ._, .vcvtph2ps }, dst_reg, mat_src_reg.to128()); - try self.asmRegisterRegisterRegister(.{ ._, .vsqrtss }, dst_reg, dst_reg, dst_reg); + try self.asmRegisterRegister(.{ .v_, .cvtph2ps }, dst_reg, mat_src_reg.to128()); + try self.asmRegisterRegisterRegister(.{ .v_ss, .sqrt }, dst_reg, dst_reg, dst_reg); try self.asmRegisterRegisterImmediate( - .{ ._, .vcvtps2ph }, + .{ .v_, .cvtps2ph }, dst_reg, dst_reg, Immediate.u(0b1_00), ); break :result dst_mcv; } else null, - 32 => if (self.hasFeature(.avx)) .{ ._, .vsqrtss } else .{ ._, .sqrtss }, - 64 => if (self.hasFeature(.avx)) .{ ._, .vsqrtsd } else .{ ._, .sqrtsd }, + 32 => if (self.hasFeature(.avx)) .{ .v_ss, .sqrt } else .{ ._ss, .sqrt }, + 64 => if (self.hasFeature(.avx)) .{ .v_sd, .sqrt } else .{ ._sd, .sqrt }, 80, 128 => null, else => unreachable, }, @@ -4819,7 +4819,7 @@ fn airSqrt(self: *Self, inst: Air.Inst.Index) !void { 16 => if (self.hasFeature(.f16c)) switch (ty.vectorLen()) { 1 => { try self.asmRegisterRegister( - .{ ._, .vcvtph2ps }, + .{ .v_, .cvtph2ps }, dst_reg, (if (src_mcv.isRegister()) src_mcv.getReg().? @@ -4827,13 +4827,13 @@ fn airSqrt(self: *Self, inst: Air.Inst.Index) !void { try self.copyToTmpRegister(ty, src_mcv)).to128(), ); try self.asmRegisterRegisterRegister( - .{ ._, .vsqrtss }, + .{ .v_ss, .sqrt }, dst_reg, dst_reg, dst_reg, ); try self.asmRegisterRegisterImmediate( - .{ ._, .vcvtps2ph }, + .{ .v_, .cvtps2ph }, dst_reg, dst_reg, Immediate.u(0b1_00), @@ -4843,22 +4843,22 @@ fn airSqrt(self: *Self, inst: Air.Inst.Index) !void { 2...8 => { const wide_reg = registerAlias(dst_reg, abi_size * 2); if (src_mcv.isMemory()) try self.asmRegisterMemory( - .{ ._, .vcvtph2ps }, + .{ .v_, .cvtph2ps }, wide_reg, src_mcv.mem(Memory.PtrSize.fromSize( @intCast(u32, @divExact(wide_reg.bitSize(), 16)), )), ) else try self.asmRegisterRegister( - .{ ._, .vcvtph2ps }, + .{ .v_, .cvtph2ps }, wide_reg, (if (src_mcv.isRegister()) src_mcv.getReg().? 
else try self.copyToTmpRegister(ty, src_mcv)).to128(), ); - try self.asmRegisterRegister(.{ ._, .vsqrtps }, wide_reg, wide_reg); + try self.asmRegisterRegister(.{ .v_ps, .sqrt }, wide_reg, wide_reg); try self.asmRegisterRegisterImmediate( - .{ ._, .vcvtps2ph }, + .{ .v_, .cvtps2ph }, dst_reg, wide_reg, Immediate.u(0b1_00), @@ -4868,15 +4868,15 @@ fn airSqrt(self: *Self, inst: Air.Inst.Index) !void { else => null, } else null, 32 => switch (ty.vectorLen()) { - 1 => if (self.hasFeature(.avx)) .{ ._, .vsqrtss } else .{ ._, .sqrtss }, - 2...4 => if (self.hasFeature(.avx)) .{ ._, .vsqrtps } else .{ ._, .sqrtps }, - 5...8 => if (self.hasFeature(.avx)) .{ ._, .vsqrtps } else null, + 1 => if (self.hasFeature(.avx)) .{ .v_ss, .sqrt } else .{ ._ss, .sqrt }, + 2...4 => if (self.hasFeature(.avx)) .{ .v_ps, .sqrt } else .{ ._ps, .sqrt }, + 5...8 => if (self.hasFeature(.avx)) .{ .v_ps, .sqrt } else null, else => null, }, 64 => switch (ty.vectorLen()) { - 1 => if (self.hasFeature(.avx)) .{ ._, .vsqrtsd } else .{ ._, .sqrtsd }, - 2 => if (self.hasFeature(.avx)) .{ ._, .vsqrtpd } else .{ ._, .sqrtpd }, - 3...4 => if (self.hasFeature(.avx)) .{ ._, .vsqrtpd } else null, + 1 => if (self.hasFeature(.avx)) .{ .v_sd, .sqrt } else .{ ._sd, .sqrt }, + 2 => if (self.hasFeature(.avx)) .{ .v_pd, .sqrt } else .{ ._pd, .sqrt }, + 3...4 => if (self.hasFeature(.avx)) .{ .v_pd, .sqrt } else null, else => null, }, 80, 128 => null, @@ -4888,8 +4888,8 @@ fn airSqrt(self: *Self, inst: Air.Inst.Index) !void { })) |tag| tag else return self.fail("TODO implement airSqrt for {}", .{ ty.fmt(self.bin_file.options.module.?), }); - switch (mir_tag[1]) { - .vsqrtss, .vsqrtsd => if (src_mcv.isMemory()) try self.asmRegisterRegisterMemory( + switch (mir_tag[0]) { + .v_ss, .v_sd => if (src_mcv.isMemory()) try self.asmRegisterRegisterMemory( mir_tag, dst_reg, dst_reg, @@ -6325,13 +6325,13 @@ fn genBinOp( defer self.register_manager.unlockReg(tmp_lock); if (src_mcv.isMemory()) try self.asmRegisterRegisterMemoryImmediate( - .{ ._, .vpinsrw }, + .{ .vp_w, .insr }, dst_reg, dst_reg, src_mcv.mem(.word), Immediate.u(1), ) else try self.asmRegisterRegisterRegister( - .{ ._, .vpunpcklwd }, + .{ .vp_, .unpcklwd }, dst_reg, dst_reg, (if (src_mcv.isRegister()) @@ -6339,15 +6339,15 @@ fn genBinOp( else try self.copyToTmpRegister(rhs_ty, src_mcv)).to128(), ); - try self.asmRegisterRegister(.{ ._, .vcvtph2ps }, dst_reg, dst_reg); - try self.asmRegisterRegister(.{ ._, .vmovshdup }, tmp_reg, dst_reg); + try self.asmRegisterRegister(.{ .v_, .cvtph2ps }, dst_reg, dst_reg); + try self.asmRegisterRegister(.{ .v_, .movshdup }, tmp_reg, dst_reg); try self.asmRegisterRegisterRegister( switch (air_tag) { - .add => .{ ._, .vaddss }, - .sub => .{ ._, .vsubss }, - .div_float, .div_trunc, .div_floor, .div_exact => .{ ._, .vdivss }, - .max => .{ ._, .vmaxss }, - .min => .{ ._, .vmaxss }, + .add => .{ .v_ss, .add }, + .sub => .{ .v_ss, .sub }, + .div_float, .div_trunc, .div_floor, .div_exact => .{ .v_ss, .div }, + .max => .{ .v_ss, .max }, + .min => .{ .v_ss, .max }, else => unreachable, }, dst_reg, @@ -6355,7 +6355,7 @@ fn genBinOp( tmp_reg, ); try self.asmRegisterRegisterImmediate( - .{ ._, .vcvtps2ph }, + .{ .v_, .cvtps2ph }, dst_reg, dst_reg, Immediate.u(0b1_00), @@ -6363,29 +6363,29 @@ fn genBinOp( return dst_mcv; } else null, 32 => switch (air_tag) { - .add => if (self.hasFeature(.avx)) .{ ._, .vaddss } else .{ ._, .addss }, - .sub => if (self.hasFeature(.avx)) .{ ._, .vsubss } else .{ ._, .subss }, - .mul => if (self.hasFeature(.avx)) .{ ._, .vmulss } else .{ 
._, .mulss }, + .add => if (self.hasFeature(.avx)) .{ .v_ss, .add } else .{ ._ss, .add }, + .sub => if (self.hasFeature(.avx)) .{ .v_ss, .sub } else .{ ._ss, .sub }, + .mul => if (self.hasFeature(.avx)) .{ .v_ss, .mul } else .{ ._ss, .mul }, .div_float, .div_trunc, .div_floor, .div_exact, - => if (self.hasFeature(.avx)) .{ ._, .vdivss } else .{ ._, .divss }, - .max => if (self.hasFeature(.avx)) .{ ._, .vmaxss } else .{ ._, .maxss }, - .min => if (self.hasFeature(.avx)) .{ ._, .vminss } else .{ ._, .minss }, + => if (self.hasFeature(.avx)) .{ .v_ss, .div } else .{ ._ss, .div }, + .max => if (self.hasFeature(.avx)) .{ .v_ss, .max } else .{ ._ss, .max }, + .min => if (self.hasFeature(.avx)) .{ .v_ss, .min } else .{ ._ss, .min }, else => unreachable, }, 64 => switch (air_tag) { - .add => if (self.hasFeature(.avx)) .{ ._, .vaddsd } else .{ ._, .addsd }, - .sub => if (self.hasFeature(.avx)) .{ ._, .vsubsd } else .{ ._, .subsd }, - .mul => if (self.hasFeature(.avx)) .{ ._, .vmulsd } else .{ ._, .mulsd }, + .add => if (self.hasFeature(.avx)) .{ .v_sd, .add } else .{ ._sd, .add }, + .sub => if (self.hasFeature(.avx)) .{ .v_sd, .sub } else .{ ._sd, .sub }, + .mul => if (self.hasFeature(.avx)) .{ .v_sd, .mul } else .{ ._sd, .mul }, .div_float, .div_trunc, .div_floor, .div_exact, - => if (self.hasFeature(.avx)) .{ ._, .vdivsd } else .{ ._, .divsd }, - .max => if (self.hasFeature(.avx)) .{ ._, .vmaxsd } else .{ ._, .maxsd }, - .min => if (self.hasFeature(.avx)) .{ ._, .vminsd } else .{ ._, .minsd }, + => if (self.hasFeature(.avx)) .{ .v_sd, .div } else .{ ._sd, .div }, + .max => if (self.hasFeature(.avx)) .{ .v_sd, .max } else .{ ._sd, .max }, + .min => if (self.hasFeature(.avx)) .{ .v_sd, .min } else .{ ._sd, .min }, else => unreachable, }, 80, 128 => null, @@ -6401,13 +6401,13 @@ fn genBinOp( defer self.register_manager.unlockReg(tmp_lock); if (src_mcv.isMemory()) try self.asmRegisterRegisterMemoryImmediate( - .{ ._, .vpinsrw }, + .{ .vp_w, .insr }, dst_reg, dst_reg, src_mcv.mem(.word), Immediate.u(1), ) else try self.asmRegisterRegisterRegister( - .{ ._, .vpunpcklwd }, + .{ .vp_, .unpcklwd }, dst_reg, dst_reg, (if (src_mcv.isRegister()) @@ -6415,15 +6415,15 @@ fn genBinOp( else try self.copyToTmpRegister(rhs_ty, src_mcv)).to128(), ); - try self.asmRegisterRegister(.{ ._, .vcvtph2ps }, dst_reg, dst_reg); - try self.asmRegisterRegister(.{ ._, .vmovshdup }, tmp_reg, dst_reg); + try self.asmRegisterRegister(.{ .v_, .cvtph2ps }, dst_reg, dst_reg); + try self.asmRegisterRegister(.{ .v_, .movshdup }, tmp_reg, dst_reg); try self.asmRegisterRegisterRegister( switch (air_tag) { - .add => .{ ._, .vaddss }, - .sub => .{ ._, .vsubss }, - .div_float, .div_trunc, .div_floor, .div_exact => .{ ._, .vdivss }, - .max => .{ ._, .vmaxss }, - .min => .{ ._, .vmaxss }, + .add => .{ .v_ss, .add }, + .sub => .{ .v_ss, .sub }, + .div_float, .div_trunc, .div_floor, .div_exact => .{ .v_ss, .div }, + .max => .{ .v_ss, .max }, + .min => .{ .v_ss, .max }, else => unreachable, }, dst_reg, @@ -6431,7 +6431,7 @@ fn genBinOp( tmp_reg, ); try self.asmRegisterRegisterImmediate( - .{ ._, .vcvtps2ph }, + .{ .v_, .cvtps2ph }, dst_reg, dst_reg, Immediate.u(0b1_00), @@ -6444,12 +6444,12 @@ fn genBinOp( defer self.register_manager.unlockReg(tmp_lock); if (src_mcv.isMemory()) try self.asmRegisterMemoryImmediate( - .{ ._, .vpinsrd }, + .{ .vp_d, .insr }, dst_reg, src_mcv.mem(.dword), Immediate.u(1), ) else try self.asmRegisterRegisterRegister( - .{ ._, .vunpcklps }, + .{ .v_ps, .unpckl }, dst_reg, dst_reg, (if (src_mcv.isRegister()) @@ 
-6457,20 +6457,20 @@ fn genBinOp( else try self.copyToTmpRegister(rhs_ty, src_mcv)).to128(), ); - try self.asmRegisterRegister(.{ ._, .vcvtph2ps }, dst_reg, dst_reg); + try self.asmRegisterRegister(.{ .v_, .cvtph2ps }, dst_reg, dst_reg); try self.asmRegisterRegisterRegister( - .{ ._, .vmovhlps }, + .{ .v_ps, .movhl }, tmp_reg, dst_reg, dst_reg, ); try self.asmRegisterRegisterRegister( switch (air_tag) { - .add => .{ ._, .vaddps }, - .sub => .{ ._, .vsubps }, - .div_float, .div_trunc, .div_floor, .div_exact => .{ ._, .vdivps }, - .max => .{ ._, .vmaxps }, - .min => .{ ._, .vmaxps }, + .add => .{ .v_ps, .add }, + .sub => .{ .v_ps, .sub }, + .div_float, .div_trunc, .div_floor, .div_exact => .{ .v_ps, .div }, + .max => .{ .v_ps, .max }, + .min => .{ .v_ps, .max }, else => unreachable, }, dst_reg, @@ -6478,7 +6478,7 @@ fn genBinOp( tmp_reg, ); try self.asmRegisterRegisterImmediate( - .{ ._, .vcvtps2ph }, + .{ .v_, .cvtps2ph }, dst_reg, dst_reg, Immediate.u(0b1_00), @@ -6490,13 +6490,13 @@ fn genBinOp( const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg); defer self.register_manager.unlockReg(tmp_lock); - try self.asmRegisterRegister(.{ ._, .vcvtph2ps }, dst_reg, dst_reg); + try self.asmRegisterRegister(.{ .v_, .cvtph2ps }, dst_reg, dst_reg); if (src_mcv.isMemory()) try self.asmRegisterMemory( - .{ ._, .vcvtph2ps }, + .{ .v_, .cvtph2ps }, tmp_reg, src_mcv.mem(.qword), ) else try self.asmRegisterRegister( - .{ ._, .vcvtph2ps }, + .{ .v_, .cvtph2ps }, tmp_reg, (if (src_mcv.isRegister()) src_mcv.getReg().? @@ -6505,11 +6505,11 @@ fn genBinOp( ); try self.asmRegisterRegisterRegister( switch (air_tag) { - .add => .{ ._, .vaddps }, - .sub => .{ ._, .vsubps }, - .div_float, .div_trunc, .div_floor, .div_exact => .{ ._, .vdivps }, - .max => .{ ._, .vmaxps }, - .min => .{ ._, .vmaxps }, + .add => .{ .v_ps, .add }, + .sub => .{ .v_ps, .sub }, + .div_float, .div_trunc, .div_floor, .div_exact => .{ .v_ps, .div }, + .max => .{ .v_ps, .max }, + .min => .{ .v_ps, .max }, else => unreachable, }, dst_reg, @@ -6517,7 +6517,7 @@ fn genBinOp( tmp_reg, ); try self.asmRegisterRegisterImmediate( - .{ ._, .vcvtps2ph }, + .{ .v_, .cvtps2ph }, dst_reg, dst_reg, Immediate.u(0b1_00), @@ -6529,13 +6529,13 @@ fn genBinOp( const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg); defer self.register_manager.unlockReg(tmp_lock); - try self.asmRegisterRegister(.{ ._, .vcvtph2ps }, dst_reg.to256(), dst_reg); + try self.asmRegisterRegister(.{ .v_, .cvtph2ps }, dst_reg.to256(), dst_reg); if (src_mcv.isMemory()) try self.asmRegisterMemory( - .{ ._, .vcvtph2ps }, + .{ .v_, .cvtph2ps }, tmp_reg, src_mcv.mem(.xword), ) else try self.asmRegisterRegister( - .{ ._, .vcvtph2ps }, + .{ .v_, .cvtph2ps }, tmp_reg, (if (src_mcv.isRegister()) src_mcv.getReg().? 
@@ -6544,11 +6544,11 @@ fn genBinOp( ); try self.asmRegisterRegisterRegister( switch (air_tag) { - .add => .{ ._, .vaddps }, - .sub => .{ ._, .vsubps }, - .div_float, .div_trunc, .div_floor, .div_exact => .{ ._, .vdivps }, - .max => .{ ._, .vmaxps }, - .min => .{ ._, .vmaxps }, + .add => .{ .v_ps, .add }, + .sub => .{ .v_ps, .sub }, + .div_float, .div_trunc, .div_floor, .div_exact => .{ .v_ps, .div }, + .max => .{ .v_ps, .max }, + .min => .{ .v_ps, .max }, else => unreachable, }, dst_reg.to256(), @@ -6556,7 +6556,7 @@ fn genBinOp( tmp_reg, ); try self.asmRegisterRegisterImmediate( - .{ ._, .vcvtps2ph }, + .{ .v_, .cvtps2ph }, dst_reg, dst_reg.to256(), Immediate.u(0b1_00), @@ -6567,76 +6567,76 @@ fn genBinOp( } else null, 32 => switch (lhs_ty.vectorLen()) { 1 => switch (air_tag) { - .add => if (self.hasFeature(.avx)) .{ ._, .vaddss } else .{ ._, .addss }, - .sub => if (self.hasFeature(.avx)) .{ ._, .vsubss } else .{ ._, .subss }, - .mul => if (self.hasFeature(.avx)) .{ ._, .vmulss } else .{ ._, .mulss }, + .add => if (self.hasFeature(.avx)) .{ .v_ss, .add } else .{ ._ss, .add }, + .sub => if (self.hasFeature(.avx)) .{ .v_ss, .sub } else .{ ._ss, .sub }, + .mul => if (self.hasFeature(.avx)) .{ .v_ss, .mul } else .{ ._ss, .mul }, .div_float, .div_trunc, .div_floor, .div_exact, - => if (self.hasFeature(.avx)) .{ ._, .vdivss } else .{ ._, .divss }, - .max => if (self.hasFeature(.avx)) .{ ._, .vmaxss } else .{ ._, .maxss }, - .min => if (self.hasFeature(.avx)) .{ ._, .vminss } else .{ ._, .minss }, + => if (self.hasFeature(.avx)) .{ .v_ss, .div } else .{ ._ss, .div }, + .max => if (self.hasFeature(.avx)) .{ .v_ss, .max } else .{ ._ss, .max }, + .min => if (self.hasFeature(.avx)) .{ .v_ss, .min } else .{ ._ss, .min }, else => unreachable, }, 2...4 => switch (air_tag) { - .add => if (self.hasFeature(.avx)) .{ ._, .vaddps } else .{ ._, .addps }, - .sub => if (self.hasFeature(.avx)) .{ ._, .vsubps } else .{ ._, .subps }, - .mul => if (self.hasFeature(.avx)) .{ ._, .vmulps } else .{ ._, .mulps }, + .add => if (self.hasFeature(.avx)) .{ .v_ps, .add } else .{ ._ps, .add }, + .sub => if (self.hasFeature(.avx)) .{ .v_ps, .sub } else .{ ._ps, .sub }, + .mul => if (self.hasFeature(.avx)) .{ .v_ps, .mul } else .{ ._ps, .mul }, .div_float, .div_trunc, .div_floor, .div_exact, - => if (self.hasFeature(.avx)) .{ ._, .vdivps } else .{ ._, .divps }, - .max => if (self.hasFeature(.avx)) .{ ._, .vmaxps } else .{ ._, .maxps }, - .min => if (self.hasFeature(.avx)) .{ ._, .vminps } else .{ ._, .minps }, + => if (self.hasFeature(.avx)) .{ .v_ps, .div } else .{ ._ps, .div }, + .max => if (self.hasFeature(.avx)) .{ .v_ps, .max } else .{ ._ps, .max }, + .min => if (self.hasFeature(.avx)) .{ .v_ps, .min } else .{ ._ps, .min }, else => unreachable, }, 5...8 => if (self.hasFeature(.avx)) switch (air_tag) { - .add => .{ ._, .vaddps }, - .sub => .{ ._, .vsubps }, - .mul => .{ ._, .vmulps }, - .div_float, .div_trunc, .div_floor, .div_exact => .{ ._, .vdivps }, - .max => .{ ._, .vmaxps }, - .min => .{ ._, .vminps }, + .add => .{ .v_ps, .add }, + .sub => .{ .v_ps, .sub }, + .mul => .{ .v_ps, .mul }, + .div_float, .div_trunc, .div_floor, .div_exact => .{ .v_ps, .div }, + .max => .{ .v_ps, .max }, + .min => .{ .v_ps, .min }, else => unreachable, } else null, else => null, }, 64 => switch (lhs_ty.vectorLen()) { 1 => switch (air_tag) { - .add => if (self.hasFeature(.avx)) .{ ._, .vaddsd } else .{ ._, .addsd }, - .sub => if (self.hasFeature(.avx)) .{ ._, .vsubsd } else .{ ._, .subsd }, - .mul => if (self.hasFeature(.avx)) .{ ._, 
.vmulsd } else .{ ._, .mulsd }, + .add => if (self.hasFeature(.avx)) .{ .v_sd, .add } else .{ ._sd, .add }, + .sub => if (self.hasFeature(.avx)) .{ .v_sd, .sub } else .{ ._sd, .sub }, + .mul => if (self.hasFeature(.avx)) .{ .v_sd, .mul } else .{ ._sd, .mul }, .div_float, .div_trunc, .div_floor, .div_exact, - => if (self.hasFeature(.avx)) .{ ._, .vdivsd } else .{ ._, .divsd }, - .max => if (self.hasFeature(.avx)) .{ ._, .vmaxsd } else .{ ._, .maxsd }, - .min => if (self.hasFeature(.avx)) .{ ._, .vminsd } else .{ ._, .minsd }, + => if (self.hasFeature(.avx)) .{ .v_sd, .div } else .{ ._sd, .div }, + .max => if (self.hasFeature(.avx)) .{ .v_sd, .max } else .{ ._sd, .max }, + .min => if (self.hasFeature(.avx)) .{ .v_sd, .min } else .{ ._sd, .min }, else => unreachable, }, 2 => switch (air_tag) { - .add => if (self.hasFeature(.avx)) .{ ._, .vaddpd } else .{ ._, .addpd }, - .sub => if (self.hasFeature(.avx)) .{ ._, .vsubpd } else .{ ._, .subpd }, - .mul => if (self.hasFeature(.avx)) .{ ._, .vmulpd } else .{ ._, .mulpd }, + .add => if (self.hasFeature(.avx)) .{ .v_pd, .add } else .{ ._pd, .add }, + .sub => if (self.hasFeature(.avx)) .{ .v_pd, .sub } else .{ ._pd, .sub }, + .mul => if (self.hasFeature(.avx)) .{ .v_pd, .mul } else .{ ._pd, .mul }, .div_float, .div_trunc, .div_floor, .div_exact, - => if (self.hasFeature(.avx)) .{ ._, .vdivpd } else .{ ._, .divpd }, - .max => if (self.hasFeature(.avx)) .{ ._, .vmaxpd } else .{ ._, .maxpd }, - .min => if (self.hasFeature(.avx)) .{ ._, .vminpd } else .{ ._, .minpd }, + => if (self.hasFeature(.avx)) .{ .v_pd, .div } else .{ ._pd, .div }, + .max => if (self.hasFeature(.avx)) .{ .v_pd, .max } else .{ ._pd, .max }, + .min => if (self.hasFeature(.avx)) .{ .v_pd, .min } else .{ ._pd, .min }, else => unreachable, }, 3...4 => if (self.hasFeature(.avx)) switch (air_tag) { - .add => .{ ._, .vaddpd }, - .sub => .{ ._, .vsubpd }, - .mul => .{ ._, .vmulpd }, - .div_float, .div_trunc, .div_floor, .div_exact => .{ ._, .vdivpd }, - .max => .{ ._, .vmaxpd }, - .min => .{ ._, .vminpd }, + .add => .{ .v_pd, .add }, + .sub => .{ .v_pd, .sub }, + .mul => .{ .v_pd, .mul }, + .div_float, .div_trunc, .div_floor, .div_exact => .{ .v_pd, .div }, + .max => .{ .v_pd, .max }, + .min => .{ .v_pd, .min }, else => unreachable, } else null, else => null, @@ -7563,13 +7563,13 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { defer self.register_manager.unlockReg(tmp2_lock); if (src_mcv.isMemory()) try self.asmRegisterRegisterMemoryImmediate( - .{ ._, .vpinsrw }, + .{ .vp_w, .insr }, tmp1_reg, dst_reg.to128(), src_mcv.mem(.word), Immediate.u(1), ) else try self.asmRegisterRegisterRegister( - .{ ._, .vpunpcklwd }, + .{ .vp_, .unpcklwd }, tmp1_reg, dst_reg.to128(), (if (src_mcv.isRegister()) @@ -7577,20 +7577,20 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { else try self.copyToTmpRegister(ty, src_mcv)).to128(), ); - try self.asmRegisterRegister(.{ ._, .vcvtph2ps }, tmp1_reg, tmp1_reg); - try self.asmRegisterRegister(.{ ._, .vmovshdup }, tmp2_reg, tmp1_reg); - try self.genBinOpMir(.{ ._, .ucomiss }, ty, tmp1_mcv, tmp2_mcv); + try self.asmRegisterRegister(.{ .v_, .cvtph2ps }, tmp1_reg, tmp1_reg); + try self.asmRegisterRegister(.{ .v_, .movshdup }, tmp2_reg, tmp1_reg); + try self.genBinOpMir(.{ ._ss, .ucomi }, ty, tmp1_mcv, tmp2_mcv); } else return self.fail("TODO implement airCmp for {}", .{ ty.fmt(self.bin_file.options.module.?), }), 32 => try self.genBinOpMir( - .{ ._, .ucomiss }, + .{ ._ss, .ucomi }, ty, .{ .register = 
dst_reg }, src_mcv, ), 64 => try self.genBinOpMir( - .{ ._, .ucomisd }, + .{ ._sd, .ucomi }, ty, .{ .register = dst_reg }, src_mcv, @@ -8573,42 +8573,42 @@ fn movMirTag(self: *Self, ty: Type, aligned: bool) !Mir.Inst.FixedTag { else => return .{ ._, .mov }, .Float => switch (ty.floatBits(self.target.*)) { 16 => unreachable, // needs special handling - 32 => return if (self.hasFeature(.avx)) .{ ._, .vmovss } else .{ ._, .movss }, - 64 => return if (self.hasFeature(.avx)) .{ ._, .vmovsd } else .{ ._, .movsd }, + 32 => return if (self.hasFeature(.avx)) .{ .v_ss, .mov } else .{ ._ss, .mov }, + 64 => return if (self.hasFeature(.avx)) .{ .v_sd, .mov } else .{ ._sd, .mov }, 128 => return if (self.hasFeature(.avx)) - if (aligned) .{ ._, .vmovaps } else .{ ._, .vmovups } - else if (aligned) .{ ._, .movaps } else .{ ._, .movups }, + if (aligned) .{ .v_ps, .mova } else .{ .v_ps, .movu } + else if (aligned) .{ ._ps, .mova } else .{ ._ps, .movu }, else => {}, }, .Vector => switch (ty.childType().zigTypeTag()) { .Float => switch (ty.childType().floatBits(self.target.*)) { 16 => switch (ty.vectorLen()) { 1 => unreachable, // needs special handling - 2 => return if (self.hasFeature(.avx)) .{ ._, .vmovss } else .{ ._, .movss }, - 3...4 => return if (self.hasFeature(.avx)) .{ ._, .vmovsd } else .{ ._, .movsd }, + 2 => return if (self.hasFeature(.avx)) .{ .v_ss, .mov } else .{ ._ss, .mov }, + 3...4 => return if (self.hasFeature(.avx)) .{ .v_sd, .mov } else .{ ._sd, .mov }, 5...8 => return if (self.hasFeature(.avx)) - if (aligned) .{ ._, .vmovaps } else .{ ._, .vmovups } - else if (aligned) .{ ._, .movaps } else .{ ._, .movups }, + if (aligned) .{ .v_ps, .mova } else .{ .v_ps, .movu } + else if (aligned) .{ ._ps, .mova } else .{ ._ps, .movu }, 9...16 => if (self.hasFeature(.avx)) - return if (aligned) .{ ._, .vmovaps } else .{ ._, .vmovups }, + return if (aligned) .{ .v_ps, .mova } else .{ .v_ps, .movu }, else => {}, }, 32 => switch (ty.vectorLen()) { - 1 => return if (self.hasFeature(.avx)) .{ ._, .vmovss } else .{ ._, .movss }, + 1 => return if (self.hasFeature(.avx)) .{ .v_ss, .mov } else .{ ._ss, .mov }, 2...4 => return if (self.hasFeature(.avx)) - if (aligned) .{ ._, .vmovaps } else .{ ._, .vmovups } - else if (aligned) .{ ._, .movaps } else .{ ._, .movups }, + if (aligned) .{ .v_ps, .mova } else .{ .v_ps, .movu } + else if (aligned) .{ ._ps, .mova } else .{ ._ps, .movu }, 5...8 => if (self.hasFeature(.avx)) - return if (aligned) .{ ._, .vmovaps } else .{ ._, .vmovups }, + return if (aligned) .{ .v_ps, .mova } else .{ .v_ps, .movu }, else => {}, }, 64 => switch (ty.vectorLen()) { - 1 => return if (self.hasFeature(.avx)) .{ ._, .vmovsd } else .{ ._, .movsd }, + 1 => return if (self.hasFeature(.avx)) .{ .v_sd, .mov } else .{ ._sd, .mov }, 2 => return if (self.hasFeature(.avx)) - if (aligned) .{ ._, .vmovaps } else .{ ._, .vmovups } - else if (aligned) .{ ._, .movaps } else .{ ._, .movups }, + if (aligned) .{ .v_ps, .mova } else .{ .v_ps, .movu } + else if (aligned) .{ ._ps, .mova } else .{ ._ps, .movu }, 3...4 => if (self.hasFeature(.avx)) - return if (aligned) .{ ._, .vmovaps } else .{ ._, .vmovups }, + return if (aligned) .{ .v_ps, .mova } else .{ .v_ps, .movu }, else => {}, }, else => {}, @@ -8724,11 +8724,11 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr if ((dst_reg.class() == .floating_point) == (src_reg.class() == .floating_point)) switch (ty.zigTypeTag()) { else => .{ ._, .mov }, - .Float, .Vector => .{ ._, .movaps }, + .Float, .Vector => .{ ._ps, .mova }, } 
else switch (abi_size) { 2 => return try self.asmRegisterRegisterImmediate( - if (dst_reg.class() == .floating_point) .{ ._, .pinsrw } else .{ ._, .pextrw }, + if (dst_reg.class() == .floating_point) .{ .p_w, .insr } else .{ .p_w, .extr }, registerAlias(dst_reg, 4), registerAlias(src_reg, 4), Immediate.u(0), @@ -8761,7 +8761,7 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr }); if (ty.isRuntimeFloat() and ty.floatBits(self.target.*) == 16) try self.asmRegisterMemoryImmediate( - .{ ._, .pinsrw }, + .{ .p_w, .insr }, registerAlias(dst_reg, abi_size), src_mem, Immediate.u(0), @@ -8794,7 +8794,7 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr }); return if (ty.isRuntimeFloat() and ty.floatBits(self.target.*) == 16) self.asmRegisterMemoryImmediate( - .{ ._, .pinsrw }, + .{ .p_w, .insr }, registerAlias(dst_reg, abi_size), src_mem, Immediate.u(0), @@ -8838,7 +8838,7 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr }); if (ty.isRuntimeFloat() and ty.floatBits(self.target.*) == 16) try self.asmRegisterMemoryImmediate( - .{ ._, .pinsrw }, + .{ .p_w, .insr }, registerAlias(dst_reg, abi_size), src_mem, Immediate.u(0), @@ -8952,7 +8952,7 @@ fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCVal ); if (ty.isRuntimeFloat() and ty.floatBits(self.target.*) == 16) try self.asmMemoryRegisterImmediate( - .{ ._, .pextrw }, + .{ .p_w, .extr }, dst_mem, src_reg.to128(), Immediate.u(0), @@ -9069,7 +9069,7 @@ fn genInlineMemcpyRegisterRegister( try self.asmMemoryRegister( switch (src_reg.class()) { .general_purpose, .segment => .{ ._, .mov }, - .floating_point => .{ ._, .movss }, + .floating_point => .{ ._ss, .mov }, }, Memory.sib(Memory.PtrSize.fromSize(abi_size), .{ .base = dst_reg, .disp = -offset }), registerAlias(src_reg, abi_size), @@ -10197,21 +10197,21 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void { if (mem.eql(u2, &order, &.{ 1, 3, 2 }) or mem.eql(u2, &order, &.{ 3, 1, 2 })) switch (ty.zigTypeTag()) { .Float => switch (ty.floatBits(self.target.*)) { - 32 => .{ ._, .vfmadd132ss }, - 64 => .{ ._, .vfmadd132sd }, + 32 => .{ .v_ss, .fmadd132 }, + 64 => .{ .v_sd, .fmadd132 }, 16, 80, 128 => null, else => unreachable, }, .Vector => switch (ty.childType().zigTypeTag()) { .Float => switch (ty.childType().floatBits(self.target.*)) { 32 => switch (ty.vectorLen()) { - 1 => .{ ._, .vfmadd132ss }, - 2...8 => .{ ._, .vfmadd132ps }, + 1 => .{ .v_ss, .fmadd132 }, + 2...8 => .{ .v_ps, .fmadd132 }, else => null, }, 64 => switch (ty.vectorLen()) { - 1 => .{ ._, .vfmadd132sd }, - 2...4 => .{ ._, .vfmadd132pd }, + 1 => .{ .v_sd, .fmadd132 }, + 2...4 => .{ .v_pd, .fmadd132 }, else => null, }, 16, 80, 128 => null, @@ -10224,21 +10224,21 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void { else if (mem.eql(u2, &order, &.{ 2, 1, 3 }) or mem.eql(u2, &order, &.{ 1, 2, 3 })) switch (ty.zigTypeTag()) { .Float => switch (ty.floatBits(self.target.*)) { - 32 => .{ ._, .vfmadd213ss }, - 64 => .{ ._, .vfmadd213sd }, + 32 => .{ .v_ss, .fmadd213 }, + 64 => .{ .v_sd, .fmadd213 }, 16, 80, 128 => null, else => unreachable, }, .Vector => switch (ty.childType().zigTypeTag()) { .Float => switch (ty.childType().floatBits(self.target.*)) { 32 => switch (ty.vectorLen()) { - 1 => .{ ._, .vfmadd213ss }, - 2...8 => .{ ._, .vfmadd213ps }, + 1 => .{ .v_ss, .fmadd213 }, + 2...8 => .{ .v_ps, .fmadd213 }, else => null, }, 64 => switch (ty.vectorLen()) { - 1 => .{ ._, .vfmadd213sd }, - 2...4 => .{ ._, 
.vfmadd213pd }, + 1 => .{ .v_sd, .fmadd213 }, + 2...4 => .{ .v_pd, .fmadd213 }, else => null, }, 16, 80, 128 => null, @@ -10251,21 +10251,21 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void { else if (mem.eql(u2, &order, &.{ 2, 3, 1 }) or mem.eql(u2, &order, &.{ 3, 2, 1 })) switch (ty.zigTypeTag()) { .Float => switch (ty.floatBits(self.target.*)) { - 32 => .{ ._, .vfmadd231ss }, - 64 => .{ ._, .vfmadd231sd }, + 32 => .{ .v_ss, .fmadd231 }, + 64 => .{ .v_sd, .fmadd231 }, 16, 80, 128 => null, else => unreachable, }, .Vector => switch (ty.childType().zigTypeTag()) { .Float => switch (ty.childType().floatBits(self.target.*)) { 32 => switch (ty.vectorLen()) { - 1 => .{ ._, .vfmadd231ss }, - 2...8 => .{ ._, .vfmadd231ps }, + 1 => .{ .v_ss, .fmadd231 }, + 2...8 => .{ .v_ps, .fmadd231 }, else => null, }, 64 => switch (ty.vectorLen()) { - 1 => .{ ._, .vfmadd231sd }, - 2...4 => .{ ._, .vfmadd231pd }, + 1 => .{ .v_sd, .fmadd231 }, + 2...4 => .{ .v_pd, .fmadd231 }, else => null, }, 16, 80, 128 => null, diff --git a/src/arch/x86_64/Mir.zig b/src/arch/x86_64/Mir.zig index 6b5e2bded7..0a7b5597b3 100644 --- a/src/arch/x86_64/Mir.zig +++ b/src/arch/x86_64/Mir.zig @@ -278,8 +278,14 @@ pub const Inst = struct { /// Add with carry adc, /// Add + /// Add packed single-precision floating-point values + /// Add scalar single-precision floating-point values + /// Add packed double-precision floating-point values + /// Add scalar double-precision floating-point values add, /// Logical and + /// Bitwise logical and of packed single-precision floating-point values + /// Bitwise logical and of packed double-precision floating-point values @"and", /// Bit scan forward bsf, @@ -304,6 +310,8 @@ pub const Inst = struct { cmov, /// Logical compare /// Compare string + /// Compare scalar single-precision floating-point values + /// Compare scalar double-precision floating-point values cmp, /// Compare and exchange /// Compare and exchange bytes @@ -316,6 +324,10 @@ pub const Inst = struct { cwde, /// Unsigned division /// Signed division + /// Divide packed single-precision floating-point values + /// Divide scalar single-precision floating-point values + /// Divide packed double-precision floating-point values + /// Divide scalar double-precision floating-point values div, /// int3, @@ -339,6 +351,8 @@ pub const Inst = struct { mfence, /// Move /// Move data from string to string + /// Move scalar single-precision floating-point value + /// Move scalar double-precision floating-point value /// Move doubleword /// Move quadword mov, @@ -350,6 +364,10 @@ pub const Inst = struct { movzx, /// Multiply /// Signed multiplication + /// Multiply packed single-precision floating-point values + /// Multiply scalar single-precision floating-point values + /// Multiply packed double-precision floating-point values + /// Multiply scalar double-precision floating-point values mul, /// Two's complement negation neg, @@ -358,6 +376,8 @@ pub const Inst = struct { /// One's complement negation not, /// Logical or + /// Bitwise logical or of packed single-precision floating-point values + /// Bitwise logical or of packed double-precision floating-point values @"or", /// Pop pop, @@ -390,6 +410,10 @@ pub const Inst = struct { /// Double precision shift right sh, /// Subtract + /// Subtract packed single-precision floating-point values + /// Subtract scalar single-precision floating-point values + /// Subtract packed double-precision floating-point values + /// Subtract scalar double-precision floating-point values sub, /// Store 
string sto, @@ -406,145 +430,88 @@ pub const Inst = struct { /// Exchange register/memory with register xchg, /// Logical exclusive-or + /// Bitwise logical xor of packed single-precision floating-point values + /// Bitwise logical xor of packed double-precision floating-point values xor, - /// Add packed single-precision floating-point values - addps, - /// Add scalar single-precision floating-point values - addss, - /// Bitwise logical and of packed single precision floating-point values - andps, - /// Bitwise logical and not of packed single precision floating-point values - andnps, - /// Compare scalar single-precision floating-point values - cmpss, + /// Bitwise logical and not of packed single-precision floating-point values + /// Bitwise logical and not of packed double-precision floating-point values + andn, /// Convert doubleword integer to scalar single-precision floating-point value cvtsi2ss, - /// Divide packed single-precision floating-point values - divps, - /// Divide scalar single-precision floating-point values - divss, /// Maximum of packed single-precision floating-point values - maxps, /// Maximum of scalar single-precision floating-point values - maxss, + /// Maximum of packed double-precision floating-point values + /// Maximum of scalar double-precision floating-point values + max, /// Minimum of packed single-precision floating-point values - minps, /// Minimum of scalar single-precision floating-point values - minss, + /// Minimum of packed double-precision floating-point values + /// Minimum of scalar double-precision floating-point values + min, /// Move aligned packed single-precision floating-point values - movaps, + /// Move aligned packed double-precision floating-point values + mova, /// Move packed single-precision floating-point values high to low - movhlps, - /// Move scalar single-precision floating-point value - movss, + movhl, /// Move unaligned packed single-precision floating-point values - movups, - /// Multiply packed single-precision floating-point values - mulps, - /// Multiply scalar single-precision floating-point values - mulss, - /// Bitwise logical or of packed single precision floating-point values - orps, + /// Move unaligned packed double-precision floating-point values + movu, + /// Extract byte /// Extract word - pextrw, + /// Extract doubleword + /// Extract quadword + extr, + /// Insert byte /// Insert word - pinsrw, + /// Insert doubleword + /// Insert quadword + insr, /// Square root of packed single-precision floating-point values - sqrtps, /// Square root of scalar single-precision floating-point value - sqrtss, - /// Subtract packed single-precision floating-point values - subps, - /// Subtract scalar single-precision floating-point values - subss, + /// Square root of packed double-precision floating-point values + /// Square root of scalar double-precision floating-point value + sqrt, /// Unordered compare scalar single-precision floating-point values - ucomiss, + /// Unordered compare scalar double-precision floating-point values + ucomi, /// Unpack and interleave high packed single-precision floating-point values - unpckhps, + /// Unpack and interleave high packed double-precision floating-point values + unpckh, /// Unpack and interleave low packed single-precision floating-point values - unpcklps, - /// Bitwise logical xor of packed single precision floating-point values - xorps, + /// Unpack and interleave low packed double-precision floating-point values + unpckl, - /// Add packed double-precision floating-point values - 
addpd, - /// Add scalar double-precision floating-point values - addsd, - /// Bitwise logical and not of packed double precision floating-point values - andnpd, - /// Bitwise logical and of packed double precision floating-point values - andpd, - /// Compare scalar double-precision floating-point values - cmpsd, /// Convert scalar double-precision floating-point value to scalar single-precision floating-point value cvtsd2ss, /// Convert doubleword integer to scalar double-precision floating-point value cvtsi2sd, /// Convert scalar single-precision floating-point value to scalar double-precision floating-point value cvtss2sd, - /// Divide packed double-precision floating-point values - divpd, - /// Divide scalar double-precision floating-point values - divsd, - /// Maximum of packed double-precision floating-point values - maxpd, - /// Maximum of scalar double-precision floating-point values - maxsd, - /// Minimum of packed double-precision floating-point values - minpd, - /// Minimum of scalar double-precision floating-point values - minsd, - /// Move scalar double-precision floating-point value - movsd, - /// Multiply packed double-precision floating-point values - mulpd, - /// Multiply scalar double-precision floating-point values - mulsd, - /// Bitwise logical or of packed double precision floating-point values - orpd, /// Shuffle packed high words - pshufhw, + shufh, /// Shuffle packed low words - pshuflw, + shufl, /// Shift packed data right logical - psrld, /// Shift packed data right logical - psrlq, /// Shift packed data right logical - psrlw, + srl, /// Unpack high data - punpckhbw, + unpckhbw, /// Unpack high data - punpckhdq, + unpckhdq, /// Unpack high data - punpckhqdq, + unpckhqdq, /// Unpack high data - punpckhwd, + unpckhwd, /// Unpack low data - punpcklbw, + unpcklbw, /// Unpack low data - punpckldq, + unpckldq, /// Unpack low data - punpcklqdq, + unpcklqdq, /// Unpack low data - punpcklwd, - /// Square root of double precision floating-point values - sqrtpd, - /// Square root of scalar double precision floating-point value - sqrtsd, - /// Subtract packed double-precision floating-point values - subpd, - /// Subtract scalar double-precision floating-point values - subsd, - /// Unordered compare scalar double-precision floating-point values - ucomisd, - /// Unpack and interleave high packed double-precision floating-point values - unpckhpd, - /// Unpack and interleave low packed double-precision floating-point values - unpcklpd, - /// Bitwise logical xor of packed double precision floating-point values - xorpd, + unpcklwd, /// Replicate double floating-point values movddup, @@ -553,199 +520,32 @@ pub const Inst = struct { /// Replicate single floating-point values movsldup, - /// Extract Byte - pextrb, - /// Extract Doubleword - pextrd, - /// Extract Quadword - pextrq, - /// Insert Byte - pinsrb, - /// Insert Doubleword - pinsrd, - /// Insert Quadword - pinsrq, - /// Round packed double-precision floating-point values - roundpd, /// Round packed single-precision floating-point values - roundps, - /// Round scalar double-precision floating-point value - roundsd, /// Round scalar single-precision floating-point value - roundss, - - /// Add packed double-precision floating-point values - vaddpd, - /// Add packed single-precision floating-point values - vaddps, - /// Add scalar double-precision floating-point values - vaddsd, - /// Add scalar single-precision floating-point values - vaddss, - /// Convert scalar double-precision floating-point value to scalar single-precision 
floating-point value - vcvtsd2ss, - /// Convert doubleword integer to scalar double-precision floating-point value - vcvtsi2sd, - /// Convert doubleword integer to scalar single-precision floating-point value - vcvtsi2ss, - /// Convert scalar single-precision floating-point value to scalar double-precision floating-point value - vcvtss2sd, - /// Divide packed double-precision floating-point values - vdivpd, - /// Divide packed single-precision floating-point values - vdivps, - /// Divide scalar double-precision floating-point values - vdivsd, - /// Divide scalar single-precision floating-point values - vdivss, - /// Maximum of packed double-precision floating-point values - vmaxpd, - /// Maximum of packed single-precision floating-point values - vmaxps, - /// Maximum of scalar double-precision floating-point values - vmaxsd, - /// Maximum of scalar single-precision floating-point values - vmaxss, - /// Minimum of packed double-precision floating-point values - vminpd, - /// Minimum of packed single-precision floating-point values - vminps, - /// Minimum of scalar double-precision floating-point values - vminsd, - /// Minimum of scalar single-precision floating-point values - vminss, - /// Move aligned packed double-precision floating-point values - vmovapd, - /// Move aligned packed single-precision floating-point values - vmovaps, - /// Move packed single-precision floating-point values high to low - vmovhlps, - /// Replicate double floating-point values - vmovddup, - /// Move or merge scalar double-precision floating-point value - vmovsd, - /// Replicate single floating-point values - vmovshdup, - /// Replicate single floating-point values - vmovsldup, - /// Move or merge scalar single-precision floating-point value - vmovss, - /// Move unaligned packed double-precision floating-point values - vmovupd, - /// Move unaligned packed single-precision floating-point values - vmovups, - /// Multiply packed double-precision floating-point values - vmulpd, - /// Multiply packed single-precision floating-point values - vmulps, - /// Multiply scalar double-precision floating-point values - vmulsd, - /// Multiply scalar single-precision floating-point values - vmulss, - /// Extract Byte - vpextrb, - /// Extract Doubleword - vpextrd, - /// Extract Quadword - vpextrq, - /// Extract word - vpextrw, - /// Insert Byte - vpinsrb, - /// Insert Doubleword - vpinsrd, - /// Insert Quadword - vpinsrq, - /// Insert word - vpinsrw, - /// Shuffle packed high words - vpshufhw, - /// Shuffle packed low words - vpshuflw, - /// Shift packed data right logical - vpsrld, - /// Shift packed data right logical - vpsrlq, - /// Shift packed data right logical - vpsrlw, - /// Unpack high data - vpunpckhbw, - /// Unpack high data - vpunpckhdq, - /// Unpack high data - vpunpckhqdq, - /// Unpack high data - vpunpckhwd, - /// Unpack low data - vpunpcklbw, - /// Unpack low data - vpunpckldq, - /// Unpack low data - vpunpcklqdq, - /// Unpack low data - vpunpcklwd, /// Round packed double-precision floating-point values - vroundpd, - /// Round packed single-precision floating-point values - vroundps, /// Round scalar double-precision floating-point value - vroundsd, - /// Round scalar single-precision floating-point value - vroundss, - /// Square root of packed double-precision floating-point value - vsqrtpd, - /// Square root of packed single-precision floating-point value - vsqrtps, - /// Square root of scalar double-precision floating-point value - vsqrtsd, - /// Square root of scalar single-precision floating-point value - 
vsqrtss, - /// Subtract packed double-precision floating-point values - vsubpd, - /// Subtract packed single-precision floating-point values - vsubps, - /// Subtract scalar double-precision floating-point values - vsubsd, - /// Subtract scalar single-precision floating-point values - vsubss, - /// Unpack and interleave high packed double-precision floating-point values - vunpckhpd, - /// Unpack and interleave high packed single-precision floating-point values - vunpckhps, - /// Unpack and interleave low packed double-precision floating-point values - vunpcklpd, - /// Unpack and interleave low packed single-precision floating-point values - vunpcklps, + round, /// Convert 16-bit floating-point values to single-precision floating-point values - vcvtph2ps, + cvtph2ps, /// Convert single-precision floating-point values to 16-bit floating-point values - vcvtps2ph, + cvtps2ph, - /// Fused multiply-add of packed double-precision floating-point values - vfmadd132pd, - /// Fused multiply-add of packed double-precision floating-point values - vfmadd213pd, - /// Fused multiply-add of packed double-precision floating-point values - vfmadd231pd, - /// Fused multiply-add of packed single-precision floating-point values - vfmadd132ps, /// Fused multiply-add of packed single-precision floating-point values - vfmadd213ps, - /// Fused multiply-add of packed single-precision floating-point values - vfmadd231ps, - /// Fused multiply-add of scalar double-precision floating-point values - vfmadd132sd, - /// Fused multiply-add of scalar double-precision floating-point values - vfmadd213sd, - /// Fused multiply-add of scalar double-precision floating-point values - vfmadd231sd, /// Fused multiply-add of scalar single-precision floating-point values - vfmadd132ss, + /// Fused multiply-add of packed double-precision floating-point values + /// Fused multiply-add of scalar double-precision floating-point values + fmadd132, + /// Fused multiply-add of packed single-precision floating-point values /// Fused multiply-add of scalar single-precision floating-point values - vfmadd213ss, + /// Fused multiply-add of packed double-precision floating-point values + /// Fused multiply-add of scalar double-precision floating-point values + fmadd213, + /// Fused multiply-add of packed single-precision floating-point values /// Fused multiply-add of scalar single-precision floating-point values - vfmadd231ss, + /// Fused multiply-add of packed double-precision floating-point values + /// Fused multiply-add of scalar double-precision floating-point values + fmadd231, /// A pseudo instruction that requires special lowering. /// This should be the only tag in this enum that doesn't -- cgit v1.2.3 From 815e53b147a321d0bdb47dc008aa8181f57175ac Mon Sep 17 00:00:00 2001 From: Ryan Liptak Date: Thu, 4 May 2023 18:05:40 -0700 Subject: Update all std.mem.tokenize calls to their appropriate function Everywhere that can now use `tokenizeScalar` should get a nice little performance boost. 
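For reference, a minimal sketch of the behavioral difference between the two replacements, using made-up input strings (illustrative only, not part of this diff):

    const std = @import("std");

    test "tokenizeScalar vs tokenizeAny" {
        // tokenizeScalar splits on a single delimiter byte.
        var path_it = std.mem.tokenizeScalar(u8, "/usr/local/bin", '/');
        try std.testing.expectEqualStrings("usr", path_it.next().?);
        // tokenizeAny splits on any byte from a set of delimiters.
        var hdr_it = std.mem.tokenizeAny(u8, "key: value\r\n", ": \r\n");
        try std.testing.expectEqualStrings("key", hdr_it.next().?);
        try std.testing.expectEqualStrings("value", hdr_it.next().?);
    }

Matching a single scalar avoids the per-byte delimiter-set scan that the old generic `tokenize` performed, which is presumably where the small win comes from.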
--- build.zig | 8 ++++---- lib/std/Build.zig | 2 +- lib/std/Build/Cache.zig | 4 ++-- lib/std/Build/Step/CheckObject.zig | 8 ++++---- lib/std/Build/Step/Compile.zig | 6 +++--- lib/std/Build/Step/ConfigHeader.zig | 4 ++-- lib/std/child_process.zig | 4 ++-- lib/std/fs.zig | 2 +- lib/std/fs/path.zig | 26 ++++++++++++------------ lib/std/http/Client.zig | 4 ++-- lib/std/http/Server.zig | 4 ++-- lib/std/net.zig | 6 +++--- lib/std/os.zig | 2 +- lib/std/process.zig | 2 +- lib/std/zig/system/NativePaths.zig | 10 ++++----- lib/std/zig/system/NativeTargetInfo.zig | 4 ++-- src/arch/x86_64/CodeGen.zig | 6 +++--- src/glibc.zig | 2 +- src/libc_installation.zig | 8 ++++---- src/link/Plan9.zig | 2 +- src/print_zir.zig | 2 +- test/behavior/bugs/6456.zig | 2 +- test/src/Cases.zig | 4 ++-- tools/generate_linux_syscalls.zig | 36 ++++++++++++++++----------------- 24 files changed, 79 insertions(+), 79 deletions(-) (limited to 'src/arch') diff --git a/build.zig b/build.zig index 208d06fe1d..21b323df56 100644 --- a/build.zig +++ b/build.zig @@ -284,7 +284,7 @@ pub fn build(b: *std.Build) !void { // That means we also have to rely on stage1 compiled c++ files. We parse config.h to find // the information passed on to us from cmake. if (cfg.cmake_prefix_path.len > 0) { - var it = mem.tokenize(u8, cfg.cmake_prefix_path, ";"); + var it = mem.tokenizeScalar(u8, cfg.cmake_prefix_path, ';'); while (it.next()) |path| { b.addSearchPrefix(path); } @@ -687,7 +687,7 @@ fn addCxxKnownPath( if (!std.process.can_spawn) return error.RequiredLibraryNotFound; const path_padded = b.exec(&.{ ctx.cxx_compiler, b.fmt("-print-file-name={s}", .{objname}) }); - var tokenizer = mem.tokenize(u8, path_padded, "\r\n"); + var tokenizer = mem.tokenizeAny(u8, path_padded, "\r\n"); const path_unpadded = tokenizer.next().?; if (mem.eql(u8, path_unpadded, objname)) { if (errtxt) |msg| { @@ -710,7 +710,7 @@ fn addCxxKnownPath( } fn addCMakeLibraryList(exe: *std.Build.Step.Compile, list: []const u8) void { - var it = mem.tokenize(u8, list, ";"); + var it = mem.tokenizeScalar(u8, list, ';'); while (it.next()) |lib| { if (mem.startsWith(u8, lib, "-l")) { exe.linkSystemLibrary(lib["-l".len..]); @@ -855,7 +855,7 @@ fn parseConfigH(b: *std.Build, config_h_text: []const u8) ?CMakeConfig { // .prefix = ZIG_LLVM_LINK_MODE parsed manually below }; - var lines_it = mem.tokenize(u8, config_h_text, "\r\n"); + var lines_it = mem.tokenizeAny(u8, config_h_text, "\r\n"); while (lines_it.next()) |line| { inline for (mappings) |mapping| { if (mem.startsWith(u8, line, mapping.prefix)) { diff --git a/lib/std/Build.zig b/lib/std/Build.zig index ca55d23937..4ab5db5c70 100644 --- a/lib/std/Build.zig +++ b/lib/std/Build.zig @@ -1358,7 +1358,7 @@ pub fn findProgram(self: *Build, names: []const []const u8, paths: []const []con if (fs.path.isAbsolute(name)) { return name; } - var it = mem.tokenize(u8, PATH, &[_]u8{fs.path.delimiter}); + var it = mem.tokenizeScalar(u8, PATH, fs.path.delimiter); while (it.next()) |path| { const full_path = self.pathJoin(&.{ path, diff --git a/lib/std/Build/Cache.zig b/lib/std/Build/Cache.zig index 17429c0370..7709e5e26c 100644 --- a/lib/std/Build/Cache.zig +++ b/lib/std/Build/Cache.zig @@ -434,7 +434,7 @@ pub const Manifest = struct { const input_file_count = self.files.items.len; var any_file_changed = false; - var line_iter = mem.tokenize(u8, file_contents, "\n"); + var line_iter = mem.tokenizeScalar(u8, file_contents, '\n'); var idx: usize = 0; if (if (line_iter.next()) |line| !std.mem.eql(u8, line, manifest_header) else true) { if (try 
self.upgradeToExclusiveLock()) continue; @@ -463,7 +463,7 @@ pub const Manifest = struct { break :blk new; }; - var iter = mem.tokenize(u8, line, " "); + var iter = mem.tokenizeScalar(u8, line, ' '); const size = iter.next() orelse return error.InvalidFormat; const inode = iter.next() orelse return error.InvalidFormat; const mtime_nsec_str = iter.next() orelse return error.InvalidFormat; diff --git a/lib/std/Build/Step/CheckObject.zig b/lib/std/Build/Step/CheckObject.zig index c77dc3de36..24ebfef388 100644 --- a/lib/std/Build/Step/CheckObject.zig +++ b/lib/std/Build/Step/CheckObject.zig @@ -103,8 +103,8 @@ const Action = struct { assert(act.tag == .match or act.tag == .not_present); const phrase = act.phrase.resolve(b, step); var candidate_var: ?struct { name: []const u8, value: u64 } = null; - var hay_it = mem.tokenize(u8, mem.trim(u8, haystack, " "), " "); - var needle_it = mem.tokenize(u8, mem.trim(u8, phrase, " "), " "); + var hay_it = mem.tokenizeScalar(u8, mem.trim(u8, haystack, " "), ' '); + var needle_it = mem.tokenizeScalar(u8, mem.trim(u8, phrase, " "), ' '); while (needle_it.next()) |needle_tok| { const hay_tok = hay_it.next() orelse return false; @@ -155,7 +155,7 @@ const Action = struct { var op_stack = std.ArrayList(enum { add, sub, mod, mul }).init(gpa); var values = std.ArrayList(u64).init(gpa); - var it = mem.tokenize(u8, phrase, " "); + var it = mem.tokenizeScalar(u8, phrase, ' '); while (it.next()) |next| { if (mem.eql(u8, next, "+")) { try op_stack.append(.add); @@ -365,7 +365,7 @@ fn make(step: *Step, prog_node: *std.Progress.Node) !void { var vars = std.StringHashMap(u64).init(gpa); for (self.checks.items) |chk| { - var it = mem.tokenize(u8, output, "\r\n"); + var it = mem.tokenizeAny(u8, output, "\r\n"); for (chk.actions.items) |act| { switch (act.tag) { .match => { diff --git a/lib/std/Build/Step/Compile.zig b/lib/std/Build/Step/Compile.zig index 2371f49daf..6a05adc1a6 100644 --- a/lib/std/Build/Step/Compile.zig +++ b/lib/std/Build/Step/Compile.zig @@ -777,7 +777,7 @@ fn runPkgConfig(self: *Compile, lib_name: []const u8) ![]const []const u8 { var zig_args = ArrayList([]const u8).init(b.allocator); defer zig_args.deinit(); - var it = mem.tokenize(u8, stdout, " \r\n\t"); + var it = mem.tokenizeAny(u8, stdout, " \r\n\t"); while (it.next()) |tok| { if (mem.eql(u8, tok, "-I")) { const dir = it.next() orelse return error.PkgConfigInvalidOutput; @@ -2017,10 +2017,10 @@ fn execPkgConfigList(self: *std.Build, out_code: *u8) (PkgConfigError || ExecErr const stdout = try self.execAllowFail(&[_][]const u8{ "pkg-config", "--list-all" }, out_code, .Ignore); var list = ArrayList(PkgConfigPkg).init(self.allocator); errdefer list.deinit(); - var line_it = mem.tokenize(u8, stdout, "\r\n"); + var line_it = mem.tokenizeAny(u8, stdout, "\r\n"); while (line_it.next()) |line| { if (mem.trim(u8, line, " \t").len == 0) continue; - var tok_it = mem.tokenize(u8, line, " \t"); + var tok_it = mem.tokenizeAny(u8, line, " \t"); try list.append(PkgConfigPkg{ .name = tok_it.next() orelse return error.PkgConfigInvalidOutput, .desc = tok_it.rest(), diff --git a/lib/std/Build/Step/ConfigHeader.zig b/lib/std/Build/Step/ConfigHeader.zig index f6939e0e38..cd97367218 100644 --- a/lib/std/Build/Step/ConfigHeader.zig +++ b/lib/std/Build/Step/ConfigHeader.zig @@ -257,7 +257,7 @@ fn render_autoconf( try output.appendSlice("\n"); continue; } - var it = std.mem.tokenize(u8, line[1..], " \t\r"); + var it = std.mem.tokenizeAny(u8, line[1..], " \t\r"); const undef = it.next().?; if (!std.mem.eql(u8, undef, 
"undef")) { try output.appendSlice(line); @@ -304,7 +304,7 @@ fn render_cmake( try output.appendSlice("\n"); continue; } - var it = std.mem.tokenize(u8, line[1..], " \t\r"); + var it = std.mem.tokenizeAny(u8, line[1..], " \t\r"); const cmakedefine = it.next().?; if (!std.mem.eql(u8, cmakedefine, "cmakedefine") and !std.mem.eql(u8, cmakedefine, "cmakedefine01")) diff --git a/lib/std/child_process.zig b/lib/std/child_process.zig index daaa1689bc..d94f5ea000 100644 --- a/lib/std/child_process.zig +++ b/lib/std/child_process.zig @@ -850,7 +850,7 @@ pub const ChildProcess = struct { return original_err; } - var it = mem.tokenize(u16, PATH, &[_]u16{';'}); + var it = mem.tokenizeScalar(u16, PATH, ';'); while (it.next()) |search_path| { dir_buf.clearRetainingCapacity(); try dir_buf.appendSlice(self.allocator, search_path); @@ -1067,7 +1067,7 @@ fn windowsCreateProcessPathExt( // Now we know that at least *a* file matching the wildcard exists, we can loop // through PATHEXT in order and exec any that exist - var ext_it = mem.tokenize(u16, pathext, &[_]u16{';'}); + var ext_it = mem.tokenizeScalar(u16, pathext, ';'); while (ext_it.next()) |ext| { if (!windowsCreateProcessSupportsExtension(ext)) continue; diff --git a/lib/std/fs.zig b/lib/std/fs.zig index 7327a3a913..5aeea8a4aa 100644 --- a/lib/std/fs.zig +++ b/lib/std/fs.zig @@ -3021,7 +3021,7 @@ pub fn selfExePath(out_buffer: []u8) SelfExePathError![]u8 { } else if (argv0.len != 0) { // argv[0] is not empty (and not a path): search it inside PATH const PATH = std.os.getenvZ("PATH") orelse return error.FileNotFound; - var path_it = mem.tokenize(u8, PATH, &[_]u8{path.delimiter}); + var path_it = mem.tokenizeScalar(u8, PATH, path.delimiter); while (path_it.next()) |a_path| { var resolved_path_buf: [MAX_PATH_BYTES - 1:0]u8 = undefined; const resolved_path = std.fmt.bufPrintZ(&resolved_path_buf, "{s}/{s}", .{ diff --git a/lib/std/fs/path.zig b/lib/std/fs/path.zig index 4c320ae5cf..e7a28a7615 100644 --- a/lib/std/fs/path.zig +++ b/lib/std/fs/path.zig @@ -358,7 +358,7 @@ pub fn windowsParsePath(path: []const u8) WindowsPath { return relative_path; } - var it = mem.tokenize(u8, path, &[_]u8{this_sep}); + var it = mem.tokenizeScalar(u8, path, this_sep); _ = (it.next() orelse return relative_path); _ = (it.next() orelse return relative_path); return WindowsPath{ @@ -420,8 +420,8 @@ fn networkShareServersEql(ns1: []const u8, ns2: []const u8) bool { const sep1 = ns1[0]; const sep2 = ns2[0]; - var it1 = mem.tokenize(u8, ns1, &[_]u8{sep1}); - var it2 = mem.tokenize(u8, ns2, &[_]u8{sep2}); + var it1 = mem.tokenizeScalar(u8, ns1, sep1); + var it2 = mem.tokenizeScalar(u8, ns2, sep2); // TODO ASCII is wrong, we actually need full unicode support to compare paths. return ascii.eqlIgnoreCase(it1.next().?, it2.next().?); @@ -441,8 +441,8 @@ fn compareDiskDesignators(kind: WindowsPath.Kind, p1: []const u8, p2: []const u8 const sep1 = p1[0]; const sep2 = p2[0]; - var it1 = mem.tokenize(u8, p1, &[_]u8{sep1}); - var it2 = mem.tokenize(u8, p2, &[_]u8{sep2}); + var it1 = mem.tokenizeScalar(u8, p1, sep1); + var it2 = mem.tokenizeScalar(u8, p2, sep2); // TODO ASCII is wrong, we actually need full unicode support to compare paths. return ascii.eqlIgnoreCase(it1.next().?, it2.next().?) 
and ascii.eqlIgnoreCase(it1.next().?, it2.next().?); @@ -535,7 +535,7 @@ pub fn resolveWindows(allocator: Allocator, paths: []const []const u8) ![]u8 { break :l disk_designator.len; }, .NetworkShare => { - var it = mem.tokenize(u8, paths[first_index], "/\\"); + var it = mem.tokenizeAny(u8, paths[first_index], "/\\"); const server_name = it.next().?; const other_name = it.next().?; @@ -570,7 +570,7 @@ pub fn resolveWindows(allocator: Allocator, paths: []const []const u8) ![]u8 { if (!correct_disk_designator) { continue; } - var it = mem.tokenize(u8, p[parsed.disk_designator.len..], "/\\"); + var it = mem.tokenizeAny(u8, p[parsed.disk_designator.len..], "/\\"); while (it.next()) |component| { if (mem.eql(u8, component, ".")) { continue; @@ -657,7 +657,7 @@ pub fn resolvePosix(allocator: Allocator, paths: []const []const u8) Allocator.E negative_count = 0; result.clearRetainingCapacity(); } - var it = mem.tokenize(u8, p, "/"); + var it = mem.tokenizeScalar(u8, p, '/'); while (it.next()) |component| { if (mem.eql(u8, component, ".")) { continue; @@ -1078,8 +1078,8 @@ pub fn relativeWindows(allocator: Allocator, from: []const u8, to: []const u8) ! return resolved_to; } - var from_it = mem.tokenize(u8, resolved_from, "/\\"); - var to_it = mem.tokenize(u8, resolved_to, "/\\"); + var from_it = mem.tokenizeAny(u8, resolved_from, "/\\"); + var to_it = mem.tokenizeAny(u8, resolved_to, "/\\"); while (true) { const from_component = from_it.next() orelse return allocator.dupe(u8, to_it.rest()); const to_rest = to_it.rest(); @@ -1102,7 +1102,7 @@ pub fn relativeWindows(allocator: Allocator, from: []const u8, to: []const u8) ! result_index += 3; } - var rest_it = mem.tokenize(u8, to_rest, "/\\"); + var rest_it = mem.tokenizeAny(u8, to_rest, "/\\"); while (rest_it.next()) |to_component| { result[result_index] = '\\'; result_index += 1; @@ -1124,8 +1124,8 @@ pub fn relativePosix(allocator: Allocator, from: []const u8, to: []const u8) ![] const resolved_to = try resolvePosix(allocator, &[_][]const u8{ cwd, to }); defer allocator.free(resolved_to); - var from_it = mem.tokenize(u8, resolved_from, "/"); - var to_it = mem.tokenize(u8, resolved_to, "/"); + var from_it = mem.tokenizeScalar(u8, resolved_from, '/'); + var to_it = mem.tokenizeScalar(u8, resolved_to, '/'); while (true) { const from_component = from_it.next() orelse return allocator.dupe(u8, to_it.rest()); const to_rest = to_it.rest(); diff --git a/lib/std/http/Client.zig b/lib/std/http/Client.zig index 023bdd28bc..5626864ceb 100644 --- a/lib/std/http/Client.zig +++ b/lib/std/http/Client.zig @@ -386,7 +386,7 @@ pub const Response = struct { }; pub fn parse(res: *Response, bytes: []const u8, trailing: bool) ParseError!void { - var it = mem.tokenize(u8, bytes[0 .. bytes.len - 4], "\r\n"); + var it = mem.tokenizeAny(u8, bytes[0 .. bytes.len - 4], "\r\n"); const first_line = it.next() orelse return error.HttpHeadersInvalid; if (first_line.len < 12) @@ -412,7 +412,7 @@ pub const Response = struct { else => {}, } - var line_it = mem.tokenize(u8, line, ": "); + var line_it = mem.tokenizeAny(u8, line, ": "); const header_name = line_it.next() orelse return error.HttpHeadersInvalid; const header_value = line_it.rest(); diff --git a/lib/std/http/Server.zig b/lib/std/http/Server.zig index 6b5db6725f..51ab6c086b 100644 --- a/lib/std/http/Server.zig +++ b/lib/std/http/Server.zig @@ -231,7 +231,7 @@ pub const Request = struct { }; pub fn parse(req: *Request, bytes: []const u8) ParseError!void { - var it = mem.tokenize(u8, bytes[0 .. 
bytes.len - 4], "\r\n"); + var it = mem.tokenizeAny(u8, bytes[0 .. bytes.len - 4], "\r\n"); const first_line = it.next() orelse return error.HttpHeadersInvalid; if (first_line.len < 10) @@ -265,7 +265,7 @@ pub const Request = struct { else => {}, } - var line_it = mem.tokenize(u8, line, ": "); + var line_it = mem.tokenizeAny(u8, line, ": "); const header_name = line_it.next() orelse return error.HttpHeadersInvalid; const header_value = line_it.rest(); diff --git a/lib/std/net.zig b/lib/std/net.zig index 57e50a7349..4360cc29f4 100644 --- a/lib/std/net.zig +++ b/lib/std/net.zig @@ -1266,7 +1266,7 @@ fn linuxLookupNameFromHosts( var split_it = mem.split(u8, line, "#"); const no_comment_line = split_it.first(); - var line_it = mem.tokenize(u8, no_comment_line, " \t"); + var line_it = mem.tokenizeAny(u8, no_comment_line, " \t"); const ip_text = line_it.next() orelse continue; var first_name_text: ?[]const u8 = null; while (line_it.next()) |name_text| { @@ -1346,7 +1346,7 @@ fn linuxLookupNameFromDnsSearch( @memcpy(canon.items, canon_name); try canon.append('.'); - var tok_it = mem.tokenize(u8, search, " \t"); + var tok_it = mem.tokenizeAny(u8, search, " \t"); while (tok_it.next()) |tok| { canon.shrinkRetainingCapacity(canon_name.len + 1); try canon.appendSlice(tok); @@ -1468,7 +1468,7 @@ fn getResolvConf(allocator: mem.Allocator, rc: *ResolvConf) !void { var split = mem.split(u8, line, "#"); break :no_comment_line split.first(); }; - var line_it = mem.tokenize(u8, no_comment_line, " \t"); + var line_it = mem.tokenizeAny(u8, no_comment_line, " \t"); const token = line_it.next() orelse continue; if (mem.eql(u8, token, "options")) { diff --git a/lib/std/os.zig b/lib/std/os.zig index 779e913230..eac79690b5 100644 --- a/lib/std/os.zig +++ b/lib/std/os.zig @@ -1878,7 +1878,7 @@ pub fn execvpeZ_expandArg0( // Use of MAX_PATH_BYTES here is valid as the path_buf will be passed // directly to the operating system in execveZ. 
var path_buf: [MAX_PATH_BYTES]u8 = undefined; - var it = mem.tokenize(u8, PATH, ":"); + var it = mem.tokenizeScalar(u8, PATH, ':'); var seen_eacces = false; var err: ExecveError = error.FileNotFound; diff --git a/lib/std/process.zig b/lib/std/process.zig index 504f9075eb..c33fd92db6 100644 --- a/lib/std/process.zig +++ b/lib/std/process.zig @@ -1200,7 +1200,7 @@ fn totalSystemMemoryLinux() !usize { var buf: [50]u8 = undefined; const amt = try file.read(&buf); if (amt != 50) return error.Unexpected; - var it = std.mem.tokenize(u8, buf[0..amt], " \n"); + var it = std.mem.tokenizeAny(u8, buf[0..amt], " \n"); const label = it.next().?; if (!std.mem.eql(u8, label, "MemTotal:")) return error.Unexpected; const int_text = it.next() orelse return error.Unexpected; diff --git a/lib/std/zig/system/NativePaths.zig b/lib/std/zig/system/NativePaths.zig index 70c795b0cf..368e3e062d 100644 --- a/lib/std/zig/system/NativePaths.zig +++ b/lib/std/zig/system/NativePaths.zig @@ -31,7 +31,7 @@ pub fn detect(allocator: Allocator, native_info: NativeTargetInfo) !NativePaths defer allocator.free(nix_cflags_compile); is_nix = true; - var it = mem.tokenize(u8, nix_cflags_compile, " "); + var it = mem.tokenizeScalar(u8, nix_cflags_compile, ' '); while (true) { const word = it.next() orelse break; if (mem.eql(u8, word, "-isystem")) { @@ -62,7 +62,7 @@ pub fn detect(allocator: Allocator, native_info: NativeTargetInfo) !NativePaths defer allocator.free(nix_ldflags); is_nix = true; - var it = mem.tokenize(u8, nix_ldflags, " "); + var it = mem.tokenizeScalar(u8, nix_ldflags, ' '); while (true) { const word = it.next() orelse break; if (mem.eql(u8, word, "-rpath")) { @@ -147,21 +147,21 @@ pub fn detect(allocator: Allocator, native_info: NativeTargetInfo) !NativePaths // We use os.getenv here since this part won't be executed on // windows, to get rid of unnecessary error handling. 
if (std.os.getenv("C_INCLUDE_PATH")) |c_include_path| { - var it = mem.tokenize(u8, c_include_path, ":"); + var it = mem.tokenizeScalar(u8, c_include_path, ':'); while (it.next()) |dir| { try self.addIncludeDir(dir); } } if (std.os.getenv("CPLUS_INCLUDE_PATH")) |cplus_include_path| { - var it = mem.tokenize(u8, cplus_include_path, ":"); + var it = mem.tokenizeScalar(u8, cplus_include_path, ':'); while (it.next()) |dir| { try self.addIncludeDir(dir); } } if (std.os.getenv("LIBRARY_PATH")) |library_path| { - var it = mem.tokenize(u8, library_path, ":"); + var it = mem.tokenizeScalar(u8, library_path, ':'); while (it.next()) |dir| { try self.addLibDir(dir); } diff --git a/lib/std/zig/system/NativeTargetInfo.zig b/lib/std/zig/system/NativeTargetInfo.zig index 539ad96365..808a1bda8d 100644 --- a/lib/std/zig/system/NativeTargetInfo.zig +++ b/lib/std/zig/system/NativeTargetInfo.zig @@ -354,7 +354,7 @@ fn detectAbiAndDynamicLinker( const newline = mem.indexOfScalar(u8, buffer[0..len], '\n') orelse break :blk file; const line = buffer[0..newline]; if (!mem.startsWith(u8, line, "#!")) break :blk file; - var it = mem.tokenize(u8, line[2..], " "); + var it = mem.tokenizeScalar(u8, line[2..], ' '); file_name = it.next() orelse return defaultAbiAndDynamicLinker(cpu, os, cross_target); file.close(); } @@ -811,7 +811,7 @@ pub fn abiAndDynamicLinkerFromFile( const strtab = strtab_buf[0..strtab_read_len]; const rpath_list = mem.sliceTo(strtab, 0); - var it = mem.tokenize(u8, rpath_list, ":"); + var it = mem.tokenizeScalar(u8, rpath_list, ':'); while (it.next()) |rpath| { if (glibcVerFromRPath(rpath)) |ver| { result.target.os.version_range.linux.glibc = ver; diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 2dc1cc8ee4..be09a33bde 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -8409,9 +8409,9 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void { } const asm_source = mem.sliceAsBytes(self.air.extra[extra_i..])[0..extra.data.source_len]; - var line_it = mem.tokenize(u8, asm_source, "\n\r;"); + var line_it = mem.tokenizeAny(u8, asm_source, "\n\r;"); while (line_it.next()) |line| { - var mnem_it = mem.tokenize(u8, line, " \t"); + var mnem_it = mem.tokenizeAny(u8, line, " \t"); const mnem_str = mnem_it.next() orelse continue; if (mem.startsWith(u8, mnem_str, "#")) continue; @@ -8435,7 +8435,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void { return self.fail("Invalid mnemonic: '{s}'", .{mnem_str}); } }; - var op_it = mem.tokenize(u8, mnem_it.rest(), ","); + var op_it = mem.tokenizeScalar(u8, mnem_it.rest(), ','); var ops = [1]encoder.Instruction.Operand{.none} ** 4; for (&ops) |*op| { const op_str = mem.trim(u8, op_it.next() orelse break, " \t"); diff --git a/src/glibc.zig b/src/glibc.zig index 327e4f4bb9..00787381f4 100644 --- a/src/glibc.zig +++ b/src/glibc.zig @@ -109,7 +109,7 @@ pub fn loadMetaData(gpa: Allocator, contents: []const u8) LoadMetaDataError!*ABI const target_name = mem.sliceTo(contents[index..], 0); index += target_name.len + 1; - var component_it = mem.tokenize(u8, target_name, "-"); + var component_it = mem.tokenizeScalar(u8, target_name, '-'); const arch_name = component_it.next() orelse { log.err("abilists: expected arch name", .{}); return error.ZigInstallationCorrupt; diff --git a/src/libc_installation.zig b/src/libc_installation.zig index da877e1291..a62da6b9c7 100644 --- a/src/libc_installation.zig +++ b/src/libc_installation.zig @@ -60,7 +60,7 @@ pub const LibCInstallation = struct { const contents = try 
std.fs.cwd().readFileAlloc(allocator, libc_file, std.math.maxInt(usize)); defer allocator.free(contents); - var it = std.mem.tokenize(u8, contents, "\n"); + var it = std.mem.tokenizeScalar(u8, contents, '\n'); while (it.next()) |line| { if (line.len == 0 or line[0] == '#') continue; var line_it = std.mem.split(u8, line, "="); @@ -293,7 +293,7 @@ pub const LibCInstallation = struct { }, } - var it = std.mem.tokenize(u8, exec_res.stderr, "\n\r"); + var it = std.mem.tokenizeAny(u8, exec_res.stderr, "\n\r"); var search_paths = std.ArrayList([]const u8).init(allocator); defer search_paths.deinit(); while (it.next()) |line| { @@ -613,7 +613,7 @@ fn ccPrintFileName(args: CCPrintFileNameOptions) ![:0]u8 { }, } - var it = std.mem.tokenize(u8, exec_res.stdout, "\n\r"); + var it = std.mem.tokenizeAny(u8, exec_res.stdout, "\n\r"); const line = it.next() orelse return error.LibCRuntimeNotFound; // When this command fails, it returns exit code 0 and duplicates the input file name. // So we detect failure by checking if the output matches exactly the input. @@ -692,7 +692,7 @@ fn appendCcExe(args: *std.ArrayList([]const u8), skip_cc_env_var: bool) !void { return; }; // Respect space-separated flags to the C compiler. - var it = std.mem.tokenize(u8, cc_env_var, " "); + var it = std.mem.tokenizeScalar(u8, cc_env_var, ' '); while (it.next()) |arg| { try args.append(arg); } diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig index bef06d1c87..f8ac4e09c1 100644 --- a/src/link/Plan9.zig +++ b/src/link/Plan9.zig @@ -264,7 +264,7 @@ fn putFn(self: *Plan9, decl_index: Module.Decl.Index, out: FnDeclOutput) !void { fn addPathComponents(self: *Plan9, path: []const u8, a: *std.ArrayList(u8)) !void { const sep = std.fs.path.sep; - var it = std.mem.tokenize(u8, path, &.{sep}); + var it = std.mem.tokenizeScalar(u8, path, sep); while (it.next()) |component| { if (self.file_segments.get(component)) |num| { try a.writer().writeIntBig(u16, num); diff --git a/src/print_zir.zig b/src/print_zir.zig index f5e84fcf5b..6ded52ae9f 100644 --- a/src/print_zir.zig +++ b/src/print_zir.zig @@ -2581,7 +2581,7 @@ const Writer = struct { fn writeDocComment(self: *Writer, stream: anytype, doc_comment_index: u32) !void { if (doc_comment_index != 0) { const doc_comment = self.code.nullTerminatedString(doc_comment_index); - var it = std.mem.tokenize(u8, doc_comment, "\n"); + var it = std.mem.tokenizeScalar(u8, doc_comment, '\n'); while (it.next()) |doc_line| { try stream.writeByteNTimes(' ', self.indent); try stream.print("///{s}\n", .{doc_line}); diff --git a/test/behavior/bugs/6456.zig b/test/behavior/bugs/6456.zig index 1eef9c7f75..297c9c7423 100644 --- a/test/behavior/bugs/6456.zig +++ b/test/behavior/bugs/6456.zig @@ -18,7 +18,7 @@ test "issue 6456" { comptime { var fields: []const StructField = &[0]StructField{}; - var it = std.mem.tokenize(u8, text, "\n"); + var it = std.mem.tokenizeScalar(u8, text, '\n'); while (it.next()) |name| { fields = fields ++ &[_]StructField{StructField{ .alignment = 0, diff --git a/test/src/Cases.zig b/test/src/Cases.zig index 68ecebc7bd..aa5369af93 100644 --- a/test/src/Cases.zig +++ b/test/src/Cases.zig @@ -846,7 +846,7 @@ const TestManifest = struct { const actual_start = start orelse return error.MissingTestManifest; const manifest_bytes = bytes[actual_start..end]; - var it = std.mem.tokenize(u8, manifest_bytes, "\r\n"); + var it = std.mem.tokenizeAny(u8, manifest_bytes, "\r\n"); // First line is the test type const tt: Type = blk: { @@ -923,7 +923,7 @@ const TestManifest = struct { fn trailing(self: 
TestManifest) TrailingIterator { return .{ - .inner = std.mem.tokenize(u8, self.trailing_bytes, "\r\n"), + .inner = std.mem.tokenizeAny(u8, self.trailing_bytes, "\r\n"), }; } diff --git a/tools/generate_linux_syscalls.zig b/tools/generate_linux_syscalls.zig index 11b18ae3bf..32e287b434 100644 --- a/tools/generate_linux_syscalls.zig +++ b/tools/generate_linux_syscalls.zig @@ -51,11 +51,11 @@ pub fn main() !void { try writer.writeAll("pub const X86 = enum(usize) {\n"); const table = try linux_dir.readFile("arch/x86/entry/syscalls/syscall_32.tbl", buf); - var lines = mem.tokenize(u8, table, "\n"); + var lines = mem.tokenizeScalar(u8, table, '\n'); while (lines.next()) |line| { if (line[0] == '#') continue; - var fields = mem.tokenize(u8, line, " \t"); + var fields = mem.tokenizeAny(u8, line, " \t"); const number = fields.next() orelse return error.Incomplete; // abi is always i386 _ = fields.next() orelse return error.Incomplete; @@ -70,11 +70,11 @@ pub fn main() !void { try writer.writeAll("pub const X64 = enum(usize) {\n"); const table = try linux_dir.readFile("arch/x86/entry/syscalls/syscall_64.tbl", buf); - var lines = mem.tokenize(u8, table, "\n"); + var lines = mem.tokenizeScalar(u8, table, '\n'); while (lines.next()) |line| { if (line[0] == '#') continue; - var fields = mem.tokenize(u8, line, " \t"); + var fields = mem.tokenizeAny(u8, line, " \t"); const number = fields.next() orelse return error.Incomplete; const abi = fields.next() orelse return error.Incomplete; // The x32 abi syscalls are always at the end. @@ -96,11 +96,11 @@ pub fn main() !void { ); const table = try linux_dir.readFile("arch/arm/tools/syscall.tbl", buf); - var lines = mem.tokenize(u8, table, "\n"); + var lines = mem.tokenizeScalar(u8, table, '\n'); while (lines.next()) |line| { if (line[0] == '#') continue; - var fields = mem.tokenize(u8, line, " \t"); + var fields = mem.tokenizeAny(u8, line, " \t"); const number = fields.next() orelse return error.Incomplete; const abi = fields.next() orelse return error.Incomplete; if (mem.eql(u8, abi, "oabi")) continue; @@ -127,11 +127,11 @@ pub fn main() !void { { try writer.writeAll("pub const Sparc64 = enum(usize) {\n"); const table = try linux_dir.readFile("arch/sparc/kernel/syscalls/syscall.tbl", buf); - var lines = mem.tokenize(u8, table, "\n"); + var lines = mem.tokenizeScalar(u8, table, '\n'); while (lines.next()) |line| { if (line[0] == '#') continue; - var fields = mem.tokenize(u8, line, " \t"); + var fields = mem.tokenizeAny(u8, line, " \t"); const number = fields.next() orelse return error.Incomplete; const abi = fields.next() orelse return error.Incomplete; if (mem.eql(u8, abi, "32")) continue; @@ -151,11 +151,11 @@ pub fn main() !void { ); const table = try linux_dir.readFile("arch/mips/kernel/syscalls/syscall_o32.tbl", buf); - var lines = mem.tokenize(u8, table, "\n"); + var lines = mem.tokenizeScalar(u8, table, '\n'); while (lines.next()) |line| { if (line[0] == '#') continue; - var fields = mem.tokenize(u8, line, " \t"); + var fields = mem.tokenizeAny(u8, line, " \t"); const number = fields.next() orelse return error.Incomplete; // abi is always o32 _ = fields.next() orelse return error.Incomplete; @@ -176,11 +176,11 @@ pub fn main() !void { ); const table = try linux_dir.readFile("arch/mips/kernel/syscalls/syscall_n64.tbl", buf); - var lines = mem.tokenize(u8, table, "\n"); + var lines = mem.tokenizeScalar(u8, table, '\n'); while (lines.next()) |line| { if (line[0] == '#') continue; - var fields = mem.tokenize(u8, line, " \t"); + var fields = 
mem.tokenizeAny(u8, line, " \t");
             const number = fields.next() orelse return error.Incomplete;
             // abi is always n64
             _ = fields.next() orelse return error.Incomplete;
@@ -197,11 +197,11 @@ pub fn main() !void {
         const table = try linux_dir.readFile("arch/powerpc/kernel/syscalls/syscall.tbl", buf);
         var list_64 = std.ArrayList(u8).init(allocator);
 
-        var lines = mem.tokenize(u8, table, "\n");
+        var lines = mem.tokenizeScalar(u8, table, '\n');
         while (lines.next()) |line| {
             if (line[0] == '#') continue;
 
-            var fields = mem.tokenize(u8, line, " \t");
+            var fields = mem.tokenizeAny(u8, line, " \t");
             const number = fields.next() orelse return error.Incomplete;
             const abi = fields.next() orelse return error.Incomplete;
             const name = fields.next() orelse return error.Incomplete;
@@ -277,9 +277,9 @@ pub fn main() !void {
             },
         };
 
-        var lines = mem.tokenize(u8, defines, "\n");
+        var lines = mem.tokenizeScalar(u8, defines, '\n');
         loop: while (lines.next()) |line| {
-            var fields = mem.tokenize(u8, line, " \t");
+            var fields = mem.tokenizeAny(u8, line, " \t");
             const cmd = fields.next() orelse return error.Incomplete;
             if (!mem.eql(u8, cmd, "#define")) continue;
             const define = fields.next() orelse return error.Incomplete;
@@ -339,9 +339,9 @@ pub fn main() !void {
             },
         };
 
-        var lines = mem.tokenize(u8, defines, "\n");
+        var lines = mem.tokenizeScalar(u8, defines, '\n');
         loop: while (lines.next()) |line| {
-            var fields = mem.tokenize(u8, line, " \t");
+            var fields = mem.tokenizeAny(u8, line, " \t");
            const cmd = fields.next() orelse return error.Incomplete;
             if (!mem.eql(u8, cmd, "#define")) continue;
             const define = fields.next() orelse return error.Incomplete;
-- cgit v1.2.3

From 019c8844811ffb8b385ac8891cfd17cbf60d104a Mon Sep 17 00:00:00 2001
From: Jacob Young
Date: Mon, 8 May 2023 16:43:57 -0400
Subject: x86_64: add missing multiply of `f16`

---
 src/arch/x86_64/CodeGen.zig | 5 +++++
 test/behavior/floatop.zig   | 2 +-
 2 files changed, 6 insertions(+), 1 deletion(-)

(limited to 'src/arch')

diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index 2dc1cc8ee4..63e3416079 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -6345,6 +6345,7 @@ fn genBinOp(
             switch (air_tag) {
                 .add => .{ .v_ss, .add },
                 .sub => .{ .v_ss, .sub },
+                .mul => .{ .v_ss, .mul },
                 .div_float, .div_trunc, .div_floor, .div_exact => .{ .v_ss, .div },
                 .max => .{ .v_ss, .max },
                 .min => .{ .v_ss, .max },
@@ -6421,6 +6422,7 @@ fn genBinOp(
             switch (air_tag) {
                 .add => .{ .v_ss, .add },
                 .sub => .{ .v_ss, .sub },
+                .mul => .{ .v_ss, .mul },
                 .div_float, .div_trunc, .div_floor, .div_exact => .{ .v_ss, .div },
                 .max => .{ .v_ss, .max },
                 .min => .{ .v_ss, .max },
@@ -6468,6 +6470,7 @@ fn genBinOp(
             switch (air_tag) {
                 .add => .{ .v_ps, .add },
                 .sub => .{ .v_ps, .sub },
+                .mul => .{ .v_ps, .mul },
                 .div_float, .div_trunc, .div_floor, .div_exact => .{ .v_ps, .div },
                 .max => .{ .v_ps, .max },
                 .min => .{ .v_ps, .max },
@@ -6507,6 +6510,7 @@ fn genBinOp(
             switch (air_tag) {
                 .add => .{ .v_ps, .add },
                 .sub => .{ .v_ps, .sub },
+                .mul => .{ .v_ps, .mul },
                 .div_float, .div_trunc, .div_floor, .div_exact => .{ .v_ps, .div },
                 .max => .{ .v_ps, .max },
                 .min => .{ .v_ps, .max },
@@ -6546,6 +6550,7 @@ fn genBinOp(
             switch (air_tag) {
                 .add => .{ .v_ps, .add },
                 .sub => .{ .v_ps, .sub },
+                .mul => .{ .v_ps, .mul },
                 .div_float, .div_trunc, .div_floor, .div_exact => .{ .v_ps, .div },
                 .max => .{ .v_ps, .max },
                 .min => .{ .v_ps, .max },
diff --git a/test/behavior/floatop.zig b/test/behavior/floatop.zig
index d2035c656f..9d17b05865 100644
---
a/test/behavior/floatop.zig +++ b/test/behavior/floatop.zig @@ -184,7 +184,7 @@ test "more @sqrt f16 tests" { test "another, possibly redundant @sqrt test" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (no_x86_64_hardware_f16_support) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; try testSqrtLegacy(f64, 12.0); -- cgit v1.2.3 From e98e58691f2c0759c8534080446cf6faecd30eb0 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Mon, 8 May 2023 18:34:45 -0400 Subject: x86_64: fix crash with logging enabled --- src/arch/x86_64/CodeGen.zig | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) (limited to 'src/arch') diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 63e3416079..fe2b23e126 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -1460,6 +1460,15 @@ fn asmMemoryRegister(self: *Self, tag: Mir.Inst.FixedTag, m: Memory, reg: Regist } fn asmMemoryImmediate(self: *Self, tag: Mir.Inst.FixedTag, m: Memory, imm: Immediate) !void { + const payload = try self.addExtra(Mir.Imm32{ .imm = switch (imm) { + .signed => |s| @bitCast(u32, s), + .unsigned => |u| @intCast(u32, u), + } }); + assert(payload + 1 == switch (m) { + .sib => try self.addExtra(Mir.MemorySib.encode(m)), + .rip => try self.addExtra(Mir.MemoryRip.encode(m)), + else => unreachable, + }); _ = try self.addInst(.{ .tag = tag[1], .ops = switch (m) { @@ -1475,17 +1484,9 @@ fn asmMemoryImmediate(self: *Self, tag: Mir.Inst.FixedTag, m: Memory, imm: Immed }, .data = .{ .x = .{ .fixes = tag[0], - .payload = try self.addExtra(Mir.Imm32{ .imm = switch (imm) { - .signed => |s| @bitCast(u32, s), - .unsigned => |u| @intCast(u32, u), - } }), + .payload = payload, } }, }); - _ = switch (m) { - .sib => try self.addExtra(Mir.MemorySib.encode(m)), - .rip => try self.addExtra(Mir.MemoryRip.encode(m)), - else => unreachable, - }; } fn asmMemoryRegisterRegister( -- cgit v1.2.3 From 1667e831cf8099271d97c9391fe4400622e6f96f Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Mon, 8 May 2023 18:35:03 -0400 Subject: x86_64: fix issues with getting float fields --- src/arch/x86_64/CodeGen.zig | 37 ++++++++++++++++++------------------- 1 file changed, 18 insertions(+), 19 deletions(-) (limited to 'src/arch') diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index fe2b23e126..f4dbd502e7 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -5325,8 +5325,10 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { const index = extra.field_index; const container_ty = self.air.typeOf(operand); + const container_rc = regClassForType(container_ty); const field_ty = container_ty.structFieldType(index); if (!field_ty.hasRuntimeBitsIgnoreComptime()) break :result .none; + const field_rc = regClassForType(field_ty); const src_mcv = try self.resolveInst(operand); const field_off = switch (container_ty.containerLayout()) { @@ -5410,30 +5412,23 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { if (field_extra_bits > 0) try self.truncateRegister(field_ty, dst_reg); const dst_mcv = MCValue{ .register = dst_reg }; - const dst_rc = regClassForType(field_ty); - if (dst_rc.eql(gp)) break :result dst_mcv; - - const result_reg = try self.register_manager.allocReg(inst, dst_rc); - try 
self.genSetReg(result_reg, field_ty, dst_mcv); - break :result .{ .register = result_reg }; + break :result if (field_rc.supersetOf(gp)) + dst_mcv + else + try self.copyToRegisterWithInstTracking(inst, field_ty, dst_mcv); }, .register => |reg| { const reg_lock = self.register_manager.lockRegAssumeUnused(reg); defer self.register_manager.unlockReg(reg_lock); - const dst_mcv = if (self.reuseOperand(inst, operand, 0, src_mcv)) - src_mcv + const dst_reg = if (src_mcv.isRegister() and field_rc.supersetOf(container_rc) and + self.reuseOperand(inst, operand, 0, src_mcv)) + src_mcv.getReg().? else - try self.copyToRegisterWithInstTracking( - inst, - Type.usize, - .{ .register = reg.to64() }, - ); - const dst_mcv_lock: ?RegisterLock = switch (dst_mcv) { - .register => |a_reg| self.register_manager.lockReg(a_reg), - else => null, - }; - defer if (dst_mcv_lock) |lock| self.register_manager.unlockReg(lock); + try self.copyToTmpRegister(Type.usize, .{ .register = reg.to64() }); + const dst_mcv = MCValue{ .register = dst_reg }; + const dst_lock = self.register_manager.lockReg(dst_reg); + defer if (dst_lock) |lock| self.register_manager.unlockReg(lock); // Shift by struct_field_offset. try self.genShiftBinOpMir( @@ -5460,7 +5455,11 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { registerAlias(dst_mcv.register, field_byte_size), ); } - break :result dst_mcv; + + break :result if (field_rc.supersetOf(gp)) + dst_mcv + else + try self.copyToRegisterWithInstTracking(inst, field_ty, dst_mcv); }, .register_overflow => |ro| { switch (index) { -- cgit v1.2.3 From e36e9323f4c4ce66013ba4774ff0145fc9cad34d Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Mon, 8 May 2023 19:31:16 -0400 Subject: x86_64: implement union_init --- src/arch/x86_64/CodeGen.zig | 43 ++++++++++++++++++++++++++++++++++++++++--- test/behavior/bugs/13366.zig | 1 - test/behavior/struct.zig | 1 - test/behavior/tuple.zig | 1 - test/behavior/union.zig | 7 ------- 5 files changed, 40 insertions(+), 13 deletions(-) (limited to 'src/arch') diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index f4dbd502e7..2c7751cff5 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -10147,9 +10147,46 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.UnionInit, ty_pl.payload).data; - _ = extra; - return self.fail("TODO implement airUnionInit for x86_64", .{}); - //return self.finishAir(inst, result, .{ extra.init, .none, .none }); + const result: MCValue = result: { + const union_ty = self.air.typeOfIndex(inst); + const layout = union_ty.unionGetLayout(self.target.*); + + const src_ty = self.air.typeOf(extra.init); + const src_mcv = try self.resolveInst(extra.init); + if (layout.tag_size == 0) { + if (self.reuseOperand(inst, extra.init, 0, src_mcv)) break :result src_mcv; + + const dst_mcv = try self.allocRegOrMem(inst, true); + try self.genCopy(src_ty, dst_mcv, src_mcv); + break :result dst_mcv; + } + + const dst_mcv = try self.allocRegOrMem(inst, false); + + const union_obj = union_ty.cast(Type.Payload.Union).?.data; + const field_name = union_obj.fields.keys()[extra.field_index]; + const tag_ty = union_ty.unionTagTypeSafety().?; + const field_index = @intCast(u32, tag_ty.enumFieldIndex(field_name).?); + var tag_pl = Value.Payload.U32{ .base = .{ .tag = .enum_field_index }, .data = field_index }; + const tag_val = 
Value.initPayload(&tag_pl.base); + var tag_int_pl: Value.Payload.U64 = undefined; + const tag_int_val = tag_val.enumToInt(tag_ty, &tag_int_pl); + const tag_int = tag_int_val.toUnsignedInt(self.target.*); + const tag_off = if (layout.tag_align < layout.payload_align) + @intCast(i32, layout.payload_size) + else + 0; + try self.genCopy(tag_ty, dst_mcv.address().offset(tag_off).deref(), .{ .immediate = tag_int }); + + const pl_off = if (layout.tag_align < layout.payload_align) + 0 + else + @intCast(i32, layout.tag_size); + try self.genCopy(src_ty, dst_mcv.address().offset(pl_off).deref(), src_mcv); + + break :result dst_mcv; + }; + return self.finishAir(inst, result, .{ extra.init, .none, .none }); } fn airPrefetch(self: *Self, inst: Air.Inst.Index) !void { diff --git a/test/behavior/bugs/13366.zig b/test/behavior/bugs/13366.zig index cec015cc40..8419fbe265 100644 --- a/test/behavior/bugs/13366.zig +++ b/test/behavior/bugs/13366.zig @@ -14,7 +14,6 @@ const Block = struct { test { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO diff --git a/test/behavior/struct.zig b/test/behavior/struct.zig index 797a22c3a8..659acbf56b 100644 --- a/test/behavior/struct.zig +++ b/test/behavior/struct.zig @@ -1352,7 +1352,6 @@ test "struct field init value is size of the struct" { } test "under-aligned struct field" { - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO diff --git a/test/behavior/tuple.zig b/test/behavior/tuple.zig index 2ce1922e50..b6fde88af2 100644 --- a/test/behavior/tuple.zig +++ b/test/behavior/tuple.zig @@ -367,7 +367,6 @@ test "branching inside tuple literal" { test "tuple initialized with a runtime known value" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; diff --git a/test/behavior/union.zig b/test/behavior/union.zig index 26a5d060df..b03e40214f 100644 --- a/test/behavior/union.zig +++ b/test/behavior/union.zig @@ -821,7 +821,6 @@ test "return union init with void payload" { } test "@unionInit stored to a const" { - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO @@ -994,7 +993,6 @@ test "function call result coerces from tagged union to the tag" { } test "cast from anonymous struct to union" { - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == 
.stage2_wasm) return error.SkipZigTest; // TODO @@ -1028,7 +1026,6 @@ test "cast from anonymous struct to union" { } test "cast from pointer to anonymous struct to pointer to union" { - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO @@ -1199,7 +1196,6 @@ test "global variable struct contains union initialized to non-most-aligned fiel test "union with no result loc initiated with a runtime value" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; @@ -1217,7 +1213,6 @@ test "union with no result loc initiated with a runtime value" { test "union with a large struct field" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; @@ -1288,7 +1283,6 @@ test "extern union most-aligned field is smaller" { } test "return an extern union from C calling convention" { - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -1397,7 +1391,6 @@ test "union and enum field order doesn't match" { } test "@unionInit uses tag value instead of field index" { - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO -- cgit v1.2.3 From 1336619979cfd5145c042ba7e2c6d0fbafc53574 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Mon, 8 May 2023 23:13:23 -0400 Subject: x86_64: fix field_ptr nonsense --- src/arch/x86_64/CodeGen.zig | 71 ++++++++------------------------------ test/behavior/field_parent_ptr.zig | 4 --- test/behavior/union.zig | 1 - 3 files changed, 14 insertions(+), 62 deletions(-) (limited to 'src/arch') diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 2c7751cff5..d1bc23b826 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -5257,64 +5257,24 @@ fn airStructFieldPtrIndex(self: *Self, inst: Air.Inst.Index, index: u8) !void { fn fieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32) !MCValue { const ptr_field_ty = self.air.typeOfIndex(inst); - const mcv = try self.resolveInst(operand); const ptr_container_ty = self.air.typeOf(operand); const container_ty = ptr_container_ty.childType(); - const field_offset = switch (container_ty.containerLayout()) { - .Auto, .Extern => @intCast(u32, container_ty.structFieldOffset(index, self.target.*)), + 
const field_offset = @intCast(i32, switch (container_ty.containerLayout()) { + .Auto, .Extern => container_ty.structFieldOffset(index, self.target.*), .Packed => if (container_ty.zigTypeTag() == .Struct and ptr_field_ty.ptrInfo().data.host_size == 0) container_ty.packedStructFieldByteOffset(index, self.target.*) else 0, - }; - - const result: MCValue = result: { - switch (mcv) { - .load_frame, .lea_tlv, .load_tlv => { - const offset_reg = try self.copyToTmpRegister(Type.usize, .{ - .immediate = field_offset, - }); - const offset_reg_lock = self.register_manager.lockRegAssumeUnused(offset_reg); - defer self.register_manager.unlockReg(offset_reg_lock); - - const dst_mcv = try self.copyToRegisterWithInstTracking(inst, Type.usize, switch (mcv) { - .load_tlv => |sym_index| .{ .lea_tlv = sym_index }, - else => mcv, - }); - try self.genBinOpMir(.{ ._, .add }, Type.usize, dst_mcv, .{ .register = offset_reg }); - break :result dst_mcv; - }, - .indirect => |reg_off| break :result .{ .indirect = .{ - .reg = reg_off.reg, - .off = reg_off.off + @intCast(i32, field_offset), - } }, - .lea_frame => |frame_addr| break :result .{ .lea_frame = .{ - .index = frame_addr.index, - .off = frame_addr.off + @intCast(i32, field_offset), - } }, - .register, .register_offset => { - const src_reg = mcv.getReg().?; - const src_lock = self.register_manager.lockRegAssumeUnused(src_reg); - defer self.register_manager.unlockReg(src_lock); + }); - const dst_mcv: MCValue = if (self.reuseOperand(inst, operand, 0, mcv)) - mcv - else - .{ .register = try self.copyToTmpRegister(ptr_field_ty, mcv) }; - break :result .{ .register_offset = .{ - .reg = dst_mcv.getReg().?, - .off = switch (dst_mcv) { - .register => 0, - .register_offset => |reg_off| reg_off.off, - else => unreachable, - } + @intCast(i32, field_offset), - } }; - }, - else => return self.fail("TODO implement fieldPtr for {}", .{mcv}), - } - }; - return result; + const src_mcv = try self.resolveInst(operand); + const dst_mcv = if (switch (src_mcv) { + .immediate, .lea_frame => true, + .register, .register_offset => self.reuseOperand(inst, operand, 0, src_mcv), + else => false, + }) src_mcv else try self.copyToRegisterWithInstTracking(inst, ptr_field_ty, src_mcv); + return dst_mcv.offset(field_offset); } fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { @@ -6717,7 +6677,6 @@ fn genBinOpMir( .dead, .undef, .immediate, - .register_offset, .eflags, .register_overflow, .lea_direct, @@ -6726,7 +6685,9 @@ fn genBinOpMir( .lea_frame, .reserved_frame, => unreachable, // unmodifiable destination - .register => |dst_reg| { + .register, .register_offset => { + assert(dst_mcv.isRegister()); + const dst_reg = dst_mcv.getReg().?; const dst_alias = registerAlias(dst_reg, abi_size); switch (src_mcv) { .none, @@ -8625,11 +8586,7 @@ fn movMirTag(self: *Self, ty: Type, aligned: bool) !Mir.Inst.FixedTag { } fn genCopy(self: *Self, ty: Type, dst_mcv: MCValue, src_mcv: MCValue) InnerError!void { - const src_lock = switch (src_mcv) { - .register => |reg| self.register_manager.lockReg(reg), - .register_overflow => |ro| self.register_manager.lockReg(ro.reg), - else => null, - }; + const src_lock = if (src_mcv.getReg()) |reg| self.register_manager.lockReg(reg) else null; defer if (src_lock) |lock| self.register_manager.unlockReg(lock); switch (dst_mcv) { diff --git a/test/behavior/field_parent_ptr.zig b/test/behavior/field_parent_ptr.zig index 1310b96678..80026bdc4b 100644 --- a/test/behavior/field_parent_ptr.zig +++ b/test/behavior/field_parent_ptr.zig @@ -11,7 +11,6 @@ test 
"@fieldParentPtr non-first field" { } test "@fieldParentPtr first field" { - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; @@ -51,7 +50,6 @@ fn testParentFieldPtrFirst(a: *const bool) !void { } test "@fieldParentPtr untagged union" { - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO @@ -79,7 +77,6 @@ fn testFieldParentPtrUnion(c: *const i32) !void { } test "@fieldParentPtr tagged union" { - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO @@ -107,7 +104,6 @@ fn testFieldParentPtrTaggedUnion(c: *const i32) !void { } test "@fieldParentPtr extern union" { - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO diff --git a/test/behavior/union.zig b/test/behavior/union.zig index c84072fb8d..c6acfb3f84 100644 --- a/test/behavior/union.zig +++ b/test/behavior/union.zig @@ -1164,7 +1164,6 @@ test "union enum type gets a separate scope" { } test "global variable struct contains union initialized to non-most-aligned field" { - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO -- cgit v1.2.3 From c23e80e671686278ea2ea23d164a2c0839ca372c Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Tue, 9 May 2023 03:15:27 -0400 Subject: x86_64: implement `@splat` --- src/arch/x86_64/CodeGen.zig | 208 +++++++++++++++++++++++++++++++++++++++++- src/arch/x86_64/Encoding.zig | 13 ++- src/arch/x86_64/Lower.zig | 2 + src/arch/x86_64/Mir.zig | 15 +++ src/arch/x86_64/encodings.zig | 41 ++++++++- 5 files changed, 270 insertions(+), 9 deletions(-) (limited to 'src/arch') diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index d1bc23b826..29232b5284 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -8561,7 +8561,8 @@ fn movMirTag(self: *Self, ty: Type, aligned: bool) !Mir.Inst.FixedTag { }, 32 => switch (ty.vectorLen()) { 1 => return if (self.hasFeature(.avx)) .{ .v_ss, .mov } else .{ ._ss, .mov }, - 2...4 => return if (self.hasFeature(.avx)) + 2 => return if (self.hasFeature(.avx)) .{ .v_sd, .mov } else .{ ._sd, .mov }, + 3...4 => return if (self.hasFeature(.avx)) if (aligned) .{ .v_ps, .mova } else .{ .v_ps, .movu } else if (aligned) .{ ._ps, .mova } else .{ ._ps, .movu }, 5...8 => if (self.hasFeature(.avx)) @@ -8577,6 +8578,14 @@ fn movMirTag(self: *Self, ty: Type, aligned: bool) !Mir.Inst.FixedTag { return if (aligned) .{ .v_ps, .mova } else .{ .v_ps, .movu }, else => {}, }, + 
128 => switch (ty.vectorLen()) { + 1 => return if (self.hasFeature(.avx)) + if (aligned) .{ .v_ps, .mova } else .{ .v_ps, .movu } + else if (aligned) .{ ._ps, .mova } else .{ ._ps, .movu }, + 2 => if (self.hasFeature(.avx)) + return if (aligned) .{ .v_ps, .mova } else .{ .v_ps, .movu }, + else => {}, + }, else => {}, }, else => {}, @@ -9939,9 +9948,200 @@ fn airErrorName(self: *Self, inst: Air.Inst.Index) !void { fn airSplat(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; - _ = ty_op; - return self.fail("TODO implement airSplat for x86_64", .{}); - //return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); + const vector_ty = self.air.typeOfIndex(inst); + const dst_rc = regClassForType(vector_ty); + const scalar_ty = vector_ty.scalarType(); + + const src_mcv = try self.resolveInst(ty_op.operand); + const result: MCValue = result: { + switch (scalar_ty.zigTypeTag()) { + else => {}, + .Float => switch (scalar_ty.floatBits(self.target.*)) { + 32 => switch (vector_ty.vectorLen()) { + 1 => { + if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) break :result src_mcv; + const dst_reg = try self.register_manager.allocReg(inst, dst_rc); + try self.genSetReg(dst_reg, scalar_ty, src_mcv); + break :result .{ .register = dst_reg }; + }, + 2...4 => { + if (self.hasFeature(.avx)) { + const dst_reg = try self.register_manager.allocReg(inst, dst_rc); + if (src_mcv.isMemory()) try self.asmRegisterMemory( + .{ .v_ss, .broadcast }, + dst_reg.to128(), + src_mcv.mem(.dword), + ) else { + const src_reg = if (src_mcv.isRegister()) + src_mcv.getReg().? + else + try self.copyToTmpRegister(scalar_ty, src_mcv); + try self.asmRegisterRegisterRegisterImmediate( + .{ .v_ps, .shuf }, + dst_reg.to128(), + src_reg.to128(), + src_reg.to128(), + Immediate.u(0), + ); + } + break :result .{ .register = dst_reg }; + } else { + const dst_mcv = if (src_mcv.isRegister() and + self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) + src_mcv + else + try self.copyToRegisterWithInstTracking(inst, scalar_ty, src_mcv); + const dst_reg = dst_mcv.getReg().?; + try self.asmRegisterRegisterImmediate( + .{ ._ps, .shuf }, + dst_reg.to128(), + dst_reg.to128(), + Immediate.u(0), + ); + break :result dst_mcv; + } + }, + 5...8 => if (self.hasFeature(.avx)) { + const dst_reg = try self.register_manager.allocReg(inst, dst_rc); + if (src_mcv.isMemory()) try self.asmRegisterMemory( + .{ .v_ss, .broadcast }, + dst_reg.to256(), + src_mcv.mem(.dword), + ) else { + const src_reg = if (src_mcv.isRegister()) + src_mcv.getReg().? 
+ else + try self.copyToTmpRegister(scalar_ty, src_mcv); + if (self.hasFeature(.avx2)) try self.asmRegisterRegister( + .{ .v_ss, .broadcast }, + dst_reg.to256(), + src_reg.to128(), + ) else { + try self.asmRegisterRegisterRegisterImmediate( + .{ .v_ps, .shuf }, + dst_reg.to128(), + src_reg.to128(), + src_reg.to128(), + Immediate.u(0), + ); + try self.asmRegisterRegisterRegisterImmediate( + .{ .v_f128, .insert }, + dst_reg.to256(), + dst_reg.to256(), + dst_reg.to128(), + Immediate.u(1), + ); + } + } + break :result .{ .register = dst_reg }; + }, + else => {}, + }, + 64 => switch (vector_ty.vectorLen()) { + 1 => { + if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) break :result src_mcv; + const dst_reg = try self.register_manager.allocReg(inst, dst_rc); + try self.genSetReg(dst_reg, scalar_ty, src_mcv); + break :result .{ .register = dst_reg }; + }, + 2 => { + const dst_reg = try self.register_manager.allocReg(inst, dst_rc); + if (self.hasFeature(.sse3)) { + if (src_mcv.isMemory()) try self.asmRegisterMemory( + if (self.hasFeature(.avx)) .{ .v_, .movddup } else .{ ._, .movddup }, + dst_reg.to128(), + src_mcv.mem(.qword), + ) else try self.asmRegisterRegister( + if (self.hasFeature(.avx)) .{ .v_, .movddup } else .{ ._, .movddup }, + dst_reg.to128(), + (if (src_mcv.isRegister()) + src_mcv.getReg().? + else + try self.copyToTmpRegister(scalar_ty, src_mcv)).to128(), + ); + break :result .{ .register = dst_reg }; + } else try self.asmRegisterRegister( + .{ ._ps, .movlh }, + dst_reg.to128(), + (if (src_mcv.isRegister()) + src_mcv.getReg().? + else + try self.copyToTmpRegister(scalar_ty, src_mcv)).to128(), + ); + }, + 3...4 => if (self.hasFeature(.avx)) { + const dst_reg = try self.register_manager.allocReg(inst, dst_rc); + if (src_mcv.isMemory()) try self.asmRegisterMemory( + .{ .v_sd, .broadcast }, + dst_reg.to256(), + src_mcv.mem(.qword), + ) else { + const src_reg = if (src_mcv.isRegister()) + src_mcv.getReg().? + else + try self.copyToTmpRegister(scalar_ty, src_mcv); + if (self.hasFeature(.avx2)) try self.asmRegisterRegister( + .{ .v_sd, .broadcast }, + dst_reg.to256(), + src_reg.to128(), + ) else { + try self.asmRegisterRegister( + .{ .v_, .movddup }, + dst_reg.to128(), + src_reg.to128(), + ); + try self.asmRegisterRegisterRegisterImmediate( + .{ .v_f128, .insert }, + dst_reg.to256(), + dst_reg.to256(), + dst_reg.to128(), + Immediate.u(1), + ); + } + } + break :result .{ .register = dst_reg }; + }, + else => {}, + }, + 128 => switch (vector_ty.vectorLen()) { + 1 => { + if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) break :result src_mcv; + const dst_reg = try self.register_manager.allocReg(inst, dst_rc); + try self.genSetReg(dst_reg, scalar_ty, src_mcv); + break :result .{ .register = dst_reg }; + }, + 2 => if (self.hasFeature(.avx)) { + const dst_reg = try self.register_manager.allocReg(inst, dst_rc); + if (src_mcv.isMemory()) try self.asmRegisterMemory( + .{ .v_f128, .broadcast }, + dst_reg.to256(), + src_mcv.mem(.xword), + ) else { + const src_reg = if (src_mcv.isRegister()) + src_mcv.getReg().? 
+ else + try self.copyToTmpRegister(scalar_ty, src_mcv); + try self.asmRegisterRegisterRegisterImmediate( + .{ .v_f128, .insert }, + dst_reg.to256(), + src_reg.to256(), + src_reg.to128(), + Immediate.u(1), + ); + } + break :result .{ .register = dst_reg }; + }, + else => {}, + }, + 16, 80 => {}, + else => unreachable, + }, + } + return self.fail("TODO implement airSplat for {}", .{ + vector_ty.fmt(self.bin_file.options.module.?), + }); + }; + return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } fn airSelect(self: *Self, inst: Air.Inst.Index) !void { diff --git a/src/arch/x86_64/Encoding.zig b/src/arch/x86_64/Encoding.zig index b6b49e8939..073128b85e 100644 --- a/src/arch/x86_64/Encoding.zig +++ b/src/arch/x86_64/Encoding.zig @@ -270,10 +270,12 @@ pub const Mnemonic = enum { divps, divss, maxps, maxss, minps, minss, - movaps, movhlps, movss, movups, + movaps, movhlps, movlhps, + movss, movups, mulps, mulss, orps, pextrw, pinsrw, + shufps, sqrtps, sqrtss, subps, subss, ucomiss, @@ -296,6 +298,7 @@ pub const Mnemonic = enum { psrld, psrlq, psrlw, punpckhbw, punpckhdq, punpckhqdq, punpckhwd, punpcklbw, punpckldq, punpcklqdq, punpcklwd, + shufpd, sqrtpd, sqrtsd, subpd, subsd, ucomisd, @@ -303,17 +306,22 @@ pub const Mnemonic = enum { // SSE3 movddup, movshdup, movsldup, // SSE4.1 + extractps, + insertps, pextrb, pextrd, pextrq, pinsrb, pinsrd, pinsrq, roundpd, roundps, roundsd, roundss, // AVX vaddpd, vaddps, vaddsd, vaddss, + vbroadcastf128, vbroadcastsd, vbroadcastss, vcvtsd2ss, vcvtsi2sd, vcvtsi2ss, vcvtss2sd, vdivpd, vdivps, vdivsd, vdivss, + vextractf128, vextractps, + vinsertf128, vinsertps, vmaxpd, vmaxps, vmaxsd, vmaxss, vminpd, vminps, vminsd, vminss, vmovapd, vmovaps, - vmovddup, vmovhlps, + vmovddup, vmovhlps, vmovlhps, vmovsd, vmovshdup, vmovsldup, vmovss, @@ -326,6 +334,7 @@ pub const Mnemonic = enum { vpunpckhbw, vpunpckhdq, vpunpckhqdq, vpunpckhwd, vpunpcklbw, vpunpckldq, vpunpcklqdq, vpunpcklwd, vroundpd, vroundps, vroundsd, vroundss, + vshufpd, vshufps, vsqrtpd, vsqrtps, vsqrtsd, vsqrtss, vsubpd, vsubps, vsubsd, vsubss, // F16C diff --git a/src/arch/x86_64/Lower.zig b/src/arch/x86_64/Lower.zig index c32e7fc974..c893429912 100644 --- a/src/arch/x86_64/Lower.zig +++ b/src/arch/x86_64/Lower.zig @@ -300,6 +300,8 @@ fn generic(lower: *Lower, inst: Mir.Inst) Error!void { else .none, }, mnemonic: { + @setEvalBranchQuota(2_000); + comptime var max_len = 0; inline for (@typeInfo(Mnemonic).Enum.fields) |field| max_len = @max(field.name.len, max_len); var buf: [max_len]u8 = undefined; diff --git a/src/arch/x86_64/Mir.zig b/src/arch/x86_64/Mir.zig index 0a7b5597b3..18c2903045 100644 --- a/src/arch/x86_64/Mir.zig +++ b/src/arch/x86_64/Mir.zig @@ -256,6 +256,8 @@ pub const Inst = struct { v_sd, /// VEX-Encoded ___ Packed Double-Precision Values v_pd, + /// VEX-Encoded ___ 128-Bits Of Floating-Point Data + v_f128, /// Mask ___ Byte k_b, @@ -454,6 +456,8 @@ pub const Inst = struct { mova, /// Move packed single-precision floating-point values high to low movhl, + /// Move packed single-precision floating-point values low to high + movlh, /// Move unaligned packed single-precision floating-point values /// Move unaligned packed double-precision floating-point values movu, @@ -488,6 +492,9 @@ pub const Inst = struct { cvtsi2sd, /// Convert scalar single-precision floating-point value to scalar double-precision floating-point value cvtss2sd, + /// Packed interleave shuffle of quadruplets of single-precision floating-point values + /// Packed interleave shuffle of pairs of 
double-precision floating-point values + shuf, /// Shuffle packed high words shufh, /// Shuffle packed low words @@ -520,12 +527,20 @@ pub const Inst = struct { /// Replicate single floating-point values movsldup, + /// Extract packed floating-point values + extract, + /// Insert scalar single-precision floating-point value + /// Insert packed floating-point values + insert, /// Round packed single-precision floating-point values /// Round scalar single-precision floating-point value /// Round packed double-precision floating-point values /// Round scalar double-precision floating-point value round, + /// Load with broadcast floating-point data + broadcast, + /// Convert 16-bit floating-point values to single-precision floating-point values cvtph2ps, /// Convert single-precision floating-point values to 16-bit floating-point values diff --git a/src/arch/x86_64/encodings.zig b/src/arch/x86_64/encodings.zig index 2b9d530c1e..f56f31da7f 100644 --- a/src/arch/x86_64/encodings.zig +++ b/src/arch/x86_64/encodings.zig @@ -867,6 +867,8 @@ pub const table = [_]Entry{ .{ .movhlps, .rm, &.{ .xmm, .xmm }, &.{ 0x0f, 0x12 }, 0, .none, .sse }, + .{ .movlhps, .rm, &.{ .xmm, .xmm }, &.{ 0x0f, 0x16 }, 0, .none, .sse }, + .{ .movss, .rm, &.{ .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x10 }, 0, .none, .sse }, .{ .movss, .mr, &.{ .xmm_m32, .xmm }, &.{ 0xf3, 0x0f, 0x11 }, 0, .none, .sse }, @@ -879,14 +881,16 @@ pub const table = [_]Entry{ .{ .orps, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x56 }, 0, .none, .sse }, - .{ .subps, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x5c }, 0, .none, .sse }, - - .{ .subss, .rm, &.{ .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x5c }, 0, .none, .sse }, + .{ .shufps, .rmi, &.{ .xmm, .xmm_m128, .imm8 }, &.{ 0x0f, 0xc6 }, 0, .none, .sse }, .{ .sqrtps, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x51 }, 0, .none, .sse }, .{ .sqrtss, .rm, &.{ .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x51 }, 0, .none, .sse }, + .{ .subps, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x5c }, 0, .none, .sse }, + + .{ .subss, .rm, &.{ .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x5c }, 0, .none, .sse }, + .{ .ucomiss, .rm, &.{ .xmm, .xmm_m32 }, &.{ 0x0f, 0x2e }, 0, .none, .sse }, .{ .xorps, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x57 }, 0, .none, .sse }, @@ -967,6 +971,8 @@ pub const table = [_]Entry{ .{ .punpckldq, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x62 }, 0, .none, .sse2 }, .{ .punpcklqdq, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x6c }, 0, .none, .sse2 }, + .{ .shufpd, .rmi, &.{ .xmm, .xmm_m128, .imm8 }, &.{ 0x66, 0x0f, 0xc6 }, 0, .none, .sse2 }, + .{ .sqrtpd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x51 }, 0, .none, .sse2 }, .{ .sqrtsd, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x51 }, 0, .none, .sse2 }, @@ -990,6 +996,10 @@ pub const table = [_]Entry{ .{ .movsldup, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0xf3, 0x0f, 0x12 }, 0, .none, .sse3 }, // SSE4.1 + .{ .extractps, .mri, &.{ .rm32, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x17 }, 0, .none, .sse4_1 }, + + .{ .insertps, .rmi, &.{ .xmm, .xmm_m32, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x21 }, 0, .none, .sse4_1 }, + .{ .pextrb, .mri, &.{ .r32_m8, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x14 }, 0, .none, .sse4_1 }, .{ .pextrd, .mri, &.{ .rm32, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x16 }, 0, .none, .sse4_1 }, .{ .pextrq, .mri, &.{ .rm64, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x16 }, 0, .long, .sse4_1 }, @@ -1019,6 +1029,11 @@ pub const table = [_]Entry{ .{ .vaddss, .rvm, &.{ .xmm, .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x58 }, 0, .vex_lig_wig, .avx }, + .{ .vbroadcastss, .rm, &.{ .xmm, .m32 }, &.{ 0x66, 0x0f, 0x38, 
0x18 }, 0, .vex_128_w0, .avx }, + .{ .vbroadcastss, .rm, &.{ .ymm, .m32 }, &.{ 0x66, 0x0f, 0x38, 0x18 }, 0, .vex_256_w0, .avx }, + .{ .vbroadcastsd, .rm, &.{ .ymm, .m64 }, &.{ 0x66, 0x0f, 0x38, 0x19 }, 0, .vex_256_w0, .avx }, + .{ .vbroadcastf128, .rm, &.{ .ymm, .m128 }, &.{ 0x66, 0x0f, 0x38, 0x1a }, 0, .vex_256_w0, .avx }, + .{ .vcvtsd2ss, .rvm, &.{ .xmm, .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x5a }, 0, .vex_lig_wig, .avx }, .{ .vcvtsi2sd, .rvm, &.{ .xmm, .xmm, .rm32 }, &.{ 0xf2, 0x0f, 0x2a }, 0, .vex_lig_w0, .avx }, @@ -1039,6 +1054,14 @@ pub const table = [_]Entry{ .{ .vdivss, .rvm, &.{ .xmm, .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x5e }, 0, .vex_lig_wig, .avx }, + .{ .vextractf128, .mri, &.{ .xmm_m128, .ymm, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x19 }, 0, .vex_256_w0, .avx }, + + .{ .vextractps, .mri, &.{ .rm32, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x17 }, 0, .vex_128_wig, .avx }, + + .{ .vinsertf128, .rvmi, &.{ .ymm, .ymm, .xmm_m128, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x18 }, 0, .vex_256_w0, .avx }, + + .{ .vinsertps, .rvmi, &.{ .xmm, .xmm, .xmm_m32, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x21 }, 0, .vex_128_wig, .avx }, + .{ .vmaxpd, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x5f }, 0, .vex_128_wig, .avx }, .{ .vmaxpd, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x5f }, 0, .vex_256_wig, .avx }, @@ -1074,6 +1097,8 @@ pub const table = [_]Entry{ .{ .vmovhlps, .rvm, &.{ .xmm, .xmm, .xmm }, &.{ 0x0f, 0x12 }, 0, .vex_128_wig, .avx }, + .{ .vmovlhps, .rvm, &.{ .xmm, .xmm, .xmm }, &.{ 0x0f, 0x16 }, 0, .vex_128_wig, .avx }, + .{ .vmovsd, .rvm, &.{ .xmm, .xmm, .xmm }, &.{ 0xf2, 0x0f, 0x10 }, 0, .vex_lig_wig, .avx }, .{ .vmovsd, .rm, &.{ .xmm, .m64 }, &.{ 0xf2, 0x0f, 0x10 }, 0, .vex_lig_wig, .avx }, .{ .vmovsd, .mvr, &.{ .xmm, .xmm, .xmm }, &.{ 0xf2, 0x0f, 0x11 }, 0, .vex_lig_wig, .avx }, @@ -1150,6 +1175,12 @@ pub const table = [_]Entry{ .{ .vroundss, .rvmi, &.{ .xmm, .xmm, .xmm_m32, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x0a }, 0, .vex_lig_wig, .avx }, + .{ .vshufpd, .rvmi, &.{ .xmm, .xmm, .xmm_m128, .imm8 }, &.{ 0x66, 0x0f, 0xc6 }, 0, .vex_128_wig, .avx }, + .{ .vshufpd, .rvmi, &.{ .ymm, .ymm, .ymm_m256, .imm8 }, &.{ 0x66, 0x0f, 0xc6 }, 0, .vex_256_wig, .avx }, + + .{ .vshufps, .rvmi, &.{ .xmm, .xmm, .xmm_m128, .imm8 }, &.{ 0x0f, 0xc6 }, 0, .vex_128_wig, .avx }, + .{ .vshufps, .rvmi, &.{ .ymm, .ymm, .ymm_m256, .imm8 }, &.{ 0x0f, 0xc6 }, 0, .vex_256_wig, .avx }, + .{ .vsqrtpd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x51 }, 0, .vex_128_wig, .avx }, .{ .vsqrtpd, .rm, &.{ .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x51 }, 0, .vex_256_wig, .avx }, @@ -1201,6 +1232,10 @@ pub const table = [_]Entry{ .{ .vfmadd231ss, .rvm, &.{ .xmm, .xmm, .xmm_m32 }, &.{ 0x66, 0x0f, 0x38, 0xb9 }, 0, .vex_lig_w0, .fma }, // AVX2 + .{ .vbroadcastss, .rm, &.{ .xmm, .xmm }, &.{ 0x66, 0x0f, 0x38, 0x18 }, 0, .vex_128_w0, .avx2 }, + .{ .vbroadcastss, .rm, &.{ .ymm, .xmm }, &.{ 0x66, 0x0f, 0x38, 0x18 }, 0, .vex_256_w0, .avx2 }, + .{ .vbroadcastsd, .rm, &.{ .ymm, .xmm }, &.{ 0x66, 0x0f, 0x38, 0x19 }, 0, .vex_256_w0, .avx2 }, + .{ .vpsrlw, .rvm, &.{ .ymm, .ymm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xd1 }, 0, .vex_256_wig, .avx2 }, .{ .vpsrlw, .vmi, &.{ .ymm, .ymm, .imm8 }, &.{ 0x66, 0x0f, 0x71 }, 2, .vex_256_wig, .avx2 }, .{ .vpsrld, .rvm, &.{ .ymm, .ymm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xd2 }, 0, .vex_256_wig, .avx2 }, -- cgit v1.2.3 From 3681da25f865d499cffe923b7f0721cf759d3591 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Fri, 12 May 2023 01:16:52 -0400 Subject: x86_64: remove scratch data tags --- src/arch/x86_64/CodeGen.zig | 8 ++++---- 
src/arch/x86_64/Lower.zig | 32 ++++++++++++++++---------------- src/arch/x86_64/Mir.zig | 24 ++++++------------------ 3 files changed, 26 insertions(+), 38 deletions(-) (limited to 'src/arch') diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 29232b5284..4f5bf89989 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -1079,9 +1079,9 @@ fn asmSetccRegister(self: *Self, reg: Register, cc: bits.Condition) !void { .fixes = Mir.Inst.Fixes.fromCondition(cc), .r1 = reg, } }, - .z_and_np, .nz_or_p => .{ .r_scratch = .{ + .z_and_np, .nz_or_p => .{ .rr = .{ .r1 = reg, - .scratch_reg = (try self.register_manager.allocReg(null, gp)).to8(), + .r2 = (try self.register_manager.allocReg(null, gp)).to8(), } }, }, }); @@ -1120,8 +1120,8 @@ fn asmSetccMemory(self: *Self, m: Memory, cc: bits.Condition) !void { .fixes = Mir.Inst.Fixes.fromCondition(cc), .payload = payload, } }, - .z_and_np, .nz_or_p => .{ .x_scratch = .{ - .scratch_reg = (try self.register_manager.allocReg(null, gp)).to8(), + .z_and_np, .nz_or_p => .{ .rx = .{ + .r1 = (try self.register_manager.allocReg(null, gp)).to8(), .payload = payload, } }, }, diff --git a/src/arch/x86_64/Lower.zig b/src/arch/x86_64/Lower.zig index c893429912..f6bce992e6 100644 --- a/src/arch/x86_64/Lower.zig +++ b/src/arch/x86_64/Lower.zig @@ -95,54 +95,54 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct { }, .pseudo_set_z_and_np_r => { try lower.emit(.none, .setz, &.{ - .{ .reg = inst.data.r_scratch.r1 }, + .{ .reg = inst.data.rr.r1 }, }); try lower.emit(.none, .setnp, &.{ - .{ .reg = inst.data.r_scratch.scratch_reg }, + .{ .reg = inst.data.rr.r2 }, }); try lower.emit(.none, .@"and", &.{ - .{ .reg = inst.data.r_scratch.r1 }, - .{ .reg = inst.data.r_scratch.scratch_reg }, + .{ .reg = inst.data.rr.r1 }, + .{ .reg = inst.data.rr.r2 }, }); }, .pseudo_set_z_and_np_m_sib, .pseudo_set_z_and_np_m_rip, => { try lower.emit(.none, .setz, &.{ - .{ .mem = lower.mem(inst.ops, inst.data.x_scratch.payload) }, + .{ .mem = lower.mem(inst.ops, inst.data.rx.payload) }, }); try lower.emit(.none, .setnp, &.{ - .{ .reg = inst.data.x_scratch.scratch_reg }, + .{ .reg = inst.data.rx.r1 }, }); try lower.emit(.none, .@"and", &.{ - .{ .mem = lower.mem(inst.ops, inst.data.x_scratch.payload) }, - .{ .reg = inst.data.x_scratch.scratch_reg }, + .{ .mem = lower.mem(inst.ops, inst.data.rx.payload) }, + .{ .reg = inst.data.rx.r1 }, }); }, .pseudo_set_nz_or_p_r => { try lower.emit(.none, .setnz, &.{ - .{ .reg = inst.data.r_scratch.r1 }, + .{ .reg = inst.data.rr.r1 }, }); try lower.emit(.none, .setp, &.{ - .{ .reg = inst.data.r_scratch.scratch_reg }, + .{ .reg = inst.data.rr.r2 }, }); try lower.emit(.none, .@"or", &.{ - .{ .reg = inst.data.r_scratch.r1 }, - .{ .reg = inst.data.r_scratch.scratch_reg }, + .{ .reg = inst.data.rr.r1 }, + .{ .reg = inst.data.rr.r2 }, }); }, .pseudo_set_nz_or_p_m_sib, .pseudo_set_nz_or_p_m_rip, => { try lower.emit(.none, .setnz, &.{ - .{ .mem = lower.mem(inst.ops, inst.data.x_scratch.payload) }, + .{ .mem = lower.mem(inst.ops, inst.data.rx.payload) }, }); try lower.emit(.none, .setp, &.{ - .{ .reg = inst.data.x_scratch.scratch_reg }, + .{ .reg = inst.data.rx.r1 }, }); try lower.emit(.none, .@"or", &.{ - .{ .mem = lower.mem(inst.ops, inst.data.x_scratch.payload) }, - .{ .reg = inst.data.x_scratch.scratch_reg }, + .{ .mem = lower.mem(inst.ops, inst.data.rx.payload) }, + .{ .reg = inst.data.rx.r1 }, }); }, .pseudo_j_z_and_np_inst => { diff --git a/src/arch/x86_64/Mir.zig b/src/arch/x86_64/Mir.zig index 
18c2903045..919974e7d2 100644 --- a/src/arch/x86_64/Mir.zig +++ b/src/arch/x86_64/Mir.zig @@ -711,27 +711,27 @@ pub const Inst = struct { pseudo_cmov_nz_or_p_rm_rip, /// Set byte if zero flag set and parity flag not set /// Requires a scratch register! - /// Uses `r_scratch` payload. + /// Uses `rr` payload. pseudo_set_z_and_np_r, /// Set byte if zero flag set and parity flag not set /// Requires a scratch register! - /// Uses `x_scratch` payload. + /// Uses `rx` payload. pseudo_set_z_and_np_m_sib, /// Set byte if zero flag set and parity flag not set /// Requires a scratch register! - /// Uses `x_scratch` payload. + /// Uses `rx` payload. pseudo_set_z_and_np_m_rip, /// Set byte if zero flag not set or parity flag set /// Requires a scratch register! - /// Uses `r_scratch` payload. + /// Uses `rr` payload. pseudo_set_nz_or_p_r, /// Set byte if zero flag not set or parity flag set /// Requires a scratch register! - /// Uses `x_scratch` payload. + /// Uses `rx` payload. pseudo_set_nz_or_p_m_sib, /// Set byte if zero flag not set or parity flag set /// Requires a scratch register! - /// Uses `x_scratch` payload. + /// Uses `rx` payload. pseudo_set_nz_or_p_m_rip, /// Jump if zero flag set and parity flag not set /// Uses `inst` payload. @@ -836,18 +836,6 @@ pub const Inst = struct { i: u8, payload: u32, }, - /// Register, scratch register - r_scratch: struct { - fixes: Fixes = ._, - r1: Register, - scratch_reg: Register, - }, - /// Scratch register, followed by Custom payload found in extra. - x_scratch: struct { - fixes: Fixes = ._, - scratch_reg: Register, - payload: u32, - }, /// Custom payload found in extra. x: struct { fixes: Fixes = ._, -- cgit v1.2.3 From f83ebd8e6c95cf37d498936cae26d3a743cddc7f Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Fri, 12 May 2023 02:11:37 -0400 Subject: x86_64: implement stack probing --- src/arch/x86_64/CodeGen.zig | 63 ++++++++++++++++++++++++----- src/arch/x86_64/Lower.zig | 97 +++++++++++++++++++++++++++++++++++++++++++++ src/arch/x86_64/Mir.zig | 12 ++++++ test/behavior/memset.zig | 2 - 4 files changed, 163 insertions(+), 11 deletions(-) (limited to 'src/arch') diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 4f5bf89989..523faa5cb2 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -1550,7 +1550,9 @@ fn gen(self: *Self) InnerError!void { const backpatch_push_callee_preserved_regs = try self.asmPlaceholder(); try self.asmRegisterRegister(.{ ._, .mov }, .rbp, .rsp); const backpatch_frame_align = try self.asmPlaceholder(); + const backpatch_frame_align_extra = try self.asmPlaceholder(); const backpatch_stack_alloc = try self.asmPlaceholder(); + const backpatch_stack_alloc_extra = try self.asmPlaceholder(); switch (self.ret_mcv.long) { .none, .unreach => {}, @@ -1599,24 +1601,67 @@ fn gen(self: *Self) InnerError!void { const need_stack_adjust = frame_layout.stack_adjust > 0; const need_save_reg = frame_layout.save_reg_list.count() > 0; if (need_frame_align) { + const page_align = @as(u32, math.maxInt(u32)) << 12; self.mir_instructions.set(backpatch_frame_align, .{ .tag = .@"and", .ops = .ri_s, .data = .{ .ri = .{ .r1 = .rsp, - .i = frame_layout.stack_mask, + .i = @max(frame_layout.stack_mask, page_align), } }, }); + if (frame_layout.stack_mask < page_align) { + self.mir_instructions.set(backpatch_frame_align_extra, .{ + .tag = .pseudo, + .ops = .pseudo_probe_align_ri_s, + .data = .{ .ri = .{ + .r1 = .rsp, + .i = ~frame_layout.stack_mask & page_align, + } }, + }); + } } if (need_stack_adjust) { - 
self.mir_instructions.set(backpatch_stack_alloc, .{ - .tag = .sub, - .ops = .ri_s, - .data = .{ .ri = .{ - .r1 = .rsp, - .i = frame_layout.stack_adjust, - } }, - }); + const page_size: u32 = 1 << 12; + if (frame_layout.stack_adjust <= page_size) { + self.mir_instructions.set(backpatch_stack_alloc, .{ + .tag = .sub, + .ops = .ri_s, + .data = .{ .ri = .{ + .r1 = .rsp, + .i = frame_layout.stack_adjust, + } }, + }); + } else if (frame_layout.stack_adjust < + page_size * Lower.pseudo_probe_adjust_unrolled_max_insts) + { + self.mir_instructions.set(backpatch_stack_alloc, .{ + .tag = .pseudo, + .ops = .pseudo_probe_adjust_unrolled_ri_s, + .data = .{ .ri = .{ + .r1 = .rsp, + .i = frame_layout.stack_adjust, + } }, + }); + } else { + self.mir_instructions.set(backpatch_stack_alloc, .{ + .tag = .pseudo, + .ops = .pseudo_probe_adjust_setup_rri_s, + .data = .{ .rri = .{ + .r1 = .rsp, + .r2 = .rax, + .i = frame_layout.stack_adjust, + } }, + }); + self.mir_instructions.set(backpatch_stack_alloc_extra, .{ + .tag = .pseudo, + .ops = .pseudo_probe_adjust_loop_rr, + .data = .{ .rr = .{ + .r1 = .rsp, + .r2 = .rax, + } }, + }); + } } if (need_frame_align or need_stack_adjust) { self.mir_instructions.set(backpatch_stack_dealloc, .{ diff --git a/src/arch/x86_64/Lower.zig b/src/arch/x86_64/Lower.zig index f6bce992e6..65d2b64398 100644 --- a/src/arch/x86_64/Lower.zig +++ b/src/arch/x86_64/Lower.zig @@ -9,19 +9,33 @@ result_insts_len: u8 = undefined, result_relocs_len: u8 = undefined, result_insts: [ std.mem.max(usize, &.{ + 1, // non-pseudo instructions 2, // cmovcc: cmovcc \ cmovcc 3, // setcc: setcc \ setcc \ logicop 2, // jcc: jcc \ jcc + pseudo_probe_align_insts, + pseudo_probe_adjust_unrolled_max_insts, + pseudo_probe_adjust_setup_insts, + pseudo_probe_adjust_loop_insts, abi.Win64.callee_preserved_regs.len, // push_regs/pop_regs abi.SysV.callee_preserved_regs.len, // push_regs/pop_regs }) ]Instruction = undefined, result_relocs: [ std.mem.max(usize, &.{ + 1, // jmp/jcc/call/mov/lea: jmp/jcc/call/mov/lea 2, // jcc: jcc \ jcc + 2, // test \ jcc \ probe \ sub \ jmp + 1, // probe \ sub \ jcc }) ]Reloc = undefined, +pub const pseudo_probe_align_insts = 5; // test \ jcc \ probe \ sub \ jmp +pub const pseudo_probe_adjust_unrolled_max_insts = + pseudo_probe_adjust_setup_insts + pseudo_probe_adjust_loop_insts; +pub const pseudo_probe_adjust_setup_insts = 2; // mov \ sub +pub const pseudo_probe_adjust_loop_insts = 3; // probe \ sub \ jcc + pub const Error = error{ OutOfMemory, LowerFail, @@ -62,6 +76,7 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct { else => try lower.generic(inst), .pseudo => switch (inst.ops) { .pseudo_cmov_z_and_np_rr => { + assert(inst.data.rr.fixes == ._); try lower.emit(.none, .cmovnz, &.{ .{ .reg = inst.data.rr.r2 }, .{ .reg = inst.data.rr.r1 }, @@ -72,6 +87,7 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct { }); }, .pseudo_cmov_nz_or_p_rr => { + assert(inst.data.rr.fixes == ._); try lower.emit(.none, .cmovnz, &.{ .{ .reg = inst.data.rr.r1 }, .{ .reg = inst.data.rr.r2 }, @@ -84,6 +100,7 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct { .pseudo_cmov_nz_or_p_rm_sib, .pseudo_cmov_nz_or_p_rm_rip, => { + assert(inst.data.rx.fixes == ._); try lower.emit(.none, .cmovnz, &.{ .{ .reg = inst.data.rx.r1 }, .{ .mem = lower.mem(inst.ops, inst.data.rx.payload) }, @@ -94,6 +111,7 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct { }); }, .pseudo_set_z_and_np_r => { + assert(inst.data.rr.fixes == ._); try lower.emit(.none, 
.setz, &.{ .{ .reg = inst.data.rr.r1 }, }); @@ -108,6 +126,7 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct { .pseudo_set_z_and_np_m_sib, .pseudo_set_z_and_np_m_rip, => { + assert(inst.data.rx.fixes == ._); try lower.emit(.none, .setz, &.{ .{ .mem = lower.mem(inst.ops, inst.data.rx.payload) }, }); @@ -120,6 +139,7 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct { }); }, .pseudo_set_nz_or_p_r => { + assert(inst.data.rr.fixes == ._); try lower.emit(.none, .setnz, &.{ .{ .reg = inst.data.rr.r1 }, }); @@ -134,6 +154,7 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct { .pseudo_set_nz_or_p_m_sib, .pseudo_set_nz_or_p_m_rip, => { + assert(inst.data.rx.fixes == ._); try lower.emit(.none, .setnz, &.{ .{ .mem = lower.mem(inst.ops, inst.data.rx.payload) }, }); @@ -146,6 +167,7 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct { }); }, .pseudo_j_z_and_np_inst => { + assert(inst.data.inst.fixes == ._); try lower.emit(.none, .jnz, &.{ .{ .imm = lower.reloc(.{ .inst = index + 1 }) }, }); @@ -154,6 +176,7 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct { }); }, .pseudo_j_nz_or_p_inst => { + assert(inst.data.inst.fixes == ._); try lower.emit(.none, .jnz, &.{ .{ .imm = lower.reloc(.{ .inst = inst.data.inst.inst }) }, }); @@ -162,6 +185,78 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct { }); }, + .pseudo_probe_align_ri_s => { + try lower.emit(.none, .@"test", &.{ + .{ .reg = inst.data.ri.r1 }, + .{ .imm = Immediate.s(@bitCast(i32, inst.data.ri.i)) }, + }); + try lower.emit(.none, .jz, &.{ + .{ .imm = lower.reloc(.{ .inst = index + 1 }) }, + }); + try lower.emit(.none, .lea, &.{ + .{ .reg = inst.data.ri.r1 }, + .{ .mem = Memory.sib(.qword, .{ + .base = .{ .reg = inst.data.ri.r1 }, + .disp = -page_size, + }) }, + }); + try lower.emit(.none, .@"test", &.{ + .{ .mem = Memory.sib(.dword, .{ + .base = .{ .reg = inst.data.ri.r1 }, + }) }, + .{ .reg = inst.data.ri.r1.to32() }, + }); + try lower.emit(.none, .jmp, &.{ + .{ .imm = lower.reloc(.{ .inst = index }) }, + }); + assert(lower.result_insts_len == pseudo_probe_align_insts); + }, + .pseudo_probe_adjust_unrolled_ri_s => { + var offset = page_size; + while (offset < @bitCast(i32, inst.data.ri.i)) : (offset += page_size) { + try lower.emit(.none, .@"test", &.{ + .{ .mem = Memory.sib(.dword, .{ + .base = .{ .reg = inst.data.ri.r1 }, + .disp = -offset, + }) }, + .{ .reg = inst.data.ri.r1.to32() }, + }); + } + try lower.emit(.none, .sub, &.{ + .{ .reg = inst.data.ri.r1 }, + .{ .imm = Immediate.s(@bitCast(i32, inst.data.ri.i)) }, + }); + assert(lower.result_insts_len <= pseudo_probe_adjust_unrolled_max_insts); + }, + .pseudo_probe_adjust_setup_rri_s => { + try lower.emit(.none, .mov, &.{ + .{ .reg = inst.data.rri.r2.to32() }, + .{ .imm = Immediate.s(@bitCast(i32, inst.data.rri.i)) }, + }); + try lower.emit(.none, .sub, &.{ + .{ .reg = inst.data.rri.r1 }, + .{ .reg = inst.data.rri.r2 }, + }); + assert(lower.result_insts_len == pseudo_probe_adjust_setup_insts); + }, + .pseudo_probe_adjust_loop_rr => { + try lower.emit(.none, .@"test", &.{ + .{ .mem = Memory.sib(.dword, .{ + .base = .{ .reg = inst.data.rr.r1 }, + .scale_index = .{ .scale = 1, .index = inst.data.rr.r2 }, + .disp = -page_size, + }) }, + .{ .reg = inst.data.rr.r1.to32() }, + }); + try lower.emit(.none, .sub, &.{ + .{ .reg = inst.data.rr.r2 }, + .{ .imm = Immediate.s(page_size) }, + }); + try lower.emit(.none, .jae, &.{ + .{ .imm = lower.reloc(.{ .inst = index }) }, + }); + 
assert(lower.result_insts_len == pseudo_probe_adjust_loop_insts); + }, .pseudo_push_reg_list => try lower.pushPopRegList(.push, inst), .pseudo_pop_reg_list => try lower.pushPopRegList(.pop, inst), @@ -440,6 +535,8 @@ fn pushPopRegList(lower: *Lower, comptime mnemonic: Mnemonic, inst: Mir.Inst) Er }}); } +const page_size: i32 = 1 << 12; + const abi = @import("abi.zig"); const assert = std.debug.assert; const bits = @import("bits.zig"); diff --git a/src/arch/x86_64/Mir.zig b/src/arch/x86_64/Mir.zig index 919974e7d2..f26bf97e82 100644 --- a/src/arch/x86_64/Mir.zig +++ b/src/arch/x86_64/Mir.zig @@ -740,6 +740,18 @@ pub const Inst = struct { /// Uses `inst` payload. pseudo_j_nz_or_p_inst, + /// Probe alignment + /// Uses `ri` payload + pseudo_probe_align_ri_s, + /// Probe adjust unrolled + /// Uses `ri` payload + pseudo_probe_adjust_unrolled_ri_s, + /// Probe adjust setup + /// Uses `rri` payload + pseudo_probe_adjust_setup_rri_s, + /// Probe adjust loop + /// Uses `rr` payload + pseudo_probe_adjust_loop_rr, /// Push registers /// Uses `reg_list` payload. pseudo_push_reg_list, diff --git a/test/behavior/memset.zig b/test/behavior/memset.zig index e111b5c523..be09ef655f 100644 --- a/test/behavior/memset.zig +++ b/test/behavior/memset.zig @@ -120,7 +120,6 @@ test "memset with large array element, runtime known" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and builtin.os.tag == .windows) return error.SkipZigTest; const A = [128]u64; var buf: [5]A = undefined; @@ -139,7 +138,6 @@ test "memset with large array element, comptime known" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and builtin.os.tag == .windows) return error.SkipZigTest; const A = [128]u64; var buf: [5]A = undefined; -- cgit v1.2.3 From 2cbd442a9df16ab2d13d03041631f516269c9f64 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Sat, 13 May 2023 03:05:49 -0400 Subject: x86_64: implement integer vector movement --- src/arch/x86_64/CodeGen.zig | 366 ++++++++++++++++++++++++++------------ test/behavior/maximum_minimum.zig | 1 - test/behavior/vector.zig | 3 - 3 files changed, 248 insertions(+), 122 deletions(-) (limited to 'src/arch') diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 523faa5cb2..c04bb1d2a5 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -8579,56 +8579,174 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void { return self.finishAirResult(inst, result); } -fn movMirTag(self: *Self, ty: Type, aligned: bool) !Mir.Inst.FixedTag { +const MoveStrategy = union(enum) { + move: Mir.Inst.FixedTag, + insert_extract: InsertExtract, + vex_insert_extract: InsertExtract, + + const InsertExtract = struct { + insert: Mir.Inst.FixedTag, + extract: Mir.Inst.FixedTag, + imm: Immediate, + }; +}; +fn moveStrategy(self: *Self, ty: Type, aligned: bool) !MoveStrategy { switch (ty.zigTypeTag()) { - else => return .{ ._, .mov }, + else => return .{ .move = .{ ._, .mov } }, .Float => switch (ty.floatBits(self.target.*)) { - 16 => unreachable, // needs special handling - 32 => return if (self.hasFeature(.avx)) .{ .v_ss, .mov } else .{ ._ss, .mov }, - 64 => return if (self.hasFeature(.avx)) 
.{ .v_sd, .mov } else .{ ._sd, .mov }, - 128 => return if (self.hasFeature(.avx)) + 16 => return if (self.hasFeature(.avx)) .{ .vex_insert_extract = .{ + .insert = .{ .vp_w, .insr }, + .extract = .{ .vp_w, .extr }, + .imm = Immediate.u(0), + } } else .{ .insert_extract = .{ + .insert = .{ .p_w, .insr }, + .extract = .{ .p_w, .extr }, + .imm = Immediate.u(0), + } }, + 32 => return .{ .move = if (self.hasFeature(.avx)) .{ .v_ss, .mov } else .{ ._ss, .mov } }, + 64 => return .{ .move = if (self.hasFeature(.avx)) .{ .v_sd, .mov } else .{ ._sd, .mov } }, + 128 => return .{ .move = if (self.hasFeature(.avx)) if (aligned) .{ .v_ps, .mova } else .{ .v_ps, .movu } - else if (aligned) .{ ._ps, .mova } else .{ ._ps, .movu }, + else if (aligned) .{ ._ps, .mova } else .{ ._ps, .movu } }, else => {}, }, .Vector => switch (ty.childType().zigTypeTag()) { + .Int => switch (ty.childType().intInfo(self.target.*).bits) { + 8 => switch (ty.vectorLen()) { + 1 => if (self.hasFeature(.avx)) return .{ .vex_insert_extract = .{ + .insert = .{ .vp_b, .insr }, + .extract = .{ .vp_b, .extr }, + .imm = Immediate.u(0), + } } else if (self.hasFeature(.sse4_2)) return .{ .insert_extract = .{ + .insert = .{ .p_b, .insr }, + .extract = .{ .p_b, .extr }, + .imm = Immediate.u(0), + } }, + 2 => return if (self.hasFeature(.avx)) .{ .vex_insert_extract = .{ + .insert = .{ .vp_w, .insr }, + .extract = .{ .vp_w, .extr }, + .imm = Immediate.u(0), + } } else .{ .insert_extract = .{ + .insert = .{ .p_w, .insr }, + .extract = .{ .p_w, .extr }, + .imm = Immediate.u(0), + } }, + 3...4 => return .{ .move = if (self.hasFeature(.avx)) + .{ .v_ss, .mov } + else + .{ ._ss, .mov } }, + 5...8 => return .{ .move = if (self.hasFeature(.avx)) + .{ .v_sd, .mov } + else + .{ ._sd, .mov } }, + else => {}, + }, + 16 => switch (ty.vectorLen()) { + 1 => return if (self.hasFeature(.avx)) .{ .vex_insert_extract = .{ + .insert = .{ .vp_w, .insr }, + .extract = .{ .vp_w, .extr }, + .imm = Immediate.u(0), + } } else .{ .insert_extract = .{ + .insert = .{ .p_w, .insr }, + .extract = .{ .p_w, .extr }, + .imm = Immediate.u(0), + } }, + 2 => return .{ .move = if (self.hasFeature(.avx)) + .{ .v_ss, .mov } + else + .{ ._ss, .mov } }, + 3...4 => return .{ .move = if (self.hasFeature(.avx)) + .{ .v_sd, .mov } + else + .{ ._sd, .mov } }, + 5...8 => return .{ .move = if (self.hasFeature(.avx)) + .{ .v_ps, .mov } + else + .{ ._ps, .mov } }, + else => {}, + }, + 32 => switch (ty.vectorLen()) { + 1 => return .{ .move = if (self.hasFeature(.avx)) + .{ .v_ss, .mov } + else + .{ ._ss, .mov } }, + 2 => return .{ .move = if (self.hasFeature(.avx)) + .{ .v_sd, .mov } + else + .{ ._sd, .mov } }, + 3...4 => return .{ .move = if (self.hasFeature(.avx)) + if (aligned) .{ .v_ps, .mova } else .{ .v_ps, .movu } + else if (aligned) .{ ._ps, .mova } else .{ ._ps, .movu } }, + 5...8 => if (self.hasFeature(.avx)) + return .{ .move = if (aligned) .{ .v_ps, .mova } else .{ .v_ps, .movu } }, + else => {}, + }, + 64 => switch (ty.vectorLen()) { + 1 => return .{ .move = if (self.hasFeature(.avx)) + .{ .v_sd, .mov } + else + .{ ._sd, .mov } }, + 2 => return .{ .move = if (self.hasFeature(.avx)) + if (aligned) .{ .v_ps, .mova } else .{ .v_ps, .movu } + else if (aligned) .{ ._ps, .mova } else .{ ._ps, .movu } }, + 3...4 => if (self.hasFeature(.avx)) + return .{ .move = if (aligned) .{ .v_ps, .mova } else .{ .v_ps, .movu } }, + else => {}, + }, + else => {}, + }, .Float => switch (ty.childType().floatBits(self.target.*)) { 16 => switch (ty.vectorLen()) { - 1 => unreachable, // needs special 
handling - 2 => return if (self.hasFeature(.avx)) .{ .v_ss, .mov } else .{ ._ss, .mov }, - 3...4 => return if (self.hasFeature(.avx)) .{ .v_sd, .mov } else .{ ._sd, .mov }, - 5...8 => return if (self.hasFeature(.avx)) + 1 => {}, + 2 => return .{ .move = if (self.hasFeature(.avx)) + .{ .v_ss, .mov } + else + .{ ._ss, .mov } }, + 3...4 => return .{ .move = if (self.hasFeature(.avx)) + .{ .v_sd, .mov } + else + .{ ._sd, .mov } }, + 5...8 => return .{ .move = if (self.hasFeature(.avx)) if (aligned) .{ .v_ps, .mova } else .{ .v_ps, .movu } - else if (aligned) .{ ._ps, .mova } else .{ ._ps, .movu }, + else if (aligned) .{ ._ps, .mova } else .{ ._ps, .movu } }, 9...16 => if (self.hasFeature(.avx)) - return if (aligned) .{ .v_ps, .mova } else .{ .v_ps, .movu }, + return .{ .move = if (aligned) .{ .v_ps, .mova } else .{ .v_ps, .movu } }, else => {}, }, 32 => switch (ty.vectorLen()) { - 1 => return if (self.hasFeature(.avx)) .{ .v_ss, .mov } else .{ ._ss, .mov }, - 2 => return if (self.hasFeature(.avx)) .{ .v_sd, .mov } else .{ ._sd, .mov }, - 3...4 => return if (self.hasFeature(.avx)) + 1 => return .{ .move = if (self.hasFeature(.avx)) + .{ .v_ss, .mov } + else + .{ ._ss, .mov } }, + 2 => return .{ .move = if (self.hasFeature(.avx)) + .{ .v_sd, .mov } + else + .{ ._sd, .mov } }, + 3...4 => return .{ .move = if (self.hasFeature(.avx)) if (aligned) .{ .v_ps, .mova } else .{ .v_ps, .movu } - else if (aligned) .{ ._ps, .mova } else .{ ._ps, .movu }, + else if (aligned) .{ ._ps, .mova } else .{ ._ps, .movu } }, 5...8 => if (self.hasFeature(.avx)) - return if (aligned) .{ .v_ps, .mova } else .{ .v_ps, .movu }, + return .{ .move = if (aligned) .{ .v_ps, .mova } else .{ .v_ps, .movu } }, else => {}, }, 64 => switch (ty.vectorLen()) { - 1 => return if (self.hasFeature(.avx)) .{ .v_sd, .mov } else .{ ._sd, .mov }, - 2 => return if (self.hasFeature(.avx)) + 1 => return .{ .move = if (self.hasFeature(.avx)) + .{ .v_sd, .mov } + else + .{ ._sd, .mov } }, + 2 => return .{ .move = if (self.hasFeature(.avx)) if (aligned) .{ .v_ps, .mova } else .{ .v_ps, .movu } - else if (aligned) .{ ._ps, .mova } else .{ ._ps, .movu }, + else if (aligned) .{ ._ps, .mova } else .{ ._ps, .movu } }, 3...4 => if (self.hasFeature(.avx)) - return if (aligned) .{ .v_ps, .mova } else .{ .v_ps, .movu }, + return .{ .move = if (aligned) .{ .v_ps, .mova } else .{ .v_ps, .movu } }, else => {}, }, 128 => switch (ty.vectorLen()) { - 1 => return if (self.hasFeature(.avx)) + 1 => return .{ .move = if (self.hasFeature(.avx)) if (aligned) .{ .v_ps, .mova } else .{ .v_ps, .movu } - else if (aligned) .{ ._ps, .mova } else .{ ._ps, .movu }, + else if (aligned) .{ ._ps, .mova } else .{ ._ps, .movu } }, 2 => if (self.hasFeature(.avx)) - return if (aligned) .{ .v_ps, .mova } else .{ .v_ps, .movu }, + return .{ .move = if (aligned) .{ .v_ps, .mova } else .{ .v_ps, .movu } }, else => {}, }, else => {}, @@ -8636,7 +8754,7 @@ fn movMirTag(self: *Self, ty: Type, aligned: bool) !Mir.Inst.FixedTag { else => {}, }, } - return self.fail("TODO movMirTag for {}", .{ty.fmt(self.bin_file.options.module.?)}); + return self.fail("TODO moveStrategy for {}", .{ty.fmt(self.bin_file.options.module.?)}); } fn genCopy(self: *Self, ty: Type, dst_mcv: MCValue, src_mcv: MCValue) InnerError!void { @@ -8764,6 +8882,7 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr .load_frame, .lea_frame, => { + const dst_alias = registerAlias(dst_reg, abi_size); const src_mem = Memory.sib(Memory.PtrSize.fromSize(abi_size), switch (src_mcv) { .register_offset, 
.indirect => |reg_off| .{ .base = .{ .reg = reg_off.reg }, @@ -8775,71 +8894,81 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr }, else => unreachable, }); - if (ty.isRuntimeFloat() and ty.floatBits(self.target.*) == 16) - try self.asmRegisterMemoryImmediate( - .{ .p_w, .insr }, - registerAlias(dst_reg, abi_size), + switch (@as(MoveStrategy, switch (src_mcv) { + .register_offset => |reg_off| switch (reg_off.off) { + 0 => return self.genSetReg(dst_reg, ty, .{ .register = reg_off.reg }), + else => .{ .move = .{ ._, .lea } }, + }, + .indirect => try self.moveStrategy(ty, false), + .load_frame => |frame_addr| try self.moveStrategy( + ty, + self.getFrameAddrAlignment(frame_addr) >= ty.abiAlignment(self.target.*), + ), + .lea_frame => .{ .move = .{ ._, .lea } }, + else => unreachable, + })) { + .move => |tag| try self.asmRegisterMemory(tag, dst_alias, src_mem), + .insert_extract => |ie| try self.asmRegisterMemoryImmediate( + ie.insert, + dst_alias, src_mem, - Immediate.u(0), - ) - else - try self.asmRegisterMemory( - switch (src_mcv) { - .register_offset => |reg_off| switch (reg_off.off) { - 0 => return self.genSetReg(dst_reg, ty, .{ .register = reg_off.reg }), - else => .{ ._, .lea }, - }, - .indirect => try self.movMirTag(ty, false), - .load_frame => |frame_addr| try self.movMirTag( - ty, - self.getFrameAddrAlignment(frame_addr) >= ty.abiAlignment(self.target.*), - ), - .lea_frame => .{ ._, .lea }, - else => unreachable, - }, - registerAlias(dst_reg, abi_size), + ie.imm, + ), + .vex_insert_extract => |ie| try self.asmRegisterRegisterMemoryImmediate( + ie.insert, + dst_alias, + dst_alias, src_mem, - ); + ie.imm, + ), + } }, .memory, .load_direct, .load_got, .load_tlv => { switch (src_mcv) { .memory => |addr| if (math.cast(i32, @bitCast(i64, addr))) |small_addr| { + const dst_alias = registerAlias(dst_reg, abi_size); const src_mem = Memory.sib(Memory.PtrSize.fromSize(abi_size), .{ .base = .{ .reg = .ds }, .disp = small_addr, }); - return if (ty.isRuntimeFloat() and ty.floatBits(self.target.*) == 16) - self.asmRegisterMemoryImmediate( - .{ .p_w, .insr }, - registerAlias(dst_reg, abi_size), + switch (try self.moveStrategy(ty, mem.isAlignedGeneric( + u32, + @bitCast(u32, small_addr), + ty.abiAlignment(self.target.*), + ))) { + .move => |tag| try self.asmRegisterMemory(tag, dst_alias, src_mem), + .insert_extract => |ie| try self.asmRegisterMemoryImmediate( + ie.insert, + dst_alias, src_mem, - Immediate.u(0), - ) - else - self.asmRegisterMemory( - try self.movMirTag(ty, mem.isAlignedGeneric( - u32, - @bitCast(u32, small_addr), - ty.abiAlignment(self.target.*), - )), - registerAlias(dst_reg, abi_size), + ie.imm, + ), + .vex_insert_extract => |ie| try self.asmRegisterRegisterMemoryImmediate( + ie.insert, + dst_alias, + dst_alias, src_mem, - ); + ie.imm, + ), + } }, - .load_direct => |sym_index| if (!ty.isRuntimeFloat()) { - const atom_index = try self.owner.getSymbolIndex(self); - _ = try self.addInst(.{ - .tag = .mov, - .ops = .direct_reloc, - .data = .{ .rx = .{ - .r1 = dst_reg.to64(), - .payload = try self.addExtra(Mir.Reloc{ - .atom_index = atom_index, - .sym_index = sym_index, - }), - } }, - }); - return; + .load_direct => |sym_index| switch (ty.zigTypeTag()) { + else => { + const atom_index = try self.owner.getSymbolIndex(self); + _ = try self.addInst(.{ + .tag = .mov, + .ops = .direct_reloc, + .data = .{ .rx = .{ + .r1 = dst_reg.to64(), + .payload = try self.addExtra(Mir.Reloc{ + .atom_index = atom_index, + .sym_index = sym_index, + }), + } }, + }); + return; 
+ }, + .Float, .Vector => {}, }, .load_got, .load_tlv => {}, else => unreachable, @@ -8849,22 +8978,26 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr const addr_lock = self.register_manager.lockRegAssumeUnused(addr_reg); defer self.register_manager.unlockReg(addr_lock); + const dst_alias = registerAlias(dst_reg, abi_size); const src_mem = Memory.sib(Memory.PtrSize.fromSize(abi_size), .{ .base = .{ .reg = addr_reg }, }); - if (ty.isRuntimeFloat() and ty.floatBits(self.target.*) == 16) - try self.asmRegisterMemoryImmediate( - .{ .p_w, .insr }, - registerAlias(dst_reg, abi_size), + switch (try self.moveStrategy(ty, false)) { + .move => |tag| try self.asmRegisterMemory(tag, dst_alias, src_mem), + .insert_extract => |ie| try self.asmRegisterMemoryImmediate( + ie.insert, + dst_alias, src_mem, - Immediate.u(0), - ) - else - try self.asmRegisterMemory( - try self.movMirTag(ty, false), - registerAlias(dst_reg, abi_size), + ie.imm, + ), + .vex_insert_extract => |ie| try self.asmRegisterRegisterMemoryImmediate( + ie.insert, + dst_alias, + dst_alias, src_mem, - ); + ie.imm, + ), + } }, .lea_direct, .lea_got => |sym_index| { const atom_index = try self.owner.getSymbolIndex(self); @@ -8966,36 +9099,33 @@ fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCVal Memory.PtrSize.fromSize(abi_size), .{ .base = base, .disp = disp }, ); - if (ty.isRuntimeFloat() and ty.floatBits(self.target.*) == 16) - try self.asmMemoryRegisterImmediate( - .{ .p_w, .extr }, - dst_mem, - src_reg.to128(), - Immediate.u(0), - ) - else - try self.asmMemoryRegister( - try self.movMirTag(ty, switch (base) { - .none => mem.isAlignedGeneric( - u32, - @bitCast(u32, disp), - ty.abiAlignment(self.target.*), - ), - .reg => |reg| switch (reg) { - .es, .cs, .ss, .ds => mem.isAlignedGeneric( - u32, - @bitCast(u32, disp), - ty.abiAlignment(self.target.*), - ), - else => false, - }, - .frame => |frame_index| self.getFrameAddrAlignment( - .{ .index = frame_index, .off = disp }, - ) >= ty.abiAlignment(self.target.*), - }), + const src_alias = registerAlias(src_reg, abi_size); + switch (try self.moveStrategy(ty, switch (base) { + .none => mem.isAlignedGeneric( + u32, + @bitCast(u32, disp), + ty.abiAlignment(self.target.*), + ), + .reg => |reg| switch (reg) { + .es, .cs, .ss, .ds => mem.isAlignedGeneric( + u32, + @bitCast(u32, disp), + ty.abiAlignment(self.target.*), + ), + else => false, + }, + .frame => |frame_index| self.getFrameAddrAlignment( + .{ .index = frame_index, .off = disp }, + ) >= ty.abiAlignment(self.target.*), + })) { + .move => |tag| try self.asmMemoryRegister(tag, dst_mem, src_alias), + .insert_extract, .vex_insert_extract => |ie| try self.asmMemoryRegisterImmediate( + ie.extract, dst_mem, - registerAlias(src_reg, abi_size), - ); + src_alias, + ie.imm, + ), + } }, .register_overflow => |ro| { try self.genSetMem( diff --git a/test/behavior/maximum_minimum.zig b/test/behavior/maximum_minimum.zig index 6496e00afd..b4d2160713 100644 --- a/test/behavior/maximum_minimum.zig +++ b/test/behavior/maximum_minimum.zig @@ -146,7 +146,6 @@ test "@min/@max more than two arguments" { test "@min/@max more than two vector arguments" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return 
error.SkipZigTest; // TODO diff --git a/test/behavior/vector.zig b/test/behavior/vector.zig index 537879b5c9..87ccdfb567 100644 --- a/test/behavior/vector.zig +++ b/test/behavior/vector.zig @@ -1129,7 +1129,6 @@ test "loading the second vector from a slice of vectors" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO @setRuntimeSafety(false); var small_bases = [2]@Vector(2, u8){ @@ -1219,7 +1218,6 @@ test "zero multiplicand" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO const zeros = @Vector(2, u32){ 0.0, 0.0 }; var ones = @Vector(2, u32){ 1.0, 1.0 }; @@ -1324,7 +1322,6 @@ test "store to vector in slice" { test "addition of vectors represented as strings" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; const V = @Vector(3, u8); -- cgit v1.2.3 From 57c38f6433c8024d1946bcf1b5b7d0892fc751a7 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Sat, 13 May 2023 02:24:41 -0400 Subject: x86_64: implement global payload pointers --- src/arch/x86_64/CodeGen.zig | 7 +++++-- src/codegen.zig | 34 ++++++++++++++++++++++++++++++---- test/behavior/optional.zig | 2 -- 3 files changed, 35 insertions(+), 8 deletions(-) (limited to 'src/arch') diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index c04bb1d2a5..4aa2443295 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -3467,14 +3467,17 @@ fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void { try self.copyToRegisterWithInstTracking(inst, dst_ty, src_mcv); } - const dst_mcv = if (src_mcv.isRegister() and self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) + const dst_mcv: MCValue = if (src_mcv.isRegister() and + self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) src_mcv + else if (self.liveness.isUnused(inst)) + .{ .register = try self.copyToTmpRegister(dst_ty, src_mcv) } else try self.copyToRegisterWithInstTracking(inst, dst_ty, src_mcv); const pl_ty = dst_ty.childType(); const pl_abi_size = @intCast(i32, pl_ty.abiSize(self.target.*)); - try self.genSetMem(.{ .reg = dst_mcv.register }, pl_abi_size, Type.bool, .{ .immediate = 1 }); + try self.genSetMem(.{ .reg = dst_mcv.getReg().? 
}, pl_abi_size, Type.bool, .{ .immediate = 1 }); break :result if (self.liveness.isUnused(inst)) .unreach else dst_mcv; }; return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); diff --git a/src/codegen.zig b/src/codegen.zig index 7f65df2804..7a22d0b218 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -380,7 +380,7 @@ pub fn generateSymbol( return Result.ok; }, - .field_ptr, .elem_ptr => return lowerParentPtr( + .field_ptr, .elem_ptr, .opt_payload_ptr => return lowerParentPtr( bin_file, src_loc, typed_value, @@ -812,7 +812,6 @@ fn lowerParentPtr( reloc_info: RelocInfo, ) CodeGenError!Result { const target = bin_file.options.target; - switch (parent_ptr.tag()) { .field_ptr => { const field_ptr = parent_ptr.castTag(.field_ptr).?.data; @@ -858,6 +857,31 @@ fn lowerParentPtr( reloc_info.offset(@intCast(u32, elem_ptr.index * elem_ptr.elem_ty.abiSize(target))), ); }, + .opt_payload_ptr => { + const opt_payload_ptr = parent_ptr.castTag(.opt_payload_ptr).?.data; + return lowerParentPtr( + bin_file, + src_loc, + typed_value, + opt_payload_ptr.container_ptr, + code, + debug_output, + reloc_info, + ); + }, + .eu_payload_ptr => { + const eu_payload_ptr = parent_ptr.castTag(.eu_payload_ptr).?.data; + const pl_ty = eu_payload_ptr.container_ty.errorUnionPayload(); + return lowerParentPtr( + bin_file, + src_loc, + typed_value, + eu_payload_ptr.container_ptr, + code, + debug_output, + reloc_info.offset(@intCast(u32, errUnionPayloadOffset(pl_ty, target))), + ); + }, .variable, .decl_ref, .decl_ref_mut => |tag| return lowerDeclRef( bin_file, src_loc, @@ -1262,9 +1286,10 @@ pub fn genTypedValue( } pub fn errUnionPayloadOffset(payload_ty: Type, target: std.Target) u64 { + if (!payload_ty.hasRuntimeBitsIgnoreComptime()) return 0; const payload_align = payload_ty.abiAlignment(target); const error_align = Type.anyerror.abiAlignment(target); - if (payload_align >= error_align) { + if (payload_align >= error_align or !payload_ty.hasRuntimeBitsIgnoreComptime()) { return 0; } else { return mem.alignForwardGeneric(u64, Type.anyerror.abiSize(target), payload_align); @@ -1272,9 +1297,10 @@ pub fn errUnionPayloadOffset(payload_ty: Type, target: std.Target) u64 { } pub fn errUnionErrorOffset(payload_ty: Type, target: std.Target) u64 { + if (!payload_ty.hasRuntimeBitsIgnoreComptime()) return 0; const payload_align = payload_ty.abiAlignment(target); const error_align = Type.anyerror.abiAlignment(target); - if (payload_align >= error_align) { + if (payload_align >= error_align and payload_ty.hasRuntimeBitsIgnoreComptime()) { return mem.alignForwardGeneric(u64, payload_ty.abiSize(target), error_align); } else { return 0; diff --git a/test/behavior/optional.zig b/test/behavior/optional.zig index 34d8337608..e62065cf25 100644 --- a/test/behavior/optional.zig +++ b/test/behavior/optional.zig @@ -74,7 +74,6 @@ test "optional with void type" { test "address of unwrap optional" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; @@ -365,7 +364,6 @@ test "optional pointer to zero bit optional payload" { } test "optional pointer to zero bit error union payload" { - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return 
error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO -- cgit v1.2.3 From 904ffb41de9caa3f8f99806518d719beef832b7c Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Sat, 13 May 2023 02:51:46 -0400 Subject: x86_64: implement calling function references --- src/arch/x86_64/CodeGen.zig | 18 +++++++++++------- test/behavior/bugs/1277.zig | 1 - test/behavior/bugs/12801-2.zig | 1 - test/behavior/fn.zig | 1 - 4 files changed, 11 insertions(+), 10 deletions(-) (limited to 'src/arch') diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 4aa2443295..e4f28e34cf 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -7378,11 +7378,15 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier // on linking. const mod = self.bin_file.options.module.?; if (self.air.value(callee)) |func_value| { - if (func_value.castTag(.function)) |func_payload| { - const func = func_payload.data; - + if (if (func_value.castTag(.function)) |func_payload| + func_payload.data.owner_decl + else if (func_value.castTag(.decl_ref)) |decl_ref_payload| + decl_ref_payload.data + else + null) |owner_decl| + { if (self.bin_file.cast(link.File.Elf)) |elf_file| { - const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl); + const atom_index = try elf_file.getOrCreateAtomForDecl(owner_decl); const atom = elf_file.getAtom(atom_index); _ = try atom.getOrCreateOffsetTableEntry(elf_file); const got_addr = atom.getOffsetTableAddress(elf_file); @@ -7391,17 +7395,17 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier .disp = @intCast(i32, got_addr), })); } else if (self.bin_file.cast(link.File.Coff)) |coff_file| { - const atom = try coff_file.getOrCreateAtomForDecl(func.owner_decl); + const atom = try coff_file.getOrCreateAtomForDecl(owner_decl); const sym_index = coff_file.getAtom(atom).getSymbolIndex().?; try self.genSetReg(.rax, Type.usize, .{ .lea_got = sym_index }); try self.asmRegister(.{ ._, .call }, .rax); } else if (self.bin_file.cast(link.File.MachO)) |macho_file| { - const atom = try macho_file.getOrCreateAtomForDecl(func.owner_decl); + const atom = try macho_file.getOrCreateAtomForDecl(owner_decl); const sym_index = macho_file.getAtom(atom).getSymbolIndex().?; try self.genSetReg(.rax, Type.usize, .{ .lea_got = sym_index }); try self.asmRegister(.{ ._, .call }, .rax); } else if (self.bin_file.cast(link.File.Plan9)) |p9| { - const decl_block_index = try p9.seeDecl(func.owner_decl); + const decl_block_index = try p9.seeDecl(owner_decl); const decl_block = p9.getDeclBlock(decl_block_index); const ptr_bits = self.target.cpu.arch.ptrBitWidth(); const ptr_bytes: u64 = @divExact(ptr_bits, 8); diff --git a/test/behavior/bugs/1277.zig b/test/behavior/bugs/1277.zig index 64f8430cdb..d5312a378e 100644 --- a/test/behavior/bugs/1277.zig +++ b/test/behavior/bugs/1277.zig @@ -14,7 +14,6 @@ fn f() i32 { test "don't emit an LLVM global for a const function when it's in an optional in a struct" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; diff --git 
a/test/behavior/bugs/12801-2.zig b/test/behavior/bugs/12801-2.zig index e6243487b0..6b145e9925 100644 --- a/test/behavior/bugs/12801-2.zig +++ b/test/behavior/bugs/12801-2.zig @@ -16,7 +16,6 @@ const Auto = struct { test { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; diff --git a/test/behavior/fn.zig b/test/behavior/fn.zig index 53f964c70a..c84eb48d2e 100644 --- a/test/behavior/fn.zig +++ b/test/behavior/fn.zig @@ -502,7 +502,6 @@ test "method call with optional pointer first param" { } test "using @ptrCast on function pointers" { - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO -- cgit v1.2.3 From e08eab664861461b0adbe7984881f72b5a36a979 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Sat, 13 May 2023 14:06:26 -0400 Subject: x86_64: add missing encoding feature requirements --- src/arch/x86_64/Encoding.zig | 3 +++ src/arch/x86_64/encodings.zig | 18 +++++++++--------- 2 files changed, 12 insertions(+), 9 deletions(-) (limited to 'src/arch') diff --git a/src/arch/x86_64/Encoding.zig b/src/arch/x86_64/Encoding.zig index 073128b85e..537a03fa2a 100644 --- a/src/arch/x86_64/Encoding.zig +++ b/src/arch/x86_64/Encoding.zig @@ -691,8 +691,11 @@ pub const Feature = enum { none, avx, avx2, + bmi, f16c, fma, + lzcnt, + popcnt, sse, sse2, sse3, diff --git a/src/arch/x86_64/encodings.zig b/src/arch/x86_64/encodings.zig index f56f31da7f..a7a50867c3 100644 --- a/src/arch/x86_64/encodings.zig +++ b/src/arch/x86_64/encodings.zig @@ -354,9 +354,9 @@ pub const table = [_]Entry{ .{ .lodsd, .np, &.{}, &.{ 0xad }, 0, .none, .none }, .{ .lodsq, .np, &.{}, &.{ 0xad }, 0, .long, .none }, - .{ .lzcnt, .rm, &.{ .r16, .rm16 }, &.{ 0xf3, 0x0f, 0xbd }, 0, .short, .none }, - .{ .lzcnt, .rm, &.{ .r32, .rm32 }, &.{ 0xf3, 0x0f, 0xbd }, 0, .none, .none }, - .{ .lzcnt, .rm, &.{ .r64, .rm64 }, &.{ 0xf3, 0x0f, 0xbd }, 0, .long, .none }, + .{ .lzcnt, .rm, &.{ .r16, .rm16 }, &.{ 0xf3, 0x0f, 0xbd }, 0, .short, .lzcnt }, + .{ .lzcnt, .rm, &.{ .r32, .rm32 }, &.{ 0xf3, 0x0f, 0xbd }, 0, .none, .lzcnt }, + .{ .lzcnt, .rm, &.{ .r64, .rm64 }, &.{ 0xf3, 0x0f, 0xbd }, 0, .long, .lzcnt }, .{ .mfence, .np, &.{}, &.{ 0x0f, 0xae, 0xf0 }, 0, .none, .none }, @@ -482,9 +482,9 @@ pub const table = [_]Entry{ .{ .pop, .m, &.{ .rm16 }, &.{ 0x8f }, 0, .short, .none }, .{ .pop, .m, &.{ .rm64 }, &.{ 0x8f }, 0, .none, .none }, - .{ .popcnt, .rm, &.{ .r16, .rm16 }, &.{ 0xf3, 0x0f, 0xb8 }, 0, .short, .none }, - .{ .popcnt, .rm, &.{ .r32, .rm32 }, &.{ 0xf3, 0x0f, 0xb8 }, 0, .none, .none }, - .{ .popcnt, .rm, &.{ .r64, .rm64 }, &.{ 0xf3, 0x0f, 0xb8 }, 0, .long, .none }, + .{ .popcnt, .rm, &.{ .r16, .rm16 }, &.{ 0xf3, 0x0f, 0xb8 }, 0, .short, .popcnt }, + .{ .popcnt, .rm, &.{ .r32, .rm32 }, &.{ 0xf3, 0x0f, 0xb8 }, 0, .none, .popcnt }, + .{ .popcnt, .rm, &.{ .r64, .rm64 }, &.{ 0xf3, 0x0f, 0xb8 }, 0, .long, .popcnt }, .{ .push, .o, &.{ .r16 }, &.{ 0x50 }, 0, .short, .none }, .{ .push, .o, &.{ .r64 }, &.{ 0x50 }, 0, .none, .none }, @@ -784,9 +784,9 @@ pub const table = 
[_]Entry{ .{ .@"test", .mr, &.{ .rm32, .r32 }, &.{ 0x85 }, 0, .none, .none }, .{ .@"test", .mr, &.{ .rm64, .r64 }, &.{ 0x85 }, 0, .long, .none }, - .{ .tzcnt, .rm, &.{ .r16, .rm16 }, &.{ 0xf3, 0x0f, 0xbc }, 0, .short, .none }, - .{ .tzcnt, .rm, &.{ .r32, .rm32 }, &.{ 0xf3, 0x0f, 0xbc }, 0, .none, .none }, - .{ .tzcnt, .rm, &.{ .r64, .rm64 }, &.{ 0xf3, 0x0f, 0xbc }, 0, .long, .none }, + .{ .tzcnt, .rm, &.{ .r16, .rm16 }, &.{ 0xf3, 0x0f, 0xbc }, 0, .short, .bmi }, + .{ .tzcnt, .rm, &.{ .r32, .rm32 }, &.{ 0xf3, 0x0f, 0xbc }, 0, .none, .bmi }, + .{ .tzcnt, .rm, &.{ .r64, .rm64 }, &.{ 0xf3, 0x0f, 0xbc }, 0, .long, .bmi }, .{ .ud2, .np, &.{}, &.{ 0x0f, 0x0b }, 0, .none, .none }, -- cgit v1.2.3 From b6d61028508c5b1e1961a124bc17d4d9bda9686f Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Sat, 13 May 2023 18:06:16 -0400 Subject: x86_64: reimplement `@floatToInt` --- src/arch/x86_64/CodeGen.zig | 181 +++++++++++++++++++++--------------------- src/arch/x86_64/Encoding.zig | 159 ++++++++++++++++++++----------------- src/arch/x86_64/Mir.zig | 46 +++++++++-- src/arch/x86_64/bits.zig | 83 +++++++++---------- src/arch/x86_64/encodings.zig | 111 ++++++++++++++++++++++---- src/link/Dwarf.zig | 92 +++++++++++++++------ test/behavior/cast.zig | 1 - 7 files changed, 420 insertions(+), 253 deletions(-) (limited to 'src/arch') diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index e4f28e34cf..e5c6925596 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -2501,12 +2501,12 @@ fn airFptrunc(self: *Self, inst: Air.Inst.Index) !void { } } else if (src_bits == 64 and dst_bits == 32) { if (self.hasFeature(.avx)) if (src_mcv.isMemory()) try self.asmRegisterRegisterMemory( - .{ .v_, .cvtsd2ss }, + .{ .v_ss, .cvtsd2 }, dst_reg, dst_reg, src_mcv.mem(.qword), ) else try self.asmRegisterRegisterRegister( - .{ .v_, .cvtsd2ss }, + .{ .v_ss, .cvtsd2 }, dst_reg, dst_reg, (if (src_mcv.isRegister()) @@ -2514,11 +2514,11 @@ fn airFptrunc(self: *Self, inst: Air.Inst.Index) !void { else try self.copyToTmpRegister(src_ty, src_mcv)).to128(), ) else if (src_mcv.isMemory()) try self.asmRegisterMemory( - .{ ._, .cvtsd2ss }, + .{ ._ss, .cvtsd2 }, dst_reg, src_mcv.mem(.qword), ) else try self.asmRegisterRegister( - .{ ._, .cvtsd2ss }, + .{ ._ss, .cvtsd2 }, dst_reg, (if (src_mcv.isRegister()) src_mcv.getReg().? @@ -2552,22 +2552,22 @@ fn airFpext(self: *Self, inst: Air.Inst.Index) !void { src_mcv.getReg().? 
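
The new `.lzcnt`, `.popcnt`, and `.bmi` feature tags above let the instruction selector reject these mnemonics on CPUs that lack them instead of silently emitting an illegal opcode. A sketch of the kind of query this column feeds, using the same std.Target helper the backend already relies on (the `hasFeature` wrapper name is illustrative):

    const std = @import("std");

    fn hasFeature(target: std.Target, feature: std.Target.x86.Feature) bool {
        return std.Target.x86.featureSetHas(target.cpu.features, feature);
    }

    test "feature gate sketch" {
        const native = @import("builtin").target;
        if (native.cpu.arch == .x86_64) {
            // The answer depends on the host CPU; this only checks that
            // the query itself is well-formed.
            _ = hasFeature(native, .lzcnt);
        }
    }
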
else try self.copyToTmpRegister(src_ty, src_mcv); - try self.asmRegisterRegister(.{ .v_, .cvtph2ps }, dst_reg, mat_src_reg.to128()); + try self.asmRegisterRegister(.{ .v_ps, .cvtph2 }, dst_reg, mat_src_reg.to128()); switch (dst_bits) { 32 => {}, - 64 => try self.asmRegisterRegisterRegister(.{ .v_, .cvtss2sd }, dst_reg, dst_reg, dst_reg), + 64 => try self.asmRegisterRegisterRegister(.{ .v_sd, .cvtss2 }, dst_reg, dst_reg, dst_reg), else => return self.fail("TODO implement airFpext from {} to {}", .{ src_ty.fmt(self.bin_file.options.module.?), dst_ty.fmt(self.bin_file.options.module.?), }), } } else if (src_bits == 32 and dst_bits == 64) { if (self.hasFeature(.avx)) if (src_mcv.isMemory()) try self.asmRegisterRegisterMemory( - .{ .v_, .cvtss2sd }, + .{ .v_sd, .cvtss2 }, dst_reg, dst_reg, src_mcv.mem(.dword), ) else try self.asmRegisterRegisterRegister( - .{ .v_, .cvtss2sd }, + .{ .v_sd, .cvtss2 }, dst_reg, dst_reg, (if (src_mcv.isRegister()) @@ -2575,11 +2575,11 @@ fn airFpext(self: *Self, inst: Air.Inst.Index) !void { else try self.copyToTmpRegister(src_ty, src_mcv)).to128(), ) else if (src_mcv.isMemory()) try self.asmRegisterMemory( - .{ ._, .cvtss2sd }, + .{ ._sd, .cvtss2 }, dst_reg, src_mcv.mem(.dword), ) else try self.asmRegisterRegister( - .{ ._, .cvtss2sd }, + .{ ._sd, .cvtss2 }, dst_reg, (if (src_mcv.isRegister()) src_mcv.getReg().? @@ -4789,7 +4789,6 @@ fn genRound(self: *Self, ty: Type, dst_reg: Register, src_mcv: MCValue, mode: u4 })) |tag| tag else return self.fail("TODO implement genRound for {}", .{ ty.fmt(self.bin_file.options.module.?), }); - const abi_size = @intCast(u32, ty.abiSize(self.target.*)); const dst_alias = registerAlias(dst_reg, abi_size); switch (mir_tag[0]) { @@ -4848,7 +4847,7 @@ fn airSqrt(self: *Self, inst: Air.Inst.Index) !void { src_mcv.getReg().? else try self.copyToTmpRegister(ty, src_mcv); - try self.asmRegisterRegister(.{ .v_, .cvtph2ps }, dst_reg, mat_src_reg.to128()); + try self.asmRegisterRegister(.{ .v_ps, .cvtph2 }, dst_reg, mat_src_reg.to128()); try self.asmRegisterRegisterRegister(.{ .v_ss, .sqrt }, dst_reg, dst_reg, dst_reg); try self.asmRegisterRegisterImmediate( .{ .v_, .cvtps2ph }, @@ -4868,7 +4867,7 @@ fn airSqrt(self: *Self, inst: Air.Inst.Index) !void { 16 => if (self.hasFeature(.f16c)) switch (ty.vectorLen()) { 1 => { try self.asmRegisterRegister( - .{ .v_, .cvtph2ps }, + .{ .v_ps, .cvtph2 }, dst_reg, (if (src_mcv.isRegister()) src_mcv.getReg().? @@ -4892,13 +4891,13 @@ fn airSqrt(self: *Self, inst: Air.Inst.Index) !void { 2...8 => { const wide_reg = registerAlias(dst_reg, abi_size * 2); if (src_mcv.isMemory()) try self.asmRegisterMemory( - .{ .v_, .cvtph2ps }, + .{ .v_ps, .cvtph2 }, wide_reg, src_mcv.mem(Memory.PtrSize.fromSize( @intCast(u32, @divExact(wide_reg.bitSize(), 16)), )), ) else try self.asmRegisterRegister( - .{ .v_, .cvtph2ps }, + .{ .v_ps, .cvtph2 }, wide_reg, (if (src_mcv.isRegister()) src_mcv.getReg().? 
@@ -6347,7 +6346,7 @@ fn genBinOp( else try self.copyToTmpRegister(rhs_ty, src_mcv)).to128(), ); - try self.asmRegisterRegister(.{ .v_, .cvtph2ps }, dst_reg, dst_reg); + try self.asmRegisterRegister(.{ .v_ps, .cvtph2 }, dst_reg, dst_reg); try self.asmRegisterRegister(.{ .v_, .movshdup }, tmp_reg, dst_reg); try self.asmRegisterRegisterRegister( switch (air_tag) { @@ -6424,7 +6423,7 @@ fn genBinOp( else try self.copyToTmpRegister(rhs_ty, src_mcv)).to128(), ); - try self.asmRegisterRegister(.{ .v_, .cvtph2ps }, dst_reg, dst_reg); + try self.asmRegisterRegister(.{ .v_ps, .cvtph2 }, dst_reg, dst_reg); try self.asmRegisterRegister(.{ .v_, .movshdup }, tmp_reg, dst_reg); try self.asmRegisterRegisterRegister( switch (air_tag) { @@ -6467,7 +6466,7 @@ fn genBinOp( else try self.copyToTmpRegister(rhs_ty, src_mcv)).to128(), ); - try self.asmRegisterRegister(.{ .v_, .cvtph2ps }, dst_reg, dst_reg); + try self.asmRegisterRegister(.{ .v_ps, .cvtph2 }, dst_reg, dst_reg); try self.asmRegisterRegisterRegister( .{ .v_ps, .movhl }, tmp_reg, @@ -6501,13 +6500,13 @@ fn genBinOp( const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg); defer self.register_manager.unlockReg(tmp_lock); - try self.asmRegisterRegister(.{ .v_, .cvtph2ps }, dst_reg, dst_reg); + try self.asmRegisterRegister(.{ .v_ps, .cvtph2 }, dst_reg, dst_reg); if (src_mcv.isMemory()) try self.asmRegisterMemory( - .{ .v_, .cvtph2ps }, + .{ .v_ps, .cvtph2 }, tmp_reg, src_mcv.mem(.qword), ) else try self.asmRegisterRegister( - .{ .v_, .cvtph2ps }, + .{ .v_ps, .cvtph2 }, tmp_reg, (if (src_mcv.isRegister()) src_mcv.getReg().? @@ -6541,13 +6540,13 @@ fn genBinOp( const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg); defer self.register_manager.unlockReg(tmp_lock); - try self.asmRegisterRegister(.{ .v_, .cvtph2ps }, dst_reg.to256(), dst_reg); + try self.asmRegisterRegister(.{ .v_ps, .cvtph2 }, dst_reg.to256(), dst_reg); if (src_mcv.isMemory()) try self.asmRegisterMemory( - .{ .v_, .cvtph2ps }, + .{ .v_ps, .cvtph2 }, tmp_reg, src_mcv.mem(.xword), ) else try self.asmRegisterRegister( - .{ .v_, .cvtph2ps }, + .{ .v_ps, .cvtph2 }, tmp_reg, (if (src_mcv.isRegister()) src_mcv.getReg().? 
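
The `.{ .v_ss, .cvtsd2 }` rewrites above follow what appears to be the Mir fixed-tag convention: the fixes half carries the encoding prefix and operand-size suffix around an underscore, and the tag half supplies the operation stem, so the pair spells the full mnemonic. A comptime sketch of that spelling rule (the helper is illustrative, not the actual lowering code):

    const std = @import("std");

    fn mnemonic(comptime fixes: []const u8, comptime tag: []const u8) []const u8 {
        const i = std.mem.indexOfScalar(u8, fixes, '_').?;
        return fixes[0..i] ++ tag ++ fixes[i + 1 ..];
    }

    test "fixed tag spelling" {
        try std.testing.expectEqualStrings("vcvtsd2ss", comptime mnemonic("v_ss", "cvtsd2"));
        try std.testing.expectEqualStrings("cvtss2sd", comptime mnemonic("_sd", "cvtss2"));
    }
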
@@ -7199,13 +7198,13 @@ fn genArgDbgInfo(self: Self, ty: Type, name: [:0]const u8, mcv: MCValue) !void { switch (self.debug_output) { .dwarf => |dw| { const loc: link.File.Dwarf.DeclState.DbgInfoLoc = switch (mcv) { - .register => |reg| .{ .register = reg.dwarfLocOp() }, + .register => |reg| .{ .register = reg.dwarfNum() }, // TODO use a frame index .load_frame => return, //.stack_offset => |off| .{ // .stack = .{ // // TODO handle -fomit-frame-pointer - // .fp_register = Register.rbp.dwarfLocOpDeref(), + // .fp_register = Register.rbp.dwarfNum(), // .offset = -off, // }, //}, @@ -7237,11 +7236,11 @@ fn genVarDbgInfo( switch (self.debug_output) { .dwarf => |dw| { const loc: link.File.Dwarf.DeclState.DbgInfoLoc = switch (mcv) { - .register => |reg| .{ .register = reg.dwarfLocOp() }, + .register => |reg| .{ .register = reg.dwarfNum() }, // TODO use a frame index .load_frame, .lea_frame => return, //=> |off| .{ .stack = .{ - // .fp_register = Register.rbp.dwarfLocOpDeref(), + // .fp_register = Register.rbp.dwarfNum(), // .offset = -off, //} }, .memory => |address| .{ .memory = address }, @@ -7595,7 +7594,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { else try self.copyToTmpRegister(ty, src_mcv)).to128(), ); - try self.asmRegisterRegister(.{ .v_, .cvtph2ps }, tmp1_reg, tmp1_reg); + try self.asmRegisterRegister(.{ .v_ps, .cvtph2 }, tmp1_reg, tmp1_reg); try self.asmRegisterRegister(.{ .v_, .movshdup }, tmp2_reg, tmp1_reg); try self.genBinOpMir(.{ ._ss, .ucomi }, ty, tmp1_mcv, tmp2_mcv); } else return self.fail("TODO implement airCmp for {}", .{ @@ -8862,14 +8861,14 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr } }, .register => |src_reg| if (dst_reg.id() != src_reg.id()) try self.asmRegisterRegister( - if ((dst_reg.class() == .floating_point) == (src_reg.class() == .floating_point)) + if ((dst_reg.class() == .sse) == (src_reg.class() == .sse)) switch (ty.zigTypeTag()) { else => .{ ._, .mov }, .Float, .Vector => .{ ._ps, .mova }, } else switch (abi_size) { 2 => return try self.asmRegisterRegisterImmediate( - if (dst_reg.class() == .floating_point) .{ .p_w, .insr } else .{ .p_w, .extr }, + if (dst_reg.class() == .sse) .{ .p_w, .insr } else .{ .p_w, .extr }, registerAlias(dst_reg, 4), registerAlias(src_reg, 4), Immediate.u(0), @@ -9222,7 +9221,7 @@ fn genInlineMemcpyRegisterRegister( try self.asmMemoryRegister( switch (src_reg.class()) { .general_purpose, .segment => .{ ._, .mov }, - .floating_point => .{ ._ss, .mov }, + .sse => .{ ._ss, .mov }, }, Memory.sib(Memory.PtrSize.fromSize(abi_size), .{ .base = dst_reg, .disp = -offset }), registerAlias(src_reg, abi_size), @@ -9388,10 +9387,10 @@ fn airIntToFloat(self: *Self, inst: Air.Inst.Index) !void { }); const src_mcv = try self.resolveInst(ty_op.operand); - const src_reg = switch (src_mcv) { - .register => |reg| reg, - else => try self.copyToTmpRegister(src_ty, src_mcv), - }; + const src_reg = if (src_mcv.isRegister()) + src_mcv.getReg().? 
+ else + try self.copyToTmpRegister(src_ty, src_mcv); const src_lock = self.register_manager.lockRegAssumeUnused(src_reg); defer self.register_manager.unlockReg(src_lock); @@ -9402,23 +9401,23 @@ fn airIntToFloat(self: *Self, inst: Air.Inst.Index) !void { const dst_lock = self.register_manager.lockRegAssumeUnused(dst_reg); defer self.register_manager.unlockReg(dst_lock); - try self.asmRegisterRegister(switch (dst_ty.floatBits(self.target.*)) { - 32 => if (Target.x86.featureSetHas(self.target.cpu.features, .sse)) - .{ ._, .cvtsi2ss } - else - return self.fail("TODO implement airIntToFloat from {} to {} without sse", .{ - src_ty.fmt(self.bin_file.options.module.?), dst_ty.fmt(self.bin_file.options.module.?), - }), - 64 => if (Target.x86.featureSetHas(self.target.cpu.features, .sse2)) - .{ ._, .cvtsi2sd } - else - return self.fail("TODO implement airIntToFloat from {} to {} without sse2", .{ - src_ty.fmt(self.bin_file.options.module.?), dst_ty.fmt(self.bin_file.options.module.?), - }), - else => return self.fail("TODO implement airIntToFloat from {} to {}", .{ - src_ty.fmt(self.bin_file.options.module.?), dst_ty.fmt(self.bin_file.options.module.?), - }), - }, dst_reg.to128(), registerAlias(src_reg, src_size)); + const mir_tag = if (@as(?Mir.Inst.FixedTag, switch (dst_ty.zigTypeTag()) { + .Float => switch (dst_ty.floatBits(self.target.*)) { + 32 => if (self.hasFeature(.avx)) .{ .v_ss, .cvtsi2 } else .{ ._ss, .cvtsi2 }, + 64 => if (self.hasFeature(.avx)) .{ .v_sd, .cvtsi2 } else .{ ._sd, .cvtsi2 }, + 16, 80, 128 => null, + else => unreachable, + }, + else => null, + })) |tag| tag else return self.fail("TODO implement airIntToFloat from {} to {}", .{ + src_ty.fmt(self.bin_file.options.module.?), dst_ty.fmt(self.bin_file.options.module.?), + }); + const dst_alias = dst_reg.to128(); + const src_alias = registerAlias(src_reg, src_size); + switch (mir_tag[0]) { + .v_ss, .v_sd => try self.asmRegisterRegisterRegister(mir_tag, dst_alias, dst_alias, src_alias), + else => try self.asmRegisterRegister(mir_tag, dst_alias, src_alias), + } return self.finishAir(inst, dst_mcv, .{ ty_op.operand, .none, .none }); } @@ -9428,46 +9427,50 @@ fn airFloatToInt(self: *Self, inst: Air.Inst.Index) !void { const src_ty = self.air.typeOf(ty_op.operand); const dst_ty = self.air.typeOfIndex(inst); - const operand = try self.resolveInst(ty_op.operand); - const src_abi_size = @intCast(u32, src_ty.abiSize(self.target.*)); - const dst_abi_size = @intCast(u32, dst_ty.abiSize(self.target.*)); + const dst_bits = @intCast(u32, dst_ty.bitSize(self.target.*)); + const dst_signedness = + if (dst_ty.isAbiInt()) dst_ty.intInfo(self.target.*).signedness else .unsigned; - switch (src_abi_size) { - 4, 8 => {}, - else => |size| return self.fail("TODO load ST(0) with abiSize={}", .{size}), - } - if (dst_abi_size > 8) { - return self.fail("TODO convert float with abiSize={}", .{dst_abi_size}); - } + const dst_size = std.math.divCeil(u32, @max(switch (dst_signedness) { + .signed => dst_bits, + .unsigned => dst_bits + 1, + }, 32), 8) catch unreachable; + if (dst_size > 8) return self.fail("TODO implement airFloatToInt from {} to {}", .{ + src_ty.fmt(self.bin_file.options.module.?), dst_ty.fmt(self.bin_file.options.module.?), + }); - // move float src to ST(0) - const frame_addr: FrameAddr = switch (operand) { - .load_frame => |frame_addr| frame_addr, - else => frame_addr: { - const frame_index = try self.allocFrameIndex(FrameAlloc.initType(src_ty, self.target.*)); - try self.genSetMem(.{ .frame = frame_index }, 0, src_ty, operand); - break 
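
Behaviorally, the new cvtsi2ss/cvtsi2sd selection above (three-operand vcvtsi2* under AVX) and the cvttss2si/cvttsd2si path that follows have to agree with Zig's builtin semantics: @floatToInt truncates toward zero. An illustrative round-trip test:

    const std = @import("std");

    test "intToFloat / floatToInt round trip" {
        const i: i32 = -42;
        const f = @intToFloat(f64, i);
        try std.testing.expectEqual(@as(f64, -42.0), f);
        try std.testing.expectEqual(@as(i32, -42), @floatToInt(i32, f));
        // Truncation toward zero, not rounding.
        try std.testing.expectEqual(@as(i32, -1), @floatToInt(i32, @as(f64, -1.9)));
    }
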
:frame_addr .{ .index = frame_index }; - }, - }; - try self.asmMemory( - .{ .f_, .ld }, - Memory.sib(Memory.PtrSize.fromSize(src_abi_size), .{ - .base = .{ .frame = frame_addr.index }, - .disp = frame_addr.off, - }), - ); + const src_mcv = try self.resolveInst(ty_op.operand); + const src_reg = if (src_mcv.isRegister()) + src_mcv.getReg().? + else + try self.copyToTmpRegister(src_ty, src_mcv); + const src_lock = self.register_manager.lockRegAssumeUnused(src_reg); + defer self.register_manager.unlockReg(src_lock); - // convert - const stack_dst = try self.allocRegOrMem(inst, false); - try self.asmMemory( - .{ .f_p, .istt }, - Memory.sib(Memory.PtrSize.fromSize(dst_abi_size), .{ - .base = .{ .frame = stack_dst.load_frame.index }, - .disp = stack_dst.load_frame.off, + const dst_reg = try self.register_manager.allocReg(inst, regClassForType(dst_ty)); + const dst_mcv = MCValue{ .register = dst_reg }; + const dst_lock = self.register_manager.lockRegAssumeUnused(dst_reg); + defer self.register_manager.unlockReg(dst_lock); + + try self.asmRegisterRegister( + if (@as(?Mir.Inst.FixedTag, switch (src_ty.zigTypeTag()) { + .Float => switch (src_ty.floatBits(self.target.*)) { + 32 => if (self.hasFeature(.avx)) .{ .v_, .cvttss2si } else .{ ._, .cvttss2si }, + 64 => if (self.hasFeature(.avx)) .{ .v_, .cvttsd2si } else .{ ._, .cvttsd2si }, + 16, 80, 128 => null, + else => unreachable, + }, + else => null, + })) |tag| tag else return self.fail("TODO implement airFloatToInt from {} to {}", .{ + src_ty.fmt(self.bin_file.options.module.?), dst_ty.fmt(self.bin_file.options.module.?), }), + registerAlias(dst_reg, dst_size), + src_reg.to128(), ); - return self.finishAir(inst, stack_dst, .{ ty_op.operand, .none, .none }); + if (dst_bits < dst_size * 8) try self.truncateRegister(dst_ty, dst_reg); + + return self.finishAir(inst, dst_mcv, .{ ty_op.operand, .none, .none }); } fn airCmpxchg(self: *Self, inst: Air.Inst.Index) !void { @@ -10997,13 +11000,13 @@ fn registerAlias(reg: Register, size_bytes: u32) Register { reg.to64() else unreachable, - .floating_point => if (size_bytes <= 16) + .segment, .x87, .mmx => unreachable, + .sse => if (size_bytes <= 16) reg.to128() else if (size_bytes <= 32) reg.to256() else unreachable, - .segment => unreachable, }; } diff --git a/src/arch/x86_64/Encoding.zig b/src/arch/x86_64/Encoding.zig index 537a03fa2a..66a249a3f2 100644 --- a/src/arch/x86_64/Encoding.zig +++ b/src/arch/x86_64/Encoding.zig @@ -233,7 +233,6 @@ pub const Mnemonic = enum { cmpxchg, cmpxchg8b, cmpxchg16b, cqo, cwd, cwde, div, - fisttp, fld, idiv, imul, int3, ja, jae, jb, jbe, jc, jrcxz, je, jg, jge, jl, jle, jna, jnae, jnb, jnbe, jnc, jne, jng, jnge, jnl, jnle, jno, jnp, jns, jnz, jo, jp, jpe, jpo, js, jz, @@ -259,6 +258,8 @@ pub const Mnemonic = enum { @"test", tzcnt, ud2, xadd, xchg, xor, + // X87 + fisttp, fld, // MMX movd, // SSE @@ -266,7 +267,7 @@ pub const Mnemonic = enum { andps, andnps, cmpss, - cvtsi2ss, + cvtpi2ps, cvtps2pi, cvtsi2ss, cvtss2si, cvttps2pi, cvttss2si, divps, divss, maxps, maxss, minps, minss, @@ -285,7 +286,9 @@ pub const Mnemonic = enum { andpd, andnpd, //cmpsd, - cvtsd2ss, cvtsi2sd, cvtss2sd, + cvtdq2pd, cvtdq2ps, cvtpd2dq, cvtpd2pi, cvtpd2ps, cvtpi2pd, + cvtps2dq, cvtps2pd, cvtsd2si, cvtsd2ss, cvtsi2sd, cvtss2sd, + cvttpd2dq, cvttpd2pi, cvttps2dq, cvttsd2si, divpd, divsd, maxpd, maxsd, minpd, minsd, @@ -314,7 +317,10 @@ pub const Mnemonic = enum { // AVX vaddpd, vaddps, vaddsd, vaddss, vbroadcastf128, vbroadcastsd, vbroadcastss, - vcvtsd2ss, vcvtsi2sd, vcvtsi2ss, vcvtss2sd, + vcvtdq2pd, 
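
The divCeil expression above picks the narrowest general-purpose register that can hold the result of a signed cvttss2si/cvttsd2si: an unsigned n-bit destination needs n+1 signed bits, and these instructions cannot write anything narrower than 32 bits. A standalone sketch of the same computation (registerAlias then rounds sizes 5..8 up to the 64-bit alias):

    const std = @import("std");

    fn dstSize(dst_bits: u32, signed: bool) u32 {
        const needed = if (signed) dst_bits else dst_bits + 1;
        return std.math.divCeil(u32, @max(needed, 32), 8) catch unreachable;
    }

    test "destination size for float-to-int" {
        try std.testing.expectEqual(@as(u32, 4), dstSize(32, true)); // i32 fits eax
        try std.testing.expectEqual(@as(u32, 5), dstSize(32, false)); // u32 needs 33 bits
        try std.testing.expectEqual(@as(u32, 8), dstSize(64, true)); // i64 fits rax
    }
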
vcvtdq2ps, vcvtpd2dq, vcvtpd2ps, + vcvtps2dq, vcvtps2pd, vcvtsd2si, vcvtsd2ss, + vcvtsi2sd, vcvtsi2ss, vcvtss2sd, vcvtss2si, + vcvttpd2dq, vcvttps2dq, vcvttsd2si, vcvttss2si, vdivpd, vdivps, vdivsd, vdivss, vextractf128, vextractps, vinsertf128, vinsertps, @@ -377,80 +383,84 @@ pub const Op = enum { m, moffs, sreg, + st, mm, mm_m64, xmm, xmm_m32, xmm_m64, xmm_m128, ymm, ymm_m256, // zig fmt: on pub fn fromOperand(operand: Instruction.Operand) Op { - switch (operand) { - .none => return .none, - - .reg => |reg| { - switch (reg.class()) { - .segment => return .sreg, - .floating_point => return switch (reg.bitSize()) { - 128 => .xmm, - 256 => .ymm, + return switch (operand) { + .none => .none, + + .reg => |reg| switch (reg.class()) { + .general_purpose => if (reg.to64() == .rax) + switch (reg) { + .al => .al, + .ax => .ax, + .eax => .eax, + .rax => .rax, else => unreachable, - }, - .general_purpose => { - if (reg.to64() == .rax) return switch (reg) { - .al => .al, - .ax => .ax, - .eax => .eax, - .rax => .rax, - else => unreachable, - }; - if (reg == .cl) return .cl; - return switch (reg.bitSize()) { - 8 => .r8, - 16 => .r16, - 32 => .r32, - 64 => .r64, - else => unreachable, - }; - }, - } + } + else if (reg == .cl) + .cl + else switch (reg.bitSize()) { + 8 => .r8, + 16 => .r16, + 32 => .r32, + 64 => .r64, + else => unreachable, + }, + .segment => .sreg, + .x87 => .st, + .mmx => .mm, + .sse => switch (reg.bitSize()) { + 128 => .xmm, + 256 => .ymm, + else => unreachable, + }, }, .mem => |mem| switch (mem) { - .moffs => return .moffs, - .sib, .rip => { - const bit_size = mem.bitSize(); - return switch (bit_size) { - 8 => .m8, - 16 => .m16, - 32 => .m32, - 64 => .m64, - 80 => .m80, - 128 => .m128, - 256 => .m256, - else => unreachable, - }; + .moffs => .moffs, + .sib, .rip => switch (mem.bitSize()) { + 8 => .m8, + 16 => .m16, + 32 => .m32, + 64 => .m64, + 80 => .m80, + 128 => .m128, + 256 => .m256, + else => unreachable, }, }, - .imm => |imm| { - switch (imm) { - .signed => |x| { - if (x == 1) return .unity; - if (math.cast(i8, x)) |_| return .imm8s; - if (math.cast(i16, x)) |_| return .imm16s; - return .imm32s; - }, - .unsigned => |x| { - if (x == 1) return .unity; - if (math.cast(i8, x)) |_| return .imm8s; - if (math.cast(u8, x)) |_| return .imm8; - if (math.cast(i16, x)) |_| return .imm16s; - if (math.cast(u16, x)) |_| return .imm16; - if (math.cast(i32, x)) |_| return .imm32s; - if (math.cast(u32, x)) |_| return .imm32; - return .imm64; - }, - } + .imm => |imm| switch (imm) { + .signed => |x| if (x == 1) + .unity + else if (math.cast(i8, x)) |_| + .imm8s + else if (math.cast(i16, x)) |_| + .imm16s + else + .imm32s, + .unsigned => |x| if (x == 1) + .unity + else if (math.cast(i8, x)) |_| + .imm8s + else if (math.cast(u8, x)) |_| + .imm8 + else if (math.cast(i16, x)) |_| + .imm16s + else if (math.cast(u16, x)) |_| + .imm16 + else if (math.cast(i32, x)) |_| + .imm32s + else if (math.cast(u32, x)) |_| + .imm32 + else + .imm64, }, - } + }; } pub fn immBitSize(op: Op) u64 { @@ -460,6 +470,7 @@ pub const Op = enum { .ax, .r16, .rm16 => unreachable, .eax, .r32, .rm32, .r32_m16 => unreachable, .rax, .r64, .rm64, .r64_m16 => unreachable, + .st, .mm, .mm_m64 => unreachable, .xmm, .xmm_m32, .xmm_m64, .xmm_m128 => unreachable, .ymm, .ymm_m256 => unreachable, .m8, .m16, .m32, .m64, .m80, .m128, .m256 => unreachable, @@ -480,7 +491,8 @@ pub const Op = enum { .al, .cl, .r8, .rm8 => 8, .ax, .r16, .rm16 => 16, .eax, .r32, .rm32, .r32_m8, .r32_m16 => 32, - .rax, .r64, .rm64, .r64_m16 => 64, + .rax, .r64, .rm64, 
.r64_m16, .mm, .mm_m64 => 64, + .st => 80, .xmm, .xmm_m32, .xmm_m64, .xmm_m128 => 128, .ymm, .ymm_m256 => 256, }; @@ -491,11 +503,11 @@ pub const Op = enum { .none, .o16, .o32, .o64, .moffs, .m, .sreg => unreachable, .unity, .imm8, .imm8s, .imm16, .imm16s, .imm32, .imm32s, .imm64 => unreachable, .rel8, .rel16, .rel32 => unreachable, - .al, .cl, .r8, .ax, .r16, .eax, .r32, .rax, .r64, .xmm, .ymm => unreachable, + .al, .cl, .r8, .ax, .r16, .eax, .r32, .rax, .r64, .st, .mm, .xmm, .ymm => unreachable, .m8, .rm8, .r32_m8 => 8, .m16, .rm16, .r32_m16, .r64_m16 => 16, .m32, .rm32, .xmm_m32 => 32, - .m64, .rm64, .xmm_m64 => 64, + .m64, .rm64, .mm_m64, .xmm_m64 => 64, .m80 => 80, .m128, .xmm_m128 => 128, .m256, .ymm_m256 => 256, @@ -522,6 +534,7 @@ pub const Op = enum { .r8, .r16, .r32, .r64, .rm8, .rm16, .rm32, .rm64, .r32_m8, .r32_m16, .r64_m16, + .st, .mm, .mm_m64, .xmm, .xmm_m32, .xmm_m64, .xmm_m128, .ymm, .ymm_m256, => true, @@ -550,6 +563,7 @@ pub const Op = enum { .r32_m8, .r32_m16, .r64_m16, .m8, .m16, .m32, .m64, .m80, .m128, .m256, .m, + .mm_m64, .xmm_m32, .xmm_m64, .xmm_m128, .ymm_m256, => true, @@ -573,8 +587,10 @@ pub const Op = enum { .rm8, .rm16, .rm32, .rm64 => .general_purpose, .r32_m8, .r32_m16, .r64_m16 => .general_purpose, .sreg => .segment, - .xmm, .xmm_m32, .xmm_m64, .xmm_m128 => .floating_point, - .ymm, .ymm_m256 => .floating_point, + .st => .x87, + .mm, .mm_m64 => .mmx, + .xmm, .xmm_m32, .xmm_m64, .xmm_m128 => .sse, + .ymm, .ymm_m256 => .sse, }; } @@ -695,6 +711,7 @@ pub const Feature = enum { f16c, fma, lzcnt, + movbe, popcnt, sse, sse2, @@ -717,7 +734,7 @@ fn estimateInstructionLength(prefix: Prefix, encoding: Encoding, ops: []const Op } const mnemonic_to_encodings_map = init: { - @setEvalBranchQuota(20_000); + @setEvalBranchQuota(25_000); const encodings = @import("encodings.zig"); var entries = encodings.table; std.sort.sort(encodings.Entry, &entries, {}, struct { diff --git a/src/arch/x86_64/Mir.zig b/src/arch/x86_64/Mir.zig index f26bf97e82..ef8bbe07b3 100644 --- a/src/arch/x86_64/Mir.zig +++ b/src/arch/x86_64/Mir.zig @@ -439,8 +439,21 @@ pub const Inst = struct { /// Bitwise logical and not of packed single-precision floating-point values /// Bitwise logical and not of packed double-precision floating-point values andn, + /// Convert packed doubleword integers to packed single-precision floating-point values + /// Convert packed doubleword integers to packed double-precision floating-point values + cvtpi2, + /// Convert packed single-precision floating-point values to packed doubleword integers + cvtps2pi, /// Convert doubleword integer to scalar single-precision floating-point value - cvtsi2ss, + /// Convert doubleword integer to scalar double-precision floating-point value + cvtsi2, + /// Convert scalar single-precision floating-point value to doubleword integer + cvtss2si, + /// Convert with truncation packed single-precision floating-point values to packed doubleword integers + cvttps2pi, + /// Convert with truncation scalar single-precision floating-point value to doubleword integer + cvttss2si, + /// Maximum of packed single-precision floating-point values /// Maximum of scalar single-precision floating-point values /// Maximum of packed double-precision floating-point values @@ -486,12 +499,33 @@ pub const Inst = struct { /// Unpack and interleave low packed double-precision floating-point values unpckl, + /// Convert packed doubleword integers to packed single-precision floating-point values + /// Convert packed doubleword integers to packed double-precision 
floating-point values + cvtdq2, + /// Convert packed double-precision floating-point values to packed doubleword integers + cvtpd2dq, + /// Convert packed double-precision floating-point values to packed doubleword integers + cvtpd2pi, + /// Convert packed double-precision floating-point values to packed single-precision floating-point values + cvtpd2, + /// Convert packed single-precision floating-point values to packed doubleword integers + cvtps2dq, + /// Convert packed single-precision floating-point values to packed double-precision floating-point values + cvtps2, + /// Convert scalar double-precision floating-point value to doubleword integer + cvtsd2si, /// Convert scalar double-precision floating-point value to scalar single-precision floating-point value - cvtsd2ss, - /// Convert doubleword integer to scalar double-precision floating-point value - cvtsi2sd, + cvtsd2, /// Convert scalar single-precision floating-point value to scalar double-precision floating-point value - cvtss2sd, + cvtss2, + /// Convert with truncation packed double-precision floating-point values to packed doubleword integers + cvttpd2dq, + /// Convert with truncation packed double-precision floating-point values to packed doubleword integers + cvttpd2pi, + /// Convert with truncation packed single-precision floating-point values to packed doubleword integers + cvttps2dq, + /// Convert with truncation scalar double-precision floating-point value to doubleword integer + cvttsd2si, /// Packed interleave shuffle of quadruplets of single-precision floating-point values /// Packed interleave shuffle of pairs of double-precision floating-point values shuf, @@ -542,7 +576,7 @@ pub const Inst = struct { broadcast, /// Convert 16-bit floating-point values to single-precision floating-point values - cvtph2ps, + cvtph2, /// Convert single-precision floating-point values to 16-bit floating-point values cvtps2ph, diff --git a/src/arch/x86_64/bits.zig b/src/arch/x86_64/bits.zig index 3343f280b9..923ba31266 100644 --- a/src/arch/x86_64/bits.zig +++ b/src/arch/x86_64/bits.zig @@ -175,15 +175,21 @@ pub const Register = enum(u7) { xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, + mm0, mm1, mm2, mm3, mm4, mm5, mm6, mm7, + + st0, st1, st2, st3, st4, st5, st6, st7, + es, cs, ss, ds, fs, gs, none, // zig fmt: on - pub const Class = enum(u2) { + pub const Class = enum { general_purpose, - floating_point, segment, + x87, + mmx, + sse, }; pub fn class(reg: Register) Class { @@ -195,8 +201,10 @@ pub const Register = enum(u7) { @enumToInt(Register.al) ... @enumToInt(Register.r15b) => .general_purpose, @enumToInt(Register.ah) ... @enumToInt(Register.bh) => .general_purpose, - @enumToInt(Register.ymm0) ... @enumToInt(Register.ymm15) => .floating_point, - @enumToInt(Register.xmm0) ... @enumToInt(Register.xmm15) => .floating_point, + @enumToInt(Register.ymm0) ... @enumToInt(Register.ymm15) => .sse, + @enumToInt(Register.xmm0) ... @enumToInt(Register.xmm15) => .sse, + @enumToInt(Register.mm0) ... @enumToInt(Register.mm7) => .mmx, + @enumToInt(Register.st0) ... @enumToInt(Register.st7) => .x87, @enumToInt(Register.es) ... @enumToInt(Register.gs) => .segment, @@ -216,8 +224,10 @@ pub const Register = enum(u7) { @enumToInt(Register.ymm0) ... @enumToInt(Register.ymm15) => @enumToInt(Register.ymm0) - 16, @enumToInt(Register.xmm0) ... @enumToInt(Register.xmm15) => @enumToInt(Register.xmm0) - 16, + @enumToInt(Register.mm0) ... 
@enumToInt(Register.mm7) => @enumToInt(Register.mm0) - 32, + @enumToInt(Register.st0) ... @enumToInt(Register.st7) => @enumToInt(Register.st0) - 40, - @enumToInt(Register.es) ... @enumToInt(Register.gs) => @enumToInt(Register.es) - 32, + @enumToInt(Register.es) ... @enumToInt(Register.gs) => @enumToInt(Register.es) - 48, else => unreachable, // zig fmt: on @@ -236,6 +246,8 @@ pub const Register = enum(u7) { @enumToInt(Register.ymm0) ... @enumToInt(Register.ymm15) => 256, @enumToInt(Register.xmm0) ... @enumToInt(Register.xmm15) => 128, + @enumToInt(Register.mm0) ... @enumToInt(Register.mm7) => 64, + @enumToInt(Register.st0) ... @enumToInt(Register.st7) => 80, @enumToInt(Register.es) ... @enumToInt(Register.gs) => 16, @@ -271,6 +283,8 @@ pub const Register = enum(u7) { @enumToInt(Register.ymm0) ... @enumToInt(Register.ymm15) => @enumToInt(Register.ymm0), @enumToInt(Register.xmm0) ... @enumToInt(Register.xmm15) => @enumToInt(Register.xmm0), + @enumToInt(Register.mm0) ... @enumToInt(Register.mm7) => @enumToInt(Register.mm0), + @enumToInt(Register.st0) ... @enumToInt(Register.st7) => @enumToInt(Register.st0), @enumToInt(Register.es) ... @enumToInt(Register.gs) => @enumToInt(Register.es), @@ -326,8 +340,8 @@ pub const Register = enum(u7) { return @intToEnum(Register, @enumToInt(reg) - reg.gpBase() + @enumToInt(Register.al)); } - fn fpBase(reg: Register) u7 { - assert(reg.class() == .floating_point); + fn sseBase(reg: Register) u7 { + assert(reg.class() == .sse); return switch (@enumToInt(reg)) { @enumToInt(Register.ymm0)...@enumToInt(Register.ymm15) => @enumToInt(Register.ymm0), @enumToInt(Register.xmm0)...@enumToInt(Register.xmm15) => @enumToInt(Register.xmm0), @@ -336,49 +350,24 @@ pub const Register = enum(u7) { } pub fn to256(reg: Register) Register { - return @intToEnum(Register, @enumToInt(reg) - reg.fpBase() + @enumToInt(Register.ymm0)); + return @intToEnum(Register, @enumToInt(reg) - reg.sseBase() + @enumToInt(Register.ymm0)); } pub fn to128(reg: Register) Register { - return @intToEnum(Register, @enumToInt(reg) - reg.fpBase() + @enumToInt(Register.xmm0)); - } - - pub fn dwarfLocOp(reg: Register) u8 { - return switch (reg.class()) { - .general_purpose => switch (reg.to64()) { - .rax => DW.OP.reg0, - .rdx => DW.OP.reg1, - .rcx => DW.OP.reg2, - .rbx => DW.OP.reg3, - .rsi => DW.OP.reg4, - .rdi => DW.OP.reg5, - .rbp => DW.OP.reg6, - .rsp => DW.OP.reg7, - else => @intCast(u8, @enumToInt(reg) - reg.gpBase()) + DW.OP.reg0, - }, - .floating_point => @intCast(u8, @enumToInt(reg) - reg.fpBase()) + DW.OP.reg17, - else => unreachable, - }; + return @intToEnum(Register, @enumToInt(reg) - reg.sseBase() + @enumToInt(Register.xmm0)); } - /// DWARF encodings that push a value onto the DWARF stack that is either - /// the contents of a register or the result of adding the contents a given - /// register to a given signed offset. 
- pub fn dwarfLocOpDeref(reg: Register) u8 { + /// DWARF register encoding + pub fn dwarfNum(reg: Register) u6 { return switch (reg.class()) { - .general_purpose => switch (reg.to64()) { - .rax => DW.OP.breg0, - .rdx => DW.OP.breg1, - .rcx => DW.OP.breg2, - .rbx => DW.OP.breg3, - .rsi => DW.OP.breg4, - .rdi => DW.OP.breg5, - .rbp => DW.OP.breg6, - .rsp => DW.OP.breg7, - else => @intCast(u8, @enumToInt(reg) - reg.gpBase()) + DW.OP.breg0, - }, - .floating_point => @intCast(u8, @enumToInt(reg) - reg.fpBase()) + DW.OP.breg17, - else => unreachable, + .general_purpose => if (reg.isExtended()) + reg.enc() + else + @truncate(u3, @as(u24, 0o54673120) >> @as(u5, reg.enc()) * 3), + .sse => 17 + @as(u6, reg.enc()), + .x87 => 33 + @as(u6, reg.enc()), + .mmx => 41 + @as(u6, reg.enc()), + .segment => 50 + @as(u6, reg.enc()), }; } }; @@ -392,6 +381,8 @@ test "Register id - different classes" { try expect(Register.ymm0.id() == 0b10000); try expect(Register.ymm0.id() != Register.rax.id()); try expect(Register.xmm0.id() == Register.ymm0.id()); + try expect(Register.xmm0.id() != Register.mm0.id()); + try expect(Register.mm0.id() != Register.st0.id()); try expect(Register.es.id() == 0b100000); } @@ -407,7 +398,9 @@ test "Register enc - different classes" { test "Register classes" { try expect(Register.r11.class() == .general_purpose); - try expect(Register.ymm11.class() == .floating_point); + try expect(Register.ymm11.class() == .sse); + try expect(Register.mm3.class() == .mmx); + try expect(Register.st3.class() == .x87); try expect(Register.fs.class() == .segment); } diff --git a/src/arch/x86_64/encodings.zig b/src/arch/x86_64/encodings.zig index a7a50867c3..3383315bd6 100644 --- a/src/arch/x86_64/encodings.zig +++ b/src/arch/x86_64/encodings.zig @@ -272,14 +272,6 @@ pub const table = [_]Entry{ .{ .div, .m, &.{ .rm32 }, &.{ 0xf7 }, 6, .none, .none }, .{ .div, .m, &.{ .rm64 }, &.{ 0xf7 }, 6, .long, .none }, - .{ .fisttp, .m, &.{ .m16 }, &.{ 0xdf }, 1, .none, .x87 }, - .{ .fisttp, .m, &.{ .m32 }, &.{ 0xdb }, 1, .none, .x87 }, - .{ .fisttp, .m, &.{ .m64 }, &.{ 0xdd }, 1, .none, .x87 }, - - .{ .fld, .m, &.{ .m32 }, &.{ 0xd9 }, 0, .none, .x87 }, - .{ .fld, .m, &.{ .m64 }, &.{ 0xdd }, 0, .none, .x87 }, - .{ .fld, .m, &.{ .m80 }, &.{ 0xdb }, 5, .none, .x87 }, - .{ .idiv, .m, &.{ .rm8 }, &.{ 0xf6 }, 7, .none, .none }, .{ .idiv, .m, &.{ .rm8 }, &.{ 0xf6 }, 7, .rex, .none }, .{ .idiv, .m, &.{ .rm16 }, &.{ 0xf7 }, 7, .short, .none }, @@ -395,12 +387,12 @@ pub const table = [_]Entry{ .{ .mov, .mi, &.{ .rm32, .imm32 }, &.{ 0xc7 }, 0, .none, .none }, .{ .mov, .mi, &.{ .rm64, .imm32s }, &.{ 0xc7 }, 0, .long, .none }, - .{ .movbe, .rm, &.{ .r16, .m16 }, &.{ 0x0f, 0x38, 0xf0 }, 0, .short, .none }, - .{ .movbe, .rm, &.{ .r32, .m32 }, &.{ 0x0f, 0x38, 0xf0 }, 0, .none, .none }, - .{ .movbe, .rm, &.{ .r64, .m64 }, &.{ 0x0f, 0x38, 0xf0 }, 0, .long, .none }, - .{ .movbe, .mr, &.{ .m16, .r16 }, &.{ 0x0f, 0x38, 0xf1 }, 0, .short, .none }, - .{ .movbe, .mr, &.{ .m32, .r32 }, &.{ 0x0f, 0x38, 0xf1 }, 0, .none, .none }, - .{ .movbe, .mr, &.{ .m64, .r64 }, &.{ 0x0f, 0x38, 0xf1 }, 0, .long, .none }, + .{ .movbe, .rm, &.{ .r16, .m16 }, &.{ 0x0f, 0x38, 0xf0 }, 0, .short, .movbe }, + .{ .movbe, .rm, &.{ .r32, .m32 }, &.{ 0x0f, 0x38, 0xf0 }, 0, .none, .movbe }, + .{ .movbe, .rm, &.{ .r64, .m64 }, &.{ 0x0f, 0x38, 0xf0 }, 0, .long, .movbe }, + .{ .movbe, .mr, &.{ .m16, .r16 }, &.{ 0x0f, 0x38, 0xf1 }, 0, .short, .movbe }, + .{ .movbe, .mr, &.{ .m32, .r32 }, &.{ 0x0f, 0x38, 0xf1 }, 0, .none, .movbe }, + .{ .movbe, .mr, &.{ .m64, .r64 }, &.{ 0x0f, 
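
The octal constant in dwarfNum above is a packed lookup table: octal digit i, counting from the least significant end, is the System V DWARF register number of the general-purpose register with machine encoding i. Spelled out as a test (the expected numbers assume the standard x86-64 DWARF numbering):

    const std = @import("std");

    test "packed dwarf number table" {
        const table: u24 = 0o54673120;
        // Encoding order: rax, rcx, rdx, rbx, rsp, rbp, rsi, rdi.
        const dwarf_nums = [8]u3{ 0, 2, 1, 3, 7, 6, 4, 5 };
        var enc: u5 = 0;
        while (enc < 8) : (enc += 1) {
            try std.testing.expectEqual(dwarf_nums[enc], @truncate(u3, table >> enc * 3));
        }
    }
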
0x38, 0xf1 }, 0, .long, .movbe }, .{ .movs, .np, &.{ .m8, .m8 }, &.{ 0xa4 }, 0, .none, .none }, .{ .movs, .np, &.{ .m16, .m16 }, &.{ 0xa5 }, 0, .short, .none }, @@ -836,6 +828,15 @@ pub const table = [_]Entry{ .{ .xor, .rm, &.{ .r32, .rm32 }, &.{ 0x33 }, 0, .none, .none }, .{ .xor, .rm, &.{ .r64, .rm64 }, &.{ 0x33 }, 0, .long, .none }, + // X87 + .{ .fisttp, .m, &.{ .m16 }, &.{ 0xdf }, 1, .none, .x87 }, + .{ .fisttp, .m, &.{ .m32 }, &.{ 0xdb }, 1, .none, .x87 }, + .{ .fisttp, .m, &.{ .m64 }, &.{ 0xdd }, 1, .none, .x87 }, + + .{ .fld, .m, &.{ .m32 }, &.{ 0xd9 }, 0, .none, .x87 }, + .{ .fld, .m, &.{ .m64 }, &.{ 0xdd }, 0, .none, .x87 }, + .{ .fld, .m, &.{ .m80 }, &.{ 0xdb }, 5, .none, .x87 }, + // SSE .{ .addps, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x58 }, 0, .none, .sse }, @@ -847,9 +848,21 @@ pub const table = [_]Entry{ .{ .cmpss, .rmi, &.{ .xmm, .xmm_m32, .imm8 }, &.{ 0xf3, 0x0f, 0xc2 }, 0, .none, .sse }, + .{ .cvtpi2ps, .rm, &.{ .xmm, .mm_m64 }, &.{ 0x0f, 0x2a }, 0, .none, .sse }, + + .{ .cvtps2pi, .rm, &.{ .mm, .xmm_m64 }, &.{ 0x0f, 0x2d }, 0, .none, .sse }, + .{ .cvtsi2ss, .rm, &.{ .xmm, .rm32 }, &.{ 0xf3, 0x0f, 0x2a }, 0, .none, .sse }, .{ .cvtsi2ss, .rm, &.{ .xmm, .rm64 }, &.{ 0xf3, 0x0f, 0x2a }, 0, .long, .sse }, + .{ .cvtss2si, .rm, &.{ .r32, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x2d }, 0, .none, .sse }, + .{ .cvtss2si, .rm, &.{ .r64, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x2d }, 0, .long, .sse }, + + .{ .cvttps2pi, .rm, &.{ .mm, .xmm_m64 }, &.{ 0x0f, 0x2c }, 0, .none, .sse }, + + .{ .cvttss2si, .rm, &.{ .r32, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x2c }, 0, .none, .sse }, + .{ .cvttss2si, .rm, &.{ .r64, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x2c }, 0, .long, .sse }, + .{ .divps, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x5e }, 0, .none, .sse }, .{ .divss, .rm, &.{ .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x5e }, 0, .none, .sse }, @@ -906,6 +919,25 @@ pub const table = [_]Entry{ .{ .cmpsd, .rmi, &.{ .xmm, .xmm_m64, .imm8 }, &.{ 0xf2, 0x0f, 0xc2 }, 0, .none, .sse2 }, + .{ .cvtdq2pd, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf3, 0x0f, 0xe6 }, 0, .none, .sse2 }, + + .{ .cvtdq2ps, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x5b }, 0, .none, .sse2 }, + + .{ .cvtpd2dq, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0xf2, 0x0f, 0xe6 }, 0, .none, .sse2 }, + + .{ .cvtpd2pi, .rm, &.{ .mm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x2d }, 0, .none, .sse2 }, + + .{ .cvtpd2ps, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x5a }, 0, .none, .sse2 }, + + .{ .cvtpi2pd, .rm, &.{ .xmm, .mm_m64 }, &.{ 0x66, 0x0f, 0x2a }, 0, .none, .sse2 }, + + .{ .cvtps2dq, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x5b }, 0, .none, .sse2 }, + + .{ .cvtps2pd, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0x0f, 0x5a }, 0, .none, .sse2 }, + + .{ .cvtsd2si, .rm, &.{ .r32, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x2d }, 0, .none, .sse2 }, + .{ .cvtsd2si, .rm, &.{ .r64, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x2d }, 0, .long, .sse2 }, + .{ .cvtsd2ss, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x5a }, 0, .none, .sse2 }, .{ .cvtsi2sd, .rm, &.{ .xmm, .rm32 }, &.{ 0xf2, 0x0f, 0x2a }, 0, .none, .sse2 }, @@ -913,6 +945,15 @@ pub const table = [_]Entry{ .{ .cvtss2sd, .rm, &.{ .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x5a }, 0, .none, .sse2 }, + .{ .cvttpd2dq, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xe6 }, 0, .none, .sse2 }, + + .{ .cvttpd2pi, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x2c }, 0, .none, .sse2 }, + + .{ .cvttps2dq, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0xf3, 0x0f, 0x5b }, 0, .none, .sse2 }, + + .{ .cvttsd2si, .rm, &.{ .r32, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x2c }, 0, .none, .sse2 }, + .{ .cvttsd2si, .rm, &.{ .r64, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x2c }, 
0, .long, .sse2 }, + .{ .divpd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x5e }, 0, .none, .sse2 }, .{ .divsd, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x5e }, 0, .none, .sse2 }, @@ -1034,15 +1075,51 @@ pub const table = [_]Entry{ .{ .vbroadcastsd, .rm, &.{ .ymm, .m64 }, &.{ 0x66, 0x0f, 0x38, 0x19 }, 0, .vex_256_w0, .avx }, .{ .vbroadcastf128, .rm, &.{ .ymm, .m128 }, &.{ 0x66, 0x0f, 0x38, 0x1a }, 0, .vex_256_w0, .avx }, + .{ .vcvtdq2pd, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf3, 0x0f, 0xe6 }, 0, .vex_128_wig, .avx }, + .{ .vcvtdq2pd, .rm, &.{ .ymm, .xmm_m128 }, &.{ 0xf3, 0x0f, 0xe6 }, 0, .vex_256_wig, .avx }, + + .{ .vcvtdq2ps, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x5b }, 0, .vex_128_wig, .avx }, + .{ .vcvtdq2ps, .rm, &.{ .ymm, .ymm_m256 }, &.{ 0x0f, 0x5b }, 0, .vex_256_wig, .avx }, + + .{ .vcvtpd2dq, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0xf2, 0x0f, 0xe6 }, 0, .vex_128_wig, .avx }, + .{ .vcvtpd2dq, .rm, &.{ .xmm, .ymm_m256 }, &.{ 0xf2, 0x0f, 0xe6 }, 0, .vex_256_wig, .avx }, + + .{ .vcvtpd2ps, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x5a }, 0, .vex_128_wig, .avx }, + .{ .vcvtpd2ps, .rm, &.{ .xmm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x5a }, 0, .vex_256_wig, .avx }, + + .{ .vcvtps2dq, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x5b }, 0, .vex_128_wig, .avx }, + .{ .vcvtps2dq, .rm, &.{ .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x5b }, 0, .vex_256_wig, .avx }, + + .{ .vcvtps2pd, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0x0f, 0x5a }, 0, .vex_128_wig, .avx }, + .{ .vcvtps2pd, .rm, &.{ .ymm, .xmm_m128 }, &.{ 0x0f, 0x5a }, 0, .vex_256_wig, .avx }, + + .{ .vcvtsd2si, .rm, &.{ .r32, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x2d }, 0, .vex_lig_w0, .sse2 }, + .{ .vcvtsd2si, .rm, &.{ .r64, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x2d }, 0, .vex_lig_w1, .sse2 }, + .{ .vcvtsd2ss, .rvm, &.{ .xmm, .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x5a }, 0, .vex_lig_wig, .avx }, .{ .vcvtsi2sd, .rvm, &.{ .xmm, .xmm, .rm32 }, &.{ 0xf2, 0x0f, 0x2a }, 0, .vex_lig_w0, .avx }, .{ .vcvtsi2sd, .rvm, &.{ .xmm, .xmm, .rm64 }, &.{ 0xf2, 0x0f, 0x2a }, 0, .vex_lig_w1, .avx }, - .{ .vcvtsi2ss, .rvm, &.{ .xmm, .xmm, .rm32 }, &.{ 0xf2, 0x0f, 0x2a }, 0, .vex_lig_w0, .avx }, - .{ .vcvtsi2ss, .rvm, &.{ .xmm, .xmm, .rm64 }, &.{ 0xf2, 0x0f, 0x2a }, 0, .vex_lig_w1, .avx }, + .{ .vcvtsi2ss, .rvm, &.{ .xmm, .xmm, .rm32 }, &.{ 0xf3, 0x0f, 0x2a }, 0, .vex_lig_w0, .avx }, + .{ .vcvtsi2ss, .rvm, &.{ .xmm, .xmm, .rm64 }, &.{ 0xf3, 0x0f, 0x2a }, 0, .vex_lig_w1, .avx }, + + .{ .vcvtss2sd, .rvm, &.{ .xmm, .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x5a }, 0, .vex_lig_wig, .avx }, + + .{ .vcvtss2si, .rm, &.{ .r32, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x2d }, 0, .vex_lig_w0, .avx }, + .{ .vcvtss2si, .rm, &.{ .r64, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x2d }, 0, .vex_lig_w1, .avx }, + + .{ .vcvttpd2dq, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xe6 }, 0, .vex_128_wig, .avx }, + .{ .vcvttpd2dq, .rm, &.{ .xmm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xe6 }, 0, .vex_256_wig, .avx }, + + .{ .vcvttps2dq, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0xf3, 0x0f, 0x5b }, 0, .vex_128_wig, .avx }, + .{ .vcvttps2dq, .rm, &.{ .ymm, .ymm_m256 }, &.{ 0xf3, 0x0f, 0x5b }, 0, .vex_256_wig, .avx }, + + .{ .vcvttsd2si, .rm, &.{ .r32, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x2c }, 0, .vex_lig_w0, .sse2 }, + .{ .vcvttsd2si, .rm, &.{ .r64, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x2c }, 0, .vex_lig_w1, .sse2 }, - .{ .vcvtss2sd, .rvm, &.{ .xmm, .xmm, .xmm_m32 }, &.{ 0xf2, 0x0f, 0x5a }, 0, .vex_lig_wig, .avx }, + .{ .vcvttss2si, .rm, &.{ .r32, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x2c }, 0, .vex_lig_w0, .avx }, + .{ .vcvttss2si, .rm, &.{ .r64, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x2c }, 0, .vex_lig_w1, .avx }, .{ 
.vdivpd, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x5e }, 0, .vex_128_wig, .avx }, .{ .vdivpd, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x5e }, 0, .vex_256_wig, .avx }, diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig index c134f60316..1a064049fc 100644 --- a/src/link/Dwarf.zig +++ b/src/link/Dwarf.zig @@ -608,23 +608,44 @@ pub const DeclState = struct { switch (loc) { .register => |reg| { - try dbg_info.ensureUnusedCapacity(3); + try dbg_info.ensureUnusedCapacity(4); dbg_info.appendAssumeCapacity(@enumToInt(AbbrevKind.parameter)); - dbg_info.appendSliceAssumeCapacity(&[2]u8{ // DW.AT.location, DW.FORM.exprloc - 1, // ULEB128 dwarf expression length - reg, - }); + // DW.AT.location, DW.FORM.exprloc + var expr_len = std.io.countingWriter(std.io.null_writer); + if (reg < 32) { + expr_len.writer().writeByte(DW.OP.reg0 + reg) catch unreachable; + } else { + expr_len.writer().writeByte(DW.OP.regx) catch unreachable; + leb128.writeULEB128(expr_len.writer(), reg) catch unreachable; + } + leb128.writeULEB128(dbg_info.writer(), expr_len.bytes_written) catch unreachable; + if (reg < 32) { + dbg_info.appendAssumeCapacity(DW.OP.reg0 + reg); + } else { + dbg_info.appendAssumeCapacity(DW.OP.regx); + leb128.writeULEB128(dbg_info.writer(), reg) catch unreachable; + } }, .stack => |info| { - try dbg_info.ensureUnusedCapacity(8); + try dbg_info.ensureUnusedCapacity(9); dbg_info.appendAssumeCapacity(@enumToInt(AbbrevKind.parameter)); - const fixup = dbg_info.items.len; - dbg_info.appendSliceAssumeCapacity(&[2]u8{ // DW.AT.location, DW.FORM.exprloc - 1, // we will backpatch it after we encode the displacement in LEB128 - info.fp_register, // frame pointer - }); + // DW.AT.location, DW.FORM.exprloc + var expr_len = std.io.countingWriter(std.io.null_writer); + if (info.fp_register < 32) { + expr_len.writer().writeByte(DW.OP.breg0 + info.fp_register) catch unreachable; + } else { + expr_len.writer().writeByte(DW.OP.bregx) catch unreachable; + leb128.writeULEB128(expr_len.writer(), info.fp_register) catch unreachable; + } + leb128.writeILEB128(expr_len.writer(), info.offset) catch unreachable; + leb128.writeULEB128(dbg_info.writer(), expr_len.bytes_written) catch unreachable; + if (info.fp_register < 32) { + dbg_info.appendAssumeCapacity(DW.OP.breg0 + info.fp_register); + } else { + dbg_info.appendAssumeCapacity(DW.OP.bregx); + leb128.writeULEB128(dbg_info.writer(), info.fp_register) catch unreachable; + } leb128.writeILEB128(dbg_info.writer(), info.offset) catch unreachable; - dbg_info.items[fixup] += @intCast(u8, dbg_info.items.len - fixup - 2); }, .wasm_local => |value| { const leb_size = link.File.Wasm.getULEB128Size(value); @@ -670,22 +691,45 @@ pub const DeclState = struct { switch (loc) { .register => |reg| { - try dbg_info.ensureUnusedCapacity(2); - dbg_info.appendSliceAssumeCapacity(&[2]u8{ // DW.AT.location, DW.FORM.exprloc - 1, // ULEB128 dwarf expression length - reg, - }); + try dbg_info.ensureUnusedCapacity(4); + dbg_info.appendAssumeCapacity(@enumToInt(AbbrevKind.parameter)); + // DW.AT.location, DW.FORM.exprloc + var expr_len = std.io.countingWriter(std.io.null_writer); + if (reg < 32) { + expr_len.writer().writeByte(DW.OP.reg0 + reg) catch unreachable; + } else { + expr_len.writer().writeByte(DW.OP.regx) catch unreachable; + leb128.writeULEB128(expr_len.writer(), reg) catch unreachable; + } + leb128.writeULEB128(dbg_info.writer(), expr_len.bytes_written) catch unreachable; + if (reg < 32) { + dbg_info.appendAssumeCapacity(DW.OP.reg0 + reg); + } else { + 
dbg_info.appendAssumeCapacity(DW.OP.regx); + leb128.writeULEB128(dbg_info.writer(), reg) catch unreachable; + } }, .stack => |info| { - try dbg_info.ensureUnusedCapacity(7); - const fixup = dbg_info.items.len; - dbg_info.appendSliceAssumeCapacity(&[2]u8{ // DW.AT.location, DW.FORM.exprloc - 1, // we will backpatch it after we encode the displacement in LEB128 - info.fp_register, - }); + try dbg_info.ensureUnusedCapacity(9); + dbg_info.appendAssumeCapacity(@enumToInt(AbbrevKind.parameter)); + // DW.AT.location, DW.FORM.exprloc + var expr_len = std.io.countingWriter(std.io.null_writer); + if (info.fp_register < 32) { + expr_len.writer().writeByte(DW.OP.breg0 + info.fp_register) catch unreachable; + } else { + expr_len.writer().writeByte(DW.OP.bregx) catch unreachable; + leb128.writeULEB128(expr_len.writer(), info.fp_register) catch unreachable; + } + leb128.writeILEB128(expr_len.writer(), info.offset) catch unreachable; + leb128.writeULEB128(dbg_info.writer(), expr_len.bytes_written) catch unreachable; + if (info.fp_register < 32) { + dbg_info.appendAssumeCapacity(DW.OP.breg0 + info.fp_register); + } else { + dbg_info.appendAssumeCapacity(DW.OP.bregx); + leb128.writeULEB128(dbg_info.writer(), info.fp_register) catch unreachable; + } leb128.writeILEB128(dbg_info.writer(), info.offset) catch unreachable; - dbg_info.items[fixup] += @intCast(u8, dbg_info.items.len - fixup - 2); }, .wasm_local => |value| { diff --git a/test/behavior/cast.zig b/test/behavior/cast.zig index 20f84184a0..d6717032ff 100644 --- a/test/behavior/cast.zig +++ b/test/behavior/cast.zig @@ -153,7 +153,6 @@ test "@intToFloat(f80)" { test "@floatToInt" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; -- cgit v1.2.3 From 6c6d8d67cfe14c50684c04a579c1e62bf287e8cb Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Sun, 14 May 2023 05:12:46 -0400 Subject: x86_64: redo movement, float negation, and `@fabs` --- src/arch/x86_64/CodeGen.zig | 357 ++++++++++++++++++++++++----------- src/arch/x86_64/Encoding.zig | 18 +- src/arch/x86_64/Mir.zig | 12 ++ src/arch/x86_64/encodings.zig | 75 +++++++- src/type.zig | 12 +- test/behavior/floatop.zig | 1 - test/behavior/math.zig | 1 - test/behavior/translate_c_macros.zig | 1 - 8 files changed, 359 insertions(+), 118 deletions(-) (limited to 'src/arch') diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index e5c6925596..80f537e046 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -4681,61 +4681,136 @@ fn airBitReverse(self: *Self, inst: Air.Inst.Index) !void { } fn airFloatSign(self: *Self, inst: Air.Inst.Index) !void { + const tag = self.air.instructions.items(.tag)[inst]; const un_op = self.air.instructions.items(.data)[inst].un_op; const ty = self.air.typeOf(un_op); - const ty_bits = ty.floatBits(self.target.*); + const abi_size: u32 = switch (ty.abiSize(self.target.*)) { + 1...16 => 16, + 17...32 => 32, + else => return self.fail("TODO implement airFloatSign for {}", .{ + ty.fmt(self.bin_file.options.module.?), + }), + }; + const scalar_bits = ty.scalarType().floatBits(self.target.*); + + const src_mcv = try self.resolveInst(un_op); + const src_lock 
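
The Dwarf.zig rewrite above encodes one rule twice (once through a counting writer to size the expression, once for real): register numbers 0..31 fit the single-byte DW_OP_reg0..DW_OP_reg31 (or breg0..breg31) range, while larger numbers need DW_OP_regx/bregx plus a ULEB128 operand. A minimal sketch of the register case:

    const std = @import("std");
    const DW = std.dwarf;
    const leb128 = std.leb;

    fn writeRegLoc(writer: anytype, reg: u8) !void {
        if (reg < 32) {
            try writer.writeByte(DW.OP.reg0 + reg);
        } else {
            try writer.writeByte(DW.OP.regx);
            try leb128.writeULEB128(writer, reg);
        }
    }

    test "dwarf register location encoding" {
        var buf: [8]u8 = undefined;
        var fbs = std.io.fixedBufferStream(&buf);
        try writeRegLoc(fbs.writer(), 3); // one byte: DW.OP.reg3
        try writeRegLoc(fbs.writer(), 33); // two bytes: DW.OP.regx, ULEB128 33
        try std.testing.expectEqual(@as(usize, 3), fbs.getWritten().len);
    }
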
= if (src_mcv.getReg()) |reg| self.register_manager.lockReg(reg) else null; + defer if (src_lock) |lock| self.register_manager.unlockReg(lock); + + const dst_mcv: MCValue = if (src_mcv.isRegister() and self.reuseOperand(inst, un_op, 0, src_mcv)) + src_mcv + else if (self.hasFeature(.avx)) + .{ .register = try self.register_manager.allocReg(inst, sse) } + else + try self.copyToRegisterWithInstTracking(inst, ty, src_mcv); + const dst_reg = dst_mcv.getReg().?; + const dst_lock = self.register_manager.lockReg(dst_reg); + defer if (dst_lock) |lock| self.register_manager.unlockReg(lock); var arena = std.heap.ArenaAllocator.init(self.gpa); defer arena.deinit(); - const ExpectedContents = union { - f16: Value.Payload.Float_16, - f32: Value.Payload.Float_32, - f64: Value.Payload.Float_64, - f80: Value.Payload.Float_80, - f128: Value.Payload.Float_128, + const ExpectedContents = struct { + scalar: union { + i64: Value.Payload.I64, + big: struct { + limbs: [ + @max( + std.math.big.int.Managed.default_capacity, + std.math.big.int.calcTwosCompLimbCount(128), + ) + ]std.math.big.Limb, + pl: Value.Payload.BigInt, + }, + }, + repeated: Value.Payload.SubValue, }; var stack align(@alignOf(ExpectedContents)) = std.heap.stackFallback(@sizeOf(ExpectedContents), arena.allocator()); + var int_pl = Type.Payload.Bits{ + .base = .{ .tag = .int_signed }, + .data = scalar_bits, + }; var vec_pl = Type.Payload.Array{ .base = .{ .tag = .vector }, .data = .{ - .len = @divExact(128, ty_bits), - .elem_type = ty, + .len = @divExact(abi_size * 8, scalar_bits), + .elem_type = Type.initPayload(&int_pl.base), }, }; const vec_ty = Type.initPayload(&vec_pl.base); - - var sign_pl = Value.Payload.SubValue{ - .base = .{ .tag = .repeated }, - .data = try Value.floatToValue(-0.0, stack.get(), ty, self.target.*), + const sign_val = switch (tag) { + .neg => try vec_ty.minInt(stack.get(), self.target.*), + .fabs => try vec_ty.maxInt(stack.get(), self.target.*), + else => unreachable, }; - const sign_val = Value.initPayload(&sign_pl.base); const sign_mcv = try self.genTypedValue(.{ .ty = vec_ty, .val = sign_val }); - - const src_mcv = try self.resolveInst(un_op); - const dst_mcv = if (src_mcv.isRegister() and self.reuseOperand(inst, un_op, 0, src_mcv)) - src_mcv + const sign_mem = if (sign_mcv.isMemory()) + sign_mcv.mem(Memory.PtrSize.fromSize(abi_size)) else - try self.copyToRegisterWithInstTracking(inst, ty, src_mcv); - const dst_lock = self.register_manager.lockReg(dst_mcv.register); - defer if (dst_lock) |lock| self.register_manager.unlockReg(lock); + Memory.sib(Memory.PtrSize.fromSize(abi_size), .{ + .base = .{ .reg = try self.copyToTmpRegister(Type.usize, sign_mcv.address()) }, + }); - const tag = self.air.instructions.items(.tag)[inst]; - try self.genBinOpMir(switch (ty_bits) { - // No point using an extra prefix byte for *pd which performs the same operation. 
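
The minInt/maxInt integer vector built above is the classic SSE sign-mask trick: for an n-bit float, minInt of an n-bit signed integer has only the sign bit set (so xor flips the sign for negation), and maxInt is its complement (so and clears it for @fabs). Scalar illustration at f32:

    const std = @import("std");

    test "float sign masks" {
        const neg_mask = @bitCast(u32, @as(i32, std.math.minInt(i32)));
        const abs_mask = @bitCast(u32, @as(i32, std.math.maxInt(i32)));
        try std.testing.expectEqual(@as(u32, 0x8000_0000), neg_mask);
        try std.testing.expectEqual(@as(u32, 0x7fff_ffff), abs_mask);

        const x: f32 = 1.5;
        const neg = @bitCast(f32, @bitCast(u32, x) ^ neg_mask);
        const abs = @bitCast(f32, @bitCast(u32, -x) & abs_mask);
        try std.testing.expectEqual(@as(f32, -1.5), neg);
        try std.testing.expectEqual(@as(f32, 1.5), abs);
    }
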
- 16, 32, 64, 128 => switch (tag) { - .neg => .{ ._ps, .xor }, - .fabs => .{ ._ps, .andn }, + if (self.hasFeature(.avx)) try self.asmRegisterRegisterMemory( + switch (scalar_bits) { + 16, 128 => if (abi_size <= 16 or self.hasFeature(.avx2)) switch (tag) { + .neg => .{ .vp_, .xor }, + .fabs => .{ .vp_, .@"and" }, + else => unreachable, + } else switch (tag) { + .neg => .{ .v_ps, .xor }, + .fabs => .{ .v_ps, .@"and" }, + else => unreachable, + }, + 32 => switch (tag) { + .neg => .{ .v_ps, .xor }, + .fabs => .{ .v_ps, .@"and" }, + else => unreachable, + }, + 64 => switch (tag) { + .neg => .{ .v_pd, .xor }, + .fabs => .{ .v_pd, .@"and" }, + else => unreachable, + }, + 80 => return self.fail("TODO implement airFloatSign for {}", .{ + ty.fmt(self.bin_file.options.module.?), + }), else => unreachable, }, - 80 => return self.fail("TODO implement airFloatSign for {}", .{ - ty.fmt(self.bin_file.options.module.?), - }), - else => unreachable, - }, vec_ty, dst_mcv, sign_mcv); + registerAlias(dst_reg, abi_size), + registerAlias(if (src_mcv.isRegister()) + src_mcv.getReg().? + else + try self.copyToTmpRegister(ty, src_mcv), abi_size), + sign_mem, + ) else try self.asmRegisterMemory( + switch (scalar_bits) { + 16, 128 => switch (tag) { + .neg => .{ .p_, .xor }, + .fabs => .{ .p_, .@"and" }, + else => unreachable, + }, + 32 => switch (tag) { + .neg => .{ ._ps, .xor }, + .fabs => .{ ._ps, .@"and" }, + else => unreachable, + }, + 64 => switch (tag) { + .neg => .{ ._pd, .xor }, + .fabs => .{ ._pd, .@"and" }, + else => unreachable, + }, + 80 => return self.fail("TODO implement airFloatSign for {}", .{ + ty.fmt(self.bin_file.options.module.?), + }), + else => unreachable, + }, + registerAlias(dst_reg, abi_size), + sign_mem, + ); return self.finishAir(inst, dst_mcv, .{ un_op, .none, .none }); } @@ -8593,7 +8668,6 @@ const MoveStrategy = union(enum) { const InsertExtract = struct { insert: Mir.Inst.FixedTag, extract: Mir.Inst.FixedTag, - imm: Immediate, }; }; fn moveStrategy(self: *Self, ty: Type, aligned: bool) !MoveStrategy { @@ -8603,17 +8677,15 @@ fn moveStrategy(self: *Self, ty: Type, aligned: bool) !MoveStrategy { 16 => return if (self.hasFeature(.avx)) .{ .vex_insert_extract = .{ .insert = .{ .vp_w, .insr }, .extract = .{ .vp_w, .extr }, - .imm = Immediate.u(0), } } else .{ .insert_extract = .{ .insert = .{ .p_w, .insr }, .extract = .{ .p_w, .extr }, - .imm = Immediate.u(0), } }, 32 => return .{ .move = if (self.hasFeature(.avx)) .{ .v_ss, .mov } else .{ ._ss, .mov } }, 64 => return .{ .move = if (self.hasFeature(.avx)) .{ .v_sd, .mov } else .{ ._sd, .mov } }, 128 => return .{ .move = if (self.hasFeature(.avx)) - if (aligned) .{ .v_ps, .mova } else .{ .v_ps, .movu } - else if (aligned) .{ ._ps, .mova } else .{ ._ps, .movu } }, + if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu } + else if (aligned) .{ ._, .movdqa } else .{ ._, .movdqu } }, else => {}, }, .Vector => switch (ty.childType().zigTypeTag()) { @@ -8622,101 +8694,120 @@ fn moveStrategy(self: *Self, ty: Type, aligned: bool) !MoveStrategy { 1 => if (self.hasFeature(.avx)) return .{ .vex_insert_extract = .{ .insert = .{ .vp_b, .insr }, .extract = .{ .vp_b, .extr }, - .imm = Immediate.u(0), } } else if (self.hasFeature(.sse4_2)) return .{ .insert_extract = .{ .insert = .{ .p_b, .insr }, .extract = .{ .p_b, .extr }, - .imm = Immediate.u(0), } }, 2 => return if (self.hasFeature(.avx)) .{ .vex_insert_extract = .{ .insert = .{ .vp_w, .insr }, .extract = .{ .vp_w, .extr }, - .imm = Immediate.u(0), } } else .{ .insert_extract = .{ .insert = .{ .p_w, 
.insr }, .extract = .{ .p_w, .extr }, - .imm = Immediate.u(0), } }, 3...4 => return .{ .move = if (self.hasFeature(.avx)) - .{ .v_ss, .mov } + .{ .v_d, .mov } else - .{ ._ss, .mov } }, + .{ ._d, .mov } }, 5...8 => return .{ .move = if (self.hasFeature(.avx)) - .{ .v_sd, .mov } + .{ .v_q, .mov } else - .{ ._sd, .mov } }, + .{ ._q, .mov } }, + 9...16 => return .{ .move = if (self.hasFeature(.avx)) + if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu } + else if (aligned) .{ ._, .movdqa } else .{ ._, .movdqu } }, + 17...32 => if (self.hasFeature(.avx)) + return .{ .move = if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu } }, else => {}, }, 16 => switch (ty.vectorLen()) { 1 => return if (self.hasFeature(.avx)) .{ .vex_insert_extract = .{ .insert = .{ .vp_w, .insr }, .extract = .{ .vp_w, .extr }, - .imm = Immediate.u(0), } } else .{ .insert_extract = .{ .insert = .{ .p_w, .insr }, .extract = .{ .p_w, .extr }, - .imm = Immediate.u(0), } }, 2 => return .{ .move = if (self.hasFeature(.avx)) - .{ .v_ss, .mov } + .{ .v_d, .mov } else - .{ ._ss, .mov } }, + .{ ._d, .mov } }, 3...4 => return .{ .move = if (self.hasFeature(.avx)) - .{ .v_sd, .mov } + .{ .v_q, .mov } else - .{ ._sd, .mov } }, + .{ ._q, .mov } }, 5...8 => return .{ .move = if (self.hasFeature(.avx)) - .{ .v_ps, .mov } - else - .{ ._ps, .mov } }, + if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu } + else if (aligned) .{ ._, .movdqa } else .{ ._, .movdqu } }, + 9...16 => if (self.hasFeature(.avx)) + return .{ .move = if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu } }, else => {}, }, 32 => switch (ty.vectorLen()) { 1 => return .{ .move = if (self.hasFeature(.avx)) - .{ .v_ss, .mov } + .{ .v_d, .mov } else - .{ ._ss, .mov } }, + .{ ._d, .mov } }, 2 => return .{ .move = if (self.hasFeature(.avx)) - .{ .v_sd, .mov } + .{ .v_q, .mov } else - .{ ._sd, .mov } }, + .{ ._q, .mov } }, 3...4 => return .{ .move = if (self.hasFeature(.avx)) - if (aligned) .{ .v_ps, .mova } else .{ .v_ps, .movu } - else if (aligned) .{ ._ps, .mova } else .{ ._ps, .movu } }, + if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu } + else if (aligned) .{ ._, .movdqa } else .{ ._, .movdqu } }, 5...8 => if (self.hasFeature(.avx)) - return .{ .move = if (aligned) .{ .v_ps, .mova } else .{ .v_ps, .movu } }, + return .{ .move = if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu } }, else => {}, }, 64 => switch (ty.vectorLen()) { 1 => return .{ .move = if (self.hasFeature(.avx)) - .{ .v_sd, .mov } + .{ .v_q, .mov } else - .{ ._sd, .mov } }, + .{ ._q, .mov } }, 2 => return .{ .move = if (self.hasFeature(.avx)) - if (aligned) .{ .v_ps, .mova } else .{ .v_ps, .movu } - else if (aligned) .{ ._ps, .mova } else .{ ._ps, .movu } }, + if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu } + else if (aligned) .{ ._, .movdqa } else .{ ._, .movdqu } }, 3...4 => if (self.hasFeature(.avx)) - return .{ .move = if (aligned) .{ .v_ps, .mova } else .{ .v_ps, .movu } }, + return .{ .move = if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu } }, + else => {}, + }, + 128 => switch (ty.vectorLen()) { + 1 => return .{ .move = if (self.hasFeature(.avx)) + if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu } + else if (aligned) .{ ._, .movdqa } else .{ ._, .movdqu } }, + 2 => if (self.hasFeature(.avx)) + return .{ .move = if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu } }, + else => {}, + }, + 256 => switch (ty.vectorLen()) { + 1 => if (self.hasFeature(.avx)) + return .{ .move = if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu } }, else => {}, }, else => {}, }, .Float => switch 
(ty.childType().floatBits(self.target.*)) { 16 => switch (ty.vectorLen()) { - 1 => {}, + 1 => return if (self.hasFeature(.avx)) .{ .vex_insert_extract = .{ + .insert = .{ .vp_w, .insr }, + .extract = .{ .vp_w, .extr }, + } } else .{ .insert_extract = .{ + .insert = .{ .p_w, .insr }, + .extract = .{ .p_w, .extr }, + } }, 2 => return .{ .move = if (self.hasFeature(.avx)) - .{ .v_ss, .mov } + .{ .v_d, .mov } else - .{ ._ss, .mov } }, + .{ ._d, .mov } }, 3...4 => return .{ .move = if (self.hasFeature(.avx)) - .{ .v_sd, .mov } + .{ .v_q, .mov } else - .{ ._sd, .mov } }, + .{ ._q, .mov } }, 5...8 => return .{ .move = if (self.hasFeature(.avx)) - if (aligned) .{ .v_ps, .mova } else .{ .v_ps, .movu } - else if (aligned) .{ ._ps, .mova } else .{ ._ps, .movu } }, + if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu } + else if (aligned) .{ ._, .movdqa } else .{ ._, .movdqu } }, 9...16 => if (self.hasFeature(.avx)) - return .{ .move = if (aligned) .{ .v_ps, .mova } else .{ .v_ps, .movu } }, + return .{ .move = if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu } }, else => {}, }, 32 => switch (ty.vectorLen()) { @@ -8741,18 +8832,18 @@ fn moveStrategy(self: *Self, ty: Type, aligned: bool) !MoveStrategy { else .{ ._sd, .mov } }, 2 => return .{ .move = if (self.hasFeature(.avx)) - if (aligned) .{ .v_ps, .mova } else .{ .v_ps, .movu } - else if (aligned) .{ ._ps, .mova } else .{ ._ps, .movu } }, + if (aligned) .{ .v_pd, .mova } else .{ .v_pd, .movu } + else if (aligned) .{ ._pd, .mova } else .{ ._pd, .movu } }, 3...4 => if (self.hasFeature(.avx)) - return .{ .move = if (aligned) .{ .v_ps, .mova } else .{ .v_ps, .movu } }, + return .{ .move = if (aligned) .{ .v_pd, .mova } else .{ .v_pd, .movu } }, else => {}, }, 128 => switch (ty.vectorLen()) { 1 => return .{ .move = if (self.hasFeature(.avx)) - if (aligned) .{ .v_ps, .mova } else .{ .v_ps, .movu } - else if (aligned) .{ ._ps, .mova } else .{ ._ps, .movu } }, + if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu } + else if (aligned) .{ ._, .movdqa } else .{ ._, .movdqu } }, 2 => if (self.hasFeature(.avx)) - return .{ .move = if (aligned) .{ .v_ps, .mova } else .{ .v_ps, .movu } }, + return .{ .move = if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu } }, else => {}, }, else => {}, @@ -8860,29 +8951,69 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr ); } }, - .register => |src_reg| if (dst_reg.id() != src_reg.id()) try self.asmRegisterRegister( - if ((dst_reg.class() == .sse) == (src_reg.class() == .sse)) - switch (ty.zigTypeTag()) { - else => .{ ._, .mov }, - .Float, .Vector => .{ ._ps, .mova }, - } - else switch (abi_size) { - 2 => return try self.asmRegisterRegisterImmediate( - if (dst_reg.class() == .sse) .{ .p_w, .insr } else .{ .p_w, .extr }, - registerAlias(dst_reg, 4), - registerAlias(src_reg, 4), - Immediate.u(0), + .register => |src_reg| if (dst_reg.id() != src_reg.id()) switch (dst_reg.class()) { + .general_purpose => switch (src_reg.class()) { + .general_purpose => try self.asmRegisterRegister( + .{ ._, .mov }, + registerAlias(dst_reg, abi_size), + registerAlias(src_reg, abi_size), ), - 4 => .{ ._d, .mov }, - 8 => .{ ._q, .mov }, - else => return self.fail( - "unsupported register copy from {s} to {s}", - .{ @tagName(src_reg), @tagName(dst_reg) }, + .segment => try self.asmRegisterRegister( + .{ ._, .mov }, + registerAlias(dst_reg, abi_size), + src_reg, ), + .sse => try self.asmRegisterRegister( + switch (abi_size) { + 1...4 => if (self.hasFeature(.avx)) .{ .v_d, .mov } else .{ ._d, .mov }, + 5...8 => 
if (self.hasFeature(.avx)) .{ .v_q, .mov } else .{ ._q, .mov }, + else => unreachable, + }, + registerAlias(dst_reg, @max(abi_size, 4)), + src_reg.to128(), + ), + .x87, .mmx => unreachable, }, - registerAlias(dst_reg, abi_size), - registerAlias(src_reg, abi_size), - ), + .segment => try self.asmRegisterRegister( + .{ ._, .mov }, + dst_reg, + switch (src_reg.class()) { + .general_purpose, .segment => registerAlias(src_reg, abi_size), + .sse => try self.copyToTmpRegister(ty, src_mcv), + .x87, .mmx => unreachable, + }, + ), + .sse => switch (src_reg.class()) { + .general_purpose => try self.asmRegisterRegister( + switch (abi_size) { + 1...4 => if (self.hasFeature(.avx)) .{ .v_d, .mov } else .{ ._d, .mov }, + 5...8 => if (self.hasFeature(.avx)) .{ .v_q, .mov } else .{ ._q, .mov }, + else => unreachable, + }, + dst_reg.to128(), + registerAlias(src_reg, @max(abi_size, 4)), + ), + .segment => try self.genSetReg( + dst_reg, + ty, + .{ .register = try self.copyToTmpRegister(ty, src_mcv) }, + ), + .sse => try self.asmRegisterRegister( + switch (ty.scalarType().zigTypeTag()) { + else => if (self.hasFeature(.avx)) .{ .v_, .movdqa } else .{ ._, .movdqa }, + .Float => switch (ty.floatBits(self.target.*)) { + else => if (self.hasFeature(.avx)) .{ .v_, .movdqa } else .{ ._, .movdqa }, + 32 => if (self.hasFeature(.avx)) .{ .v_ps, .mova } else .{ ._ps, .mova }, + 64 => if (self.hasFeature(.avx)) .{ .v_pd, .mova } else .{ ._pd, .mova }, + }, + }, + registerAlias(dst_reg, abi_size), + registerAlias(src_reg, abi_size), + ), + .x87, .mmx => unreachable, + }, + .x87, .mmx => unreachable, + }, .register_offset, .indirect, .load_frame, @@ -8918,14 +9049,14 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr ie.insert, dst_alias, src_mem, - ie.imm, + Immediate.u(0), ), .vex_insert_extract => |ie| try self.asmRegisterRegisterMemoryImmediate( ie.insert, dst_alias, dst_alias, src_mem, - ie.imm, + Immediate.u(0), ), } }, @@ -8947,14 +9078,14 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr ie.insert, dst_alias, src_mem, - ie.imm, + Immediate.u(0), ), .vex_insert_extract => |ie| try self.asmRegisterRegisterMemoryImmediate( ie.insert, dst_alias, dst_alias, src_mem, - ie.imm, + Immediate.u(0), ), } }, @@ -8994,14 +9125,14 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr ie.insert, dst_alias, src_mem, - ie.imm, + Immediate.u(0), ), .vex_insert_extract => |ie| try self.asmRegisterRegisterMemoryImmediate( ie.insert, dst_alias, dst_alias, src_mem, - ie.imm, + Immediate.u(0), ), } }, @@ -9129,7 +9260,7 @@ fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCVal ie.extract, dst_mem, src_alias, - ie.imm, + Immediate.u(0), ), } }, @@ -10499,7 +10630,7 @@ fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void { if (self.reuseOperand(inst, extra.init, 0, src_mcv)) break :result src_mcv; const dst_mcv = try self.allocRegOrMem(inst, true); - try self.genCopy(src_ty, dst_mcv, src_mcv); + try self.genCopy(union_ty, dst_mcv, src_mcv); break :result dst_mcv; } @@ -11000,7 +11131,15 @@ fn registerAlias(reg: Register, size_bytes: u32) Register { reg.to64() else unreachable, - .segment, .x87, .mmx => unreachable, + .segment => if (size_bytes <= 2) + reg + else + unreachable, + .x87 => unreachable, + .mmx => if (size_bytes <= 8) + reg + else + unreachable, .sse => if (size_bytes <= 16) reg.to128() else if (size_bytes <= 32) diff --git a/src/arch/x86_64/Encoding.zig b/src/arch/x86_64/Encoding.zig index 
66a249a3f2..4014947673 100644 --- a/src/arch/x86_64/Encoding.zig +++ b/src/arch/x86_64/Encoding.zig @@ -261,7 +261,8 @@ pub const Mnemonic = enum { // X87 fisttp, fld, // MMX - movd, + movd, movq, + pand, pandn, por, pxor, // SSE addps, addss, andps, @@ -293,7 +294,8 @@ pub const Mnemonic = enum { maxpd, maxsd, minpd, minsd, movapd, - movq, //movd, movsd, + movdqa, movdqu, + //movsd, movupd, mulpd, mulsd, orpd, @@ -316,6 +318,7 @@ pub const Mnemonic = enum { roundpd, roundps, roundsd, roundss, // AVX vaddpd, vaddps, vaddsd, vaddss, + vandnpd, vandnps, vandpd, vandps, vbroadcastf128, vbroadcastsd, vbroadcastss, vcvtdq2pd, vcvtdq2ps, vcvtpd2dq, vcvtpd2ps, vcvtps2dq, vcvtps2pd, vcvtsd2si, vcvtsd2ss, @@ -327,22 +330,31 @@ pub const Mnemonic = enum { vmaxpd, vmaxps, vmaxsd, vmaxss, vminpd, vminps, vminsd, vminss, vmovapd, vmovaps, - vmovddup, vmovhlps, vmovlhps, + vmovd, + vmovddup, + vmovdqa, vmovdqu, + vmovhlps, vmovlhps, + vmovq, vmovsd, vmovshdup, vmovsldup, vmovss, vmovupd, vmovups, vmulpd, vmulps, vmulsd, vmulss, + vorpd, vorps, + vpand, vpandn, vpextrb, vpextrd, vpextrq, vpextrw, vpinsrb, vpinsrd, vpinsrq, vpinsrw, + vpor, vpshufhw, vpshuflw, vpsrld, vpsrlq, vpsrlw, vpunpckhbw, vpunpckhdq, vpunpckhqdq, vpunpckhwd, vpunpcklbw, vpunpckldq, vpunpcklqdq, vpunpcklwd, + vpxor, vroundpd, vroundps, vroundsd, vroundss, vshufpd, vshufps, vsqrtpd, vsqrtps, vsqrtsd, vsqrtss, vsubpd, vsubps, vsubsd, vsubss, + vxorpd, vxorps, // F16C vcvtph2ps, vcvtps2ph, // FMA diff --git a/src/arch/x86_64/Mir.zig b/src/arch/x86_64/Mir.zig index ef8bbe07b3..4d1f59e454 100644 --- a/src/arch/x86_64/Mir.zig +++ b/src/arch/x86_64/Mir.zig @@ -236,6 +236,14 @@ pub const Inst = struct { /// VEX-Encoded ___ v_, + /// VEX-Encoded ___ Byte + v_b, + /// VEX-Encoded ___ Word + v_w, + /// VEX-Encoded ___ Doubleword + v_d, + /// VEX-Encoded ___ QuadWord + v_q, /// VEX-Encoded Packed ___ vp_, /// VEX-Encoded Packed ___ Byte @@ -526,6 +534,10 @@ pub const Inst = struct { cvttps2dq, /// Convert with truncation scalar double-precision floating-point value to doubleword integer cvttsd2si, + /// Move aligned packed integer values + movdqa, + /// Move unaligned packed integer values + movdqu, /// Packed interleave shuffle of quadruplets of single-precision floating-point values /// Packed interleave shuffle of pairs of double-precision floating-point values shuf, diff --git a/src/arch/x86_64/encodings.zig b/src/arch/x86_64/encodings.zig index 3383315bd6..3e57be61ea 100644 --- a/src/arch/x86_64/encodings.zig +++ b/src/arch/x86_64/encodings.zig @@ -970,11 +970,16 @@ pub const table = [_]Entry{ .{ .movapd, .mr, &.{ .xmm_m128, .xmm }, &.{ 0x66, 0x0f, 0x29 }, 0, .none, .sse2 }, .{ .movd, .rm, &.{ .xmm, .rm32 }, &.{ 0x66, 0x0f, 0x6e }, 0, .none, .sse2 }, - .{ .movd, .mr, &.{ .rm32, .xmm }, &.{ 0x66, 0x0f, 0x7e }, 0, .none, .sse2 }, - .{ .movq, .rm, &.{ .xmm, .rm64 }, &.{ 0x66, 0x0f, 0x6e }, 0, .long, .sse2 }, + .{ .movd, .mr, &.{ .rm32, .xmm }, &.{ 0x66, 0x0f, 0x7e }, 0, .none, .sse2 }, .{ .movq, .mr, &.{ .rm64, .xmm }, &.{ 0x66, 0x0f, 0x7e }, 0, .long, .sse2 }, + .{ .movdqa, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x6f }, 0, .none, .sse2 }, + .{ .movdqa, .mr, &.{ .xmm_m128, .xmm }, &.{ 0x66, 0x0f, 0x7f }, 0, .none, .sse2 }, + + .{ .movdqu, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0xf3, 0x0f, 0x6f }, 0, .none, .sse2 }, + .{ .movdqu, .mr, &.{ .xmm_m128, .xmm }, &.{ 0xf3, 0x0f, 0x7f }, 0, .none, .sse2 }, + .{ .movq, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf3, 0x0f, 0x7e }, 0, .none, .sse2 }, .{ .movq, .mr, &.{ .xmm_m64, .xmm }, &.{ 0x66, 0x0f, 0xd6 }, 0, 
.none, .sse2 }, @@ -987,10 +992,16 @@ pub const table = [_]Entry{ .{ .orpd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x56 }, 0, .none, .sse2 }, + .{ .pand, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xdb }, 0, .none, .sse2 }, + + .{ .pandn, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xdf }, 0, .none, .sse2 }, + .{ .pextrw, .rmi, &.{ .r32, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0xc5 }, 0, .none, .sse2 }, .{ .pinsrw, .rmi, &.{ .xmm, .r32_m16, .imm8 }, &.{ 0x66, 0x0f, 0xc4 }, 0, .none, .sse2 }, + .{ .por, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xeb }, 0, .none, .sse2 }, + .{ .pshufhw, .rmi, &.{ .xmm, .xmm_m128, .imm8 }, &.{ 0xf3, 0x0f, 0x70 }, 0, .none, .sse2 }, .{ .pshuflw, .rmi, &.{ .xmm, .xmm_m128, .imm8 }, &.{ 0xf2, 0x0f, 0x70 }, 0, .none, .sse2 }, @@ -1012,6 +1023,8 @@ pub const table = [_]Entry{ .{ .punpckldq, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x62 }, 0, .none, .sse2 }, .{ .punpcklqdq, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x6c }, 0, .none, .sse2 }, + .{ .pxor, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xef }, 0, .none, .sse2 }, + .{ .shufpd, .rmi, &.{ .xmm, .xmm_m128, .imm8 }, &.{ 0x66, 0x0f, 0xc6 }, 0, .none, .sse2 }, .{ .sqrtpd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x51 }, 0, .none, .sse2 }, @@ -1070,6 +1083,18 @@ pub const table = [_]Entry{ .{ .vaddss, .rvm, &.{ .xmm, .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x58 }, 0, .vex_lig_wig, .avx }, + .{ .vandnpd, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x55 }, 0, .vex_128_wig, .avx }, + .{ .vandnpd, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x55 }, 0, .vex_256_wig, .avx }, + + .{ .vandnps, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x0f, 0x55 }, 0, .vex_128_wig, .avx }, + .{ .vandnps, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x0f, 0x55 }, 0, .vex_256_wig, .avx }, + + .{ .vandpd, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x54 }, 0, .vex_128_wig, .avx }, + .{ .vandpd, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x54 }, 0, .vex_256_wig, .avx }, + + .{ .vandps, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x0f, 0x54 }, 0, .vex_128_wig, .avx }, + .{ .vandps, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x0f, 0x54 }, 0, .vex_256_wig, .avx }, + .{ .vbroadcastss, .rm, &.{ .xmm, .m32 }, &.{ 0x66, 0x0f, 0x38, 0x18 }, 0, .vex_128_w0, .avx }, .{ .vbroadcastss, .rm, &.{ .ymm, .m32 }, &.{ 0x66, 0x0f, 0x38, 0x18 }, 0, .vex_256_w0, .avx }, .{ .vbroadcastsd, .rm, &.{ .ymm, .m64 }, &.{ 0x66, 0x0f, 0x38, 0x19 }, 0, .vex_256_w0, .avx }, @@ -1169,13 +1194,31 @@ pub const table = [_]Entry{ .{ .vmovaps, .rm, &.{ .ymm, .ymm_m256 }, &.{ 0x0f, 0x28 }, 0, .vex_256_wig, .avx }, .{ .vmovaps, .mr, &.{ .ymm_m256, .ymm }, &.{ 0x0f, 0x29 }, 0, .vex_256_wig, .avx }, + .{ .vmovd, .rm, &.{ .xmm, .rm32 }, &.{ 0x66, 0x0f, 0x6e }, 0, .vex_128_w0, .avx }, + .{ .vmovq, .rm, &.{ .xmm, .rm64 }, &.{ 0x66, 0x0f, 0x6e }, 0, .vex_128_w1, .avx }, + .{ .vmovd, .mr, &.{ .rm32, .xmm }, &.{ 0x66, 0x0f, 0x7e }, 0, .vex_128_w0, .avx }, + .{ .vmovq, .mr, &.{ .rm64, .xmm }, &.{ 0x66, 0x0f, 0x7e }, 0, .vex_128_w1, .avx }, + .{ .vmovddup, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x12 }, 0, .vex_128_wig, .avx }, .{ .vmovddup, .rm, &.{ .ymm, .ymm_m256 }, &.{ 0xf2, 0x0f, 0x12 }, 0, .vex_256_wig, .avx }, + .{ .vmovdqa, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x6f }, 0, .vex_128_wig, .avx }, + .{ .vmovdqa, .mr, &.{ .xmm_m128, .xmm }, &.{ 0x66, 0x0f, 0x7f }, 0, .vex_128_wig, .avx }, + .{ .vmovdqa, .rm, &.{ .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x6f }, 0, .vex_256_wig, .avx }, + .{ .vmovdqa, .mr, &.{ .ymm_m256, .ymm }, &.{ 0x66, 0x0f, 0x7f }, 0, .vex_256_wig, .avx 
}, + + .{ .vmovdqu, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0xf3, 0x0f, 0x6f }, 0, .vex_128_wig, .avx }, + .{ .vmovdqu, .mr, &.{ .xmm_m128, .xmm }, &.{ 0xf3, 0x0f, 0x7f }, 0, .vex_128_wig, .avx }, + .{ .vmovdqu, .rm, &.{ .ymm, .ymm_m256 }, &.{ 0xf3, 0x0f, 0x6f }, 0, .vex_256_wig, .avx }, + .{ .vmovdqu, .mr, &.{ .ymm_m256, .ymm }, &.{ 0xf3, 0x0f, 0x7f }, 0, .vex_256_wig, .avx }, + .{ .vmovhlps, .rvm, &.{ .xmm, .xmm, .xmm }, &.{ 0x0f, 0x12 }, 0, .vex_128_wig, .avx }, .{ .vmovlhps, .rvm, &.{ .xmm, .xmm, .xmm }, &.{ 0x0f, 0x16 }, 0, .vex_128_wig, .avx }, + .{ .vmovq, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf3, 0x0f, 0x7e }, 0, .vex_128_wig, .avx }, + .{ .vmovq, .mr, &.{ .xmm_m64, .xmm }, &.{ 0x66, 0x0f, 0xd6 }, 0, .vex_128_wig, .avx }, + .{ .vmovsd, .rvm, &.{ .xmm, .xmm, .xmm }, &.{ 0xf2, 0x0f, 0x10 }, 0, .vex_lig_wig, .avx }, .{ .vmovsd, .rm, &.{ .xmm, .m64 }, &.{ 0xf2, 0x0f, 0x10 }, 0, .vex_lig_wig, .avx }, .{ .vmovsd, .mvr, &.{ .xmm, .xmm, .xmm }, &.{ 0xf2, 0x0f, 0x11 }, 0, .vex_lig_wig, .avx }, @@ -1212,6 +1255,16 @@ pub const table = [_]Entry{ .{ .vmulss, .rvm, &.{ .xmm, .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x59 }, 0, .vex_lig_wig, .avx }, + .{ .vorpd, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x56 }, 0, .vex_128_wig, .avx }, + .{ .vorpd, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x56 }, 0, .vex_256_wig, .avx }, + + .{ .vorps, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x0f, 0x56 }, 0, .vex_128_wig, .avx }, + .{ .vorps, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x0f, 0x56 }, 0, .vex_256_wig, .avx }, + + .{ .vpand, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xdb }, 0, .vex_128_wig, .avx }, + + .{ .vpandn, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xdf }, 0, .vex_128_wig, .avx }, + .{ .vpextrb, .mri, &.{ .r32_m8, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x14 }, 0, .vex_128_w0, .avx }, .{ .vpextrd, .mri, &.{ .rm32, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x16 }, 0, .vex_128_w0, .avx }, .{ .vpextrq, .mri, &.{ .rm64, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x16 }, 0, .vex_128_w1, .avx }, @@ -1225,6 +1278,8 @@ pub const table = [_]Entry{ .{ .vpinsrw, .rvmi, &.{ .xmm, .xmm, .r32_m16, .imm8 }, &.{ 0x66, 0x0f, 0xc4 }, 0, .vex_128_wig, .avx }, + .{ .vpor, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xeb }, 0, .vex_128_wig, .avx }, + .{ .vpsrlw, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xd1 }, 0, .vex_128_wig, .avx }, .{ .vpsrlw, .vmi, &.{ .xmm, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x71 }, 2, .vex_128_wig, .avx }, .{ .vpsrld, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xd2 }, 0, .vex_128_wig, .avx }, @@ -1242,6 +1297,8 @@ pub const table = [_]Entry{ .{ .vpunpckldq, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x62 }, 0, .vex_128_wig, .avx }, .{ .vpunpcklqdq, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x6c }, 0, .vex_128_wig, .avx }, + .{ .vpxor, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xef }, 0, .vex_128_wig, .avx }, + .{ .vroundpd, .rmi, &.{ .xmm, .xmm_m128, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x09 }, 0, .vex_128_wig, .avx }, .{ .vroundpd, .rmi, &.{ .ymm, .ymm_m256, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x09 }, 0, .vex_256_wig, .avx }, @@ -1278,6 +1335,12 @@ pub const table = [_]Entry{ .{ .vsubss, .rvm, &.{ .xmm, .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x5c }, 0, .vex_lig_wig, .avx }, + .{ .vxorpd, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x57 }, 0, .vex_128_wig, .avx }, + .{ .vxorpd, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x57 }, 0, .vex_256_wig, .avx }, + + .{ .vxorps, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x0f, 0x57 }, 0, .vex_128_wig, .avx }, + 
.{ .vxorps, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x0f, 0x57 }, 0, .vex_256_wig, .avx }, + // F16C .{ .vcvtph2ps, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0x66, 0x0f, 0x38, 0x13 }, 0, .vex_128_w0, .f16c }, .{ .vcvtph2ps, .rm, &.{ .ymm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x13 }, 0, .vex_256_w0, .f16c }, @@ -1313,6 +1376,12 @@ pub const table = [_]Entry{ .{ .vbroadcastss, .rm, &.{ .ymm, .xmm }, &.{ 0x66, 0x0f, 0x38, 0x18 }, 0, .vex_256_w0, .avx2 }, .{ .vbroadcastsd, .rm, &.{ .ymm, .xmm }, &.{ 0x66, 0x0f, 0x38, 0x19 }, 0, .vex_256_w0, .avx2 }, + .{ .vpand, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xdb }, 0, .vex_256_wig, .avx2 }, + + .{ .vpandn, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xdf }, 0, .vex_256_wig, .avx2 }, + + .{ .vpor, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xeb }, 0, .vex_256_wig, .avx2 }, + .{ .vpsrlw, .rvm, &.{ .ymm, .ymm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xd1 }, 0, .vex_256_wig, .avx2 }, .{ .vpsrlw, .vmi, &.{ .ymm, .ymm, .imm8 }, &.{ 0x66, 0x0f, 0x71 }, 2, .vex_256_wig, .avx2 }, .{ .vpsrld, .rvm, &.{ .ymm, .ymm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xd2 }, 0, .vex_256_wig, .avx2 }, @@ -1329,5 +1398,7 @@ pub const table = [_]Entry{ .{ .vpunpcklwd, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x61 }, 0, .vex_256_wig, .avx2 }, .{ .vpunpckldq, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x62 }, 0, .vex_256_wig, .avx2 }, .{ .vpunpcklqdq, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x6c }, 0, .vex_256_wig, .avx2 }, + + .{ .vpxor, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xef }, 0, .vex_256_wig, .avx2 }, }; // zig fmt: on diff --git a/src/type.zig b/src/type.zig index 6122afda62..bcbb9e2ea2 100644 --- a/src/type.zig +++ b/src/type.zig @@ -5433,8 +5433,18 @@ pub const Type = extern union { } } + // Works for integers and vectors of integers. + pub fn maxInt(ty: Type, arena: Allocator, target: Target) !Value { + const scalar = try maxIntScalar(ty.scalarType(), arena, target); + if (ty.zigTypeTag() == .Vector and scalar.tag() != .the_only_possible_value) { + return Value.Tag.repeated.create(arena, scalar); + } else { + return scalar; + } + } + /// Asserts that self.zigTypeTag() == .Int.
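The Type.maxInt wrapper added above makes integer bound queries usable on vector types by splatting the scalar bound with Value.Tag.repeated; airFloatSign relies on this to build its sign masks from a vector-of-integers type. The user-visible semantics it mirrors (illustrative test; @splat is the two-argument form of this era):

    const std = @import("std");

    test "integer max splatted across a vector" {
        const V = @Vector(4, i32);
        const all_max: V = @splat(4, @as(i32, std.math.maxInt(i32)));
        try std.testing.expectEqual(@as(i32, 2147483647), all_max[0]);
        try std.testing.expectEqual(@as(i32, 2147483647), all_max[3]);
    }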
- pub fn maxInt(self: Type, arena: Allocator, target: Target) !Value { + pub fn maxIntScalar(self: Type, arena: Allocator, target: Target) !Value { assert(self.zigTypeTag() == .Int); const info = self.intInfo(target); diff --git a/test/behavior/floatop.zig b/test/behavior/floatop.zig index 9d17b05865..a3fd5b69e8 100644 --- a/test/behavior/floatop.zig +++ b/test/behavior/floatop.zig @@ -532,7 +532,6 @@ fn testFabs() !void { test "@fabs with vectors" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; diff --git a/test/behavior/math.zig b/test/behavior/math.zig index 991521b62c..7a563c1727 100644 --- a/test/behavior/math.zig +++ b/test/behavior/math.zig @@ -1612,7 +1612,6 @@ test "absFloat" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; diff --git a/test/behavior/translate_c_macros.zig b/test/behavior/translate_c_macros.zig index aa08e8c9aa..b3d1a688fe 100644 --- a/test/behavior/translate_c_macros.zig +++ b/test/behavior/translate_c_macros.zig @@ -65,7 +65,6 @@ test "cast negative integer to pointer" { test "casting to union with a macro" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO -- cgit v1.2.3 From 037bf1a580fe24b427e0ee5f7aecfec7202c1bf3 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Sun, 14 May 2023 17:26:44 -0400 Subject: x86_64: enable integer vector registers --- src/arch/x86_64/CodeGen.zig | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'src/arch') diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 80f537e046..51e86447dc 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -2261,11 +2261,11 @@ fn allocRegOrMemAdvanced(self: *Self, ty: Type, inst: ?Air.Inst.Index, reg_ok: b }, .Vector => switch (ty.childType().zigTypeTag()) { .Float => switch (ty.childType().floatBits(self.target.*)) { - 16, 32, 64 => if (self.hasFeature(.avx)) 32 else 16, - 80, 128 => break :need_mem, + 16, 32, 64, 128 => if (self.hasFeature(.avx)) 32 else 16, + 80 => break :need_mem, else => unreachable, }, - else => break :need_mem, + else => if (self.hasFeature(.avx)) 32 else 16, }, else => 8, })) { -- cgit v1.2.3 From 37ccf35ff207b8866b3fc433dd57d7c7d6bac710 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Sun, 14 May 2023 17:56:34 -0400 Subject: x86_64: fix struct_field_val crash --- src/arch/x86_64/CodeGen.zig | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'src/arch') diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 
51e86447dc..87e1f9e45b 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -5411,6 +5411,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { const field_ty = container_ty.structFieldType(index); if (!field_ty.hasRuntimeBitsIgnoreComptime()) break :result .none; const field_rc = regClassForType(field_ty); + const field_is_gp = field_rc.supersetOf(gp); const src_mcv = try self.resolveInst(operand); const field_off = switch (container_ty.containerLayout()) { @@ -5443,7 +5444,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { return self.fail("TODO implement struct_field_val with large packed field", .{}); } - const dst_reg = try self.register_manager.allocReg(inst, gp); + const dst_reg = try self.register_manager.allocReg(if (field_is_gp) inst else null, gp); const field_extra_bits = self.regExtraBits(field_ty); const load_abi_size = if (field_bit_off < field_extra_bits) field_abi_size else field_abi_size * 2; @@ -5494,7 +5495,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { if (field_extra_bits > 0) try self.truncateRegister(field_ty, dst_reg); const dst_mcv = MCValue{ .register = dst_reg }; - break :result if (field_rc.supersetOf(gp)) + break :result if (field_is_gp) dst_mcv else try self.copyToRegisterWithInstTracking(inst, field_ty, dst_mcv); -- cgit v1.2.3 From 77a8cb57287e8d6f8430f1dedecda2bfb30506f1 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Sun, 14 May 2023 20:27:31 -0400 Subject: x86_64: fix `@clz` and `@ctz` of `u8` --- src/arch/x86_64/CodeGen.zig | 61 ++++++++++++++++++++++++++++++++++++++------- test/behavior/math.zig | 2 -- 2 files changed, 52 insertions(+), 11 deletions(-) (limited to 'src/arch') diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 87e1f9e45b..9d5f877e14 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -4226,9 +4226,18 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void { const src_bits = src_ty.bitSize(self.target.*); if (self.hasFeature(.lzcnt)) { - if (src_bits <= 64) { + if (src_bits <= 8) { + const wide_reg = try self.copyToTmpRegister(src_ty, mat_src_mcv); + try self.truncateRegister(src_ty, wide_reg); + try self.genBinOpMir(.{ ._, .lzcnt }, Type.u32, dst_mcv, .{ .register = wide_reg }); + try self.genBinOpMir( + .{ ._, .sub }, + dst_ty, + dst_mcv, + .{ .immediate = 8 + self.regExtraBits(src_ty) }, + ); + } else if (src_bits <= 64) { try self.genBinOpMir(.{ ._, .lzcnt }, src_ty, dst_mcv, mat_src_mcv); - const extra_bits = self.regExtraBits(src_ty); if (extra_bits > 0) { try self.genBinOpMir(.{ ._, .sub }, dst_ty, dst_mcv, .{ .immediate = extra_bits }); @@ -4267,7 +4276,17 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void { const imm_reg = try self.copyToTmpRegister(dst_ty, .{ .immediate = src_bits ^ (src_bits - 1), }); - try self.genBinOpMir(.{ ._, .bsr }, src_ty, dst_mcv, mat_src_mcv); + const imm_lock = self.register_manager.lockRegAssumeUnused(imm_reg); + defer self.register_manager.unlockReg(imm_lock); + + if (src_bits <= 8) { + const wide_reg = try self.copyToTmpRegister(src_ty, mat_src_mcv); + const wide_lock = self.register_manager.lockRegAssumeUnused(wide_reg); + defer self.register_manager.unlockReg(wide_lock); + + try self.truncateRegister(src_ty, wide_reg); + try self.genBinOpMir(.{ ._, .bsr }, Type.u16, dst_mcv, .{ .register = wide_reg }); + } else try self.genBinOpMir(.{ ._, .bsr }, src_ty, dst_mcv, mat_src_mcv); const cmov_abi_size = @max(@intCast(u32, dst_ty.abiSize(self.target.*)), 2); try 
self.asmCmovccRegisterRegister( @@ -4281,7 +4300,20 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void { const imm_reg = try self.copyToTmpRegister(dst_ty, .{ .immediate = @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - self.regBitSize(dst_ty)), }); - try self.genBinOpMir(.{ ._, .bsr }, src_ty, dst_mcv, mat_src_mcv); + const imm_lock = self.register_manager.lockRegAssumeUnused(imm_reg); + defer self.register_manager.unlockReg(imm_lock); + + const wide_reg = try self.copyToTmpRegister(src_ty, mat_src_mcv); + const wide_lock = self.register_manager.lockRegAssumeUnused(wide_reg); + defer self.register_manager.unlockReg(wide_lock); + + try self.truncateRegister(src_ty, wide_reg); + try self.genBinOpMir( + .{ ._, .bsr }, + if (src_bits <= 8) Type.u16 else src_ty, + dst_mcv, + .{ .register = wide_reg }, + ); const cmov_abi_size = @max(@intCast(u32, dst_ty.abiSize(self.target.*)), 2); try self.asmCmovccRegisterRegister( @@ -4323,24 +4355,25 @@ fn airCtz(self: *Self, inst: Air.Inst.Index) !void { if (self.hasFeature(.bmi)) { if (src_bits <= 64) { - const extra_bits = self.regExtraBits(src_ty); + const extra_bits = self.regExtraBits(src_ty) + @as(u64, if (src_bits <= 8) 8 else 0); + const wide_ty = if (src_bits <= 8) Type.u16 else src_ty; const masked_mcv = if (extra_bits > 0) masked: { const tmp_mcv = tmp: { if (src_mcv.isImmediate() or self.liveness.operandDies(inst, 0)) break :tmp src_mcv; - try self.genSetReg(dst_reg, src_ty, src_mcv); + try self.genSetReg(dst_reg, wide_ty, src_mcv); break :tmp dst_mcv; }; try self.genBinOpMir( .{ ._, .@"or" }, - src_ty, + wide_ty, tmp_mcv, .{ .immediate = (@as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - extra_bits)) << @intCast(u6, src_bits) }, ); break :masked tmp_mcv; } else mat_src_mcv; - try self.genBinOpMir(.{ ._, .tzcnt }, src_ty, dst_mcv, masked_mcv); + try self.genBinOpMir(.{ ._, .tzcnt }, wide_ty, dst_mcv, masked_mcv); } else if (src_bits <= 128) { const tmp_reg = try self.register_manager.allocReg(null, gp); const tmp_mcv = MCValue{ .register = tmp_reg }; @@ -4369,7 +4402,17 @@ fn airCtz(self: *Self, inst: Air.Inst.Index) !void { return self.fail("TODO airCtz of {}", .{src_ty.fmt(self.bin_file.options.module.?)}); const width_reg = try self.copyToTmpRegister(dst_ty, .{ .immediate = src_bits }); - try self.genBinOpMir(.{ ._, .bsf }, src_ty, dst_mcv, mat_src_mcv); + const width_lock = self.register_manager.lockRegAssumeUnused(width_reg); + defer self.register_manager.unlockReg(width_lock); + + if (src_bits <= 8 or !math.isPowerOfTwo(src_bits)) { + const wide_reg = try self.copyToTmpRegister(src_ty, mat_src_mcv); + const wide_lock = self.register_manager.lockRegAssumeUnused(wide_reg); + defer self.register_manager.unlockReg(wide_lock); + + try self.truncateRegister(src_ty, wide_reg); + try self.genBinOpMir(.{ ._, .bsf }, Type.u16, dst_mcv, .{ .register = wide_reg }); + } else try self.genBinOpMir(.{ ._, .bsf }, src_ty, dst_mcv, mat_src_mcv); const cmov_abi_size = @max(@intCast(u32, dst_ty.abiSize(self.target.*)), 2); try self.asmCmovccRegisterRegister( diff --git a/test/behavior/math.zig b/test/behavior/math.zig index 7a563c1727..46f736bf74 100644 --- a/test/behavior/math.zig +++ b/test/behavior/math.zig @@ -61,7 +61,6 @@ fn assertFalse(b: bool) !void { } test "@clz" { - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return 
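The @clz/@ctz fix above widens sub-register operands before counting: for @clz, lzcnt/bsr runs on a zero-extended 16- or 32-bit register and the surplus leading zeros are subtracted afterwards; for @ctz, the unused high bits are ORed to ones so tzcnt/bsf can never count past the real width. A sketch of the @clz correction as plain Zig (hypothetical helper, not the backend code):

    const std = @import("std");

    fn clz8(x: u8) u4 {
        // Count in a zero-extended 32-bit value, then drop the 24 surplus leading zeros.
        const wide: u32 = x;
        return @intCast(u4, @clz(wide) - 24);
    }

    test "clz8 matches @clz on u8" {
        try std.testing.expectEqual(@clz(@as(u8, 1)), clz8(1));
        try std.testing.expectEqual(@clz(@as(u8, 0x80)), clz8(0x80));
        try std.testing.expectEqual(@clz(@as(u8, 0)), clz8(0));
    }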
error.SkipZigTest; // TODO @@ -142,7 +141,6 @@ fn expectVectorsEqual(a: anytype, b: anytype) !void { } test "@ctz" { - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO -- cgit v1.2.3 From 42d9789f46e94e17d8ab8d02f356eaaa44fb2822 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Mon, 15 May 2023 00:06:36 -0400 Subject: x86_64: fix sysv vector argument passing --- src/arch/x86_64/abi.zig | 30 ++---------------------------- 1 file changed, 2 insertions(+), 28 deletions(-) (limited to 'src/arch') diff --git a/src/arch/x86_64/abi.zig b/src/arch/x86_64/abi.zig index ff1a0ee520..e79424d6d8 100644 --- a/src/arch/x86_64/abi.zig +++ b/src/arch/x86_64/abi.zig @@ -165,34 +165,6 @@ pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class { }, .Vector => { const elem_ty = ty.childType(); - if (ctx == .arg) { - const bit_size = ty.bitSize(target); - if (bit_size > 128) { - const has_avx512 = target.cpu.features.isEnabled(@enumToInt(std.Target.x86.Feature.avx512f)); - if (has_avx512 and bit_size <= 512) return .{ - .integer, .integer, .integer, .integer, - .integer, .integer, .integer, .integer, - }; - const has_avx = target.cpu.features.isEnabled(@enumToInt(std.Target.x86.Feature.avx)); - if (has_avx and bit_size <= 256) return .{ - .integer, .integer, .integer, .integer, - .none, .none, .none, .none, - }; - return memory_class; - } - if (bit_size > 80) return .{ - .integer, .integer, .none, .none, - .none, .none, .none, .none, - }; - if (bit_size > 64) return .{ - .x87, .none, .none, .none, - .none, .none, .none, .none, - }; - return .{ - .integer, .none, .none, .none, - .none, .none, .none, .none, - }; - } const bits = elem_ty.bitSize(target) * ty.arrayLen(); if (bits <= 64) return .{ .sse, .none, .none, .none, @@ -202,6 +174,7 @@ pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class { .sse, .sseup, .none, .none, .none, .none, .none, .none, }; + if (ctx == .arg and !std.Target.x86.featureSetHas(target.cpu.features, .avx)) return memory_class; if (bits <= 192) return .{ .sse, .sseup, .sseup, .none, .none, .none, .none, .none, @@ -210,6 +183,7 @@ pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class { .sse, .sseup, .sseup, .sseup, .none, .none, .none, .none, }; + if (ctx == .arg and !std.Target.x86.featureSetHas(target.cpu.features, .avx512f)) return memory_class; if (bits <= 320) return .{ .sse, .sseup, .sseup, .sseup, .sseup, .none, .none, .none, -- cgit v1.2.3 From bd771bec49fbb7845ad2635c0dd13aa971a81fee Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Mon, 15 May 2023 00:26:30 -0400 Subject: x86_64: implement integer vector add/sub --- src/arch/x86_64/CodeGen.zig | 80 ++++++++++++++++++++++++++++++++++++++++--- src/arch/x86_64/Encoding.zig | 6 +++- src/arch/x86_64/Mir.zig | 11 ++++++ src/arch/x86_64/encodings.zig | 69 +++++++++++++++++++++++++++++++++++++ 4 files changed, 160 insertions(+), 6 deletions(-) (limited to 'src/arch') diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 9d5f877e14..b791ec5ecc 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -6520,6 +6520,57 @@ fn genBinOp( }, .Vector => switch (lhs_ty.childType().zigTypeTag()) { else => null, + .Int => switch (lhs_ty.childType().intInfo(self.target.*).bits) { + 8 => switch 
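The classifySystemV change above replaces the early special case for vector arguments with feature gates on the wide classes: vectors up to 128 bits always travel in SSE registers, 256-bit vectors need AVX, 512-bit vectors need AVX-512F, and anything else degrades to memory. A condensed model of the argument rule (illustrative only, not the compiler's API):

    const std = @import("std");

    const ArgClass = enum { sse, memory };

    fn classifyVectorArg(bits: u64, has_avx: bool, has_avx512f: bool) ArgClass {
        if (bits <= 128) return .sse; // sse, plus sseup for the high half
        if (bits <= 256) return if (has_avx) .sse else .memory;
        if (bits <= 512) return if (has_avx512f) .sse else .memory;
        return .memory;
    }

    test "256-bit vector arguments require AVX" {
        try std.testing.expectEqual(ArgClass.sse, classifyVectorArg(256, true, false));
        try std.testing.expectEqual(ArgClass.memory, classifyVectorArg(256, false, false));
    }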
(lhs_ty.vectorLen()) { + 1...16 => switch (air_tag) { + .add, + .addwrap, + => if (self.hasFeature(.avx)) .{ .vp_b, .add } else .{ .p_b, .add }, + .sub, + .subwrap, + => if (self.hasFeature(.avx)) .{ .vp_b, .sub } else .{ .p_b, .sub }, + else => null, + }, + else => null, + }, + 16 => switch (lhs_ty.vectorLen()) { + 1...8 => switch (air_tag) { + .add, + .addwrap, + => if (self.hasFeature(.avx)) .{ .vp_w, .add } else .{ .p_w, .add }, + .sub, + .subwrap, + => if (self.hasFeature(.avx)) .{ .vp_w, .sub } else .{ .p_w, .sub }, + else => null, + }, + else => null, + }, + 32 => switch (lhs_ty.vectorLen()) { + 1...4 => switch (air_tag) { + .add, + .addwrap, + => if (self.hasFeature(.avx)) .{ .vp_d, .add } else .{ .p_d, .add }, + .sub, + .subwrap, + => if (self.hasFeature(.avx)) .{ .vp_d, .sub } else .{ .p_d, .sub }, + else => null, + }, + else => null, + }, + 64 => switch (lhs_ty.vectorLen()) { + 1...2 => switch (air_tag) { + .add, + .addwrap, + => if (self.hasFeature(.avx)) .{ .vp_q, .add } else .{ .p_q, .add }, + .sub, + .subwrap, + => if (self.hasFeature(.avx)) .{ .vp_q, .sub } else .{ .p_q, .sub }, + else => null, + }, + else => null, + }, + else => null, + }, .Float => switch (lhs_ty.childType().floatBits(self.target.*)) { 16 => if (self.hasFeature(.f16c)) switch (lhs_ty.vectorLen()) { 1 => { @@ -6812,7 +6863,7 @@ fn genBinOp( ); } switch (air_tag) { - .add, .sub, .mul, .div_float, .div_exact => {}, + .add, .addwrap, .sub, .subwrap, .mul, .mulwrap, .div_float, .div_exact => {}, .div_trunc, .div_floor => try self.genRound( lhs_ty, dst_reg, @@ -9043,14 +9094,33 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr .{ .register = try self.copyToTmpRegister(ty, src_mcv) }, ), .sse => try self.asmRegisterRegister( - switch (ty.scalarType().zigTypeTag()) { - else => if (self.hasFeature(.avx)) .{ .v_, .movdqa } else .{ ._, .movdqa }, + if (@as(?Mir.Inst.FixedTag, switch (ty.scalarType().zigTypeTag()) { + else => switch (abi_size) { + 1...4 => if (self.hasFeature(.avx)) .{ .v_d, .mov } else .{ ._d, .mov }, + 5...8 => if (self.hasFeature(.avx)) .{ .v_q, .mov } else .{ ._q, .mov }, + 9...16 => if (self.hasFeature(.avx)) .{ .v_, .movdqa } else .{ ._, .movdqa }, + 17...32 => if (self.hasFeature(.avx)) .{ .v_, .movdqa } else null, + else => null, + }, .Float => switch (ty.floatBits(self.target.*)) { - else => if (self.hasFeature(.avx)) .{ .v_, .movdqa } else .{ ._, .movdqa }, + 16, 128 => switch (abi_size) { + 2...4 => if (self.hasFeature(.avx)) .{ .v_d, .mov } else .{ ._d, .mov }, + 5...8 => if (self.hasFeature(.avx)) .{ .v_q, .mov } else .{ ._q, .mov }, + 9...16 => if (self.hasFeature(.avx)) + .{ .v_, .movdqa } + else + .{ ._, .movdqa }, + 17...32 => if (self.hasFeature(.avx)) .{ .v_, .movdqa } else null, + else => null, + }, 32 => if (self.hasFeature(.avx)) .{ .v_ps, .mova } else .{ ._ps, .mova }, 64 => if (self.hasFeature(.avx)) .{ .v_pd, .mova } else .{ ._pd, .mova }, + 80 => null, + else => unreachable, }, - }, + })) |tag| tag else return self.fail("TODO implement genSetReg for {}", .{ + ty.fmt(self.bin_file.options.module.?), + }), registerAlias(dst_reg, abi_size), registerAlias(src_reg, abi_size), ), diff --git a/src/arch/x86_64/Encoding.zig b/src/arch/x86_64/Encoding.zig index 4014947673..c8919d062d 100644 --- a/src/arch/x86_64/Encoding.zig +++ b/src/arch/x86_64/Encoding.zig @@ -262,7 +262,9 @@ pub const Mnemonic = enum { fisttp, fld, // MMX movd, movq, + paddb, paddd, paddq, paddsb, paddsw, paddusb, paddusw, paddw, pand, pandn, por, pxor, + psubb, psubd, psubq, 
psubsb, psubsw, psubusb, psubusw, psubw, // SSE addps, addss, andps, @@ -341,12 +343,14 @@ pub const Mnemonic = enum { vmovupd, vmovups, vmulpd, vmulps, vmulsd, vmulss, vorpd, vorps, + vpaddb, vpaddd, vpaddq, vpaddsb, vpaddsw, vpaddusb, vpaddusw, vpaddw, vpand, vpandn, vpextrb, vpextrd, vpextrq, vpextrw, vpinsrb, vpinsrd, vpinsrq, vpinsrw, vpor, vpshufhw, vpshuflw, vpsrld, vpsrlq, vpsrlw, + vpsubb, vpsubd, vpsubq, vpsubsb, vpsubsw, vpsubusb, vpsubusw, vpsubw, vpunpckhbw, vpunpckhdq, vpunpckhqdq, vpunpckhwd, vpunpcklbw, vpunpckldq, vpunpcklqdq, vpunpcklwd, vpxor, @@ -746,7 +750,7 @@ fn estimateInstructionLength(prefix: Prefix, encoding: Encoding, ops: []const Op } const mnemonic_to_encodings_map = init: { - @setEvalBranchQuota(25_000); + @setEvalBranchQuota(30_000); const encodings = @import("encodings.zig"); var entries = encodings.table; std.sort.sort(encodings.Entry, &entries, {}, struct { diff --git a/src/arch/x86_64/Mir.zig b/src/arch/x86_64/Mir.zig index 4d1f59e454..58eab29958 100644 --- a/src/arch/x86_64/Mir.zig +++ b/src/arch/x86_64/Mir.zig @@ -288,6 +288,7 @@ pub const Inst = struct { /// Add with carry adc, /// Add + /// Add packed integers /// Add packed single-precision floating-point values /// Add scalar single-precision floating-point values /// Add packed double-precision floating-point values @@ -420,6 +421,7 @@ pub const Inst = struct { /// Double precision shift right sh, /// Subtract + /// Subtract packed integers /// Subtract packed single-precision floating-point values /// Subtract scalar single-precision floating-point values /// Subtract packed double-precision floating-point values @@ -444,9 +446,18 @@ pub const Inst = struct { /// Bitwise logical xor of packed double-precision floating-point values xor, + /// Add packed signed integers with signed saturation + adds, + /// Add packed unsigned integers with unsigned saturation + addus, /// Bitwise logical and not of packed single-precision floating-point values /// Bitwise logical and not of packed double-precision floating-point values andn, + /// Subtract packed signed integers with signed saturation + subs, + /// Subtract packed unsigned integers with unsigned saturation + subus, + /// Convert packed doubleword integers to packed single-precision floating-point values /// Convert packed doubleword integers to packed double-precision floating-point values cvtpi2, diff --git a/src/arch/x86_64/encodings.zig b/src/arch/x86_64/encodings.zig index 3e57be61ea..820fd715ba 100644 --- a/src/arch/x86_64/encodings.zig +++ b/src/arch/x86_64/encodings.zig @@ -992,6 +992,17 @@ pub const table = [_]Entry{ .{ .orpd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x56 }, 0, .none, .sse2 }, + .{ .paddb, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xfc }, 0, .none, .sse2 }, + .{ .paddw, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xfd }, 0, .none, .sse2 }, + .{ .paddd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xfe }, 0, .none, .sse2 }, + .{ .paddq, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xd4 }, 0, .none, .sse2 }, + + .{ .paddsb, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xec }, 0, .none, .sse2 }, + .{ .paddsw, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xed }, 0, .none, .sse2 }, + + .{ .paddusb, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xdc }, 0, .none, .sse2 }, + .{ .paddusw, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xdd }, 0, .none, .sse2 }, + .{ .pand, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xdb }, 0, .none, .sse2 }, .{ .pandn, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xdf }, 0, .none, .sse2 }, @@ -1013,6 
+1024,18 @@ pub const table = [_]Entry{ .{ .psrlq, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xd3 }, 0, .none, .sse2 }, .{ .psrlq, .mi, &.{ .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x73 }, 2, .none, .sse2 }, + .{ .psubb, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xf8 }, 0, .none, .sse2 }, + .{ .psubw, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xf9 }, 0, .none, .sse2 }, + .{ .psubd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xfa }, 0, .none, .sse2 }, + + .{ .psubsb, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xe8 }, 0, .none, .sse2 }, + .{ .psubsw, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xe9 }, 0, .none, .sse2 }, + + .{ .psubq, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xfb }, 0, .none, .sse2 }, + + .{ .psubusb, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xd8 }, 0, .none, .sse2 }, + .{ .psubusw, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xd9 }, 0, .none, .sse2 }, + .{ .punpckhbw, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x68 }, 0, .none, .sse2 }, .{ .punpckhwd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x69 }, 0, .none, .sse2 }, .{ .punpckhdq, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x6a }, 0, .none, .sse2 }, @@ -1261,6 +1284,17 @@ pub const table = [_]Entry{ .{ .vorps, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x0f, 0x56 }, 0, .vex_128_wig, .avx }, .{ .vorps, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x0f, 0x56 }, 0, .vex_256_wig, .avx }, + .{ .vpaddb, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xfc }, 0, .vex_128_wig, .avx }, + .{ .vpaddw, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xfd }, 0, .vex_128_wig, .avx }, + .{ .vpaddd, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xfe }, 0, .vex_128_wig, .avx }, + .{ .vpaddq, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xd4 }, 0, .vex_128_wig, .avx }, + + .{ .vpaddsb, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xec }, 0, .vex_128_wig, .avx }, + .{ .vpaddsw, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xed }, 0, .vex_128_wig, .avx }, + + .{ .vpaddusb, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xdc }, 0, .vex_128_wig, .avx }, + .{ .vpaddusw, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xdd }, 0, .vex_128_wig, .avx }, + .{ .vpand, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xdb }, 0, .vex_128_wig, .avx }, .{ .vpandn, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xdf }, 0, .vex_128_wig, .avx }, @@ -1287,6 +1321,18 @@ pub const table = [_]Entry{ .{ .vpsrlq, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xd3 }, 0, .vex_128_wig, .avx }, .{ .vpsrlq, .vmi, &.{ .xmm, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x73 }, 2, .vex_128_wig, .avx }, + .{ .vpsubb, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xf8 }, 0, .vex_128_wig, .avx }, + .{ .vpsubw, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xf9 }, 0, .vex_128_wig, .avx }, + .{ .vpsubd, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xfa }, 0, .vex_128_wig, .avx }, + + .{ .vpsubsb, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xe8 }, 0, .vex_128_wig, .avx }, + .{ .vpsubsw, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xe9 }, 0, .vex_128_wig, .avx }, + + .{ .vpsubq, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xfb }, 0, .vex_128_wig, .avx }, + + .{ .vpsubusb, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xd8 }, 0, .vex_128_wig, .avx }, + .{ .vpsubusw, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xd9 }, 0, .vex_128_wig, .avx }, + .{ .vpunpckhbw, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x68 }, 0, .vex_128_wig, .avx }, .{ .vpunpckhwd, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 
0x66, 0x0f, 0x69 }, 0, .vex_128_wig, .avx }, .{ .vpunpckhdq, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x6a }, 0, .vex_128_wig, .avx }, @@ -1376,6 +1422,17 @@ pub const table = [_]Entry{ .{ .vbroadcastss, .rm, &.{ .ymm, .xmm }, &.{ 0x66, 0x0f, 0x38, 0x18 }, 0, .vex_256_w0, .avx2 }, .{ .vbroadcastsd, .rm, &.{ .ymm, .xmm }, &.{ 0x66, 0x0f, 0x38, 0x19 }, 0, .vex_256_w0, .avx2 }, + .{ .vpaddb, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xfc }, 0, .vex_256_wig, .avx2 }, + .{ .vpaddw, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xfd }, 0, .vex_256_wig, .avx2 }, + .{ .vpaddd, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xfe }, 0, .vex_256_wig, .avx2 }, + .{ .vpaddq, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xd4 }, 0, .vex_256_wig, .avx2 }, + + .{ .vpaddsb, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xec }, 0, .vex_256_wig, .avx2 }, + .{ .vpaddsw, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xed }, 0, .vex_256_wig, .avx2 }, + + .{ .vpaddusb, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xdc }, 0, .vex_256_wig, .avx2 }, + .{ .vpaddusw, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xdd }, 0, .vex_256_wig, .avx2 }, + .{ .vpand, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xdb }, 0, .vex_256_wig, .avx2 }, .{ .vpandn, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xdf }, 0, .vex_256_wig, .avx2 }, @@ -1389,6 +1446,18 @@ pub const table = [_]Entry{ .{ .vpsrlq, .rvm, &.{ .ymm, .ymm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xd3 }, 0, .vex_256_wig, .avx2 }, .{ .vpsrlq, .vmi, &.{ .ymm, .ymm, .imm8 }, &.{ 0x66, 0x0f, 0x73 }, 2, .vex_256_wig, .avx2 }, + .{ .vpsubb, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xf8 }, 0, .vex_256_wig, .avx2 }, + .{ .vpsubw, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xf9 }, 0, .vex_256_wig, .avx2 }, + .{ .vpsubd, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xfa }, 0, .vex_256_wig, .avx2 }, + + .{ .vpsubsb, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xe8 }, 0, .vex_256_wig, .avx2 }, + .{ .vpsubsw, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xe9 }, 0, .vex_256_wig, .avx2 }, + + .{ .vpsubq, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xfb }, 0, .vex_256_wig, .avx2 }, + + .{ .vpsubusb, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xd8 }, 0, .vex_256_wig, .avx2 }, + .{ .vpsubusw, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xd9 }, 0, .vex_256_wig, .avx2 }, + .{ .vpunpckhbw, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x68 }, 0, .vex_256_wig, .avx2 }, .{ .vpunpckhwd, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x69 }, 0, .vex_256_wig, .avx2 }, .{ .vpunpckhdq, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x6a }, 0, .vex_256_wig, .avx2 }, -- cgit v1.2.3 From f39ff6cc68ab7a0d8ef349d4d930118890c19b01 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Mon, 15 May 2023 01:15:37 -0400 Subject: x86_64: implement integer vector mul --- src/arch/x86_64/CodeGen.zig | 59 +++++++++++++++++++++++++++++++++++++++++-- src/arch/x86_64/Encoding.zig | 3 +++ src/arch/x86_64/Mir.zig | 4 +++ src/arch/x86_64/encodings.zig | 24 +++++++++++++++--- test/behavior/vector.zig | 3 ++- 5 files changed, 87 insertions(+), 6 deletions(-) (limited to 'src/arch') diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index b791ec5ecc..c5af53b2cf 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -2800,8 +2800,10 @@ fn airMulDivBinOp(self: *Self, inst: Air.Inst.Index) !void { const result = result: { const tag = 
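With the padd*/psub* encodings from the previous commit wired into genBinOp, wrapping vector addition and subtraction lower to single SSE/AVX instructions; the mul commit that starts here extends the same switch. An illustrative test in the style of test/behavior/vector.zig:

    const std = @import("std");

    test "u8 vector wrapping add lowers to paddb" {
        const V = @Vector(16, u8);
        const a: V = @splat(16, @as(u8, 250));
        const b: V = @splat(16, @as(u8, 10));
        const sum = a +% b; // one paddb, or vpaddb with AVX
        try std.testing.expectEqual(@as(u8, 4), sum[0]); // 250 + 10 wraps to 4
    }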
self.air.instructions.items(.tag)[inst]; const dst_ty = self.air.typeOfIndex(inst); - if (dst_ty.zigTypeTag() == .Float) - break :result try self.genBinOp(inst, tag, bin_op.lhs, bin_op.rhs); + switch (dst_ty.zigTypeTag()) { + .Float, .Vector => break :result try self.genBinOp(inst, tag, bin_op.lhs, bin_op.rhs), + else => {}, + } const dst_info = dst_ty.intInfo(self.target.*); var src_pl = Type.Payload.Bits{ .base = .{ .tag = switch (dst_info.signedness) { @@ -6531,6 +6533,15 @@ fn genBinOp( => if (self.hasFeature(.avx)) .{ .vp_b, .sub } else .{ .p_b, .sub }, else => null, }, + 17...32 => switch (air_tag) { + .add, + .addwrap, + => if (self.hasFeature(.avx2)) .{ .vp_b, .add } else null, + .sub, + .subwrap, + => if (self.hasFeature(.avx2)) .{ .vp_b, .sub } else null, + else => null, + }, else => null, }, 16 => switch (lhs_ty.vectorLen()) { @@ -6541,6 +6552,21 @@ fn genBinOp( .sub, .subwrap, => if (self.hasFeature(.avx)) .{ .vp_w, .sub } else .{ .p_w, .sub }, + .mul, + .mulwrap, + => if (self.hasFeature(.avx)) .{ .vp_w, .mull } else .{ .p_d, .mull }, + else => null, + }, + 9...16 => switch (air_tag) { + .add, + .addwrap, + => if (self.hasFeature(.avx2)) .{ .vp_w, .add } else null, + .sub, + .subwrap, + => if (self.hasFeature(.avx2)) .{ .vp_w, .sub } else null, + .mul, + .mulwrap, + => if (self.hasFeature(.avx2)) .{ .vp_w, .mull } else null, else => null, }, else => null, @@ -6553,6 +6579,26 @@ fn genBinOp( .sub, .subwrap, => if (self.hasFeature(.avx)) .{ .vp_d, .sub } else .{ .p_d, .sub }, + .mul, + .mulwrap, + => if (self.hasFeature(.avx)) + .{ .vp_d, .mull } + else if (self.hasFeature(.sse4_1)) + .{ .p_d, .mull } + else + null, + else => null, + }, + 5...8 => switch (air_tag) { + .add, + .addwrap, + => if (self.hasFeature(.avx2)) .{ .vp_d, .add } else null, + .sub, + .subwrap, + => if (self.hasFeature(.avx2)) .{ .vp_d, .sub } else null, + .mul, + .mulwrap, + => if (self.hasFeature(.avx2)) .{ .vp_d, .mull } else null, else => null, }, else => null, @@ -6567,6 +6613,15 @@ fn genBinOp( => if (self.hasFeature(.avx)) .{ .vp_q, .sub } else .{ .p_q, .sub }, else => null, }, + 3...4 => switch (air_tag) { + .add, + .addwrap, + => if (self.hasFeature(.avx2)) .{ .vp_q, .add } else null, + .sub, + .subwrap, + => if (self.hasFeature(.avx2)) .{ .vp_q, .sub } else null, + else => null, + }, else => null, }, else => null, diff --git a/src/arch/x86_64/Encoding.zig b/src/arch/x86_64/Encoding.zig index c8919d062d..7b029cdb4f 100644 --- a/src/arch/x86_64/Encoding.zig +++ b/src/arch/x86_64/Encoding.zig @@ -264,6 +264,7 @@ pub const Mnemonic = enum { movd, movq, paddb, paddd, paddq, paddsb, paddsw, paddusb, paddusw, paddw, pand, pandn, por, pxor, + pmulhw, pmullw, psubb, psubd, psubq, psubsb, psubsw, psubusb, psubusw, psubw, // SSE addps, addss, @@ -317,6 +318,7 @@ pub const Mnemonic = enum { insertps, pextrb, pextrd, pextrq, pinsrb, pinsrd, pinsrq, + pmulld, roundpd, roundps, roundsd, roundss, // AVX vaddpd, vaddps, vaddsd, vaddss, @@ -347,6 +349,7 @@ pub const Mnemonic = enum { vpand, vpandn, vpextrb, vpextrd, vpextrq, vpextrw, vpinsrb, vpinsrd, vpinsrq, vpinsrw, + vpmulhw, vpmulld, vpmullw, vpor, vpshufhw, vpshuflw, vpsrld, vpsrlq, vpsrlw, diff --git a/src/arch/x86_64/Mir.zig b/src/arch/x86_64/Mir.zig index 58eab29958..a18792e6aa 100644 --- a/src/arch/x86_64/Mir.zig +++ b/src/arch/x86_64/Mir.zig @@ -453,6 +453,10 @@ pub const Inst = struct { /// Bitwise logical and not of packed single-precision floating-point values /// Bitwise logical and not of packed double-precision floating-point values andn, + /// 
Multiply packed signed integers and store low result + mull, + /// Multiply packed signed integers and store high result + mulh, /// Subtract packed signed integers with signed saturation subs, /// Subtract packed unsigned integers with unsigned saturation diff --git a/src/arch/x86_64/encodings.zig b/src/arch/x86_64/encodings.zig index 820fd715ba..86a79596cd 100644 --- a/src/arch/x86_64/encodings.zig +++ b/src/arch/x86_64/encodings.zig @@ -1011,6 +1011,10 @@ pub const table = [_]Entry{ .{ .pinsrw, .rmi, &.{ .xmm, .r32_m16, .imm8 }, &.{ 0x66, 0x0f, 0xc4 }, 0, .none, .sse2 }, + .{ .pmulhw, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xe5 }, 0, .none, .sse2 }, + + .{ .pmullw, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xd5 }, 0, .none, .sse2 }, + .{ .por, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xeb }, 0, .none, .sse2 }, .{ .pshufhw, .rmi, &.{ .xmm, .xmm_m128, .imm8 }, &.{ 0xf3, 0x0f, 0x70 }, 0, .none, .sse2 }, @@ -1087,6 +1091,8 @@ pub const table = [_]Entry{ .{ .pinsrd, .rmi, &.{ .xmm, .rm32, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x22 }, 0, .none, .sse4_1 }, .{ .pinsrq, .rmi, &.{ .xmm, .rm64, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x22 }, 0, .long, .sse4_1 }, + .{ .pmulld, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x40 }, 0, .none, .sse4_1 }, + .{ .roundpd, .rmi, &.{ .xmm, .xmm_m128, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x09 }, 0, .none, .sse4_1 }, .{ .roundps, .rmi, &.{ .xmm, .xmm_m128, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x08 }, 0, .none, .sse4_1 }, @@ -1312,6 +1318,12 @@ pub const table = [_]Entry{ .{ .vpinsrw, .rvmi, &.{ .xmm, .xmm, .r32_m16, .imm8 }, &.{ 0x66, 0x0f, 0xc4 }, 0, .vex_128_wig, .avx }, + .{ .vpmulhw, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xe5 }, 0, .vex_128_wig, .avx }, + + .{ .vpmulld, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x40 }, 0, .vex_128_wig, .avx }, + + .{ .vpmullw, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xd5 }, 0, .vex_128_wig, .avx }, + .{ .vpor, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xeb }, 0, .vex_128_wig, .avx }, .{ .vpsrlw, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xd1 }, 0, .vex_128_wig, .avx }, @@ -1418,9 +1430,9 @@ pub const table = [_]Entry{ .{ .vfmadd231ss, .rvm, &.{ .xmm, .xmm, .xmm_m32 }, &.{ 0x66, 0x0f, 0x38, 0xb9 }, 0, .vex_lig_w0, .fma }, // AVX2 - .{ .vbroadcastss, .rm, &.{ .xmm, .xmm }, &.{ 0x66, 0x0f, 0x38, 0x18 }, 0, .vex_128_w0, .avx2 }, - .{ .vbroadcastss, .rm, &.{ .ymm, .xmm }, &.{ 0x66, 0x0f, 0x38, 0x18 }, 0, .vex_256_w0, .avx2 }, - .{ .vbroadcastsd, .rm, &.{ .ymm, .xmm }, &.{ 0x66, 0x0f, 0x38, 0x19 }, 0, .vex_256_w0, .avx2 }, + .{ .vbroadcastss, .rm, &.{ .xmm, .xmm }, &.{ 0x66, 0x0f, 0x38, 0x18 }, 0, .vex_128_w0, .avx2 }, + .{ .vbroadcastss, .rm, &.{ .ymm, .xmm }, &.{ 0x66, 0x0f, 0x38, 0x18 }, 0, .vex_256_w0, .avx2 }, + .{ .vbroadcastsd, .rm, &.{ .ymm, .xmm }, &.{ 0x66, 0x0f, 0x38, 0x19 }, 0, .vex_256_w0, .avx2 }, .{ .vpaddb, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xfc }, 0, .vex_256_wig, .avx2 }, .{ .vpaddw, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xfd }, 0, .vex_256_wig, .avx2 }, @@ -1437,6 +1449,12 @@ pub const table = [_]Entry{ .{ .vpandn, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xdf }, 0, .vex_256_wig, .avx2 }, + .{ .vpmulhw, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xe5 }, 0, .vex_256_wig, .avx }, + + .{ .vpmulld, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0x40 }, 0, .vex_256_wig, .avx }, + + .{ .vpmullw, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xd5 }, 0, .vex_256_wig, .avx }, + .{ .vpor, .rvm, &.{ .ymm, .ymm, 
.ymm_m256 }, &.{ 0x66, 0x0f, 0xeb }, 0, .vex_256_wig, .avx2 }, .{ .vpsrlw, .rvm, &.{ .ymm, .ymm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xd1 }, 0, .vex_256_wig, .avx2 }, diff --git a/test/behavior/vector.zig b/test/behavior/vector.zig index 87ccdfb567..5d217a5ce0 100644 --- a/test/behavior/vector.zig +++ b/test/behavior/vector.zig @@ -26,7 +26,8 @@ test "implicit cast vector to array - bool" { test "vector wrap operators" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64 and + !comptime std.Target.x86.featureSetHas(builtin.cpu.features, .sse4_1)) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO -- cgit v1.2.3 From 40457a3696da015fe1396d6c84191b83731910db Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Mon, 15 May 2023 01:44:26 -0400 Subject: x86_64: implement integer vector bitwise operations --- src/arch/x86_64/CodeGen.zig | 25 +++++++++++++++++++++++++ test/behavior/vector.zig | 1 - 2 files changed, 25 insertions(+), 1 deletion(-) (limited to 'src/arch') diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index c5af53b2cf..ed2c596f8f 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -6531,6 +6531,9 @@ fn genBinOp( .sub, .subwrap, => if (self.hasFeature(.avx)) .{ .vp_b, .sub } else .{ .p_b, .sub }, + .bit_and => if (self.hasFeature(.avx)) .{ .vp_, .@"and" } else .{ .p_, .@"and" }, + .bit_or => if (self.hasFeature(.avx)) .{ .vp_, .@"or" } else .{ .p_, .@"or" }, + .xor => if (self.hasFeature(.avx)) .{ .vp_, .xor } else .{ .p_, .xor }, else => null, }, 17...32 => switch (air_tag) { @@ -6540,6 +6543,9 @@ fn genBinOp( .sub, .subwrap, => if (self.hasFeature(.avx2)) .{ .vp_b, .sub } else null, + .bit_and => if (self.hasFeature(.avx2)) .{ .vp_, .@"and" } else null, + .bit_or => if (self.hasFeature(.avx2)) .{ .vp_, .@"or" } else null, + .xor => if (self.hasFeature(.avx2)) .{ .vp_, .xor } else null, else => null, }, else => null, @@ -6555,6 +6561,9 @@ fn genBinOp( .mul, .mulwrap, => if (self.hasFeature(.avx)) .{ .vp_w, .mull } else .{ .p_d, .mull }, + .bit_and => if (self.hasFeature(.avx)) .{ .vp_, .@"and" } else .{ .p_, .@"and" }, + .bit_or => if (self.hasFeature(.avx)) .{ .vp_, .@"or" } else .{ .p_, .@"or" }, + .xor => if (self.hasFeature(.avx)) .{ .vp_, .xor } else .{ .p_, .xor }, else => null, }, 9...16 => switch (air_tag) { @@ -6567,6 +6576,9 @@ fn genBinOp( .mul, .mulwrap, => if (self.hasFeature(.avx2)) .{ .vp_w, .mull } else null, + .bit_and => if (self.hasFeature(.avx2)) .{ .vp_, .@"and" } else null, + .bit_or => if (self.hasFeature(.avx2)) .{ .vp_, .@"or" } else null, + .xor => if (self.hasFeature(.avx2)) .{ .vp_, .xor } else null, else => null, }, else => null, @@ -6587,6 +6599,9 @@ fn genBinOp( .{ .p_d, .mull } else null, + .bit_and => if (self.hasFeature(.avx)) .{ .vp_, .@"and" } else .{ .p_, .@"and" }, + .bit_or => if (self.hasFeature(.avx)) .{ .vp_, .@"or" } else .{ .p_, .@"or" }, + .xor => if (self.hasFeature(.avx)) .{ .vp_, .xor } else .{ .p_, .xor }, else => null, }, 5...8 => switch (air_tag) { @@ -6599,6 +6614,9 @@ fn genBinOp( .mul, .mulwrap, => if (self.hasFeature(.avx2)) .{ .vp_d, .mull } else null, + .bit_and => if (self.hasFeature(.avx2)) .{ .vp_, .@"and" } else null, + .bit_or => 
if (self.hasFeature(.avx2)) .{ .vp_, .@"or" } else null, + .xor => if (self.hasFeature(.avx2)) .{ .vp_, .xor } else null, else => null, }, else => null, @@ -6611,6 +6629,9 @@ fn genBinOp( .sub, .subwrap, => if (self.hasFeature(.avx)) .{ .vp_q, .sub } else .{ .p_q, .sub }, + .bit_and => if (self.hasFeature(.avx)) .{ .vp_, .@"and" } else .{ .p_, .@"and" }, + .bit_or => if (self.hasFeature(.avx)) .{ .vp_, .@"or" } else .{ .p_, .@"or" }, + .xor => if (self.hasFeature(.avx)) .{ .vp_, .xor } else .{ .p_, .xor }, else => null, }, 3...4 => switch (air_tag) { @@ -6620,6 +6641,9 @@ fn genBinOp( .sub, .subwrap, => if (self.hasFeature(.avx2)) .{ .vp_q, .sub } else null, + .bit_and => if (self.hasFeature(.avx2)) .{ .vp_, .@"and" } else null, + .bit_or => if (self.hasFeature(.avx2)) .{ .vp_, .@"or" } else null, + .xor => if (self.hasFeature(.avx2)) .{ .vp_, .xor } else null, else => null, }, else => null, @@ -6929,6 +6953,7 @@ fn genBinOp( else => unreachable, }, ), + .bit_and, .bit_or, .xor => {}, .max, .min => {}, // TODO: unordered select else => unreachable, } diff --git a/test/behavior/vector.zig b/test/behavior/vector.zig index 5d217a5ce0..05c9517c20 100644 --- a/test/behavior/vector.zig +++ b/test/behavior/vector.zig @@ -120,7 +120,6 @@ test "vector float operators" { test "vector bit operators" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO -- cgit v1.2.3 From cea9ac772a518ff249d47fc2cb7b2776c786ac07 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Mon, 15 May 2023 02:55:41 -0400 Subject: x86_64: implement integer vector min/max --- src/arch/x86_64/CodeGen.zig | 100 ++++++++++++++++++++++++++++++++++++++++++ src/arch/x86_64/Encoding.zig | 4 ++ src/arch/x86_64/Mir.zig | 8 ++++ src/arch/x86_64/encodings.zig | 58 ++++++++++++++++++++++++ 4 files changed, 170 insertions(+) (limited to 'src/arch') diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index ed2c596f8f..2cd5721258 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -6534,6 +6534,34 @@ fn genBinOp( .bit_and => if (self.hasFeature(.avx)) .{ .vp_, .@"and" } else .{ .p_, .@"and" }, .bit_or => if (self.hasFeature(.avx)) .{ .vp_, .@"or" } else .{ .p_, .@"or" }, .xor => if (self.hasFeature(.avx)) .{ .vp_, .xor } else .{ .p_, .xor }, + .min => switch (lhs_ty.childType().intInfo(self.target.*).signedness) { + .signed => if (self.hasFeature(.avx)) + .{ .vp_b, .mins } + else if (self.hasFeature(.sse4_1)) + .{ .p_b, .mins } + else + null, + .unsigned => if (self.hasFeature(.avx)) + .{ .vp_b, .minu } + else if (self.hasFeature(.sse4_1)) + .{ .p_b, .minu } + else + null, + }, + .max => switch (lhs_ty.childType().intInfo(self.target.*).signedness) { + .signed => if (self.hasFeature(.avx)) + .{ .vp_b, .maxs } + else if (self.hasFeature(.sse4_1)) + .{ .p_b, .maxs } + else + null, + .unsigned => if (self.hasFeature(.avx)) + .{ .vp_b, .maxu } + else if (self.hasFeature(.sse4_1)) + .{ .p_b, .maxu } + else + null, + }, else => null, }, 17...32 => switch (air_tag) { @@ -6546,6 +6574,14 @@ fn genBinOp( .bit_and => if (self.hasFeature(.avx2)) .{ .vp_, .@"and" } else null, .bit_or => if (self.hasFeature(.avx2)) .{ .vp_, .@"or" } else null, .xor => if (self.hasFeature(.avx2)) .{ .vp_, 
.xor } else null, + .min => switch (lhs_ty.childType().intInfo(self.target.*).signedness) { + .signed => if (self.hasFeature(.avx2)) .{ .vp_b, .mins } else null, + .unsigned => if (self.hasFeature(.avx2)) .{ .vp_b, .minu } else null, + }, + .max => switch (lhs_ty.childType().intInfo(self.target.*).signedness) { + .signed => if (self.hasFeature(.avx2)) .{ .vp_b, .maxs } else null, + .unsigned => if (self.hasFeature(.avx2)) .{ .vp_b, .maxu } else null, + }, else => null, }, else => null, @@ -6564,6 +6600,26 @@ fn genBinOp( .bit_and => if (self.hasFeature(.avx)) .{ .vp_, .@"and" } else .{ .p_, .@"and" }, .bit_or => if (self.hasFeature(.avx)) .{ .vp_, .@"or" } else .{ .p_, .@"or" }, .xor => if (self.hasFeature(.avx)) .{ .vp_, .xor } else .{ .p_, .xor }, + .min => switch (lhs_ty.childType().intInfo(self.target.*).signedness) { + .signed => if (self.hasFeature(.avx)) + .{ .vp_w, .mins } + else + .{ .p_w, .mins }, + .unsigned => if (self.hasFeature(.avx)) + .{ .vp_w, .minu } + else if (self.hasFeature(.sse4_1)) + .{ .p_w, .minu } else null, + }, + .max => switch (lhs_ty.childType().intInfo(self.target.*).signedness) { + .signed => if (self.hasFeature(.avx)) + .{ .vp_w, .maxs } + else + .{ .p_w, .maxs }, + .unsigned => if (self.hasFeature(.avx)) + .{ .vp_w, .maxu } + else if (self.hasFeature(.sse4_1)) + .{ .p_w, .maxu } else null, + }, else => null, }, 9...16 => switch (air_tag) { @@ -6579,6 +6635,14 @@ fn genBinOp( .mul, .mulwrap, => if (self.hasFeature(.avx2)) .{ .vp_w, .mull } else null, .bit_and => if (self.hasFeature(.avx2)) .{ .vp_, .@"and" } else null, .bit_or => if (self.hasFeature(.avx2)) .{ .vp_, .@"or" } else null, .xor => if (self.hasFeature(.avx2)) .{ .vp_, .xor } else null, + .min => switch (lhs_ty.childType().intInfo(self.target.*).signedness) { + .signed => if (self.hasFeature(.avx2)) .{ .vp_w, .mins } else null, + .unsigned => if (self.hasFeature(.avx2)) .{ .vp_w, .minu } else null, + }, + .max => switch (lhs_ty.childType().intInfo(self.target.*).signedness) { + .signed => if (self.hasFeature(.avx2)) .{ .vp_w, .maxs } else null, + .unsigned => if (self.hasFeature(.avx2)) .{ .vp_w, .maxu } else null, + }, else => null, }, else => null, @@ -6602,6 +6666,34 @@ fn genBinOp( .bit_and => if (self.hasFeature(.avx)) .{ .vp_, .@"and" } else .{ .p_, .@"and" }, .bit_or => if (self.hasFeature(.avx)) .{ .vp_, .@"or" } else .{ .p_, .@"or" }, .xor => if (self.hasFeature(.avx)) .{ .vp_, .xor } else .{ .p_, .xor }, + .min => switch (lhs_ty.childType().intInfo(self.target.*).signedness) { + .signed => if (self.hasFeature(.avx)) + .{ .vp_d, .mins } + else if (self.hasFeature(.sse4_1)) + .{ .p_d, .mins } + else + null, + .unsigned => if (self.hasFeature(.avx)) + .{ .vp_d, .minu } + else if (self.hasFeature(.sse4_1)) + .{ .p_d, .minu } + else + null, + }, + .max => switch (lhs_ty.childType().intInfo(self.target.*).signedness) { + .signed => if (self.hasFeature(.avx)) + .{ .vp_d, .maxs } + else if (self.hasFeature(.sse4_1)) + .{ .p_d, .maxs } + else + null, + .unsigned => if (self.hasFeature(.avx)) + .{ .vp_d, .maxu } + else if (self.hasFeature(.sse4_1)) + .{ .p_d, .maxu } + else + null, + }, else => null, }, 5...8 => switch (air_tag) { .mul, .mulwrap, => if (self.hasFeature(.avx2)) .{ .vp_d, .mull } else null, .bit_and => if (self.hasFeature(.avx2)) .{ .vp_, .@"and" } else null, .bit_or => if (self.hasFeature(.avx2)) .{ .vp_, .@"or" } else null, .xor => if (self.hasFeature(.avx2)) .{ .vp_, .xor } else null, + .min => switch (lhs_ty.childType().intInfo(self.target.*).signedness) { + .signed => if (self.hasFeature(.avx2)) .{ .vp_d, .mins } else null, + .unsigned => if (self.hasFeature(.avx2)) .{ .vp_d, .minu } else null, + }, + .max => switch (lhs_ty.childType().intInfo(self.target.*).signedness) { 
+ .signed => if (self.hasFeature(.avx2)) .{ .vp_d, .maxs } else null, + .unsigned => if (self.hasFeature(.avx2)) .{ .vp_d, .maxu } else null, + }, else => null, }, else => null, diff --git a/src/arch/x86_64/Encoding.zig b/src/arch/x86_64/Encoding.zig index 7b029cdb4f..52d010880e 100644 --- a/src/arch/x86_64/Encoding.zig +++ b/src/arch/x86_64/Encoding.zig @@ -280,6 +280,7 @@ pub const Mnemonic = enum { mulps, mulss, orps, pextrw, pinsrw, + pmaxsw, pmaxub, pminsw, pminub, shufps, sqrtps, sqrtss, subps, subss, @@ -318,6 +319,7 @@ pub const Mnemonic = enum { insertps, pextrb, pextrd, pextrq, pinsrb, pinsrd, pinsrq, + pmaxsb, pmaxsd, pmaxud, pmaxuw, pminsb, pminsd, pminud, pminuw, pmulld, roundpd, roundps, roundsd, roundss, // AVX @@ -349,6 +351,8 @@ pub const Mnemonic = enum { vpand, vpandn, vpextrb, vpextrd, vpextrq, vpextrw, vpinsrb, vpinsrd, vpinsrq, vpinsrw, + vpmaxsb, vpmaxsd, vpmaxsw, vpmaxub, vpmaxud, vpmaxuw, + vpminsb, vpminsd, vpminsw, vpminub, vpminud, vpminuw, vpmulhw, vpmulld, vpmullw, vpor, vpshufhw, vpshuflw, diff --git a/src/arch/x86_64/Mir.zig b/src/arch/x86_64/Mir.zig index a18792e6aa..4483de858e 100644 --- a/src/arch/x86_64/Mir.zig +++ b/src/arch/x86_64/Mir.zig @@ -453,6 +453,14 @@ pub const Inst = struct { /// Bitwise logical and not of packed single-precision floating-point values /// Bitwise logical and not of packed double-precision floating-point values andn, + /// Maximum of packed signed integers + maxs, + /// Maximum of packed unsigned integers + maxu, + /// Minimum of packed signed integers + mins, + /// Minimum of packed unsigned integers + minu, /// Multiply packed signed integers and store low result mull, /// Multiply packed signed integers and store high result diff --git a/src/arch/x86_64/encodings.zig b/src/arch/x86_64/encodings.zig index 86a79596cd..c326f4230a 100644 --- a/src/arch/x86_64/encodings.zig +++ b/src/arch/x86_64/encodings.zig @@ -1011,6 +1011,14 @@ pub const table = [_]Entry{ .{ .pinsrw, .rmi, &.{ .xmm, .r32_m16, .imm8 }, &.{ 0x66, 0x0f, 0xc4 }, 0, .none, .sse2 }, + .{ .pmaxsw, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xee }, 0, .none, .sse2 }, + + .{ .pmaxub, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xde }, 0, .none, .sse2 }, + + .{ .pminsw, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xea }, 0, .none, .sse2 }, + + .{ .pminub, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xda }, 0, .none, .sse2 }, + .{ .pmulhw, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xe5 }, 0, .none, .sse2 }, .{ .pmullw, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xd5 }, 0, .none, .sse2 }, @@ -1091,6 +1099,20 @@ pub const table = [_]Entry{ .{ .pinsrd, .rmi, &.{ .xmm, .rm32, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x22 }, 0, .none, .sse4_1 }, .{ .pinsrq, .rmi, &.{ .xmm, .rm64, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x22 }, 0, .long, .sse4_1 }, + .{ .pmaxsb, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x3c }, 0, .none, .sse4_1 }, + .{ .pmaxsd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x3d }, 0, .none, .sse4_1 }, + + .{ .pmaxuw, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x3e }, 0, .none, .sse4_1 }, + + .{ .pmaxud, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x3f }, 0, .none, .sse4_1 }, + + .{ .pminsb, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x38 }, 0, .none, .sse4_1 }, + .{ .pminsd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x39 }, 0, .none, .sse4_1 }, + + .{ .pminuw, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x3a }, 0, .none, .sse4_1 }, + + .{ .pminud, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x3b }, 0, .none, .sse4_1 }, + 
.{ .pmulld, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x40 }, 0, .none, .sse4_1 }, .{ .roundpd, .rmi, &.{ .xmm, .xmm_m128, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x09 }, 0, .none, .sse4_1 }, @@ -1318,6 +1340,24 @@ pub const table = [_]Entry{ .{ .vpinsrw, .rvmi, &.{ .xmm, .xmm, .r32_m16, .imm8 }, &.{ 0x66, 0x0f, 0xc4 }, 0, .vex_128_wig, .avx }, + .{ .vpmaxsb, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x3c }, 0, .vex_128_wig, .avx }, + .{ .vpmaxsw, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xee }, 0, .vex_128_wig, .avx }, + .{ .vpmaxsd, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x3d }, 0, .vex_128_wig, .avx }, + + .{ .vpmaxub, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xde }, 0, .vex_128_wig, .avx }, + .{ .vpmaxuw, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x3e }, 0, .vex_128_wig, .avx }, + + .{ .vpmaxud, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x3f }, 0, .vex_128_wig, .avx }, + + .{ .vpminsb, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x38 }, 0, .vex_128_wig, .avx }, + .{ .vpminsw, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xea }, 0, .vex_128_wig, .avx }, + .{ .vpminsd, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x39 }, 0, .vex_128_wig, .avx }, + + .{ .vpminub, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xda }, 0, .vex_128_wig, .avx }, + .{ .vpminuw, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x3a }, 0, .vex_128_wig, .avx }, + + .{ .vpminud, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x3b }, 0, .vex_128_wig, .avx }, + .{ .vpmulhw, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xe5 }, 0, .vex_128_wig, .avx }, .{ .vpmulld, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x40 }, 0, .vex_128_wig, .avx }, @@ -1449,6 +1489,24 @@ pub const table = [_]Entry{ .{ .vpandn, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xdf }, 0, .vex_256_wig, .avx2 }, + .{ .vpmaxsb, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0x3c }, 0, .vex_256_wig, .avx2 }, + .{ .vpmaxsw, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xee }, 0, .vex_256_wig, .avx2 }, + .{ .vpmaxsd, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0x3d }, 0, .vex_256_wig, .avx2 }, + + .{ .vpmaxub, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xde }, 0, .vex_256_wig, .avx2 }, + .{ .vpmaxuw, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0x3e }, 0, .vex_256_wig, .avx2 }, + + .{ .vpmaxud, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0x3f }, 0, .vex_256_wig, .avx2 }, + + .{ .vpminsb, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0x38 }, 0, .vex_256_wig, .avx2 }, + .{ .vpminsw, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xea }, 0, .vex_256_wig, .avx2 }, + .{ .vpminsd, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0x39 }, 0, .vex_256_wig, .avx2 }, + + .{ .vpminub, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xda }, 0, .vex_256_wig, .avx2 }, + .{ .vpminuw, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0x3a }, 0, .vex_256_wig, .avx2 }, + + .{ .vpminud, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0x3b }, 0, .vex_256_wig, .avx2 }, + .{ .vpmulhw, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xe5 }, 0, .vex_256_wig, .avx2 }, .{ .vpmulld, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0x40 }, 0, .vex_256_wig, .avx2 }, -- cgit v1.2.3 From 403c2d91bed456085eb685a9f89996c4635ce4b9 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Mon, 15 May 2023 20:17:06 -0400 Subject: x86_64: fix 
float min/max behavior --- src/arch/x86_64/CodeGen.zig | 302 ++++++++++++++++++++++++++++++++++++-- src/arch/x86_64/Encoding.zig | 33 +++-- src/arch/x86_64/Lower.zig | 7 + src/arch/x86_64/Mir.zig | 20 +++ src/arch/x86_64/encoder.zig | 31 ++-- src/arch/x86_64/encodings.zig | 34 +++++ test/behavior/maximum_minimum.zig | 6 +- 7 files changed, 393 insertions(+), 40 deletions(-) (limited to 'src/arch') diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 2cd5721258..7ea0db516b 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -1271,6 +1271,27 @@ fn asmRegisterRegisterRegister( }); } +fn asmRegisterRegisterRegisterRegister( + self: *Self, + tag: Mir.Inst.FixedTag, + reg1: Register, + reg2: Register, + reg3: Register, + reg4: Register, +) !void { + _ = try self.addInst(.{ + .tag = tag[1], + .ops = .rrrr, + .data = .{ .rrrr = .{ + .fixes = tag[0], + .r1 = reg1, + .r2 = reg2, + .r3 = reg3, + .r4 = reg4, + } }, + }); +} + fn asmRegisterRegisterRegisterImmediate( self: *Self, tag: Mir.Inst.FixedTag, @@ -6224,12 +6245,26 @@ fn genBinOp( lhs_air: Air.Inst.Ref, rhs_air: Air.Inst.Ref, ) !MCValue { - const lhs_mcv = try self.resolveInst(lhs_air); - const rhs_mcv = try self.resolveInst(rhs_air); const lhs_ty = self.air.typeOf(lhs_air); const rhs_ty = self.air.typeOf(rhs_air); const abi_size = @intCast(u32, lhs_ty.abiSize(self.target.*)); + const maybe_mask_reg = switch (air_tag) { + else => null, + .max, .min => if (lhs_ty.scalarType().isRuntimeFloat()) registerAlias( + if (!self.hasFeature(.avx) and self.hasFeature(.sse4_1)) mask: { + try self.register_manager.getReg(.xmm0, null); + break :mask .xmm0; + } else try self.register_manager.allocReg(null, sse), + abi_size, + ) else null, + }; + const mask_lock = + if (maybe_mask_reg) |mask_reg| self.register_manager.lockRegAssumeUnused(mask_reg) else null; + defer if (mask_lock) |lock| self.register_manager.unlockReg(lock); + + const lhs_mcv = try self.resolveInst(lhs_air); + const rhs_mcv = try self.resolveInst(rhs_air); switch (lhs_mcv) { .immediate => |imm| switch (imm) { 0 => switch (air_tag) { @@ -6300,7 +6335,16 @@ fn genBinOp( }; defer if (dst_lock) |lock| self.register_manager.unlockReg(lock); - const src_mcv = if (flipped) lhs_mcv else rhs_mcv; + const unmat_src_mcv = if (flipped) lhs_mcv else rhs_mcv; + const src_mcv: MCValue = if (maybe_mask_reg) |mask_reg| + if (self.hasFeature(.avx) and unmat_src_mcv.isRegister() and maybe_inst != null and + self.liveness.operandDies(maybe_inst.?, if (flipped) 0 else 1)) unmat_src_mcv else src: { + try self.genSetReg(mask_reg, rhs_ty, unmat_src_mcv); + break :src .{ .register = mask_reg }; + } + else + unmat_src_mcv; + if (!vec_op) { switch (air_tag) { .add, @@ -7009,18 +7053,26 @@ fn genBinOp( })) |tag| tag else return self.fail("TODO implement genBinOp for {s} {}", .{ @tagName(air_tag), lhs_ty.fmt(self.bin_file.options.module.?), }); + + const lhs_copy_reg = if (maybe_mask_reg) |_| registerAlias( + if (copied_to_dst) try self.copyToTmpRegister(lhs_ty, dst_mcv) else lhs_mcv.getReg().?, + abi_size, + ) else null; + const lhs_copy_lock = if (lhs_copy_reg) |reg| self.register_manager.lockReg(reg) else null; + defer if (lhs_copy_lock) |lock| self.register_manager.unlockReg(lock); + if (self.hasFeature(.avx)) { - const src1_alias = + const lhs_reg = if (copied_to_dst) dst_reg else registerAlias(lhs_mcv.getReg().?, abi_size); if (src_mcv.isMemory()) try self.asmRegisterRegisterMemory( mir_tag, dst_reg, - src1_alias, + lhs_reg, 
src_mcv.mem(Memory.PtrSize.fromSize(abi_size)), ) else try self.asmRegisterRegisterRegister( mir_tag, dst_reg, - src1_alias, + lhs_reg, registerAlias(if (src_mcv.isRegister()) src_mcv.getReg().? else @@ -7041,9 +7093,10 @@ fn genBinOp( try self.copyToTmpRegister(rhs_ty, src_mcv), abi_size), ); } + switch (air_tag) { .add, .addwrap, .sub, .subwrap, .mul, .mulwrap, .div_float, .div_exact => {}, - .div_trunc, .div_floor => try self.genRound( + .div_trunc, .div_floor => if (self.hasFeature(.sse4_1)) try self.genRound( lhs_ty, dst_reg, .{ .register = dst_reg }, @@ -7052,11 +7105,240 @@ fn genBinOp( .div_floor => 0b1_0_01, else => unreachable, }, - ), + ) else return self.fail("TODO implement genBinOp for {s} {} without sse4_1 feature", .{ + @tagName(air_tag), lhs_ty.fmt(self.bin_file.options.module.?), + }), .bit_and, .bit_or, .xor => {}, - .max, .min => {}, // TODO: unordered select + .max, .min => if (maybe_mask_reg) |mask_reg| if (self.hasFeature(.avx)) { + const rhs_copy_reg = registerAlias(src_mcv.getReg().?, abi_size); + + try self.asmRegisterRegisterRegisterImmediate( + if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag()) { + .Float => switch (lhs_ty.floatBits(self.target.*)) { + 32 => .{ .v_ss, .cmp }, + 64 => .{ .v_sd, .cmp }, + 16, 80, 128 => null, + else => unreachable, + }, + .Vector => switch (lhs_ty.childType().zigTypeTag()) { + .Float => switch (lhs_ty.childType().floatBits(self.target.*)) { + 32 => switch (lhs_ty.vectorLen()) { + 1 => .{ .v_ss, .cmp }, + 2...8 => .{ .v_ps, .cmp }, + else => null, + }, + 64 => switch (lhs_ty.vectorLen()) { + 1 => .{ .v_sd, .cmp }, + 2...4 => .{ .v_pd, .cmp }, + else => null, + }, + 16, 80, 128 => null, + else => unreachable, + }, + else => unreachable, + }, + else => unreachable, + })) |tag| tag else return self.fail("TODO implement genBinOp for {s} {}", .{ + @tagName(air_tag), lhs_ty.fmt(self.bin_file.options.module.?), + }), + mask_reg, + rhs_copy_reg, + rhs_copy_reg, + Immediate.u(3), // unord + ); + try self.asmRegisterRegisterRegisterRegister( + if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag()) { + .Float => switch (lhs_ty.floatBits(self.target.*)) { + 32 => .{ .v_ps, .blendv }, + 64 => .{ .v_pd, .blendv }, + 16, 80, 128 => null, + else => unreachable, + }, + .Vector => switch (lhs_ty.childType().zigTypeTag()) { + .Float => switch (lhs_ty.childType().floatBits(self.target.*)) { + 32 => switch (lhs_ty.vectorLen()) { + 1...8 => .{ .v_ps, .blendv }, + else => null, + }, + 64 => switch (lhs_ty.vectorLen()) { + 1...4 => .{ .v_pd, .blendv }, + else => null, + }, + 16, 80, 128 => null, + else => unreachable, + }, + else => unreachable, + }, + else => unreachable, + })) |tag| tag else return self.fail("TODO implement genBinOp for {s} {}", .{ + @tagName(air_tag), lhs_ty.fmt(self.bin_file.options.module.?), + }), + dst_reg, + dst_reg, + lhs_copy_reg.?, + mask_reg, + ); + } else { + const has_blend = self.hasFeature(.sse4_1); + try self.asmRegisterRegisterImmediate( + if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag()) { + .Float => switch (lhs_ty.floatBits(self.target.*)) { + 32 => .{ ._ss, .cmp }, + 64 => .{ ._sd, .cmp }, + 16, 80, 128 => null, + else => unreachable, + }, + .Vector => switch (lhs_ty.childType().zigTypeTag()) { + .Float => switch (lhs_ty.childType().floatBits(self.target.*)) { + 32 => switch (lhs_ty.vectorLen()) { + 1 => .{ ._ss, .cmp }, + 2...4 => .{ ._ps, .cmp }, + else => null, + }, + 64 => switch (lhs_ty.vectorLen()) { + 1 => .{ ._sd, .cmp }, + 2 => .{ ._pd, .cmp }, + else => null, + }, + 16, 80, 128 => null, + 
else => unreachable, + }, + else => unreachable, + }, + else => unreachable, + })) |tag| tag else return self.fail("TODO implement genBinOp for {s} {}", .{ + @tagName(air_tag), lhs_ty.fmt(self.bin_file.options.module.?), + }), + mask_reg, + mask_reg, + Immediate.u(if (has_blend) 3 else 7), // unord, ord + ); + if (has_blend) try self.asmRegisterRegisterRegister( + if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag()) { + .Float => switch (lhs_ty.floatBits(self.target.*)) { + 32 => .{ ._ps, .blendv }, + 64 => .{ ._pd, .blendv }, + 16, 80, 128 => null, + else => unreachable, + }, + .Vector => switch (lhs_ty.childType().zigTypeTag()) { + .Float => switch (lhs_ty.childType().floatBits(self.target.*)) { + 32 => switch (lhs_ty.vectorLen()) { + 1...4 => .{ ._ps, .blendv }, + else => null, + }, + 64 => switch (lhs_ty.vectorLen()) { + 1...2 => .{ ._pd, .blendv }, + else => null, + }, + 16, 80, 128 => null, + else => unreachable, + }, + else => unreachable, + }, + else => unreachable, + })) |tag| tag else return self.fail("TODO implement genBinOp for {s} {}", .{ + @tagName(air_tag), lhs_ty.fmt(self.bin_file.options.module.?), + }), + dst_reg, + lhs_copy_reg.?, + mask_reg, + ) else { + try self.asmRegisterRegister( + if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag()) { + .Float => switch (lhs_ty.floatBits(self.target.*)) { + 32 => .{ ._ps, .@"and" }, + 64 => .{ ._pd, .@"and" }, + 16, 80, 128 => null, + else => unreachable, + }, + .Vector => switch (lhs_ty.childType().zigTypeTag()) { + .Float => switch (lhs_ty.childType().floatBits(self.target.*)) { + 32 => switch (lhs_ty.vectorLen()) { + 1...4 => .{ ._ps, .@"and" }, + else => null, + }, + 64 => switch (lhs_ty.vectorLen()) { + 1...2 => .{ ._pd, .@"and" }, + else => null, + }, + 16, 80, 128 => null, + else => unreachable, + }, + else => unreachable, + }, + else => unreachable, + })) |tag| tag else return self.fail("TODO implement genBinOp for {s} {}", .{ + @tagName(air_tag), lhs_ty.fmt(self.bin_file.options.module.?), + }), + dst_reg, + mask_reg, + ); + try self.asmRegisterRegister( + if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag()) { + .Float => switch (lhs_ty.floatBits(self.target.*)) { + 32 => .{ ._ps, .andn }, + 64 => .{ ._pd, .andn }, + 16, 80, 128 => null, + else => unreachable, + }, + .Vector => switch (lhs_ty.childType().zigTypeTag()) { + .Float => switch (lhs_ty.childType().floatBits(self.target.*)) { + 32 => switch (lhs_ty.vectorLen()) { + 1...4 => .{ ._ps, .andn }, + else => null, + }, + 64 => switch (lhs_ty.vectorLen()) { + 1...2 => .{ ._pd, .andn }, + else => null, + }, + 16, 80, 128 => null, + else => unreachable, + }, + else => unreachable, + }, + else => unreachable, + })) |tag| tag else return self.fail("TODO implement genBinOp for {s} {}", .{ + @tagName(air_tag), lhs_ty.fmt(self.bin_file.options.module.?), + }), + mask_reg, + lhs_copy_reg.?, + ); + try self.asmRegisterRegister( + if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag()) { + .Float => switch (lhs_ty.floatBits(self.target.*)) { + 32 => .{ ._ps, .@"or" }, + 64 => .{ ._pd, .@"or" }, + 16, 80, 128 => null, + else => unreachable, + }, + .Vector => switch (lhs_ty.childType().zigTypeTag()) { + .Float => switch (lhs_ty.childType().floatBits(self.target.*)) { + 32 => switch (lhs_ty.vectorLen()) { + 1...4 => .{ ._ps, .@"or" }, + else => null, + }, + 64 => switch (lhs_ty.vectorLen()) { + 1...2 => .{ ._pd, .@"or" }, + else => null, + }, + 16, 80, 128 => null, + else => unreachable, + }, + else => unreachable, + }, + else => unreachable, + })) |tag| tag else return 
self.fail("TODO implement genBinOp for {s} {}", .{ + @tagName(air_tag), lhs_ty.fmt(self.bin_file.options.module.?), + }), + dst_reg, + mask_reg, + ); + } + }, else => unreachable, } + return dst_mcv; } @@ -9282,7 +9564,7 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr 17...32 => if (self.hasFeature(.avx)) .{ .v_, .movdqa } else null, else => null, }, - .Float => switch (ty.floatBits(self.target.*)) { + .Float => switch (ty.scalarType().floatBits(self.target.*)) { 16, 128 => switch (abi_size) { 2...4 => if (self.hasFeature(.avx)) .{ .v_d, .mov } else .{ ._d, .mov }, 5...8 => if (self.hasFeature(.avx)) .{ .v_q, .mov } else .{ ._q, .mov }, diff --git a/src/arch/x86_64/Encoding.zig b/src/arch/x86_64/Encoding.zig index 52d010880e..0aaf12013d 100644 --- a/src/arch/x86_64/Encoding.zig +++ b/src/arch/x86_64/Encoding.zig @@ -178,7 +178,7 @@ pub fn format( try writer.print("+{s} ", .{tag}); }, .m, .mi, .m1, .mc, .vmi => try writer.print("/{d} ", .{encoding.modRmExt()}), - .mr, .rm, .rmi, .mri, .mrc, .rvm, .rvmi, .mvr => try writer.writeAll("/r "), + .mr, .rm, .rmi, .mri, .mrc, .rm0, .rvm, .rvmr, .rvmi, .mvr => try writer.writeAll("/r "), } switch (encoding.data.op_en) { @@ -202,7 +202,8 @@ pub fn format( }; try writer.print("{s} ", .{tag}); }, - .np, .fd, .td, .o, .m, .m1, .mc, .mr, .rm, .mrc, .rvm, .mvr => {}, + .rvmr => try writer.writeAll("/is4 "), + .np, .fd, .td, .o, .m, .m1, .mc, .mr, .rm, .mrc, .rm0, .rvm, .mvr => {}, } try writer.print("{s} ", .{@tagName(encoding.mnemonic)}); @@ -270,7 +271,7 @@ pub const Mnemonic = enum { addps, addss, andps, andnps, - cmpss, + cmpps, cmpss, cvtpi2ps, cvtps2pi, cvtsi2ss, cvtss2si, cvttps2pi, cvttss2si, divps, divss, maxps, maxss, @@ -290,7 +291,7 @@ pub const Mnemonic = enum { addpd, addsd, andpd, andnpd, - //cmpsd, + cmppd, //cmpsd, cvtdq2pd, cvtdq2ps, cvtpd2dq, cvtpd2pi, cvtpd2ps, cvtpi2pd, cvtps2dq, cvtps2pd, cvtsd2si, cvtsd2ss, cvtsi2sd, cvtss2sd, cvttpd2dq, cvttpd2pi, cvttps2dq, cvttsd2si, @@ -315,6 +316,7 @@ pub const Mnemonic = enum { // SSE3 movddup, movshdup, movsldup, // SSE4.1 + blendpd, blendps, blendvpd, blendvps, extractps, insertps, pextrb, pextrd, pextrq, @@ -325,7 +327,9 @@ pub const Mnemonic = enum { // AVX vaddpd, vaddps, vaddsd, vaddss, vandnpd, vandnps, vandpd, vandps, + vblendpd, vblendps, vblendvpd, vblendvps, vbroadcastf128, vbroadcastsd, vbroadcastss, + vcmppd, vcmpps, vcmpsd, vcmpss, vcvtdq2pd, vcvtdq2ps, vcvtpd2dq, vcvtpd2ps, vcvtps2dq, vcvtps2pd, vcvtsd2si, vcvtsd2ss, vcvtsi2sd, vcvtsi2ss, vcvtss2sd, vcvtss2si, @@ -385,7 +389,7 @@ pub const OpEn = enum { fd, td, m1, mc, mi, mr, rm, rmi, mri, mrc, - vmi, rvm, rvmi, mvr, + rm0, vmi, rvm, rvmr, rvmi, mvr, // zig fmt: on }; @@ -407,7 +411,7 @@ pub const Op = enum { moffs, sreg, st, mm, mm_m64, - xmm, xmm_m32, xmm_m64, xmm_m128, + xmm0, xmm, xmm_m32, xmm_m64, xmm_m128, ymm, ymm_m256, // zig fmt: on @@ -436,7 +440,9 @@ pub const Op = enum { .segment => .sreg, .x87 => .st, .mmx => .mm, - .sse => switch (reg.bitSize()) { + .sse => if (reg == .xmm0) + .xmm0 + else switch (reg.bitSize()) { 128 => .xmm, 256 => .ymm, else => unreachable, @@ -494,7 +500,7 @@ pub const Op = enum { .eax, .r32, .rm32, .r32_m16 => unreachable, .rax, .r64, .rm64, .r64_m16 => unreachable, .st, .mm, .mm_m64 => unreachable, - .xmm, .xmm_m32, .xmm_m64, .xmm_m128 => unreachable, + .xmm0, .xmm, .xmm_m32, .xmm_m64, .xmm_m128 => unreachable, .ymm, .ymm_m256 => unreachable, .m8, .m16, .m32, .m64, .m80, .m128, .m256 => unreachable, .unity => 1, @@ -516,7 +522,7 @@ pub const Op = enum { 
.eax, .r32, .rm32, .r32_m8, .r32_m16 => 32, .rax, .r64, .rm64, .r64_m16, .mm, .mm_m64 => 64, .st => 80, - .xmm, .xmm_m32, .xmm_m64, .xmm_m128 => 128, + .xmm0, .xmm, .xmm_m32, .xmm_m64, .xmm_m128 => 128, .ymm, .ymm_m256 => 256, }; } @@ -526,7 +532,8 @@ pub const Op = enum { .none, .o16, .o32, .o64, .moffs, .m, .sreg => unreachable, .unity, .imm8, .imm8s, .imm16, .imm16s, .imm32, .imm32s, .imm64 => unreachable, .rel8, .rel16, .rel32 => unreachable, - .al, .cl, .r8, .ax, .r16, .eax, .r32, .rax, .r64, .st, .mm, .xmm, .ymm => unreachable, + .al, .cl, .r8, .ax, .r16, .eax, .r32, .rax, .r64 => unreachable, + .st, .mm, .xmm0, .xmm, .ymm => unreachable, .m8, .rm8, .r32_m8 => 8, .m16, .rm16, .r32_m16, .r64_m16 => 16, .m32, .rm32, .xmm_m32 => 32, @@ -558,7 +565,7 @@ pub const Op = enum { .rm8, .rm16, .rm32, .rm64, .r32_m8, .r32_m16, .r64_m16, .st, .mm, .mm_m64, - .xmm, .xmm_m32, .xmm_m64, .xmm_m128, + .xmm0, .xmm, .xmm_m32, .xmm_m64, .xmm_m128, .ymm, .ymm_m256, => true, else => false, @@ -612,7 +619,7 @@ pub const Op = enum { .sreg => .segment, .st => .x87, .mm, .mm_m64 => .mmx, - .xmm, .xmm_m32, .xmm_m64, .xmm_m128 => .sse, + .xmm0, .xmm, .xmm_m32, .xmm_m64, .xmm_m128 => .sse, .ymm, .ymm_m256 => .sse, }; } @@ -629,7 +636,7 @@ pub const Op = enum { else => { if (op.isRegister() and target.isRegister()) { return switch (target) { - .cl, .al, .ax, .eax, .rax => op == target, + .cl, .al, .ax, .eax, .rax, .xmm0 => op == target, else => op.class() == target.class() and op.regBitSize() == target.regBitSize(), }; } diff --git a/src/arch/x86_64/Lower.zig b/src/arch/x86_64/Lower.zig index 65d2b64398..d77ddf3050 100644 --- a/src/arch/x86_64/Lower.zig +++ b/src/arch/x86_64/Lower.zig @@ -377,6 +377,7 @@ fn generic(lower: *Lower, inst: Mir.Inst) Error!void { .r => inst.data.r.fixes, .rr => inst.data.rr.fixes, .rrr => inst.data.rrr.fixes, + .rrrr => inst.data.rrrr.fixes, .rrri => inst.data.rrri.fixes, .rri_s, .rri_u => inst.data.rri.fixes, .ri_s, .ri_u => inst.data.ri.fixes, @@ -430,6 +431,12 @@ fn generic(lower: *Lower, inst: Mir.Inst) Error!void { .{ .reg = inst.data.rrr.r2 }, .{ .reg = inst.data.rrr.r3 }, }, + .rrrr => &.{ + .{ .reg = inst.data.rrrr.r1 }, + .{ .reg = inst.data.rrrr.r2 }, + .{ .reg = inst.data.rrrr.r3 }, + .{ .reg = inst.data.rrrr.r4 }, + }, .rrri => &.{ .{ .reg = inst.data.rrri.r1 }, .{ .reg = inst.data.rrri.r2 }, diff --git a/src/arch/x86_64/Mir.zig b/src/arch/x86_64/Mir.zig index 4483de858e..9f59a2afba 100644 --- a/src/arch/x86_64/Mir.zig +++ b/src/arch/x86_64/Mir.zig @@ -596,6 +596,16 @@ pub const Inst = struct { /// Replicate single floating-point values movsldup, + /// Blend packed single-precision floating-point values + /// Blend scalar single-precision floating-point values + /// Blend packed double-precision floating-point values + /// Blend scalar double-precision floating-point values + blend, + /// Variable blend packed single-precision floating-point values + /// Variable blend scalar single-precision floating-point values + /// Variable blend packed double-precision floating-point values + /// Variable blend scalar double-precision floating-point values + blendv, /// Extract packed floating-point values extract, /// Insert scalar single-precision floating-point value @@ -651,6 +661,9 @@ pub const Inst = struct { /// Register, register, register operands. /// Uses `rrr` payload. rrr, + /// Register, register, register, register operands. + /// Uses `rrrr` payload. + rrrr, /// Register, register, register, immediate (byte) operands. /// Uses `rrri` payload. 
rrri, @@ -870,6 +883,13 @@ pub const Inst = struct { r2: Register, r3: Register, }, + rrrr: struct { + fixes: Fixes = ._, + r1: Register, + r2: Register, + r3: Register, + r4: Register, + }, rrri: struct { fixes: Fixes = ._, r1: Register, diff --git a/src/arch/x86_64/encoder.zig b/src/arch/x86_64/encoder.zig index 0ce875240d..5f9a2f49b3 100644 --- a/src/arch/x86_64/encoder.zig +++ b/src/arch/x86_64/encoder.zig @@ -226,8 +226,8 @@ pub const Instruction = struct { else => { const mem_op = switch (data.op_en) { .m, .mi, .m1, .mc, .mr, .mri, .mrc, .mvr => inst.ops[0], - .rm, .rmi, .vmi => inst.ops[1], - .rvm, .rvmi => inst.ops[2], + .rm, .rmi, .rm0, .vmi => inst.ops[1], + .rvm, .rvmr, .rvmi => inst.ops[2], else => unreachable, }; switch (mem_op) { @@ -235,7 +235,7 @@ pub const Instruction = struct { const rm = switch (data.op_en) { .m, .mi, .m1, .mc, .vmi => enc.modRmExt(), .mr, .mri, .mrc => inst.ops[1].reg.lowEnc(), - .rm, .rmi, .rvm, .rvmi => inst.ops[0].reg.lowEnc(), + .rm, .rmi, .rm0, .rvm, .rvmr, .rvmi => inst.ops[0].reg.lowEnc(), .mvr => inst.ops[2].reg.lowEnc(), else => unreachable, }; @@ -245,7 +245,7 @@ pub const Instruction = struct { const op = switch (data.op_en) { .m, .mi, .m1, .mc, .vmi => .none, .mr, .mri, .mrc => inst.ops[1], - .rm, .rmi, .rvm, .rvmi => inst.ops[0], + .rm, .rmi, .rm0, .rvm, .rvmr, .rvmi => inst.ops[0], .mvr => inst.ops[2], else => unreachable, }; @@ -257,6 +257,7 @@ pub const Instruction = struct { switch (data.op_en) { .mi => try encodeImm(inst.ops[1].imm, data.ops[1], encoder), .rmi, .mri, .vmi => try encodeImm(inst.ops[2].imm, data.ops[2], encoder), + .rvmr => try encoder.imm8(@as(u8, inst.ops[3].reg.enc()) << 4), .rvmi => try encodeImm(inst.ops[3].imm, data.ops[3], encoder), else => {}, } @@ -298,7 +299,7 @@ pub const Instruction = struct { .i, .zi, .o, .oi, .d, .np => null, .fd => inst.ops[1].mem.base().reg, .td => inst.ops[0].mem.base().reg, - .rm, .rmi => if (inst.ops[1].isSegmentRegister()) + .rm, .rmi, .rm0 => if (inst.ops[1].isSegmentRegister()) switch (inst.ops[1]) { .reg => |reg| reg, .mem => |mem| mem.base().reg, @@ -314,7 +315,7 @@ pub const Instruction = struct { } else null, - .vmi, .rvm, .rvmi, .mvr => unreachable, + .vmi, .rvm, .rvmr, .rvmi, .mvr => unreachable, }; if (segment_override) |seg| { legacy.setSegmentOverride(seg); @@ -333,23 +334,23 @@ pub const Instruction = struct { switch (op_en) { .np, .i, .zi, .fd, .td, .d => {}, .o, .oi => rex.b = inst.ops[0].reg.isExtended(), - .m, .mi, .m1, .mc, .mr, .rm, .rmi, .mri, .mrc => { + .m, .mi, .m1, .mc, .mr, .rm, .rmi, .mri, .mrc, .rm0 => { const r_op = switch (op_en) { - .rm, .rmi => inst.ops[0], + .rm, .rmi, .rm0 => inst.ops[0], .mr, .mri, .mrc => inst.ops[1], else => .none, }; rex.r = r_op.isBaseExtended(); const b_x_op = switch (op_en) { - .rm, .rmi => inst.ops[1], + .rm, .rmi, .rm0 => inst.ops[1], .m, .mi, .m1, .mc, .mr, .mri, .mrc => inst.ops[0], else => unreachable, }; rex.b = b_x_op.isBaseExtended(); rex.x = b_x_op.isIndexExtended(); }, - .vmi, .rvm, .rvmi, .mvr => unreachable, + .vmi, .rvm, .rvmr, .rvmi, .mvr => unreachable, } try encoder.rex(rex); @@ -367,9 +368,9 @@ pub const Instruction = struct { switch (op_en) { .np, .i, .zi, .fd, .td, .d => {}, .o, .oi => vex.b = inst.ops[0].reg.isExtended(), - .m, .mi, .m1, .mc, .mr, .rm, .rmi, .mri, .mrc, .vmi, .rvm, .rvmi, .mvr => { + .m, .mi, .m1, .mc, .mr, .rm, .rmi, .mri, .mrc, .rm0, .vmi, .rvm, .rvmr, .rvmi, .mvr => { const r_op = switch (op_en) { - .rm, .rmi, .rvm, .rvmi => inst.ops[0], + .rm, .rmi, .rm0, .rvm, .rvmr, .rvmi => inst.ops[0], 
.mr, .mri, .mrc => inst.ops[1], .mvr => inst.ops[2], .m, .mi, .m1, .mc, .vmi => .none, @@ -378,9 +379,9 @@ pub const Instruction = struct { vex.r = r_op.isBaseExtended(); const b_x_op = switch (op_en) { - .rm, .rmi, .vmi => inst.ops[1], + .rm, .rmi, .rm0, .vmi => inst.ops[1], .m, .mi, .m1, .mc, .mr, .mri, .mrc, .mvr => inst.ops[0], - .rvm, .rvmi => inst.ops[2], + .rvm, .rvmr, .rvmi => inst.ops[2], else => unreachable, }; vex.b = b_x_op.isBaseExtended(); @@ -408,7 +409,7 @@ pub const Instruction = struct { switch (op_en) { else => {}, .vmi => vex.v = inst.ops[0].reg, - .rvm, .rvmi => vex.v = inst.ops[1].reg, + .rvm, .rvmr, .rvmi => vex.v = inst.ops[1].reg, } try encoder.vex(vex); diff --git a/src/arch/x86_64/encodings.zig b/src/arch/x86_64/encodings.zig index c326f4230a..e087f6dfc7 100644 --- a/src/arch/x86_64/encodings.zig +++ b/src/arch/x86_64/encodings.zig @@ -846,6 +846,8 @@ pub const table = [_]Entry{ .{ .andps, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x54 }, 0, .none, .sse }, + .{ .cmpps, .rmi, &.{ .xmm, .xmm_m128, .imm8 }, &.{ 0x0f, 0xc2 }, 0, .none, .sse }, + .{ .cmpss, .rmi, &.{ .xmm, .xmm_m32, .imm8 }, &.{ 0xf3, 0x0f, 0xc2 }, 0, .none, .sse }, .{ .cvtpi2ps, .rm, &.{ .xmm, .mm_m64 }, &.{ 0x0f, 0x2a }, 0, .none, .sse }, @@ -917,6 +919,8 @@ pub const table = [_]Entry{ .{ .andpd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x54 }, 0, .none, .sse2 }, + .{ .cmppd, .rmi, &.{ .xmm, .xmm_m128, .imm8 }, &.{ 0x66, 0x0f, 0xc2 }, 0, .none, .sse2 }, + .{ .cmpsd, .rmi, &.{ .xmm, .xmm_m64, .imm8 }, &.{ 0xf2, 0x0f, 0xc2 }, 0, .none, .sse2 }, .{ .cvtdq2pd, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf3, 0x0f, 0xe6 }, 0, .none, .sse2 }, @@ -1085,6 +1089,14 @@ pub const table = [_]Entry{ .{ .movsldup, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0xf3, 0x0f, 0x12 }, 0, .none, .sse3 }, // SSE4.1 + .{ .blendpd, .rmi, &.{ .xmm, .xmm_m128, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x0d }, 0, .none, .sse4_1 }, + + .{ .blendps, .rmi, &.{ .xmm, .xmm_m128, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x0c }, 0, .none, .sse4_1 }, + + .{ .blendvpd, .rm0, &.{ .xmm, .xmm_m128, .xmm0 }, &.{ 0x66, 0x0f, 0x38, 0x15 }, 0, .none, .sse4_1 }, + + .{ .blendvps, .rm0, &.{ .xmm, .xmm_m128, .xmm0 }, &.{ 0x66, 0x0f, 0x38, 0x14 }, 0, .none, .sse4_1 }, + .{ .extractps, .mri, &.{ .rm32, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x17 }, 0, .none, .sse4_1 }, .{ .insertps, .rmi, &.{ .xmm, .xmm_m32, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x21 }, 0, .none, .sse4_1 }, @@ -1146,11 +1158,33 @@ pub const table = [_]Entry{ .{ .vandps, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x0f, 0x54 }, 0, .vex_128_wig, .avx }, .{ .vandps, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x0f, 0x54 }, 0, .vex_256_wig, .avx }, + .{ .vblendpd, .rvmi, &.{ .xmm, .xmm, .xmm_m128, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x0d }, 0, .vex_128_wig, .avx }, + .{ .vblendpd, .rvmi, &.{ .ymm, .ymm, .ymm_m256, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x0d }, 0, .vex_256_wig, .avx }, + + .{ .vblendps, .rvmi, &.{ .xmm, .xmm, .xmm_m128, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x0c }, 0, .vex_128_wig, .avx }, + .{ .vblendps, .rvmi, &.{ .ymm, .ymm, .ymm_m256, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x0c }, 0, .vex_256_wig, .avx }, + + .{ .vblendvpd, .rvmr, &.{ .xmm, .xmm, .xmm_m128, .xmm }, &.{ 0x66, 0x0f, 0x3a, 0x4b }, 0, .vex_128_w0, .avx }, + .{ .vblendvpd, .rvmr, &.{ .ymm, .ymm, .ymm_m256, .ymm }, &.{ 0x66, 0x0f, 0x3a, 0x4b }, 0, .vex_256_w0, .avx }, + + .{ .vblendvps, .rvmr, &.{ .xmm, .xmm, .xmm_m128, .xmm }, &.{ 0x66, 0x0f, 0x3a, 0x4a }, 0, .vex_128_w0, .avx }, + .{ .vblendvps, .rvmr, &.{ .ymm, .ymm, .ymm_m256, .ymm }, &.{ 0x66, 0x0f, 0x3a, 0x4a }, 0, .vex_256_w0, .avx }, + .{ 
.vbroadcastss, .rm, &.{ .xmm, .m32 }, &.{ 0x66, 0x0f, 0x38, 0x18 }, 0, .vex_128_w0, .avx }, .{ .vbroadcastss, .rm, &.{ .ymm, .m32 }, &.{ 0x66, 0x0f, 0x38, 0x18 }, 0, .vex_256_w0, .avx }, .{ .vbroadcastsd, .rm, &.{ .ymm, .m64 }, &.{ 0x66, 0x0f, 0x38, 0x19 }, 0, .vex_256_w0, .avx }, .{ .vbroadcastf128, .rm, &.{ .ymm, .m128 }, &.{ 0x66, 0x0f, 0x38, 0x1a }, 0, .vex_256_w0, .avx }, + .{ .vcmppd, .rvmi, &.{ .xmm, .xmm, .xmm_m128, .imm8 }, &.{ 0x66, 0x0f, 0xc2 }, 0, .vex_128_wig, .avx }, + .{ .vcmppd, .rvmi, &.{ .ymm, .ymm, .ymm_m256, .imm8 }, &.{ 0x66, 0x0f, 0xc2 }, 0, .vex_256_wig, .avx }, + + .{ .vcmpps, .rvmi, &.{ .xmm, .xmm, .xmm_m128, .imm8 }, &.{ 0x0f, 0xc2 }, 0, .vex_128_wig, .avx }, + .{ .vcmpps, .rvmi, &.{ .ymm, .ymm, .ymm_m256, .imm8 }, &.{ 0x0f, 0xc2 }, 0, .vex_256_wig, .avx }, + + .{ .vcmpsd, .rvmi, &.{ .xmm, .xmm, .xmm_m64, .imm8 }, &.{ 0xf2, 0x0f, 0xc2 }, 0, .vex_lig_wig, .avx }, + + .{ .vcmpss, .rvmi, &.{ .xmm, .xmm, .xmm_m32, .imm8 }, &.{ 0xf3, 0x0f, 0xc2 }, 0, .vex_lig_wig, .avx }, + .{ .vcvtdq2pd, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf3, 0x0f, 0xe6 }, 0, .vex_128_wig, .avx }, .{ .vcvtdq2pd, .rm, &.{ .ymm, .xmm_m128 }, &.{ 0xf3, 0x0f, 0xe6 }, 0, .vex_256_wig, .avx }, diff --git a/test/behavior/maximum_minimum.zig b/test/behavior/maximum_minimum.zig index ecfe596760..db6cad221f 100644 --- a/test/behavior/maximum_minimum.zig +++ b/test/behavior/maximum_minimum.zig @@ -24,7 +24,8 @@ test "@max" { test "@max on vectors" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64 and + !comptime std.Target.x86.featureSetHas(builtin.cpu.features, .sse4_1)) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -72,7 +73,8 @@ test "@min" { test "@min for vectors" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64 and + !comptime std.Target.x86.featureSetHas(builtin.cpu.features, .sse4_1)) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO -- cgit v1.2.3 From 729daed591ba6884ed1f907166abadf8fad26741 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Mon, 15 May 2023 23:34:57 -0400 Subject: x86_64: rewrite casts --- src/arch/x86_64/CodeGen.zig | 274 ++++++++++++++++++++----------------------- test/behavior/bitcast.zig | 2 - test/behavior/bugs/13128.zig | 1 - test/behavior/widening.zig | 1 - 4 files changed, 130 insertions(+), 148 deletions(-) (limited to 'src/arch') diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 7ea0db516b..6063c38074 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -2224,6 +2224,10 @@ fn getFrameAddrAlignment(self: *Self, frame_addr: FrameAddr) u32 { return @min(alloc_align, @bitCast(u32, frame_addr.off) & (alloc_align - 1)); } +fn getFrameAddrSize(self: *Self, frame_addr: FrameAddr) u32 { + return self.frame_allocs.get(@enumToInt(frame_addr.index)).abi_size - @intCast(u31, frame_addr.off); +} + fn 
allocFrameIndex(self: *Self, alloc: FrameAlloc) !FrameIndex { const frame_allocs_slice = self.frame_allocs.slice(); const frame_size = frame_allocs_slice.items(.abi_size); @@ -2615,87 +2619,90 @@ fn airFpext(self: *Self, inst: Air.Inst.Index) !void { fn airIntCast(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const result: MCValue = result: { + const src_ty = self.air.typeOf(ty_op.operand); + const src_int_info = src_ty.intInfo(self.target.*); - const src_ty = self.air.typeOf(ty_op.operand); - const src_int_info = src_ty.intInfo(self.target.*); - const src_abi_size = @intCast(u32, src_ty.abiSize(self.target.*)); - const src_mcv = try self.resolveInst(ty_op.operand); - const src_lock = switch (src_mcv) { - .register => |reg| self.register_manager.lockRegAssumeUnused(reg), - else => null, - }; - defer if (src_lock) |lock| self.register_manager.unlockReg(lock); + const dst_ty = self.air.typeOfIndex(inst); + const dst_int_info = dst_ty.intInfo(self.target.*); + const abi_size = @intCast(u32, dst_ty.abiSize(self.target.*)); - const dst_ty = self.air.typeOfIndex(inst); - const dst_int_info = dst_ty.intInfo(self.target.*); - const dst_abi_size = @intCast(u32, dst_ty.abiSize(self.target.*)); - const dst_mcv = if (dst_abi_size <= src_abi_size and - self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) - src_mcv - else - try self.allocRegOrMem(inst, true); + const min_ty = if (dst_int_info.bits < src_int_info.bits) dst_ty else src_ty; + const extend = switch (src_int_info.signedness) { + .signed => dst_int_info, + .unsigned => src_int_info, + }.signedness; - const min_ty = if (dst_int_info.bits < src_int_info.bits) dst_ty else src_ty; - const signedness: std.builtin.Signedness = if (dst_int_info.signedness == .signed and - src_int_info.signedness == .signed) .signed else .unsigned; - switch (dst_mcv) { - .register => |dst_reg| { - const min_abi_size = @min(dst_abi_size, src_abi_size); - const tag: Mir.Inst.FixedTag = switch (signedness) { - .signed => if (min_abi_size >= 4) .{ ._d, .movsx } else .{ ._, .movsx }, - .unsigned => if (min_abi_size >= 4) .{ ._, .mov } else .{ ._, .movzx }, - }; - const dst_alias = switch (tag[1]) { - .movsx => dst_reg.to64(), - .mov, .movzx => if (min_abi_size > 4) dst_reg.to64() else dst_reg.to32(), - else => unreachable, + const src_mcv = try self.resolveInst(ty_op.operand); + const src_storage_bits = switch (src_mcv) { + .register, .register_offset => 64, + .load_frame => |frame_addr| self.getFrameAddrSize(frame_addr) * 8, + else => src_int_info.bits, + }; + + const dst_mcv = if (dst_int_info.bits <= src_storage_bits and + self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) src_mcv else dst: { + const dst_mcv = try self.allocRegOrMem(inst, true); + try self.genCopy(min_ty, dst_mcv, src_mcv); + break :dst dst_mcv; + }; + + if (dst_int_info.bits <= src_int_info.bits) break :result if (dst_mcv.isRegister()) + .{ .register = registerAlias(dst_mcv.getReg().?, abi_size) } + else + dst_mcv; + + if (dst_mcv.isRegister()) { + try self.truncateRegister(src_ty, dst_mcv.getReg().?); + break :result .{ .register = registerAlias(dst_mcv.getReg().?, abi_size) }; + } + + const src_limbs_len = std.math.divCeil(u16, src_int_info.bits, 64) catch unreachable; + const dst_limbs_len = std.math.divCeil(u16, dst_int_info.bits, 64) catch unreachable; + + const high_mcv = dst_mcv.address().offset((src_limbs_len - 1) * 8).deref(); + const high_reg = try self.copyToTmpRegister(switch (src_int_info.signedness) { + .signed => Type.isize, + 
.unsigned => Type.usize, + }, high_mcv); + const high_lock = self.register_manager.lockRegAssumeUnused(high_reg); + defer self.register_manager.unlockReg(high_lock); + + const high_bits = src_int_info.bits % 64; + if (high_bits > 0) { + var high_pl = Type.Payload.Bits{ + .base = .{ .tag = switch (extend) { + .signed => .int_signed, + .unsigned => .int_unsigned, + } }, + .data = high_bits, }; - switch (src_mcv) { - .register => |src_reg| { - try self.asmRegisterRegister( - tag, - dst_alias, - registerAlias(src_reg, min_abi_size), + const high_ty = Type.initPayload(&high_pl.base); + try self.truncateRegister(high_ty, high_reg); + try self.genCopy(Type.usize, high_mcv, .{ .register = high_reg }); + } + + if (dst_limbs_len > src_limbs_len) try self.genInlineMemset( + dst_mcv.address().offset(src_limbs_len * 8), + switch (extend) { + .signed => extend: { + const extend_mcv = MCValue{ .register = high_reg }; + try self.genShiftBinOpMir( + .{ ._r, .sa }, + Type.isize, + extend_mcv, + .{ .immediate = 63 }, ); + break :extend extend_mcv; }, - .memory, .indirect, .load_frame => try self.asmRegisterMemory( - tag, - dst_alias, - src_mcv.mem(Memory.PtrSize.fromSize(min_abi_size)), - ), - else => return self.fail("TODO airIntCast from {s} to {s}", .{ - @tagName(src_mcv), - @tagName(dst_mcv), - }), - } - if (self.regExtraBits(min_ty) > 0) try self.truncateRegister(min_ty, dst_reg); - }, - else => { - try self.genCopy(min_ty, dst_mcv, src_mcv); - const extra = dst_abi_size * 8 - dst_int_info.bits; - if (extra > 0) { - try self.genShiftBinOpMir( - switch (signedness) { - .signed => .{ ._l, .sa }, - .unsigned => .{ ._l, .sh }, - }, - dst_ty, - dst_mcv, - .{ .immediate = extra }, - ); - try self.genShiftBinOpMir( - switch (signedness) { - .signed => .{ ._r, .sa }, - .unsigned => .{ ._r, .sh }, - }, - dst_ty, - dst_mcv, - .{ .immediate = extra }, - ); - } - }, - } - return self.finishAir(inst, dst_mcv, .{ ty_op.operand, .none, .none }); + .unsigned => .{ .immediate = 0 }, + }, + .{ .immediate = (dst_limbs_len - src_limbs_len) * 8 }, + ); + + break :result dst_mcv; + }; + return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } fn airTrunc(self: *Self, inst: Air.Inst.Index) !void { @@ -9879,63 +9886,6 @@ fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCVal } } -/// Like `genInlineMemcpy` but copies value from a register to an address via dereferencing -/// of destination register. -/// Boils down to MOV r/m64, r64. 
-fn genInlineMemcpyRegisterRegister( - self: *Self, - ty: Type, - dst_reg: Register, - src_reg: Register, - offset: i32, -) InnerError!void { - assert(dst_reg.bitSize() == 64); - - const dst_reg_lock = self.register_manager.lockReg(dst_reg); - defer if (dst_reg_lock) |lock| self.register_manager.unlockReg(lock); - - const src_reg_lock = self.register_manager.lockReg(src_reg); - defer if (src_reg_lock) |lock| self.register_manager.unlockReg(lock); - - const abi_size = @intCast(u32, ty.abiSize(self.target.*)); - - if (!math.isPowerOfTwo(abi_size)) { - const tmp_reg = try self.copyToTmpRegister(ty, .{ .register = src_reg }); - - var next_offset = offset; - var remainder = abi_size; - while (remainder > 0) { - const nearest_power_of_two = @as(u6, 1) << math.log2_int(u3, @intCast(u3, remainder)); - try self.asmMemoryRegister( - .{ ._, .mov }, - Memory.sib(Memory.PtrSize.fromSize(nearest_power_of_two), .{ - .base = dst_reg, - .disp = -next_offset, - }), - registerAlias(tmp_reg, nearest_power_of_two), - ); - - if (nearest_power_of_two > 1) { - try self.genShiftBinOpMir(.{ ._r, .sh }, ty, .{ .register = tmp_reg }, .{ - .immediate = nearest_power_of_two * 8, - }); - } - - remainder -= nearest_power_of_two; - next_offset -= nearest_power_of_two; - } - } else { - try self.asmMemoryRegister( - switch (src_reg.class()) { - .general_purpose, .segment => .{ ._, .mov }, - .sse => .{ ._ss, .mov }, - }, - Memory.sib(Memory.PtrSize.fromSize(abi_size), .{ .base = dst_reg, .disp = -offset }), - registerAlias(src_reg, abi_size), - ); - } -} - fn genInlineMemcpy(self: *Self, dst_ptr: MCValue, src_ptr: MCValue, len: MCValue) InnerError!void { try self.spillRegisters(&.{ .rdi, .rsi, .rcx }); try self.genSetReg(.rdi, Type.usize, dst_ptr); @@ -10036,20 +9986,56 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void { const result = result: { const dst_rc = regClassForType(dst_ty); const src_rc = regClassForType(src_ty); - const operand = try self.resolveInst(ty_op.operand); - if (dst_rc.supersetOf(src_rc) and self.reuseOperand(inst, ty_op.operand, 0, operand)) - break :result operand; + const src_mcv = try self.resolveInst(ty_op.operand); + if (dst_rc.supersetOf(src_rc) and self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) + break :result src_mcv; - const operand_lock = switch (operand) { - .register => |reg| self.register_manager.lockReg(reg), - .register_overflow => |ro| self.register_manager.lockReg(ro.reg), - else => null, - }; - defer if (operand_lock) |lock| self.register_manager.unlockReg(lock); + const src_lock = if (src_mcv.getReg()) |reg| self.register_manager.lockReg(reg) else null; + defer if (src_lock) |lock| self.register_manager.unlockReg(lock); - const dest = try self.allocRegOrMem(inst, true); - try self.genCopy(if (!dest.isMemory() or operand.isMemory()) dst_ty else src_ty, dest, operand); - break :result dest; + const dst_mcv = try self.allocRegOrMem(inst, true); + try self.genCopy( + if (!dst_mcv.isMemory() or src_mcv.isMemory()) dst_ty else src_ty, + dst_mcv, + src_mcv, + ); + + const dst_signedness = + if (dst_ty.isAbiInt()) dst_ty.intInfo(self.target.*).signedness else .unsigned; + const src_signedness = + if (src_ty.isAbiInt()) src_ty.intInfo(self.target.*).signedness else .unsigned; + const abi_size = @intCast(u16, dst_ty.abiSize(self.target.*)); + const bit_size = @intCast(u16, dst_ty.bitSize(self.target.*)); + const dst_limbs_len = std.math.divCeil(u16, bit_size, 64) catch unreachable; + if (dst_signedness != src_signedness and abi_size * 8 > bit_size) { + const high_reg = if 
(dst_mcv.isRegister()) + dst_mcv.getReg().? + else + try self.copyToTmpRegister( + Type.usize, + dst_mcv.address().offset((dst_limbs_len - 1) * 8).deref(), + ); + const high_lock = self.register_manager.lockReg(high_reg); + defer if (high_lock) |lock| self.register_manager.unlockReg(lock); + + var high_pl = Type.Payload.Bits{ + .base = .{ .tag = switch (dst_signedness) { + .signed => .int_signed, + .unsigned => .int_unsigned, + } }, + .data = bit_size % 64, + }; + const high_ty = Type.initPayload(&high_pl.base); + + try self.truncateRegister(high_ty, high_reg); + if (!dst_mcv.isRegister()) try self.genCopy( + Type.usize, + dst_mcv.address().offset((dst_limbs_len - 1) * 8).deref(), + .{ .register = high_reg }, + ); + } + + break :result dst_mcv; }; return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } diff --git a/test/behavior/bitcast.zig b/test/behavior/bitcast.zig index 2dcba00c40..4b8c363ac2 100644 --- a/test/behavior/bitcast.zig +++ b/test/behavior/bitcast.zig @@ -35,7 +35,6 @@ test "@bitCast iX -> uX (8, 16, 128)" { test "@bitCast iX -> uX exotic integers" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -82,7 +81,6 @@ fn conv_uN(comptime N: usize, x: std.meta.Int(.unsigned, N)) std.meta.Int(.signe test "bitcast uX to bytes" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO diff --git a/test/behavior/bugs/13128.zig b/test/behavior/bugs/13128.zig index a378b42818..944fa52c8a 100644 --- a/test/behavior/bugs/13128.zig +++ b/test/behavior/bugs/13128.zig @@ -14,7 +14,6 @@ fn foo(val: U) !void { test "runtime union init, most-aligned field != largest" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; diff --git a/test/behavior/widening.zig b/test/behavior/widening.zig index 12076697d8..d3efa73940 100644 --- a/test/behavior/widening.zig +++ b/test/behavior/widening.zig @@ -5,7 +5,6 @@ const builtin = @import("builtin"); const has_f80_rt = @import("builtin").cpu.arch == .x86_64; test "integer widening" { - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO -- cgit v1.2.3 From 80df8da82f793c87217ec673ff980751461f8164 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Mon, 15 May 2023 23:58:17 -0400 Subject: x86_64: initialize array sentinels --- src/arch/x86_64/CodeGen.zig | 6 ++++++ test/behavior/eval.zig | 2 -- 2 files 
changed, 6 insertions(+), 2 deletions(-) (limited to 'src/arch') diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 6063c38074..2e0c5f64f8 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -11162,6 +11162,12 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { const elem_off = @intCast(i32, elem_size * elem_i); try self.genSetMem(.{ .frame = frame_index }, elem_off, elem_ty, mat_elem_mcv); } + if (result_ty.sentinel()) |sentinel| try self.genSetMem( + .{ .frame = frame_index }, + @intCast(i32, elem_size * elements.len), + elem_ty, + try self.genTypedValue(.{ .ty = elem_ty, .val = sentinel }), + ); break :result .{ .load_frame = .{ .index = frame_index } }; }, .Vector => return self.fail("TODO implement aggregate_init for vectors", .{}), diff --git a/test/behavior/eval.zig b/test/behavior/eval.zig index f4c75149a8..d22eba4fa0 100644 --- a/test/behavior/eval.zig +++ b/test/behavior/eval.zig @@ -816,7 +816,6 @@ test "array concatenation peer resolves element types - pointer" { test "array concatenation sets the sentinel - value" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -855,7 +854,6 @@ test "array concatenation sets the sentinel - pointer" { test "array multiplication sets the sentinel - value" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO -- cgit v1.2.3 From 36ddab03fa5b29248a7e8fe1770414dd0a4cc833 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Tue, 16 May 2023 00:55:46 -0400 Subject: x86_64: fix multi-limb compare --- src/arch/x86_64/CodeGen.zig | 111 ++++++++++++++++++++++++++++++++++++++------ test/behavior/floatop.zig | 1 - 2 files changed, 96 insertions(+), 16 deletions(-) (limited to 'src/arch') diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 2e0c5f64f8..3033048a37 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -8171,24 +8171,105 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { const result = MCValue{ .eflags = switch (ty.zigTypeTag()) { else => result: { - var flipped = false; - const dst_mcv: MCValue = if (lhs_mcv.isRegister() or lhs_mcv.isMemory()) - lhs_mcv - else if (rhs_mcv.isRegister() or rhs_mcv.isMemory()) dst: { - flipped = true; - break :dst rhs_mcv; - } else .{ .register = try self.copyToTmpRegister(ty, lhs_mcv) }; - const dst_lock = switch (dst_mcv) { - .register => |reg| self.register_manager.lockReg(reg), - else => null, + const abi_size = @intCast(u16, ty.abiSize(self.target.*)); + const may_flip: enum { + may_flip, + must_flip, + must_not_flip, + } = if (abi_size > 8) switch (op) { + .lt, .gte => .must_not_flip, + .lte, .gt => .must_flip, + .eq, .neq => .may_flip, + } else .may_flip; + + const flipped = switch (may_flip) { + .may_flip => !lhs_mcv.isRegister() and !lhs_mcv.isMemory(), + .must_flip => true, + .must_not_flip => false, + }; + const unmat_dst_mcv = if (flipped) rhs_mcv else lhs_mcv; + 
const dst_mcv = if (unmat_dst_mcv.isRegister() or + (abi_size <= 8 and unmat_dst_mcv.isMemory())) unmat_dst_mcv else dst: { + const dst_mcv = try self.allocTempRegOrMem(ty, true); + try self.genCopy(ty, dst_mcv, unmat_dst_mcv); + break :dst dst_mcv; }; + const dst_lock = + if (dst_mcv.getReg()) |reg| self.register_manager.lockReg(reg) else null; defer if (dst_lock) |lock| self.register_manager.unlockReg(lock); + const src_mcv = if (flipped) lhs_mcv else rhs_mcv; + const src_lock = + if (src_mcv.getReg()) |reg| self.register_manager.lockReg(reg) else null; + defer if (src_lock) |lock| self.register_manager.unlockReg(lock); - try self.genBinOpMir(.{ ._, .cmp }, ty, dst_mcv, src_mcv); break :result Condition.fromCompareOperator( if (ty.isAbiInt()) ty.intInfo(self.target.*).signedness else .unsigned, - if (flipped) op.reverse() else op, + result_op: { + const flipped_op = if (flipped) op.reverse() else op; + if (abi_size > 8) switch (flipped_op) { + .lt, .gte => {}, + .lte, .gt => unreachable, + .eq, .neq => { + const dst_addr_mcv: MCValue = switch (dst_mcv) { + .memory, .indirect, .load_frame => dst_mcv.address(), + else => .{ .register = try self.copyToTmpRegister( + Type.usize, + dst_mcv.address(), + ) }, + }; + const dst_addr_lock = if (dst_addr_mcv.getReg()) |reg| + self.register_manager.lockReg(reg) + else + null; + defer if (dst_addr_lock) |lock| self.register_manager.unlockReg(lock); + + const src_addr_mcv: MCValue = switch (src_mcv) { + .memory, .indirect, .load_frame => src_mcv.address(), + else => .{ .register = try self.copyToTmpRegister( + Type.usize, + src_mcv.address(), + ) }, + }; + const src_addr_lock = if (src_addr_mcv.getReg()) |reg| + self.register_manager.lockReg(reg) + else + null; + defer if (src_addr_lock) |lock| self.register_manager.unlockReg(lock); + + const regs = try self.register_manager.allocRegs(2, .{ null, null }, gp); + const acc_reg = regs[0].to64(); + const locks = self.register_manager.lockRegsAssumeUnused(2, regs); + defer for (locks) |lock| self.register_manager.unlockReg(lock); + + const limbs_len = std.math.divCeil(u16, abi_size, 8) catch unreachable; + var limb_i: u16 = 0; + while (limb_i < limbs_len) : (limb_i += 1) { + const tmp_reg = regs[@min(limb_i, 1)].to64(); + try self.genSetReg( + tmp_reg, + Type.usize, + dst_addr_mcv.offset(limb_i * 8).deref(), + ); + try self.genBinOpMir( + .{ ._, .xor }, + Type.usize, + .{ .register = tmp_reg }, + src_addr_mcv.offset(limb_i * 8).deref(), + ); + if (limb_i > 0) try self.asmRegisterRegister( + .{ ._, .@"or" }, + acc_reg, + tmp_reg, + ); + } + try self.asmRegisterRegister(.{ ._, .@"test" }, acc_reg, acc_reg); + break :result_op flipped_op; + }, + }; + try self.genBinOpMir(.{ ._, .cmp }, ty, dst_mcv, src_mcv); + break :result_op flipped_op; + }, ); }, .Float => result: { @@ -10006,7 +10087,7 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void { if (src_ty.isAbiInt()) src_ty.intInfo(self.target.*).signedness else .unsigned; const abi_size = @intCast(u16, dst_ty.abiSize(self.target.*)); const bit_size = @intCast(u16, dst_ty.bitSize(self.target.*)); - const dst_limbs_len = std.math.divCeil(u16, bit_size, 64) catch unreachable; + const dst_limbs_len = math.divCeil(u16, bit_size, 64) catch unreachable; if (dst_signedness != src_signedness and abi_size * 8 > bit_size) { const high_reg = if (dst_mcv.isRegister()) dst_mcv.getReg().? 
@@ -10071,7 +10152,7 @@ fn airIntToFloat(self: *Self, inst: Air.Inst.Index) !void { if (src_ty.isAbiInt()) src_ty.intInfo(self.target.*).signedness else .unsigned; const dst_ty = self.air.typeOfIndex(inst); - const src_size = std.math.divCeil(u32, @max(switch (src_signedness) { + const src_size = math.divCeil(u32, @max(switch (src_signedness) { .signed => src_bits, .unsigned => src_bits + 1, }, 32), 8) catch unreachable; @@ -10124,7 +10205,7 @@ fn airFloatToInt(self: *Self, inst: Air.Inst.Index) !void { const dst_signedness = if (dst_ty.isAbiInt()) dst_ty.intInfo(self.target.*).signedness else .unsigned; - const dst_size = std.math.divCeil(u32, @max(switch (dst_signedness) { + const dst_size = math.divCeil(u32, @max(switch (dst_signedness) { .signed => dst_bits, .unsigned => dst_bits + 1, }, 32), 8) catch unreachable; diff --git a/test/behavior/floatop.zig b/test/behavior/floatop.zig index a3fd5b69e8..21fc87ff22 100644 --- a/test/behavior/floatop.zig +++ b/test/behavior/floatop.zig @@ -1145,7 +1145,6 @@ test "nan negation f64" { test "nan negation f128" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO -- cgit v1.2.3 From 01b63cd081b21954acb10b36f780c30f390e7245 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Tue, 16 May 2023 03:30:47 -0400 Subject: x86_64: delete some incorrect code --- src/arch/x86_64/CodeGen.zig | 55 ++++----------------------------------------- test/behavior/math.zig | 2 -- 2 files changed, 4 insertions(+), 53 deletions(-) (limited to 'src/arch') diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 3033048a37..0ac41d3c86 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -3250,34 +3250,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { self.regExtraBits(dst_ty) else dst_info.bits % 64; - const partial_mcv = if (dst_info.signedness == .signed and extra_bits > 0) dst: { - const rhs_lock: ?RegisterLock = switch (rhs) { - .register => |reg| self.register_manager.lockRegAssumeUnused(reg), - else => null, - }; - defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock); - - const dst_reg: Register = blk: { - if (lhs.isRegister()) break :blk lhs.register; - break :blk try self.copyToTmpRegister(dst_ty, lhs); - }; - const dst_mcv = MCValue{ .register = dst_reg }; - const dst_reg_lock = self.register_manager.lockRegAssumeUnused(dst_reg); - defer self.register_manager.unlockReg(dst_reg_lock); - - const rhs_mcv: MCValue = blk: { - if (rhs.isRegister() or rhs.isMemory()) break :blk rhs; - break :blk MCValue{ .register = try self.copyToTmpRegister(dst_ty, rhs) }; - }; - const rhs_mcv_lock: ?RegisterLock = switch (rhs_mcv) { - .register => |reg| self.register_manager.lockReg(reg), - else => null, - }; - defer if (rhs_mcv_lock) |lock| self.register_manager.unlockReg(lock); - - try self.genIntMulComplexOpMir(Type.isize, dst_mcv, rhs_mcv); - break :dst dst_mcv; - } else try self.genMulDivBinOp(.mul, null, dst_ty, src_ty, lhs, rhs); + const partial_mcv = try self.genMulDivBinOp(.mul, null, dst_ty, src_ty, lhs, rhs); switch (partial_mcv) { .register => |reg| if (extra_bits == 0) { @@ -3290,9 +3263,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { break :result 
.{ .load_frame = .{ .index = frame_index } }; }, else => { - // For now, this is the only supported multiply that doesn't fit in a register, - // so cc being set is impossible. - + // For now, this is the only supported multiply that doesn't fit in a register. assert(dst_info.bits <= 128 and src_pl.data == 64); const frame_index = @@ -3308,7 +3279,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { .{ .frame = frame_index }, @intCast(i32, tuple_ty.structFieldOffset(1, self.target.*)), tuple_ty.structFieldType(1), - .{ .immediate = 0 }, + .{ .immediate = 0 }, // cc being set is impossible ); } else try self.genSetFrameTruncatedOverflowCompare( tuple_ty, @@ -5586,31 +5557,13 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { const dst_lock = self.register_manager.lockReg(dst_reg); defer if (dst_lock) |lock| self.register_manager.unlockReg(lock); - // Shift by struct_field_offset. try self.genShiftBinOpMir( .{ ._r, .sh }, Type.usize, dst_mcv, .{ .immediate = field_off }, ); - - // Mask to field_bit_size bits - const field_bit_size = field_ty.bitSize(self.target.*); - const mask = ~@as(u64, 0) >> @intCast(u6, 64 - field_bit_size); - - const tmp_reg = try self.copyToTmpRegister(Type.usize, .{ .immediate = mask }); - try self.genBinOpMir(.{ ._, .@"and" }, Type.usize, dst_mcv, .{ .register = tmp_reg }); - - const signedness = - if (field_ty.isAbiInt()) field_ty.intInfo(self.target.*).signedness else .unsigned; - const field_byte_size = @intCast(u32, field_ty.abiSize(self.target.*)); - if (signedness == .signed and field_byte_size < 8) { - try self.asmRegisterRegister( - if (field_byte_size >= 4) .{ ._d, .movsx } else .{ ._, .movsx }, - dst_mcv.register, - registerAlias(dst_mcv.register, field_byte_size), - ); - } + if (self.regExtraBits(field_ty) > 0) try self.truncateRegister(field_ty, dst_reg); break :result if (field_rc.supersetOf(gp)) dst_mcv diff --git a/test/behavior/math.zig b/test/behavior/math.zig index 46f736bf74..cc85594c50 100644 --- a/test/behavior/math.zig +++ b/test/behavior/math.zig @@ -783,7 +783,6 @@ test "basic @mulWithOverflow" { test "extensive @mulWithOverflow" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; { @@ -1055,7 +1054,6 @@ test "@subWithOverflow" { test "@shlWithOverflow" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; { -- cgit v1.2.3 From 28c445addde840eec49ed0fd19cc19384a040085 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Tue, 16 May 2023 20:39:52 -0400 Subject: x86_64: fix 128-bit atomics on non-linux --- src/arch/x86_64/CodeGen.zig | 24 ++++++++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) (limited to 'src/arch') diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 0ac41d3c86..bdcbed2629 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -10214,14 +10214,30 @@ fn airCmpxchg(self: *Self, inst: Air.Inst.Index) !void { const exp_mcv = try self.resolveInst(extra.expected_value); if (val_abi_size > 8) { - try self.genSetReg(.rax, Type.usize, exp_mcv); - try 
self.genSetReg(.rdx, Type.usize, exp_mcv.address().offset(8).deref()); + const exp_addr_mcv: MCValue = switch (exp_mcv) { + .memory, .indirect, .load_frame => exp_mcv.address(), + else => .{ .register = try self.copyToTmpRegister(Type.usize, exp_mcv.address()) }, + }; + const exp_addr_lock = + if (exp_addr_mcv.getReg()) |reg| self.register_manager.lockReg(reg) else null; + defer if (exp_addr_lock) |lock| self.register_manager.unlockReg(lock); + + try self.genSetReg(.rax, Type.usize, exp_addr_mcv.deref()); + try self.genSetReg(.rdx, Type.usize, exp_addr_mcv.offset(8).deref()); } else try self.genSetReg(.rax, val_ty, exp_mcv); const new_mcv = try self.resolveInst(extra.new_value); const new_reg = if (val_abi_size > 8) new: { - try self.genSetReg(.rbx, Type.usize, new_mcv); - try self.genSetReg(.rcx, Type.usize, new_mcv.address().offset(8).deref()); + const new_addr_mcv: MCValue = switch (new_mcv) { + .memory, .indirect, .load_frame => new_mcv.address(), + else => .{ .register = try self.copyToTmpRegister(Type.usize, new_mcv.address()) }, + }; + const new_addr_lock = + if (new_addr_mcv.getReg()) |reg| self.register_manager.lockReg(reg) else null; + defer if (new_addr_lock) |lock| self.register_manager.unlockReg(lock); + + try self.genSetReg(.rbx, Type.usize, new_addr_mcv.deref()); + try self.genSetReg(.rcx, Type.usize, new_addr_mcv.offset(8).deref()); break :new null; } else try self.copyToTmpRegister(val_ty, new_mcv); const new_lock = if (new_reg) |reg| self.register_manager.lockRegAssumeUnused(reg) else null; -- cgit v1.2.3 From 35da95fe8765874a1ccffb0d7bfd523b14f44a4a Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Wed, 17 May 2023 00:23:11 -0400 Subject: x86_64: implement integer vector `@truncate` --- src/arch/x86_64/CodeGen.zig | 124 +++++++++++++++++++++++++++++++++++------- src/arch/x86_64/Encoding.zig | 3 + src/arch/x86_64/Mir.zig | 8 +++ src/arch/x86_64/encodings.zig | 21 +++++++ test/behavior/truncate.zig | 1 - 5 files changed, 136 insertions(+), 21 deletions(-) (limited to 'src/arch') diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index bdcbed2629..a258f732f0 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -2709,28 +2709,112 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const dst_ty = self.air.typeOfIndex(inst); - const dst_abi_size = dst_ty.abiSize(self.target.*); - if (dst_abi_size > 8) { - return self.fail("TODO implement trunc for abi sizes larger than 8", .{}); - } + const dst_abi_size = @intCast(u32, dst_ty.abiSize(self.target.*)); + const src_ty = self.air.typeOf(ty_op.operand); + const src_abi_size = @intCast(u32, src_ty.abiSize(self.target.*)); - const src_mcv = try self.resolveInst(ty_op.operand); - const src_lock = switch (src_mcv) { - .register => |reg| self.register_manager.lockRegAssumeUnused(reg), - else => null, - }; - defer if (src_lock) |lock| self.register_manager.unlockReg(lock); + const result = result: { + const src_mcv = try self.resolveInst(ty_op.operand); + const src_lock = + if (src_mcv.getReg()) |reg| self.register_manager.lockRegAssumeUnused(reg) else null; + defer if (src_lock) |lock| self.register_manager.unlockReg(lock); - const dst_mcv = if (src_mcv.isRegister() and self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) - src_mcv - else - try self.copyToRegisterWithInstTracking(inst, dst_ty, src_mcv); + const dst_mcv = if (src_mcv.isRegister() and self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) + src_mcv + else + try 
self.copyToRegisterWithInstTracking(inst, dst_ty, src_mcv); + + if (dst_ty.zigTypeTag() == .Vector) { + assert(src_ty.zigTypeTag() == .Vector and dst_ty.vectorLen() == src_ty.vectorLen()); + const dst_info = dst_ty.childType().intInfo(self.target.*); + const src_info = src_ty.childType().intInfo(self.target.*); + const mir_tag = if (@as(?Mir.Inst.FixedTag, switch (dst_info.bits) { + 8 => switch (src_info.bits) { + 16 => switch (dst_ty.vectorLen()) { + 1...8 => if (self.hasFeature(.avx)) .{ .vp_b, .ackusw } else .{ .p_b, .ackusw }, + 9...16 => if (self.hasFeature(.avx2)) .{ .vp_b, .ackusw } else null, + else => null, + }, + else => null, + }, + 16 => switch (src_info.bits) { + 32 => switch (dst_ty.vectorLen()) { + 1...4 => if (self.hasFeature(.avx)) + .{ .vp_w, .ackusd } + else if (self.hasFeature(.sse4_1)) + .{ .p_w, .ackusd } + else + null, + 5...8 => if (self.hasFeature(.avx2)) .{ .vp_w, .ackusd } else null, + else => null, + }, + else => null, + }, + else => null, + })) |tag| tag else return self.fail("TODO implement airTrunc for {}", .{ + dst_ty.fmt(self.bin_file.options.module.?), + }); - // when truncating a `u16` to `u5`, for example, those top 3 bits in the result - // have to be removed. this only happens if the dst if not a power-of-two size. - if (self.regExtraBits(dst_ty) > 0) try self.truncateRegister(dst_ty, dst_mcv.register.to64()); + var mask_pl = Value.Payload.U64{ + .base = .{ .tag = .int_u64 }, + .data = @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - dst_info.bits), + }; + const mask_val = Value.initPayload(&mask_pl.base); - return self.finishAir(inst, dst_mcv, .{ ty_op.operand, .none, .none }); + var splat_pl = Value.Payload.SubValue{ + .base = .{ .tag = .repeated }, + .data = mask_val, + }; + const splat_val = Value.initPayload(&splat_pl.base); + + var full_pl = Type.Payload.Array{ + .base = .{ .tag = .vector }, + .data = .{ + .len = @divExact(@as(u64, if (src_abi_size > 16) 256 else 128), src_info.bits), + .elem_type = src_ty.childType(), + }, + }; + const full_ty = Type.initPayload(&full_pl.base); + const full_abi_size = @intCast(u32, full_ty.abiSize(self.target.*)); + + const splat_mcv = try self.genTypedValue(.{ .ty = full_ty, .val = splat_val }); + const splat_addr_mcv: MCValue = switch (splat_mcv) { + .memory, .indirect, .load_frame => splat_mcv.address(), + else => .{ .register = try self.copyToTmpRegister(Type.usize, splat_mcv.address()) }, + }; + + const dst_reg = registerAlias(dst_mcv.getReg().?, src_abi_size); + if (self.hasFeature(.avx)) { + try self.asmRegisterRegisterMemory( + .{ .vp_, .@"and" }, + dst_reg, + dst_reg, + splat_addr_mcv.deref().mem(Memory.PtrSize.fromSize(full_abi_size)), + ); + try self.asmRegisterRegisterRegister(mir_tag, dst_reg, dst_reg, dst_reg); + } else { + try self.asmRegisterMemory( + .{ .p_, .@"and" }, + dst_reg, + splat_addr_mcv.deref().mem(Memory.PtrSize.fromSize(full_abi_size)), + ); + try self.asmRegisterRegister(mir_tag, dst_reg, dst_reg); + } + break :result dst_mcv; + } + + if (dst_abi_size > 8) { + return self.fail("TODO implement trunc for abi sizes larger than 8", .{}); + } + + // when truncating a `u16` to `u5`, for example, those top 3 bits in the result + // have to be removed. this only happens if the dst if not a power-of-two size. 
+ if (self.regExtraBits(dst_ty) > 0) + try self.truncateRegister(dst_ty, dst_mcv.register.to64()); + + break :result dst_mcv; + }; + return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } fn airBoolToInt(self: *Self, inst: Air.Inst.Index) !void { @@ -11081,8 +11165,8 @@ fn airSelect(self: *Self, inst: Air.Inst.Index) !void { } fn airShuffle(self: *Self, inst: Air.Inst.Index) !void { - const ty_op = self.air.instructions.items(.data)[inst].ty_op; - _ = ty_op; + const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; + _ = ty_pl; return self.fail("TODO implement airShuffle for x86_64", .{}); //return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } diff --git a/src/arch/x86_64/Encoding.zig b/src/arch/x86_64/Encoding.zig index 0aaf12013d..6ed0aeeff4 100644 --- a/src/arch/x86_64/Encoding.zig +++ b/src/arch/x86_64/Encoding.zig @@ -263,6 +263,7 @@ pub const Mnemonic = enum { fisttp, fld, // MMX movd, movq, + packssdw, packsswb, packuswb, paddb, paddd, paddq, paddsb, paddsw, paddusb, paddusw, paddw, pand, pandn, por, pxor, pmulhw, pmullw, @@ -319,6 +320,7 @@ pub const Mnemonic = enum { blendpd, blendps, blendvpd, blendvps, extractps, insertps, + packusdw, pextrb, pextrd, pextrq, pinsrb, pinsrd, pinsrq, pmaxsb, pmaxsd, pmaxud, pmaxuw, pminsb, pminsd, pminud, pminuw, @@ -351,6 +353,7 @@ pub const Mnemonic = enum { vmovupd, vmovups, vmulpd, vmulps, vmulsd, vmulss, vorpd, vorps, + vpackssdw, vpacksswb, vpackusdw, vpackuswb, vpaddb, vpaddd, vpaddq, vpaddsb, vpaddsw, vpaddusb, vpaddusw, vpaddw, vpand, vpandn, vpextrb, vpextrd, vpextrq, vpextrw, diff --git a/src/arch/x86_64/Mir.zig b/src/arch/x86_64/Mir.zig index 9f59a2afba..96b7742929 100644 --- a/src/arch/x86_64/Mir.zig +++ b/src/arch/x86_64/Mir.zig @@ -446,6 +446,12 @@ pub const Inst = struct { /// Bitwise logical xor of packed double-precision floating-point values xor, + /// Pack with signed saturation + ackssw, + /// Pack with signed saturation + ackssd, + /// Pack with unsigned saturation + ackusw, /// Add packed signed integers with signed saturation adds, /// Add packed unsigned integers with unsigned saturation @@ -596,6 +602,8 @@ pub const Inst = struct { /// Replicate single floating-point values movsldup, + /// Pack with unsigned saturation + ackusd, /// Blend packed single-precision floating-point values /// Blend scalar single-precision floating-point values /// Blend packed double-precision floating-point values diff --git a/src/arch/x86_64/encodings.zig b/src/arch/x86_64/encodings.zig index e087f6dfc7..a0cd1af0a7 100644 --- a/src/arch/x86_64/encodings.zig +++ b/src/arch/x86_64/encodings.zig @@ -996,6 +996,11 @@ pub const table = [_]Entry{ .{ .orpd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x56 }, 0, .none, .sse2 }, + .{ .packsswb, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x63 }, 0, .none, .sse2 }, + .{ .packssdw, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x6b }, 0, .none, .sse2 }, + + .{ .packuswb, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x67 }, 0, .none, .sse2 }, + .{ .paddb, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xfc }, 0, .none, .sse2 }, .{ .paddw, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xfd }, 0, .none, .sse2 }, .{ .paddd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xfe }, 0, .none, .sse2 }, @@ -1101,6 +1106,8 @@ pub const table = [_]Entry{ .{ .insertps, .rmi, &.{ .xmm, .xmm_m32, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x21 }, 0, .none, .sse4_1 }, + .{ .packusdw, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x2b }, 0, .none, .sse4_1 }, + .{ .pextrb, .mri, &.{ .r32_m8, 
.xmm, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x14 }, 0, .none, .sse4_1 }, .{ .pextrd, .mri, &.{ .rm32, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x16 }, 0, .none, .sse4_1 }, .{ .pextrq, .mri, &.{ .rm64, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x16 }, 0, .long, .sse4_1 }, @@ -1346,6 +1353,13 @@ pub const table = [_]Entry{ .{ .vorps, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x0f, 0x56 }, 0, .vex_128_wig, .avx }, .{ .vorps, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x0f, 0x56 }, 0, .vex_256_wig, .avx }, + .{ .vpacksswb, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x63 }, 0, .vex_128_wig, .avx }, + .{ .vpackssdw, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x6b }, 0, .vex_128_wig, .avx }, + + .{ .vpackusdw, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x2b }, 0, .vex_128_wig, .avx }, + + .{ .vpackuswb, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x67 }, 0, .vex_128_wig, .avx }, + .{ .vpaddb, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xfc }, 0, .vex_128_wig, .avx }, .{ .vpaddw, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xfd }, 0, .vex_128_wig, .avx }, .{ .vpaddd, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xfe }, 0, .vex_128_wig, .avx }, @@ -1508,6 +1522,13 @@ pub const table = [_]Entry{ .{ .vbroadcastss, .rm, &.{ .ymm, .xmm }, &.{ 0x66, 0x0f, 0x38, 0x18 }, 0, .vex_256_w0, .avx2 }, .{ .vbroadcastsd, .rm, &.{ .ymm, .xmm }, &.{ 0x66, 0x0f, 0x38, 0x19 }, 0, .vex_256_w0, .avx2 }, + .{ .vpacksswb, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x63 }, 0, .vex_256_wig, .avx2 }, + .{ .vpackssdw, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x6b }, 0, .vex_256_wig, .avx2 }, + + .{ .vpackusdw, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0x2b }, 0, .vex_256_wig, .avx2 }, + + .{ .vpackuswb, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x67 }, 0, .vex_256_wig, .avx2 }, + .{ .vpaddb, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xfc }, 0, .vex_256_wig, .avx2 }, .{ .vpaddw, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xfd }, 0, .vex_256_wig, .avx2 }, .{ .vpaddd, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xfe }, 0, .vex_256_wig, .avx2 }, diff --git a/test/behavior/truncate.zig b/test/behavior/truncate.zig index 1db2f0280f..d3091487b4 100644 --- a/test/behavior/truncate.zig +++ b/test/behavior/truncate.zig @@ -61,7 +61,6 @@ test "truncate on comptime integer" { test "truncate on vectors" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO -- cgit v1.2.3 From 79bdd2bd633d4817da07e20026756698514d5d7e Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Wed, 17 May 2023 20:39:55 -0400 Subject: x86_64: implement saturating add/sub for weird types --- src/arch/x86_64/CodeGen.zig | 55 +++++++++++++++++++++++++++++++-- test/behavior/saturating_arithmetic.zig | 2 -- 2 files changed, 52 insertions(+), 5 deletions(-) (limited to 'src/arch') diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index a258f732f0..7448bfb498 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -2967,20 +2967,43 @@ fn airAddSat(self: *Self, inst: Air.Inst.Index) !void { defer self.register_manager.unlockReg(limit_lock); const reg_bits = self.regBitSize(ty); + const reg_extra_bits = self.regExtraBits(ty); const cc: 
Condition = if (ty.isSignedInt()) cc: { + if (reg_extra_bits > 0) { + try self.genShiftBinOpMir(.{ ._l, .sa }, ty, dst_mcv, .{ .immediate = reg_extra_bits }); + } try self.genSetReg(limit_reg, ty, dst_mcv); try self.genShiftBinOpMir(.{ ._r, .sa }, ty, limit_mcv, .{ .immediate = reg_bits - 1 }); try self.genBinOpMir(.{ ._, .xor }, ty, limit_mcv, .{ .immediate = (@as(u64, 1) << @intCast(u6, reg_bits - 1)) - 1, }); + if (reg_extra_bits > 0) { + const shifted_rhs_reg = try self.copyToTmpRegister(ty, rhs_mcv); + const shifted_rhs_mcv = MCValue{ .register = shifted_rhs_reg }; + const shifted_rhs_lock = self.register_manager.lockRegAssumeUnused(shifted_rhs_reg); + defer self.register_manager.unlockReg(shifted_rhs_lock); + + try self.genShiftBinOpMir( + .{ ._l, .sa }, + ty, + shifted_rhs_mcv, + .{ .immediate = reg_extra_bits }, + ); + try self.genBinOpMir(.{ ._, .add }, ty, dst_mcv, shifted_rhs_mcv); + } else try self.genBinOpMir(.{ ._, .add }, ty, dst_mcv, rhs_mcv); break :cc .o; } else cc: { try self.genSetReg(limit_reg, ty, .{ - .immediate = @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - reg_bits), + .immediate = @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - ty.bitSize(self.target.*)), }); + + try self.genBinOpMir(.{ ._, .add }, ty, dst_mcv, rhs_mcv); + if (reg_extra_bits > 0) { + try self.genBinOpMir(.{ ._, .cmp }, ty, dst_mcv, limit_mcv); + break :cc .a; + } break :cc .c; }; - try self.genBinOpMir(.{ ._, .add }, ty, dst_mcv, rhs_mcv); const cmov_abi_size = @max(@intCast(u32, ty.abiSize(self.target.*)), 2); try self.asmCmovccRegisterRegister( @@ -2989,6 +3012,10 @@ fn airAddSat(self: *Self, inst: Air.Inst.Index) !void { cc, ); + if (reg_extra_bits > 0 and ty.isSignedInt()) { + try self.genShiftBinOpMir(.{ ._r, .sa }, ty, dst_mcv, .{ .immediate = reg_extra_bits }); + } + return self.finishAir(inst, dst_mcv, .{ bin_op.lhs, bin_op.rhs, .none }); } @@ -3018,18 +3045,36 @@ fn airSubSat(self: *Self, inst: Air.Inst.Index) !void { defer self.register_manager.unlockReg(limit_lock); const reg_bits = self.regBitSize(ty); + const reg_extra_bits = self.regExtraBits(ty); const cc: Condition = if (ty.isSignedInt()) cc: { + if (reg_extra_bits > 0) { + try self.genShiftBinOpMir(.{ ._l, .sa }, ty, dst_mcv, .{ .immediate = reg_extra_bits }); + } try self.genSetReg(limit_reg, ty, dst_mcv); try self.genShiftBinOpMir(.{ ._r, .sa }, ty, limit_mcv, .{ .immediate = reg_bits - 1 }); try self.genBinOpMir(.{ ._, .xor }, ty, limit_mcv, .{ .immediate = (@as(u64, 1) << @intCast(u6, reg_bits - 1)) - 1, }); + if (reg_extra_bits > 0) { + const shifted_rhs_reg = try self.copyToTmpRegister(ty, rhs_mcv); + const shifted_rhs_mcv = MCValue{ .register = shifted_rhs_reg }; + const shifted_rhs_lock = self.register_manager.lockRegAssumeUnused(shifted_rhs_reg); + defer self.register_manager.unlockReg(shifted_rhs_lock); + + try self.genShiftBinOpMir( + .{ ._l, .sa }, + ty, + shifted_rhs_mcv, + .{ .immediate = reg_extra_bits }, + ); + try self.genBinOpMir(.{ ._, .sub }, ty, dst_mcv, shifted_rhs_mcv); + } else try self.genBinOpMir(.{ ._, .sub }, ty, dst_mcv, rhs_mcv); break :cc .o; } else cc: { try self.genSetReg(limit_reg, ty, .{ .immediate = 0 }); + try self.genBinOpMir(.{ ._, .sub }, ty, dst_mcv, rhs_mcv); break :cc .c; }; - try self.genBinOpMir(.{ ._, .sub }, ty, dst_mcv, rhs_mcv); const cmov_abi_size = @max(@intCast(u32, ty.abiSize(self.target.*)), 2); try self.asmCmovccRegisterRegister( @@ -3038,6 +3083,10 @@ fn airSubSat(self: *Self, inst: Air.Inst.Index) !void { cc, ); + if (reg_extra_bits > 0 and ty.isSignedInt()) { + try 
self.genShiftBinOpMir(.{ ._r, .sa }, ty, dst_mcv, .{ .immediate = reg_extra_bits }); + } + return self.finishAir(inst, dst_mcv, .{ bin_op.lhs, bin_op.rhs, .none }); } diff --git a/test/behavior/saturating_arithmetic.zig b/test/behavior/saturating_arithmetic.zig index 77304b1c6b..18baada0e5 100644 --- a/test/behavior/saturating_arithmetic.zig +++ b/test/behavior/saturating_arithmetic.zig @@ -5,7 +5,6 @@ const maxInt = std.math.maxInt; const expect = std.testing.expect; test "saturating add" { - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -79,7 +78,6 @@ test "saturating add 128bit" { } test "saturating subtraction" { - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO -- cgit v1.2.3 From 47405b1a1c29b8a907997af18291eb6eb1cf3e02 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Thu, 18 May 2023 20:47:00 -0400 Subject: x86_64: fix `@bitCast` when the operand dies --- src/arch/x86_64/CodeGen.zig | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) (limited to 'src/arch') diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 7448bfb498..e835242379 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -10154,18 +10154,22 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void { const dst_rc = regClassForType(dst_ty); const src_rc = regClassForType(src_ty); const src_mcv = try self.resolveInst(ty_op.operand); - if (dst_rc.supersetOf(src_rc) and self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) - break :result src_mcv; const src_lock = if (src_mcv.getReg()) |reg| self.register_manager.lockReg(reg) else null; defer if (src_lock) |lock| self.register_manager.unlockReg(lock); - const dst_mcv = try self.allocRegOrMem(inst, true); - try self.genCopy( - if (!dst_mcv.isMemory() or src_mcv.isMemory()) dst_ty else src_ty, - dst_mcv, - src_mcv, - ); + const dst_mcv = if (dst_rc.supersetOf(src_rc) and + self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) + src_mcv + else dst: { + const dst_mcv = try self.allocRegOrMem(inst, true); + try self.genCopy( + if (!dst_mcv.isMemory() or src_mcv.isMemory()) dst_ty else src_ty, + dst_mcv, + src_mcv, + ); + break :dst dst_mcv; + }; const dst_signedness = if (dst_ty.isAbiInt()) dst_ty.intInfo(self.target.*).signedness else .unsigned; -- cgit v1.2.3 From eb77e3381fac5f5bba2254cf5a7369aaea930e4e Mon Sep 17 00:00:00 2001 From: Luuk de Gram Date: Thu, 27 Apr 2023 16:15:14 +0200 Subject: wasm: implement `@frameAddress` --- src/arch/wasm/CodeGen.zig | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) (limited to 'src/arch') diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index 11969d567a..3ae7f639fe 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -1242,7 +1242,7 @@ fn genFunc(func: *CodeGen) InnerError!void { // check if we have to initialize and allocate anything into the stack frame. // If so, create enough stack space and insert the instructions at the front of the list. 
- if (func.stack_size > 0) { + if (func.initial_stack_value != .none) { var prologue = std.ArrayList(Mir.Inst).init(func.gpa); defer prologue.deinit(); @@ -1963,11 +1963,11 @@ fn genInst(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { .tag_name => func.airTagName(inst), .error_set_has_value => func.airErrorSetHasValue(inst), + .frame_addr => func.airFrameAddress(inst), .mul_sat, .mod, .assembly, - .frame_addr, .bit_reverse, .is_err_ptr, .is_non_err_ptr, @@ -6969,3 +6969,12 @@ fn airAtomicStore(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { return func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs }); } + +fn airFrameAddress(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + if (func.initial_stack_value == .none) { + try func.initializeStack(); + } + try func.emitWValue(func.bottom_stack_value); + const result = try WValue.toLocal(.stack, func, Type.usize); + return func.finishAir(inst, result, &.{}); +} -- cgit v1.2.3 From 8236a26c605c8c4276f2062b3b6f55497cd45e53 Mon Sep 17 00:00:00 2001 From: Luuk de Gram Date: Sat, 29 Apr 2023 17:02:50 +0200 Subject: wasm: implement mul, shl and xor for big ints Uses compiler-rt for multiplication and shifting left, while lowering xor directly using regular instructions. --- src/arch/wasm/CodeGen.zig | 72 +++++++++++++++++++++++++++++------------------ 1 file changed, 45 insertions(+), 27 deletions(-) (limited to 'src/arch') diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index 3ae7f639fe..4296cc557f 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -2565,37 +2565,55 @@ fn binOp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError! fn binOpBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!WValue { if (ty.intInfo(func.target).bits > 128) { - return func.fail("TODO: Implement binary operation for big integer", .{}); + return func.fail("TODO: Implement binary operation for big integers larger than 128 bits", .{}); } - if (op != .add and op != .sub) { - return func.fail("TODO: Implement binary operation for big integers", .{}); - } - - const result = try func.allocStack(ty); - var lhs_high_bit = try (try func.load(lhs, Type.u64, 0)).toLocal(func, Type.u64); - defer lhs_high_bit.free(func); - var rhs_high_bit = try (try func.load(rhs, Type.u64, 0)).toLocal(func, Type.u64); - defer rhs_high_bit.free(func); - var high_op_res = try (try func.binOp(lhs_high_bit, rhs_high_bit, Type.u64, op)).toLocal(func, Type.u64); - defer high_op_res.free(func); - - const lhs_low_bit = try func.load(lhs, Type.u64, 8); - const rhs_low_bit = try func.load(rhs, Type.u64, 8); - const low_op_res = try func.binOp(lhs_low_bit, rhs_low_bit, Type.u64, op); + switch (op) { + .mul => return func.callIntrinsic("__multi3", &.{ ty, ty }, ty, &.{ lhs, rhs }), + .shr => return func.callIntrinsic("__lshrti3", &.{ ty, Type.i32 }, ty, &.{ lhs, rhs }), + .xor => { + const result = try func.allocStack(ty); + try func.emitWValue(result); + const lhs_high_bit = try func.load(lhs, Type.u64, 0); + const rhs_high_bit = try func.load(rhs, Type.u64, 0); + const xor_high_bit = try func.binOp(lhs_high_bit, rhs_high_bit, Type.u64, .xor); + try func.store(.stack, xor_high_bit, Type.u64, result.offset()); - const lt = if (op == .add) blk: { - break :blk try func.cmp(high_op_res, rhs_high_bit, Type.u64, .lt); - } else if (op == .sub) blk: { - break :blk try func.cmp(lhs_high_bit, rhs_high_bit, Type.u64, .lt); - } else unreachable; - const tmp = try func.intcast(lt, Type.u32, Type.u64); - var
tmp_op = try (try func.binOp(low_op_res, tmp, Type.u64, op)).toLocal(func, Type.u64); - defer tmp_op.free(func); + try func.emitWValue(result); + const lhs_low_bit = try func.load(lhs, Type.u64, 8); + const rhs_low_bit = try func.load(rhs, Type.u64, 8); + const xor_low_bit = try func.binOp(lhs_low_bit, rhs_low_bit, Type.u64, .xor); + try func.store(.stack, xor_low_bit, Type.u64, result.offset() + 8); + return result; + }, + .add, .sub => { + const result = try func.allocStack(ty); + var lhs_high_bit = try (try func.load(lhs, Type.u64, 0)).toLocal(func, Type.u64); + defer lhs_high_bit.free(func); + var rhs_high_bit = try (try func.load(rhs, Type.u64, 0)).toLocal(func, Type.u64); + defer rhs_high_bit.free(func); + var high_op_res = try (try func.binOp(lhs_high_bit, rhs_high_bit, Type.u64, op)).toLocal(func, Type.u64); + defer high_op_res.free(func); - try func.store(result, high_op_res, Type.u64, 0); - try func.store(result, tmp_op, Type.u64, 8); - return result; + const lhs_low_bit = try func.load(lhs, Type.u64, 8); + const rhs_low_bit = try func.load(rhs, Type.u64, 8); + const low_op_res = try func.binOp(lhs_low_bit, rhs_low_bit, Type.u64, op); + + const lt = if (op == .add) blk: { + break :blk try func.cmp(high_op_res, rhs_high_bit, Type.u64, .lt); + } else if (op == .sub) blk: { + break :blk try func.cmp(lhs_high_bit, rhs_high_bit, Type.u64, .lt); + } else unreachable; + const tmp = try func.intcast(lt, Type.u32, Type.u64); + var tmp_op = try (try func.binOp(low_op_res, tmp, Type.u64, op)).toLocal(func, Type.u64); + defer tmp_op.free(func); + + try func.store(result, high_op_res, Type.u64, 0); + try func.store(result, tmp_op, Type.u64, 8); + return result; + }, + else => return func.fail("TODO: Implement binary operation for big integers: '{s}'", .{@tagName(op)}), + } } const FloatOp = enum { -- cgit v1.2.3 From 992de8e61718a1a77666e473dc37872afbb80c98 Mon Sep 17 00:00:00 2001 From: Luuk de Gram Date: Mon, 1 May 2023 19:39:40 +0200 Subject: wasm: implement `@addWithOverflow` for 64bit ints --- src/arch/wasm/CodeGen.zig | 41 +++++++++++++++++++++++++++++++++++++---- 1 file changed, 37 insertions(+), 4 deletions(-) (limited to 'src/arch') diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index 4296cc557f..53c537c463 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -5553,7 +5553,7 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { return func.fail("TODO: Implement overflow arithmetic for integer bitsize: {d}", .{int_info.bits}); }; - if (wasm_bits > 32) { + if (wasm_bits > 64) { return func.fail("TODO: Implement `@mulWithOverflow` for integer bitsize: {d}", .{int_info.bits}); } @@ -5586,7 +5586,7 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { try func.addLabel(.local_set, overflow_bit.local.value); break :blk down_cast; } - } else if (int_info.signedness == .signed) blk: { + } else if (int_info.signedness == .signed and wasm_bits == 32) blk: { const lhs_abs = try func.signAbsValue(lhs, lhs_ty); const rhs_abs = try func.signAbsValue(rhs, lhs_ty); const bin_op = try (try func.binOp(lhs_abs, rhs_abs, lhs_ty, .mul)).toLocal(func, lhs_ty); @@ -5594,7 +5594,7 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { _ = try func.cmp(mul_abs, bin_op, lhs_ty, .neq); try func.addLabel(.local_set, overflow_bit.local.value); break :blk try func.wrapOperand(bin_op, lhs_ty); - } else blk: { + } else if (wasm_bits == 32) blk: { var bin_op = try (try func.binOp(lhs, rhs, lhs_ty, 
.mul)).toLocal(func, lhs_ty); defer bin_op.free(func); const shift_imm = if (wasm_bits == 32) @@ -5605,7 +5605,40 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { _ = try func.cmp(shr, zero, lhs_ty, .neq); try func.addLabel(.local_set, overflow_bit.local.value); break :blk try func.wrapOperand(bin_op, lhs_ty); - }; + } else if (int_info.bits == 64 and int_info.signedness == .unsigned) blk: { + const new_ty = Type.initTag(.u128); + var lhs_upcast = try (try func.intcast(lhs, lhs_ty, new_ty)).toLocal(func, lhs_ty); + defer lhs_upcast.free(func); + var rhs_upcast = try (try func.intcast(rhs, lhs_ty, new_ty)).toLocal(func, lhs_ty); + defer rhs_upcast.free(func); + const bin_op = try func.binOp(lhs_upcast, rhs_upcast, new_ty, .mul); + const lsb = try func.load(bin_op, lhs_ty, 8); + _ = try func.cmp(lsb, zero, lhs_ty, .neq); + try func.addLabel(.local_set, overflow_bit.local.value); + + break :blk try func.load(bin_op, lhs_ty, 0); + } else if (int_info.bits == 64 and int_info.signedness == .signed) blk: { + const shift_val: WValue = .{ .imm64 = 63 }; + var lhs_shifted = try (try func.binOp(lhs, shift_val, lhs_ty, .shr)).toLocal(func, lhs_ty); + defer lhs_shifted.free(func); + var rhs_shifted = try (try func.binOp(rhs, shift_val, lhs_ty, .shr)).toLocal(func, lhs_ty); + defer rhs_shifted.free(func); + + const bin_op = try func.callIntrinsic( + "__multi3", + &[_]Type{Type.i64} ** 4, + Type.initTag(.i128), + &.{ lhs, lhs_shifted, rhs, rhs_shifted }, + ); + const res = try func.allocLocal(lhs_ty); + const msb = try func.load(bin_op, lhs_ty, 0); + try func.addLabel(.local_tee, res.local.value); + const msb_shifted = try func.binOp(msb, shift_val, lhs_ty, .shr); + const lsb = try func.load(bin_op, lhs_ty, 8); + _ = try func.cmp(lsb, msb_shifted, lhs_ty, .neq); + try func.addLabel(.local_set, overflow_bit.local.value); + break :blk res; + } else return func.fail("TODO: @mulWithOverflow for integers between 32 and 64 bits", .{}); var bin_op_local = try bin_op.toLocal(func, lhs_ty); defer bin_op_local.free(func); -- cgit v1.2.3 From d353d208e295a01d6f844ccdb7e641a94e6fcb11 Mon Sep 17 00:00:00 2001 From: Luuk de Gram Date: Wed, 3 May 2023 20:16:52 +0200 Subject: wasm: implement `@mulWithOverflow` for big ints Currently we only support exact 128 bit *unsigned* integers --- src/arch/wasm/CodeGen.zig | 67 ++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 61 insertions(+), 6 deletions(-) (limited to 'src/arch') diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index 53c537c463..afe66c504e 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -5550,16 +5550,12 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const int_info = lhs_ty.intInfo(func.target); const wasm_bits = toWasmBits(int_info.bits) orelse { - return func.fail("TODO: Implement overflow arithmetic for integer bitsize: {d}", .{int_info.bits}); - }; - - if (wasm_bits > 64) { return func.fail("TODO: Implement `@mulWithOverflow` for integer bitsize: {d}", .{int_info.bits}); - } + }; const zero = switch (wasm_bits) { 32 => WValue{ .imm32 = 0 }, - 64 => WValue{ .imm64 = 0 }, + 64, 128 => WValue{ .imm64 = 0 }, else => unreachable, }; @@ -5638,6 +5634,65 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { _ = try func.cmp(lsb, msb_shifted, lhs_ty, .neq); try func.addLabel(.local_set, overflow_bit.local.value); break :blk res; + } else if (int_info.bits == 128 and int_info.signedness == .unsigned) blk: { + var 
lhs_msb = try (try func.load(lhs, Type.u64, 0)).toLocal(func, Type.u64); + defer lhs_msb.free(func); + var lhs_lsb = try (try func.load(lhs, Type.u64, 8)).toLocal(func, Type.u64); + defer lhs_lsb.free(func); + var rhs_msb = try (try func.load(rhs, Type.u64, 0)).toLocal(func, Type.u64); + defer rhs_msb.free(func); + var rhs_lsb = try (try func.load(rhs, Type.u64, 8)).toLocal(func, Type.u64); + defer rhs_lsb.free(func); + + const mul1 = try func.callIntrinsic( + "__multi3", + &[_]Type{Type.i64} ** 4, + Type.initTag(.i128), + &.{ lhs_lsb, zero, rhs_msb, zero }, + ); + const mul2 = try func.callIntrinsic( + "__multi3", + &[_]Type{Type.i64} ** 4, + Type.initTag(.i128), + &.{ rhs_lsb, zero, lhs_msb, zero }, + ); + const mul3 = try func.callIntrinsic( + "__multi3", + &[_]Type{Type.i64} ** 4, + Type.initTag(.i128), + &.{ lhs_msb, zero, rhs_msb, zero }, + ); + + const rhs_lsb_not_zero = try func.cmp(rhs_lsb, zero, Type.u64, .neq); + const lhs_lsb_not_zero = try func.cmp(lhs_lsb, zero, Type.u64, .neq); + const lsb_and = try func.binOp(rhs_lsb_not_zero, lhs_lsb_not_zero, Type.bool, .@"and"); + const mul1_lsb = try func.load(mul1, Type.u64, 8); + const mul1_lsb_not_zero = try func.cmp(mul1_lsb, zero, Type.u64, .neq); + const lsb_or1 = try func.binOp(lsb_and, mul1_lsb_not_zero, Type.bool, .@"or"); + const mul2_lsb = try func.load(mul2, Type.u64, 8); + const mul2_lsb_not_zero = try func.cmp(mul2_lsb, zero, Type.u64, .neq); + const lsb_or = try func.binOp(lsb_or1, mul2_lsb_not_zero, Type.bool, .@"or"); + + const mul1_msb = try func.load(mul1, Type.u64, 0); + const mul2_msb = try func.load(mul2, Type.u64, 0); + const mul_add1 = try func.binOp(mul1_msb, mul2_msb, Type.u64, .add); + + var mul3_lsb = try (try func.load(mul3, Type.u64, 8)).toLocal(func, Type.u64); + defer mul3_lsb.free(func); + var mul_add2 = try (try func.binOp(mul_add1, mul3_lsb, Type.u64, .add)).toLocal(func, Type.u64); + defer mul_add2.free(func); + const mul_add_lt = try func.cmp(mul_add2, mul3_lsb, Type.u64, .lt); + + // result for overflow bit + _ = try func.binOp(lsb_or, mul_add_lt, Type.bool, .@"or"); + try func.addLabel(.local_set, overflow_bit.local.value); + + const tmp_result = try func.allocStack(Type.initTag(.u128)); + try func.emitWValue(tmp_result); + const mul3_msb = try func.load(mul3, Type.u64, 0); + try func.store(.stack, mul3_msb, Type.u64, tmp_result.offset()); + try func.store(tmp_result, mul_add2, Type.u64, 8); + break :blk tmp_result; } else return func.fail("TODO: @mulWithOverflow for integers between 32 and 64 bits", .{}); var bin_op_local = try bin_op.toLocal(func, lhs_ty); defer bin_op_local.free(func); -- cgit v1.2.3 From e20976b7f209a768cb55a37e6a58ed177d76013e Mon Sep 17 00:00:00 2001 From: Luuk de Gram Date: Sat, 6 May 2023 17:10:48 +0200 Subject: wasm: fix miscompilation for shifting This fix ensures that when we are shifting left or right, both operands have the same WebAssembly type. For example, it is not possible to shift a 64-bit integer by a 32-bit integer; such code fails WebAssembly's validator. By first coercing the values to the same type, we ensure we satisfy the validator.
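
To make the validator rule concrete, here is a minimal, self-contained Zig sketch of the coercion decision (an editorial illustration, not part of this patch; the standalone `toWasmBits` helper below is modeled after the function of the same name in CodeGen.zig, and its thresholds are assumptions):

    const std = @import("std");

    const WasmBits = enum { w32, w64, w128 };

    // A Zig integer lowers to the smallest WebAssembly representation that
    // can hold it; anything above 128 bits is unsupported in this sketch.
    fn toWasmBits(bits: u16) ?WasmBits {
        if (bits <= 32) return .w32;
        if (bits <= 64) return .w64;
        if (bits <= 128) return .w128;
        return null;
    }

    // i64.shl / i64.shr_u require *both* operands to be i64, so a shift such
    // as `u64 >> u6` must first widen the u6 amount (an i32 on the wasm
    // stack) to the lhs representation. 128-bit shifts are routed through
    // compiler-rt, which already takes the shift amount as a 32-bit value,
    // so they need no coercion.
    fn shiftNeedsCoercion(lhs_bits: u16, rhs_bits: u16) bool {
        const lhs_repr = toWasmBits(lhs_bits).?;
        const rhs_repr = toWasmBits(rhs_bits).?;
        return lhs_repr != rhs_repr and lhs_repr != .w128;
    }

    test "shift amount widening" {
        try std.testing.expect(shiftNeedsCoercion(64, 6)); // u64 >> u6
        try std.testing.expect(!shiftNeedsCoercion(32, 5)); // u32 >> u5
        try std.testing.expect(!shiftNeedsCoercion(128, 7)); // via compiler-rt
    }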
--- src/arch/wasm/CodeGen.zig | 62 +++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 55 insertions(+), 7 deletions(-) (limited to 'src/arch') diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index afe66c504e..799c4a40a2 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -2523,10 +2523,34 @@ fn airBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { const bin_op = func.air.instructions.items(.data)[inst].bin_op; const lhs = try func.resolveInst(bin_op.lhs); const rhs = try func.resolveInst(bin_op.rhs); - const ty = func.air.typeOf(bin_op.lhs); + const lhs_ty = func.air.typeOf(bin_op.lhs); + const rhs_ty = func.air.typeOf(bin_op.rhs); + + // For certain operations, such as shifting, the types are different. + // When converting this to a WebAssembly type, they *must* match to perform + // an operation. For this reason we verify if the WebAssembly type is different, in which + // case we first coerce the operands to the same type before performing the operation. + // For big integers we can ignore this as we will call into compiler-rt which handles this. + const result = switch (op) { + .shr, .shl => res: { + const lhs_wasm_bits = toWasmBits(@intCast(u16, lhs_ty.bitSize(func.target))) orelse { + return func.fail("TODO: implement '{s}' for types larger than 128 bits", .{@tagName(op)}); + }; + const rhs_wasm_bits = toWasmBits(@intCast(u16, rhs_ty.bitSize(func.target))).?; + const new_rhs = if (lhs_wasm_bits != rhs_wasm_bits and lhs_wasm_bits != 128) blk: { + const tmp = try func.intcast(rhs, rhs_ty, lhs_ty); + break :blk try tmp.toLocal(func, lhs_ty); + } else rhs; + const stack_result = try func.binOp(lhs, new_rhs, lhs_ty, op); + break :res try stack_result.toLocal(func, lhs_ty); + }, + else => res: { + const stack_result = try func.binOp(lhs, rhs, lhs_ty, op); + break :res try stack_result.toLocal(func, lhs_ty); + }, + }; - const stack_value = try func.binOp(lhs, rhs, ty, op); - func.finishAir(inst, try stack_value.toLocal(func, ty), &.{ bin_op.lhs, bin_op.rhs }); + func.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs }); } /// Performs a binary operation on the given `WValue`'s @@ -2769,14 +2793,38 @@ fn airWrapBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { const lhs = try func.resolveInst(bin_op.lhs); const rhs = try func.resolveInst(bin_op.rhs); - const ty = func.air.typeOf(bin_op.lhs); + const lhs_ty = func.air.typeOf(bin_op.lhs); + const rhs_ty = func.air.typeOf(bin_op.rhs); - if (ty.zigTypeTag() == .Vector) { + if (lhs_ty.zigTypeTag() == .Vector or rhs_ty.zigTypeTag() == .Vector) { return func.fail("TODO: Implement wrapping arithmetic for vectors", .{}); } - const result = try (try func.wrapBinOp(lhs, rhs, ty, op)).toLocal(func, ty); - func.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs }); + // For certain operations, such as shifting, the types are different. + // When converting this to a WebAssembly type, they *must* match to perform + // an operation. For this reason we verify if the WebAssembly type is different, in which + // case we first coerce the operands to the same type before performing the operation. + // For big integers we can ignore this as we will call into compiler-rt which handles this. 
+ const result = switch (op) { + .shr, .shl => res: { + const lhs_wasm_bits = toWasmBits(@intCast(u16, lhs_ty.bitSize(func.target))) orelse { + return func.fail("TODO: implement '{s}' for types larger than 128 bits", .{@tagName(op)}); + }; + const rhs_wasm_bits = toWasmBits(@intCast(u16, rhs_ty.bitSize(func.target))).?; + const new_rhs = if (lhs_wasm_bits != rhs_wasm_bits and lhs_wasm_bits != 128) blk: { + const tmp = try func.intcast(rhs, rhs_ty, lhs_ty); + break :blk try tmp.toLocal(func, lhs_ty); + } else rhs; + const stack_result = try func.wrapBinOp(lhs, new_rhs, lhs_ty, op); + break :res try stack_result.toLocal(func, lhs_ty); + }, + else => res: { + const stack_result = try func.wrapBinOp(lhs, rhs, lhs_ty, op); + break :res try stack_result.toLocal(func, lhs_ty); + }, + }; + + return func.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs }); } /// Performs a wrapping binary operation. -- cgit v1.2.3 From 67d27dbe631d1292be363f4121217d66e2d7fd0f Mon Sep 17 00:00:00 2001 From: Luuk de Gram Date: Sat, 6 May 2023 17:46:51 +0200 Subject: wasm: fix liveness bugs Make sure to increase the reference count for `intcast` when the operand doesn't require any casting of the respective WebAssembly type. Function arguments have a reserved slot, and therefore cannot be re-used arbitrarily --- src/arch/wasm/CodeGen.zig | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) (limited to 'src/arch') diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index 799c4a40a2..c2d2ffd8e4 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -881,6 +881,9 @@ fn processDeath(func: *CodeGen, ref: Air.Inst.Ref) void { const value = func.currentBranch().values.getPtr(ref) orelse return; if (value.* != .local) return; log.debug("Decreasing reference for ref: %{?d}\n", .{Air.refToIndex(ref)}); + if (value.local.value < func.arg_index) { + return; // function arguments can never be re-used + } value.local.references -= 1; // if this panics, a call to `reuseOperand` was forgotten by the developer if (value.local.references == 0) { value.free(func); @@ -4021,7 +4024,13 @@ fn airIntcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { return func.fail("todo Wasm intcast for bitsize > 128", .{}); } - const result = try (try func.intcast(operand, operand_ty, ty)).toLocal(func, ty); + const op_bits = toWasmBits(@intCast(u16, ty.bitSize(func.target))).?; + const wanted_bits = toWasmBits(@intCast(u16, ty.bitSize(func.target))).?; + const result = if (op_bits == wanted_bits) + func.reuseOperand(ty_op.operand, operand) + else + try (try func.intcast(operand, operand_ty, ty)).toLocal(func, ty); + func.finishAir(inst, result, &.{}); } -- cgit v1.2.3 From f2860bb4f40565e43f51757e6cb604bb2df16ae0 Mon Sep 17 00:00:00 2001 From: Luuk de Gram Date: Sun, 7 May 2023 18:48:25 +0200 Subject: wasm: more liveness fixes --- src/arch/wasm/CodeGen.zig | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'src/arch') diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index c2d2ffd8e4..29253797d2 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -880,10 +880,11 @@ fn processDeath(func: *CodeGen, ref: Air.Inst.Ref) void { // TODO: Upon branch consolidation free any locals if needed. 
const value = func.currentBranch().values.getPtr(ref) orelse return; if (value.* != .local) return; - log.debug("Decreasing reference for ref: %{?d}\n", .{Air.refToIndex(ref)}); - if (value.local.value < func.arg_index) { + const reserved_indexes = func.args.len + @boolToInt(func.return_value != .none); + if (value.local.value < reserved_indexes) { return; // function arguments can never be re-used } + log.debug("Decreasing reference for ref: %{?d}, using local '{d}'\n", .{ Air.refToIndex(ref), value.local.value }); value.local.references -= 1; // if this panics, a call to `reuseOperand` was forgotten by the developer if (value.local.references == 0) { value.free(func); @@ -4024,7 +4025,7 @@ fn airIntcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { return func.fail("todo Wasm intcast for bitsize > 128", .{}); } - const op_bits = toWasmBits(@intCast(u16, ty.bitSize(func.target))).?; + const op_bits = toWasmBits(@intCast(u16, operand_ty.bitSize(func.target))).?; const wanted_bits = toWasmBits(@intCast(u16, ty.bitSize(func.target))).?; const result = if (op_bits == wanted_bits) func.reuseOperand(ty_op.operand, operand) -- cgit v1.2.3 From 99422cb5284f3e15c1b5a8598a6b1622c0e7b6ca Mon Sep 17 00:00:00 2001 From: Luuk de Gram Date: Mon, 8 May 2023 20:22:55 +0200 Subject: wasm: add `dead` tag to `WValue` This new tag is used for freed locals that are not allowed to have any remaining references pointing to it. This new tag allows us to easily identify liveness bugs. Previously we would set the entire region to `undefined` which would incorrectly set the tag to `function_index`, making codegen think it was a valid `WValue` while it wasn't. --- src/arch/wasm/CodeGen.zig | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) (limited to 'src/arch') diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index 29253797d2..f602cf80a7 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -29,6 +29,9 @@ const errUnionErrorOffset = codegen.errUnionErrorOffset; /// Wasm Value, created when generating an instruction const WValue = union(enum) { + /// `WValue` which has been freed and may no longer hold + /// any references. 
+ dead: void, /// May be referenced but is unused none: void, /// The value lives on top of the stack @@ -86,6 +89,7 @@ const WValue = union(enum) { fn offset(value: WValue) u32 { switch (value) { .stack_offset => |stack_offset| return stack_offset.value, + .dead => unreachable, else => return 0, } } @@ -123,7 +127,7 @@ const WValue = union(enum) { .f64 => gen.free_locals_f64.append(gen.gpa, local_value) catch return, .v128 => gen.free_locals_v128.append(gen.gpa, local_value) catch return, } - value.* = undefined; + value.* = .dead; } }; @@ -832,6 +836,7 @@ const Branch = struct { fn deinit(branch: *Branch, gpa: Allocator) void { branch.values.deinit(gpa); + branch.* = undefined; } }; @@ -884,7 +889,7 @@ fn processDeath(func: *CodeGen, ref: Air.Inst.Ref) void { if (value.local.value < reserved_indexes) { return; // function arguments can never be re-used } - log.debug("Decreasing reference for ref: %{?d}, using local '{d}'\n", .{ Air.refToIndex(ref), value.local.value }); + log.debug("Decreasing reference for ref: %{?d}, using local '{d}'", .{ Air.refToIndex(ref), value.local.value }); value.local.references -= 1; // if this panics, a call to `reuseOperand` was forgotten by the developer if (value.local.references == 0) { value.free(func); @@ -1030,6 +1035,7 @@ fn genBlockType(ty: Type, target: std.Target) u8 { /// Writes the bytecode depending on the given `WValue` in `val` fn emitWValue(func: *CodeGen, value: WValue) InnerError!void { switch (value) { + .dead => unreachable, // reference to free'd `WValue` (missing reuseOperand?) .none, .stack => {}, // no-op .local => |idx| try func.addLabel(.local_get, idx.value), .imm32 => |val| try func.addImm32(@bitCast(i32, val)), @@ -1226,6 +1232,7 @@ fn genFunc(func: *CodeGen) InnerError!void { defer { var outer_branch = func.branches.pop(); outer_branch.deinit(func.gpa); + assert(func.branches.items.len == 0); // missing branch merge } // Generate MIR for function body try func.genBody(func.air.getMainBody()); -- cgit v1.2.3 From 8be69f41328ebc0331434fd9d4008985463188c9 Mon Sep 17 00:00:00 2001 From: Luuk de Gram Date: Tue, 9 May 2023 19:50:00 +0200 Subject: wasm: simplify merging of branches Rather than adding all values that were generated in the child branch, we simply discard them as outer branches cannot refer to values produced from an inner branch. 
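In rough Zig terms, the discipline this patch settles on looks like the
sketch below (`genBodyInOwnBranch` is a hypothetical helper, not a
function in this file; the real code also pre-reserves capacity for the
liveness deaths of each body):

    // Generate a body inside its own Branch, then discard the Branch.
    // Nothing is merged upward, because an outer branch can never refer
    // to a value produced inside an inner branch.
    fn genBodyInOwnBranch(func: *CodeGen, body: []const Air.Inst.Index) !void {
        try func.branches.append(func.gpa, .{});
        defer {
            var inner = func.branches.pop();
            inner.deinit(func.gpa);
        }
        try func.genBody(body);
    }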
--- src/arch/wasm/CodeGen.zig | 72 ++++++++++++++++++++--------------------------- 1 file changed, 30 insertions(+), 42 deletions(-) (limited to 'src/arch') diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index f602cf80a7..04b9b7194c 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -3259,9 +3259,13 @@ fn airBlock(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { .label = func.block_depth, .value = block_result, }); + try func.genBody(body); try func.endBlock(); + const liveness = func.liveness.getBlock(inst); + try func.currentBranch().values.ensureUnusedCapacity(func.gpa, liveness.deaths.len); + func.finishAir(inst, block_result, &.{}); } @@ -3316,41 +3320,27 @@ fn airCondBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { try func.addLabel(.br_if, 0); try func.branches.ensureUnusedCapacity(func.gpa, 2); - - func.branches.appendAssumeCapacity(.{}); - try func.currentBranch().values.ensureUnusedCapacity(func.gpa, @intCast(u32, liveness_condbr.else_deaths.len)); - try func.genBody(else_body); - try func.endBlock(); - var else_stack = func.branches.pop(); - defer else_stack.deinit(func.gpa); + { + func.branches.appendAssumeCapacity(.{}); + try func.currentBranch().values.ensureUnusedCapacity(func.gpa, @intCast(u32, liveness_condbr.else_deaths.len)); + try func.genBody(else_body); + try func.endBlock(); + var else_stack = func.branches.pop(); + else_stack.deinit(func.gpa); + } // Outer block that matches the condition - func.branches.appendAssumeCapacity(.{}); - try func.currentBranch().values.ensureUnusedCapacity(func.gpa, @intCast(u32, liveness_condbr.then_deaths.len)); - try func.genBody(then_body); - var then_stack = func.branches.pop(); - defer then_stack.deinit(func.gpa); - - try func.mergeBranch(&else_stack); - try func.mergeBranch(&then_stack); + { + func.branches.appendAssumeCapacity(.{}); + try func.currentBranch().values.ensureUnusedCapacity(func.gpa, @intCast(u32, liveness_condbr.then_deaths.len)); + try func.genBody(then_body); + var then_stack = func.branches.pop(); + then_stack.deinit(func.gpa); + } func.finishAir(inst, .none, &.{}); } -fn mergeBranch(func: *CodeGen, branch: *const Branch) !void { - const parent = func.currentBranch(); - - const target_slice = branch.values.entries.slice(); - const target_keys = target_slice.items(.key); - const target_values = target_slice.items(.value); - - try parent.values.ensureTotalCapacity(func.gpa, parent.values.capacity() + branch.values.count()); - for (target_keys, 0..) 
|key, index| {
-        // TODO: process deaths from branches
-        parent.values.putAssumeCapacity(key, target_values[index]);
-    }
-}
-
 fn airCmp(func: *CodeGen, inst: Air.Inst.Index, op: std.math.CompareOperator) InnerError!void {
     const bin_op = func.air.instructions.items(.data)[inst].bin_op;
 
@@ -3860,30 +3850,21 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
             }
         }
         func.branches.appendAssumeCapacity(.{});
-        try func.currentBranch().values.ensureUnusedCapacity(func.gpa, liveness.deaths[index].len);
-        for (liveness.deaths[index]) |operand| {
-            func.processDeath(Air.indexToRef(operand));
-        }
         try func.genBody(case.body);
         try func.endBlock();
         var case_branch = func.branches.pop();
-        defer case_branch.deinit(func.gpa);
-        try func.mergeBranch(&case_branch);
+        case_branch.deinit(func.gpa);
     }
 
     if (has_else_body) {
         func.branches.appendAssumeCapacity(.{});
         const else_deaths = liveness.deaths.len - 1;
         try func.currentBranch().values.ensureUnusedCapacity(func.gpa, liveness.deaths[else_deaths].len);
-        for (liveness.deaths[else_deaths]) |operand| {
-            func.processDeath(Air.indexToRef(operand));
-        }
         try func.genBody(else_body);
         try func.endBlock();
         var else_branch = func.branches.pop();
-        defer else_branch.deinit(func.gpa);
-        try func.mergeBranch(&else_branch);
+        else_branch.deinit(func.gpa);
     }
     func.finishAir(inst, .none, &.{});
 }
@@ -5992,7 +5973,7 @@ fn airTry(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const extra = func.air.extraData(Air.Try, pl_op.payload);
     const body = func.air.extra[extra.end..][0..extra.data.body_len];
     const err_union_ty = func.air.typeOf(pl_op.operand);
-    const result = try lowerTry(func, err_union, body, err_union_ty, false);
+    const result = try lowerTry(func, inst, err_union, body, err_union_ty, false);
     func.finishAir(inst, result, &.{pl_op.operand});
 }
 
@@ -6002,12 +5983,13 @@ fn airTryPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const err_union_ptr = try func.resolveInst(extra.data.ptr);
     const body = func.air.extra[extra.end..][0..extra.data.body_len];
     const err_union_ty = func.air.typeOf(extra.data.ptr).childType();
-    const result = try lowerTry(func, err_union_ptr, body, err_union_ty, true);
+    const result = try lowerTry(func, inst, err_union_ptr, body, err_union_ty, true);
     func.finishAir(inst, result, &.{extra.data.ptr});
 }
 
 fn lowerTry(
     func: *CodeGen,
+    inst: Air.Inst.Index,
     err_union: WValue,
     body: []const Air.Inst.Index,
     err_union_ty: Type,
@@ -6035,8 +6017,14 @@ fn lowerTry(
         }
         try func.addTag(.i32_eqz);
         try func.addLabel(.br_if, 0); // jump out of block when error is '0'
+
+        const liveness = func.liveness.getCondBr(inst);
+        try func.branches.append(func.gpa, .{});
+        try func.currentBranch().values.ensureUnusedCapacity(func.gpa, liveness.else_deaths.len + liveness.then_deaths.len);
         try func.genBody(body);
         try func.endBlock();
+        var branch = func.branches.pop();
+        branch.deinit(func.gpa);
     }
 
     // if we reach here it means error was not set, and we want the payload
-- 
cgit v1.2.3


From 43e89026ac90ee6e8c2cb066068eb8ff10352ac1 Mon Sep 17 00:00:00 2001
From: Luuk de Gram
Date: Wed, 10 May 2023 17:03:17 +0200
Subject: wasm: fix double free of locals

A copy was being made of a WValue variable, which meant the call to
`free` would append the local held by said WValue to the free list
twice. This led to the same local being reused even though it wasn't
free, and it would then be overwritten by a new value.
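The failure mode can be pictured with a short hypothetical snippet,
assuming (as in this backend) that `free` pushes the local's index onto
a free list:

    var a = try func.allocLocal(ty); // hands out local N
    var b = a; // plain copy of the union, carrying the same local index N
    a.free(func); // pushes N onto the free list...
    b.free(func); // ...and pushes N a second time: two later allocLocal
    // calls can now both receive N while one use of it is still live.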
--- src/arch/wasm/CodeGen.zig | 27 +++++++++++++++++---------- 1 file changed, 17 insertions(+), 10 deletions(-) (limited to 'src/arch') diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index 04b9b7194c..6ad22de43c 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -127,6 +127,7 @@ const WValue = union(enum) { .f64 => gen.free_locals_f64.append(gen.gpa, local_value) catch return, .v128 => gen.free_locals_v128.append(gen.gpa, local_value) catch return, } + log.debug("freed local ({d}) of type {}", .{ local_value, valtype }); value.* = .dead; } }; @@ -1092,27 +1093,27 @@ fn allocLocal(func: *CodeGen, ty: Type) InnerError!WValue { const valtype = typeToValtype(ty, func.target); switch (valtype) { .i32 => if (func.free_locals_i32.popOrNull()) |index| { - log.debug("reusing local ({d}) of type {}\n", .{ index, valtype }); + log.debug("reusing local ({d}) of type {}", .{ index, valtype }); return WValue{ .local = .{ .value = index, .references = 1 } }; }, .i64 => if (func.free_locals_i64.popOrNull()) |index| { - log.debug("reusing local ({d}) of type {}\n", .{ index, valtype }); + log.debug("reusing local ({d}) of type {}", .{ index, valtype }); return WValue{ .local = .{ .value = index, .references = 1 } }; }, .f32 => if (func.free_locals_f32.popOrNull()) |index| { - log.debug("reusing local ({d}) of type {}\n", .{ index, valtype }); + log.debug("reusing local ({d}) of type {}", .{ index, valtype }); return WValue{ .local = .{ .value = index, .references = 1 } }; }, .f64 => if (func.free_locals_f64.popOrNull()) |index| { - log.debug("reusing local ({d}) of type {}\n", .{ index, valtype }); + log.debug("reusing local ({d}) of type {}", .{ index, valtype }); return WValue{ .local = .{ .value = index, .references = 1 } }; }, .v128 => if (func.free_locals_v128.popOrNull()) |index| { - log.debug("reusing local ({d}) of type {}\n", .{ index, valtype }); + log.debug("reusing local ({d}) of type {}", .{ index, valtype }); return WValue{ .local = .{ .value = index, .references = 1 } }; }, } - log.debug("new local of type {}\n", .{valtype}); + log.debug("new local of type {}", .{valtype}); // no local was free to be re-used, so allocate a new local instead return func.ensureAllocLocal(ty); } @@ -4948,8 +4949,15 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { else => unreachable, } }; - // TODO: this is incorrect Liveness handling code - func.finishAir(inst, result, &.{}); + + if (elements.len <= Liveness.bpi - 1) { + var buf = [1]Air.Inst.Ref{.none} ** (Liveness.bpi - 1); + @memcpy(buf[0..elements.len], elements); + return func.finishAir(inst, result, &buf); + } + var bt = try func.iterateBigTomb(inst, elements.len); + for (elements) |arg| bt.feed(arg); + return bt.finishAir(result); } fn airUnionInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { @@ -5436,11 +5444,10 @@ fn airAddSubWithOverflow(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerErro }; var bin_op = try (try func.binOp(lhs, rhs, lhs_ty, op)).toLocal(func, lhs_ty); - defer bin_op.free(func); var result = if (wasm_bits != int_info.bits) blk: { break :blk try (try func.wrapOperand(bin_op, lhs_ty)).toLocal(func, lhs_ty); } else bin_op; - defer result.free(func); // no-op when wasm_bits == int_info.bits + defer result.free(func); const cmp_op: std.math.CompareOperator = if (op == .sub) .gt else .lt; const overflow_bit: WValue = if (is_signed) blk: { -- cgit v1.2.3 From 6c06944b5958e2b624004e986deee8e52c765e6e Mon Sep 17 00:00:00 2001 From: Luuk de Gram Date: Wed, 
10 May 2023 17:04:55 +0200
Subject: wasm: fix `ret_load` return with zero-sized type

When we had a `ret_load` instruction with a zero-sized type that was
not an error, we would not emit any instruction. This resulted in no
`return` instruction and in the global stack_pointer not being reset
correctly.

This commit also enables the regular test runner for the WebAssembly
backend.
---
 lib/test_runner.zig       | 4 +---
 src/arch/wasm/CodeGen.zig | 9 +++------
 2 files changed, 4 insertions(+), 9 deletions(-)

(limited to 'src/arch')

diff --git a/lib/test_runner.zig b/lib/test_runner.zig
index 8e29d90433..33fe547b57 100644
--- a/lib/test_runner.zig
+++ b/lib/test_runner.zig
@@ -12,9 +12,7 @@ var cmdline_buffer: [4096]u8 = undefined;
 var fba = std.heap.FixedBufferAllocator.init(&cmdline_buffer);
 
 pub fn main() void {
-    if (builtin.zig_backend == .stage2_wasm or
-        builtin.zig_backend == .stage2_aarch64)
-    {
+    if (builtin.zig_backend == .stage2_aarch64) {
         return mainSimple() catch @panic("test failure");
     }
 
diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig
index 6ad22de43c..73db9221fa 100644
--- a/src/arch/wasm/CodeGen.zig
+++ b/src/arch/wasm/CodeGen.zig
@@ -2122,16 +2122,13 @@ fn airRetLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const un_op = func.air.instructions.items(.data)[inst].un_op;
     const operand = try func.resolveInst(un_op);
     const ret_ty = func.air.typeOf(un_op).childType();
+
+    const fn_info = func.decl.ty.fnInfo();
     if (!ret_ty.hasRuntimeBitsIgnoreComptime()) {
         if (ret_ty.isError()) {
             try func.addImm32(0);
-        } else {
-            return func.finishAir(inst, .none, &.{});
         }
-    }
-
-    const fn_info = func.decl.ty.fnInfo();
-    if (!firstParamSRet(fn_info.cc, fn_info.return_type, func.target)) {
+    } else if (!firstParamSRet(fn_info.cc, fn_info.return_type, func.target)) {
         // leave on the stack
         _ = try func.load(operand, ret_ty, 0);
     }
-- 
cgit v1.2.3


From 061d99285d5a73a71a97ed7bbb45f0a7f65acf2d Mon Sep 17 00:00:00 2001
From: Luuk de Gram
Date: Mon, 15 May 2023 21:53:24 +0200
Subject: wasm: correctly use elem type when lowering

Previously when lowering a value of `elem_ptr` we would multiply the
ABI size of the parent type by the index, rather than that of the
element type. This would result in an invalid pointer far beyond the
correct pointer. We now also pass the current offset to each recursive
call to ensure we do not miss inner offsets.
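As a concrete illustration (sizes assumed for a 32-bit wasm target):
for an `elem_ptr` at index 2 into a `[4]u32`, the correct offset is
2 * abiSize(u32) = 8 bytes past the base, whereas multiplying by the
parent type's ABI size yields 2 * abiSize([4]u32) = 32 bytes, i.e. a
pointer two whole arrays beyond the base.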
--- src/arch/wasm/CodeGen.zig | 63 ++++++++++++++++------------------------------- test/behavior/slice.zig | 1 - 2 files changed, 21 insertions(+), 43 deletions(-) (limited to 'src/arch') diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index 73db9221fa..d2a9d1a52f 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -2885,26 +2885,25 @@ fn wrapOperand(func: *CodeGen, operand: WValue, ty: Type) InnerError!WValue { return WValue{ .stack = {} }; } -fn lowerParentPtr(func: *CodeGen, ptr_val: Value, ptr_child_ty: Type) InnerError!WValue { +fn lowerParentPtr(func: *CodeGen, ptr_val: Value, offset: u32) InnerError!WValue { switch (ptr_val.tag()) { .decl_ref_mut => { const decl_index = ptr_val.castTag(.decl_ref_mut).?.data.decl_index; - return func.lowerParentPtrDecl(ptr_val, decl_index); + return func.lowerParentPtrDecl(ptr_val, decl_index, offset); }, .decl_ref => { const decl_index = ptr_val.castTag(.decl_ref).?.data; - return func.lowerParentPtrDecl(ptr_val, decl_index); + return func.lowerParentPtrDecl(ptr_val, decl_index, offset); }, .variable => { const decl_index = ptr_val.castTag(.variable).?.data.owner_decl; - return func.lowerParentPtrDecl(ptr_val, decl_index); + return func.lowerParentPtrDecl(ptr_val, decl_index, offset); }, .field_ptr => { const field_ptr = ptr_val.castTag(.field_ptr).?.data; const parent_ty = field_ptr.container_ty; - const parent_ptr = try func.lowerParentPtr(field_ptr.container_ptr, parent_ty); - const offset = switch (parent_ty.zigTypeTag()) { + const field_offset = switch (parent_ty.zigTypeTag()) { .Struct => switch (parent_ty.containerLayout()) { .Packed => parent_ty.packedStructFieldByteOffset(field_ptr.field_index, func.target), else => parent_ty.structFieldOffset(field_ptr.field_index, func.target), @@ -2917,8 +2916,8 @@ fn lowerParentPtr(func: *CodeGen, ptr_val: Value, ptr_child_ty: Type) InnerError if (layout.payload_align > layout.tag_align) break :blk 0; // tag is stored first so calculate offset from where payload starts - const offset = @intCast(u32, std.mem.alignForwardGeneric(u64, layout.tag_size, layout.tag_align)); - break :blk offset; + const field_offset = @intCast(u32, std.mem.alignForwardGeneric(u64, layout.tag_size, layout.tag_align)); + break :blk field_offset; }, }, .Pointer => switch (parent_ty.ptrSize()) { @@ -2931,43 +2930,23 @@ fn lowerParentPtr(func: *CodeGen, ptr_val: Value, ptr_child_ty: Type) InnerError }, else => unreachable, }; - - return switch (parent_ptr) { - .memory => |ptr| WValue{ - .memory_offset = .{ - .pointer = ptr, - .offset = @intCast(u32, offset), - }, - }, - .memory_offset => |mem_off| WValue{ - .memory_offset = .{ - .pointer = mem_off.pointer, - .offset = @intCast(u32, offset) + mem_off.offset, - }, - }, - else => unreachable, - }; + return func.lowerParentPtr(field_ptr.container_ptr, offset + @intCast(u32, field_offset)); }, .elem_ptr => { const elem_ptr = ptr_val.castTag(.elem_ptr).?.data; const index = elem_ptr.index; - const offset = index * ptr_child_ty.abiSize(func.target); - const array_ptr = try func.lowerParentPtr(elem_ptr.array_ptr, elem_ptr.elem_ty); - - return WValue{ .memory_offset = .{ - .pointer = array_ptr.memory, - .offset = @intCast(u32, offset), - } }; + const elem_offset = index * elem_ptr.elem_ty.abiSize(func.target); + return func.lowerParentPtr(elem_ptr.array_ptr, offset + @intCast(u32, elem_offset)); }, .opt_payload_ptr => { const payload_ptr = ptr_val.castTag(.opt_payload_ptr).?.data; - return func.lowerParentPtr(payload_ptr.container_ptr, 
payload_ptr.container_ty); + return func.lowerParentPtr(payload_ptr.container_ptr, offset); }, else => |tag| return func.fail("TODO: Implement lowerParentPtr for tag: {}", .{tag}), } } -fn lowerParentPtrDecl(func: *CodeGen, ptr_val: Value, decl_index: Module.Decl.Index) InnerError!WValue { +fn lowerParentPtrDecl(func: *CodeGen, ptr_val: Value, decl_index: Module.Decl.Index, offset: u32) InnerError!WValue { const module = func.bin_file.base.options.module.?; const decl = module.declPtr(decl_index); module.markDeclAlive(decl); @@ -2976,10 +2955,10 @@ fn lowerParentPtrDecl(func: *CodeGen, ptr_val: Value, decl_index: Module.Decl.In .data = decl.ty, }; const ptr_ty = Type.initPayload(&ptr_ty_payload.base); - return func.lowerDeclRefValue(.{ .ty = ptr_ty, .val = ptr_val }, decl_index); + return func.lowerDeclRefValue(.{ .ty = ptr_ty, .val = ptr_val }, decl_index, offset); } -fn lowerDeclRefValue(func: *CodeGen, tv: TypedValue, decl_index: Module.Decl.Index) InnerError!WValue { +fn lowerDeclRefValue(func: *CodeGen, tv: TypedValue, decl_index: Module.Decl.Index, offset: u32) InnerError!WValue { if (tv.ty.isSlice()) { return WValue{ .memory = try func.bin_file.lowerUnnamedConst(tv, decl_index) }; } @@ -2998,7 +2977,9 @@ fn lowerDeclRefValue(func: *CodeGen, tv: TypedValue, decl_index: Module.Decl.Ind if (decl.ty.zigTypeTag() == .Fn) { try func.bin_file.addTableFunction(target_sym_index); return WValue{ .function_index = target_sym_index }; - } else return WValue{ .memory = target_sym_index }; + } else if (offset == 0) { + return WValue{ .memory = target_sym_index }; + } else return WValue{ .memory_offset = .{ .pointer = target_sym_index, .offset = offset } }; } /// Converts a signed integer to its 2's complement form and returns @@ -3025,11 +3006,11 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue { if (val.isUndefDeep()) return func.emitUndefined(ty); if (val.castTag(.decl_ref)) |decl_ref| { const decl_index = decl_ref.data; - return func.lowerDeclRefValue(.{ .ty = ty, .val = val }, decl_index); + return func.lowerDeclRefValue(.{ .ty = ty, .val = val }, decl_index, 0); } if (val.castTag(.decl_ref_mut)) |decl_ref_mut| { const decl_index = decl_ref_mut.data.decl_index; - return func.lowerDeclRefValue(.{ .ty = ty, .val = val }, decl_index); + return func.lowerDeclRefValue(.{ .ty = ty, .val = val }, decl_index, 0); } const target = func.target; switch (ty.zigTypeTag()) { @@ -3063,9 +3044,7 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue { else => unreachable, }, .Pointer => switch (val.tag()) { - .field_ptr, .elem_ptr, .opt_payload_ptr => { - return func.lowerParentPtr(val, ty.childType()); - }, + .field_ptr, .elem_ptr, .opt_payload_ptr => return func.lowerParentPtr(val, 0), .int_u64, .one => return WValue{ .imm32 = @intCast(u32, val.toUnsignedInt(target)) }, .zero, .null_value => return WValue{ .imm32 = 0 }, else => return func.fail("Wasm TODO: lowerConstant for other const pointer tag {}", .{val.tag()}), @@ -5281,7 +5260,7 @@ fn airMemcpy(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const src_ty = func.air.typeOf(bin_op.rhs); const len = switch (dst_ty.ptrSize()) { .Slice => try func.sliceLen(dst), - .One => @as(WValue, .{ .imm64 = dst_ty.childType().arrayLen() }), + .One => @as(WValue, .{ .imm32 = @intCast(u32, dst_ty.childType().arrayLen()) }), .C, .Many => unreachable, }; const dst_ptr = try func.sliceOrArrayPtr(dst, dst_ty); diff --git a/test/behavior/slice.zig b/test/behavior/slice.zig index 3b88636dca..bff90518ed 100644 
--- a/test/behavior/slice.zig
+++ b/test/behavior/slice.zig
@@ -185,7 +185,6 @@ test "slicing zero length array" {
 }
 
 test "slicing pointer by length" {
-    if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
 
     const array = [_]u8{ 1, 2, 3, 4, 5, 6, 7, 8 };
-- 
cgit v1.2.3


From b93fa9833e6d2f7959eabb2417a154152a8d2d1c Mon Sep 17 00:00:00 2001
From: Luuk de Gram
Date: Tue, 16 May 2023 19:33:19 +0200
Subject: wasm: memset - correctly load the ptr for slices

Previously we would use the address of the slice itself, which would
result in miscompilations and accidentally setting the memory region of
the slice itself, rather than the region behind its `ptr` field.
---
 src/arch/wasm/CodeGen.zig | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

(limited to 'src/arch')

diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig
index d2a9d1a52f..2d0ee2fc69 100644
--- a/src/arch/wasm/CodeGen.zig
+++ b/src/arch/wasm/CodeGen.zig
@@ -4464,7 +4464,9 @@ fn airMemset(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void
         .One => @as(WValue, .{ .imm32 = @intCast(u32, ptr_ty.childType().arrayLen()) }),
         .C, .Many => unreachable,
     };
-    try func.memset(ptr, len, value);
+
+    const dst_ptr = try func.sliceOrArrayPtr(ptr, ptr_ty);
+    try func.memset(dst_ptr, len, value);
 
     func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
 }
-- 
cgit v1.2.3


From 55a260c968aed32001df5355596331db38b13729 Mon Sep 17 00:00:00 2001
From: Luuk de Gram
Date: Wed, 17 May 2023 20:31:18 +0200
Subject: wasm: implement `shl` for big integers
---
 src/arch/wasm/CodeGen.zig | 1 +
 test/behavior/int128.zig  | 1 -
 2 files changed, 1 insertion(+), 1 deletion(-)

(limited to 'src/arch')

diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig
index 2d0ee2fc69..3b5e92fdbb 100644
--- a/src/arch/wasm/CodeGen.zig
+++ b/src/arch/wasm/CodeGen.zig
@@ -2604,6 +2604,7 @@ fn binOpBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) Inner
     switch (op) {
         .mul => return func.callIntrinsic("__multi3", &.{ ty, ty }, ty, &.{ lhs, rhs }),
         .shr => return func.callIntrinsic("__lshrti3", &.{ ty, Type.i32 }, ty, &.{ lhs, rhs }),
+        .shl => return func.callIntrinsic("__ashlti3", &.{ ty, Type.i32 }, ty, &.{ lhs, rhs }),
         .xor => {
             const result = try func.allocStack(ty);
             try func.emitWValue(result);
diff --git a/test/behavior/int128.zig b/test/behavior/int128.zig
index c4ace7c4c0..cdf32f02c2 100644
--- a/test/behavior/int128.zig
+++ b/test/behavior/int128.zig
@@ -87,7 +87,6 @@ test "truncate int128" {
 }
 
 test "shift int128" {
-    if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-- 
cgit v1.2.3


From 4a33aa922e90b76248b259a89be86966eb4898c2 Mon Sep 17 00:00:00 2001
From: Luuk de Gram
Date: Thu, 18 May 2023 17:26:20 +0200
Subject: wasm: support `memset` for elem abi size > 1

Previously we incorrectly assumed every memset's element ABI size to be
1 byte. This would set the region of memory incorrectly. We now have a
more efficient loop, as well as support for any element type, by
re-using the `store` function for each element and moving the pointer
by 1 element.
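Loosely, the emitted loop walks a current pointer up to an end pointer
computed as ptr + len * abi_size. In Zig terms it behaves like this
stand-in (hypothetical; it stores a single byte where the real code
stores a whole element through the `store` lowering):

    fn memsetByElem(dst: [*]u8, len: usize, abi_size: usize, byte: u8) void {
        var cur = dst;
        const end = dst + len * abi_size;
        while (cur != end) : (cur += abi_size) {
            // one `store` of a whole element per iteration in the real code
            cur[0] = byte;
        }
    }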
---
 src/arch/wasm/CodeGen.zig | 188 ++++++++++++++++++++++++++--------------------
 test/behavior/memset.zig  |  14 ----
 2 files changed, 108 insertions(+), 94 deletions(-)

(limited to 'src/arch')

diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig
index 3b5e92fdbb..78e722f794 100644
--- a/src/arch/wasm/CodeGen.zig
+++ b/src/arch/wasm/CodeGen.zig
@@ -1605,10 +1605,16 @@ fn memcpy(func: *CodeGen, dst: WValue, src: WValue, len: WValue) !void {
         else => {},
     }
 
-    // TODO: We should probably lower this to a call to compiler_rt
-    // But for now, we implement it manually
-    var offset = try func.ensureAllocLocal(Type.usize); // local for counter
+    // allocate a local for the offset, and set it to 0.
+    // This is to ensure that inside loops we correctly re-set the counter.
+    var offset = try func.allocLocal(Type.usize); // local for counter
     defer offset.free(func);
+    switch (func.arch()) {
+        .wasm32 => try func.addImm32(0),
+        .wasm64 => try func.addImm64(0),
+        else => unreachable,
+    }
+    try func.addLabel(.local_set, offset.local.value);
 
     // outer block to jump to when loop is done
     try func.startBlock(.block, wasm.block_empty);
@@ -3301,19 +3307,23 @@ fn airCondBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     {
         func.branches.appendAssumeCapacity(.{});
         try func.currentBranch().values.ensureUnusedCapacity(func.gpa, @intCast(u32, liveness_condbr.else_deaths.len));
+        defer {
+            var else_stack = func.branches.pop();
+            else_stack.deinit(func.gpa);
+        }
         try func.genBody(else_body);
         try func.endBlock();
-        var else_stack = func.branches.pop();
-        else_stack.deinit(func.gpa);
     }
 
     // Outer block that matches the condition
     {
         func.branches.appendAssumeCapacity(.{});
         try func.currentBranch().values.ensureUnusedCapacity(func.gpa, @intCast(u32, liveness_condbr.then_deaths.len));
+        defer {
+            var then_stack = func.branches.pop();
+            then_stack.deinit(func.gpa);
+        }
         try func.genBody(then_body);
-        var then_stack = func.branches.pop();
-        then_stack.deinit(func.gpa);
     }
 
     func.finishAir(inst, .none, &.{});
@@ -3829,20 +3839,24 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
             }
         }
         func.branches.appendAssumeCapacity(.{});
         try func.currentBranch().values.ensureUnusedCapacity(func.gpa, liveness.deaths[index].len);
+        defer {
+            var case_branch = func.branches.pop();
+            case_branch.deinit(func.gpa);
+        }
         try func.genBody(case.body);
         try func.endBlock();
-        var case_branch = func.branches.pop();
-        case_branch.deinit(func.gpa);
     }
 
     if (has_else_body) {
         func.branches.appendAssumeCapacity(.{});
         const else_deaths = liveness.deaths.len - 1;
         try func.currentBranch().values.ensureUnusedCapacity(func.gpa, liveness.deaths[else_deaths].len);
+        defer {
+            var else_branch = func.branches.pop();
+            else_branch.deinit(func.gpa);
+        }
         try func.genBody(else_body);
         try func.endBlock();
-        var else_branch = func.branches.pop();
-        else_branch.deinit(func.gpa);
     }
     func.finishAir(inst, .none, &.{});
 }
@@ -3971,7 +3985,7 @@ fn airWrapErrUnionErr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
         // write 'undefined' to the payload
         const payload_ptr = try func.buildPointerOffset(err_union, @intCast(u32, errUnionPayloadOffset(pl_ty, func.target)), .new);
         const len = @intCast(u32, err_ty.errorUnionPayload().abiSize(func.target));
-        try func.memset(payload_ptr, .{ .imm32 = len }, .{ .imm32 = 0xaaaaaaaa });
+        try func.memset(Type.u8, payload_ptr, .{ .imm32 = len }, .{ .imm32 = 0xaa });
 
         break :result err_union;
     };
@@ -4466,8 +4480,13 @@ fn airMemset(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void
         .One => @as(WValue, .{ .imm32 = @intCast(u32, ptr_ty.childType().arrayLen()) }),
        .C, .Many =>
unreachable, }; + const elem_ty = if (ptr_ty.ptrSize() == .One) + ptr_ty.childType().childType() + else + ptr_ty.childType(); + const dst_ptr = try func.sliceOrArrayPtr(ptr, ptr_ty); - try func.memset(dst_ptr, len, value); + try func.memset(elem_ty, dst_ptr, len, value); func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs }); } @@ -4476,10 +4495,12 @@ fn airMemset(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void /// When the user has enabled the bulk_memory feature, we lower /// this to wasm's memset instruction. When the feature is not present, /// we implement it manually. -fn memset(func: *CodeGen, ptr: WValue, len: WValue, value: WValue) InnerError!void { +fn memset(func: *CodeGen, elem_ty: Type, ptr: WValue, len: WValue, value: WValue) InnerError!void { + const abi_size = @intCast(u32, elem_ty.abiSize(func.target)); + // When bulk_memory is enabled, we lower it to wasm's memset instruction. - // If not, we lower it ourselves - if (std.Target.wasm.featureSetHas(func.target.cpu.features, .bulk_memory)) { + // If not, we lower it ourselves. + if (std.Target.wasm.featureSetHas(func.target.cpu.features, .bulk_memory) and abi_size == 1) { try func.lowerToStack(ptr); try func.emitWValue(value); try func.emitWValue(len); @@ -4487,74 +4508,79 @@ fn memset(func: *CodeGen, ptr: WValue, len: WValue, value: WValue) InnerError!vo return; } - // When the length is comptime-known we do the loop at codegen, rather - // than emitting a runtime loop into the binary - switch (len) { - .imm32, .imm64 => { - const length = switch (len) { - .imm32 => |val| val, - .imm64 => |val| val, - else => unreachable, - }; - - var offset: u32 = 0; - const base = ptr.offset(); - while (offset < length) : (offset += 1) { - try func.emitWValue(ptr); - try func.emitWValue(value); - switch (func.arch()) { - .wasm32 => { - try func.addMemArg(.i32_store8, .{ .offset = base + offset, .alignment = 1 }); - }, - .wasm64 => { - try func.addMemArg(.i64_store8, .{ .offset = base + offset, .alignment = 1 }); - }, - else => unreachable, - } - } - }, - else => { - // TODO: We should probably lower this to a call to compiler_rt - // But for now, we implement it manually - const offset = try func.ensureAllocLocal(Type.usize); // local for counter - // outer block to jump to when loop is done - try func.startBlock(.block, wasm.block_empty); - try func.startBlock(.loop, wasm.block_empty); - try func.emitWValue(offset); + const final_len = switch (len) { + .imm32 => |val| WValue{ .imm32 = val * abi_size }, + .imm64 => |val| WValue{ .imm64 = val * abi_size }, + else => if (abi_size != 1) blk: { + const new_len = try func.ensureAllocLocal(Type.usize); try func.emitWValue(len); switch (func.arch()) { - .wasm32 => try func.addTag(.i32_eq), - .wasm64 => try func.addTag(.i64_eq), - else => unreachable, - } - try func.addLabel(.br_if, 1); // jump out of loop into outer block (finished) - try func.emitWValue(ptr); - try func.emitWValue(offset); - switch (func.arch()) { - .wasm32 => try func.addTag(.i32_add), - .wasm64 => try func.addTag(.i64_add), - else => unreachable, - } - try func.emitWValue(value); - const mem_store_op: Mir.Inst.Tag = switch (func.arch()) { - .wasm32 => .i32_store8, - .wasm64 => .i64_store8, - else => unreachable, - }; - try func.addMemArg(mem_store_op, .{ .offset = ptr.offset(), .alignment = 1 }); - try func.emitWValue(offset); - try func.addImm32(1); - switch (func.arch()) { - .wasm32 => try func.addTag(.i32_add), - .wasm64 => try func.addTag(.i64_add), + .wasm32 => { + try func.emitWValue(.{ .imm32 = 
abi_size });
+                try func.addTag(.i32_mul);
+            },
+            .wasm64 => {
+                try func.emitWValue(.{ .imm64 = abi_size });
+                try func.addTag(.i64_mul);
+            },
                 else => unreachable,
             }
-            try func.addLabel(.local_set, offset.local.value);
-            try func.addLabel(.br, 0); // jump to start of loop
-            try func.endBlock();
-            try func.endBlock();
+            try func.addLabel(.local_set, new_len.local.value);
+            break :blk new_len;
+        } else len,
+    };
+
+    var end_ptr = try func.allocLocal(Type.usize);
+    defer end_ptr.free(func);
+    var new_ptr = try func.buildPointerOffset(ptr, 0, .new);
+    defer new_ptr.free(func);
+
+    // get the loop conditional: if current pointer address equals final pointer's address
+    try func.lowerToStack(ptr);
+    try func.emitWValue(final_len);
+    switch (func.arch()) {
+        .wasm32 => try func.addTag(.i32_add),
+        .wasm64 => try func.addTag(.i64_add),
+        else => unreachable,
+    }
+    try func.addLabel(.local_set, end_ptr.local.value);
+
+    // outer block to jump to when loop is done
+    try func.startBlock(.block, wasm.block_empty);
+    try func.startBlock(.loop, wasm.block_empty);
+
+    // check the condition for loop end
+    try func.emitWValue(new_ptr);
+    try func.emitWValue(end_ptr);
+    switch (func.arch()) {
+        .wasm32 => try func.addTag(.i32_eq),
+        .wasm64 => try func.addTag(.i64_eq),
+        else => unreachable,
+    }
+    try func.addLabel(.br_if, 1); // jump out of loop into outer block (finished)
+
+    // store the value at the current position of the pointer
+    try func.store(new_ptr, value, elem_ty, 0);
+
+    // move the pointer to the next element
+    try func.emitWValue(new_ptr);
+    switch (func.arch()) {
+        .wasm32 => {
+            try func.emitWValue(.{ .imm32 = abi_size });
+            try func.addTag(.i32_add);
         },
+        .wasm64 => {
+            try func.emitWValue(.{ .imm64 = abi_size });
+            try func.addTag(.i64_add);
+        },
+        else => unreachable,
     }
+    try func.addLabel(.local_set, new_ptr.local.value);
+
+    // end of loop
+    try func.addLabel(.br, 0); // jump to start of loop
+    try func.endBlock();
+    try func.endBlock();
 }
 
 fn airArrayElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
@@ -6007,10 +6033,12 @@ fn lowerTry(
         const liveness = func.liveness.getCondBr(inst);
         try func.branches.append(func.gpa, .{});
         try func.currentBranch().values.ensureUnusedCapacity(func.gpa, liveness.else_deaths.len + liveness.then_deaths.len);
+        defer {
+            var branch = func.branches.pop();
+            branch.deinit(func.gpa);
+        }
         try func.genBody(body);
         try func.endBlock();
-        var branch = func.branches.pop();
-        branch.deinit(func.gpa);
     }
 
     // if we reach here it means error was not set, and we want the payload
diff --git a/test/behavior/memset.zig b/test/behavior/memset.zig
index be09ef655f..32d9cf1eb8 100644
--- a/test/behavior/memset.zig
+++ b/test/behavior/memset.zig
@@ -7,10 +7,6 @@ test "@memset on array pointers" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_wasm) {
-        // TODO: implement memset when element ABI size > 1
-        return error.SkipZigTest;
-    }
 
     try testMemsetArray();
     try comptime testMemsetArray();
@@ -40,11 +36,6 @@ test "@memset on slices" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_wasm) {
-        // TODO: implement memset when element ABI size > 1
-        // TODO: implement memset on slices
-        return error.SkipZigTest;
-    }
 
     try testMemsetSlice();
     try comptime testMemsetSlice();
@@ -78,7 +69,6 @@ test "memset with bool element" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
 
     var buf: [5]bool = undefined;
     @memset(&buf, true);
@@ -91,7 +81,6 @@ test "memset with 1-byte struct element" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
 
     const S = struct { x: bool };
     var buf: [5]S = undefined;
@@ -105,7 +94,6 @@ test "memset with 1-byte array element" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
 
     const A = [1]bool;
     var buf: [5]A = undefined;
@@ -119,7 +107,6 @@ test "memset with large array element, runtime known" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
 
     const A = [128]u64;
     var buf: [5]A = undefined;
@@ -137,7 +124,6 @@ test "memset with large array element, comptime known" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
 
     const A = [128]u64;
     var buf: [5]A = undefined;
-- 
cgit v1.2.3


From ca870aa00504accd49e7f1d2fceed1e4b8d21100 Mon Sep 17 00:00:00 2001
From: Luuk de Gram
Date: Thu, 18 May 2023 18:08:10 +0200
Subject: wasm: fix `div_trunc` for floats

For floats we would previously only do the division, but not the
truncation. This would result in incorrect values being returned.
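The difference shows up on any quotient with a fractional part:
`@divTrunc(@as(f32, -7), 2)` must be -3.0, but wasm's `f32.div` alone
produces -3.5, so the lowering below follows the float division with
the float `trunc` operation to round toward zero.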
---
 src/arch/wasm/CodeGen.zig | 26 ++++++++++++++++++++++----
 test/behavior/math.zig    |  1 -
 2 files changed, 22 insertions(+), 5 deletions(-)

(limited to 'src/arch')

diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig
index 78e722f794..5e36bb3f89 100644
--- a/src/arch/wasm/CodeGen.zig
+++ b/src/arch/wasm/CodeGen.zig
@@ -1814,10 +1814,8 @@ fn genInst(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
         .subwrap => func.airWrapBinOp(inst, .sub),
         .mul => func.airBinOp(inst, .mul),
         .mulwrap => func.airWrapBinOp(inst, .mul),
-        .div_float,
-        .div_exact,
-        .div_trunc,
-        => func.airDiv(inst),
+        .div_float, .div_exact => func.airDiv(inst),
+        .div_trunc => func.airDivTrunc(inst),
         .div_floor => func.airDivFloor(inst),
         .bit_and => func.airBinOp(inst, .@"and"),
         .bit_or => func.airBinOp(inst, .@"or"),
@@ -6138,6 +6136,26 @@ fn airDiv(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     func.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
 }
 
+fn airDivTrunc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const bin_op = func.air.instructions.items(.data)[inst].bin_op;
+
+    const ty = func.air.typeOfIndex(inst);
+    const lhs = try func.resolveInst(bin_op.lhs);
+    const rhs = try func.resolveInst(bin_op.rhs);
+
+    const div_result = if (ty.isSignedInt())
+        try func.divSigned(lhs, rhs, ty)
+    else
+        try (try func.binOp(lhs, rhs, ty, .div)).toLocal(func, ty);
+
+    if (ty.isAnyFloat()) {
+        const trunc_result = try (try func.floatOp(.trunc, ty, &.{div_result})).toLocal(func, ty);
+        return func.finishAir(inst, trunc_result, &.{ bin_op.lhs, bin_op.rhs });
+    }
+
+    return func.finishAir(inst, div_result, &.{ bin_op.lhs, bin_op.rhs });
+}
+
 fn airDivFloor(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const bin_op = func.air.instructions.items(.data)[inst].bin_op;
 
diff --git a/test/behavior/math.zig b/test/behavior/math.zig
index cc85594c50..2d43ac306d 100644
--- a/test/behavior/math.zig
+++ b/test/behavior/math.zig
@@ -486,7 +486,6 @@ fn testDivision() !void {
 }
 
 test "division half-precision floats" {
-    if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
-- 
cgit v1.2.3


From 832330094c00391ecd6f0ea4abf2d05261b5a10c Mon Sep 17 00:00:00 2001
From: Luuk de Gram
Date: Fri, 19 May 2023 20:14:34 +0200
Subject: wasm: aggregate_init - ensure zeroed result local

When initializing a packed struct, we must ensure the result local is
zeroed. Previously we would do this by ensuring a new local is
allocated. Although a local is always zero by default, this meant that
if such an initialization was done inside a loop, it would re-use that
very same local, which could potentially still hold a different value.
Because the new value is `or`'d with the local's existing contents,
this would result in a miscompilation. By manually setting this result
to 0, we guarantee the correct behavior.
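A hypothetical reproducer of the affected pattern (assumed for
illustration, not taken from an actual issue report):

    const std = @import("std");

    const P = packed struct { lo: u4, hi: u4 };

    test "packed struct init in a loop" {
        var i: u8 = 0;
        while (i < 2) : (i += 1) {
            // each iteration or's the field values into the same backing
            // local; without the explicit zeroing added below, bits left
            // over from the previous iteration could leak into `p`
            const p = P{ .lo = @intCast(u4, i), .hi = 1 };
            try std.testing.expect(p.hi == 1);
        }
    }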
--- src/arch/wasm/CodeGen.zig | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) (limited to 'src/arch') diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index 5e36bb3f89..11b7f65946 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -4893,8 +4893,15 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const struct_obj = result_ty.castTag(.@"struct").?.data; const fields = struct_obj.fields.values(); const backing_type = struct_obj.backing_int_ty; - // we ensure a new local is created so it's zero-initialized - const result = try func.ensureAllocLocal(backing_type); + + // ensure the result is zero'd + const result = try func.allocLocal(backing_type); + if (struct_obj.backing_int_ty.bitSize(func.target) <= 32) + try func.addImm32(0) + else + try func.addImm64(0); + try func.addLabel(.local_set, result.local.value); + var current_bit: u16 = 0; for (elements, 0..) |elem, elem_index| { const field = fields[elem_index]; -- cgit v1.2.3 From 3db3cf77904e664d589287602c14168a7a63f125 Mon Sep 17 00:00:00 2001 From: Ali Chraghi Date: Tue, 23 May 2023 15:33:12 +0330 Subject: std.sort: add pdqsort and heapsort --- lib/std/compress/deflate/huffman_code.zig | 4 +- lib/std/compress/zstandard/decode/fse.zig | 2 +- lib/std/compress/zstandard/decode/huffman.zig | 2 +- lib/std/comptime_string_map.zig | 2 +- lib/std/debug.zig | 2 +- lib/std/enums.zig | 2 +- lib/std/http/Headers.zig | 2 +- lib/std/mem.zig | 28 + lib/std/meta.zig | 2 +- lib/std/multi_array_list.zig | 7 +- lib/std/net.zig | 2 +- lib/std/sort.zig | 1471 ++++--------------------- lib/std/sort/block.zig | 1066 ++++++++++++++++++ lib/std/sort/pdq.zig | 331 ++++++ src/Compilation.zig | 2 +- src/Package.zig | 2 +- src/RangeSet.zig | 2 +- src/Sema.zig | 2 +- src/arch/x86_64/CodeGen.zig | 2 +- src/arch/x86_64/Encoding.zig | 2 +- src/codegen/c/type.zig | 2 +- src/link/Coff.zig | 2 +- src/link/MachO/Object.zig | 8 +- src/link/MachO/UnwindInfo.zig | 2 +- src/link/MachO/dyld_info/Rebase.zig | 2 +- src/link/MachO/dyld_info/bind.zig | 2 +- src/link/MachO/zld.zig | 4 +- src/link/Wasm.zig | 10 +- src/objcopy.zig | 4 +- test/src/Cases.zig | 2 +- tools/gen_stubs.zig | 2 +- tools/generate_JSONTestSuite.zig | 2 +- tools/process_headers.zig | 2 +- tools/update-linux-headers.zig | 2 +- tools/update_clang_options.zig | 2 +- tools/update_cpu_features.zig | 8 +- tools/update_spirv_features.zig | 2 +- 37 files changed, 1702 insertions(+), 1291 deletions(-) create mode 100644 lib/std/sort/block.zig create mode 100644 lib/std/sort/pdq.zig (limited to 'src/arch') diff --git a/lib/std/compress/deflate/huffman_code.zig b/lib/std/compress/deflate/huffman_code.zig index 4827feb245..689ac1441a 100644 --- a/lib/std/compress/deflate/huffman_code.zig +++ b/lib/std/compress/deflate/huffman_code.zig @@ -93,7 +93,7 @@ pub const HuffmanEncoder = struct { return; } self.lfs = list; - sort.sort(LiteralNode, self.lfs, {}, byFreq); + mem.sort(LiteralNode, self.lfs, {}, byFreq); // Get the number of literals for each bit count var bit_count = self.bitCounts(list, max_bits); @@ -270,7 +270,7 @@ pub const HuffmanEncoder = struct { var chunk = list[list.len - @intCast(u32, bits) ..]; self.lns = chunk; - sort.sort(LiteralNode, self.lns, {}, byLiteral); + mem.sort(LiteralNode, self.lns, {}, byLiteral); for (chunk) |node| { self.codes[node.literal] = HuffCode{ diff --git a/lib/std/compress/zstandard/decode/fse.zig b/lib/std/compress/zstandard/decode/fse.zig index 741fd81ccc..232af39ccf 100644 --- 
a/lib/std/compress/zstandard/decode/fse.zig +++ b/lib/std/compress/zstandard/decode/fse.zig @@ -107,7 +107,7 @@ fn buildFseTable(values: []const u16, entries: []Table.Fse) !void { position &= entries.len - 1; } } - std.sort.sort(u16, temp_states[0..probability], {}, std.sort.asc(u16)); + std.mem.sort(u16, temp_states[0..probability], {}, std.sort.asc(u16)); for (0..probability) |i| { entries[temp_states[i]] = if (i < double_state_count) Table.Fse{ .symbol = @intCast(u8, symbol), diff --git a/lib/std/compress/zstandard/decode/huffman.zig b/lib/std/compress/zstandard/decode/huffman.zig index 2914198268..f5e977d0da 100644 --- a/lib/std/compress/zstandard/decode/huffman.zig +++ b/lib/std/compress/zstandard/decode/huffman.zig @@ -124,7 +124,7 @@ fn assignSymbols(weight_sorted_prefixed_symbols: []LiteralsSection.HuffmanTree.P }; } - std.sort.sort( + std.mem.sort( LiteralsSection.HuffmanTree.PrefixedSymbol, weight_sorted_prefixed_symbols, weights, diff --git a/lib/std/comptime_string_map.zig b/lib/std/comptime_string_map.zig index 7620ec7af8..e6859c32c1 100644 --- a/lib/std/comptime_string_map.zig +++ b/lib/std/comptime_string_map.zig @@ -28,7 +28,7 @@ pub fn ComptimeStringMap(comptime V: type, comptime kvs_list: anytype) type { sorted_kvs[i] = .{ .key = kv.@"0", .value = {} }; } } - std.sort.sort(KV, &sorted_kvs, {}, lenAsc); + mem.sort(KV, &sorted_kvs, {}, lenAsc); const min_len = sorted_kvs[0].key.len; const max_len = sorted_kvs[sorted_kvs.len - 1].key.len; var len_indexes: [max_len + 1]usize = undefined; diff --git a/lib/std/debug.zig b/lib/std/debug.zig index ecc1a9f0cf..005c2b5404 100644 --- a/lib/std/debug.zig +++ b/lib/std/debug.zig @@ -1211,7 +1211,7 @@ fn readMachODebugInfo(allocator: mem.Allocator, macho_file: File) !ModuleDebugIn // Even though lld emits symbols in ascending order, this debug code // should work for programs linked in any valid way. // This sort is so that we can binary search later. - std.sort.sort(MachoSymbol, symbols, {}, MachoSymbol.addressLessThan); + mem.sort(MachoSymbol, symbols, {}, MachoSymbol.addressLessThan); return ModuleDebugInfo{ .base_address = undefined, diff --git a/lib/std/enums.zig b/lib/std/enums.zig index aa6edd60b1..757c616b9b 100644 --- a/lib/std/enums.zig +++ b/lib/std/enums.zig @@ -1314,7 +1314,7 @@ pub fn EnumIndexer(comptime E: type) type { } }; } - std.sort.sort(EnumField, &fields, {}, ascByValue); + std.mem.sort(EnumField, &fields, {}, ascByValue); const min = fields[0].value; const max = fields[fields.len - 1].value; const fields_len = fields.len; diff --git a/lib/std/http/Headers.zig b/lib/std/http/Headers.zig index 429df9368a..fb7a9360d8 100644 --- a/lib/std/http/Headers.zig +++ b/lib/std/http/Headers.zig @@ -191,7 +191,7 @@ pub const Headers = struct { /// Sorts the headers in lexicographical order. 
pub fn sort(headers: *Headers) void { - std.sort.sort(Field, headers.list.items, {}, Field.lessThan); + std.mem.sort(Field, headers.list.items, {}, Field.lessThan); headers.rebuildIndex(); } diff --git a/lib/std/mem.zig b/lib/std/mem.zig index 8cb2c00a3a..311c97c254 100644 --- a/lib/std/mem.zig +++ b/lib/std/mem.zig @@ -566,6 +566,34 @@ test "zeroInit" { }, nested_baz); } +pub fn sort( + comptime T: type, + items: []T, + context: anytype, + comptime lessThanFn: fn (@TypeOf(context), lhs: T, rhs: T) bool, +) void { + std.sort.block(T, items, context, lessThanFn); +} + +pub fn sortUnstable( + comptime T: type, + items: []T, + context: anytype, + comptime lessThanFn: fn (@TypeOf(context), lhs: T, rhs: T) bool, +) void { + std.sort.pdq(T, items, context, lessThanFn); +} + +/// TODO: currently this just calls `insertionSortContext`. The block sort implementation +/// in this file needs to be adapted to use the sort context. +pub fn sortContext(a: usize, b: usize, context: anytype) void { + std.sort.insertionContext(a, b, context); +} + +pub fn sortUnstableContext(a: usize, b: usize, context: anytype) void { + std.sort.pdqContext(a, b, context); +} + /// Compares two slices of numbers lexicographically. O(n). pub fn order(comptime T: type, lhs: []const T, rhs: []const T) math.Order { const n = math.min(lhs.len, rhs.len); diff --git a/lib/std/meta.zig b/lib/std/meta.zig index 8adba2439a..d0b07b934f 100644 --- a/lib/std/meta.zig +++ b/lib/std/meta.zig @@ -985,7 +985,7 @@ pub fn declList(comptime Namespace: type, comptime Decl: type) []const *const De for (decls, 0..) |decl, i| { array[i] = &@field(Namespace, decl.name); } - std.sort.sort(*const Decl, &array, {}, S.declNameLessThan); + mem.sort(*const Decl, &array, {}, S.declNameLessThan); return &array; } } diff --git a/lib/std/multi_array_list.zig b/lib/std/multi_array_list.zig index 322471bedf..44e226be33 100644 --- a/lib/std/multi_array_list.zig +++ b/lib/std/multi_array_list.zig @@ -160,7 +160,7 @@ pub fn MultiArrayList(comptime T: type) type { return lhs.alignment > rhs.alignment; } }; - std.sort.sort(Data, &data, {}, Sort.lessThan); + mem.sort(Data, &data, {}, Sort.lessThan); var sizes_bytes: [fields.len]usize = undefined; var field_indexes: [fields.len]usize = undefined; for (data, 0..) 
|elem, i| { @@ -488,10 +488,7 @@ pub fn MultiArrayList(comptime T: type) type { } }; - std.sort.sortContext(self.len, SortContext{ - .sub_ctx = ctx, - .slice = self.slice(), - }); + mem.sortContext(0, self.len, SortContext{ .sub_ctx = ctx, .slice = self.slice() }); } fn capacityInBytes(capacity: usize) usize { diff --git a/lib/std/net.zig b/lib/std/net.zig index 57e50a7349..7629ecc8f7 100644 --- a/lib/std/net.zig +++ b/lib/std/net.zig @@ -1082,7 +1082,7 @@ fn linuxLookupName( key |= (MAXADDRS - @intCast(i32, i)) << DAS_ORDER_SHIFT; addr.sortkey = key; } - std.sort.sort(LookupAddr, addrs.items, {}, addrCmpLessThan); + mem.sort(LookupAddr, addrs.items, {}, addrCmpLessThan); } const Policy = struct { diff --git a/lib/std/sort.zig b/lib/std/sort.zig index 3e219b8566..bf2bf40f89 100644 --- a/lib/std/sort.zig +++ b/lib/std/sort.zig @@ -4,1241 +4,152 @@ const testing = std.testing; const mem = std.mem; const math = std.math; -pub fn binarySearch( - comptime T: type, - key: anytype, - items: []const T, - context: anytype, - comptime compareFn: fn (context: @TypeOf(context), key: @TypeOf(key), mid_item: T) math.Order, -) ?usize { - var left: usize = 0; - var right: usize = items.len; - - while (left < right) { - // Avoid overflowing in the midpoint calculation - const mid = left + (right - left) / 2; - // Compare the key with the midpoint element - switch (compareFn(context, key, items[mid])) { - .eq => return mid, - .gt => left = mid + 1, - .lt => right = mid, - } - } - - return null; -} - -test "binarySearch" { - const S = struct { - fn order_u32(context: void, lhs: u32, rhs: u32) math.Order { - _ = context; - return math.order(lhs, rhs); - } - fn order_i32(context: void, lhs: i32, rhs: i32) math.Order { - _ = context; - return math.order(lhs, rhs); - } - }; - try testing.expectEqual( - @as(?usize, null), - binarySearch(u32, @as(u32, 1), &[_]u32{}, {}, S.order_u32), - ); - try testing.expectEqual( - @as(?usize, 0), - binarySearch(u32, @as(u32, 1), &[_]u32{1}, {}, S.order_u32), - ); - try testing.expectEqual( - @as(?usize, null), - binarySearch(u32, @as(u32, 1), &[_]u32{0}, {}, S.order_u32), - ); - try testing.expectEqual( - @as(?usize, null), - binarySearch(u32, @as(u32, 0), &[_]u32{1}, {}, S.order_u32), - ); - try testing.expectEqual( - @as(?usize, 4), - binarySearch(u32, @as(u32, 5), &[_]u32{ 1, 2, 3, 4, 5 }, {}, S.order_u32), - ); - try testing.expectEqual( - @as(?usize, 0), - binarySearch(u32, @as(u32, 2), &[_]u32{ 2, 4, 8, 16, 32, 64 }, {}, S.order_u32), - ); - try testing.expectEqual( - @as(?usize, 1), - binarySearch(i32, @as(i32, -4), &[_]i32{ -7, -4, 0, 9, 10 }, {}, S.order_i32), - ); - try testing.expectEqual( - @as(?usize, 3), - binarySearch(i32, @as(i32, 98), &[_]i32{ -100, -25, 2, 98, 99, 100 }, {}, S.order_i32), - ); - const R = struct { - b: i32, - e: i32, - - fn r(b: i32, e: i32) @This() { - return @This(){ .b = b, .e = e }; - } - - fn order(context: void, key: i32, mid_item: @This()) math.Order { - _ = context; - - if (key < mid_item.b) { - return .lt; - } - - if (key > mid_item.e) { - return .gt; - } - - return .eq; - } - }; - try testing.expectEqual( - @as(?usize, null), - binarySearch(R, @as(i32, -45), &[_]R{ R.r(-100, -50), R.r(-40, -20), R.r(-10, 20), R.r(30, 40) }, {}, R.order), - ); - try testing.expectEqual( - @as(?usize, 2), - binarySearch(R, @as(i32, 10), &[_]R{ R.r(-100, -50), R.r(-40, -20), R.r(-10, 20), R.r(30, 40) }, {}, R.order), - ); - try testing.expectEqual( - @as(?usize, 1), - binarySearch(R, @as(i32, -20), &[_]R{ R.r(-100, -50), R.r(-40, -20), R.r(-10, 20), 
R.r(30, 40) }, {}, R.order), - ); -} +pub const block = @import("sort/block.zig").block; +pub const pdq = @import("sort/pdq.zig").pdq; +pub const pdqContext = @import("sort/pdq.zig").pdqContext; /// Stable in-place sort. O(n) best case, O(pow(n, 2)) worst case. /// O(1) memory (no allocator required). /// Sorts in ascending order with respect to the given `lessThan` function. -/// This can be expressed in terms of `insertionSortContext` but the glue -/// code is slightly longer than the direct implementation. -pub fn insertionSort( +pub fn insertion( comptime T: type, items: []T, context: anytype, - comptime lessThan: fn (context: @TypeOf(context), lhs: T, rhs: T) bool, + comptime lessThanFn: fn (@TypeOf(context), lhs: T, rhs: T) bool, ) void { - var i: usize = 1; - while (i < items.len) : (i += 1) { - const x = items[i]; - var j: usize = i; - while (j > 0 and lessThan(context, x, items[j - 1])) : (j -= 1) { - items[j] = items[j - 1]; + const Context = struct { + items: []T, + sub_ctx: @TypeOf(context), + + pub fn lessThan(ctx: @This(), a: usize, b: usize) bool { + return lessThanFn(ctx.sub_ctx, ctx.items[a], ctx.items[b]); } - items[j] = x; - } + + pub fn swap(ctx: @This(), a: usize, b: usize) void { + return mem.swap(T, &ctx.items[a], &ctx.items[b]); + } + }; + insertionContext(0, items.len, Context{ .items = items, .sub_ctx = context }); } /// Stable in-place sort. O(n) best case, O(pow(n, 2)) worst case. /// O(1) memory (no allocator required). -/// Sorts in ascending order with respect to the given `context.lessThan` function. -pub fn insertionSortContext(len: usize, context: anytype) void { - var i: usize = 1; - while (i < len) : (i += 1) { - var j: usize = i; - while (j > 0 and context.lessThan(j, j - 1)) : (j -= 1) { +/// Sorts in ascending order with respect to the given `lessThan` function. 
+pub fn insertionContext(a: usize, b: usize, context: anytype) void { + var i = a + 1; + while (i < b) : (i += 1) { + var j = i; + while (j > a and context.lessThan(j, j - 1)) : (j -= 1) { context.swap(j, j - 1); } } } -const Range = struct { - start: usize, - end: usize, - - fn init(start: usize, end: usize) Range { - return Range{ - .start = start, - .end = end, - }; - } - - fn length(self: Range) usize { - return self.end - self.start; - } -}; - -const Iterator = struct { - size: usize, - power_of_two: usize, - numerator: usize, - decimal: usize, - denominator: usize, - decimal_step: usize, - numerator_step: usize, - - fn init(size2: usize, min_level: usize) Iterator { - const power_of_two = math.floorPowerOfTwo(usize, size2); - const denominator = power_of_two / min_level; - return Iterator{ - .numerator = 0, - .decimal = 0, - .size = size2, - .power_of_two = power_of_two, - .denominator = denominator, - .decimal_step = size2 / denominator, - .numerator_step = size2 % denominator, - }; - } - - fn begin(self: *Iterator) void { - self.numerator = 0; - self.decimal = 0; - } - - fn nextRange(self: *Iterator) Range { - const start = self.decimal; - - self.decimal += self.decimal_step; - self.numerator += self.numerator_step; - if (self.numerator >= self.denominator) { - self.numerator -= self.denominator; - self.decimal += 1; - } - - return Range{ - .start = start, - .end = self.decimal, - }; - } - - fn finished(self: *Iterator) bool { - return self.decimal >= self.size; - } - - fn nextLevel(self: *Iterator) bool { - self.decimal_step += self.decimal_step; - self.numerator_step += self.numerator_step; - if (self.numerator_step >= self.denominator) { - self.numerator_step -= self.denominator; - self.decimal_step += 1; - } - - return (self.decimal_step < self.size); - } - - fn length(self: *Iterator) usize { - return self.decimal_step; - } -}; - -const Pull = struct { - from: usize, - to: usize, - count: usize, - range: Range, -}; - -/// Stable in-place sort. O(n) best case, O(n*log(n)) worst case and average case. +/// Unstable in-place sort. O(n*log(n)) best case, worst case and average case. /// O(1) memory (no allocator required). /// Sorts in ascending order with respect to the given `lessThan` function. -/// Currently implemented as block sort. 
-pub fn sort( +pub fn heap( comptime T: type, items: []T, context: anytype, - comptime lessThan: fn (context: @TypeOf(context), lhs: T, rhs: T) bool, + comptime lessThanFn: fn (@TypeOf(context), lhs: T, rhs: T) bool, ) void { + const Context = struct { + items: []T, + sub_ctx: @TypeOf(context), - // Implementation ported from https://github.com/BonzaiThePenguin/WikiSort/blob/master/WikiSort.c - var cache: [512]T = undefined; - - if (items.len < 4) { - if (items.len == 3) { - // hard coded insertion sort - if (lessThan(context, items[1], items[0])) mem.swap(T, &items[0], &items[1]); - if (lessThan(context, items[2], items[1])) { - mem.swap(T, &items[1], &items[2]); - if (lessThan(context, items[1], items[0])) mem.swap(T, &items[0], &items[1]); - } - } else if (items.len == 2) { - if (lessThan(context, items[1], items[0])) mem.swap(T, &items[0], &items[1]); + pub fn lessThan(ctx: @This(), a: usize, b: usize) bool { + return lessThanFn(ctx.sub_ctx, ctx.items[a], ctx.items[b]); } - return; - } - - // sort groups of 4-8 items at a time using an unstable sorting network, - // but keep track of the original item orders to force it to be stable - // http://pages.ripco.net/~jgamble/nw.html - var iterator = Iterator.init(items.len, 4); - while (!iterator.finished()) { - var order = [_]u8{ 0, 1, 2, 3, 4, 5, 6, 7 }; - const range = iterator.nextRange(); - - const sliced_items = items[range.start..]; - switch (range.length()) { - 8 => { - swap(T, sliced_items, context, lessThan, &order, 0, 1); - swap(T, sliced_items, context, lessThan, &order, 2, 3); - swap(T, sliced_items, context, lessThan, &order, 4, 5); - swap(T, sliced_items, context, lessThan, &order, 6, 7); - swap(T, sliced_items, context, lessThan, &order, 0, 2); - swap(T, sliced_items, context, lessThan, &order, 1, 3); - swap(T, sliced_items, context, lessThan, &order, 4, 6); - swap(T, sliced_items, context, lessThan, &order, 5, 7); - swap(T, sliced_items, context, lessThan, &order, 1, 2); - swap(T, sliced_items, context, lessThan, &order, 5, 6); - swap(T, sliced_items, context, lessThan, &order, 0, 4); - swap(T, sliced_items, context, lessThan, &order, 3, 7); - swap(T, sliced_items, context, lessThan, &order, 1, 5); - swap(T, sliced_items, context, lessThan, &order, 2, 6); - swap(T, sliced_items, context, lessThan, &order, 1, 4); - swap(T, sliced_items, context, lessThan, &order, 3, 6); - swap(T, sliced_items, context, lessThan, &order, 2, 4); - swap(T, sliced_items, context, lessThan, &order, 3, 5); - swap(T, sliced_items, context, lessThan, &order, 3, 4); - }, - 7 => { - swap(T, sliced_items, context, lessThan, &order, 1, 2); - swap(T, sliced_items, context, lessThan, &order, 3, 4); - swap(T, sliced_items, context, lessThan, &order, 5, 6); - swap(T, sliced_items, context, lessThan, &order, 0, 2); - swap(T, sliced_items, context, lessThan, &order, 3, 5); - swap(T, sliced_items, context, lessThan, &order, 4, 6); - swap(T, sliced_items, context, lessThan, &order, 0, 1); - swap(T, sliced_items, context, lessThan, &order, 4, 5); - swap(T, sliced_items, context, lessThan, &order, 2, 6); - swap(T, sliced_items, context, lessThan, &order, 0, 4); - swap(T, sliced_items, context, lessThan, &order, 1, 5); - swap(T, sliced_items, context, lessThan, &order, 0, 3); - swap(T, sliced_items, context, lessThan, &order, 2, 5); - swap(T, sliced_items, context, lessThan, &order, 1, 3); - swap(T, sliced_items, context, lessThan, &order, 2, 4); - swap(T, sliced_items, context, lessThan, &order, 2, 3); - }, - 6 => { - swap(T, sliced_items, context, lessThan, 
&order, 1, 2); - swap(T, sliced_items, context, lessThan, &order, 4, 5); - swap(T, sliced_items, context, lessThan, &order, 0, 2); - swap(T, sliced_items, context, lessThan, &order, 3, 5); - swap(T, sliced_items, context, lessThan, &order, 0, 1); - swap(T, sliced_items, context, lessThan, &order, 3, 4); - swap(T, sliced_items, context, lessThan, &order, 2, 5); - swap(T, sliced_items, context, lessThan, &order, 0, 3); - swap(T, sliced_items, context, lessThan, &order, 1, 4); - swap(T, sliced_items, context, lessThan, &order, 2, 4); - swap(T, sliced_items, context, lessThan, &order, 1, 3); - swap(T, sliced_items, context, lessThan, &order, 2, 3); - }, - 5 => { - swap(T, sliced_items, context, lessThan, &order, 0, 1); - swap(T, sliced_items, context, lessThan, &order, 3, 4); - swap(T, sliced_items, context, lessThan, &order, 2, 4); - swap(T, sliced_items, context, lessThan, &order, 2, 3); - swap(T, sliced_items, context, lessThan, &order, 1, 4); - swap(T, sliced_items, context, lessThan, &order, 0, 3); - swap(T, sliced_items, context, lessThan, &order, 0, 2); - swap(T, sliced_items, context, lessThan, &order, 1, 3); - swap(T, sliced_items, context, lessThan, &order, 1, 2); - }, - 4 => { - swap(T, sliced_items, context, lessThan, &order, 0, 1); - swap(T, sliced_items, context, lessThan, &order, 2, 3); - swap(T, sliced_items, context, lessThan, &order, 0, 2); - swap(T, sliced_items, context, lessThan, &order, 1, 3); - swap(T, sliced_items, context, lessThan, &order, 1, 2); - }, - else => {}, - } - } - if (items.len < 8) return; - - // then merge sort the higher levels, which can be 8-15, 16-31, 32-63, 64-127, etc. - while (true) { - // if every A and B block will fit into the cache, use a special branch - // specifically for merging with the cache - // (we use < rather than <= since the block size might be one more than - // iterator.length()) - if (iterator.length() < cache.len) { - // if four subarrays fit into the cache, it's faster to merge both - // pairs of subarrays into the cache, - // then merge the two merged subarrays from the cache back into the original array - if ((iterator.length() + 1) * 4 <= cache.len and iterator.length() * 4 <= items.len) { - iterator.begin(); - while (!iterator.finished()) { - // merge A1 and B1 into the cache - var A1 = iterator.nextRange(); - var B1 = iterator.nextRange(); - var A2 = iterator.nextRange(); - var B2 = iterator.nextRange(); - - if (lessThan(context, items[B1.end - 1], items[A1.start])) { - // the two ranges are in reverse order, so copy them in reverse order into the cache - const a1_items = items[A1.start..A1.end]; - @memcpy(cache[B1.length()..][0..a1_items.len], a1_items); - const b1_items = items[B1.start..B1.end]; - @memcpy(cache[0..b1_items.len], b1_items); - } else if (lessThan(context, items[B1.start], items[A1.end - 1])) { - // these two ranges weren't already in order, so merge them into the cache - mergeInto(T, items, A1, B1, context, lessThan, cache[0..]); - } else { - // if A1, B1, A2, and B2 are all in order, skip doing anything else - if (!lessThan(context, items[B2.start], items[A2.end - 1]) and !lessThan(context, items[A2.start], items[B1.end - 1])) continue; - - // copy A1 and B1 into the cache in the same order - const a1_items = items[A1.start..A1.end]; - @memcpy(cache[0..a1_items.len], a1_items); - const b1_items = items[B1.start..B1.end]; - @memcpy(cache[A1.length()..][0..b1_items.len], b1_items); - } - A1 = Range.init(A1.start, B1.end); - - // merge A2 and B2 into the cache - if (lessThan(context, items[B2.end - 1], 
items[A2.start])) { - // the two ranges are in reverse order, so copy them in reverse order into the cache - const a2_items = items[A2.start..A2.end]; - @memcpy(cache[A1.length() + B2.length() ..][0..a2_items.len], a2_items); - const b2_items = items[B2.start..B2.end]; - @memcpy(cache[A1.length()..][0..b2_items.len], b2_items); - } else if (lessThan(context, items[B2.start], items[A2.end - 1])) { - // these two ranges weren't already in order, so merge them into the cache - mergeInto(T, items, A2, B2, context, lessThan, cache[A1.length()..]); - } else { - // copy A2 and B2 into the cache in the same order - const a2_items = items[A2.start..A2.end]; - @memcpy(cache[A1.length()..][0..a2_items.len], a2_items); - const b2_items = items[B2.start..B2.end]; - @memcpy(cache[A1.length() + A2.length() ..][0..b2_items.len], b2_items); - } - A2 = Range.init(A2.start, B2.end); - - // merge A1 and A2 from the cache into the items - const A3 = Range.init(0, A1.length()); - const B3 = Range.init(A1.length(), A1.length() + A2.length()); - - if (lessThan(context, cache[B3.end - 1], cache[A3.start])) { - // the two ranges are in reverse order, so copy them in reverse order into the items - const a3_items = cache[A3.start..A3.end]; - @memcpy(items[A1.start + A2.length() ..][0..a3_items.len], a3_items); - const b3_items = cache[B3.start..B3.end]; - @memcpy(items[A1.start..][0..b3_items.len], b3_items); - } else if (lessThan(context, cache[B3.start], cache[A3.end - 1])) { - // these two ranges weren't already in order, so merge them back into the items - mergeInto(T, cache[0..], A3, B3, context, lessThan, items[A1.start..]); - } else { - // copy A3 and B3 into the items in the same order - const a3_items = cache[A3.start..A3.end]; - @memcpy(items[A1.start..][0..a3_items.len], a3_items); - const b3_items = cache[B3.start..B3.end]; - @memcpy(items[A1.start + A1.length() ..][0..b3_items.len], b3_items); - } - } - - // we merged two levels at the same time, so we're done with this level already - // (iterator.nextLevel() is called again at the bottom of this outer merge loop) - _ = iterator.nextLevel(); - } else { - iterator.begin(); - while (!iterator.finished()) { - var A = iterator.nextRange(); - var B = iterator.nextRange(); - - if (lessThan(context, items[B.end - 1], items[A.start])) { - // the two ranges are in reverse order, so a simple rotation should fix it - mem.rotate(T, items[A.start..B.end], A.length()); - } else if (lessThan(context, items[B.start], items[A.end - 1])) { - // these two ranges weren't already in order, so we'll need to merge them! - const a_items = items[A.start..A.end]; - @memcpy(cache[0..a_items.len], a_items); - mergeExternal(T, items, A, B, context, lessThan, cache[0..]); - } - } - } - } else { - // this is where the in-place merge logic starts! - // 1. pull out two internal buffers each containing √A unique values - // 1a. adjust block_size and buffer_size if we couldn't find enough unique values - // 2. loop over the A and B subarrays within this level of the merge sort - // 3. break A and B into blocks of size 'block_size' - // 4. "tag" each of the A blocks with values from the first internal buffer - // 5. roll the A blocks through the B blocks and drop/rotate them where they belong - // 6. merge each A block with any B values that follow, using the cache or the second internal buffer - // 7. sort the second internal buffer if it exists - // 8. 
redistribute the two internal buffers back into the items - var block_size: usize = math.sqrt(iterator.length()); - var buffer_size = iterator.length() / block_size + 1; - - // as an optimization, we really only need to pull out the internal buffers once for each level of merges - // after that we can reuse the same buffers over and over, then redistribute it when we're finished with this level - var A: Range = undefined; - var B: Range = undefined; - var index: usize = 0; - var last: usize = 0; - var count: usize = 0; - var find: usize = 0; - var start: usize = 0; - var pull_index: usize = 0; - var pull = [_]Pull{ - Pull{ - .from = 0, - .to = 0, - .count = 0, - .range = Range.init(0, 0), - }, - Pull{ - .from = 0, - .to = 0, - .count = 0, - .range = Range.init(0, 0), - }, - }; - - var buffer1 = Range.init(0, 0); - var buffer2 = Range.init(0, 0); - - // find two internal buffers of size 'buffer_size' each - find = buffer_size + buffer_size; - var find_separately = false; - - if (block_size <= cache.len) { - // if every A block fits into the cache then we won't need the second internal buffer, - // so we really only need to find 'buffer_size' unique values - find = buffer_size; - } else if (find > iterator.length()) { - // we can't fit both buffers into the same A or B subarray, so find two buffers separately - find = buffer_size; - find_separately = true; - } - - // we need to find either a single contiguous space containing 2√A unique values (which will be split up into two buffers of size √A each), - // or we need to find one buffer of < 2√A unique values, and a second buffer of √A unique values, - // OR if we couldn't find that many unique values, we need the largest possible buffer we can get - - // in the case where it couldn't find a single buffer of at least √A unique values, - // all of the Merge steps must be replaced by a different merge algorithm (MergeInPlace) - iterator.begin(); - while (!iterator.finished()) { - A = iterator.nextRange(); - B = iterator.nextRange(); - - // just store information about where the values will be pulled from and to, - // as well as how many values there are, to create the two internal buffers - - // check A for the number of unique values we need to fill an internal buffer - // these values will be pulled out to the start of A - last = A.start; - count = 1; - while (count < find) : ({ - last = index; - count += 1; - }) { - index = findLastForward(T, items, items[last], Range.init(last + 1, A.end), context, lessThan, find - count); - if (index == A.end) break; - } - index = last; - - if (count >= buffer_size) { - // keep track of the range within the items where we'll need to "pull out" these values to create the internal buffer - pull[pull_index] = Pull{ - .range = Range.init(A.start, B.end), - .count = count, - .from = index, - .to = A.start, - }; - pull_index = 1; - - if (count == buffer_size + buffer_size) { - // we were able to find a single contiguous section containing 2√A unique values, - // so this section can be used to contain both of the internal buffers we'll need - buffer1 = Range.init(A.start, A.start + buffer_size); - buffer2 = Range.init(A.start + buffer_size, A.start + count); - break; - } else if (find == buffer_size + buffer_size) { - // we found a buffer that contains at least √A unique values, but did not contain the full 2√A unique values, - // so we still need to find a second separate buffer of at least √A unique values - buffer1 = Range.init(A.start, A.start + count); - find = buffer_size; - } else if (block_size <= 
cache.len) { - // we found the first and only internal buffer that we need, so we're done! - buffer1 = Range.init(A.start, A.start + count); - break; - } else if (find_separately) { - // found one buffer, but now find the other one - buffer1 = Range.init(A.start, A.start + count); - find_separately = false; - } else { - // we found a second buffer in an 'A' subarray containing √A unique values, so we're done! - buffer2 = Range.init(A.start, A.start + count); - break; - } - } else if (pull_index == 0 and count > buffer1.length()) { - // keep track of the largest buffer we were able to find - buffer1 = Range.init(A.start, A.start + count); - pull[pull_index] = Pull{ - .range = Range.init(A.start, B.end), - .count = count, - .from = index, - .to = A.start, - }; - } - - // check B for the number of unique values we need to fill an internal buffer - // these values will be pulled out to the end of B - last = B.end - 1; - count = 1; - while (count < find) : ({ - last = index - 1; - count += 1; - }) { - index = findFirstBackward(T, items, items[last], Range.init(B.start, last), context, lessThan, find - count); - if (index == B.start) break; - } - index = last; - if (count >= buffer_size) { - // keep track of the range within the items where we'll need to "pull out" these values to create the internal buffe - pull[pull_index] = Pull{ - .range = Range.init(A.start, B.end), - .count = count, - .from = index, - .to = B.end, - }; - pull_index = 1; - - if (count == buffer_size + buffer_size) { - // we were able to find a single contiguous section containing 2√A unique values, - // so this section can be used to contain both of the internal buffers we'll need - buffer1 = Range.init(B.end - count, B.end - buffer_size); - buffer2 = Range.init(B.end - buffer_size, B.end); - break; - } else if (find == buffer_size + buffer_size) { - // we found a buffer that contains at least √A unique values, but did not contain the full 2√A unique values, - // so we still need to find a second separate buffer of at least √A unique values - buffer1 = Range.init(B.end - count, B.end); - find = buffer_size; - } else if (block_size <= cache.len) { - // we found the first and only internal buffer that we need, so we're done! - buffer1 = Range.init(B.end - count, B.end); - break; - } else if (find_separately) { - // found one buffer, but now find the other one - buffer1 = Range.init(B.end - count, B.end); - find_separately = false; - } else { - // buffer2 will be pulled out from a 'B' subarray, so if the first buffer was pulled out from the corresponding 'A' subarray, - // we need to adjust the end point for that A subarray so it knows to stop redistributing its values before reaching buffer2 - if (pull[0].range.start == A.start) pull[0].range.end -= pull[1].count; - - // we found a second buffer in an 'B' subarray containing √A unique values, so we're done! 
- buffer2 = Range.init(B.end - count, B.end); - break; - } - } else if (pull_index == 0 and count > buffer1.length()) { - // keep track of the largest buffer we were able to find - buffer1 = Range.init(B.end - count, B.end); - pull[pull_index] = Pull{ - .range = Range.init(A.start, B.end), - .count = count, - .from = index, - .to = B.end, - }; - } - } - - // pull out the two ranges so we can use them as internal buffers - pull_index = 0; - while (pull_index < 2) : (pull_index += 1) { - const length = pull[pull_index].count; - - if (pull[pull_index].to < pull[pull_index].from) { - // we're pulling the values out to the left, which means the start of an A subarray - index = pull[pull_index].from; - count = 1; - while (count < length) : (count += 1) { - index = findFirstBackward(T, items, items[index - 1], Range.init(pull[pull_index].to, pull[pull_index].from - (count - 1)), context, lessThan, length - count); - const range = Range.init(index + 1, pull[pull_index].from + 1); - mem.rotate(T, items[range.start..range.end], range.length() - count); - pull[pull_index].from = index + count; - } - } else if (pull[pull_index].to > pull[pull_index].from) { - // we're pulling values out to the right, which means the end of a B subarray - index = pull[pull_index].from + 1; - count = 1; - while (count < length) : (count += 1) { - index = findLastForward(T, items, items[index], Range.init(index, pull[pull_index].to), context, lessThan, length - count); - const range = Range.init(pull[pull_index].from, index - 1); - mem.rotate(T, items[range.start..range.end], count); - pull[pull_index].from = index - 1 - count; - } - } - } - - // adjust block_size and buffer_size based on the values we were able to pull out - buffer_size = buffer1.length(); - block_size = iterator.length() / buffer_size + 1; - - // the first buffer NEEDS to be large enough to tag each of the evenly sized A blocks, - // so this was originally here to test the math for adjusting block_size above - // assert((iterator.length() + 1)/block_size <= buffer_size); - - // now that the two internal buffers have been created, it's time to merge each A+B combination at this level of the merge sort! - iterator.begin(); - while (!iterator.finished()) { - A = iterator.nextRange(); - B = iterator.nextRange(); - - // remove any parts of A or B that are being used by the internal buffers - start = A.start; - if (start == pull[0].range.start) { - if (pull[0].from > pull[0].to) { - A.start += pull[0].count; - - // if the internal buffer takes up the entire A or B subarray, then there's nothing to merge - // this only happens for very small subarrays, like √4 = 2, 2 * (2 internal buffers) = 4, - // which also only happens when cache.len is small or 0 since it'd otherwise use MergeExternal - if (A.length() == 0) continue; - } else if (pull[0].from < pull[0].to) { - B.end -= pull[0].count; - if (B.length() == 0) continue; - } - } - if (start == pull[1].range.start) { - if (pull[1].from > pull[1].to) { - A.start += pull[1].count; - if (A.length() == 0) continue; - } else if (pull[1].from < pull[1].to) { - B.end -= pull[1].count; - if (B.length() == 0) continue; - } - } - - if (lessThan(context, items[B.end - 1], items[A.start])) { - // the two ranges are in reverse order, so a simple rotation should fix it - mem.rotate(T, items[A.start..B.end], A.length()); - } else if (lessThan(context, items[A.end], items[A.end - 1])) { - // these two ranges weren't already in order, so we'll need to merge them! 
- var findA: usize = undefined; - - // break the remainder of A into blocks. firstA is the uneven-sized first A block - var blockA = Range.init(A.start, A.end); - var firstA = Range.init(A.start, A.start + blockA.length() % block_size); - - // swap the first value of each A block with the value in buffer1 - var indexA = buffer1.start; - index = firstA.end; - while (index < blockA.end) : ({ - indexA += 1; - index += block_size; - }) { - mem.swap(T, &items[indexA], &items[index]); - } - - // start rolling the A blocks through the B blocks! - // whenever we leave an A block behind, we'll need to merge the previous A block with any B blocks that follow it, so track that information as well - var lastA = firstA; - var lastB = Range.init(0, 0); - var blockB = Range.init(B.start, B.start + math.min(block_size, B.length())); - blockA.start += firstA.length(); - indexA = buffer1.start; - - // if the first unevenly sized A block fits into the cache, copy it there for when we go to Merge it - // otherwise, if the second buffer is available, block swap the contents into that - if (lastA.length() <= cache.len) { - const last_a_items = items[lastA.start..lastA.end]; - @memcpy(cache[0..last_a_items.len], last_a_items); - } else if (buffer2.length() > 0) { - blockSwap(T, items, lastA.start, buffer2.start, lastA.length()); - } - - if (blockA.length() > 0) { - while (true) { - // if there's a previous B block and the first value of the minimum A block is <= the last value of the previous B block, - // then drop that minimum A block behind. or if there are no B blocks left then keep dropping the remaining A blocks. - if ((lastB.length() > 0 and !lessThan(context, items[lastB.end - 1], items[indexA])) or blockB.length() == 0) { - // figure out where to split the previous B block, and rotate it at the split - const B_split = binaryFirst(T, items, items[indexA], lastB, context, lessThan); - const B_remaining = lastB.end - B_split; - - // swap the minimum A block to the beginning of the rolling A blocks - var minA = blockA.start; - findA = minA + block_size; - while (findA < blockA.end) : (findA += block_size) { - if (lessThan(context, items[findA], items[minA])) { - minA = findA; - } - } - blockSwap(T, items, blockA.start, minA, block_size); - - // swap the first item of the previous A block back with its original value, which is stored in buffer1 - mem.swap(T, &items[blockA.start], &items[indexA]); - indexA += 1; - - // locally merge the previous A block with the B values that follow it - // if lastA fits into the external cache we'll use that (with MergeExternal), - // or if the second internal buffer exists we'll use that (with MergeInternal), - // or failing that we'll use a strictly in-place merge algorithm (MergeInPlace) - - if (lastA.length() <= cache.len) { - mergeExternal(T, items, lastA, Range.init(lastA.end, B_split), context, lessThan, cache[0..]); - } else if (buffer2.length() > 0) { - mergeInternal(T, items, lastA, Range.init(lastA.end, B_split), context, lessThan, buffer2); - } else { - mergeInPlace(T, items, lastA, Range.init(lastA.end, B_split), context, lessThan); - } - - if (buffer2.length() > 0 or block_size <= cache.len) { - // copy the previous A block into the cache or buffer2, since that's where we need it to be when we go to merge it anyway - if (block_size <= cache.len) { - @memcpy(cache[0..block_size], items[blockA.start..][0..block_size]); - } else { - blockSwap(T, items, blockA.start, buffer2.start, block_size); - } - - // this is equivalent to rotating, but faster - // the area 
normally taken up by the A block is either the contents of buffer2, or data we don't need anymore since we memcopied it - // either way, we don't need to retain the order of those items, so instead of rotating we can just block swap B to where it belongs - blockSwap(T, items, B_split, blockA.start + block_size - B_remaining, B_remaining); - } else { - // we are unable to use the 'buffer2' trick to speed up the rotation operation since buffer2 doesn't exist, so perform a normal rotation - mem.rotate(T, items[B_split .. blockA.start + block_size], blockA.start - B_split); - } - - // update the range for the remaining A blocks, and the range remaining from the B block after it was split - lastA = Range.init(blockA.start - B_remaining, blockA.start - B_remaining + block_size); - lastB = Range.init(lastA.end, lastA.end + B_remaining); - - // if there are no more A blocks remaining, this step is finished! - blockA.start += block_size; - if (blockA.length() == 0) break; - } else if (blockB.length() < block_size) { - // move the last B block, which is unevenly sized, to before the remaining A blocks, by using a rotation - // the cache is disabled here since it might contain the contents of the previous A block - mem.rotate(T, items[blockA.start..blockB.end], blockB.start - blockA.start); - - lastB = Range.init(blockA.start, blockA.start + blockB.length()); - blockA.start += blockB.length(); - blockA.end += blockB.length(); - blockB.end = blockB.start; - } else { - // roll the leftmost A block to the end by swapping it with the next B block - blockSwap(T, items, blockA.start, blockB.start, block_size); - lastB = Range.init(blockA.start, blockA.start + block_size); - - blockA.start += block_size; - blockA.end += block_size; - blockB.start += block_size; - - if (blockB.end > B.end - block_size) { - blockB.end = B.end; - } else { - blockB.end += block_size; - } - } - } - } - - // merge the last A block with the remaining B values - if (lastA.length() <= cache.len) { - mergeExternal(T, items, lastA, Range.init(lastA.end, B.end), context, lessThan, cache[0..]); - } else if (buffer2.length() > 0) { - mergeInternal(T, items, lastA, Range.init(lastA.end, B.end), context, lessThan, buffer2); - } else { - mergeInPlace(T, items, lastA, Range.init(lastA.end, B.end), context, lessThan); - } - } - } - - // when we're finished with this merge step we should have the one - // or two internal buffers left over, where the second buffer is all jumbled up - // insertion sort the second buffer, then redistribute the buffers - // back into the items using the opposite process used for creating the buffer - - // while an unstable sort like quicksort could be applied here, in benchmarks - // it was consistently slightly slower than a simple insertion sort, - // even for tens of millions of items. 
this may be because insertion - // sort is quite fast when the data is already somewhat sorted, like it is here - insertionSort(T, items[buffer2.start..buffer2.end], context, lessThan); - - pull_index = 0; - while (pull_index < 2) : (pull_index += 1) { - var unique = pull[pull_index].count * 2; - if (pull[pull_index].from > pull[pull_index].to) { - // the values were pulled out to the left, so redistribute them back to the right - var buffer = Range.init(pull[pull_index].range.start, pull[pull_index].range.start + pull[pull_index].count); - while (buffer.length() > 0) { - index = findFirstForward(T, items, items[buffer.start], Range.init(buffer.end, pull[pull_index].range.end), context, lessThan, unique); - const amount = index - buffer.end; - mem.rotate(T, items[buffer.start..index], buffer.length()); - buffer.start += (amount + 1); - buffer.end += amount; - unique -= 2; - } - } else if (pull[pull_index].from < pull[pull_index].to) { - // the values were pulled out to the right, so redistribute them back to the left - var buffer = Range.init(pull[pull_index].range.end - pull[pull_index].count, pull[pull_index].range.end); - while (buffer.length() > 0) { - index = findLastBackward(T, items, items[buffer.end - 1], Range.init(pull[pull_index].range.start, buffer.start), context, lessThan, unique); - const amount = buffer.start - index; - mem.rotate(T, items[index..buffer.end], amount); - buffer.start -= amount; - buffer.end -= (amount + 1); - unique -= 2; - } - } - } + pub fn swap(ctx: @This(), a: usize, b: usize) void { + return mem.swap(T, &ctx.items[a], &ctx.items[b]); } - - // double the size of each A and B subarray that will be merged in the next level - if (!iterator.nextLevel()) break; - } -} - -/// TODO currently this just calls `insertionSortContext`. The block sort implementation -/// in this file needs to be adapted to use the sort context. -pub fn sortContext(len: usize, context: anytype) void { - return insertionSortContext(len, context); -} - -// merge operation without a buffer -fn mergeInPlace( - comptime T: type, - items: []T, - A_arg: Range, - B_arg: Range, - context: anytype, - comptime lessThan: fn (@TypeOf(context), T, T) bool, -) void { - if (A_arg.length() == 0 or B_arg.length() == 0) return; - - // this just repeatedly binary searches into B and rotates A into position. - // the paper suggests using the 'rotation-based Hwang and Lin algorithm' here, - // but I decided to stick with this because it had better situational performance - // - // (Hwang and Lin is designed for merging subarrays of very different sizes, - // but WikiSort almost always uses subarrays that are roughly the same size) - // - // normally this is incredibly suboptimal, but this function is only called - // when none of the A or B blocks in any subarray contained 2√A unique values, - // which places a hard limit on the number of times this will ACTUALLY need - // to binary search and rotate. - // - // according to my analysis the worst case is √A rotations performed on √A items - // once the constant factors are removed, which ends up being O(n) - // - // again, this is NOT a general-purpose solution – it only works well in this case! 
- // kind of like how the O(n^2) insertion sort is used in some places - - var A = A_arg; - var B = B_arg; - - while (true) { - // find the first place in B where the first item in A needs to be inserted - const mid = binaryFirst(T, items, items[A.start], B, context, lessThan); - - // rotate A into place - const amount = mid - A.end; - mem.rotate(T, items[A.start..mid], A.length()); - if (B.end == mid) break; - - // calculate the new A and B ranges - B.start = mid; - A = Range.init(A.start + amount, B.start); - A.start = binaryLast(T, items, items[A.start], A, context, lessThan); - if (A.length() == 0) break; - } -} - -// merge operation using an internal buffer -fn mergeInternal( - comptime T: type, - items: []T, - A: Range, - B: Range, - context: anytype, - comptime lessThan: fn (@TypeOf(context), T, T) bool, - buffer: Range, -) void { - // whenever we find a value to add to the final array, swap it with the value that's already in that spot - // when this algorithm is finished, 'buffer' will contain its original contents, but in a different order - var A_count: usize = 0; - var B_count: usize = 0; - var insert: usize = 0; - - if (B.length() > 0 and A.length() > 0) { - while (true) { - if (!lessThan(context, items[B.start + B_count], items[buffer.start + A_count])) { - mem.swap(T, &items[A.start + insert], &items[buffer.start + A_count]); - A_count += 1; - insert += 1; - if (A_count >= A.length()) break; - } else { - mem.swap(T, &items[A.start + insert], &items[B.start + B_count]); - B_count += 1; - insert += 1; - if (B_count >= B.length()) break; - } - } - } - - // swap the remainder of A into the final array - blockSwap(T, items, buffer.start + A_count, A.start + insert, A.length() - A_count); -} - -fn blockSwap(comptime T: type, items: []T, start1: usize, start2: usize, block_size: usize) void { - var index: usize = 0; - while (index < block_size) : (index += 1) { - mem.swap(T, &items[start1 + index], &items[start2 + index]); - } -} - -// combine a linear search with a binary search to reduce the number of comparisons in situations -// where have some idea as to how many unique values there are and where the next value might be -fn findFirstForward( - comptime T: type, - items: []T, - value: T, - range: Range, - context: anytype, - comptime lessThan: fn (@TypeOf(context), T, T) bool, - unique: usize, -) usize { - if (range.length() == 0) return range.start; - const skip = math.max(range.length() / unique, @as(usize, 1)); - - var index = range.start + skip; - while (lessThan(context, items[index - 1], value)) : (index += skip) { - if (index >= range.end - skip) { - return binaryFirst(T, items, value, Range.init(index, range.end), context, lessThan); - } - } - - return binaryFirst(T, items, value, Range.init(index - skip, index), context, lessThan); -} - -fn findFirstBackward( - comptime T: type, - items: []T, - value: T, - range: Range, - context: anytype, - comptime lessThan: fn (@TypeOf(context), T, T) bool, - unique: usize, -) usize { - if (range.length() == 0) return range.start; - const skip = math.max(range.length() / unique, @as(usize, 1)); - - var index = range.end - skip; - while (index > range.start and !lessThan(context, items[index - 1], value)) : (index -= skip) { - if (index < range.start + skip) { - return binaryFirst(T, items, value, Range.init(range.start, index), context, lessThan); - } - } - - return binaryFirst(T, items, value, Range.init(index, index + skip), context, lessThan); -} - -fn findLastForward( - comptime T: type, - items: []T, - value: T, - range: Range, 
-    context: anytype,
-    comptime lessThan: fn (@TypeOf(context), T, T) bool,
-    unique: usize,
-) usize {
-    if (range.length() == 0) return range.start;
-    const skip = math.max(range.length() / unique, @as(usize, 1));
-
-    var index = range.start + skip;
-    while (!lessThan(context, value, items[index - 1])) : (index += skip) {
-        if (index >= range.end - skip) {
-            return binaryLast(T, items, value, Range.init(index, range.end), context, lessThan);
-        }
-    }
-
-    return binaryLast(T, items, value, Range.init(index - skip, index), context, lessThan);
-}
-
-fn findLastBackward(
-    comptime T: type,
-    items: []T,
-    value: T,
-    range: Range,
-    context: anytype,
-    comptime lessThan: fn (@TypeOf(context), T, T) bool,
-    unique: usize,
-) usize {
-    if (range.length() == 0) return range.start;
-    const skip = math.max(range.length() / unique, @as(usize, 1));
-
-    var index = range.end - skip;
-    while (index > range.start and lessThan(context, value, items[index - 1])) : (index -= skip) {
-        if (index < range.start + skip) {
-            return binaryLast(T, items, value, Range.init(range.start, index), context, lessThan);
-        }
-    }
-
-    return binaryLast(T, items, value, Range.init(index, index + skip), context, lessThan);
+    };
+    heapContext(0, items.len, Context{ .items = items, .sub_ctx = context });
 }

-fn binaryFirst(
-    comptime T: type,
-    items: []T,
-    value: T,
-    range: Range,
-    context: anytype,
-    comptime lessThan: fn (@TypeOf(context), T, T) bool,
-) usize {
-    var curr = range.start;
-    var size = range.length();
-    if (range.start >= range.end) return range.end;
-    while (size > 0) {
-        const offset = size % 2;
-
-        size /= 2;
-        const mid_item = items[curr + size];
-        if (lessThan(context, mid_item, value)) {
-            curr += size + offset;
-        }
+/// Unstable in-place sort. O(n*log(n)) best case, worst case and average case.
+/// O(1) memory (no allocator required).
+/// Sorts in ascending order with respect to the given `context.lessThan` function.
+pub fn heapContext(a: usize, b: usize, context: anytype) void {
+    // build the heap in linear time.
+    var i = a + (b - a) / 2;
+    while (i > a) : (i -= 1) {
+        siftDown(a, i - 1, b, context);
     }
-    return curr;
-}
-
-fn binaryLast(
-    comptime T: type,
-    items: []T,
-    value: T,
-    range: Range,
-    context: anytype,
-    comptime lessThan: fn (@TypeOf(context), T, T) bool,
-) usize {
-    var curr = range.start;
-    var size = range.length();
-    if (range.start >= range.end) return range.end;
-    while (size > 0) {
-        const offset = size % 2;
-        size /= 2;
-        const mid_item = items[curr + size];
-        if (!lessThan(context, value, mid_item)) {
-            curr += size + offset;
-        }
+    // pop maximal elements from the heap.
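+    // (the live heap occupies the index range [a, i); each pass below swaps
+    // the maximum, at index `a`, to position i - 1, then restores the heap
+    // invariant over the shrunk range.)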
+    i = b;
+    while (i > a) : (i -= 1) {
+        context.swap(a, i - 1);
+        siftDown(a, a, i - 1, context);
     }
-    return curr;
 }

-fn mergeInto(
-    comptime T: type,
-    from: []T,
-    A: Range,
-    B: Range,
-    context: anytype,
-    comptime lessThan: fn (@TypeOf(context), T, T) bool,
-    into: []T,
-) void {
-    var A_index: usize = A.start;
-    var B_index: usize = B.start;
-    const A_last = A.end;
-    const B_last = B.end;
-    var insert_index: usize = 0;
-
+fn siftDown(a: usize, root: usize, n: usize, context: anytype) void {
+    var node = root;
     while (true) {
-        if (!lessThan(context, from[B_index], from[A_index])) {
-            into[insert_index] = from[A_index];
-            A_index += 1;
-            insert_index += 1;
-            if (A_index == A_last) {
-                // copy the remainder of B into the final array
-                const from_b = from[B_index..B_last];
-                @memcpy(into[insert_index..][0..from_b.len], from_b);
-                break;
-            }
-        } else {
-            into[insert_index] = from[B_index];
-            B_index += 1;
-            insert_index += 1;
-            if (B_index == B_last) {
-                // copy the remainder of A into the final array
-                const from_a = from[A_index..A_last];
-                @memcpy(into[insert_index..][0..from_a.len], from_a);
-                break;
-            }
-        }
-    }
-}
-
-fn mergeExternal(
-    comptime T: type,
-    items: []T,
-    A: Range,
-    B: Range,
-    context: anytype,
-    comptime lessThan: fn (@TypeOf(context), T, T) bool,
-    cache: []T,
-) void {
-    // A fits into the cache, so use that instead of the internal buffer
-    var A_index: usize = 0;
-    var B_index: usize = B.start;
-    var insert_index: usize = A.start;
-    const A_last = A.length();
-    const B_last = B.end;
+        // the heap is rooted at index `a`, so the children of `node` are
+        // located relative to `a` rather than to index 0.
+        var child = a + 2 * (node - a) + 1;
+        if (child >= n) break;

-    if (B.length() > 0 and A.length() > 0) {
-        while (true) {
-            if (!lessThan(context, items[B_index], cache[A_index])) {
-                items[insert_index] = cache[A_index];
-                A_index += 1;
-                insert_index += 1;
-                if (A_index == A_last) break;
-            } else {
-                items[insert_index] = items[B_index];
-                B_index += 1;
-                insert_index += 1;
-                if (B_index == B_last) break;
-            }
+        // choose the greater child.
+        if (child + 1 < n and context.lessThan(child, child + 1)) {
+            child += 1;
         }
-    }

-    // copy the remainder of A into the final array
-    const cache_a = cache[A_index..A_last];
-    @memcpy(items[insert_index..][0..cache_a.len], cache_a);
-}
+        // stop if the invariant holds at `node`.
+        if (!context.lessThan(node, child)) break;

-fn swap(
-    comptime T: type,
-    items: []T,
-    context: anytype,
-    comptime lessThan: fn (@TypeOf(context), lhs: T, rhs: T) bool,
-    order: *[8]u8,
-    x: usize,
-    y: usize,
-) void {
-    if (lessThan(context, items[y], items[x]) or ((order.*)[x] > (order.*)[y] and !lessThan(context, items[x], items[y]))) {
-        mem.swap(T, &items[x], &items[y]);
-        mem.swap(u8, &(order.*)[x], &(order.*)[y]);
+        // swap `node` with the greater child,
+        // move one step down, and continue sifting.
+        context.swap(node, child);
+        node = child;
     }
 }

-/// Use to generate a comparator function for a given type. e.g. `sort(u8, slice, {}, comptime asc(u8))`.
+/// Use to generate a comparator function for a given type. e.g. `sort(u8, slice, {}, asc(u8))`.
 pub fn asc(comptime T: type) fn (void, T, T) bool {
-    const impl = struct {
-        fn inner(context: void, a: T, b: T) bool {
-            _ = context;
+    return struct {
+        pub fn inner(_: void, a: T, b: T) bool {
             return a < b;
         }
-    };
-
-    return impl.inner;
+    }.inner;
 }

-/// Use to generate a comparator function for a given type. e.g. `sort(u8, slice, {}, comptime desc(u8))`.
+/// Use to generate a comparator function for a given type. e.g. `sort(u8, slice, {}, desc(u8))`.
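+/// The returned comparator takes a `void` context, so callers pass `{}` as the
+/// `context` argument (as the tests below do).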
pub fn desc(comptime T: type) fn (void, T, T) bool { - const impl = struct { - fn inner(context: void, a: T, b: T) bool { - _ = context; + return struct { + pub fn inner(_: void, a: T, b: T) bool { return a > b; } - }; - - return impl.inner; + }.inner; } +const asc_u8 = asc(u8); +const asc_i32 = asc(i32); +const desc_u8 = desc(u8); +const desc_i32 = desc(i32); + +const sort_funcs = &[_]fn (comptime type, anytype, anytype, comptime anytype) void{ + block, + pdq, + insertion, + heap, +}; + +const IdAndValue = struct { + id: usize, + value: i32, + + fn lessThan(context: void, a: IdAndValue, b: IdAndValue) bool { + _ = context; + return a.value < b.value; + } +}; + test "stable sort" { - try testStableSort(); - comptime try testStableSort(); -} -fn testStableSort() !void { - var expected = [_]IdAndValue{ + const expected = [_]IdAndValue{ IdAndValue{ .id = 0, .value = 0 }, IdAndValue{ .id = 1, .value = 0 }, IdAndValue{ .id = 2, .value = 0 }, @@ -1249,6 +160,7 @@ fn testStableSort() !void { IdAndValue{ .id = 1, .value = 2 }, IdAndValue{ .id = 2, .value = 2 }, }; + var cases = [_][9]IdAndValue{ [_]IdAndValue{ IdAndValue{ .id = 0, .value = 0 }, @@ -1273,26 +185,15 @@ fn testStableSort() !void { IdAndValue{ .id = 2, .value = 0 }, }, }; + for (&cases) |*case| { - insertionSort(IdAndValue, (case.*)[0..], {}, cmpByValue); + block(IdAndValue, (case.*)[0..], {}, IdAndValue.lessThan); for (case.*, 0..) |item, i| { try testing.expect(item.id == expected[i].id); try testing.expect(item.value == expected[i].value); } } } -const IdAndValue = struct { - id: usize, - value: i32, -}; -fn cmpByValue(context: void, a: IdAndValue, b: IdAndValue) bool { - return asc_i32(context, a.value, b.value); -} - -const asc_u8 = asc(u8); -const asc_i32 = asc(i32); -const desc_u8 = desc(u8); -const desc_i32 = desc(i32); test "sort" { const u8cases = [_][]const []const u8{ @@ -1322,14 +223,6 @@ test "sort" { }, }; - for (u8cases) |case| { - var buf: [8]u8 = undefined; - const slice = buf[0..case[0].len]; - @memcpy(slice, case[0]); - sort(u8, slice, {}, asc_u8); - try testing.expect(mem.eql(u8, slice, case[1])); - } - const i32cases = [_][]const []const i32{ &[_][]const i32{ &[_]i32{}, @@ -1357,12 +250,22 @@ test "sort" { }, }; - for (i32cases) |case| { - var buf: [8]i32 = undefined; - const slice = buf[0..case[0].len]; - @memcpy(slice, case[0]); - sort(i32, slice, {}, asc_i32); - try testing.expect(mem.eql(i32, slice, case[1])); + inline for (sort_funcs) |sortFn| { + for (u8cases) |case| { + var buf: [8]u8 = undefined; + const slice = buf[0..case[0].len]; + @memcpy(slice, case[0]); + sortFn(u8, slice, {}, asc_u8); + try testing.expect(mem.eql(u8, slice, case[1])); + } + + for (i32cases) |case| { + var buf: [8]i32 = undefined; + const slice = buf[0..case[0].len]; + @memcpy(slice, case[0]); + sortFn(i32, slice, {}, asc_i32); + try testing.expect(mem.eql(i32, slice, case[1])); + } } } @@ -1394,53 +297,139 @@ test "sort descending" { }, }; - for (rev_cases) |case| { - var buf: [8]i32 = undefined; - const slice = buf[0..case[0].len]; - @memcpy(slice, case[0]); - sort(i32, slice, {}, desc_i32); - try testing.expect(mem.eql(i32, slice, case[1])); + inline for (sort_funcs) |sortFn| { + for (rev_cases) |case| { + var buf: [8]i32 = undefined; + const slice = buf[0..case[0].len]; + @memcpy(slice, case[0]); + sortFn(i32, slice, {}, desc_i32); + try testing.expect(mem.eql(i32, slice, case[1])); + } } } -test "another sort case" { - var arr = [_]i32{ 5, 3, 1, 2, 4 }; - sort(i32, arr[0..], {}, asc_i32); - - try testing.expect(mem.eql(i32, 
&arr, &[_]i32{ 1, 2, 3, 4, 5 })); -} - test "sort fuzz testing" { var prng = std.rand.DefaultPrng.init(0x12345678); const random = prng.random(); const test_case_count = 10; - var i: usize = 0; - while (i < test_case_count) : (i += 1) { - try fuzzTest(random); + + inline for (sort_funcs) |sortFn| { + var i: usize = 0; + while (i < test_case_count) : (i += 1) { + const array_size = random.intRangeLessThan(usize, 0, 1000); + var array = try testing.allocator.alloc(i32, array_size); + defer testing.allocator.free(array); + // populate with random data + for (array) |*item| { + item.* = random.intRangeLessThan(i32, 0, 100); + } + sortFn(i32, array, {}, asc_i32); + try testing.expect(isSorted(i32, array, {}, asc_i32)); + } } } -var fixed_buffer_mem: [100 * 1024]u8 = undefined; +pub fn binarySearch( + comptime T: type, + key: anytype, + items: []const T, + context: anytype, + comptime compareFn: fn (context: @TypeOf(context), key: @TypeOf(key), mid_item: T) math.Order, +) ?usize { + var left: usize = 0; + var right: usize = items.len; -fn fuzzTest(rng: std.rand.Random) !void { - const array_size = rng.intRangeLessThan(usize, 0, 1000); - var array = try testing.allocator.alloc(IdAndValue, array_size); - defer testing.allocator.free(array); - // populate with random data - for (array, 0..) |*item, index| { - item.id = index; - item.value = rng.intRangeLessThan(i32, 0, 100); + while (left < right) { + // Avoid overflowing in the midpoint calculation + const mid = left + (right - left) / 2; + // Compare the key with the midpoint element + switch (compareFn(context, key, items[mid])) { + .eq => return mid, + .gt => left = mid + 1, + .lt => right = mid, + } } - sort(IdAndValue, array, {}, cmpByValue); - var index: usize = 1; - while (index < array.len) : (index += 1) { - if (array[index].value == array[index - 1].value) { - try testing.expect(array[index].id > array[index - 1].id); - } else { - try testing.expect(array[index].value > array[index - 1].value); + return null; +} + +test "binarySearch" { + const S = struct { + fn order_u32(context: void, lhs: u32, rhs: u32) math.Order { + _ = context; + return math.order(lhs, rhs); } - } + fn order_i32(context: void, lhs: i32, rhs: i32) math.Order { + _ = context; + return math.order(lhs, rhs); + } + }; + try testing.expectEqual( + @as(?usize, null), + binarySearch(u32, @as(u32, 1), &[_]u32{}, {}, S.order_u32), + ); + try testing.expectEqual( + @as(?usize, 0), + binarySearch(u32, @as(u32, 1), &[_]u32{1}, {}, S.order_u32), + ); + try testing.expectEqual( + @as(?usize, null), + binarySearch(u32, @as(u32, 1), &[_]u32{0}, {}, S.order_u32), + ); + try testing.expectEqual( + @as(?usize, null), + binarySearch(u32, @as(u32, 0), &[_]u32{1}, {}, S.order_u32), + ); + try testing.expectEqual( + @as(?usize, 4), + binarySearch(u32, @as(u32, 5), &[_]u32{ 1, 2, 3, 4, 5 }, {}, S.order_u32), + ); + try testing.expectEqual( + @as(?usize, 0), + binarySearch(u32, @as(u32, 2), &[_]u32{ 2, 4, 8, 16, 32, 64 }, {}, S.order_u32), + ); + try testing.expectEqual( + @as(?usize, 1), + binarySearch(i32, @as(i32, -4), &[_]i32{ -7, -4, 0, 9, 10 }, {}, S.order_i32), + ); + try testing.expectEqual( + @as(?usize, 3), + binarySearch(i32, @as(i32, 98), &[_]i32{ -100, -25, 2, 98, 99, 100 }, {}, S.order_i32), + ); + const R = struct { + b: i32, + e: i32, + + fn r(b: i32, e: i32) @This() { + return @This(){ .b = b, .e = e }; + } + + fn order(context: void, key: i32, mid_item: @This()) math.Order { + _ = context; + + if (key < mid_item.b) { + return .lt; + } + + if (key > mid_item.e) { + return 
.gt; + } + + return .eq; + } + }; + try testing.expectEqual( + @as(?usize, null), + binarySearch(R, @as(i32, -45), &[_]R{ R.r(-100, -50), R.r(-40, -20), R.r(-10, 20), R.r(30, 40) }, {}, R.order), + ); + try testing.expectEqual( + @as(?usize, 2), + binarySearch(R, @as(i32, 10), &[_]R{ R.r(-100, -50), R.r(-40, -20), R.r(-10, 20), R.r(30, 40) }, {}, R.order), + ); + try testing.expectEqual( + @as(?usize, 1), + binarySearch(R, @as(i32, -20), &[_]R{ R.r(-100, -50), R.r(-40, -20), R.r(-10, 20), R.r(30, 40) }, {}, R.order), + ); } pub fn argMin( diff --git a/lib/std/sort/block.zig b/lib/std/sort/block.zig new file mode 100644 index 0000000000..6c1be9c6c2 --- /dev/null +++ b/lib/std/sort/block.zig @@ -0,0 +1,1066 @@ +const std = @import("../std.zig"); +const sort = std.sort; +const math = std.math; +const mem = std.mem; + +const Range = struct { + start: usize, + end: usize, + + fn init(start: usize, end: usize) Range { + return Range{ + .start = start, + .end = end, + }; + } + + fn length(self: Range) usize { + return self.end - self.start; + } +}; + +const Iterator = struct { + size: usize, + power_of_two: usize, + numerator: usize, + decimal: usize, + denominator: usize, + decimal_step: usize, + numerator_step: usize, + + fn init(size2: usize, min_level: usize) Iterator { + const power_of_two = math.floorPowerOfTwo(usize, size2); + const denominator = power_of_two / min_level; + return Iterator{ + .numerator = 0, + .decimal = 0, + .size = size2, + .power_of_two = power_of_two, + .denominator = denominator, + .decimal_step = size2 / denominator, + .numerator_step = size2 % denominator, + }; + } + + fn begin(self: *Iterator) void { + self.numerator = 0; + self.decimal = 0; + } + + fn nextRange(self: *Iterator) Range { + const start = self.decimal; + + self.decimal += self.decimal_step; + self.numerator += self.numerator_step; + if (self.numerator >= self.denominator) { + self.numerator -= self.denominator; + self.decimal += 1; + } + + return Range{ + .start = start, + .end = self.decimal, + }; + } + + fn finished(self: *Iterator) bool { + return self.decimal >= self.size; + } + + fn nextLevel(self: *Iterator) bool { + self.decimal_step += self.decimal_step; + self.numerator_step += self.numerator_step; + if (self.numerator_step >= self.denominator) { + self.numerator_step -= self.denominator; + self.decimal_step += 1; + } + + return (self.decimal_step < self.size); + } + + fn length(self: *Iterator) usize { + return self.decimal_step; + } +}; + +const Pull = struct { + from: usize, + to: usize, + count: usize, + range: Range, +}; + +/// Stable in-place sort. O(n) best case, O(n*log(n)) worst case and average case. +/// O(1) memory (no allocator required). +/// Sorts in ascending order with respect to the given `lessThan` function. 
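+///
+/// A usage sketch (`sort.asc` is the comparator helper from the parent
+/// `std.sort` module, imported at the top of this file):
+///
+///     var data = [_]i32{ 3, 1, 2 };
+///     block(i32, &data, {}, sort.asc(i32));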
+///
+/// NOTE: the algorithm only works when the comparison is less-than or greater-than
+/// (See https://github.com/ziglang/zig/issues/8289)
+pub fn block(
+    comptime T: type,
+    items: []T,
+    context: anytype,
+    comptime lessThan: fn (@TypeOf(context), lhs: T, rhs: T) bool,
+) void {
+
+    // Implementation ported from https://github.com/BonzaiThePenguin/WikiSort/blob/master/WikiSort.c
+    var cache: [512]T = undefined;
+
+    if (items.len < 4) {
+        if (items.len == 3) {
+            // hard coded insertion sort
+            if (lessThan(context, items[1], items[0])) mem.swap(T, &items[0], &items[1]);
+            if (lessThan(context, items[2], items[1])) {
+                mem.swap(T, &items[1], &items[2]);
+                if (lessThan(context, items[1], items[0])) mem.swap(T, &items[0], &items[1]);
+            }
+        } else if (items.len == 2) {
+            if (lessThan(context, items[1], items[0])) mem.swap(T, &items[0], &items[1]);
+        }
+        return;
+    }
+
+    // sort groups of 4-8 items at a time using an unstable sorting network,
+    // but keep track of the original item orders to force it to be stable
+    // http://pages.ripco.net/~jgamble/nw.html
+    var iterator = Iterator.init(items.len, 4);
+    while (!iterator.finished()) {
+        var order = [_]u8{ 0, 1, 2, 3, 4, 5, 6, 7 };
+        const range = iterator.nextRange();
+
+        const sliced_items = items[range.start..];
+        switch (range.length()) {
+            8 => {
+                swap(T, sliced_items, &order, 0, 1, context, lessThan);
+                swap(T, sliced_items, &order, 2, 3, context, lessThan);
+                swap(T, sliced_items, &order, 4, 5, context, lessThan);
+                swap(T, sliced_items, &order, 6, 7, context, lessThan);
+                swap(T, sliced_items, &order, 0, 2, context, lessThan);
+                swap(T, sliced_items, &order, 1, 3, context, lessThan);
+                swap(T, sliced_items, &order, 4, 6, context, lessThan);
+                swap(T, sliced_items, &order, 5, 7, context, lessThan);
+                swap(T, sliced_items, &order, 1, 2, context, lessThan);
+                swap(T, sliced_items, &order, 5, 6, context, lessThan);
+                swap(T, sliced_items, &order, 0, 4, context, lessThan);
+                swap(T, sliced_items, &order, 3, 7, context, lessThan);
+                swap(T, sliced_items, &order, 1, 5, context, lessThan);
+                swap(T, sliced_items, &order, 2, 6, context, lessThan);
+                swap(T, sliced_items, &order, 1, 4, context, lessThan);
+                swap(T, sliced_items, &order, 3, 6, context, lessThan);
+                swap(T, sliced_items, &order, 2, 4, context, lessThan);
+                swap(T, sliced_items, &order, 3, 5, context, lessThan);
+                swap(T, sliced_items, &order, 3, 4, context, lessThan);
+            },
+            7 => {
+                swap(T, sliced_items, &order, 1, 2, context, lessThan);
+                swap(T, sliced_items, &order, 3, 4, context, lessThan);
+                swap(T, sliced_items, &order, 5, 6, context, lessThan);
+                swap(T, sliced_items, &order, 0, 2, context, lessThan);
+                swap(T, sliced_items, &order, 3, 5, context, lessThan);
+                swap(T, sliced_items, &order, 4, 6, context, lessThan);
+                swap(T, sliced_items, &order, 0, 1, context, lessThan);
+                swap(T, sliced_items, &order, 4, 5, context, lessThan);
+                swap(T, sliced_items, &order, 2, 6, context, lessThan);
+                swap(T, sliced_items, &order, 0, 4, context, lessThan);
+                swap(T, sliced_items, &order, 1, 5, context, lessThan);
+                swap(T, sliced_items, &order, 0, 3, context, lessThan);
+                swap(T, sliced_items, &order, 2, 5, context, lessThan);
+                swap(T, sliced_items, &order, 1, 3, context, lessThan);
+                swap(T, sliced_items, &order, 2, 4, context, lessThan);
+                swap(T, sliced_items, &order, 2, 3, context, lessThan);
+            },
+            6 => {
+                swap(T, sliced_items, &order, 1, 2, context, lessThan);
+                swap(T, sliced_items, &order, 4, 5, context, lessThan);
+                swap(T, sliced_items, &order, 0, 2, context, lessThan);
+ swap(T, sliced_items, &order, 3, 5, context, lessThan); + swap(T, sliced_items, &order, 0, 1, context, lessThan); + swap(T, sliced_items, &order, 3, 4, context, lessThan); + swap(T, sliced_items, &order, 2, 5, context, lessThan); + swap(T, sliced_items, &order, 0, 3, context, lessThan); + swap(T, sliced_items, &order, 1, 4, context, lessThan); + swap(T, sliced_items, &order, 2, 4, context, lessThan); + swap(T, sliced_items, &order, 1, 3, context, lessThan); + swap(T, sliced_items, &order, 2, 3, context, lessThan); + }, + 5 => { + swap(T, sliced_items, &order, 0, 1, context, lessThan); + swap(T, sliced_items, &order, 3, 4, context, lessThan); + swap(T, sliced_items, &order, 2, 4, context, lessThan); + swap(T, sliced_items, &order, 2, 3, context, lessThan); + swap(T, sliced_items, &order, 1, 4, context, lessThan); + swap(T, sliced_items, &order, 0, 3, context, lessThan); + swap(T, sliced_items, &order, 0, 2, context, lessThan); + swap(T, sliced_items, &order, 1, 3, context, lessThan); + swap(T, sliced_items, &order, 1, 2, context, lessThan); + }, + 4 => { + swap(T, sliced_items, &order, 0, 1, context, lessThan); + swap(T, sliced_items, &order, 2, 3, context, lessThan); + swap(T, sliced_items, &order, 0, 2, context, lessThan); + swap(T, sliced_items, &order, 1, 3, context, lessThan); + swap(T, sliced_items, &order, 1, 2, context, lessThan); + }, + else => {}, + } + } + if (items.len < 8) return; + + // then merge sort the higher levels, which can be 8-15, 16-31, 32-63, 64-127, etc. + while (true) { + // if every A and B block will fit into the cache, use a special branch + // specifically for merging with the cache + // (we use < rather than <= since the block size might be one more than + // iterator.length()) + if (iterator.length() < cache.len) { + // if four subarrays fit into the cache, it's faster to merge both + // pairs of subarrays into the cache, + // then merge the two merged subarrays from the cache back into the original array + if ((iterator.length() + 1) * 4 <= cache.len and iterator.length() * 4 <= items.len) { + iterator.begin(); + while (!iterator.finished()) { + // merge A1 and B1 into the cache + var A1 = iterator.nextRange(); + var B1 = iterator.nextRange(); + var A2 = iterator.nextRange(); + var B2 = iterator.nextRange(); + + if (lessThan(context, items[B1.end - 1], items[A1.start])) { + // the two ranges are in reverse order, so copy them in reverse order into the cache + const a1_items = items[A1.start..A1.end]; + @memcpy(cache[B1.length()..][0..a1_items.len], a1_items); + const b1_items = items[B1.start..B1.end]; + @memcpy(cache[0..b1_items.len], b1_items); + } else if (lessThan(context, items[B1.start], items[A1.end - 1])) { + // these two ranges weren't already in order, so merge them into the cache + mergeInto(T, items, A1, B1, cache[0..], context, lessThan); + } else { + // if A1, B1, A2, and B2 are all in order, skip doing anything else + if (!lessThan(context, items[B2.start], items[A2.end - 1]) and !lessThan(context, items[A2.start], items[B1.end - 1])) continue; + + // copy A1 and B1 into the cache in the same order + const a1_items = items[A1.start..A1.end]; + @memcpy(cache[0..a1_items.len], a1_items); + const b1_items = items[B1.start..B1.end]; + @memcpy(cache[A1.length()..][0..b1_items.len], b1_items); + } + A1 = Range.init(A1.start, B1.end); + + // merge A2 and B2 into the cache + if (lessThan(context, items[B2.end - 1], items[A2.start])) { + // the two ranges are in reverse order, so copy them in reverse order into the cache + const a2_items = 
items[A2.start..A2.end]; + @memcpy(cache[A1.length() + B2.length() ..][0..a2_items.len], a2_items); + const b2_items = items[B2.start..B2.end]; + @memcpy(cache[A1.length()..][0..b2_items.len], b2_items); + } else if (lessThan(context, items[B2.start], items[A2.end - 1])) { + // these two ranges weren't already in order, so merge them into the cache + mergeInto(T, items, A2, B2, cache[A1.length()..], context, lessThan); + } else { + // copy A2 and B2 into the cache in the same order + const a2_items = items[A2.start..A2.end]; + @memcpy(cache[A1.length()..][0..a2_items.len], a2_items); + const b2_items = items[B2.start..B2.end]; + @memcpy(cache[A1.length() + A2.length() ..][0..b2_items.len], b2_items); + } + A2 = Range.init(A2.start, B2.end); + + // merge A1 and A2 from the cache into the items + const A3 = Range.init(0, A1.length()); + const B3 = Range.init(A1.length(), A1.length() + A2.length()); + + if (lessThan(context, cache[B3.end - 1], cache[A3.start])) { + // the two ranges are in reverse order, so copy them in reverse order into the items + const a3_items = cache[A3.start..A3.end]; + @memcpy(items[A1.start + A2.length() ..][0..a3_items.len], a3_items); + const b3_items = cache[B3.start..B3.end]; + @memcpy(items[A1.start..][0..b3_items.len], b3_items); + } else if (lessThan(context, cache[B3.start], cache[A3.end - 1])) { + // these two ranges weren't already in order, so merge them back into the items + mergeInto(T, cache[0..], A3, B3, items[A1.start..], context, lessThan); + } else { + // copy A3 and B3 into the items in the same order + const a3_items = cache[A3.start..A3.end]; + @memcpy(items[A1.start..][0..a3_items.len], a3_items); + const b3_items = cache[B3.start..B3.end]; + @memcpy(items[A1.start + A1.length() ..][0..b3_items.len], b3_items); + } + } + + // we merged two levels at the same time, so we're done with this level already + // (iterator.nextLevel() is called again at the bottom of this outer merge loop) + _ = iterator.nextLevel(); + } else { + iterator.begin(); + while (!iterator.finished()) { + var A = iterator.nextRange(); + var B = iterator.nextRange(); + + if (lessThan(context, items[B.end - 1], items[A.start])) { + // the two ranges are in reverse order, so a simple rotation should fix it + mem.rotate(T, items[A.start..B.end], A.length()); + } else if (lessThan(context, items[B.start], items[A.end - 1])) { + // these two ranges weren't already in order, so we'll need to merge them! + const a_items = items[A.start..A.end]; + @memcpy(cache[0..a_items.len], a_items); + mergeExternal(T, items, A, B, cache[0..], context, lessThan); + } + } + } + } else { + // this is where the in-place merge logic starts! + // 1. pull out two internal buffers each containing √A unique values + // 1a. adjust block_size and buffer_size if we couldn't find enough unique values + // 2. loop over the A and B subarrays within this level of the merge sort + // 3. break A and B into blocks of size 'block_size' + // 4. "tag" each of the A blocks with values from the first internal buffer + // 5. roll the A blocks through the B blocks and drop/rotate them where they belong + // 6. merge each A block with any B values that follow, using the cache or the second internal buffer + // 7. sort the second internal buffer if it exists + // 8. 
redistribute the two internal buffers back into the items + var block_size: usize = math.sqrt(iterator.length()); + var buffer_size = iterator.length() / block_size + 1; + + // as an optimization, we really only need to pull out the internal buffers once for each level of merges + // after that we can reuse the same buffers over and over, then redistribute it when we're finished with this level + var A: Range = undefined; + var B: Range = undefined; + var index: usize = 0; + var last: usize = 0; + var count: usize = 0; + var find: usize = 0; + var start: usize = 0; + var pull_index: usize = 0; + var pull = [_]Pull{ + Pull{ + .from = 0, + .to = 0, + .count = 0, + .range = Range.init(0, 0), + }, + Pull{ + .from = 0, + .to = 0, + .count = 0, + .range = Range.init(0, 0), + }, + }; + + var buffer1 = Range.init(0, 0); + var buffer2 = Range.init(0, 0); + + // find two internal buffers of size 'buffer_size' each + find = buffer_size + buffer_size; + var find_separately = false; + + if (block_size <= cache.len) { + // if every A block fits into the cache then we won't need the second internal buffer, + // so we really only need to find 'buffer_size' unique values + find = buffer_size; + } else if (find > iterator.length()) { + // we can't fit both buffers into the same A or B subarray, so find two buffers separately + find = buffer_size; + find_separately = true; + } + + // we need to find either a single contiguous space containing 2√A unique values (which will be split up into two buffers of size √A each), + // or we need to find one buffer of < 2√A unique values, and a second buffer of √A unique values, + // OR if we couldn't find that many unique values, we need the largest possible buffer we can get + + // in the case where it couldn't find a single buffer of at least √A unique values, + // all of the Merge steps must be replaced by a different merge algorithm (MergeInPlace) + iterator.begin(); + while (!iterator.finished()) { + A = iterator.nextRange(); + B = iterator.nextRange(); + + // just store information about where the values will be pulled from and to, + // as well as how many values there are, to create the two internal buffers + + // check A for the number of unique values we need to fill an internal buffer + // these values will be pulled out to the start of A + last = A.start; + count = 1; + while (count < find) : ({ + last = index; + count += 1; + }) { + index = findLastForward(T, items, items[last], Range.init(last + 1, A.end), find - count, context, lessThan); + if (index == A.end) break; + } + index = last; + + if (count >= buffer_size) { + // keep track of the range within the items where we'll need to "pull out" these values to create the internal buffer + pull[pull_index] = Pull{ + .range = Range.init(A.start, B.end), + .count = count, + .from = index, + .to = A.start, + }; + pull_index = 1; + + if (count == buffer_size + buffer_size) { + // we were able to find a single contiguous section containing 2√A unique values, + // so this section can be used to contain both of the internal buffers we'll need + buffer1 = Range.init(A.start, A.start + buffer_size); + buffer2 = Range.init(A.start + buffer_size, A.start + count); + break; + } else if (find == buffer_size + buffer_size) { + // we found a buffer that contains at least √A unique values, but did not contain the full 2√A unique values, + // so we still need to find a second separate buffer of at least √A unique values + buffer1 = Range.init(A.start, A.start + count); + find = buffer_size; + } else if (block_size <= 
cache.len) {
+ // we found the first and only internal buffer that we need, so we're done!
+ buffer1 = Range.init(A.start, A.start + count);
+ break;
+ } else if (find_separately) {
+ // found one buffer, but now find the other one
+ buffer1 = Range.init(A.start, A.start + count);
+ find_separately = false;
+ } else {
+ // we found a second buffer in an 'A' subarray containing √A unique values, so we're done!
+ buffer2 = Range.init(A.start, A.start + count);
+ break;
+ }
+ } else if (pull_index == 0 and count > buffer1.length()) {
+ // keep track of the largest buffer we were able to find
+ buffer1 = Range.init(A.start, A.start + count);
+ pull[pull_index] = Pull{
+ .range = Range.init(A.start, B.end),
+ .count = count,
+ .from = index,
+ .to = A.start,
+ };
+ }
+
+ // check B for the number of unique values we need to fill an internal buffer
+ // these values will be pulled out to the end of B
+ last = B.end - 1;
+ count = 1;
+ while (count < find) : ({
+ last = index - 1;
+ count += 1;
+ }) {
+ index = findFirstBackward(T, items, items[last], Range.init(B.start, last), find - count, context, lessThan);
+ if (index == B.start) break;
+ }
+ index = last;
+
+ if (count >= buffer_size) {
+ // keep track of the range within the items where we'll need to "pull out" these values to create the internal buffer
+ pull[pull_index] = Pull{
+ .range = Range.init(A.start, B.end),
+ .count = count,
+ .from = index,
+ .to = B.end,
+ };
+ pull_index = 1;
+
+ if (count == buffer_size + buffer_size) {
+ // we were able to find a single contiguous section containing 2√A unique values,
+ // so this section can be used to contain both of the internal buffers we'll need
+ buffer1 = Range.init(B.end - count, B.end - buffer_size);
+ buffer2 = Range.init(B.end - buffer_size, B.end);
+ break;
+ } else if (find == buffer_size + buffer_size) {
+ // we found a buffer that contains at least √A unique values, but did not contain the full 2√A unique values,
+ // so we still need to find a second separate buffer of at least √A unique values
+ buffer1 = Range.init(B.end - count, B.end);
+ find = buffer_size;
+ } else if (block_size <= cache.len) {
+ // we found the first and only internal buffer that we need, so we're done!
+ buffer1 = Range.init(B.end - count, B.end);
+ break;
+ } else if (find_separately) {
+ // found one buffer, but now find the other one
+ buffer1 = Range.init(B.end - count, B.end);
+ find_separately = false;
+ } else {
+ // buffer2 will be pulled out from a 'B' subarray, so if the first buffer was pulled out from the corresponding 'A' subarray,
+ // we need to adjust the end point for that A subarray so it knows to stop redistributing its values before reaching buffer2
+ if (pull[0].range.start == A.start) pull[0].range.end -= pull[1].count;
+
+ // we found a second buffer in a 'B' subarray containing √A unique values, so we're done!
+ buffer2 = Range.init(B.end - count, B.end); + break; + } + } else if (pull_index == 0 and count > buffer1.length()) { + // keep track of the largest buffer we were able to find + buffer1 = Range.init(B.end - count, B.end); + pull[pull_index] = Pull{ + .range = Range.init(A.start, B.end), + .count = count, + .from = index, + .to = B.end, + }; + } + } + + // pull out the two ranges so we can use them as internal buffers + pull_index = 0; + while (pull_index < 2) : (pull_index += 1) { + const length = pull[pull_index].count; + + if (pull[pull_index].to < pull[pull_index].from) { + // we're pulling the values out to the left, which means the start of an A subarray + index = pull[pull_index].from; + count = 1; + while (count < length) : (count += 1) { + index = findFirstBackward(T, items, items[index - 1], Range.init(pull[pull_index].to, pull[pull_index].from - (count - 1)), length - count, context, lessThan); + const range = Range.init(index + 1, pull[pull_index].from + 1); + mem.rotate(T, items[range.start..range.end], range.length() - count); + pull[pull_index].from = index + count; + } + } else if (pull[pull_index].to > pull[pull_index].from) { + // we're pulling values out to the right, which means the end of a B subarray + index = pull[pull_index].from + 1; + count = 1; + while (count < length) : (count += 1) { + index = findLastForward(T, items, items[index], Range.init(index, pull[pull_index].to), length - count, context, lessThan); + const range = Range.init(pull[pull_index].from, index - 1); + mem.rotate(T, items[range.start..range.end], count); + pull[pull_index].from = index - 1 - count; + } + } + } + + // adjust block_size and buffer_size based on the values we were able to pull out + buffer_size = buffer1.length(); + block_size = iterator.length() / buffer_size + 1; + + // the first buffer NEEDS to be large enough to tag each of the evenly sized A blocks, + // so this was originally here to test the math for adjusting block_size above + // assert((iterator.length() + 1)/block_size <= buffer_size); + + // now that the two internal buffers have been created, it's time to merge each A+B combination at this level of the merge sort! + iterator.begin(); + while (!iterator.finished()) { + A = iterator.nextRange(); + B = iterator.nextRange(); + + // remove any parts of A or B that are being used by the internal buffers + start = A.start; + if (start == pull[0].range.start) { + if (pull[0].from > pull[0].to) { + A.start += pull[0].count; + + // if the internal buffer takes up the entire A or B subarray, then there's nothing to merge + // this only happens for very small subarrays, like √4 = 2, 2 * (2 internal buffers) = 4, + // which also only happens when cache.len is small or 0 since it'd otherwise use MergeExternal + if (A.length() == 0) continue; + } else if (pull[0].from < pull[0].to) { + B.end -= pull[0].count; + if (B.length() == 0) continue; + } + } + if (start == pull[1].range.start) { + if (pull[1].from > pull[1].to) { + A.start += pull[1].count; + if (A.length() == 0) continue; + } else if (pull[1].from < pull[1].to) { + B.end -= pull[1].count; + if (B.length() == 0) continue; + } + } + + if (lessThan(context, items[B.end - 1], items[A.start])) { + // the two ranges are in reverse order, so a simple rotation should fix it + mem.rotate(T, items[A.start..B.end], A.length()); + } else if (lessThan(context, items[A.end], items[A.end - 1])) { + // these two ranges weren't already in order, so we'll need to merge them! 
+ var findA: usize = undefined; + + // break the remainder of A into blocks. firstA is the uneven-sized first A block + var blockA = Range.init(A.start, A.end); + var firstA = Range.init(A.start, A.start + blockA.length() % block_size); + + // swap the first value of each A block with the value in buffer1 + var indexA = buffer1.start; + index = firstA.end; + while (index < blockA.end) : ({ + indexA += 1; + index += block_size; + }) { + mem.swap(T, &items[indexA], &items[index]); + } + + // start rolling the A blocks through the B blocks! + // whenever we leave an A block behind, we'll need to merge the previous A block with any B blocks that follow it, so track that information as well + var lastA = firstA; + var lastB = Range.init(0, 0); + var blockB = Range.init(B.start, B.start + math.min(block_size, B.length())); + blockA.start += firstA.length(); + indexA = buffer1.start; + + // if the first unevenly sized A block fits into the cache, copy it there for when we go to Merge it + // otherwise, if the second buffer is available, block swap the contents into that + if (lastA.length() <= cache.len) { + const last_a_items = items[lastA.start..lastA.end]; + @memcpy(cache[0..last_a_items.len], last_a_items); + } else if (buffer2.length() > 0) { + blockSwap(T, items, lastA.start, buffer2.start, lastA.length()); + } + + if (blockA.length() > 0) { + while (true) { + // if there's a previous B block and the first value of the minimum A block is <= the last value of the previous B block, + // then drop that minimum A block behind. or if there are no B blocks left then keep dropping the remaining A blocks. + if ((lastB.length() > 0 and !lessThan(context, items[lastB.end - 1], items[indexA])) or blockB.length() == 0) { + // figure out where to split the previous B block, and rotate it at the split + const B_split = binaryFirst(T, items, items[indexA], lastB, context, lessThan); + const B_remaining = lastB.end - B_split; + + // swap the minimum A block to the beginning of the rolling A blocks + var minA = blockA.start; + findA = minA + block_size; + while (findA < blockA.end) : (findA += block_size) { + if (lessThan(context, items[findA], items[minA])) { + minA = findA; + } + } + blockSwap(T, items, blockA.start, minA, block_size); + + // swap the first item of the previous A block back with its original value, which is stored in buffer1 + mem.swap(T, &items[blockA.start], &items[indexA]); + indexA += 1; + + // locally merge the previous A block with the B values that follow it + // if lastA fits into the external cache we'll use that (with MergeExternal), + // or if the second internal buffer exists we'll use that (with MergeInternal), + // or failing that we'll use a strictly in-place merge algorithm (MergeInPlace) + + if (lastA.length() <= cache.len) { + mergeExternal(T, items, lastA, Range.init(lastA.end, B_split), cache[0..], context, lessThan); + } else if (buffer2.length() > 0) { + mergeInternal(T, items, lastA, Range.init(lastA.end, B_split), buffer2, context, lessThan); + } else { + mergeInPlace(T, items, lastA, Range.init(lastA.end, B_split), context, lessThan); + } + + if (buffer2.length() > 0 or block_size <= cache.len) { + // copy the previous A block into the cache or buffer2, since that's where we need it to be when we go to merge it anyway + if (block_size <= cache.len) { + @memcpy(cache[0..block_size], items[blockA.start..][0..block_size]); + } else { + blockSwap(T, items, blockA.start, buffer2.start, block_size); + } + + // this is equivalent to rotating, but faster + // the area 
normally taken up by the A block is either the contents of buffer2, or data we don't need anymore since we memcopied it + // either way, we don't need to retain the order of those items, so instead of rotating we can just block swap B to where it belongs + blockSwap(T, items, B_split, blockA.start + block_size - B_remaining, B_remaining); + } else { + // we are unable to use the 'buffer2' trick to speed up the rotation operation since buffer2 doesn't exist, so perform a normal rotation + mem.rotate(T, items[B_split .. blockA.start + block_size], blockA.start - B_split); + } + + // update the range for the remaining A blocks, and the range remaining from the B block after it was split + lastA = Range.init(blockA.start - B_remaining, blockA.start - B_remaining + block_size); + lastB = Range.init(lastA.end, lastA.end + B_remaining); + + // if there are no more A blocks remaining, this step is finished! + blockA.start += block_size; + if (blockA.length() == 0) break; + } else if (blockB.length() < block_size) { + // move the last B block, which is unevenly sized, to before the remaining A blocks, by using a rotation + // the cache is disabled here since it might contain the contents of the previous A block + mem.rotate(T, items[blockA.start..blockB.end], blockB.start - blockA.start); + + lastB = Range.init(blockA.start, blockA.start + blockB.length()); + blockA.start += blockB.length(); + blockA.end += blockB.length(); + blockB.end = blockB.start; + } else { + // roll the leftmost A block to the end by swapping it with the next B block + blockSwap(T, items, blockA.start, blockB.start, block_size); + lastB = Range.init(blockA.start, blockA.start + block_size); + + blockA.start += block_size; + blockA.end += block_size; + blockB.start += block_size; + + if (blockB.end > B.end - block_size) { + blockB.end = B.end; + } else { + blockB.end += block_size; + } + } + } + } + + // merge the last A block with the remaining B values + if (lastA.length() <= cache.len) { + mergeExternal(T, items, lastA, Range.init(lastA.end, B.end), cache[0..], context, lessThan); + } else if (buffer2.length() > 0) { + mergeInternal(T, items, lastA, Range.init(lastA.end, B.end), buffer2, context, lessThan); + } else { + mergeInPlace(T, items, lastA, Range.init(lastA.end, B.end), context, lessThan); + } + } + } + + // when we're finished with this merge step we should have the one + // or two internal buffers left over, where the second buffer is all jumbled up + // insertion sort the second buffer, then redistribute the buffers + // back into the items using the opposite process used for creating the buffer + + // while an unstable sort like quicksort could be applied here, in benchmarks + // it was consistently slightly slower than a simple insertion sort, + // even for tens of millions of items. 
this may be because insertion + // sort is quite fast when the data is already somewhat sorted, like it is here + sort.insertion(T, items[buffer2.start..buffer2.end], context, lessThan); + + pull_index = 0; + while (pull_index < 2) : (pull_index += 1) { + var unique = pull[pull_index].count * 2; + if (pull[pull_index].from > pull[pull_index].to) { + // the values were pulled out to the left, so redistribute them back to the right + var buffer = Range.init(pull[pull_index].range.start, pull[pull_index].range.start + pull[pull_index].count); + while (buffer.length() > 0) { + index = findFirstForward(T, items, items[buffer.start], Range.init(buffer.end, pull[pull_index].range.end), unique, context, lessThan); + const amount = index - buffer.end; + mem.rotate(T, items[buffer.start..index], buffer.length()); + buffer.start += (amount + 1); + buffer.end += amount; + unique -= 2; + } + } else if (pull[pull_index].from < pull[pull_index].to) { + // the values were pulled out to the right, so redistribute them back to the left + var buffer = Range.init(pull[pull_index].range.end - pull[pull_index].count, pull[pull_index].range.end); + while (buffer.length() > 0) { + index = findLastBackward(T, items, items[buffer.end - 1], Range.init(pull[pull_index].range.start, buffer.start), unique, context, lessThan); + const amount = buffer.start - index; + mem.rotate(T, items[index..buffer.end], amount); + buffer.start -= amount; + buffer.end -= (amount + 1); + unique -= 2; + } + } + } + } + + // double the size of each A and B subarray that will be merged in the next level + if (!iterator.nextLevel()) break; + } +} +// merge operation without a buffer +fn mergeInPlace( + comptime T: type, + items: []T, + A_arg: Range, + B_arg: Range, + context: anytype, + comptime lessThan: fn (@TypeOf(context), lhs: T, rhs: T) bool, +) void { + if (A_arg.length() == 0 or B_arg.length() == 0) return; + + // this just repeatedly binary searches into B and rotates A into position. + // the paper suggests using the 'rotation-based Hwang and Lin algorithm' here, + // but I decided to stick with this because it had better situational performance + // + // (Hwang and Lin is designed for merging subarrays of very different sizes, + // but WikiSort almost always uses subarrays that are roughly the same size) + // + // normally this is incredibly suboptimal, but this function is only called + // when none of the A or B blocks in any subarray contained 2√A unique values, + // which places a hard limit on the number of times this will ACTUALLY need + // to binary search and rotate. + // + // according to my analysis the worst case is √A rotations performed on √A items + // once the constant factors are removed, which ends up being O(n) + // + // again, this is NOT a general-purpose solution – it only works well in this case! 
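To make the rotation step described above concrete, here is a toy, self-contained case — a sketch assuming only std.mem.rotate's left-rotation semantics — where every element of B is smaller than the head of A, so a single rotation completes the merge:

    const std = @import("std");

    test "rotation can complete a merge outright" {
        // A = {4, 5, 6} and B = {1, 2, 3} are each sorted, and all of B
        // belongs before A, so rotating A past B merges the two ranges.
        var items = [_]u8{ 4, 5, 6, 1, 2, 3 };
        std.mem.rotate(u8, &items, 3);
        try std.testing.expectEqualSlices(u8, &[_]u8{ 1, 2, 3, 4, 5, 6 }, &items);
    }
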
+ // kind of like how the O(n^2) insertion sort is used in some places
+
+ var A = A_arg;
+ var B = B_arg;
+
+ while (true) {
+ // find the first place in B where the first item in A needs to be inserted
+ const mid = binaryFirst(T, items, items[A.start], B, context, lessThan);
+
+ // rotate A into place
+ const amount = mid - A.end;
+ mem.rotate(T, items[A.start..mid], A.length());
+ if (B.end == mid) break;
+
+ // calculate the new A and B ranges
+ B.start = mid;
+ A = Range.init(A.start + amount, B.start);
+ A.start = binaryLast(T, items, items[A.start], A, context, lessThan);
+ if (A.length() == 0) break;
+ }
+}
+
+// merge operation using an internal buffer
+fn mergeInternal(
+ comptime T: type,
+ items: []T,
+ A: Range,
+ B: Range,
+ buffer: Range,
+ context: anytype,
+ comptime lessThan: fn (@TypeOf(context), lhs: T, rhs: T) bool,
+) void {
+ // whenever we find a value to add to the final array, swap it with the value that's already in that spot
+ // when this algorithm is finished, 'buffer' will contain its original contents, but in a different order
+ var A_count: usize = 0;
+ var B_count: usize = 0;
+ var insert: usize = 0;
+
+ if (B.length() > 0 and A.length() > 0) {
+ while (true) {
+ if (!lessThan(context, items[B.start + B_count], items[buffer.start + A_count])) {
+ mem.swap(T, &items[A.start + insert], &items[buffer.start + A_count]);
+ A_count += 1;
+ insert += 1;
+ if (A_count >= A.length()) break;
+ } else {
+ mem.swap(T, &items[A.start + insert], &items[B.start + B_count]);
+ B_count += 1;
+ insert += 1;
+ if (B_count >= B.length()) break;
+ }
+ }
+ }
+
+ // swap the remainder of A into the final array
+ blockSwap(T, items, buffer.start + A_count, A.start + insert, A.length() - A_count);
+}
+
+fn blockSwap(comptime T: type, items: []T, start1: usize, start2: usize, block_size: usize) void {
+ var index: usize = 0;
+ while (index < block_size) : (index += 1) {
+ mem.swap(T, &items[start1 + index], &items[start2 + index]);
+ }
+}
+
+// combine a linear search with a binary search to reduce the number of comparisons in situations
+// where we have some idea as to how many unique values there are and where the next value might be
+fn findFirstForward(
+ comptime T: type,
+ items: []T,
+ value: T,
+ range: Range,
+ unique: usize,
+ context: anytype,
+ comptime lessThan: fn (@TypeOf(context), lhs: T, rhs: T) bool,
+) usize {
+ if (range.length() == 0) return range.start;
+ const skip = math.max(range.length() / unique, @as(usize, 1));
+
+ var index = range.start + skip;
+ while (lessThan(context, items[index - 1], value)) : (index += skip) {
+ if (index >= range.end - skip) {
+ return binaryFirst(T, items, value, Range.init(index, range.end), context, lessThan);
+ }
+ }
+
+ return binaryFirst(T, items, value, Range.init(index - skip, index), context, lessThan);
+}
+
+fn findFirstBackward(
+ comptime T: type,
+ items: []T,
+ value: T,
+ range: Range,
+ unique: usize,
+ context: anytype,
+ comptime lessThan: fn (@TypeOf(context), lhs: T, rhs: T) bool,
+) usize {
+ if (range.length() == 0) return range.start;
+ const skip = math.max(range.length() / unique, @as(usize, 1));
+
+ var index = range.end - skip;
+ while (index > range.start and !lessThan(context, items[index - 1], value)) : (index -= skip) {
+ if (index < range.start + skip) {
+ return binaryFirst(T, items, value, Range.init(range.start, index), context, lessThan);
+ }
+ }
+
+ return binaryFirst(T, items, value, Range.init(index, index + skip), context, lessThan);
+}
+
+fn findLastForward(
+ comptime T: type,
+ items:
[]T, + value: T, + range: Range, + unique: usize, + context: anytype, + comptime lessThan: fn (@TypeOf(context), lhs: T, rhs: T) bool, +) usize { + if (range.length() == 0) return range.start; + const skip = math.max(range.length() / unique, @as(usize, 1)); + + var index = range.start + skip; + while (!lessThan(context, value, items[index - 1])) : (index += skip) { + if (index >= range.end - skip) { + return binaryLast(T, items, value, Range.init(index, range.end), context, lessThan); + } + } + + return binaryLast(T, items, value, Range.init(index - skip, index), context, lessThan); +} + +fn findLastBackward( + comptime T: type, + items: []T, + value: T, + range: Range, + unique: usize, + context: anytype, + comptime lessThan: fn (@TypeOf(context), lhs: T, rhs: T) bool, +) usize { + if (range.length() == 0) return range.start; + const skip = math.max(range.length() / unique, @as(usize, 1)); + + var index = range.end - skip; + while (index > range.start and lessThan(context, value, items[index - 1])) : (index -= skip) { + if (index < range.start + skip) { + return binaryLast(T, items, value, Range.init(range.start, index), context, lessThan); + } + } + + return binaryLast(T, items, value, Range.init(index, index + skip), context, lessThan); +} + +fn binaryFirst( + comptime T: type, + items: []T, + value: T, + range: Range, + context: anytype, + comptime lessThan: fn (@TypeOf(context), lhs: T, rhs: T) bool, +) usize { + var curr = range.start; + var size = range.length(); + if (range.start >= range.end) return range.end; + while (size > 0) { + const offset = size % 2; + + size /= 2; + const mid_item = items[curr + size]; + if (lessThan(context, mid_item, value)) { + curr += size + offset; + } + } + return curr; +} + +fn binaryLast( + comptime T: type, + items: []T, + value: T, + range: Range, + context: anytype, + comptime lessThan: fn (@TypeOf(context), lhs: T, rhs: T) bool, +) usize { + var curr = range.start; + var size = range.length(); + if (range.start >= range.end) return range.end; + while (size > 0) { + const offset = size % 2; + + size /= 2; + const mid_item = items[curr + size]; + if (!lessThan(context, value, mid_item)) { + curr += size + offset; + } + } + return curr; +} + +fn mergeInto( + comptime T: type, + from: []T, + A: Range, + B: Range, + into: []T, + context: anytype, + comptime lessThan: fn (@TypeOf(context), lhs: T, rhs: T) bool, +) void { + var A_index: usize = A.start; + var B_index: usize = B.start; + const A_last = A.end; + const B_last = B.end; + var insert_index: usize = 0; + + while (true) { + if (!lessThan(context, from[B_index], from[A_index])) { + into[insert_index] = from[A_index]; + A_index += 1; + insert_index += 1; + if (A_index == A_last) { + // copy the remainder of B into the final array + const from_b = from[B_index..B_last]; + @memcpy(into[insert_index..][0..from_b.len], from_b); + break; + } + } else { + into[insert_index] = from[B_index]; + B_index += 1; + insert_index += 1; + if (B_index == B_last) { + // copy the remainder of A into the final array + const from_a = from[A_index..A_last]; + @memcpy(into[insert_index..][0..from_a.len], from_a); + break; + } + } + } +} + +fn mergeExternal( + comptime T: type, + items: []T, + A: Range, + B: Range, + cache: []T, + context: anytype, + comptime lessThan: fn (@TypeOf(context), lhs: T, rhs: T) bool, +) void { + // A fits into the cache, so use that instead of the internal buffer + var A_index: usize = 0; + var B_index: usize = B.start; + var insert_index: usize = A.start; + const A_last = A.length(); + 
const B_last = B.end; + + if (B.length() > 0 and A.length() > 0) { + while (true) { + if (!lessThan(context, items[B_index], cache[A_index])) { + items[insert_index] = cache[A_index]; + A_index += 1; + insert_index += 1; + if (A_index == A_last) break; + } else { + items[insert_index] = items[B_index]; + B_index += 1; + insert_index += 1; + if (B_index == B_last) break; + } + } + } + + // copy the remainder of A into the final array + const cache_a = cache[A_index..A_last]; + @memcpy(items[insert_index..][0..cache_a.len], cache_a); +} + +fn swap( + comptime T: type, + items: []T, + order: *[8]u8, + x: usize, + y: usize, + context: anytype, + comptime lessThan: fn (@TypeOf(context), lhs: T, rhs: T) bool, +) void { + if (lessThan(context, items[y], items[x]) or ((order.*)[x] > (order.*)[y] and !lessThan(context, items[x], items[y]))) { + mem.swap(T, &items[x], &items[y]); + mem.swap(u8, &(order.*)[x], &(order.*)[y]); + } +} diff --git a/lib/std/sort/pdq.zig b/lib/std/sort/pdq.zig new file mode 100644 index 0000000000..e7042b0c76 --- /dev/null +++ b/lib/std/sort/pdq.zig @@ -0,0 +1,331 @@ +const std = @import("../std.zig"); +const sort = std.sort; +const mem = std.mem; +const math = std.math; +const testing = std.testing; + +/// Unstable in-place sort. n best case, n*log(n) worst case and average case. +/// log(n) memory (no allocator required). +/// +/// Sorts in ascending order with respect to the given `lessThan` function. +pub fn pdq( + comptime T: type, + items: []T, + context: anytype, + comptime lessThanFn: fn (context: @TypeOf(context), lhs: T, rhs: T) bool, +) void { + const Context = struct { + items: []T, + sub_ctx: @TypeOf(context), + + pub fn lessThan(ctx: @This(), a: usize, b: usize) bool { + return lessThanFn(ctx.sub_ctx, ctx.items[a], ctx.items[b]); + } + + pub fn swap(ctx: @This(), a: usize, b: usize) void { + return mem.swap(T, &ctx.items[a], &ctx.items[b]); + } + }; + pdqContext(0, items.len, Context{ .items = items, .sub_ctx = context }); +} + +const Hint = enum { + increasing, + decreasing, + unknown, +}; + +/// Unstable in-place sort. O(n) best case, O(n*log(n)) worst case and average case. +/// O(log(n)) memory (no allocator required). +/// +/// Sorts in ascending order with respect to the given `lessThan` function. +pub fn pdqContext(a: usize, b: usize, context: anytype) void { + // slices of up to this length get sorted using insertion sort. + const max_insertion = 24; + // number of allowed imbalanced partitions before switching to heap sort. + const max_limit = std.math.floorPowerOfTwo(usize, b) + 1; + + // set upper bound on stack memory usage. + const Range = struct { a: usize, b: usize, limit: usize }; + const stack_size = math.log2(math.maxInt(usize) + 1); + var stack: [stack_size]Range = undefined; + var range = Range{ .a = a, .b = b, .limit = max_limit }; + var top: usize = 0; + + while (true) { + var was_balanced = true; + var was_partitioned = true; + + while (true) { + const len = range.b - range.a; + + // very short slices get sorted using insertion sort. + if (len <= max_insertion) { + break sort.insertionContext(range.a, range.b, context); + } + + // if too many bad pivot choices were made, simply fall back to heapsort in order to + // guarantee O(n*log(n)) worst-case. + if (range.limit == 0) { + break sort.heapContext(range.a, range.b, context); + } + + // if the last partitioning was imbalanced, try breaking patterns in the slice by shuffling + // some elements around. Hopefully we'll choose a better pivot this time. 
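For orientation, the index-based loop below is what the typed pdq wrapper defined at the top of this file drives. Assuming the new file is re-exported as std.sort.pdq (mirroring how the call sites later in this series reach std.mem.sort), a minimal call looks like:

    const std = @import("std");

    test "sorting through the pdq wrapper" {
        var items = [_]u32{ 5, 1, 4, 2, 3 };
        std.sort.pdq(u32, &items, {}, std.sort.asc(u32));
        try std.testing.expectEqualSlices(u32, &[_]u32{ 1, 2, 3, 4, 5 }, &items);
    }
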
+ if (!was_balanced) {
+ breakPatterns(range.a, range.b, context);
+ range.limit -= 1;
+ }
+
+ // choose a pivot and try guessing whether the slice is already sorted.
+ var pivot: usize = 0;
+ var hint = chosePivot(range.a, range.b, &pivot, context);
+
+ if (hint == .decreasing) {
+ // The maximum number of swaps was performed, so items are likely
+ // in reverse order. Reverse it to make sorting faster.
+ reverseRange(range.a, range.b, context);
+ pivot = (range.b - 1) - (pivot - range.a);
+ hint = .increasing;
+ }
+
+ // if the last partitioning was decently balanced and didn't shuffle elements, and if pivot
+ // selection predicts the slice is likely already sorted...
+ if (was_balanced and was_partitioned and hint == .increasing) {
+ // try identifying several out-of-order elements and shifting them to correct
+ // positions. If the slice ends up being completely sorted, we're done.
+ if (partialInsertionSort(range.a, range.b, context)) break;
+ }
+
+ // if the chosen pivot is equal to the predecessor, then it's the smallest element in the
+ // slice. Partition the slice into elements equal to and elements greater than the pivot.
+ // This case is usually hit when the slice contains many duplicate elements.
+ if (range.a > 0 and !context.lessThan(range.a - 1, pivot)) {
+ range.a = partitionEqual(range.a, range.b, pivot, context);
+ continue;
+ }
+
+ // partition the slice.
+ var mid = pivot;
+ was_partitioned = partition(range.a, range.b, &mid, context);
+
+ const left_len = mid - range.a;
+ const right_len = range.b - mid;
+ const balanced_threshold = len / 8;
+ if (left_len < right_len) {
+ was_balanced = left_len >= balanced_threshold;
+ stack[top] = .{ .a = range.a, .b = mid, .limit = range.limit };
+ top += 1;
+ range.a = mid + 1;
+ } else {
+ was_balanced = right_len >= balanced_threshold;
+ stack[top] = .{ .a = mid + 1, .b = range.b, .limit = range.limit };
+ top += 1;
+ range.b = mid;
+ }
+ }
+
+ top = math.sub(usize, top, 1) catch break;
+ range = stack[top];
+ }
+}
+
+/// partitions `items[a..b]` into elements smaller than `items[pivot]`,
+/// followed by elements greater than or equal to `items[pivot]`.
+///
+/// sets the new pivot.
+/// returns `true` if already partitioned.
+fn partition(a: usize, b: usize, pivot: *usize, context: anytype) bool {
+ // move pivot to the first place
+ context.swap(a, pivot.*);
+
+ var i = a + 1;
+ var j = b - 1;
+
+ while (i <= j and context.lessThan(i, a)) i += 1;
+ while (i <= j and !context.lessThan(j, a)) j -= 1;
+
+ // check if items are already partitioned (no item to swap)
+ if (i > j) {
+ // put pivot back to the middle
+ context.swap(j, a);
+ pivot.* = j;
+ return true;
+ }
+
+ context.swap(i, j);
+ i += 1;
+ j -= 1;
+
+ while (true) {
+ while (i <= j and context.lessThan(i, a)) i += 1;
+ while (i <= j and !context.lessThan(j, a)) j -= 1;
+ if (i > j) break;
+
+ context.swap(i, j);
+ i += 1;
+ j -= 1;
+ }
+
+ // TODO: Enable the BlockQuicksort optimization
+
+ context.swap(j, a);
+ pivot.* = j;
+ return false;
+}
+
+/// partitions items into elements equal to `items[pivot]`
+/// followed by elements greater than `items[pivot]`.
+///
+/// it is assumed that `items[a..b]` does not contain elements smaller than the `items[pivot]`.
+fn partitionEqual(a: usize, b: usize, pivot: usize, context: anytype) usize {
+ // move pivot to the first place
+ context.swap(a, pivot);
+
+ var i = a + 1;
+ var j = b - 1;
+
+ while (true) {
+ while (i <= j and !context.lessThan(a, i)) i += 1;
+ while (i <= j and context.lessThan(a, j)) j -= 1;
+ if (i > j) break;
+
+ context.swap(i, j);
+ i += 1;
+ j -= 1;
+ }
+
+ return i;
+}
+
+/// partially sorts a slice by shifting several out-of-order elements around.
+///
+/// returns `true` if the slice is sorted at the end. This function is `O(n)` worst-case.
+fn partialInsertionSort(a: usize, b: usize, context: anytype) bool {
+ @setCold(true);
+
+ // maximum number of adjacent out-of-order pairs that will get shifted
+ const max_steps = 5;
+ // if the slice is shorter than this, don't shift any elements
+ const shortest_shifting = 50;
+
+ var i = a + 1;
+ for (0..max_steps) |_| {
+ // find the next pair of adjacent out-of-order elements.
+ while (i < b and !context.lessThan(i, i - 1)) i += 1;
+
+ // are we done?
+ if (i == b) return true;
+
+ // don't shift elements on short arrays, that has a performance cost.
+ if (b - a < shortest_shifting) return false;
+
+ // swap the found pair of elements. This puts them in correct order.
+ context.swap(i, i - 1);
+
+ // shift the smaller element to the left.
+ if (i - a >= 2) {
+ var j = i - 1;
+ while (j >= 1) : (j -= 1) {
+ if (!context.lessThan(j, j - 1)) break;
+ context.swap(j, j - 1);
+ }
+ }
+
+ // shift the greater element to the right.
+ if (b - i >= 2) {
+ var j = i + 1;
+ while (j < b) : (j += 1) {
+ if (!context.lessThan(j, j - 1)) break;
+ context.swap(j, j - 1);
+ }
+ }
+ }
+
+ return false;
+}
+
+fn breakPatterns(a: usize, b: usize, context: anytype) void {
+ @setCold(true);
+
+ const len = b - a;
+ if (len < 8) return;
+
+ var rand = @intCast(u64, len);
+ const modulus = math.ceilPowerOfTwoAssert(u64, len);
+
+ var i = a + (len / 4) * 2 - 1;
+ while (i <= a + (len / 4) * 2 + 1) : (i += 1) {
+ // xorshift64
+ rand ^= rand << 13;
+ rand ^= rand >> 7;
+ rand ^= rand << 17;
+
+ var other = @intCast(usize, rand & (modulus - 1));
+ if (other >= len) other -= len;
+ context.swap(i, a + other);
+ }
+}
+
+/// chooses a pivot in `items[a..b]`.
+/// swaps likely_sorted when `items[a..b]` seems to be already sorted.
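breakPatterns above seeds a xorshift64 generator with the slice length and uses it to displace three elements around the midpoint. A reduced sketch of just that generator — same shift constants, nothing else assumed:

    const std = @import("std");

    fn xorshift64(state: u64) u64 {
        var rand = state;
        rand ^= rand << 13;
        rand ^= rand >> 7;
        rand ^= rand << 17;
        return rand;
    }

    test "xorshift64 never collapses to zero from a nonzero seed" {
        var s: u64 = 8; // breakPatterns seeds with the slice length
        var i: usize = 0;
        while (i < 100) : (i += 1) {
            s = xorshift64(s);
            try std.testing.expect(s != 0);
        }
    }
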
+fn chosePivot(a: usize, b: usize, pivot: *usize, context: anytype) Hint {
+ // minimum length for using Tukey's ninther method
+ const shortest_ninther = 50;
+ // max_swaps is the maximum number of swaps allowed in this function
+ const max_swaps = 4 * 3;
+
+ var len = b - a;
+ var i = a + len / 4 * 1;
+ var j = a + len / 4 * 2;
+ var k = a + len / 4 * 3;
+ var swaps: usize = 0;
+
+ if (len >= 8) {
+ if (len >= shortest_ninther) {
+ // find medians in the neighborhoods of `i`, `j` and `k`
+ i = sort3(i - 1, i, i + 1, &swaps, context);
+ j = sort3(j - 1, j, j + 1, &swaps, context);
+ k = sort3(k - 1, k, k + 1, &swaps, context);
+ }
+
+ // find the median among `i`, `j` and `k`
+ j = sort3(i, j, k, &swaps, context);
+ }
+
+ pivot.* = j;
+ return switch (swaps) {
+ 0 => .increasing,
+ max_swaps => .decreasing,
+ else => .unknown,
+ };
+}
+
+fn sort3(a: usize, b: usize, c: usize, swaps: *usize, context: anytype) usize {
+ if (context.lessThan(b, a)) {
+ swaps.* += 1;
+ context.swap(b, a);
+ }
+
+ if (context.lessThan(c, b)) {
+ swaps.* += 1;
+ context.swap(c, b);
+ }
+
+ if (context.lessThan(b, a)) {
+ swaps.* += 1;
+ context.swap(b, a);
+ }
+
+ return b;
+}
+
+fn reverseRange(a: usize, b: usize, context: anytype) void {
+ var i = a;
+ var j = b - 1;
+ while (i < j) {
+ context.swap(i, j);
+ i += 1;
+ j -= 1;
+ }
+}
diff --git a/src/Compilation.zig b/src/Compilation.zig
index b485800329..cc2e2a916b 100644
--- a/src/Compilation.zig
+++ b/src/Compilation.zig
@@ -672,7 +672,7 @@ fn addPackageTableToCacheHash(
 }
 }
 // Sort the slice by package name
- std.sort.sort(Package.Table.KV, packages, {}, struct {
+ mem.sort(Package.Table.KV, packages, {}, struct {
 fn lessThan(_: void, lhs: Package.Table.KV, rhs: Package.Table.KV) bool {
 return std.mem.lessThan(u8, lhs.key, rhs.key);
 }
diff --git a/src/Package.zig b/src/Package.zig
index f28aac885d..cde3f38e28 100644
--- a/src/Package.zig
+++ b/src/Package.zig
@@ -672,7 +672,7 @@ fn computePackageHash(
 }
 }
 
- std.sort.sort(*HashedFile, all_files.items, {}, HashedFile.lessThan);
+ mem.sort(*HashedFile, all_files.items, {}, HashedFile.lessThan);
 
 var hasher = Manifest.Hash.init(.{});
 var any_failures = false;
diff --git a/src/RangeSet.zig b/src/RangeSet.zig
index 7e501f984b..aa051ff424 100644
--- a/src/RangeSet.zig
+++ b/src/RangeSet.zig
@@ -60,7 +60,7 @@ pub fn spans(self: *RangeSet, first: Value, last: Value, ty: Type) !bool {
 if (self.ranges.items.len == 0)
 return false;
 
- std.sort.sort(Range, self.ranges.items, LessThanContext{
+ std.mem.sort(Range, self.ranges.items, LessThanContext{
 .ty = ty,
 .module = self.module,
 }, lessThan);
diff --git a/src/Sema.zig b/src/Sema.zig
index 9178392d27..76c9891467 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -30979,7 +30979,7 @@ fn resolveStructLayout(sema: *Sema, ty: Type) CompileError!void {
 ctx.struct_obj.fields.values()[b].ty.abiAlignment(target);
 }
 };
- std.sort.sort(u32, optimized_order, AlignSortContext{
+ mem.sort(u32, optimized_order, AlignSortContext{
 .struct_obj = struct_obj,
 .sema = sema,
 }, AlignSortContext.lessThan);
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index e835242379..55a9694fd3 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -2176,7 +2176,7 @@ fn computeFrameLayout(self: *Self) !FrameLayout {
 }
 };
 const sort_context = SortContext{ .frame_align = frame_align };
- std.sort.sort(FrameIndex, stack_frame_order, sort_context, SortContext.lessThan);
+ mem.sort(FrameIndex, stack_frame_order, sort_context, SortContext.lessThan);
 }
 
 const
call_frame_align = frame_align[@enumToInt(FrameIndex.call_frame)]; diff --git a/src/arch/x86_64/Encoding.zig b/src/arch/x86_64/Encoding.zig index 6ed0aeeff4..625a5283b9 100644 --- a/src/arch/x86_64/Encoding.zig +++ b/src/arch/x86_64/Encoding.zig @@ -770,7 +770,7 @@ const mnemonic_to_encodings_map = init: { @setEvalBranchQuota(30_000); const encodings = @import("encodings.zig"); var entries = encodings.table; - std.sort.sort(encodings.Entry, &entries, {}, struct { + std.mem.sort(encodings.Entry, &entries, {}, struct { fn lessThan(_: void, lhs: encodings.Entry, rhs: encodings.Entry) bool { return @enumToInt(lhs[0]) < @enumToInt(rhs[0]); } diff --git a/src/codegen/c/type.zig b/src/codegen/c/type.zig index 892914ea3d..8494ae7353 100644 --- a/src/codegen/c/type.zig +++ b/src/codegen/c/type.zig @@ -1292,7 +1292,7 @@ pub const CType = extern union { fn sortFields(self: *@This(), fields_len: usize) []Payload.Fields.Field { const Field = Payload.Fields.Field; const slice = self.storage.anon.fields[0..fields_len]; - std.sort.sort(Field, slice, {}, struct { + mem.sort(Field, slice, {}, struct { fn before(_: void, lhs: Field, rhs: Field) bool { return lhs.alignas.@"align" > rhs.alignas.@"align"; } diff --git a/src/link/Coff.zig b/src/link/Coff.zig index 81e8c57bdd..01f18a73b3 100644 --- a/src/link/Coff.zig +++ b/src/link/Coff.zig @@ -1837,7 +1837,7 @@ fn writeBaseRelocations(self: *Coff) !void { pages.appendAssumeCapacity(page.*); } } - std.sort.sort(u32, pages.items, {}, std.sort.asc(u32)); + mem.sort(u32, pages.items, {}, std.sort.asc(u32)); var buffer = std.ArrayList(u8).init(gpa); defer buffer.deinit(); diff --git a/src/link/MachO/Object.zig b/src/link/MachO/Object.zig index 7cc6f78c7d..b218fdbd2d 100644 --- a/src/link/MachO/Object.zig +++ b/src/link/MachO/Object.zig @@ -209,7 +209,7 @@ pub fn parse(self: *Object, allocator: Allocator, cpu_arch: std.Target.Cpu.Arch) // afterwards by address in each group. Normally, dysymtab should // be enough to guarantee the sort, but turns out not every compiler // is kind enough to specify the symbols in the correct order. 
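The rename running through these hunks is mechanical: std.sort.sort becomes std.mem.sort (or a file-local mem.sort alias) with an unchanged call shape — element type, slice, context value, comparator. A sketch of the pattern with an anonymous comparator struct, mirroring the Encoding.zig hunk above:

    const std = @import("std");

    test "mem.sort with an anonymous comparator struct" {
        var entries = [_]u32{ 3, 1, 2 };
        std.mem.sort(u32, &entries, {}, struct {
            fn lessThan(_: void, lhs: u32, rhs: u32) bool {
                return lhs < rhs;
            }
        }.lessThan);
        try std.testing.expectEqualSlices(u32, &[_]u32{ 1, 2, 3 }, &entries);
    }
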
- sort.sort(SymbolAtIndex, sorted_all_syms.items, self, SymbolAtIndex.lessThan); + mem.sort(SymbolAtIndex, sorted_all_syms.items, self, SymbolAtIndex.lessThan); var prev_sect_id: u8 = 0; var section_index_lookup: ?Entry = null; @@ -462,7 +462,7 @@ pub fn splitRegularSections(self: *Object, zld: *Zld, object_id: u32) !void { sorted_sections[id] = .{ .header = sect, .id = @intCast(u8, id) }; } - std.sort.sort(SortedSection, sorted_sections, {}, sectionLessThanByAddress); + mem.sort(SortedSection, sorted_sections, {}, sectionLessThanByAddress); var sect_sym_index: u32 = 0; for (sorted_sections) |section| { @@ -663,7 +663,7 @@ fn parseRelocs(self: *Object, gpa: Allocator, sect_id: u8) !void { if (self.getSourceRelocs(section)) |relocs| { try self.relocations.ensureUnusedCapacity(gpa, relocs.len); self.relocations.appendUnalignedSliceAssumeCapacity(relocs); - std.sort.sort(macho.relocation_info, self.relocations.items[start..], {}, relocGreaterThan); + mem.sort(macho.relocation_info, self.relocations.items[start..], {}, relocGreaterThan); } self.section_relocs_lookup.items[sect_id] = start; } @@ -901,7 +901,7 @@ pub fn parseDataInCode(self: *Object, gpa: Allocator) !void { const dice = @ptrCast([*]align(1) const macho.data_in_code_entry, self.contents.ptr + cmd.dataoff)[0..ndice]; try self.data_in_code.ensureTotalCapacityPrecise(gpa, dice.len); self.data_in_code.appendUnalignedSliceAssumeCapacity(dice); - std.sort.sort(macho.data_in_code_entry, self.data_in_code.items, {}, diceLessThan); + mem.sort(macho.data_in_code_entry, self.data_in_code.items, {}, diceLessThan); } fn diceLessThan(ctx: void, lhs: macho.data_in_code_entry, rhs: macho.data_in_code_entry) bool { diff --git a/src/link/MachO/UnwindInfo.zig b/src/link/MachO/UnwindInfo.zig index 0071657f8b..8d2a36be9d 100644 --- a/src/link/MachO/UnwindInfo.zig +++ b/src/link/MachO/UnwindInfo.zig @@ -411,7 +411,7 @@ pub fn collect(info: *UnwindInfo, zld: *Zld) !void { } var slice = common_encodings_counts.values(); - std.sort.sort(CommonEncWithCount, slice, {}, CommonEncWithCount.greaterThan); + mem.sort(CommonEncWithCount, slice, {}, CommonEncWithCount.greaterThan); var i: u7 = 0; while (i < slice.len) : (i += 1) { diff --git a/src/link/MachO/dyld_info/Rebase.zig b/src/link/MachO/dyld_info/Rebase.zig index 1d7a0c94c0..5b386a8136 100644 --- a/src/link/MachO/dyld_info/Rebase.zig +++ b/src/link/MachO/dyld_info/Rebase.zig @@ -39,7 +39,7 @@ pub fn finalize(rebase: *Rebase, gpa: Allocator) !void { const writer = rebase.buffer.writer(gpa); - std.sort.sort(Entry, rebase.entries.items, {}, Entry.lessThan); + std.mem.sort(Entry, rebase.entries.items, {}, Entry.lessThan); try setTypePointer(writer); diff --git a/src/link/MachO/dyld_info/bind.zig b/src/link/MachO/dyld_info/bind.zig index 98a693920a..14ce1587aa 100644 --- a/src/link/MachO/dyld_info/bind.zig +++ b/src/link/MachO/dyld_info/bind.zig @@ -47,7 +47,7 @@ pub fn Bind(comptime Ctx: type, comptime Target: type) type { const writer = self.buffer.writer(gpa); - std.sort.sort(Entry, self.entries.items, ctx, Entry.lessThan); + std.mem.sort(Entry, self.entries.items, ctx, Entry.lessThan); var start: usize = 0; var seg_id: ?u8 = null; diff --git a/src/link/MachO/zld.zig b/src/link/MachO/zld.zig index 7e6870ecbc..b151aee19b 100644 --- a/src/link/MachO/zld.zig +++ b/src/link/MachO/zld.zig @@ -1441,7 +1441,7 @@ pub const Zld = struct { } } - std.sort.sort(Section, sections.items, {}, SortSection.lessThan); + mem.sort(Section, sections.items, {}, SortSection.lessThan); self.sections.shrinkRetainingCapacity(0); 
for (sections.items) |out| { @@ -2237,7 +2237,7 @@ pub const Zld = struct { } } - std.sort.sort(u64, addresses.items, {}, asc_u64); + mem.sort(u64, addresses.items, {}, asc_u64); var offsets = std.ArrayList(u32).init(gpa); defer offsets.deinit(); diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig index cd9c44d656..5dfc91d4ce 100644 --- a/src/link/Wasm.zig +++ b/src/link/Wasm.zig @@ -2143,7 +2143,7 @@ fn sortDataSegments(wasm: *Wasm) !void { } }; - std.sort.sort([]const u8, keys, {}, SortContext.sort); + mem.sort([]const u8, keys, {}, SortContext.sort); for (keys) |key| { const segment_index = wasm.data_segments.get(key).?; new_mapping.putAssumeCapacity(key, segment_index); @@ -2187,7 +2187,7 @@ fn setupInitFunctions(wasm: *Wasm) !void { } // sort the initfunctions based on their priority - std.sort.sort(InitFuncLoc, wasm.init_funcs.items, {}, InitFuncLoc.lessThan); + mem.sort(InitFuncLoc, wasm.init_funcs.items, {}, InitFuncLoc.lessThan); } /// Generates an atom containing the global error set' size. @@ -3687,7 +3687,7 @@ fn writeToFile( } }.sort; - std.sort.sort(*Atom, sorted_atoms.items, wasm, atom_sort_fn); + mem.sort(*Atom, sorted_atoms.items, wasm, atom_sort_fn); for (sorted_atoms.items) |sorted_atom| { try leb.writeULEB128(binary_writer, sorted_atom.size); @@ -4050,8 +4050,8 @@ fn emitNameSection(wasm: *Wasm, binary_bytes: *std.ArrayList(u8), arena: std.mem data_segment_index += 1; } - std.sort.sort(Name, funcs.values(), {}, Name.lessThan); - std.sort.sort(Name, globals.items, {}, Name.lessThan); + mem.sort(Name, funcs.values(), {}, Name.lessThan); + mem.sort(Name, globals.items, {}, Name.lessThan); const header_offset = try reserveCustomSectionHeader(binary_bytes); const writer = binary_bytes.writer(); diff --git a/src/objcopy.zig b/src/objcopy.zig index 12129aba9c..c5d0e8dcb3 100644 --- a/src/objcopy.zig +++ b/src/objcopy.zig @@ -402,7 +402,7 @@ const BinaryElfOutput = struct { } } - std.sort.sort(*BinaryElfSegment, self.segments.items, {}, segmentSortCompare); + mem.sort(*BinaryElfSegment, self.segments.items, {}, segmentSortCompare); for (self.segments.items, 0..) |firstSegment, i| { if (firstSegment.firstSection) |firstSection| { @@ -427,7 +427,7 @@ const BinaryElfOutput = struct { } } - std.sort.sort(*BinaryElfSection, self.sections.items, {}, sectionSortCompare); + mem.sort(*BinaryElfSection, self.sections.items, {}, sectionSortCompare); return self; } diff --git a/test/src/Cases.zig b/test/src/Cases.zig index 0451079a0e..63dd2fd3da 100644 --- a/test/src/Cases.zig +++ b/test/src/Cases.zig @@ -607,7 +607,7 @@ fn sortTestFilenames(filenames: [][]const u8) void { }; } }; - std.sort.sort([]const u8, filenames, Context{}, Context.lessThan); + std.mem.sort([]const u8, filenames, Context{}, Context.lessThan); } /// Iterates a set of filenames extracting batches that are either incremental diff --git a/tools/gen_stubs.zig b/tools/gen_stubs.zig index bc2637e197..95787b719a 100644 --- a/tools/gen_stubs.zig +++ b/tools/gen_stubs.zig @@ -437,7 +437,7 @@ fn parseElf(parse: Parse, comptime is_64: bool, comptime endian: builtin.Endian) const dynstr = elf_bytes[dynstr_offset..]; // Sort the list by address, ascending. 
- std.sort.sort(Sym, @alignCast(8, dyn_syms), {}, S.symbolAddrLessThan); + mem.sort(Sym, @alignCast(8, dyn_syms), {}, S.symbolAddrLessThan); for (dyn_syms) |sym| { const this_section = s(sym.st_shndx); diff --git a/tools/generate_JSONTestSuite.zig b/tools/generate_JSONTestSuite.zig index b8550959c7..2229cf4012 100644 --- a/tools/generate_JSONTestSuite.zig +++ b/tools/generate_JSONTestSuite.zig @@ -23,7 +23,7 @@ pub fn main() !void { while (try it.next()) |entry| { try names.append(try allocator.dupe(u8, entry.name)); } - std.sort.sort([]const u8, names.items, {}, (struct { + std.mem.sort([]const u8, names.items, {}, (struct { fn lessThan(_: void, a: []const u8, b: []const u8) bool { return std.mem.lessThan(u8, a, b); } diff --git a/tools/process_headers.zig b/tools/process_headers.zig index a6550a2573..0321c0e0eb 100644 --- a/tools/process_headers.zig +++ b/tools/process_headers.zig @@ -460,7 +460,7 @@ pub fn main() !void { try contents_list.append(contents); } } - std.sort.sort(*Contents, contents_list.items, {}, Contents.hitCountLessThan); + std.mem.sort(*Contents, contents_list.items, {}, Contents.hitCountLessThan); const best_contents = contents_list.popOrNull().?; if (best_contents.hit_count > 1) { // worth it to make it generic diff --git a/tools/update-linux-headers.zig b/tools/update-linux-headers.zig index 38fbab6645..0f31e5e893 100644 --- a/tools/update-linux-headers.zig +++ b/tools/update-linux-headers.zig @@ -260,7 +260,7 @@ pub fn main() !void { try contents_list.append(contents); } } - std.sort.sort(*Contents, contents_list.items, {}, Contents.hitCountLessThan); + std.mem.sort(*Contents, contents_list.items, {}, Contents.hitCountLessThan); const best_contents = contents_list.popOrNull().?; if (best_contents.hit_count > 1) { // worth it to make it generic diff --git a/tools/update_clang_options.zig b/tools/update_clang_options.zig index 682ec7e152..feefeb0a83 100644 --- a/tools/update_clang_options.zig +++ b/tools/update_clang_options.zig @@ -646,7 +646,7 @@ pub fn main() anyerror!void { } // Some options have multiple matches. As an example, "-Wl,foo" matches both // "W" and "Wl,". So we sort this list in order of descending priority. 
- std.sort.sort(*json.ObjectMap, all_objects.items, {}, objectLessThan); + std.mem.sort(*json.ObjectMap, all_objects.items, {}, objectLessThan); var buffered_stdout = std.io.bufferedWriter(std.io.getStdOut().writer()); const stdout = buffered_stdout.writer(); diff --git a/tools/update_cpu_features.zig b/tools/update_cpu_features.zig index 53bb365f41..d5c3d48852 100644 --- a/tools/update_cpu_features.zig +++ b/tools/update_cpu_features.zig @@ -1187,8 +1187,8 @@ fn processOneTarget(job: Job) anyerror!void { for (llvm_target.extra_cpus) |extra_cpu| { try all_cpus.append(extra_cpu); } - std.sort.sort(Feature, all_features.items, {}, featureLessThan); - std.sort.sort(Cpu, all_cpus.items, {}, cpuLessThan); + mem.sort(Feature, all_features.items, {}, featureLessThan); + mem.sort(Cpu, all_cpus.items, {}, cpuLessThan); const target_sub_path = try fs.path.join(arena, &.{ "lib", "std", "target" }); var target_dir = try job.zig_src_dir.makeOpenPath(target_sub_path, .{}); @@ -1283,7 +1283,7 @@ fn processOneTarget(job: Job) anyerror!void { try dependencies.append(key.*); } } - std.sort.sort([]const u8, dependencies.items, {}, asciiLessThan); + mem.sort([]const u8, dependencies.items, {}, asciiLessThan); if (dependencies.items.len == 0) { try w.writeAll( @@ -1328,7 +1328,7 @@ fn processOneTarget(job: Job) anyerror!void { try cpu_features.append(key.*); } } - std.sort.sort([]const u8, cpu_features.items, {}, asciiLessThan); + mem.sort([]const u8, cpu_features.items, {}, asciiLessThan); if (cpu.llvm_name) |llvm_name| { try w.print( \\ pub const {} = CpuModel{{ diff --git a/tools/update_spirv_features.zig b/tools/update_spirv_features.zig index 8d398f58de..44d8b6a445 100644 --- a/tools/update_spirv_features.zig +++ b/tools/update_spirv_features.zig @@ -303,7 +303,7 @@ fn gatherVersions(allocator: Allocator, registry: g.CoreRegistry) ![]const Versi } } - std.sort.sort(Version, versions.items, {}, Version.lessThan); + std.mem.sort(Version, versions.items, {}, Version.lessThan); return versions.items; } -- cgit v1.2.3 From dbd44658ff2d392451ea4f3a38ca4bd26da34314 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 26 May 2023 13:16:33 -0700 Subject: wasm backend: emit a TODO error rather than miscompile --- src/arch/wasm/CodeGen.zig | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) (limited to 'src/arch') diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index 11b7f65946..5b5c8b7b09 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -3090,9 +3090,14 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue { }, .ErrorUnion => { const error_type = ty.errorUnionSet(); - const is_pl = val.errorUnionIsPayload(); - const err_val = if (!is_pl) val else Value.initTag(.zero); - return func.lowerConstant(err_val, error_type); + const payload_type = ty.errorUnionPayload(); + if (!payload_type.hasRuntimeBitsIgnoreComptime()) { + // We use the error type directly as the type. 
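The guard above hinges on a layout fact: an error union whose payload has no runtime bits is fully represented by its error value, so lowering the error value alone is sound. A reduced illustration — a sketch assuming only standard @sizeOf reflection, not anything this patch adds:

    const std = @import("std");

    test "zero-bit payloads add nothing to an error union" {
        const E = error{Oops};
        // void has no runtime bits, so E!void stores only the error value.
        try std.testing.expect(@sizeOf(E!void) == @sizeOf(E));
        const x: E!void = error.Oops;
        try std.testing.expectError(error.Oops, x);
    }
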
+ const is_pl = val.errorUnionIsPayload(); + const err_val = if (!is_pl) val else Value.initTag(.zero); + return func.lowerConstant(err_val, error_type); + } + return func.fail("Wasm TODO: lowerConstant error union with non-zero-bit payload type", .{}); }, .Optional => if (ty.optionalReprIsPayload()) { var buf: Type.Payload.ElemType = undefined; -- cgit v1.2.3 From ca16f1e8a703491bcaac0d13379d2556e8ca837d Mon Sep 17 00:00:00 2001 From: Veikka Tuominen Date: Fri, 26 May 2023 23:29:05 +0300 Subject: std.Target adjustments * move `ptrBitWidth` from Arch to Target since it needs to know about the abi * double isn't always 8 bits * AVR uses 1-byte alignment for everything in GCC --- lib/std/c/freebsd.zig | 2 +- lib/std/os/linux.zig | 14 ++- lib/std/target.zig | 164 ++++++++++++++++---------------- lib/std/zig/system/NativePaths.zig | 2 +- lib/std/zig/system/NativeTargetInfo.zig | 4 +- src/Sema.zig | 2 +- src/arch/aarch64/CodeGen.zig | 12 +-- src/arch/arm/CodeGen.zig | 2 +- src/arch/riscv64/CodeGen.zig | 2 +- src/arch/riscv64/abi.zig | 2 +- src/arch/sparc64/CodeGen.zig | 8 +- src/arch/wasm/CodeGen.zig | 2 +- src/arch/x86_64/CodeGen.zig | 4 +- src/codegen.zig | 12 +-- src/codegen/c/type.zig | 2 +- src/codegen/llvm.zig | 24 ++--- src/codegen/spirv.zig | 4 +- src/glibc.zig | 18 ++-- src/link/Coff.zig | 2 +- src/link/Coff/lld.zig | 2 +- src/link/Dwarf.zig | 6 +- src/link/Elf.zig | 12 +-- src/link/Elf/Atom.zig | 2 +- src/link/Plan9.zig | 2 +- src/mingw.zig | 2 +- src/musl.zig | 2 +- src/type.zig | 36 +++---- src/value.zig | 2 +- 28 files changed, 180 insertions(+), 168 deletions(-) (limited to 'src/arch') diff --git a/lib/std/c/freebsd.zig b/lib/std/c/freebsd.zig index 9dfa46c5b9..485e13b8ee 100644 --- a/lib/std/c/freebsd.zig +++ b/lib/std/c/freebsd.zig @@ -322,7 +322,7 @@ pub const RTLD = struct { pub const dl_phdr_info = extern struct { /// Module relocation base. - dlpi_addr: if (builtin.cpu.arch.ptrBitWidth() == 32) std.elf.Elf32_Addr else std.elf.Elf64_Addr, + dlpi_addr: if (builtin.target.ptrBitWidth() == 32) std.elf.Elf32_Addr else std.elf.Elf64_Addr, /// Module name. dlpi_name: ?[*:0]const u8, /// Pointer to module's phdr. 
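Moving ptrBitWidth from Cpu.Arch to Target matters because pointer width is an ABI property, not only an architecture property: gnux32, muslx32, gnuabin32 and gnuilp32 pin pointers at 32 bits on otherwise 64-bit machines, and sparc is 64-bit only with the v9 feature (see the target.zig hunk below). A reduced model of that abi-first dispatch — ptrBits is a hypothetical helper for illustration, not part of the patch:

    const std = @import("std");

    // hypothetical reduction of the abi-first dispatch in Target.ptrBitWidth
    fn ptrBits(arch_bits: u16, abi_is_ilp32: bool) u16 {
        return if (abi_is_ilp32) 32 else arch_bits;
    }

    test "an ilp32 abi overrides a 64-bit architecture" {
        try std.testing.expectEqual(@as(u16, 32), ptrBits(64, true)); // x86_64-gnux32
        try std.testing.expectEqual(@as(u16, 64), ptrBits(64, false)); // plain x86_64
    }
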
diff --git a/lib/std/os/linux.zig b/lib/std/os/linux.zig index 9928c25b80..ef0ec94d3b 100644 --- a/lib/std/os/linux.zig +++ b/lib/std/os/linux.zig @@ -5847,8 +5847,18 @@ pub const AUDIT = struct { fn toAudit(arch: std.Target.Cpu.Arch) u32 { var res: u32 = @enumToInt(arch.toElfMachine()); if (arch.endian() == .Little) res |= LE; - if (arch.ptrBitWidth() == 64) res |= @"64BIT"; - + switch (arch) { + .aarch64, + .mips64, + .mips64el, + .powerpc64, + .powerpc64le, + .riscv64, + .sparc64, + .x86_64, + => res |= @"64BIT", + else => {}, + } return res; } }; diff --git a/lib/std/target.zig b/lib/std/target.zig index 4acc3b3361..6051334d5e 100644 --- a/lib/std/target.zig +++ b/lib/std/target.zig @@ -1189,77 +1189,6 @@ pub const Target = struct { }; } - pub fn ptrBitWidth(arch: Arch) u16 { - switch (arch) { - .avr, - .msp430, - .spu_2, - => return 16, - - .arc, - .arm, - .armeb, - .csky, - .hexagon, - .m68k, - .le32, - .mips, - .mipsel, - .powerpc, - .powerpcle, - .r600, - .riscv32, - .sparc, - .sparcel, - .tce, - .tcele, - .thumb, - .thumbeb, - .x86, - .xcore, - .nvptx, - .amdil, - .hsail, - .spir, - .kalimba, - .shave, - .lanai, - .wasm32, - .renderscript32, - .aarch64_32, - .spirv32, - .loongarch32, - .dxil, - .xtensa, - => return 32, - - .aarch64, - .aarch64_be, - .mips64, - .mips64el, - .powerpc64, - .powerpc64le, - .riscv64, - .x86_64, - .nvptx64, - .le64, - .amdil64, - .hsail64, - .spir64, - .wasm64, - .renderscript64, - .amdgcn, - .bpfel, - .bpfeb, - .sparc64, - .s390x, - .ve, - .spirv64, - .loongarch64, - => return 64, - } - } - /// Returns a name that matches the lib/std/target/* source file name. pub fn genericName(arch: Arch) []const u8 { return switch (arch) { @@ -1621,7 +1550,7 @@ pub const Target = struct { const copy = S.copy; if (self.abi == .android) { - const suffix = if (self.cpu.arch.ptrBitWidth() == 64) "64" else ""; + const suffix = if (self.ptrBitWidth() == 64) "64" else ""; return print(&result, "/system/bin/linker{s}", .{suffix}); } @@ -1904,6 +1833,83 @@ pub const Target = struct { }; } + pub fn ptrBitWidth(target: std.Target) u16 { + switch (target.abi) { + .gnux32, .muslx32, .gnuabin32, .gnuilp32 => return 32, + .gnuabi64 => return 64, + else => {}, + } + switch (target.cpu.arch) { + .avr, + .msp430, + .spu_2, + => return 16, + + .arc, + .arm, + .armeb, + .csky, + .hexagon, + .m68k, + .le32, + .mips, + .mipsel, + .powerpc, + .powerpcle, + .r600, + .riscv32, + .sparcel, + .tce, + .tcele, + .thumb, + .thumbeb, + .x86, + .xcore, + .nvptx, + .amdil, + .hsail, + .spir, + .kalimba, + .shave, + .lanai, + .wasm32, + .renderscript32, + .aarch64_32, + .spirv32, + .loongarch32, + .dxil, + .xtensa, + => return 32, + + .aarch64, + .aarch64_be, + .mips64, + .mips64el, + .powerpc64, + .powerpc64le, + .riscv64, + .x86_64, + .nvptx64, + .le64, + .amdil64, + .hsail64, + .spir64, + .wasm64, + .renderscript64, + .amdgcn, + .bpfel, + .bpfeb, + .sparc64, + .s390x, + .ve, + .spirv64, + .loongarch64, + => return 64, + + .sparc => return if (std.Target.sparc.featureSetHas(target.cpu.features, .v9)) 64 else 32, + } + } + pub const CType = enum { char, short, @@ -1930,11 +1936,10 @@ pub const Target = struct { .ulong, .longlong, .ulonglong, + .float, + .double, => @divExact(c_type_bit_size(t, c_type), 8), - .float => 4, - .double => 8, - .longdouble => switch (c_type_bit_size(t, c_type)) { 16 => 2, 32 => 4, @@ -1990,7 +1995,7 @@ pub const Target = struct { .char => return 8, .short, .ushort => return 16, .int, .uint, .float => return 32, - .long, .ulong => return target.cpu.arch.ptrBitWidth(), + 
.long, .ulong => return target.ptrBitWidth(), .longlong, .ulonglong, .double => return 64, .longdouble => switch (target.cpu.arch) { .x86 => switch (target.abi) { @@ -2084,7 +2089,7 @@ pub const Target = struct { .char => return 8, .short, .ushort => return 16, .int, .uint, .float => return 32, - .long, .ulong => return target.cpu.arch.ptrBitWidth(), + .long, .ulong => return target.ptrBitWidth(), .longlong, .ulonglong, .double => return 64, .longdouble => switch (target.cpu.arch) { .x86 => switch (target.abi) { @@ -2256,10 +2261,7 @@ pub const Target = struct { pub fn c_type_alignment(target: Target, c_type: CType) u16 { // Overrides for unusual alignments switch (target.cpu.arch) { - .avr => switch (c_type) { - .short, .ushort => return 2, - else => return 1, - }, + .avr => return 1, .x86 => switch (target.os.tag) { .windows, .uefi => switch (c_type) { .longlong, .ulonglong, .double => return 8, diff --git a/lib/std/zig/system/NativePaths.zig b/lib/std/zig/system/NativePaths.zig index 70c795b0cf..6f001c2e4f 100644 --- a/lib/std/zig/system/NativePaths.zig +++ b/lib/std/zig/system/NativePaths.zig @@ -117,7 +117,7 @@ pub fn detect(allocator: Allocator, native_info: NativeTargetInfo) !NativePaths const triple = try native_target.linuxTriple(allocator); defer allocator.free(triple); - const qual = native_target.cpu.arch.ptrBitWidth(); + const qual = native_target.ptrBitWidth(); // TODO: $ ld --verbose | grep SEARCH_DIR // the output contains some paths that end with lib64, maybe include them too? diff --git a/lib/std/zig/system/NativeTargetInfo.zig b/lib/std/zig/system/NativeTargetInfo.zig index 539ad96365..b22580b6d8 100644 --- a/lib/std/zig/system/NativeTargetInfo.zig +++ b/lib/std/zig/system/NativeTargetInfo.zig @@ -1095,7 +1095,7 @@ pub fn getExternalExecutor( if (candidate.target.cpu.arch != builtin.cpu.arch) { return bad_result; } - switch (candidate.target.cpu.arch.ptrBitWidth()) { + switch (candidate.target.ptrBitWidth()) { 32 => return Executor{ .wine = "wine" }, 64 => return Executor{ .wine = "wine64" }, else => return bad_result, @@ -1105,7 +1105,7 @@ pub fn getExternalExecutor( }, .wasi => { if (options.allow_wasmtime) { - switch (candidate.target.cpu.arch.ptrBitWidth()) { + switch (candidate.target.ptrBitWidth()) { 32 => return Executor{ .wasmtime = "wasmtime" }, else => return bad_result, } diff --git a/src/Sema.zig b/src/Sema.zig index 1ee19515aa..e5ca38ac94 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -34002,7 +34002,7 @@ fn intFitsInType( => switch (ty.zigTypeTag()) { .Int => { const info = ty.intInfo(target); - const ptr_bits = target.cpu.arch.ptrBitWidth(); + const ptr_bits = target.ptrBitWidth(); return switch (info.signedness) { .signed => info.bits > ptr_bits, .unsigned => info.bits >= ptr_bits, diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index 649edd3b9c..971ed4749d 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -501,7 +501,7 @@ fn gen(self: *Self) !void { // (or w0 when pointer size is 32 bits). As this register // might get overwritten along the way, save the address // to the stack. 
- const ptr_bits = self.target.cpu.arch.ptrBitWidth(); + const ptr_bits = self.target.ptrBitWidth(); const ptr_bytes = @divExact(ptr_bits, 8); const ret_ptr_reg = self.registerAlias(.x0, Type.usize); @@ -1512,7 +1512,7 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void { const len = try self.resolveInst(bin_op.rhs); const len_ty = self.air.typeOf(bin_op.rhs); - const ptr_bits = self.target.cpu.arch.ptrBitWidth(); + const ptr_bits = self.target.ptrBitWidth(); const ptr_bytes = @divExact(ptr_bits, 8); const stack_offset = try self.allocMem(ptr_bytes * 2, ptr_bytes * 2, inst); @@ -3362,7 +3362,7 @@ fn airSlicePtr(self: *Self, inst: Air.Inst.Index) !void { fn airSliceLen(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { - const ptr_bits = self.target.cpu.arch.ptrBitWidth(); + const ptr_bits = self.target.ptrBitWidth(); const ptr_bytes = @divExact(ptr_bits, 8); const mcv = try self.resolveInst(ty_op.operand); switch (mcv) { @@ -3386,7 +3386,7 @@ fn airSliceLen(self: *Self, inst: Air.Inst.Index) !void { fn airPtrSliceLenPtr(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { - const ptr_bits = self.target.cpu.arch.ptrBitWidth(); + const ptr_bits = self.target.ptrBitWidth(); const ptr_bytes = @divExact(ptr_bits, 8); const mcv = try self.resolveInst(ty_op.operand); switch (mcv) { @@ -4321,7 +4321,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier } else if (self.bin_file.cast(link.File.Plan9)) |p9| { const decl_block_index = try p9.seeDecl(func.owner_decl); const decl_block = p9.getDeclBlock(decl_block_index); - const ptr_bits = self.target.cpu.arch.ptrBitWidth(); + const ptr_bits = self.target.ptrBitWidth(); const ptr_bytes: u64 = @divExact(ptr_bits, 8); const got_addr = p9.bases.data; const got_index = decl_block.got_index.?; @@ -5929,7 +5929,7 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void { const array_ty = ptr_ty.childType(); const array_len = @intCast(u32, array_ty.arrayLen()); - const ptr_bits = self.target.cpu.arch.ptrBitWidth(); + const ptr_bits = self.target.ptrBitWidth(); const ptr_bytes = @divExact(ptr_bits, 8); const stack_offset = try self.allocMem(ptr_bytes * 2, ptr_bytes * 2, inst); diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index 5353b78e4d..bdc1627bd6 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -1035,7 +1035,7 @@ fn allocRegOrMem(self: *Self, elem_ty: Type, reg_ok: bool, maybe_inst: ?Air.Inst if (reg_ok) { // Make sure the type can fit in a register before we try to allocate one. - const ptr_bits = self.target.cpu.arch.ptrBitWidth(); + const ptr_bits = self.target.ptrBitWidth(); const ptr_bytes: u64 = @divExact(ptr_bits, 8); if (abi_size <= ptr_bytes) { if (self.register_manager.tryAllocReg(maybe_inst, gp)) |reg| { diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index d4c7eb0c70..5fb07c5fdc 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -826,7 +826,7 @@ fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue { if (reg_ok) { // Make sure the type can fit in a register before we try to allocate one. 
- const ptr_bits = self.target.cpu.arch.ptrBitWidth(); + const ptr_bits = self.target.ptrBitWidth(); const ptr_bytes: u64 = @divExact(ptr_bits, 8); if (abi_size <= ptr_bytes) { if (self.register_manager.tryAllocReg(inst, gp)) |reg| { diff --git a/src/arch/riscv64/abi.zig b/src/arch/riscv64/abi.zig index 26286a1e22..bec1b49a4e 100644 --- a/src/arch/riscv64/abi.zig +++ b/src/arch/riscv64/abi.zig @@ -9,7 +9,7 @@ pub const Class = enum { memory, byval, integer, double_integer }; pub fn classifyType(ty: Type, target: std.Target) Class { std.debug.assert(ty.hasRuntimeBitsIgnoreComptime()); - const max_byval_size = target.cpu.arch.ptrBitWidth() * 2; + const max_byval_size = target.ptrBitWidth() * 2; switch (ty.zigTypeTag()) { .Struct => { const bit_size = ty.bitSize(target); diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index caceabfb5b..b70bc0f73d 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -876,7 +876,7 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void { const array_ty = ptr_ty.childType(); const array_len = @intCast(u32, array_ty.arrayLen()); - const ptr_bits = self.target.cpu.arch.ptrBitWidth(); + const ptr_bits = self.target.ptrBitWidth(); const ptr_bytes = @divExact(ptr_bits, 8); const stack_offset = try self.allocMem(inst, ptr_bytes * 2, ptr_bytes * 2); @@ -2241,7 +2241,7 @@ fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void { fn airPtrSliceLenPtr(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { - const ptr_bits = self.target.cpu.arch.ptrBitWidth(); + const ptr_bits = self.target.ptrBitWidth(); const ptr_bytes = @divExact(ptr_bits, 8); const mcv = try self.resolveInst(ty_op.operand); switch (mcv) { @@ -2427,7 +2427,7 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void { const len = try self.resolveInst(bin_op.rhs); const len_ty = self.air.typeOf(bin_op.rhs); - const ptr_bits = self.target.cpu.arch.ptrBitWidth(); + const ptr_bits = self.target.ptrBitWidth(); const ptr_bytes = @divExact(ptr_bits, 8); const stack_offset = try self.allocMem(inst, ptr_bytes * 2, ptr_bytes * 2); @@ -2485,7 +2485,7 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void { fn airSliceLen(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { - const ptr_bits = self.target.cpu.arch.ptrBitWidth(); + const ptr_bits = self.target.ptrBitWidth(); const ptr_bytes = @divExact(ptr_bits, 8); const mcv = try self.resolveInst(ty_op.operand); switch (mcv) { diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index 5b5c8b7b09..bdbe9640c2 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -1684,7 +1684,7 @@ fn memcpy(func: *CodeGen, dst: WValue, src: WValue, len: WValue) !void { } fn ptrSize(func: *const CodeGen) u16 { - return @divExact(func.target.cpu.arch.ptrBitWidth(), 8); + return @divExact(func.target.ptrBitWidth(), 8); } fn arch(func: *const CodeGen) std.Target.Cpu.Arch { diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 55a9694fd3..05d5da107f 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -4000,7 +4000,7 @@ fn airPtrSliceLenPtr(self: *Self, inst: Air.Inst.Index) !void { registerAlias(dst_reg, dst_abi_size), Memory.sib(.qword, .{ .base = .{ .reg = src_reg }, - .disp = 
@divExact(self.target.cpu.arch.ptrBitWidth(), 8), + .disp = @divExact(self.target.ptrBitWidth(), 8), }), ); @@ -8131,7 +8131,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier } else if (self.bin_file.cast(link.File.Plan9)) |p9| { const decl_block_index = try p9.seeDecl(owner_decl); const decl_block = p9.getDeclBlock(decl_block_index); - const ptr_bits = self.target.cpu.arch.ptrBitWidth(); + const ptr_bits = self.target.ptrBitWidth(); const ptr_bytes: u64 = @divExact(ptr_bits, 8); const got_addr = p9.bases.data; const got_index = decl_block.got_index.?; diff --git a/src/codegen.zig b/src/codegen.zig index 67d3d8bca7..692c55e380 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -314,7 +314,7 @@ pub fn generateSymbol( }, .Pointer => switch (typed_value.val.tag()) { .null_value => { - switch (target.cpu.arch.ptrBitWidth()) { + switch (target.ptrBitWidth()) { 32 => { mem.writeInt(u32, try code.addManyAsArray(4), 0, endian); if (typed_value.ty.isSlice()) try code.appendNTimes(0xaa, 4); @@ -328,7 +328,7 @@ pub fn generateSymbol( return Result.ok; }, .zero, .one, .int_u64, .int_big_positive => { - switch (target.cpu.arch.ptrBitWidth()) { + switch (target.ptrBitWidth()) { 32 => { const x = typed_value.val.toUnsignedInt(target); mem.writeInt(u32, try code.addManyAsArray(4), @intCast(u32, x), endian); @@ -970,7 +970,7 @@ fn lowerDeclRef( return Result.ok; } - const ptr_width = target.cpu.arch.ptrBitWidth(); + const ptr_width = target.ptrBitWidth(); const decl = module.declPtr(decl_index); const is_fn_body = decl.ty.zigTypeTag() == .Fn; if (!is_fn_body and !decl.ty.hasRuntimeBits()) { @@ -1059,7 +1059,7 @@ fn genDeclRef( log.debug("genDeclRef: ty = {}, val = {}", .{ tv.ty.fmt(module), tv.val.fmtValue(tv.ty, module) }); const target = bin_file.options.target; - const ptr_bits = target.cpu.arch.ptrBitWidth(); + const ptr_bits = target.ptrBitWidth(); const ptr_bytes: u64 = @divExact(ptr_bits, 8); const decl = module.declPtr(decl_index); @@ -1137,7 +1137,7 @@ fn genUnnamedConst( } else if (bin_file.cast(link.File.Coff)) |_| { return GenResult.mcv(.{ .load_direct = local_sym_index }); } else if (bin_file.cast(link.File.Plan9)) |p9| { - const ptr_bits = target.cpu.arch.ptrBitWidth(); + const ptr_bits = target.ptrBitWidth(); const ptr_bytes: u64 = @divExact(ptr_bits, 8); const got_index = local_sym_index; // the plan9 backend returns the got_index const got_addr = p9.bases.data + got_index * ptr_bytes; @@ -1168,7 +1168,7 @@ pub fn genTypedValue( return GenResult.mcv(.undef); const target = bin_file.options.target; - const ptr_bits = target.cpu.arch.ptrBitWidth(); + const ptr_bits = target.ptrBitWidth(); if (!typed_value.ty.isSlice()) { if (typed_value.val.castTag(.variable)) |payload| { diff --git a/src/codegen/c/type.zig b/src/codegen/c/type.zig index 8494ae7353..6116d070e6 100644 --- a/src/codegen/c/type.zig +++ b/src/codegen/c/type.zig @@ -879,7 +879,7 @@ pub const CType = extern union { .pointer_const, .pointer_volatile, .pointer_const_volatile, - => @divExact(target.cpu.arch.ptrBitWidth(), 8), + => @divExact(target.ptrBitWidth(), 8), .uint16_t, .int16_t, .zig_f16 => 2, .uint32_t, .int32_t, .zig_f32 => 4, .uint64_t, .int64_t, .zig_f64 => 8, diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index f4b56c9084..f3fd8ec69e 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -591,7 +591,7 @@ pub const Object = struct { const target = mod.getTarget(); const llvm_ptr_ty = self.context.pointerType(0); // TODO: Address space - const llvm_usize_ty = 
self.context.intType(target.cpu.arch.ptrBitWidth()); + const llvm_usize_ty = self.context.intType(target.ptrBitWidth()); const type_fields = [_]*llvm.Type{ llvm_ptr_ty, llvm_usize_ty, @@ -1114,7 +1114,7 @@ pub const Object = struct { llvm_arg_i += 1; const field_ptr = builder.buildStructGEP(llvm_ty, arg_ptr, field_i, ""); const store_inst = builder.buildStore(param, field_ptr); - store_inst.setAlignment(target.cpu.arch.ptrBitWidth() / 8); + store_inst.setAlignment(target.ptrBitWidth() / 8); } const is_by_ref = isByRef(param_ty); @@ -1718,7 +1718,7 @@ pub const Object = struct { defer gpa.free(name); const ptr_di_ty = dib.createPointerType( elem_di_ty, - target.cpu.arch.ptrBitWidth(), + target.ptrBitWidth(), ty.ptrAlignment(target) * 8, name, ); @@ -4071,7 +4071,7 @@ pub const DeclGen = struct { .Struct => { if (parent_ty.containerLayout() == .Packed) { if (!byte_aligned) return parent_llvm_ptr; - const llvm_usize = dg.context.intType(target.cpu.arch.ptrBitWidth()); + const llvm_usize = dg.context.intType(target.ptrBitWidth()); const base_addr = parent_llvm_ptr.constPtrToInt(llvm_usize); // count bits of fields before this one const prev_bits = b: { @@ -4261,7 +4261,7 @@ pub const DeclGen = struct { // instruction is followed by a `wrap_optional`, it will return this value // verbatim, and the result should test as non-null. const target = dg.module.getTarget(); - const int = switch (target.cpu.arch.ptrBitWidth()) { + const int = switch (target.ptrBitWidth()) { 16 => llvm_usize.constInt(0xaaaa, .False), 32 => llvm_usize.constInt(0xaaaaaaaa, .False), 64 => llvm_usize.constInt(0xaaaaaaaa_aaaaaaaa, .False), @@ -4910,7 +4910,7 @@ pub const FuncGen = struct { const i = @intCast(c_uint, i_usize); const field_ptr = self.builder.buildStructGEP(llvm_ty, arg_ptr, i, ""); const load_inst = self.builder.buildLoad(field_ty, field_ptr, ""); - load_inst.setAlignment(target.cpu.arch.ptrBitWidth() / 8); + load_inst.setAlignment(target.ptrBitWidth() / 8); llvm_args.appendAssumeCapacity(load_inst); } }, @@ -5579,7 +5579,7 @@ pub const FuncGen = struct { const switch_br = self.air.extraData(Air.SwitchBr, pl_op.payload); const else_block = self.context.appendBasicBlock(self.llvm_func, "Else"); const target = self.dg.module.getTarget(); - const llvm_usize = self.context.intType(target.cpu.arch.ptrBitWidth()); + const llvm_usize = self.context.intType(target.ptrBitWidth()); const cond_int = if (cond.typeOf().getTypeKind() == .Pointer) self.builder.buildPtrToInt(cond, llvm_usize, "") else @@ -5787,7 +5787,7 @@ pub const FuncGen = struct { fn sliceOrArrayLenInBytes(fg: *FuncGen, ptr: *llvm.Value, ty: Type) *llvm.Value { const target = fg.dg.module.getTarget(); - const llvm_usize_ty = fg.context.intType(target.cpu.arch.ptrBitWidth()); + const llvm_usize_ty = fg.context.intType(target.ptrBitWidth()); switch (ty.ptrSize()) { .Slice => { const len = fg.builder.buildExtractValue(ptr, 1, ""); @@ -6085,7 +6085,7 @@ pub const FuncGen = struct { if (field_offset == 0) { return field_ptr; } - const llvm_usize_ty = self.context.intType(target.cpu.arch.ptrBitWidth()); + const llvm_usize_ty = self.context.intType(target.ptrBitWidth()); const field_ptr_int = self.builder.buildPtrToInt(field_ptr, llvm_usize_ty, ""); const base_ptr_int = self.builder.buildNUWSub(field_ptr_int, llvm_usize_ty.constInt(field_offset, .False), ""); @@ -8534,7 +8534,7 @@ pub const FuncGen = struct { const body_block = self.context.appendBasicBlock(self.llvm_func, "InlineMemsetBody"); const end_block = self.context.appendBasicBlock(self.llvm_func, 
"InlineMemsetEnd"); - const llvm_usize_ty = self.context.intType(target.cpu.arch.ptrBitWidth()); + const llvm_usize_ty = self.context.intType(target.ptrBitWidth()); const len = switch (ptr_ty.ptrSize()) { .Slice => self.builder.buildExtractValue(dest_slice, 1, ""), .One => llvm_usize_ty.constInt(ptr_ty.childType().arrayLen(), .False), @@ -10013,7 +10013,7 @@ pub const FuncGen = struct { fn valgrindMarkUndef(fg: *FuncGen, ptr: *llvm.Value, len: *llvm.Value) void { const VG_USERREQ__MAKE_MEM_UNDEFINED = 1296236545; const target = fg.dg.module.getTarget(); - const usize_llvm_ty = fg.context.intType(target.cpu.arch.ptrBitWidth()); + const usize_llvm_ty = fg.context.intType(target.ptrBitWidth()); const zero = usize_llvm_ty.constInt(0, .False); const req = usize_llvm_ty.constInt(VG_USERREQ__MAKE_MEM_UNDEFINED, .False); const ptr_as_usize = fg.builder.buildPtrToInt(ptr, usize_llvm_ty, ""); @@ -10033,7 +10033,7 @@ pub const FuncGen = struct { const target = fg.dg.module.getTarget(); if (!target_util.hasValgrindSupport(target)) return default_value; - const usize_llvm_ty = fg.context.intType(target.cpu.arch.ptrBitWidth()); + const usize_llvm_ty = fg.context.intType(target.ptrBitWidth()); const usize_alignment = @intCast(c_uint, Type.usize.abiSize(target)); const array_llvm_ty = usize_llvm_ty.arrayType(6); diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index cb8b5a787d..2536158b36 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -556,7 +556,7 @@ pub const DeclGen = struct { // TODO: Double check pointer sizes here. // shared pointers might be u32... const target = self.dg.getTarget(); - const width = @divExact(target.cpu.arch.ptrBitWidth(), 8); + const width = @divExact(target.ptrBitWidth(), 8); if (self.size % width != 0) { return self.dg.todo("misaligned pointer constants", .{}); } @@ -1160,7 +1160,7 @@ pub const DeclGen = struct { /// Create an integer type that represents 'usize'. fn sizeType(self: *DeclGen) !SpvType.Ref { - return try self.intType(.unsigned, self.getTarget().cpu.arch.ptrBitWidth()); + return try self.intType(.unsigned, self.getTarget().ptrBitWidth()); } /// Generate a union type, optionally with a known field. 
If the tag alignment is greater diff --git a/src/glibc.zig b/src/glibc.zig index 327e4f4bb9..1a8ce5ff15 100644 --- a/src/glibc.zig +++ b/src/glibc.zig @@ -378,7 +378,7 @@ fn start_asm_path(comp: *Compilation, arena: Allocator, basename: []const u8) ![ const is_ppc = arch == .powerpc or arch == .powerpc64 or arch == .powerpc64le; const is_aarch64 = arch == .aarch64 or arch == .aarch64_be; const is_sparc = arch == .sparc or arch == .sparcel or arch == .sparc64; - const is_64 = arch.ptrBitWidth() == 64; + const is_64 = comp.getTarget().ptrBitWidth() == 64; const s = path.sep_str; @@ -435,7 +435,6 @@ fn start_asm_path(comp: *Compilation, arena: Allocator, basename: []const u8) ![ fn add_include_dirs(comp: *Compilation, arena: Allocator, args: *std.ArrayList([]const u8)) error{OutOfMemory}!void { const target = comp.getTarget(); - const arch = target.cpu.arch; const opt_nptl: ?[]const u8 = if (target.os.tag == .linux) "nptl" else "htl"; const s = path.sep_str; @@ -444,11 +443,11 @@ fn add_include_dirs(comp: *Compilation, arena: Allocator, args: *std.ArrayList([ try args.append(try lib_path(comp, arena, lib_libc_glibc ++ "include")); if (target.os.tag == .linux) { - try add_include_dirs_arch(arena, args, arch, null, try lib_path(comp, arena, lib_libc_glibc ++ "sysdeps" ++ s ++ "unix" ++ s ++ "sysv" ++ s ++ "linux")); + try add_include_dirs_arch(arena, args, target, null, try lib_path(comp, arena, lib_libc_glibc ++ "sysdeps" ++ s ++ "unix" ++ s ++ "sysv" ++ s ++ "linux")); } if (opt_nptl) |nptl| { - try add_include_dirs_arch(arena, args, arch, nptl, try lib_path(comp, arena, lib_libc_glibc ++ "sysdeps")); + try add_include_dirs_arch(arena, args, target, nptl, try lib_path(comp, arena, lib_libc_glibc ++ "sysdeps")); } if (target.os.tag == .linux) { @@ -474,12 +473,12 @@ fn add_include_dirs(comp: *Compilation, arena: Allocator, args: *std.ArrayList([ try args.append("-I"); try args.append(try lib_path(comp, arena, lib_libc_glibc ++ "sysdeps" ++ s ++ "unix" ++ s ++ "sysv")); - try add_include_dirs_arch(arena, args, arch, null, try lib_path(comp, arena, lib_libc_glibc ++ "sysdeps" ++ s ++ "unix")); + try add_include_dirs_arch(arena, args, target, null, try lib_path(comp, arena, lib_libc_glibc ++ "sysdeps" ++ s ++ "unix")); try args.append("-I"); try args.append(try lib_path(comp, arena, lib_libc_glibc ++ "sysdeps" ++ s ++ "unix")); - try add_include_dirs_arch(arena, args, arch, null, try lib_path(comp, arena, lib_libc_glibc ++ "sysdeps")); + try add_include_dirs_arch(arena, args, target, null, try lib_path(comp, arena, lib_libc_glibc ++ "sysdeps")); try args.append("-I"); try args.append(try lib_path(comp, arena, lib_libc_glibc ++ "sysdeps" ++ s ++ "generic")); @@ -489,7 +488,7 @@ fn add_include_dirs(comp: *Compilation, arena: Allocator, args: *std.ArrayList([ try args.append("-I"); try args.append(try std.fmt.allocPrint(arena, "{s}" ++ s ++ "libc" ++ s ++ "include" ++ s ++ "{s}-{s}-{s}", .{ - comp.zig_lib_directory.path.?, @tagName(arch), @tagName(target.os.tag), @tagName(target.abi), + comp.zig_lib_directory.path.?, @tagName(target.cpu.arch), @tagName(target.os.tag), @tagName(target.abi), })); try args.append("-I"); @@ -508,15 +507,16 @@ fn add_include_dirs(comp: *Compilation, arena: Allocator, args: *std.ArrayList([ fn add_include_dirs_arch( arena: Allocator, args: *std.ArrayList([]const u8), - arch: std.Target.Cpu.Arch, + target: std.Target, opt_nptl: ?[]const u8, dir: []const u8, ) error{OutOfMemory}!void { + const arch = target.cpu.arch; const is_x86 = arch == .x86 or arch == .x86_64; const 
is_aarch64 = arch == .aarch64 or arch == .aarch64_be; const is_ppc = arch == .powerpc or arch == .powerpc64 or arch == .powerpc64le; const is_sparc = arch == .sparc or arch == .sparcel or arch == .sparc64; - const is_64 = arch.ptrBitWidth() == 64; + const is_64 = target.ptrBitWidth() == 64; const s = path.sep_str; diff --git a/src/link/Coff.zig b/src/link/Coff.zig index 01f18a73b3..62a208406e 100644 --- a/src/link/Coff.zig +++ b/src/link/Coff.zig @@ -245,7 +245,7 @@ pub fn openPath(allocator: Allocator, sub_path: []const u8, options: link.Option } pub fn createEmpty(gpa: Allocator, options: link.Options) !*Coff { - const ptr_width: PtrWidth = switch (options.target.cpu.arch.ptrBitWidth()) { + const ptr_width: PtrWidth = switch (options.target.ptrBitWidth()) { 0...32 => .p32, 33...64 => .p64, else => return error.UnsupportedCOFFArchitecture, diff --git a/src/link/Coff/lld.zig b/src/link/Coff/lld.zig index 78eb2d39e5..656b0f9a97 100644 --- a/src/link/Coff/lld.zig +++ b/src/link/Coff/lld.zig @@ -199,7 +199,7 @@ pub fn linkWithLLD(self: *Coff, comp: *Compilation, prog_node: *std.Progress.Nod } else if (target.cpu.arch == .x86_64) { try argv.append("-MACHINE:X64"); } else if (target.cpu.arch.isARM()) { - if (target.cpu.arch.ptrBitWidth() == 32) { + if (target.ptrBitWidth() == 32) { try argv.append("-MACHINE:ARM"); } else { try argv.append("-MACHINE:ARM64"); diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig index 1a064049fc..1d358a29ab 100644 --- a/src/link/Dwarf.zig +++ b/src/link/Dwarf.zig @@ -260,7 +260,7 @@ pub const DeclState = struct { .Pointer => { if (ty.isSlice()) { // Slices are structs: struct { .ptr = *, .len = N } - const ptr_bits = target.cpu.arch.ptrBitWidth(); + const ptr_bits = target.ptrBitWidth(); const ptr_bytes = @intCast(u8, @divExact(ptr_bits, 8)); // DW.AT.structure_type try dbg_info_buffer.ensureUnusedCapacity(2); @@ -751,7 +751,7 @@ pub const DeclState = struct { .memory, .linker_load, => { - const ptr_width = @intCast(u8, @divExact(target.cpu.arch.ptrBitWidth(), 8)); + const ptr_width = @intCast(u8, @divExact(target.ptrBitWidth(), 8)); try dbg_info.ensureUnusedCapacity(2 + ptr_width); dbg_info.appendSliceAssumeCapacity(&[2]u8{ // DW.AT.location, DW.FORM.exprloc 1 + ptr_width + @boolToInt(is_ptr), @@ -928,7 +928,7 @@ const min_nop_size = 2; const ideal_factor = 3; pub fn init(allocator: Allocator, bin_file: *File, target: std.Target) Dwarf { - const ptr_width: PtrWidth = switch (target.cpu.arch.ptrBitWidth()) { + const ptr_width: PtrWidth = switch (target.ptrBitWidth()) { 0...32 => .p32, 33...64 => .p64, else => unreachable, diff --git a/src/link/Elf.zig b/src/link/Elf.zig index f90f4ebd46..9fa48a9e62 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -273,7 +273,7 @@ pub fn openPath(allocator: Allocator, sub_path: []const u8, options: link.Option } pub fn createEmpty(gpa: Allocator, options: link.Options) !*Elf { - const ptr_width: PtrWidth = switch (options.target.cpu.arch.ptrBitWidth()) { + const ptr_width: PtrWidth = switch (options.target.ptrBitWidth()) { 0...32 => .p32, 33...64 => .p64, else => return error.UnsupportedELFArchitecture, @@ -474,7 +474,7 @@ pub fn populateMissingMetadata(self: *Elf) !void { if (self.phdr_table_load_index == null) { self.phdr_table_load_index = @intCast(u16, self.program_headers.items.len); // TODO Same as for GOT - const phdr_addr: u64 = if (self.base.options.target.cpu.arch.ptrBitWidth() >= 32) 0x1000000 else 0x1000; + const phdr_addr: u64 = if (self.base.options.target.ptrBitWidth() >= 32) 0x1000000 else 0x1000; const 
p_align = self.page_size; try self.program_headers.append(gpa, .{ .p_type = elf.PT_LOAD, @@ -521,7 +521,7 @@ pub fn populateMissingMetadata(self: *Elf) !void { // TODO instead of hard coding the vaddr, make a function to find a vaddr to put things at. // we'll need to re-use that function anyway, in case the GOT grows and overlaps something // else in virtual memory. - const got_addr: u32 = if (self.base.options.target.cpu.arch.ptrBitWidth() >= 32) 0x4000000 else 0x8000; + const got_addr: u32 = if (self.base.options.target.ptrBitWidth() >= 32) 0x4000000 else 0x8000; try self.program_headers.append(gpa, .{ .p_type = elf.PT_LOAD, .p_offset = off, @@ -544,7 +544,7 @@ pub fn populateMissingMetadata(self: *Elf) !void { const off = self.findFreeSpace(file_size, p_align); log.debug("found PT_LOAD RO free space 0x{x} to 0x{x}", .{ off, off + file_size }); // TODO Same as for GOT - const rodata_addr: u32 = if (self.base.options.target.cpu.arch.ptrBitWidth() >= 32) 0xc000000 else 0xa000; + const rodata_addr: u32 = if (self.base.options.target.ptrBitWidth() >= 32) 0xc000000 else 0xa000; try self.program_headers.append(gpa, .{ .p_type = elf.PT_LOAD, .p_offset = off, @@ -567,7 +567,7 @@ pub fn populateMissingMetadata(self: *Elf) !void { const off = self.findFreeSpace(file_size, p_align); log.debug("found PT_LOAD RW free space 0x{x} to 0x{x}", .{ off, off + file_size }); // TODO Same as for GOT - const rwdata_addr: u32 = if (self.base.options.target.cpu.arch.ptrBitWidth() >= 32) 0x10000000 else 0xc000; + const rwdata_addr: u32 = if (self.base.options.target.ptrBitWidth() >= 32) 0x10000000 else 0xc000; try self.program_headers.append(gpa, .{ .p_type = elf.PT_LOAD, .p_offset = off, @@ -3180,7 +3180,7 @@ fn ptrWidthBytes(self: Elf) u8 { /// Does not necessarily match `ptrWidthBytes` for example can be 2 bytes /// in a 32-bit ELF file. 
fn archPtrWidthBytes(self: Elf) u8 { - return @intCast(u8, self.base.options.target.cpu.arch.ptrBitWidth() / 8); + return @intCast(u8, self.base.options.target.ptrBitWidth() / 8); } fn progHeaderTo32(phdr: elf.Elf64_Phdr) elf.Elf32_Phdr { diff --git a/src/link/Elf/Atom.zig b/src/link/Elf/Atom.zig index 70be5abbca..b437be3282 100644 --- a/src/link/Elf/Atom.zig +++ b/src/link/Elf/Atom.zig @@ -59,7 +59,7 @@ pub fn getOffsetTableAddress(self: Atom, elf_file: *Elf) u64 { const sym_index = self.getSymbolIndex().?; const got_entry_index = elf_file.got_table.lookup.get(sym_index).?; const target = elf_file.base.options.target; - const ptr_bits = target.cpu.arch.ptrBitWidth(); + const ptr_bits = target.ptrBitWidth(); const ptr_bytes: u64 = @divExact(ptr_bits, 8); const got = elf_file.program_headers.items[elf_file.phdr_got_index.?]; return got.p_vaddr + got_entry_index * ptr_bytes; diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig index bef06d1c87..a785f084cb 100644 --- a/src/link/Plan9.zig +++ b/src/link/Plan9.zig @@ -183,7 +183,7 @@ pub fn defaultBaseAddrs(arch: std.Target.Cpu.Arch) Bases { pub fn createEmpty(gpa: Allocator, options: link.Options) !*Plan9 { if (options.use_llvm) return error.LLVMBackendDoesNotSupportPlan9; - const sixtyfour_bit: bool = switch (options.target.cpu.arch.ptrBitWidth()) { + const sixtyfour_bit: bool = switch (options.target.ptrBitWidth()) { 0...32 => false, 33...64 => true, else => return error.UnsupportedP9Architecture, diff --git a/src/mingw.zig b/src/mingw.zig index a85645e80b..318556376e 100644 --- a/src/mingw.zig +++ b/src/mingw.zig @@ -265,7 +265,7 @@ fn add_cc_args( }); const target = comp.getTarget(); - if (target.cpu.arch.isARM() and target.cpu.arch.ptrBitWidth() == 32) { + if (target.cpu.arch.isARM() and target.ptrBitWidth() == 32) { try args.append("-mfpu=vfp"); } diff --git a/src/musl.zig b/src/musl.zig index 4a3f1e6dde..a4e91288f5 100644 --- a/src/musl.zig +++ b/src/musl.zig @@ -194,7 +194,7 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile, prog_node: *std.Progr const arch_define = try std.fmt.allocPrint(arena, "-DARCH_{s}", .{ @tagName(target.cpu.arch), }); - const clang_argv: []const []const u8 = if (target.cpu.arch.ptrBitWidth() == 64) + const clang_argv: []const []const u8 = if (target.ptrBitWidth() == 64) &[_][]const u8{ "-DPTR64", arch_define } else &[_][]const u8{arch_define}; diff --git a/src/type.zig b/src/type.zig index aea2d88571..4023b5ba66 100644 --- a/src/type.zig +++ b/src/type.zig @@ -2936,7 +2936,7 @@ pub const Type = extern union { .manyptr_const_u8_sentinel_0, .@"anyframe", .anyframe_T, - => return AbiAlignmentAdvanced{ .scalar = @divExact(target.cpu.arch.ptrBitWidth(), 8) }, + => return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }, .c_char => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.char) }, .c_short => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.short) }, @@ -3007,7 +3007,7 @@ pub const Type = extern union { const child_type = ty.optionalChild(&buf); switch (child_type.zigTypeTag()) { - .Pointer => return AbiAlignmentAdvanced{ .scalar = @divExact(target.cpu.arch.ptrBitWidth(), 8) }, + .Pointer => return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }, .ErrorSet => return abiAlignmentAdvanced(Type.anyerror, target, strat), .NoReturn => return AbiAlignmentAdvanced{ .scalar = 0 }, else => {}, @@ -3069,7 +3069,7 @@ pub const Type = extern union { // We'll guess "pointer-aligned", if the struct has an // underaligned pointer field then 
some allocations // might require explicit alignment. - return AbiAlignmentAdvanced{ .scalar = @divExact(target.cpu.arch.ptrBitWidth(), 8) }; + return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }; } _ = try sema.resolveTypeFields(ty); } @@ -3195,7 +3195,7 @@ pub const Type = extern union { // We'll guess "pointer-aligned", if the union has an // underaligned pointer field then some allocations // might require explicit alignment. - return AbiAlignmentAdvanced{ .scalar = @divExact(target.cpu.arch.ptrBitWidth(), 8) }; + return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }; } _ = try sema.resolveTypeFields(ty); } @@ -3419,17 +3419,17 @@ pub const Type = extern union { .manyptr_u8, .manyptr_const_u8, .manyptr_const_u8_sentinel_0, - => return AbiSizeAdvanced{ .scalar = @divExact(target.cpu.arch.ptrBitWidth(), 8) }, + => return AbiSizeAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }, .const_slice, .mut_slice, .const_slice_u8, .const_slice_u8_sentinel_0, - => return AbiSizeAdvanced{ .scalar = @divExact(target.cpu.arch.ptrBitWidth(), 8) * 2 }, + => return AbiSizeAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) * 2 }, .pointer => switch (ty.castTag(.pointer).?.data.size) { - .Slice => return AbiSizeAdvanced{ .scalar = @divExact(target.cpu.arch.ptrBitWidth(), 8) * 2 }, - else => return AbiSizeAdvanced{ .scalar = @divExact(target.cpu.arch.ptrBitWidth(), 8) }, + .Slice => return AbiSizeAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) * 2 }, + else => return AbiSizeAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }, }, .c_char => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.char) }, @@ -3702,20 +3702,20 @@ pub const Type = extern union { .usize, .@"anyframe", .anyframe_T, - => return target.cpu.arch.ptrBitWidth(), + => return target.ptrBitWidth(), .const_slice, .mut_slice, - => return target.cpu.arch.ptrBitWidth() * 2, + => return target.ptrBitWidth() * 2, .const_slice_u8, .const_slice_u8_sentinel_0, - => return target.cpu.arch.ptrBitWidth() * 2, + => return target.ptrBitWidth() * 2, .optional_single_const_pointer, .optional_single_mut_pointer, => { - return target.cpu.arch.ptrBitWidth(); + return target.ptrBitWidth(); }, .single_const_pointer, @@ -3725,18 +3725,18 @@ pub const Type = extern union { .c_const_pointer, .c_mut_pointer, => { - return target.cpu.arch.ptrBitWidth(); + return target.ptrBitWidth(); }, .pointer => switch (ty.castTag(.pointer).?.data.size) { - .Slice => return target.cpu.arch.ptrBitWidth() * 2, - else => return target.cpu.arch.ptrBitWidth(), + .Slice => return target.ptrBitWidth() * 2, + else => return target.ptrBitWidth(), }, .manyptr_u8, .manyptr_const_u8, .manyptr_const_u8_sentinel_0, - => return target.cpu.arch.ptrBitWidth(), + => return target.ptrBitWidth(), .c_char => return target.c_type_bit_size(.char), .c_short => return target.c_type_bit_size(.short), @@ -4624,8 +4624,8 @@ pub const Type = extern union { .i64 => return .{ .signedness = .signed, .bits = 64 }, .u128 => return .{ .signedness = .unsigned, .bits = 128 }, .i128 => return .{ .signedness = .signed, .bits = 128 }, - .usize => return .{ .signedness = .unsigned, .bits = target.cpu.arch.ptrBitWidth() }, - .isize => return .{ .signedness = .signed, .bits = target.cpu.arch.ptrBitWidth() }, + .usize => return .{ .signedness = .unsigned, .bits = target.ptrBitWidth() }, + .isize => return .{ .signedness = .signed, .bits = target.ptrBitWidth() }, .c_char => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.char) }, 
.c_short => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.short) }, .c_ushort => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ushort) }, diff --git a/src/value.zig b/src/value.zig index 93edf60eb2..613c3d9ca6 100644 --- a/src/value.zig +++ b/src/value.zig @@ -1922,7 +1922,7 @@ pub const Value = extern union { .variable, .eu_payload_ptr, .opt_payload_ptr, - => return target.cpu.arch.ptrBitWidth(), + => return target.ptrBitWidth(), else => { var buffer: BigIntSpace = undefined; -- cgit v1.2.3 From 0c438ab61635d1354e454b918a06341cff4821d5 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Mon, 29 May 2023 21:45:25 -0400 Subject: x86_64: hotfix for crash during in-memory coercion of large type Unblocks #15768 Closes #15904 --- src/arch/x86_64/CodeGen.zig | 49 ++++++++++++++++++++------------------- test/behavior/maximum_minimum.zig | 1 - 2 files changed, 25 insertions(+), 25 deletions(-) (limited to 'src/arch') diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 05d5da107f..59c6551de1 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -10175,37 +10175,38 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void { if (dst_ty.isAbiInt()) dst_ty.intInfo(self.target.*).signedness else .unsigned; const src_signedness = if (src_ty.isAbiInt()) src_ty.intInfo(self.target.*).signedness else .unsigned; + if (dst_signedness == src_signedness) break :result dst_mcv; + const abi_size = @intCast(u16, dst_ty.abiSize(self.target.*)); const bit_size = @intCast(u16, dst_ty.bitSize(self.target.*)); - const dst_limbs_len = math.divCeil(u16, bit_size, 64) catch unreachable; - if (dst_signedness != src_signedness and abi_size * 8 > bit_size) { - const high_reg = if (dst_mcv.isRegister()) - dst_mcv.getReg().? - else - try self.copyToTmpRegister( - Type.usize, - dst_mcv.address().offset((dst_limbs_len - 1) * 8).deref(), - ); - const high_lock = self.register_manager.lockReg(high_reg); - defer if (high_lock) |lock| self.register_manager.unlockReg(lock); - - var high_pl = Type.Payload.Bits{ - .base = .{ .tag = switch (dst_signedness) { - .signed => .int_signed, - .unsigned => .int_unsigned, - } }, - .data = bit_size % 64, - }; - const high_ty = Type.initPayload(&high_pl.base); + if (abi_size * 8 <= bit_size) break :result dst_mcv; - try self.truncateRegister(high_ty, high_reg); - if (!dst_mcv.isRegister()) try self.genCopy( + const dst_limbs_len = math.divCeil(i32, bit_size, 64) catch unreachable; + const high_reg = if (dst_mcv.isRegister()) + dst_mcv.getReg().? 
+ else + try self.copyToTmpRegister( Type.usize, dst_mcv.address().offset((dst_limbs_len - 1) * 8).deref(), ); + const high_lock = self.register_manager.lockReg(high_reg); + defer if (high_lock) |lock| self.register_manager.unlockReg(lock); + + var high_pl = Type.Payload.Bits{ + .base = .{ .tag = switch (dst_signedness) { + .signed => .int_signed, + .unsigned => .int_unsigned, + } }, + .data = bit_size % 64, + }; + const high_ty = Type.initPayload(&high_pl.base); + try self.truncateRegister(high_ty, high_reg); + if (!dst_mcv.isRegister()) try self.genCopy( + Type.usize, + dst_mcv.address().offset((dst_limbs_len - 1) * 8).deref(), + .{ .register = high_reg }, + ); break :result dst_mcv; }; return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); diff --git a/test/behavior/maximum_minimum.zig b/test/behavior/maximum_minimum.zig index 2750dbdb6f..932a904421 100644 --- a/test/behavior/maximum_minimum.zig +++ b/test/behavior/maximum_minimum.zig @@ -197,7 +197,6 @@ test "@min/@max notices vector bounds" { test "@min/@max on comptime_int" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO -- cgit v1.2.3 From 00dedabc41322bc2b4978ddc39ee17b72193f194 Mon Sep 17 00:00:00 2001 From: Luuk de Gram Date: Sun, 21 May 2023 15:16:48 +0200 Subject: wasm: `memcpy` support elem abi-size > 1 Previously it was incorrectly assumed that all memory copies generated by the `memcpy` AIR instruction had an element size of 1 byte. However, this would result in miscompilations for pointers to arrays where the element size of the array was larger than 1 byte. We now correctly calculate this size.
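As an illustration of the corrected length rule, here is a minimal sketch in plain Zig (separate from the compiler source; the helper name is hypothetical): a copy of N elements must cover N times the element's ABI size in bytes, which the slice case now computes at runtime and the pointer-to-array case folds into an immediate.

    const std = @import("std");

    // Illustrative only: the byte count a lowered memcpy must use once
    // the element ABI size is taken into account.
    fn copyByteLen(elem_count: u32, elem_abi_size: u32) u32 {
        return elem_count * elem_abi_size;
    }

    test "copy length scales with element size" {
        // e.g. copying a slice of three u32 values moves 12 bytes, not 3
        try std.testing.expectEqual(@as(u32, 12), copyByteLen(3, 4));
    }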
--- src/arch/wasm/CodeGen.zig | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) (limited to 'src/arch') diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index bdbe9640c2..7872d89e9b 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -5295,11 +5295,23 @@ fn airMemcpy(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const bin_op = func.air.instructions.items(.data)[inst].bin_op; const dst = try func.resolveInst(bin_op.lhs); const dst_ty = func.air.typeOf(bin_op.lhs); + const ptr_elem_ty = dst_ty.childType(); const src = try func.resolveInst(bin_op.rhs); const src_ty = func.air.typeOf(bin_op.rhs); const len = switch (dst_ty.ptrSize()) { - .Slice => try func.sliceLen(dst), - .One => @as(WValue, .{ .imm32 = @intCast(u32, dst_ty.childType().arrayLen()) }), + .Slice => blk: { + const slice_len = try func.sliceLen(dst); + if (ptr_elem_ty.abiSize(func.target) != 1) { + try func.emitWValue(slice_len); + try func.emitWValue(.{ .imm32 = @intCast(u32, ptr_elem_ty.abiSize(func.target)) }); + try func.addTag(.i32_mul); + try func.addLabel(.local_set, slice_len.local.value); + } + break :blk slice_len; + }, + .One => @as(WValue, .{ + .imm32 = @intCast(u32, ptr_elem_ty.arrayLen() * ptr_elem_ty.childType().abiSize(func.target)), + }), .C, .Many => unreachable, }; const dst_ptr = try func.sliceOrArrayPtr(dst, dst_ty); -- cgit v1.2.3 From 49fddbf4c11d4ea493c1e5b6176052aeacc1ad10 Mon Sep 17 00:00:00 2001 From: Luuk de Gram Date: Mon, 22 May 2023 19:06:38 +0200 Subject: wasm: `union_init` correctly store the tag Previously we would only store the payload, but not the actual tag that was set. This meant miscompilations where reading the union would return an incorrect tag value. This also adds a tiny optimization for payloads which are not `byRef`, by directly storing them at an offset rather than first calculating a pointer to an offset.
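The offsets this change uses follow the union layout rule, which can be summarized with a small sketch (plain Zig, simplified; these helpers are illustrative rather than compiler API): when the tag's alignment is at least the payload's, the tag sits at offset 0 and the payload right after it, otherwise the payload comes first and the tag lives at `payload_size`.

    const std = @import("std");

    fn tagOffset(tag_align: u32, payload_align: u32, payload_size: u32) u32 {
        return if (tag_align >= payload_align) 0 else payload_size;
    }

    fn payloadOffset(tag_align: u32, payload_align: u32, tag_size: u32) u32 {
        return if (tag_align >= payload_align) tag_size else 0;
    }

    test "tag placement follows alignment" {
        try std.testing.expectEqual(@as(u32, 0), tagOffset(8, 4, 16));
        try std.testing.expectEqual(@as(u32, 16), tagOffset(4, 8, 16));
        try std.testing.expectEqual(@as(u32, 8), payloadOffset(8, 4, 8));
    }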
--- src/arch/wasm/CodeGen.zig | 40 +++++++++++++++++++++++++++++++++------- 1 file changed, 33 insertions(+), 7 deletions(-) (limited to 'src/arch') diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index 7872d89e9b..ff09919492 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -4982,26 +4982,52 @@ fn airUnionInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const result = result: { const union_ty = func.air.typeOfIndex(inst); const layout = union_ty.unionGetLayout(func.target); + const union_obj = union_ty.cast(Type.Payload.Union).?.data; + const field = union_obj.fields.values()[extra.field_index]; + const field_name = union_obj.fields.keys()[extra.field_index]; + + const tag_int = blk: { + const tag_ty = union_ty.unionTagTypeHypothetical(); + const enum_field_index = tag_ty.enumFieldIndex(field_name).?; + var tag_val_payload: Value.Payload.U32 = .{ + .base = .{ .tag = .enum_field_index }, + .data = @intCast(u32, enum_field_index), + }; + const tag_val = Value.initPayload(&tag_val_payload.base); + break :blk try func.lowerConstant(tag_val, tag_ty); + }; if (layout.payload_size == 0) { if (layout.tag_size == 0) { break :result WValue{ .none = {} }; } assert(!isByRef(union_ty, func.target)); - break :result WValue{ .imm32 = extra.field_index }; + break :result tag_int; } assert(isByRef(union_ty, func.target)); const result_ptr = try func.allocStack(union_ty); const payload = try func.resolveInst(extra.init); - const union_obj = union_ty.cast(Type.Payload.Union).?.data; - assert(union_obj.haveFieldTypes()); - const field = union_obj.fields.values()[extra.field_index]; + if (layout.tag_align >= layout.payload_align) { - const payload_ptr = try func.buildPointerOffset(result_ptr, layout.tag_size, .new); - try func.store(payload_ptr, payload, field.ty, 0); + if (isByRef(field.ty, func.target)) { + const payload_ptr = try func.buildPointerOffset(result_ptr, layout.tag_size, .new); + try func.store(payload_ptr, payload, field.ty, 0); + } else { + try func.store(result_ptr, payload, field.ty, @intCast(u32, layout.tag_size)); + } + + if (layout.tag_size > 0) { + try func.store(result_ptr, tag_int, union_obj.tag_ty, 0); + } } else { try func.store(result_ptr, payload, field.ty, 0); + if (layout.tag_size > 0) { + try func.store( + result_ptr, + tag_int, + union_obj.tag_ty, + @intCast(u32, layout.payload_size), + ); + } } break :result result_ptr; }; -- cgit v1.2.3 From 7cfc44d86ff56fd760eaf0781cc3ba0650267af9 Mon Sep 17 00:00:00 2001 From: Luuk de Gram Date: Tue, 23 May 2023 19:55:33 +0200 Subject: wasm: implement `struct_field_val` for packed unions We currently have `isByRef` return true for any type of union, including packed unions. This means we can simply load it from the data section as the exact type we want. In the future we can optimize it so it works similarly to packed structs below 64 bits which do not get stored in the data section and are not passed by ref.
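For background on why a plain load of the field type is sufficient: every field of a packed union starts at bit offset 0, so reading any field is just a reinterpretation of the same storage. A standalone Zig illustration (ordinary user code, not compiler internals):

    const std = @import("std");

    const U = packed union {
        a: u32,
        b: f32,
    };

    test "packed union fields alias the same storage" {
        const u = U{ .a = 0x3f800000 }; // bit pattern of 1.0 as an f32
        try std.testing.expectEqual(@as(f32, 1.0), u.b);
    }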
--- src/arch/wasm/CodeGen.zig | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'src/arch') diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index ff09919492..4514311160 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -3617,7 +3617,6 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { .Packed => switch (struct_ty.zigTypeTag()) { .Struct => result: { const struct_obj = struct_ty.castTag(.@"struct").?.data; - assert(struct_obj.layout == .Packed); const offset = struct_obj.packedFieldBitOffset(func.target, field_index); const backing_ty = struct_obj.backing_int_ty; const wasm_bits = toWasmBits(backing_ty.intInfo(func.target).bits) orelse { @@ -3661,7 +3660,10 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const truncated = try func.trunc(shifted_value, field_ty, backing_ty); break :result try truncated.toLocal(func, field_ty); }, - .Union => return func.fail("TODO: airStructFieldVal for packed unions", .{}), + .Union => result: { + const val = try func.load(operand, field_ty, 0); + break :result try val.toLocal(func, field_ty); + }, else => unreachable, }, else => result: { @@ -5032,7 +5034,7 @@ fn airUnionInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { break :result result_ptr; }; - func.finishAir(inst, result, &.{extra.init}); + return func.finishAir(inst, result, &.{extra.init}); } fn airPrefetch(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { -- cgit v1.2.3 From 3c72b4d25eb14bbdc03503c368a750b1ca1b7c28 Mon Sep 17 00:00:00 2001 From: Luuk de Gram Date: Thu, 25 May 2023 17:52:39 +0200 Subject: wasm: support and optimize for all packed unions For packed unions whose ABI size is less than or equal to 8 bytes, we store the value directly and don't pass it by reference. This means that when retrieving the field, we will perform shifts and bitcasts to ensure the correct type is returned. For larger packed unions, we either allocate a new stack value based on the field type when the field type is also passed by reference, or load it directly into a local if it's not. --- src/arch/wasm/CodeGen.zig | 55 ++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 52 insertions(+), 3 deletions(-) (limited to 'src/arch') diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index 4514311160..74eb075eab 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -1716,7 +1716,14 @@ fn isByRef(ty: Type, target: std.Target) bool { .Array, .Frame, .Union, - => return ty.hasRuntimeBitsIgnoreComptime(), + => { + if (ty.castTag(.@"union")) |union_ty| { + if (union_ty.data.layout == .Packed) { + return ty.abiSize(target) > 8; + } + } + return ty.hasRuntimeBitsIgnoreComptime(); + }, .Struct => { if (ty.castTag(.@"struct")) |struct_ty| { const struct_obj = struct_ty.data; @@ -3131,6 +3138,14 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue { val.writeToMemory(ty, func.bin_file.base.options.module.?, &buf) catch unreachable; return func.storeSimdImmd(buf); }, + .Union => { + // in this case we have a packed union which will not be passed by reference.
+ const union_ty = ty.cast(Type.Payload.Union).?.data; + const union_obj = val.castTag(.@"union").?.data; + const field_index = ty.unionTagFieldIndex(union_obj.tag, func.bin_file.base.options.module.?).?; + const field_ty = union_ty.fields.values()[field_index].ty; + return func.lowerConstant(union_obj.val, field_ty); + }, else => |zig_type| return func.fail("Wasm TODO: LowerConstant for zigTypeTag {}", .{zig_type}), } } @@ -3661,8 +3676,42 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { break :result try truncated.toLocal(func, field_ty); }, .Union => result: { - const val = try func.load(operand, field_ty, 0); - break :result try val.toLocal(func, field_ty); + if (isByRef(struct_ty, func.target)) { + if (!isByRef(field_ty, func.target)) { + const val = try func.load(operand, field_ty, 0); + break :result try val.toLocal(func, field_ty); + } else { + const new_stack_val = try func.allocStack(field_ty); + try func.store(new_stack_val, operand, field_ty, 0); + break :result new_stack_val; + } + } + + var payload: Type.Payload.Bits = .{ + .base = .{ .tag = .int_unsigned }, + .data = @intCast(u16, struct_ty.bitSize(func.target)), + }; + const union_int_type = Type.initPayload(&payload.base); + if (field_ty.zigTypeTag() == .Float) { + var int_payload: Type.Payload.Bits = .{ + .base = .{ .tag = .int_unsigned }, + .data = @intCast(u16, field_ty.bitSize(func.target)), + }; + const int_type = Type.initPayload(&int_payload.base); + const truncated = try func.trunc(operand, int_type, union_int_type); + const bitcasted = try func.bitcast(field_ty, int_type, truncated); + break :result try bitcasted.toLocal(func, field_ty); + } else if (field_ty.isPtrAtRuntime()) { + var int_payload: Type.Payload.Bits = .{ + .base = .{ .tag = .int_unsigned }, + .data = @intCast(u16, field_ty.bitSize(func.target)), + }; + const int_type = Type.initPayload(&int_payload.base); + const truncated = try func.trunc(operand, int_type, union_int_type); + break :result try truncated.toLocal(func, field_ty); + } + const truncated = try func.trunc(operand, field_ty, union_int_type); + break :result try truncated.toLocal(func, field_ty); }, else => unreachable, }, -- cgit v1.2.3 From ffa89d3b8370377b56be594650c0ea73f225c926 Mon Sep 17 00:00:00 2001 From: Luuk de Gram Date: Fri, 26 May 2023 17:30:51 +0200 Subject: wasm: `UnwrapErrUnionPayloadPtr` ensure ptr ret When the payload is zero-sized we must ensure a valid pointer is still returned for the ptr variation of the instruction. This is because it's valid to have a pointer to a zero-sized value. In such a case, we simply return the operand.
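The invariant at play can be shown as a tiny standalone Zig test (illustrative user code, unrelated to the codegen internals): a pointer to a zero-bit value is a legal, usable value even though it carries no data, so the ptr variant must always produce one.

    test "pointers to zero-sized values are valid" {
        var v: void = {};
        const p: *void = &v;
        p.* = {}; // no data moves, but the pointer itself must be usable
    }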
--- src/arch/wasm/CodeGen.zig | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) (limited to 'src/arch') diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index 74eb075eab..1cd282f5f7 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -3161,7 +3161,7 @@ fn storeSimdImmd(func: *CodeGen, value: [16]u8) !WValue { fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue { switch (ty.zigTypeTag()) { .Bool, .ErrorSet => return WValue{ .imm32 = 0xaaaaaaaa }, - .Int => switch (ty.intInfo(func.target).bits) { + .Int, .Enum => switch (ty.intInfo(func.target).bits) { 0...32 => return WValue{ .imm32 = 0xaaaaaaaa }, 33...64 => return WValue{ .imm64 = 0xaaaaaaaaaaaaaaaa }, else => unreachable, @@ -3958,7 +3958,12 @@ fn airUnwrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: boo const payload_ty = err_ty.errorUnionPayload(); const result = result: { - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) break :result WValue{ .none = {} }; + if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (op_is_ptr) { + break :result func.reuseOperand(ty_op.operand, operand); + } + break :result WValue{ .none = {} }; + } const pl_offset = @intCast(u32, errUnionPayloadOffset(payload_ty, func.target)); if (op_is_ptr or isByRef(payload_ty, func.target)) { -- cgit v1.2.3 From 969f9211622b3f2d296a6f51449605d65b66bb31 Mon Sep 17 00:00:00 2001 From: Luuk de Gram Date: Sat, 27 May 2023 15:11:53 +0200 Subject: wasm: `ptr_elem_val` use pointer type for local After calculating the element's address, ensure the resulting address is stored in a local with the correct type. Previously it would incorrectly use the element's type, which could for example be a float, and would therefore generate invalid WebAssembly code. This change also introduces a more robust `store` function.
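The reworked `store` dispatches on the value's ABI size; a simplified sketch of that dispatch for integers and floats follows (plain Zig; the enum and helper are illustrative, inferred from the diff below): values up to 8 bytes use a single wasm store instruction, 9-16 byte values such as `u128` are written as two `u64` halves, and anything larger falls back to a memcpy.

    const std = @import("std");

    const StoreStrategy = enum { direct, two_u64_halves, memcpy };

    fn storeStrategy(abi_size: u64) StoreStrategy {
        if (abi_size <= 8) return .direct; // single wasm store
        if (abi_size <= 16) return .two_u64_halves; // lsb then msb
        return .memcpy;
    }

    test "store dispatch follows ABI size" {
        try std.testing.expectEqual(StoreStrategy.direct, storeStrategy(4));
        try std.testing.expectEqual(StoreStrategy.two_u64_halves, storeStrategy(16));
        try std.testing.expectEqual(StoreStrategy.memcpy, storeStrategy(32));
    }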
--- src/arch/wasm/CodeGen.zig | 32 +++++++++++++++++++------------- 1 file changed, 19 insertions(+), 13 deletions(-) (limited to 'src/arch') diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index 1cd282f5f7..2a8e1668e8 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -2318,6 +2318,7 @@ fn airStore(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerError!void { assert(!(lhs != .stack and rhs == .stack)); + const abi_size = ty.abiSize(func.target); switch (ty.zigTypeTag()) { .ErrorUnion => { const pl_ty = ty.errorUnionPayload(); @@ -2325,7 +2326,7 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE return func.store(lhs, rhs, Type.anyerror, 0); } - const len = @intCast(u32, ty.abiSize(func.target)); + const len = @intCast(u32, abi_size); return func.memcpy(lhs, rhs, .{ .imm32 = len }); }, .Optional => { @@ -2341,16 +2342,16 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE return func.store(lhs, rhs, Type.anyerror, 0); } - const len = @intCast(u32, ty.abiSize(func.target)); + const len = @intCast(u32, abi_size); return func.memcpy(lhs, rhs, .{ .imm32 = len }); }, .Struct, .Array, .Union => if (isByRef(ty, func.target)) { - const len = @intCast(u32, ty.abiSize(func.target)); + const len = @intCast(u32, abi_size); return func.memcpy(lhs, rhs, .{ .imm32 = len }); }, .Vector => switch (determineSimdStoreStrategy(ty, func.target)) { .unrolled => { - const len = @intCast(u32, ty.abiSize(func.target)); + const len = @intCast(u32, abi_size); return func.memcpy(lhs, rhs, .{ .imm32 = len }); }, .direct => { @@ -2382,7 +2383,7 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE return; } }, - .Int => if (ty.intInfo(func.target).bits > 64) { + .Int, .Float => if (abi_size > 8 and abi_size <= 16) { try func.emitWValue(lhs); const lsb = try func.load(rhs, Type.u64, 0); try func.store(.{ .stack = {} }, lsb, Type.u64, 0 + lhs.offset()); @@ -2391,8 +2392,15 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE const msb = try func.load(rhs, Type.u64, 8); try func.store(.{ .stack = {} }, msb, Type.u64, 8 + lhs.offset()); return; + } else if (abi_size > 16) { + try func.memcpy(lhs, rhs, .{ .imm32 = @intCast(u32, ty.abiSize(func.target)) }); + }, + else => if (abi_size > 8) { + return func.fail("TODO: `store` for type `{}` with abisize `{d}`", .{ + ty.fmt(func.bin_file.base.options.module.?), + abi_size, + }); }, - else => {}, } try func.emitWValue(lhs); // In this case we're actually interested in storing the stack position @@ -2400,11 +2408,9 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE try func.lowerToStack(rhs); const valtype = typeToValtype(ty, func.target); - const abi_size = @intCast(u8, ty.abiSize(func.target)); - const opcode = buildOpcode(.{ .valtype1 = valtype, - .width = abi_size * 8, + .width = @intCast(u8, abi_size * 8), .op = .store, }); @@ -3960,7 +3966,7 @@ fn airUnwrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: boo const result = result: { if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { if (op_is_ptr) { - break :result WValue{ .imm32 = 0 }; + break :result func.reuseOperand(ty_op.operand, operand); } break :result WValue{ .none = {} }; } @@ -4453,7 +4459,7 @@ fn airPtrElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { try func.addTag(.i32_add); const 
elem_result = val: { - var result = try func.allocLocal(elem_ty); + var result = try func.allocLocal(Type.usize); try func.addLabel(.local_set, result.local.value); if (isByRef(elem_ty, func.target)) { break :val result; @@ -5155,8 +5161,8 @@ fn cmpOptionals(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op: fn cmpBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op: std.math.CompareOperator) InnerError!WValue { assert(operand_ty.abiSize(func.target) >= 16); assert(!(lhs != .stack and rhs == .stack)); - if (operand_ty.intInfo(func.target).bits > 128) { - return func.fail("TODO: Support cmpBigInt for integer bitsize: '{d}'", .{operand_ty.intInfo(func.target).bits}); + if (operand_ty.bitSize(func.target) > 128) { + return func.fail("TODO: Support cmpBigInt for integer bitsize: '{d}'", .{operand_ty.bitSize(func.target)}); } var lhs_high_bit = try (try func.load(lhs, Type.u64, 0)).toLocal(func, Type.u64); -- cgit v1.2.3 From 128814f9bf89460232569ae3e47c52f90cdf4ffd Mon Sep 17 00:00:00 2001 From: Luuk de Gram Date: Sat, 27 May 2023 15:14:18 +0200 Subject: wasm: `aggregate_init` store sentinel for arrays --- src/arch/wasm/CodeGen.zig | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) (limited to 'src/arch') diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index 2a8e1668e8..57f6beb035 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -4926,6 +4926,9 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const result = try func.allocStack(result_ty); const elem_ty = result_ty.childType(); const elem_size = @intCast(u32, elem_ty.abiSize(func.target)); + const sentinel = if (result_ty.sentinel()) |sent| blk: { + break :blk try func.lowerConstant(sent, elem_ty); + } else null; // When the element type is by reference, we must copy the entire // value. 
It is therefore safer to move the offset pointer and store @@ -4938,10 +4941,13 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const elem_val = try func.resolveInst(elem); try func.store(offset, elem_val, elem_ty, 0); - if (elem_index < elements.len - 1) { + if (elem_index < elements.len - 1 and sentinel == null) { _ = try func.buildPointerOffset(offset, elem_size, .modify); } } + if (sentinel) |sent| { + try func.store(offset, sent, elem_ty, 0); + } } else { var offset: u32 = 0; for (elements) |elem| { @@ -4949,6 +4955,9 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { try func.store(result, elem_val, elem_ty, offset); offset += elem_size; } + if (sentinel) |sent| { + try func.store(result, sent, elem_ty, offset); + } } break :result_value result; }, -- cgit v1.2.3 From e36cc0ce8f4c0ec7c1398149c5fa30f15d8dae9f Mon Sep 17 00:00:00 2001 From: Luuk de Gram Date: Mon, 29 May 2023 12:33:24 +0200 Subject: wasm: `union_init` support packed unions --- src/arch/wasm/CodeGen.zig | 83 ++++++++++++++++++++++++++++++++++------------- 1 file changed, 61 insertions(+), 22 deletions(-) (limited to 'src/arch') diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index 57f6beb035..34a3c68102 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -1014,6 +1014,17 @@ fn typeToValtype(ty: Type, target: std.Target) wasm.Valtype { .direct => wasm.Valtype.v128, .unrolled => wasm.Valtype.i32, }, + .Union => switch (ty.containerLayout()) { + .Packed => { + var int_ty_payload: Type.Payload.Bits = .{ + .base = .{ .tag = .int_unsigned }, + .data = @intCast(u16, ty.bitSize(target)), + }; + const int_ty = Type.initPayload(&int_ty_payload.base); + return typeToValtype(int_ty, target); + }, + else => wasm.Valtype.i32, + }, else => wasm.Valtype.i32, // all represented as reference/immediate }; } @@ -5074,33 +5085,61 @@ fn airUnionInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { assert(!isByRef(union_ty, func.target)); break :result tag_int; } - assert(isByRef(union_ty, func.target)); - - const result_ptr = try func.allocStack(union_ty); - const payload = try func.resolveInst(extra.init); - if (layout.tag_align >= layout.payload_align) { - if (isByRef(field.ty, func.target)) { - const payload_ptr = try func.buildPointerOffset(result_ptr, layout.tag_size, .new); - try func.store(payload_ptr, payload, field.ty, 0); - } else { - try func.store(result_ptr, payload, field.ty, @intCast(u32, layout.tag_size)); - } - if (layout.tag_size > 0) { - try func.store(result_ptr, tag_int, union_obj.tag_ty, 0); + if (isByRef(union_ty, func.target)) { + const result_ptr = try func.allocStack(union_ty); + const payload = try func.resolveInst(extra.init); + if (layout.tag_align >= layout.payload_align) { + if (isByRef(field.ty, func.target)) { + const payload_ptr = try func.buildPointerOffset(result_ptr, layout.tag_size, .new); + try func.store(payload_ptr, payload, field.ty, 0); + } else { + try func.store(result_ptr, payload, field.ty, @intCast(u32, layout.tag_size)); + } + + if (layout.tag_size > 0) { + try func.store(result_ptr, tag_int, union_obj.tag_ty, 0); + } + } else { + try func.store(result_ptr, payload, field.ty, 0); + if (layout.tag_size > 0) { + try func.store( + result_ptr, + tag_int, + union_obj.tag_ty, + @intCast(u32, layout.payload_size), + ); + } } + break :result result_ptr; } else { - try func.store(result_ptr, payload, field.ty, 0); - if (layout.tag_size > 0) { - try func.store( - result_ptr, - tag_int, - 
union_obj.tag_ty, - @intCast(u32, layout.payload_size), - ); + const operand = try func.resolveInst(extra.init); + var payload: Type.Payload.Bits = .{ + .base = .{ .tag = .int_unsigned }, + .data = @intCast(u16, union_ty.bitSize(func.target)), + }; + const union_int_type = Type.initPayload(&payload.base); + if (field.ty.zigTypeTag() == .Float) { + var int_payload: Type.Payload.Bits = .{ + .base = .{ .tag = .int_unsigned }, + .data = @intCast(u16, field.ty.bitSize(func.target)), + }; + const int_type = Type.initPayload(&int_payload.base); + const bitcasted = try func.bitcast(field.ty, int_type, operand); + const casted = try func.trunc(bitcasted, int_type, union_int_type); + break :result try casted.toLocal(func, field.ty); + } else if (field.ty.isPtrAtRuntime()) { + var int_payload: Type.Payload.Bits = .{ + .base = .{ .tag = .int_unsigned }, + .data = @intCast(u16, field.ty.bitSize(func.target)), + }; + const int_type = Type.initPayload(&int_payload.base); + const casted = try func.intcast(operand, int_type, union_int_type); + break :result try casted.toLocal(func, field.ty); } + const casted = try func.intcast(operand, field.ty, union_int_type); + break :result try casted.toLocal(func, field.ty); } - break :result result_ptr; }; return func.finishAir(inst, result, &.{extra.init}); -- cgit v1.2.3 From 7e10cf4fbe2a8a379564941be9953ce157d483cc Mon Sep 17 00:00:00 2001 From: Luuk de Gram Date: Mon, 29 May 2023 14:23:51 +0200 Subject: wasm: `shl_with_overflow` ensure rhs is coerced Both operands must have the same Wasm type before we are allowed to perform any binary operation on the values. --- src/arch/wasm/CodeGen.zig | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) (limited to 'src/arch') diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index 34a3c68102..2072ff1506 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -5707,6 +5707,7 @@ fn airShlWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const lhs = try func.resolveInst(extra.lhs); const rhs = try func.resolveInst(extra.rhs); const lhs_ty = func.air.typeOf(extra.lhs); + const rhs_ty = func.air.typeOf(extra.rhs); if (lhs_ty.zigTypeTag() == .Vector) { return func.fail("TODO: Implement overflow arithmetic for vectors", .{}); @@ -5718,7 +5719,15 @@ fn airShlWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { return func.fail("TODO: Implement shl_with_overflow for integer bitsize: {d}", .{int_info.bits}); }; - var shl = try (try func.binOp(lhs, rhs, lhs_ty, .shl)).toLocal(func, lhs_ty); + // Ensure rhs is coerced to lhs as they must have the same WebAssembly types + // before we can perform any binary operation. 
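+    // For example, `u64 << u6` lowers the lhs to a wasm i64 while the rhs
+    // is materialized as an i32; `i64.shl` pops two i64 operands, hence the
+    // widening cast below.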
+    const rhs_wasm_bits = toWasmBits(rhs_ty.intInfo(func.target).bits).?;
+    const rhs_final = if (wasm_bits != rhs_wasm_bits) blk: {
+        const rhs_casted = try func.intcast(rhs, rhs_ty, lhs_ty);
+        break :blk try rhs_casted.toLocal(func, lhs_ty);
+    } else rhs;
+
+    var shl = try (try func.binOp(lhs, rhs_final, lhs_ty, .shl)).toLocal(func, lhs_ty);
     defer shl.free(func);
     var result = if (wasm_bits != int_info.bits) blk: {
         break :blk try (try func.wrapOperand(shl, lhs_ty)).toLocal(func, lhs_ty);
@@ -5729,11 +5738,11 @@ fn airShlWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
         // emit lhs to stack to we can keep 'wrapped' on the stack also
         try func.emitWValue(lhs);
         const abs = try func.signAbsValue(shl, lhs_ty);
-        const wrapped = try func.wrapBinOp(abs, rhs, lhs_ty, .shr);
+        const wrapped = try func.wrapBinOp(abs, rhs_final, lhs_ty, .shr);
         break :blk try func.cmp(.{ .stack = {} }, wrapped, lhs_ty, .neq);
     } else blk: {
         try func.emitWValue(lhs);
-        const shr = try func.binOp(result, rhs, lhs_ty, .shr);
+        const shr = try func.binOp(result, rhs_final, lhs_ty, .shr);
         break :blk try func.cmp(.{ .stack = {} }, shr, lhs_ty, .neq);
     };
     var overflow_local = try overflow_bit.toLocal(func, Type.initTag(.u1));
-- 
cgit v1.2.3

From ebfd3450d9a3338726e1ed8b08a5751b06604cd5 Mon Sep 17 00:00:00 2001
From: Luuk de Gram
Date: Tue, 30 May 2023 21:55:44 +0200
Subject: codegen: Write padding bytes for unions

Previously we did not write the missing padding bytes after the smaller
field (either the tag or the payload, depending on alignment). This
resulted in writing too few bytes and not matching the full ABI size of
the union.
---
 src/arch/wasm/CodeGen.zig | 4 ++--
 src/codegen.zig           | 4 ++++
 2 files changed, 6 insertions(+), 2 deletions(-)

(limited to 'src/arch')

diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig
index 2072ff1506..d4be9bf139 100644
--- a/src/arch/wasm/CodeGen.zig
+++ b/src/arch/wasm/CodeGen.zig
@@ -1726,8 +1726,8 @@ fn isByRef(ty: Type, target: std.Target) bool {
         .Array,
         .Frame,
-        .Union,
-        => {
+        => return ty.hasRuntimeBitsIgnoreComptime(),
+        .Union => {
             if (ty.castTag(.@"union")) |union_ty| {
                 if (union_ty.data.layout == .Packed) {
                     return ty.abiSize(target) > 8;
diff --git a/src/codegen.zig b/src/codegen.zig
index 692c55e380..adce183833 100644
--- a/src/codegen.zig
+++ b/src/codegen.zig
@@ -611,6 +611,10 @@ pub fn generateSymbol(
                 }
             }
 
+            if (layout.padding > 0) {
+                try code.writer().writeByteNTimes(0, layout.padding);
+            }
+
             return Result.ok;
         },
         .Optional => {
-- 
cgit v1.2.3

From 9aec2758cc29d27c31dcb0b4bb040484a885ef23 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Tue, 2 May 2023 15:01:45 -0700
Subject: stage2: start the InternPool transition

Instead of doing everything at once, which is a hopelessly large task,
this introduces a piecemeal transition that can be done in small
increments.

This is a minimal changeset that keeps the compiler compiling. It only
uses the InternPool for a small set of types. Behavior tests are not
passing.

Air.Inst.Ref and Zir.Inst.Ref are separated into different enums but
compile-time verified to have the same fields in the same order.

The large set of changes is mainly to deal with the fact that most Type
and Value methods now require a Module to be passed in, so that the
InternPool object can be accessed.
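To illustrate the compile-time verification mentioned above, here is a
minimal, standalone sketch of the same technique; the enum names are
stand-ins, and the real check (over Zir.Inst.Ref, Air.Inst.Ref, and
InternPool.Index) appears in the Module.zig hunk below:

    const std = @import("std");
    const assert = std.debug.assert;

    // Stand-ins for the enums that must stay in lockstep.
    const RefA = enum(u32) { u8_type, bool_type, none = std.math.maxInt(u32), _ };
    const RefB = enum(u32) { u8_type, bool_type, none = std.math.maxInt(u32), _ };

    comptime {
        // Walk the field lists in parallel; compilation fails if the names
        // ever diverge in spelling or order (or if the lengths differ, since
        // a multi-object `for` requires equal lengths).
        for (
            @typeInfo(RefA).Enum.fields,
            @typeInfo(RefB).Enum.fields,
        ) |a_field, b_field| {
            assert(std.mem.eql(u8, a_field.name, b_field.name));
        }
    }

The real block below also raises @setEvalBranchQuota, since the actual
enums carry far more fields than this sketch.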
--- src/Air.zig | 125 ++- src/AstGen.zig | 6 +- src/Autodoc.zig | 2 - src/Compilation.zig | 3 +- src/InternPool.zig | 518 ++++++++- src/Liveness.zig | 8 +- src/Module.zig | 335 ++++-- src/RangeSet.zig | 13 +- src/Sema.zig | 2383 +++++++++++++++++++++++------------------- src/TypedValue.zig | 42 +- src/Zir.zig | 518 ++------- src/arch/aarch64/CodeGen.zig | 351 ++++--- src/arch/aarch64/abi.zig | 36 +- src/arch/arm/CodeGen.zig | 342 +++--- src/arch/arm/abi.zig | 41 +- src/arch/riscv64/CodeGen.zig | 72 +- src/arch/riscv64/abi.zig | 18 +- src/arch/sparc64/CodeGen.zig | 239 +++-- src/arch/wasm/CodeGen.zig | 966 +++++++++-------- src/arch/wasm/abi.zig | 41 +- src/arch/x86_64/CodeGen.zig | 774 +++++++------- src/arch/x86_64/abi.zig | 82 +- src/codegen.zig | 204 ++-- src/codegen/c.zig | 727 ++++++------- src/codegen/c/type.zig | 150 ++- src/codegen/llvm.zig | 1557 ++++++++++++++------------- src/codegen/spirv.zig | 208 ++-- src/link/Coff.zig | 7 +- src/link/Dwarf.zig | 103 +- src/link/Elf.zig | 9 +- src/link/MachO.zig | 11 +- src/link/Plan9.zig | 9 +- src/link/Wasm.zig | 16 +- src/print_air.zig | 8 +- src/print_zir.zig | 12 +- src/target.zig | 128 --- src/type.zig | 1313 ++++++++++++----------- src/value.zig | 880 ++++++++-------- 38 files changed, 6473 insertions(+), 5784 deletions(-) (limited to 'src/arch') diff --git a/src/Air.zig b/src/Air.zig index 7ee36206f1..b60e8eda9d 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -5,10 +5,12 @@ const std = @import("std"); const builtin = @import("builtin"); -const Value = @import("value.zig").Value; -const Type = @import("type.zig").Type; const assert = std.debug.assert; + const Air = @This(); +const Value = @import("value.zig").Value; +const Type = @import("type.zig").Type; +const InternPool = @import("InternPool.zig"); instructions: std.MultiArrayList(Inst).Slice, /// The meaning of this data is determined by `Inst.Tag` value. @@ -837,7 +839,88 @@ pub const Inst = struct { /// The position of an AIR instruction within the `Air` instructions array. 
pub const Index = u32; - pub const Ref = @import("Zir.zig").Inst.Ref; + pub const Ref = enum(u32) { + u1_type = @enumToInt(InternPool.Index.u1_type), + u8_type = @enumToInt(InternPool.Index.u8_type), + i8_type = @enumToInt(InternPool.Index.i8_type), + u16_type = @enumToInt(InternPool.Index.u16_type), + i16_type = @enumToInt(InternPool.Index.i16_type), + u29_type = @enumToInt(InternPool.Index.u29_type), + u32_type = @enumToInt(InternPool.Index.u32_type), + i32_type = @enumToInt(InternPool.Index.i32_type), + u64_type = @enumToInt(InternPool.Index.u64_type), + i64_type = @enumToInt(InternPool.Index.i64_type), + u80_type = @enumToInt(InternPool.Index.u80_type), + u128_type = @enumToInt(InternPool.Index.u128_type), + i128_type = @enumToInt(InternPool.Index.i128_type), + usize_type = @enumToInt(InternPool.Index.usize_type), + isize_type = @enumToInt(InternPool.Index.isize_type), + c_char_type = @enumToInt(InternPool.Index.c_char_type), + c_short_type = @enumToInt(InternPool.Index.c_short_type), + c_ushort_type = @enumToInt(InternPool.Index.c_ushort_type), + c_int_type = @enumToInt(InternPool.Index.c_int_type), + c_uint_type = @enumToInt(InternPool.Index.c_uint_type), + c_long_type = @enumToInt(InternPool.Index.c_long_type), + c_ulong_type = @enumToInt(InternPool.Index.c_ulong_type), + c_longlong_type = @enumToInt(InternPool.Index.c_longlong_type), + c_ulonglong_type = @enumToInt(InternPool.Index.c_ulonglong_type), + c_longdouble_type = @enumToInt(InternPool.Index.c_longdouble_type), + f16_type = @enumToInt(InternPool.Index.f16_type), + f32_type = @enumToInt(InternPool.Index.f32_type), + f64_type = @enumToInt(InternPool.Index.f64_type), + f80_type = @enumToInt(InternPool.Index.f80_type), + f128_type = @enumToInt(InternPool.Index.f128_type), + anyopaque_type = @enumToInt(InternPool.Index.anyopaque_type), + bool_type = @enumToInt(InternPool.Index.bool_type), + void_type = @enumToInt(InternPool.Index.void_type), + type_type = @enumToInt(InternPool.Index.type_type), + anyerror_type = @enumToInt(InternPool.Index.anyerror_type), + comptime_int_type = @enumToInt(InternPool.Index.comptime_int_type), + comptime_float_type = @enumToInt(InternPool.Index.comptime_float_type), + noreturn_type = @enumToInt(InternPool.Index.noreturn_type), + anyframe_type = @enumToInt(InternPool.Index.anyframe_type), + null_type = @enumToInt(InternPool.Index.null_type), + undefined_type = @enumToInt(InternPool.Index.undefined_type), + enum_literal_type = @enumToInt(InternPool.Index.enum_literal_type), + atomic_order_type = @enumToInt(InternPool.Index.atomic_order_type), + atomic_rmw_op_type = @enumToInt(InternPool.Index.atomic_rmw_op_type), + calling_convention_type = @enumToInt(InternPool.Index.calling_convention_type), + address_space_type = @enumToInt(InternPool.Index.address_space_type), + float_mode_type = @enumToInt(InternPool.Index.float_mode_type), + reduce_op_type = @enumToInt(InternPool.Index.reduce_op_type), + call_modifier_type = @enumToInt(InternPool.Index.call_modifier_type), + prefetch_options_type = @enumToInt(InternPool.Index.prefetch_options_type), + export_options_type = @enumToInt(InternPool.Index.export_options_type), + extern_options_type = @enumToInt(InternPool.Index.extern_options_type), + type_info_type = @enumToInt(InternPool.Index.type_info_type), + manyptr_u8_type = @enumToInt(InternPool.Index.manyptr_u8_type), + manyptr_const_u8_type = @enumToInt(InternPool.Index.manyptr_const_u8_type), + single_const_pointer_to_comptime_int_type = 
@enumToInt(InternPool.Index.single_const_pointer_to_comptime_int_type), + const_slice_u8_type = @enumToInt(InternPool.Index.const_slice_u8_type), + anyerror_void_error_union_type = @enumToInt(InternPool.Index.anyerror_void_error_union_type), + generic_poison_type = @enumToInt(InternPool.Index.generic_poison_type), + var_args_param_type = @enumToInt(InternPool.Index.var_args_param_type), + empty_struct_type = @enumToInt(InternPool.Index.empty_struct_type), + undef = @enumToInt(InternPool.Index.undef), + zero = @enumToInt(InternPool.Index.zero), + zero_usize = @enumToInt(InternPool.Index.zero_usize), + one = @enumToInt(InternPool.Index.one), + one_usize = @enumToInt(InternPool.Index.one_usize), + calling_convention_c = @enumToInt(InternPool.Index.calling_convention_c), + calling_convention_inline = @enumToInt(InternPool.Index.calling_convention_inline), + void_value = @enumToInt(InternPool.Index.void_value), + unreachable_value = @enumToInt(InternPool.Index.unreachable_value), + null_value = @enumToInt(InternPool.Index.null_value), + bool_true = @enumToInt(InternPool.Index.bool_true), + bool_false = @enumToInt(InternPool.Index.bool_false), + empty_struct = @enumToInt(InternPool.Index.empty_struct), + generic_poison = @enumToInt(InternPool.Index.generic_poison), + + /// This Ref does not correspond to any AIR instruction or constant + /// value and may instead be used as a sentinel to indicate null. + none = std.math.maxInt(u32), + _, + }; /// All instructions have an 8-byte payload, which is contained within /// this union. `Tag` determines which union field is active, as well as @@ -1066,10 +1149,13 @@ pub fn getMainBody(air: Air) []const Air.Inst.Index { pub fn typeOf(air: Air, inst: Air.Inst.Ref) Type { const ref_int = @enumToInt(inst); - if (ref_int < Air.Inst.Ref.typed_value_map.len) { - return Air.Inst.Ref.typed_value_map[ref_int].ty; + if (ref_int < InternPool.static_keys.len) { + return .{ + .ip_index = InternPool.static_keys[ref_int].typeOf(), + .legacy = undefined, + }; } - return air.typeOfIndex(@intCast(Air.Inst.Index, ref_int - Air.Inst.Ref.typed_value_map.len)); + return air.typeOfIndex(ref_int - ref_start_index); } pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type { @@ -1286,11 +1372,7 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type { .call, .call_always_tail, .call_never_tail, .call_never_inline => { const callee_ty = air.typeOf(datas[inst].pl_op.operand); - switch (callee_ty.zigTypeTag()) { - .Fn => return callee_ty.fnReturnType(), - .Pointer => return callee_ty.childType().fnReturnType(), - else => unreachable, - } + return callee_ty.fnReturnType(); }, .slice_elem_val, .ptr_elem_val, .array_elem_val => { @@ -1328,11 +1410,11 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type { pub fn getRefType(air: Air, ref: Air.Inst.Ref) Type { const ref_int = @enumToInt(ref); - if (ref_int < Air.Inst.Ref.typed_value_map.len) { - var buffer: Value.ToTypeBuffer = undefined; - return Air.Inst.Ref.typed_value_map[ref_int].val.toType(&buffer); + if (ref_int < ref_start_index) { + const ip_index = @intToEnum(InternPool.Index, ref_int); + return ip_index.toType(); } - const inst_index = ref_int - Air.Inst.Ref.typed_value_map.len; + const inst_index = ref_int - ref_start_index; const air_tags = air.instructions.items(.tag); const air_datas = air.instructions.items(.data); assert(air_tags[inst_index] == .const_ty); @@ -1367,7 +1449,7 @@ pub fn deinit(air: *Air, gpa: std.mem.Allocator) void { air.* = undefined; } -const ref_start_index: u32 = 
Air.Inst.Ref.typed_value_map.len; +pub const ref_start_index: u32 = InternPool.static_len; pub fn indexToRef(inst: Air.Inst.Index) Air.Inst.Ref { return @intToEnum(Air.Inst.Ref, ref_start_index + inst); @@ -1383,17 +1465,18 @@ pub fn refToIndex(inst: Air.Inst.Ref) ?Air.Inst.Index { } /// Returns `null` if runtime-known. -pub fn value(air: Air, inst: Air.Inst.Ref) ?Value { +pub fn value(air: Air, inst: Air.Inst.Ref, mod: *const @import("Module.zig")) ?Value { const ref_int = @enumToInt(inst); - if (ref_int < Air.Inst.Ref.typed_value_map.len) { - return Air.Inst.Ref.typed_value_map[ref_int].val; + if (ref_int < ref_start_index) { + const ip_index = @intToEnum(InternPool.Index, ref_int); + return ip_index.toValue(); } - const inst_index = @intCast(Air.Inst.Index, ref_int - Air.Inst.Ref.typed_value_map.len); + const inst_index = @intCast(Air.Inst.Index, ref_int - ref_start_index); const air_datas = air.instructions.items(.data); switch (air.instructions.items(.tag)[inst_index]) { .constant => return air.values[air_datas[inst_index].ty_pl.payload], .const_ty => unreachable, - else => return air.typeOfIndex(inst_index).onePossibleValue(), + else => return air.typeOfIndex(inst_index).onePossibleValue(mod), } } diff --git a/src/AstGen.zig b/src/AstGen.zig index b38067fd03..6461b11d80 100644 --- a/src/AstGen.zig +++ b/src/AstGen.zig @@ -8530,7 +8530,7 @@ fn builtinCall( return rvalue(gz, ri, result, node); }, .call => { - const modifier = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .modifier_type } }, params[0]); + const modifier = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .call_modifier_type } }, params[0]); const callee = try expr(gz, scope, .{ .rl = .none }, params[1]); const args = try expr(gz, scope, .{ .rl = .none }, params[2]); const result = try gz.addPlNode(.builtin_call, node, Zir.Inst.BuiltinCall{ @@ -10298,10 +10298,6 @@ fn rvalue( as_ty | @enumToInt(Zir.Inst.Ref.noreturn_type), as_ty | @enumToInt(Zir.Inst.Ref.null_type), as_ty | @enumToInt(Zir.Inst.Ref.undefined_type), - as_ty | @enumToInt(Zir.Inst.Ref.fn_noreturn_no_args_type), - as_ty | @enumToInt(Zir.Inst.Ref.fn_void_no_args_type), - as_ty | @enumToInt(Zir.Inst.Ref.fn_naked_noreturn_no_args_type), - as_ty | @enumToInt(Zir.Inst.Ref.fn_ccc_void_no_args_type), as_ty | @enumToInt(Zir.Inst.Ref.single_const_pointer_to_comptime_int_type), as_ty | @enumToInt(Zir.Inst.Ref.const_slice_u8_type), as_ty | @enumToInt(Zir.Inst.Ref.enum_literal_type), diff --git a/src/Autodoc.zig b/src/Autodoc.zig index 879f0a6b15..c20c5771dd 100644 --- a/src/Autodoc.zig +++ b/src/Autodoc.zig @@ -95,8 +95,6 @@ pub fn generateZirData(self: *Autodoc) !void { } } - log.debug("Ref map size: {}", .{Ref.typed_value_map.len}); - const root_src_dir = self.comp_module.main_pkg.root_src_directory; const root_src_path = self.comp_module.main_pkg.root_src_path; const joined_src_path = try root_src_dir.join(self.arena, &.{root_src_path}); diff --git a/src/Compilation.zig b/src/Compilation.zig index cbdc789d40..15e393c35c 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -1317,7 +1317,7 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation { .emit_h = emit_h, .error_name_list = .{}, }; - try module.error_name_list.append(gpa, "(no error)"); + try module.init(); break :blk module; } else blk: { @@ -2064,6 +2064,7 @@ pub fn update(comp: *Compilation, main_progress_node: *std.Progress.Node) !void if (!build_options.only_c and !build_options.only_core_functionality) { if (comp.emit_docs) |doc_location| { if 
(comp.bin_file.options.module) |module| { + if (true) @panic("TODO: get autodoc working again in this branch"); var autodoc = Autodoc.init(module, doc_location); defer autodoc.deinit(); try autodoc.generateZirData(); diff --git a/src/InternPool.zig b/src/InternPool.zig index 74155ca657..b835315e5a 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -1,11 +1,16 @@ +//! All interned objects have both a value and a type. + map: std.AutoArrayHashMapUnmanaged(void, void) = .{}, items: std.MultiArrayList(Item) = .{}, extra: std.ArrayListUnmanaged(u32) = .{}, -const InternPool = @This(); const std = @import("std"); const Allocator = std.mem.Allocator; const assert = std.debug.assert; +const BigIntConst = std.math.big.int.Const; + +const InternPool = @This(); +const DeclIndex = enum(u32) { _ }; const KeyAdapter = struct { intern_pool: *const InternPool, @@ -17,24 +22,21 @@ const KeyAdapter = struct { pub fn hash(ctx: @This(), a: Key) u32 { _ = ctx; - return a.hash(); + return a.hash32(); } }; pub const Key = union(enum) { - int_type: struct { - signedness: std.builtin.Signedness, - bits: u16, - }, + int_type: IntType, ptr_type: struct { elem_type: Index, - sentinel: Index, - alignment: u16, + sentinel: Index = .none, + alignment: u16 = 0, size: std.builtin.Type.Pointer.Size, - is_const: bool, - is_volatile: bool, - is_allowzero: bool, - address_space: std.builtin.AddressSpace, + is_const: bool = false, + is_volatile: bool = false, + is_allowzero: bool = false, + address_space: std.builtin.AddressSpace = .generic, }, array_type: struct { len: u64, @@ -52,20 +54,52 @@ pub const Key = union(enum) { error_set_type: Index, payload_type: Index, }, - simple: Simple, + simple_type: SimpleType, + simple_value: SimpleValue, + extern_func: struct { + ty: Index, + /// The Decl that corresponds to the function itself. + owner_decl: DeclIndex, + /// Library name if specified. + /// For example `extern "c" fn write(...) usize` would have 'c' as library name. + /// Index into the string table bytes. 
+ lib_name: u32, + }, + int: struct { + ty: Index, + big_int: BigIntConst, + }, + enum_tag: struct { + ty: Index, + tag: BigIntConst, + }, + struct_type: struct { + fields_len: u32, + // TODO move Module.Struct data to here + }, + + pub const IntType = std.builtin.Type.Int; - pub fn hash(key: Key) u32 { + pub fn hash32(key: Key) u32 { + return @truncate(u32, key.hash64()); + } + + pub fn hash64(key: Key) u64 { var hasher = std.hash.Wyhash.init(0); + key.hashWithHasher(&hasher); + return hasher.final(); + } + + pub fn hashWithHasher(key: Key, hasher: *std.hash.Wyhash) void { switch (key) { .int_type => |int_type| { - std.hash.autoHash(&hasher, int_type); + std.hash.autoHash(hasher, int_type); }, .array_type => |array_type| { - std.hash.autoHash(&hasher, array_type); + std.hash.autoHash(hasher, array_type); }, else => @panic("TODO"), } - return @truncate(u32, hasher.final()); } pub fn eql(a: Key, b: Key) bool { @@ -85,6 +119,34 @@ pub const Key = union(enum) { else => @panic("TODO"), } } + + pub fn typeOf(key: Key) Index { + switch (key) { + .int_type, + .ptr_type, + .array_type, + .vector_type, + .optional_type, + .error_union_type, + .simple_type, + .struct_type, + => return .type_type, + + .int => |x| return x.ty, + .extern_func => |x| return x.ty, + .enum_tag => |x| return x.ty, + + .simple_value => |s| switch (s) { + .undefined => return .undefined_type, + .void => return .void_type, + .null => return .null_type, + .false, .true => return .bool_type, + .empty_struct => return .empty_struct_type, + .@"unreachable" => return .noreturn_type, + .generic_poison => unreachable, + }, + } + } }; pub const Item = struct { @@ -98,11 +160,330 @@ pub const Item = struct { /// Two values which have the same type can be equality compared simply /// by checking if their indexes are equal, provided they are both in /// the same `InternPool`. +/// When adding a tag to this enum, consider adding a corresponding entry to +/// `primitives` in AstGen.zig. 
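+/// Apart from `none`, the named values here correspond one-to-one, in order,
+/// with the entries of `static_keys` below.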
pub const Index = enum(u32) { + u1_type, + u8_type, + i8_type, + u16_type, + i16_type, + u29_type, + u32_type, + i32_type, + u64_type, + i64_type, + u80_type, + u128_type, + i128_type, + usize_type, + isize_type, + c_char_type, + c_short_type, + c_ushort_type, + c_int_type, + c_uint_type, + c_long_type, + c_ulong_type, + c_longlong_type, + c_ulonglong_type, + c_longdouble_type, + f16_type, + f32_type, + f64_type, + f80_type, + f128_type, + anyopaque_type, + bool_type, + void_type, + type_type, + anyerror_type, + comptime_int_type, + comptime_float_type, + noreturn_type, + anyframe_type, + null_type, + undefined_type, + enum_literal_type, + atomic_order_type, + atomic_rmw_op_type, + calling_convention_type, + address_space_type, + float_mode_type, + reduce_op_type, + call_modifier_type, + prefetch_options_type, + export_options_type, + extern_options_type, + type_info_type, + manyptr_u8_type, + manyptr_const_u8_type, + single_const_pointer_to_comptime_int_type, + const_slice_u8_type, + anyerror_void_error_union_type, + generic_poison_type, + var_args_param_type, + empty_struct_type, + + /// `undefined` (untyped) + undef, + /// `0` (comptime_int) + zero, + /// `0` (usize) + zero_usize, + /// `1` (comptime_int) + one, + /// `1` (usize) + one_usize, + /// `std.builtin.CallingConvention.C` + calling_convention_c, + /// `std.builtin.CallingConvention.Inline` + calling_convention_inline, + /// `{}` + void_value, + /// `unreachable` (noreturn type) + unreachable_value, + /// `null` (untyped) + null_value, + /// `true` + bool_true, + /// `false` + bool_false, + /// `.{}` (untyped) + empty_struct, + /// Used for generic parameters where the type and value + /// is not known until generic function instantiation. + generic_poison, + none = std.math.maxInt(u32), + _, + + pub fn toType(i: Index) @import("type.zig").Type { + assert(i != .none); + return .{ + .ip_index = i, + .legacy = undefined, + }; + } + + pub fn toValue(i: Index) @import("value.zig").Value { + assert(i != .none); + return .{ + .ip_index = i, + .legacy = undefined, + }; + } +}; + +pub const static_keys = [_]Key{ + .{ .int_type = .{ + .signedness = .unsigned, + .bits = 1, + } }, + + .{ .int_type = .{ + .signedness = .unsigned, + .bits = 8, + } }, + + .{ .int_type = .{ + .signedness = .signed, + .bits = 8, + } }, + + .{ .int_type = .{ + .signedness = .unsigned, + .bits = 16, + } }, + + .{ .int_type = .{ + .signedness = .signed, + .bits = 16, + } }, + + .{ .int_type = .{ + .signedness = .unsigned, + .bits = 29, + } }, + + .{ .int_type = .{ + .signedness = .unsigned, + .bits = 32, + } }, + + .{ .int_type = .{ + .signedness = .signed, + .bits = 32, + } }, + + .{ .int_type = .{ + .signedness = .unsigned, + .bits = 64, + } }, + + .{ .int_type = .{ + .signedness = .signed, + .bits = 64, + } }, + + .{ .int_type = .{ + .signedness = .unsigned, + .bits = 80, + } }, + + .{ .int_type = .{ + .signedness = .unsigned, + .bits = 128, + } }, + + .{ .int_type = .{ + .signedness = .signed, + .bits = 128, + } }, + + .{ .simple_type = .usize }, + .{ .simple_type = .isize }, + .{ .simple_type = .c_char }, + .{ .simple_type = .c_short }, + .{ .simple_type = .c_ushort }, + .{ .simple_type = .c_int }, + .{ .simple_type = .c_uint }, + .{ .simple_type = .c_long }, + .{ .simple_type = .c_ulong }, + .{ .simple_type = .c_longlong }, + .{ .simple_type = .c_ulonglong }, + .{ .simple_type = .c_longdouble }, + .{ .simple_type = .f16 }, + .{ .simple_type = .f32 }, + .{ .simple_type = .f64 }, + .{ .simple_type = .f80 }, + .{ .simple_type = .f128 }, + .{ .simple_type = 
.anyopaque }, + .{ .simple_type = .bool }, + .{ .simple_type = .void }, + .{ .simple_type = .type }, + .{ .simple_type = .anyerror }, + .{ .simple_type = .comptime_int }, + .{ .simple_type = .comptime_float }, + .{ .simple_type = .noreturn }, + .{ .simple_type = .@"anyframe" }, + .{ .simple_type = .null }, + .{ .simple_type = .undefined }, + .{ .simple_type = .enum_literal }, + .{ .simple_type = .atomic_order }, + .{ .simple_type = .atomic_rmw_op }, + .{ .simple_type = .calling_convention }, + .{ .simple_type = .address_space }, + .{ .simple_type = .float_mode }, + .{ .simple_type = .reduce_op }, + .{ .simple_type = .call_modifier }, + .{ .simple_type = .prefetch_options }, + .{ .simple_type = .export_options }, + .{ .simple_type = .extern_options }, + .{ .simple_type = .type_info }, + + .{ .ptr_type = .{ + .elem_type = .u8_type, + .size = .Many, + } }, + + .{ .ptr_type = .{ + .elem_type = .u8_type, + .size = .Many, + .is_const = true, + } }, + + .{ .ptr_type = .{ + .elem_type = .comptime_int_type, + .size = .One, + .is_const = true, + } }, + + .{ .ptr_type = .{ + .elem_type = .u8_type, + .size = .Slice, + .is_const = true, + } }, + + .{ .error_union_type = .{ + .error_set_type = .anyerror_type, + .payload_type = .void_type, + } }, + + // generic_poison_type + .{ .simple_type = .generic_poison }, + + // var_args_param_type + .{ .simple_type = .var_args_param }, + + // empty_struct_type + .{ .struct_type = .{ + .fields_len = 0, + } }, + + .{ .simple_value = .undefined }, + + .{ .int = .{ + .ty = .comptime_int_type, + .big_int = .{ + .limbs = &.{0}, + .positive = true, + }, + } }, + + .{ .int = .{ + .ty = .usize_type, + .big_int = .{ + .limbs = &.{0}, + .positive = true, + }, + } }, + + .{ .int = .{ + .ty = .comptime_int_type, + .big_int = .{ + .limbs = &.{1}, + .positive = true, + }, + } }, + + .{ .int = .{ + .ty = .usize_type, + .big_int = .{ + .limbs = &.{1}, + .positive = true, + }, + } }, + + .{ .enum_tag = .{ + .ty = .calling_convention_type, + .tag = .{ + .limbs = &.{@enumToInt(std.builtin.CallingConvention.C)}, + .positive = true, + }, + } }, + + .{ .enum_tag = .{ + .ty = .calling_convention_type, + .tag = .{ + .limbs = &.{@enumToInt(std.builtin.CallingConvention.Inline)}, + .positive = true, + }, + } }, + + .{ .simple_value = .void }, + .{ .simple_value = .@"unreachable" }, + .{ .simple_value = .null }, + .{ .simple_value = .true }, + .{ .simple_value = .false }, + .{ .simple_value = .empty_struct }, + .{ .simple_value = .generic_poison }, }; +/// How many items in the InternPool are statically known. +pub const static_len: u32 = static_keys.len; + pub const Tag = enum(u8) { /// An integer type. /// data is number of bits @@ -113,9 +494,12 @@ pub const Tag = enum(u8) { /// An array type. /// data is payload to Array. type_array, - /// A type or value that can be represented with only an enum tag. - /// data is Simple enum value - simple, + /// A type that can be represented with only an enum tag. + /// data is SimpleType enum value. + simple_type, + /// A value that can be represented with only an enum tag. + /// data is SimpleValue enum value. + simple_value, /// An unsigned integer value that can be represented by u32. /// data is integer value int_u32, @@ -137,9 +521,20 @@ pub const Tag = enum(u8) { /// A float value that can be represented by f128. /// data is payload index to Float128. float_f128, + /// An extern function. + extern_func, + /// A regular function. 
+ func, + /// Represents the data that an enum declaration provides, when the fields + /// are auto-numbered, and there are no declarations. + /// data is payload index to `EnumSimple`. + enum_simple, }; -pub const Simple = enum(u32) { +/// Having `SimpleType` and `SimpleValue` in separate enums makes it easier to +/// implement logic that only wants to deal with types because the logic can +/// ignore all simple values. Note that technically, types are values. +pub const SimpleType = enum(u32) { f16, f32, f64, @@ -147,6 +542,7 @@ pub const Simple = enum(u32) { f128, usize, isize, + c_char, c_short, c_ushort, c_int, @@ -165,14 +561,36 @@ pub const Simple = enum(u32) { comptime_float, noreturn, @"anyframe", - null_type, - undefined_type, - enum_literal_type, + null, undefined, - void_value, + enum_literal, + + atomic_order, + atomic_rmw_op, + calling_convention, + address_space, + float_mode, + reduce_op, + call_modifier, + prefetch_options, + export_options, + extern_options, + type_info, + + generic_poison, + var_args_param, +}; + +pub const SimpleValue = enum(u32) { + undefined, + void, null, - bool_true, - bool_false, + empty_struct, + true, + false, + @"unreachable", + + generic_poison, }; pub const Array = struct { @@ -180,10 +598,44 @@ pub const Array = struct { child: Index, }; +/// Trailing: +/// 0. field name: null-terminated string index for each fields_len; declaration order +pub const EnumSimple = struct { + /// The Decl that corresponds to the enum itself. + owner_decl: DeclIndex, + /// An integer type which is used for the numerical value of the enum. This + /// is inferred by Zig to be the smallest power of two unsigned int that + /// fits the number of fields. It is stored here to avoid unnecessary + /// calculations and possibly allocation failure when querying the tag type + /// of enums. + int_tag_ty: Index, + fields_len: u32, +}; + +pub fn init(ip: *InternPool, gpa: Allocator) !void { + assert(ip.items.len == 0); + + // So that we can use `catch unreachable` below. + try ip.items.ensureUnusedCapacity(gpa, static_keys.len); + try ip.map.ensureUnusedCapacity(gpa, static_keys.len); + try ip.extra.ensureUnusedCapacity(gpa, static_keys.len); + + // This inserts all the statically-known values into the intern pool in the + // order expected. + for (static_keys) |key| _ = ip.get(gpa, key) catch unreachable; + + // Sanity check. 
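+    // These asserts catch `static_keys` drifting out of order with `Index`
+    // (checked here via the two bool values).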
+ assert(ip.indexToKey(.bool_true).simple_value == .true); + assert(ip.indexToKey(.bool_false).simple_value == .false); + + assert(ip.items.len == static_keys.len); +} + pub fn deinit(ip: *InternPool, gpa: Allocator) void { ip.map.deinit(gpa); ip.items.deinit(gpa); ip.extra.deinit(gpa); + ip.* = undefined; } pub fn indexToKey(ip: InternPool, index: Index) Key { @@ -210,7 +662,8 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { .sentinel = .none, } }; }, - .simple => .{ .simple = @intToEnum(Simple, data) }, + .simple_type => .{ .simple_type = @intToEnum(SimpleType, data) }, + .simple_value => .{ .simple_value = @intToEnum(SimpleValue, data) }, else => @panic("TODO"), }; @@ -224,12 +677,12 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { } switch (key) { .int_type => |int_type| { - const tag: Tag = switch (int_type.signedness) { + const t: Tag = switch (int_type.signedness) { .signed => .type_int_signed, .unsigned => .type_int_unsigned, }; try ip.items.append(gpa, .{ - .tag = tag, + .tag = t, .data = int_type.bits, }); }, @@ -249,6 +702,11 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { return @intToEnum(Index, ip.items.len - 1); } +pub fn tag(ip: InternPool, index: Index) Tag { + const tags = ip.items.items(.tag); + return tags[@enumToInt(index)]; +} + fn addExtra(ip: *InternPool, gpa: Allocator, extra: anytype) Allocator.Error!u32 { const fields = std.meta.fields(@TypeOf(extra)); try ip.extra.ensureUnusedCapacity(gpa, fields.len); diff --git a/src/Liveness.zig b/src/Liveness.zig index 59135ef5c8..45d0705008 100644 --- a/src/Liveness.zig +++ b/src/Liveness.zig @@ -5,15 +5,17 @@ //! Some instructions are special, such as: //! * Conditional Branches //! * Switch Branches -const Liveness = @This(); const std = @import("std"); -const trace = @import("tracy.zig").trace; const log = std.log.scoped(.liveness); const assert = std.debug.assert; const Allocator = std.mem.Allocator; -const Air = @import("Air.zig"); const Log2Int = std.math.Log2Int; +const Liveness = @This(); +const trace = @import("tracy.zig").trace; +const Air = @import("Air.zig"); +const InternPool = @import("InternPool.zig"); + pub const Verify = @import("Liveness/Verify.zig"); /// This array is split into sets of 4 bits per AIR instruction. diff --git a/src/Module.zig b/src/Module.zig index a8f2281c4f..5756955d3c 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -32,6 +32,19 @@ const build_options = @import("build_options"); const Liveness = @import("Liveness.zig"); const isUpDir = @import("introspect.zig").isUpDir; const clang = @import("clang.zig"); +const InternPool = @import("InternPool.zig"); + +comptime { + @setEvalBranchQuota(4000); + for ( + @typeInfo(Zir.Inst.Ref).Enum.fields, + @typeInfo(Air.Inst.Ref).Enum.fields, + @typeInfo(InternPool.Index).Enum.fields, + ) |zir_field, air_field, ip_field| { + assert(mem.eql(u8, zir_field.name, ip_field.name)); + assert(mem.eql(u8, air_field.name, ip_field.name)); + } +} /// General-purpose allocator. Used for both temporary and long-term storage. gpa: Allocator, @@ -83,6 +96,9 @@ embed_table: std.StringHashMapUnmanaged(*EmbedFile) = .{}, string_literal_table: std.HashMapUnmanaged(StringLiteralContext.Key, Decl.OptionalIndex, StringLiteralContext, std.hash_map.default_max_load_percentage) = .{}, string_literal_bytes: ArrayListUnmanaged(u8) = .{}, +/// Stores all Type and Value objects; periodically garbage collected. +intern_pool: InternPool = .{}, + /// The set of all the generic function instantiations. 
This is used so that when a generic /// function is called twice with the same comptime parameter arguments, both calls dispatch /// to the same function. @@ -807,9 +823,9 @@ pub const Decl = struct { return (try decl.typedValue()).val; } - pub fn isFunction(decl: Decl) !bool { + pub fn isFunction(decl: Decl, mod: *const Module) !bool { const tv = try decl.typedValue(); - return tv.ty.zigTypeTag() == .Fn; + return tv.ty.zigTypeTag(mod) == .Fn; } /// If the Decl has a value and it is a struct, return it, @@ -921,14 +937,14 @@ pub const Decl = struct { }; } - pub fn getAlignment(decl: Decl, target: Target) u32 { + pub fn getAlignment(decl: Decl, mod: *const Module) u32 { assert(decl.has_tv); if (decl.@"align" != 0) { // Explicit alignment. return decl.@"align"; } else { // Natural alignment. - return decl.ty.abiAlignment(target); + return decl.ty.abiAlignment(mod); } } }; @@ -1030,7 +1046,7 @@ pub const Struct = struct { /// Returns the field alignment. If the struct is packed, returns 0. pub fn alignment( field: Field, - target: Target, + mod: *const Module, layout: std.builtin.Type.ContainerLayout, ) u32 { if (field.abi_align != 0) { @@ -1038,24 +1054,26 @@ pub const Struct = struct { return field.abi_align; } + const target = mod.getTarget(); + switch (layout) { .Packed => return 0, .Auto => { if (target.ofmt == .c) { - return alignmentExtern(field, target); + return alignmentExtern(field, mod); } else { - return field.ty.abiAlignment(target); + return field.ty.abiAlignment(mod); } }, - .Extern => return alignmentExtern(field, target), + .Extern => return alignmentExtern(field, mod), } } - pub fn alignmentExtern(field: Field, target: Target) u32 { + pub fn alignmentExtern(field: Field, mod: *const Module) u32 { // This logic is duplicated in Type.abiAlignmentAdvanced. - const ty_abi_align = field.ty.abiAlignment(target); + const ty_abi_align = field.ty.abiAlignment(mod); - if (field.ty.isAbiInt() and field.ty.intInfo(target).bits >= 128) { + if (field.ty.isAbiInt(mod) and field.ty.intInfo(mod).bits >= 128) { // The C ABI requires 128 bit integer fields of structs // to be 16-bytes aligned. 
return @max(ty_abi_align, 16); @@ -1132,7 +1150,7 @@ pub const Struct = struct { }; } - pub fn packedFieldBitOffset(s: Struct, target: Target, index: usize) u16 { + pub fn packedFieldBitOffset(s: Struct, mod: *const Module, index: usize) u16 { assert(s.layout == .Packed); assert(s.haveLayout()); var bit_sum: u64 = 0; @@ -1140,12 +1158,13 @@ pub const Struct = struct { if (i == index) { return @intCast(u16, bit_sum); } - bit_sum += field.ty.bitSize(target); + bit_sum += field.ty.bitSize(mod); } unreachable; // index out of bounds } pub const RuntimeFieldIterator = struct { + module: *const Module, struct_obj: *const Struct, index: u32 = 0, @@ -1155,6 +1174,7 @@ pub const Struct = struct { }; pub fn next(it: *RuntimeFieldIterator) ?FieldAndIndex { + const mod = it.module; while (true) { var i = it.index; it.index += 1; @@ -1167,15 +1187,18 @@ pub const Struct = struct { } const field = it.struct_obj.fields.values()[i]; - if (!field.is_comptime and field.ty.hasRuntimeBits()) { + if (!field.is_comptime and field.ty.hasRuntimeBits(mod)) { return FieldAndIndex{ .index = i, .field = field }; } } } }; - pub fn runtimeFieldIterator(s: *const Struct) RuntimeFieldIterator { - return .{ .struct_obj = s }; + pub fn runtimeFieldIterator(s: *const Struct, module: *const Module) RuntimeFieldIterator { + return .{ + .struct_obj = s, + .module = module, + }; } }; @@ -1323,9 +1346,9 @@ pub const Union = struct { /// Returns the field alignment, assuming the union is not packed. /// Keep implementation in sync with `Sema.unionFieldAlignment`. /// Prefer to call that function instead of this one during Sema. - pub fn normalAlignment(field: Field, target: Target) u32 { + pub fn normalAlignment(field: Field, mod: *const Module) u32 { if (field.abi_align == 0) { - return field.ty.abiAlignment(target); + return field.ty.abiAlignment(mod); } else { return field.abi_align; } @@ -1383,22 +1406,22 @@ pub const Union = struct { }; } - pub fn hasAllZeroBitFieldTypes(u: Union) bool { + pub fn hasAllZeroBitFieldTypes(u: Union, mod: *const Module) bool { assert(u.haveFieldTypes()); for (u.fields.values()) |field| { - if (field.ty.hasRuntimeBits()) return false; + if (field.ty.hasRuntimeBits(mod)) return false; } return true; } - pub fn mostAlignedField(u: Union, target: Target) u32 { + pub fn mostAlignedField(u: Union, mod: *const Module) u32 { assert(u.haveFieldTypes()); var most_alignment: u32 = 0; var most_index: usize = undefined; for (u.fields.values(), 0..) |field, i| { - if (!field.ty.hasRuntimeBits()) continue; + if (!field.ty.hasRuntimeBits(mod)) continue; - const field_align = field.normalAlignment(target); + const field_align = field.normalAlignment(mod); if (field_align > most_alignment) { most_alignment = field_align; most_index = i; @@ -1408,20 +1431,20 @@ pub const Union = struct { } /// Returns 0 if the union is represented with 0 bits at runtime. 
- pub fn abiAlignment(u: Union, target: Target, have_tag: bool) u32 { + pub fn abiAlignment(u: Union, mod: *const Module, have_tag: bool) u32 { var max_align: u32 = 0; - if (have_tag) max_align = u.tag_ty.abiAlignment(target); + if (have_tag) max_align = u.tag_ty.abiAlignment(mod); for (u.fields.values()) |field| { - if (!field.ty.hasRuntimeBits()) continue; + if (!field.ty.hasRuntimeBits(mod)) continue; - const field_align = field.normalAlignment(target); + const field_align = field.normalAlignment(mod); max_align = @max(max_align, field_align); } return max_align; } - pub fn abiSize(u: Union, target: Target, have_tag: bool) u64 { - return u.getLayout(target, have_tag).abi_size; + pub fn abiSize(u: Union, mod: *const Module, have_tag: bool) u64 { + return u.getLayout(mod, have_tag).abi_size; } pub const Layout = struct { @@ -1451,7 +1474,7 @@ pub const Union = struct { }; } - pub fn getLayout(u: Union, target: Target, have_tag: bool) Layout { + pub fn getLayout(u: Union, mod: *const Module, have_tag: bool) Layout { assert(u.haveLayout()); var most_aligned_field: u32 = undefined; var most_aligned_field_size: u64 = undefined; @@ -1460,16 +1483,16 @@ pub const Union = struct { var payload_align: u32 = 0; const fields = u.fields.values(); for (fields, 0..) |field, i| { - if (!field.ty.hasRuntimeBitsIgnoreComptime()) continue; + if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue; const field_align = a: { if (field.abi_align == 0) { - break :a field.ty.abiAlignment(target); + break :a field.ty.abiAlignment(mod); } else { break :a field.abi_align; } }; - const field_size = field.ty.abiSize(target); + const field_size = field.ty.abiSize(mod); if (field_size > payload_size) { payload_size = field_size; biggest_field = @intCast(u32, i); @@ -1481,7 +1504,7 @@ pub const Union = struct { } } payload_align = @max(payload_align, 1); - if (!have_tag or !u.tag_ty.hasRuntimeBits()) { + if (!have_tag or !u.tag_ty.hasRuntimeBits(mod)) { return .{ .abi_size = std.mem.alignForwardGeneric(u64, payload_size, payload_align), .abi_align = payload_align, @@ -1497,8 +1520,8 @@ pub const Union = struct { } // Put the tag before or after the payload depending on which one's // alignment is greater. - const tag_size = u.tag_ty.abiSize(target); - const tag_align = @max(1, u.tag_ty.abiAlignment(target)); + const tag_size = u.tag_ty.abiSize(mod); + const tag_align = @max(1, u.tag_ty.abiAlignment(mod)); var size: u64 = 0; var padding: u32 = undefined; if (tag_align >= payload_align) { @@ -2281,7 +2304,7 @@ pub const ErrorMsg = struct { ) !*ErrorMsg { const err_msg = try gpa.create(ErrorMsg); errdefer gpa.destroy(err_msg); - err_msg.* = try init(gpa, src_loc, format, args); + err_msg.* = try ErrorMsg.init(gpa, src_loc, format, args); return err_msg; } @@ -3391,6 +3414,12 @@ pub const CompileError = error{ ComptimeBreak, }; +pub fn init(mod: *Module) !void { + const gpa = mod.gpa; + try mod.error_name_list.append(gpa, "(no error)"); + try mod.intern_pool.init(gpa); +} + pub fn deinit(mod: *Module) void { const gpa = mod.gpa; @@ -3518,6 +3547,8 @@ pub fn deinit(mod: *Module) void { mod.string_literal_table.deinit(gpa); mod.string_literal_bytes.deinit(gpa); + + mod.intern_pool.deinit(gpa); } pub fn destroyDecl(mod: *Module, decl_index: Decl.Index) void { @@ -4277,7 +4308,7 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void { // Update all dependents which have at least this level of dependency. 
// If our type remained the same and we're a function, only update // decls which depend on our body; otherwise, update all dependents. - const update_level: Decl.DepType = if (!type_changed and decl.ty.zigTypeTag() == .Fn) .function_body else .normal; + const update_level: Decl.DepType = if (!type_changed and decl.ty.zigTypeTag(mod) == .Fn) .function_body else .normal; for (decl.dependants.keys(), decl.dependants.values()) |dep_index, dep_type| { if (@enumToInt(dep_type) < @enumToInt(update_level)) continue; @@ -4748,8 +4779,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { decl_tv.ty.fmt(mod), }); } - var buffer: Value.ToTypeBuffer = undefined; - const ty = try decl_tv.val.toType(&buffer).copy(decl_arena_allocator); + const ty = try decl_tv.val.toType().copy(decl_arena_allocator); if (ty.getNamespace() == null) { return sema.fail(&block_scope, ty_src, "type {} has no namespace", .{ty.fmt(mod)}); } @@ -4775,7 +4805,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { var type_changed = true; if (decl.has_tv) { - prev_type_has_bits = decl.ty.isFnOrHasRuntimeBits(); + prev_type_has_bits = decl.ty.isFnOrHasRuntimeBits(mod); type_changed = !decl.ty.eql(decl_tv.ty, mod); if (decl.getFunction()) |prev_func| { prev_is_inline = prev_func.state == .inline_only; @@ -5510,7 +5540,7 @@ pub fn clearDecl( try mod.deleteDeclExports(decl_index); if (decl.has_tv) { - if (decl.ty.isFnOrHasRuntimeBits()) { + if (decl.ty.isFnOrHasRuntimeBits(mod)) { mod.comp.bin_file.freeDecl(decl_index); } if (decl.getInnerNamespace()) |namespace| { @@ -5699,7 +5729,7 @@ pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air { const arg_val = if (arg_tv.val.tag() != .generic_poison) arg_tv.val - else if (arg_tv.ty.onePossibleValue()) |opv| + else if (arg_tv.ty.onePossibleValue(mod)) |opv| opv else break :t arg_tv.ty; @@ -5773,7 +5803,7 @@ pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air { // If we don't get an error return trace from a caller, create our own. if (func.calls_or_awaits_errorable_fn and mod.comp.bin_file.options.error_return_tracing and - !sema.fn_ret_ty.isError()) + !sema.fn_ret_ty.isError(mod)) { sema.setupErrorReturnTrace(&inner_block, last_arg_index) catch |err| switch (err) { // TODO make these unreachable instead of @panic @@ -5995,25 +6025,11 @@ pub fn initNewAnonDecl( // if the Decl is referenced by an instruction or another constant. Otherwise, // the Decl will be garbage collected by the `codegen_decl` task instead of sent // to the linker. - if (typed_value.ty.isFnOrHasRuntimeBits()) { + if (typed_value.ty.isFnOrHasRuntimeBits(mod)) { try mod.comp.anon_work_queue.writeItem(.{ .codegen_decl = new_decl_index }); } } -pub fn makeIntType(arena: Allocator, signedness: std.builtin.Signedness, bits: u16) !Type { - const int_payload = try arena.create(Type.Payload.Bits); - int_payload.* = .{ - .base = .{ - .tag = switch (signedness) { - .signed => .int_signed, - .unsigned => .int_unsigned, - }, - }, - .data = bits, - }; - return Type.initPayload(&int_payload.base); -} - pub fn errNoteNonLazy( mod: *Module, src_loc: SrcLoc, @@ -6779,3 +6795,204 @@ pub fn backendSupportsFeature(mod: Module, feature: Feature) bool { .field_reordering => mod.comp.bin_file.options.use_llvm, }; } + +/// Shortcut for calling `intern_pool.get`. 
+pub fn intern(mod: *Module, key: InternPool.Key) Allocator.Error!InternPool.Index { + return mod.intern_pool.get(mod.gpa, key); +} + +pub fn intType(mod: *Module, signedness: std.builtin.Signedness, bits: u16) Allocator.Error!Type { + const i = try intern(mod, .{ .int_type = .{ + .signedness = signedness, + .bits = bits, + } }); + return i.toType(); +} + +pub fn smallestUnsignedInt(mod: *Module, max: u64) Allocator.Error!Type { + return intType(mod, .unsigned, Type.smallestUnsignedBits(max)); +} + +/// Returns the smallest possible integer type containing both `min` and +/// `max`. Asserts that neither value is undef. +/// TODO: if #3806 is implemented, this becomes trivial +pub fn intFittingRange(mod: *Module, min: Value, max: Value) !Type { + assert(!min.isUndef()); + assert(!max.isUndef()); + + if (std.debug.runtime_safety) { + assert(Value.order(min, max, mod).compare(.lte)); + } + + const sign = min.orderAgainstZero(mod) == .lt; + + const min_val_bits = intBitsForValue(mod, min, sign); + const max_val_bits = intBitsForValue(mod, max, sign); + + return mod.intType( + if (sign) .signed else .unsigned, + @max(min_val_bits, max_val_bits), + ); +} + +/// Given a value representing an integer, returns the number of bits necessary to represent +/// this value in an integer. If `sign` is true, returns the number of bits necessary in a +/// twos-complement integer; otherwise in an unsigned integer. +/// Asserts that `val` is not undef. If `val` is negative, asserts that `sign` is true. +pub fn intBitsForValue(mod: *Module, val: Value, sign: bool) u16 { + assert(!val.isUndef()); + switch (val.tag()) { + .int_big_positive => { + const limbs = val.castTag(.int_big_positive).?.data; + const big: std.math.big.int.Const = .{ .limbs = limbs, .positive = true }; + return @intCast(u16, big.bitCountAbs() + @boolToInt(sign)); + }, + .int_big_negative => { + const limbs = val.castTag(.int_big_negative).?.data; + // Zero is still a possibility, in which case unsigned is fine + for (limbs) |limb| { + if (limb != 0) break; + } else return 0; // val == 0 + assert(sign); + const big: std.math.big.int.Const = .{ .limbs = limbs, .positive = false }; + return @intCast(u16, big.bitCountTwosComp()); + }, + .int_i64 => { + const x = val.castTag(.int_i64).?.data; + if (x >= 0) return Type.smallestUnsignedBits(@intCast(u64, x)); + assert(sign); + return Type.smallestUnsignedBits(@intCast(u64, -x - 1)) + 1; + }, + else => { + const x = val.toUnsignedInt(mod); + return Type.smallestUnsignedBits(x) + @boolToInt(sign); + }, + } +} + +pub const AtomicPtrAlignmentError = error{ + FloatTooBig, + IntTooBig, + BadType, +}; + +pub const AtomicPtrAlignmentDiagnostics = struct { + bits: u16 = undefined, + max_bits: u16 = undefined, +}; + +/// If ABI alignment of `ty` is OK for atomic operations, returns 0. +/// Otherwise returns the alignment required on a pointer for the target +/// to perform atomic operations. +// TODO this function does not take into account CPU features, which can affect +// this value. Audit this! 
+pub fn atomicPtrAlignment( + mod: *const Module, + ty: Type, + diags: *AtomicPtrAlignmentDiagnostics, +) AtomicPtrAlignmentError!u32 { + const target = mod.getTarget(); + const max_atomic_bits: u16 = switch (target.cpu.arch) { + .avr, + .msp430, + .spu_2, + => 16, + + .arc, + .arm, + .armeb, + .hexagon, + .m68k, + .le32, + .mips, + .mipsel, + .nvptx, + .powerpc, + .powerpcle, + .r600, + .riscv32, + .sparc, + .sparcel, + .tce, + .tcele, + .thumb, + .thumbeb, + .x86, + .xcore, + .amdil, + .hsail, + .spir, + .kalimba, + .lanai, + .shave, + .wasm32, + .renderscript32, + .csky, + .spirv32, + .dxil, + .loongarch32, + .xtensa, + => 32, + + .amdgcn, + .bpfel, + .bpfeb, + .le64, + .mips64, + .mips64el, + .nvptx64, + .powerpc64, + .powerpc64le, + .riscv64, + .sparc64, + .s390x, + .amdil64, + .hsail64, + .spir64, + .wasm64, + .renderscript64, + .ve, + .spirv64, + .loongarch64, + => 64, + + .aarch64, + .aarch64_be, + .aarch64_32, + => 128, + + .x86_64 => if (std.Target.x86.featureSetHas(target.cpu.features, .cx16)) 128 else 64, + }; + + const int_ty = switch (ty.zigTypeTag(mod)) { + .Int => ty, + .Enum => ty.intTagType(), + .Float => { + const bit_count = ty.floatBits(target); + if (bit_count > max_atomic_bits) { + diags.* = .{ + .bits = bit_count, + .max_bits = max_atomic_bits, + }; + return error.FloatTooBig; + } + return 0; + }, + .Bool => return 0, + else => { + if (ty.isPtrAtRuntime(mod)) return 0; + return error.BadType; + }, + }; + + const bit_count = int_ty.intInfo(mod).bits; + if (bit_count > max_atomic_bits) { + diags.* = .{ + .bits = bit_count, + .max_bits = max_atomic_bits, + }; + return error.IntTooBig; + } + + return 0; +} diff --git a/src/RangeSet.zig b/src/RangeSet.zig index aa051ff424..2e28a562c6 100644 --- a/src/RangeSet.zig +++ b/src/RangeSet.zig @@ -60,13 +60,14 @@ pub fn spans(self: *RangeSet, first: Value, last: Value, ty: Type) !bool { if (self.ranges.items.len == 0) return false; + const mod = self.module; std.mem.sort(Range, self.ranges.items, LessThanContext{ .ty = ty, - .module = self.module, + .module = mod, }, lessThan); - if (!self.ranges.items[0].first.eql(first, ty, self.module) or - !self.ranges.items[self.ranges.items.len - 1].last.eql(last, ty, self.module)) + if (!self.ranges.items[0].first.eql(first, ty, mod) or + !self.ranges.items[self.ranges.items.len - 1].last.eql(last, ty, mod)) { return false; } @@ -76,18 +77,16 @@ pub fn spans(self: *RangeSet, first: Value, last: Value, ty: Type) !bool { var counter = try std.math.big.int.Managed.init(self.ranges.allocator); defer counter.deinit(); - const target = self.module.getTarget(); - // look for gaps for (self.ranges.items[1..], 0..) |cur, i| { // i starts counting from the second item. 
const prev = self.ranges.items[i]; // prev.last + 1 == cur.first - try counter.copy(prev.last.toBigInt(&space, target)); + try counter.copy(prev.last.toBigInt(&space, mod)); try counter.addScalar(&counter, 1); - const cur_start_int = cur.first.toBigInt(&space, target); + const cur_start_int = cur.first.toBigInt(&space, mod); if (!cur_start_int.eq(counter.toConst())) { return false; } diff --git a/src/Sema.zig b/src/Sema.zig index 9e21bfa83d..9b76fee68e 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -114,6 +114,7 @@ const Package = @import("Package.zig"); const crash_report = @import("crash_report.zig"); const build_options = @import("build_options"); const Compilation = @import("Compilation.zig"); +const InternPool = @import("InternPool.zig"); pub const default_branch_quota = 1000; pub const default_reference_trace_len = 2; @@ -1614,6 +1615,7 @@ fn analyzeBodyInner( }, .@"try" => blk: { if (!block.is_comptime) break :blk try sema.zirTry(block, inst); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; @@ -1621,18 +1623,18 @@ fn analyzeBodyInner( const inline_body = sema.code.extra[extra.end..][0..extra.data.body_len]; const err_union = try sema.resolveInst(extra.data.operand); const err_union_ty = sema.typeOf(err_union); - if (err_union_ty.zigTypeTag() != .ErrorUnion) { + if (err_union_ty.zigTypeTag(mod) != .ErrorUnion) { return sema.fail(block, operand_src, "expected error union type, found '{}'", .{ err_union_ty.fmt(sema.mod), }); } const is_non_err = try sema.analyzeIsNonErrComptimeOnly(block, operand_src, err_union); assert(is_non_err != .none); - const is_non_err_tv = sema.resolveInstConst(block, operand_src, is_non_err, "try operand inside comptime block must be comptime-known") catch |err| { + const is_non_err_val = sema.resolveConstValue(block, operand_src, is_non_err, "try operand inside comptime block must be comptime-known") catch |err| { if (err == error.AnalysisFail and block.comptime_reason != null) try block.comptime_reason.?.explain(sema, sema.err); return err; }; - if (is_non_err_tv.val.toBool()) { + if (is_non_err_val.toBool()) { break :blk try sema.analyzeErrUnionPayload(block, src, err_union_ty, err_union, operand_src, false); } const break_data = (try sema.analyzeBodyBreak(block, inline_body)) orelse @@ -1654,11 +1656,11 @@ fn analyzeBodyInner( const err_union = try sema.analyzeLoad(block, src, operand, operand_src); const is_non_err = try sema.analyzeIsNonErrComptimeOnly(block, operand_src, err_union); assert(is_non_err != .none); - const is_non_err_tv = sema.resolveInstConst(block, operand_src, is_non_err, "try operand inside comptime block must be comptime-known") catch |err| { + const is_non_err_val = sema.resolveConstValue(block, operand_src, is_non_err, "try operand inside comptime block must be comptime-known") catch |err| { if (err == error.AnalysisFail and block.comptime_reason != null) try block.comptime_reason.?.explain(sema, sema.err); return err; }; - if (is_non_err_tv.val.toBool()) { + if (is_non_err_val.toBool()) { break :blk try sema.analyzeErrUnionPayloadPtr(block, src, operand, false, false); } const break_data = (try sema.analyzeBodyBreak(block, inline_body)) orelse @@ -1721,17 +1723,12 @@ fn analyzeBodyInner( } pub fn resolveInst(sema: *Sema, zir_ref: Zir.Inst.Ref) !Air.Inst.Ref { - var i: usize = @enumToInt(zir_ref); - + const i = @enumToInt(zir_ref); // First section of indexes correspond to a set 
-    if (i < Zir.Inst.Ref.typed_value_map.len) {
-        // We intentionally map the same indexes to the same values between ZIR and AIR.
-        return zir_ref;
-    }
-    i -= Zir.Inst.Ref.typed_value_map.len;
-
-    // Finally, the last section of indexes refers to the map of ZIR=>AIR.
-    const inst = sema.inst_map.get(@intCast(u32, i)).?;
+    // We intentionally map the same indexes to the same values between ZIR and AIR.
+    if (i < InternPool.static_len) return @intToEnum(Air.Inst.Ref, i);
+    // The last section of indexes refers to the map of ZIR => AIR.
+    const inst = sema.inst_map.get(i - InternPool.static_len).?;
     const ty = sema.typeOf(inst);
     if (ty.tag() == .generic_poison) return error.GenericPoison;
     return inst;
 }
@@ -1766,9 +1763,8 @@ pub fn resolveConstString(
 }
 pub fn resolveType(sema: *Sema, block: *Block, src: LazySrcLoc, zir_ref: Zir.Inst.Ref) !Type {
-    assert(zir_ref != .var_args_param);
     const air_inst = try sema.resolveInst(zir_ref);
-    assert(air_inst != .var_args_param);
+    assert(air_inst != .var_args_param_type);
     const ty = try sema.analyzeAsType(block, src, air_inst);
     if (ty.tag() == .generic_poison) return error.GenericPoison;
     return ty;
 }
@@ -1783,8 +1779,7 @@ fn analyzeAsType(
     const wanted_type = Type.initTag(.type);
     const coerced_inst = try sema.coerce(block, wanted_type, air_inst, src);
     const val = try sema.resolveConstValue(block, src, coerced_inst, "types must be comptime-known");
-    var buffer: Value.ToTypeBuffer = undefined;
-    const ty = val.toType(&buffer);
+    const ty = val.toType();
     return ty.copy(sema.arena);
 }
@@ -1950,12 +1945,12 @@ fn resolveMaybeUndefValAllowVariablesMaybeRuntime(
     make_runtime: *bool,
 ) CompileError!?Value {
     // First section of indexes correspond to a set number of constant values.
-    var i: usize = @enumToInt(inst);
-    if (i < Air.Inst.Ref.typed_value_map.len) {
-        return Air.Inst.Ref.typed_value_map[i].val;
+    const int = @enumToInt(inst);
+    if (int < InternPool.static_len) {
+        return @intToEnum(InternPool.Index, int).toValue();
     }
-    i -= Air.Inst.Ref.typed_value_map.len;
+    const i = int - InternPool.static_len;
     const air_tags = sema.air_instructions.items(.tag);
     if (try sema.typeHasOnePossibleValue(sema.typeOf(inst))) |opv| {
         if (air_tags[i] == .constant) {
@@ -2010,13 +2005,14 @@ fn failWithExpectedOptionalType(sema: *Sema, block: *Block, src: LazySrcLoc, opt
 }
 fn failWithArrayInitNotSupported(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError {
+    const mod = sema.mod;
     const msg = msg: {
         const msg = try sema.errMsg(block, src, "type '{}' does not support array initialization syntax", .{
-            ty.fmt(sema.mod),
+            ty.fmt(mod),
         });
         errdefer msg.destroy(sema.gpa);
         if (ty.isSlice()) {
-            try sema.errNote(block, src, msg, "inferred array length is specified with an underscore: '[_]{}'", .{ty.elemType2().fmt(sema.mod)});
+            try sema.errNote(block, src, msg, "inferred array length is specified with an underscore: '[_]{}'", .{ty.elemType2(mod).fmt(mod)});
         }
         break :msg msg;
     };
@@ -2042,7 +2038,8 @@ fn failWithErrorSetCodeMissing(
 }
 fn failWithIntegerOverflow(sema: *Sema, block: *Block, src: LazySrcLoc, int_ty: Type, val: Value, vector_index: usize) CompileError {
-    if (int_ty.zigTypeTag() == .Vector) {
+    const mod = sema.mod;
+    if (int_ty.zigTypeTag(mod) == .Vector) {
         const msg = msg: {
             const msg = try sema.errMsg(block, src, "overflow of vector type '{}' with value '{}'", .{
                 int_ty.fmt(sema.mod), val.fmtValue(int_ty, sema.mod),
@@ -2084,12 +2081,13 @@ fn failWithUseOfAsync(sema: *Sema, block: *Block, src: LazySrcLoc) CompileError
 }
 fn
failWithInvalidFieldAccess(sema: *Sema, block: *Block, src: LazySrcLoc, object_ty: Type, field_name: []const u8) CompileError { + const mod = sema.mod; const inner_ty = if (object_ty.isSinglePointer()) object_ty.childType() else object_ty; - if (inner_ty.zigTypeTag() == .Optional) opt: { + if (inner_ty.zigTypeTag(mod) == .Optional) opt: { var buf: Type.Payload.ElemType = undefined; const child_ty = inner_ty.optionalChild(&buf); - if (!typeSupportsFieldAccess(child_ty, field_name)) break :opt; + if (!typeSupportsFieldAccess(mod, child_ty, field_name)) break :opt; const msg = msg: { const msg = try sema.errMsg(block, src, "optional type '{}' does not support field access", .{object_ty.fmt(sema.mod)}); errdefer msg.destroy(sema.gpa); @@ -2097,9 +2095,9 @@ fn failWithInvalidFieldAccess(sema: *Sema, block: *Block, src: LazySrcLoc, objec break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); - } else if (inner_ty.zigTypeTag() == .ErrorUnion) err: { + } else if (inner_ty.zigTypeTag(mod) == .ErrorUnion) err: { const child_ty = inner_ty.errorUnionPayload(); - if (!typeSupportsFieldAccess(child_ty, field_name)) break :err; + if (!typeSupportsFieldAccess(mod, child_ty, field_name)) break :err; const msg = msg: { const msg = try sema.errMsg(block, src, "error union type '{}' does not support field access", .{object_ty.fmt(sema.mod)}); errdefer msg.destroy(sema.gpa); @@ -2111,14 +2109,14 @@ fn failWithInvalidFieldAccess(sema: *Sema, block: *Block, src: LazySrcLoc, objec return sema.fail(block, src, "type '{}' does not support field access", .{object_ty.fmt(sema.mod)}); } -fn typeSupportsFieldAccess(ty: Type, field_name: []const u8) bool { - switch (ty.zigTypeTag()) { +fn typeSupportsFieldAccess(mod: *const Module, ty: Type, field_name: []const u8) bool { + switch (ty.zigTypeTag(mod)) { .Array => return mem.eql(u8, field_name, "len"), .Pointer => { const ptr_info = ty.ptrInfo().data; if (ptr_info.size == .Slice) { return mem.eql(u8, field_name, "ptr") or mem.eql(u8, field_name, "len"); - } else if (ptr_info.pointee_type.zigTypeTag() == .Array) { + } else if (ptr_info.pointee_type.zigTypeTag(mod) == .Array) { return mem.eql(u8, field_name, "len"); } else return false; }, @@ -2352,10 +2350,10 @@ fn analyzeAsInt( dest_ty: Type, reason: []const u8, ) !u64 { + const mod = sema.mod; const coerced = try sema.coerce(block, dest_ty, air_ref, src); const val = try sema.resolveConstValue(block, src, coerced, reason); - const target = sema.mod.getTarget(); - return (try val.getUnsignedIntAdvanced(target, sema)).?; + return (try val.getUnsignedIntAdvanced(mod, sema)).?; } // Returns a compile error if the value has tag `variable`. 
See `resolveInstValue` for @@ -2926,23 +2924,23 @@ fn zirEnumDecl( if (tag_type_ref != .none) { const ty = try sema.resolveType(block, tag_ty_src, tag_type_ref); - if (ty.zigTypeTag() != .Int and ty.zigTypeTag() != .ComptimeInt) { + if (ty.zigTypeTag(mod) != .Int and ty.zigTypeTag(mod) != .ComptimeInt) { return sema.fail(block, tag_ty_src, "expected integer tag type, found '{}'", .{ty.fmt(sema.mod)}); } enum_obj.tag_ty = try ty.copy(decl_arena_allocator); enum_obj.tag_ty_inferred = false; } else if (fields_len == 0) { - enum_obj.tag_ty = try Type.Tag.int_unsigned.create(decl_arena_allocator, 0); + enum_obj.tag_ty = try mod.intType(.unsigned, 0); enum_obj.tag_ty_inferred = true; } else { const bits = std.math.log2_int_ceil(usize, fields_len); - enum_obj.tag_ty = try Type.Tag.int_unsigned.create(decl_arena_allocator, bits); + enum_obj.tag_ty = try mod.intType(.unsigned, bits); enum_obj.tag_ty_inferred = true; } } - if (small.nonexhaustive and enum_obj.tag_ty.zigTypeTag() != .ComptimeInt) { - if (fields_len > 1 and std.math.log2_int(u64, fields_len) == enum_obj.tag_ty.bitSize(sema.mod.getTarget())) { + if (small.nonexhaustive and enum_obj.tag_ty.zigTypeTag(mod) != .ComptimeInt) { + if (fields_len > 1 and std.math.log2_int(u64, fields_len) == enum_obj.tag_ty.bitSize(mod)) { return sema.fail(block, src, "non-exhaustive enum specifies every value", .{}); } } @@ -3319,7 +3317,8 @@ fn ensureResultUsed( ty: Type, src: LazySrcLoc, ) CompileError!void { - switch (ty.zigTypeTag()) { + const mod = sema.mod; + switch (ty.zigTypeTag(mod)) { .Void, .NoReturn => return, .ErrorSet, .ErrorUnion => { const msg = msg: { @@ -3347,11 +3346,12 @@ fn zirEnsureResultNonError(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const operand = try sema.resolveInst(inst_data.operand); const src = inst_data.src(); const operand_ty = sema.typeOf(operand); - switch (operand_ty.zigTypeTag()) { + switch (operand_ty.zigTypeTag(mod)) { .ErrorSet, .ErrorUnion => { const msg = msg: { const msg = try sema.errMsg(block, src, "error is discarded", .{}); @@ -3369,16 +3369,17 @@ fn zirEnsureErrUnionPayloadVoid(sema: *Sema, block: *Block, inst: Zir.Inst.Index const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand = try sema.resolveInst(inst_data.operand); const operand_ty = sema.typeOf(operand); - const err_union_ty = if (operand_ty.zigTypeTag() == .Pointer) + const err_union_ty = if (operand_ty.zigTypeTag(mod) == .Pointer) operand_ty.childType() else operand_ty; - if (err_union_ty.zigTypeTag() != .ErrorUnion) return; - const payload_ty = err_union_ty.errorUnionPayload().zigTypeTag(); + if (err_union_ty.zigTypeTag(mod) != .ErrorUnion) return; + const payload_ty = err_union_ty.errorUnionPayload().zigTypeTag(mod); if (payload_ty != .Void and payload_ty != .NoReturn) { const msg = msg: { const msg = try sema.errMsg(block, src, "error union payload is ignored", .{}); @@ -3920,19 +3921,20 @@ fn zirArrayBasePtr( block: *Block, inst: Zir.Inst.Index, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const start_ptr = try sema.resolveInst(inst_data.operand); var base_ptr = start_ptr; - while (true) switch (sema.typeOf(base_ptr).childType().zigTypeTag()) { + while (true) 
switch (sema.typeOf(base_ptr).childType().zigTypeTag(mod)) { .ErrorUnion => base_ptr = try sema.analyzeErrUnionPayloadPtr(block, src, base_ptr, false, true), .Optional => base_ptr = try sema.analyzeOptionalPayloadPtr(block, src, base_ptr, false, true), else => break, }; const elem_ty = sema.typeOf(base_ptr).childType(); - switch (elem_ty.zigTypeTag()) { + switch (elem_ty.zigTypeTag(mod)) { .Array, .Vector => return base_ptr, .Struct => if (elem_ty.isTuple()) { // TODO validate element count @@ -3948,19 +3950,20 @@ fn zirFieldBasePtr( block: *Block, inst: Zir.Inst.Index, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const start_ptr = try sema.resolveInst(inst_data.operand); var base_ptr = start_ptr; - while (true) switch (sema.typeOf(base_ptr).childType().zigTypeTag()) { + while (true) switch (sema.typeOf(base_ptr).childType().zigTypeTag(mod)) { .ErrorUnion => base_ptr = try sema.analyzeErrUnionPayloadPtr(block, src, base_ptr, false, true), .Optional => base_ptr = try sema.analyzeOptionalPayloadPtr(block, src, base_ptr, false, true), else => break, }; const elem_ty = sema.typeOf(base_ptr).childType(); - switch (elem_ty.zigTypeTag()) { + switch (elem_ty.zigTypeTag(mod)) { .Struct, .Union => return base_ptr, else => {}, } @@ -3968,6 +3971,7 @@ fn zirFieldBasePtr( } fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const gpa = sema.gpa; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.MultiOp, inst_data.payload_index); @@ -3991,7 +3995,7 @@ fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. const object_ty = sema.typeOf(object); // Each arg could be an indexable, or a range, in which case the length // is passed directly as an integer. - const is_int = switch (object_ty.zigTypeTag()) { + const is_int = switch (object_ty.zigTypeTag(mod)) { .Int, .ComptimeInt => true, else => false, }; @@ -4000,7 +4004,7 @@ fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. .input_index = i, } }; const arg_len_uncoerced = if (is_int) object else l: { - if (!object_ty.isIndexable()) { + if (!object_ty.isIndexable(mod)) { // Instead of using checkIndexable we customize this error. const msg = msg: { const msg = try sema.errMsg(block, arg_src, "type '{}' is not indexable and not a range", .{object_ty.fmt(sema.mod)}); @@ -4010,7 +4014,7 @@ fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. }; return sema.failWithOwnedErrorMsg(msg); } - if (!object_ty.indexableHasLen()) continue; + if (!object_ty.indexableHasLen(mod)) continue; break :l try sema.fieldVal(block, arg_src, object, "len", arg_src); }; @@ -4061,7 +4065,7 @@ fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. const object_ty = sema.typeOf(object); // Each arg could be an indexable, or a range, in which case the length // is passed directly as an integer. 
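As an illustration (mine, not Sema's code): integer operands of a multi-object `for` are taken as lengths directly, indexable operands contribute their `.len`, and all resulting lengths must agree. A simplified model of that rule:

    // Returns the common length, or null where Sema would instead emit a
    // non-matching-lengths style compile error.
    fn commonForLen(lengths: []const u64) ?u64 {
        if (lengths.len == 0) return null;
        const expected = lengths[0];
        for (lengths[1..]) |len| {
            if (len != expected) return null;
        }
        return expected;
    }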
- switch (object_ty.zigTypeTag()) { + switch (object_ty.zigTypeTag(mod)) { .Int, .ComptimeInt => continue, else => {}, } @@ -4096,13 +4100,14 @@ fn validateArrayInitTy( block: *Block, inst: Zir.Inst.Index, ) CompileError!void { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const ty_src: LazySrcLoc = .{ .node_offset_init_ty = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.ArrayInit, inst_data.payload_index).data; const ty = try sema.resolveType(block, ty_src, extra.ty); - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Array => { const array_len = ty.arrayLen(); if (extra.init_count != array_len) { @@ -4141,11 +4146,12 @@ fn validateStructInitTy( block: *Block, inst: Zir.Inst.Index, ) CompileError!void { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const ty = try sema.resolveType(block, src, inst_data.operand); - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Struct, .Union => return, else => {}, } @@ -4160,6 +4166,7 @@ fn zirValidateStructInit( const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const validate_inst = sema.code.instructions.items(.data)[inst].pl_node; const init_src = validate_inst.src(); const validate_extra = sema.code.extraData(Zir.Inst.Block, validate_inst.payload_index); @@ -4168,7 +4175,7 @@ fn zirValidateStructInit( const field_ptr_extra = sema.code.extraData(Zir.Inst.Field, field_ptr_data.payload_index).data; const object_ptr = try sema.resolveInst(field_ptr_extra.lhs); const agg_ty = sema.typeOf(object_ptr).childType(); - switch (agg_ty.zigTypeTag()) { + switch (agg_ty.zigTypeTag(mod)) { .Struct => return sema.validateStructInit( block, agg_ty, @@ -4589,6 +4596,7 @@ fn zirValidateArrayInit( block: *Block, inst: Zir.Inst.Index, ) CompileError!void { + const mod = sema.mod; const validate_inst = sema.code.instructions.items(.data)[inst].pl_node; const init_src = validate_inst.src(); const validate_extra = sema.code.extraData(Zir.Inst.Block, validate_inst.payload_index); @@ -4599,7 +4607,7 @@ fn zirValidateArrayInit( const array_ty = sema.typeOf(array_ptr).childType(); const array_len = array_ty.arrayLen(); - if (instrs.len != array_len) switch (array_ty.zigTypeTag()) { + if (instrs.len != array_len) switch (array_ty.zigTypeTag(mod)) { .Struct => { var root_msg: ?*Module.ErrorMsg = null; errdefer if (root_msg) |msg| msg.destroy(sema.gpa); @@ -4667,7 +4675,7 @@ fn zirValidateArrayInit( // Determine whether the value stored to this pointer is comptime-known. 
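Aside: for tuples, `structFieldValueComptime(mod, i)` (as used just below) yields the field's value when it is comptime-known, so the validator can record `element_vals[i]` directly and skip any runtime store for that element.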
if (array_ty.isTuple()) { - if (array_ty.structFieldValueComptime(i)) |opv| { + if (array_ty.structFieldValueComptime(mod, i)) |opv| { element_vals[i] = opv; continue; } @@ -4770,12 +4778,13 @@ fn zirValidateArrayInit( } fn zirValidateDeref(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand = try sema.resolveInst(inst_data.operand); const operand_ty = sema.typeOf(operand); - if (operand_ty.zigTypeTag() != .Pointer) { + if (operand_ty.zigTypeTag(mod) != .Pointer) { return sema.fail(block, src, "cannot dereference non-pointer type '{}'", .{operand_ty.fmt(sema.mod)}); } else switch (operand_ty.ptrSize()) { .One, .C => {}, @@ -4788,7 +4797,7 @@ fn zirValidateDeref(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErr return; } - const elem_ty = operand_ty.elemType2(); + const elem_ty = operand_ty.elemType2(mod); if (try sema.resolveMaybeUndefVal(operand)) |val| { if (val.isUndef()) { return sema.fail(block, src, "cannot dereference undefined value", .{}); @@ -4818,7 +4827,8 @@ fn failWithBadMemberAccess( field_src: LazySrcLoc, field_name: []const u8, ) CompileError { - const kw_name = switch (agg_ty.zigTypeTag()) { + const mod = sema.mod; + const kw_name = switch (agg_ty.zigTypeTag(mod)) { .Union => "union", .Struct => "struct", .Opaque => "opaque", @@ -4894,8 +4904,9 @@ fn failWithBadUnionFieldAccess( } fn addDeclaredHereNote(sema: *Sema, parent: *Module.ErrorMsg, decl_ty: Type) !void { - const src_loc = decl_ty.declSrcLocOrNull(sema.mod) orelse return; - const category = switch (decl_ty.zigTypeTag()) { + const mod = sema.mod; + const src_loc = decl_ty.declSrcLocOrNull(mod) orelse return; + const category = switch (decl_ty.zigTypeTag(mod)) { .Union => "union", .Struct => "struct", .Enum => "enum", @@ -4903,7 +4914,7 @@ fn addDeclaredHereNote(sema: *Sema, parent: *Module.ErrorMsg, decl_ty: Type) !vo .ErrorSet => "error set", else => unreachable, }; - try sema.mod.errNoteNonLazy(src_loc, parent, "{s} declared here", .{category}); + try mod.errNoteNonLazy(src_loc, parent, "{s} declared here", .{category}); } fn zirStoreToBlockPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { @@ -5028,6 +5039,7 @@ fn zirStoreNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!v const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const zir_tags = sema.code.instructions.items(.tag); const zir_datas = sema.code.instructions.items(.data); const inst_data = zir_datas[inst].pl_node; @@ -5046,9 +5058,9 @@ fn zirStoreNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!v // %b = store(%a, %c) // Where %c is an error union or error set. In such case we need to add // to the current function's inferred error set, if any. 
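For background, a user-level illustration (example is mine, not from the patch) of why `ret`-bound stores get this treatment: with an inferred error set, every error that can reach a return must be folded into the function's set.

    const std = @import("std");

    // `parse` has an inferred error set (`!u32`): error.Empty is added to it
    // directly, and std.fmt.parseInt's errors are merged in by the return.
    fn parse(s: []const u8) !u32 {
        if (s.len == 0) return error.Empty;
        return std.fmt.parseInt(u32, s, 10);
    }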
- if (is_ret and (sema.typeOf(operand).zigTypeTag() == .ErrorUnion or - sema.typeOf(operand).zigTypeTag() == .ErrorSet) and - sema.fn_ret_ty.zigTypeTag() == .ErrorUnion) + if (is_ret and (sema.typeOf(operand).zigTypeTag(mod) == .ErrorUnion or + sema.typeOf(operand).zigTypeTag(mod) == .ErrorSet) and + sema.fn_ret_ty.zigTypeTag(mod) == .ErrorUnion) { try sema.addToInferredErrorSet(operand); } @@ -6270,6 +6282,7 @@ fn zirCall( const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const callee_src: LazySrcLoc = .{ .node_offset_call_func = inst_data.src_node }; const call_src = inst_data.src(); @@ -6342,7 +6355,7 @@ fn zirCall( sema.inst_map.putAssumeCapacity(inst, inst: { if (arg_index >= fn_params_len) - break :inst Air.Inst.Ref.var_args_param; + break :inst Air.Inst.Ref.var_args_param_type; if (func_ty_info.param_types[arg_index].tag() == .generic_poison) break :inst Air.Inst.Ref.generic_poison_type; @@ -6352,10 +6365,10 @@ fn zirCall( const resolved = try sema.resolveBody(block, args_body[arg_start..arg_end], inst); const resolved_ty = sema.typeOf(resolved); - if (resolved_ty.zigTypeTag() == .NoReturn) { + if (resolved_ty.zigTypeTag(mod) == .NoReturn) { return resolved; } - if (resolved_ty.isError()) { + if (resolved_ty.isError(mod)) { input_is_error = true; } resolved_args[arg_index] = resolved; @@ -6380,7 +6393,7 @@ fn zirCall( // If any input is an error-type, we might need to pop any trace it generated. Otherwise, we only // need to clean-up our own trace if we were passed to a non-error-handling expression. - if (input_is_error or (pop_error_return_trace and modifier != .always_tail and return_ty.isError())) { + if (input_is_error or (pop_error_return_trace and modifier != .always_tail and return_ty.isError(mod))) { const unresolved_stack_trace_ty = try sema.getBuiltinType("StackTrace"); const stack_trace_ty = try sema.resolveTypeFields(unresolved_stack_trace_ty); const field_index = try sema.structFieldIndex(block, stack_trace_ty, "index", call_src); @@ -6417,20 +6430,21 @@ fn checkCallArgumentCount( total_args: usize, member_fn: bool, ) !Type { + const mod = sema.mod; const func_ty = func_ty: { - switch (callee_ty.zigTypeTag()) { + switch (callee_ty.zigTypeTag(mod)) { .Fn => break :func_ty callee_ty, .Pointer => { const ptr_info = callee_ty.ptrInfo().data; - if (ptr_info.size == .One and ptr_info.pointee_type.zigTypeTag() == .Fn) { + if (ptr_info.size == .One and ptr_info.pointee_type.zigTypeTag(mod) == .Fn) { break :func_ty ptr_info.pointee_type; } }, .Optional => { var buf: Type.Payload.ElemType = undefined; const opt_child = callee_ty.optionalChild(&buf); - if (opt_child.zigTypeTag() == .Fn or (opt_child.isSinglePointer() and - opt_child.childType().zigTypeTag() == .Fn)) + if (opt_child.zigTypeTag(mod) == .Fn or (opt_child.isSinglePointer() and + opt_child.childType().zigTypeTag(mod) == .Fn)) { const msg = msg: { const msg = try sema.errMsg(block, func_src, "cannot call optional type '{}'", .{ @@ -6488,13 +6502,14 @@ fn callBuiltin( modifier: std.builtin.CallModifier, args: []const Air.Inst.Ref, ) !void { + const mod = sema.mod; const callee_ty = sema.typeOf(builtin_fn); const func_ty = func_ty: { - switch (callee_ty.zigTypeTag()) { + switch (callee_ty.zigTypeTag(mod)) { .Fn => break :func_ty callee_ty, .Pointer => { const ptr_info = callee_ty.ptrInfo().data; - if (ptr_info.size == .One and ptr_info.pointee_type.zigTypeTag() == .Fn) { + if (ptr_info.size == .One and 
ptr_info.pointee_type.zigTypeTag(mod) == .Fn) { break :func_ty ptr_info.pointee_type; } }, @@ -6715,7 +6730,7 @@ fn analyzeCall( @as([]const u8, if (is_comptime_call) "comptime" else "inline"), }), else => { - assert(callee_ty.isPtrAtRuntime()); + assert(callee_ty.isPtrAtRuntime(mod)); return sema.fail(block, call_src, "{s} call of function pointer", .{ @as([]const u8, if (is_comptime_call) "comptime" else "inline"), }); @@ -6978,7 +6993,7 @@ fn analyzeCall( break :result try sema.analyzeBlockBody(block, call_src, &child_block, merges); }; - if (!is_comptime_call and !block.is_typeof and sema.typeOf(result).zigTypeTag() != .NoReturn) { + if (!is_comptime_call and !block.is_typeof and sema.typeOf(result).zigTypeTag(mod) != .NoReturn) { try sema.emitDbgInline( block, module_fn, @@ -7068,7 +7083,7 @@ fn analyzeCall( if (call_dbg_node) |some| try sema.zirDbgStmt(block, some); try sema.queueFullTypeResolution(func_ty_info.return_type); - if (sema.owner_func != null and func_ty_info.return_type.isError()) { + if (sema.owner_func != null and func_ty_info.return_type.isError(mod)) { sema.owner_func.?.calls_or_awaits_errorable_fn = true; } @@ -7301,8 +7316,9 @@ fn analyzeGenericCallArg( new_fn_info: Type.Payload.Function.Data, runtime_i: *u32, ) !void { + const mod = sema.mod; const is_runtime = comptime_arg.val.tag() == .generic_poison and - comptime_arg.ty.hasRuntimeBits() and + comptime_arg.ty.hasRuntimeBits(mod) and !(try sema.typeRequiresComptime(comptime_arg.ty)); if (is_runtime) { const param_ty = new_fn_info.param_types[runtime_i.*]; @@ -7591,7 +7607,7 @@ fn instantiateGenericCall( if (call_dbg_node) |some| try sema.zirDbgStmt(block, some); - if (sema.owner_func != null and new_fn_info.return_type.isError()) { + if (sema.owner_func != null and new_fn_info.return_type.isError(mod)) { sema.owner_func.?.calls_or_awaits_errorable_fn = true; } @@ -7872,8 +7888,9 @@ fn zirIntType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const int_type = sema.code.instructions.items(.data)[inst].int_type; - const ty = try Module.makeIntType(sema.arena, int_type.signedness, int_type.bit_count); + const ty = try mod.intType(int_type.signedness, int_type.bit_count); return sema.addType(ty); } @@ -7882,12 +7899,13 @@ fn zirOptionalType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const operand_src: LazySrcLoc = .{ .node_offset_un_op = inst_data.src_node }; const child_type = try sema.resolveType(block, operand_src, inst_data.operand); - if (child_type.zigTypeTag() == .Opaque) { + if (child_type.zigTypeTag(mod) == .Opaque) { return sema.fail(block, operand_src, "opaque type '{}' cannot be optional", .{child_type.fmt(sema.mod)}); - } else if (child_type.zigTypeTag() == .Null) { + } else if (child_type.zigTypeTag(mod) == .Null) { return sema.fail(block, operand_src, "type '{}' cannot be optional", .{child_type.fmt(sema.mod)}); } const opt_type = try Type.optional(sema.arena, child_type); @@ -7896,14 +7914,15 @@ fn zirOptionalType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro } fn zirElemTypeIndex(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const bin = sema.code.instructions.items(.data)[inst].bin; const indexable_ty = try sema.resolveType(block, .unneeded, bin.lhs); - 
assert(indexable_ty.isIndexable()); // validated by a previous instruction - if (indexable_ty.zigTypeTag() == .Struct) { + assert(indexable_ty.isIndexable(mod)); // validated by a previous instruction + if (indexable_ty.zigTypeTag(mod) == .Struct) { const elem_type = indexable_ty.structFieldType(@enumToInt(bin.rhs)); return sema.addType(elem_type); } else { - const elem_type = indexable_ty.elemType2(); + const elem_type = indexable_ty.elemType2(mod); return sema.addType(elem_type); } } @@ -7960,9 +7979,10 @@ fn zirArrayTypeSentinel(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compil } fn validateArrayElemType(sema: *Sema, block: *Block, elem_type: Type, elem_src: LazySrcLoc) !void { - if (elem_type.zigTypeTag() == .Opaque) { + const mod = sema.mod; + if (elem_type.zigTypeTag(mod) == .Opaque) { return sema.fail(block, elem_src, "array of opaque type '{}' not allowed", .{elem_type.fmt(sema.mod)}); - } else if (elem_type.zigTypeTag() == .NoReturn) { + } else if (elem_type.zigTypeTag(mod) == .NoReturn) { return sema.fail(block, elem_src, "array of 'noreturn' not allowed", .{}); } } @@ -7986,6 +8006,7 @@ fn zirErrorUnionType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; @@ -7993,7 +8014,7 @@ fn zirErrorUnionType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr const error_set = try sema.resolveType(block, lhs_src, extra.lhs); const payload = try sema.resolveType(block, rhs_src, extra.rhs); - if (error_set.zigTypeTag() != .ErrorSet) { + if (error_set.zigTypeTag(mod) != .ErrorSet) { return sema.fail(block, lhs_src, "expected error set type, found '{}'", .{ error_set.fmt(sema.mod), }); @@ -8004,11 +8025,12 @@ fn zirErrorUnionType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr } fn validateErrorUnionPayloadType(sema: *Sema, block: *Block, payload_ty: Type, payload_src: LazySrcLoc) !void { - if (payload_ty.zigTypeTag() == .Opaque) { + const mod = sema.mod; + if (payload_ty.zigTypeTag(mod) == .Opaque) { return sema.fail(block, payload_src, "error union with payload of opaque type '{}' not allowed", .{ payload_ty.fmt(sema.mod), }); - } else if (payload_ty.zigTypeTag() == .ErrorSet) { + } else if (payload_ty.zigTypeTag(mod) == .ErrorSet) { return sema.fail(block, payload_src, "error union with payload of error set type '{}' not allowed", .{ payload_ty.fmt(sema.mod), }); @@ -8089,10 +8111,10 @@ fn zirIntToError(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node }; const uncasted_operand = try sema.resolveInst(extra.operand); const operand = try sema.coerce(block, Type.err_int, uncasted_operand, operand_src); - const target = sema.mod.getTarget(); + const mod = sema.mod; if (try sema.resolveDefinedValue(block, operand_src, operand)) |value| { - const int = try sema.usizeCast(block, operand_src, value.toUnsignedInt(target)); + const int = try sema.usizeCast(block, operand_src, value.toUnsignedInt(mod)); if (int > sema.mod.global_error_set.count() or int == 0) return sema.fail(block, operand_src, "integer value '{d}' represents no error", .{int}); const payload = try sema.arena.create(Value.Payload.Error); @@ -8123,6 +8145,7 @@ fn zirMergeErrorSets(sema: *Sema, block: *Block, inst: 
Zir.Inst.Index) CompileEr const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node }; @@ -8130,7 +8153,7 @@ fn zirMergeErrorSets(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node }; const lhs = try sema.resolveInst(extra.lhs); const rhs = try sema.resolveInst(extra.rhs); - if (sema.typeOf(lhs).zigTypeTag() == .Bool and sema.typeOf(rhs).zigTypeTag() == .Bool) { + if (sema.typeOf(lhs).zigTypeTag(mod) == .Bool and sema.typeOf(rhs).zigTypeTag(mod) == .Bool) { const msg = msg: { const msg = try sema.errMsg(block, lhs_src, "expected error set type, found 'bool'", .{}); errdefer msg.destroy(sema.gpa); @@ -8141,9 +8164,9 @@ fn zirMergeErrorSets(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr } const lhs_ty = try sema.analyzeAsType(block, lhs_src, lhs); const rhs_ty = try sema.analyzeAsType(block, rhs_src, rhs); - if (lhs_ty.zigTypeTag() != .ErrorSet) + if (lhs_ty.zigTypeTag(mod) != .ErrorSet) return sema.fail(block, lhs_src, "expected error set type, found '{}'", .{lhs_ty.fmt(sema.mod)}); - if (rhs_ty.zigTypeTag() != .ErrorSet) + if (rhs_ty.zigTypeTag(mod) != .ErrorSet) return sema.fail(block, rhs_src, "expected error set type, found '{}'", .{rhs_ty.fmt(sema.mod)}); // Anything merged with anyerror is anyerror. @@ -8184,6 +8207,7 @@ fn zirEnumLiteral(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError } fn zirEnumToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const arena = sema.arena; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); @@ -8191,7 +8215,7 @@ fn zirEnumToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A const operand = try sema.resolveInst(inst_data.operand); const operand_ty = sema.typeOf(operand); - const enum_tag: Air.Inst.Ref = switch (operand_ty.zigTypeTag()) { + const enum_tag: Air.Inst.Ref = switch (operand_ty.zigTypeTag(mod)) { .Enum => operand, .Union => blk: { const union_ty = try sema.resolveTypeFields(operand_ty); @@ -8213,8 +8237,7 @@ fn zirEnumToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A }; const enum_tag_ty = sema.typeOf(enum_tag); - var int_tag_type_buffer: Type.Payload.Bits = undefined; - const int_tag_ty = try enum_tag_ty.intTagType(&int_tag_type_buffer).copy(arena); + const int_tag_ty = try enum_tag_ty.intTagType().copy(arena); if (try sema.typeHasOnePossibleValue(enum_tag_ty)) |opv| { return sema.addConstant(int_tag_ty, opv); @@ -8231,6 +8254,7 @@ fn zirEnumToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A } fn zirIntToEnum(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const src = inst_data.src(); @@ -8239,15 +8263,14 @@ fn zirIntToEnum(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A const dest_ty = try sema.resolveType(block, dest_ty_src, extra.lhs); const operand = try sema.resolveInst(extra.rhs); - if (dest_ty.zigTypeTag() != .Enum) { + if (dest_ty.zigTypeTag(mod) != .Enum) { return sema.fail(block, dest_ty_src, "expected enum, found '{}'", 
.{dest_ty.fmt(sema.mod)}); } _ = try sema.checkIntType(block, operand_src, sema.typeOf(operand)); if (try sema.resolveMaybeUndefVal(operand)) |int_val| { if (dest_ty.isNonexhaustiveEnum()) { - var buffer: Type.Payload.Bits = undefined; - const int_tag_ty = dest_ty.intTagType(&buffer); + const int_tag_ty = dest_ty.intTagType(); if (try sema.intFitsInType(int_val, int_tag_ty, null)) { return sema.addConstant(dest_ty, int_val); } @@ -8329,11 +8352,12 @@ fn analyzeOptionalPayloadPtr( safety_check: bool, initializing: bool, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const optional_ptr_ty = sema.typeOf(optional_ptr); - assert(optional_ptr_ty.zigTypeTag() == .Pointer); + assert(optional_ptr_ty.zigTypeTag(mod) == .Pointer); const opt_type = optional_ptr_ty.elemType(); - if (opt_type.zigTypeTag() != .Optional) { + if (opt_type.zigTypeTag(mod) != .Optional) { return sema.fail(block, src, "expected optional type, found '{}'", .{opt_type.fmt(sema.mod)}); } @@ -8361,7 +8385,7 @@ fn analyzeOptionalPayloadPtr( ); } if (try sema.pointerDeref(block, src, ptr_val, optional_ptr_ty)) |val| { - if (val.isNull()) { + if (val.isNull(mod)) { return sema.fail(block, src, "unable to unwrap null", .{}); } // The same Value represents the pointer to the optional and the payload. @@ -8397,11 +8421,12 @@ fn zirOptionalPayload( const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand = try sema.resolveInst(inst_data.operand); const operand_ty = sema.typeOf(operand); - const result_ty = switch (operand_ty.zigTypeTag()) { + const result_ty = switch (operand_ty.zigTypeTag(mod)) { .Optional => try operand_ty.optionalChildAlloc(sema.arena), .Pointer => t: { if (operand_ty.ptrSize() != .C) { @@ -8424,7 +8449,7 @@ fn zirOptionalPayload( }; if (try sema.resolveDefinedValue(block, src, operand)) |val| { - if (val.isNull()) { + if (val.isNull(mod)) { return sema.fail(block, src, "unable to unwrap null", .{}); } if (val.castTag(.opt_payload)) |payload| { @@ -8450,12 +8475,13 @@ fn zirErrUnionPayload( const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand = try sema.resolveInst(inst_data.operand); const operand_src = src; const err_union_ty = sema.typeOf(operand); - if (err_union_ty.zigTypeTag() != .ErrorUnion) { + if (err_union_ty.zigTypeTag(mod) != .ErrorUnion) { return sema.fail(block, operand_src, "expected error union type, found '{}'", .{ err_union_ty.fmt(sema.mod), }); @@ -8468,7 +8494,7 @@ fn analyzeErrUnionPayload( block: *Block, src: LazySrcLoc, err_union_ty: Type, - operand: Zir.Inst.Ref, + operand: Air.Inst.Ref, operand_src: LazySrcLoc, safety_check: bool, ) CompileError!Air.Inst.Ref { @@ -8517,10 +8543,11 @@ fn analyzeErrUnionPayloadPtr( safety_check: bool, initializing: bool, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const operand_ty = sema.typeOf(operand); - assert(operand_ty.zigTypeTag() == .Pointer); + assert(operand_ty.zigTypeTag(mod) == .Pointer); - if (operand_ty.elemType().zigTypeTag() != .ErrorUnion) { + if (operand_ty.elemType().zigTypeTag(mod) != .ErrorUnion) { return sema.fail(block, src, "expected error union type, found '{}'", .{ operand_ty.elemType().fmt(sema.mod), }); @@ -8594,8 +8621,9 @@ fn zirErrUnionCode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro } fn analyzeErrUnionCode(sema: *Sema, block: *Block, src: 
LazySrcLoc, operand: Air.Inst.Ref) CompileError!Air.Inst.Ref { + const mod = sema.mod; const operand_ty = sema.typeOf(operand); - if (operand_ty.zigTypeTag() != .ErrorUnion) { + if (operand_ty.zigTypeTag(mod) != .ErrorUnion) { return sema.fail(block, src, "expected error union type, found '{}'", .{ operand_ty.fmt(sema.mod), }); @@ -8617,13 +8645,14 @@ fn zirErrUnionCodePtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand = try sema.resolveInst(inst_data.operand); const operand_ty = sema.typeOf(operand); - assert(operand_ty.zigTypeTag() == .Pointer); + assert(operand_ty.zigTypeTag(mod) == .Pointer); - if (operand_ty.elemType().zigTypeTag() != .ErrorUnion) { + if (operand_ty.elemType().zigTypeTag(mod) != .ErrorUnion) { return sema.fail(block, src, "expected error union type, found '{}'", .{ operand_ty.elemType().fmt(sema.mod), }); @@ -8677,8 +8706,7 @@ fn zirFunc( extra_index += ret_ty_body.len; const ret_ty_val = try sema.resolveGenericBody(block, ret_ty_src, ret_ty_body, inst, Type.type, "return type must be comptime-known"); - var buffer: Value.ToTypeBuffer = undefined; - break :blk try ret_ty_val.toType(&buffer).copy(sema.arena); + break :blk try ret_ty_val.toType().copy(sema.arena); }, }; @@ -8849,6 +8877,7 @@ fn funcCommon( noalias_bits: u32, is_noinline: bool, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = src_node_offset }; const cc_src: LazySrcLoc = .{ .node_offset_fn_type_cc = src_node_offset }; const func_src = LazySrcLoc.nodeOffset(src_node_offset); @@ -8890,31 +8919,6 @@ fn funcCommon( const target = sema.mod.getTarget(); const fn_ty: Type = fn_ty: { - // Hot path for some common function types. - // TODO can we eliminate some of these Type tag values? seems unnecessarily complicated. - if (!is_generic and block.params.items.len == 0 and !var_args and !inferred_error_set and - alignment.? == 0 and - address_space.? == target_util.defaultAddressSpace(target, .function) and - section == .default and - !is_noinline) - { - if (bare_return_type.zigTypeTag() == .NoReturn and cc.? == .Unspecified) { - break :fn_ty Type.initTag(.fn_noreturn_no_args); - } - - if (bare_return_type.zigTypeTag() == .Void and cc.? == .Unspecified) { - break :fn_ty Type.initTag(.fn_void_no_args); - } - - if (bare_return_type.zigTypeTag() == .NoReturn and cc.? == .Naked) { - break :fn_ty Type.initTag(.fn_naked_noreturn_no_args); - } - - if (bare_return_type.zigTypeTag() == .Void and cc.? == .C) { - break :fn_ty Type.initTag(.fn_ccc_void_no_args); - } - } - // In the case of generic calling convention, or generic alignment, we use // default values which are only meaningful for the generic function, *not* // the instantiation, which can depend on comptime parameters. 
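Background on the hot path deleted above, sketched under the assumption that type construction is moving to an intern pool: once structurally equal types map to a single index, singleton tags such as `fn_noreturn_no_args` buy nothing. A hypothetical, simplified interning scheme:

    const std = @import("std");

    const FnKey = struct { ret: u32, cc: u8 }; // stand-in for a real function-type key

    fn intern(map: *std.AutoHashMap(FnKey, u32), next_index: *u32, key: FnKey) !u32 {
        const gop = try map.getOrPut(key);
        if (!gop.found_existing) {
            gop.value_ptr.* = next_index.*; // first sighting: assign a fresh index
            next_index.* += 1;
        }
        return gop.value_ptr.*; // equal keys always yield the same index
    }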
@@ -8985,8 +8989,8 @@ fn funcCommon( }); }; - if (!return_type.isValidReturnType()) { - const opaque_str = if (return_type.zigTypeTag() == .Opaque) "opaque " else ""; + if (!return_type.isValidReturnType(mod)) { + const opaque_str = if (return_type.zigTypeTag(mod) == .Opaque) "opaque " else ""; const msg = msg: { const msg = try sema.errMsg(block, ret_ty_src, "{s}return type '{}' not allowed", .{ opaque_str, return_type.fmt(sema.mod), @@ -9201,22 +9205,23 @@ fn analyzeParameter( has_body: bool, is_noalias: bool, ) !void { + const mod = sema.mod; const requires_comptime = try sema.typeRequiresComptime(param.ty); comptime_params[i] = param.is_comptime or requires_comptime; const this_generic = param.ty.tag() == .generic_poison; is_generic.* = is_generic.* or this_generic; - const target = sema.mod.getTarget(); + const target = mod.getTarget(); if (param.is_comptime and !Type.fnCallingConventionAllowsZigTypes(target, cc)) { return sema.fail(block, param_src, "comptime parameters not allowed in function with calling convention '{s}'", .{@tagName(cc)}); } if (this_generic and !sema.no_partial_func_ty and !Type.fnCallingConventionAllowsZigTypes(target, cc)) { return sema.fail(block, param_src, "generic parameters not allowed in function with calling convention '{s}'", .{@tagName(cc)}); } - if (!param.ty.isValidParamType()) { - const opaque_str = if (param.ty.zigTypeTag() == .Opaque) "opaque " else ""; + if (!param.ty.isValidParamType(mod)) { + const opaque_str = if (param.ty.zigTypeTag(mod) == .Opaque) "opaque " else ""; const msg = msg: { const msg = try sema.errMsg(block, param_src, "parameter of {s}type '{}' not allowed", .{ - opaque_str, param.ty.fmt(sema.mod), + opaque_str, param.ty.fmt(mod), }); errdefer msg.destroy(sema.gpa); @@ -9228,11 +9233,11 @@ fn analyzeParameter( if (!this_generic and !Type.fnCallingConventionAllowsZigTypes(target, cc) and !try sema.validateExternType(param.ty, .param_ty)) { const msg = msg: { const msg = try sema.errMsg(block, param_src, "parameter of type '{}' not allowed in function with calling convention '{s}'", .{ - param.ty.fmt(sema.mod), @tagName(cc), + param.ty.fmt(mod), @tagName(cc), }); errdefer msg.destroy(sema.gpa); - const src_decl = sema.mod.declPtr(block.src_decl); + const src_decl = mod.declPtr(block.src_decl); try sema.explainWhyTypeIsNotExtern(msg, param_src.toSrcLoc(src_decl), param.ty, .param_ty); try sema.addDeclaredHereNote(msg, param.ty); @@ -9243,11 +9248,11 @@ fn analyzeParameter( if (!sema.is_generic_instantiation and requires_comptime and !param.is_comptime and has_body) { const msg = msg: { const msg = try sema.errMsg(block, param_src, "parameter of type '{}' must be declared comptime", .{ - param.ty.fmt(sema.mod), + param.ty.fmt(mod), }); errdefer msg.destroy(sema.gpa); - const src_decl = sema.mod.declPtr(block.src_decl); + const src_decl = mod.declPtr(block.src_decl); try sema.explainWhyTypeIsComptime(msg, param_src.toSrcLoc(src_decl), param.ty); try sema.addDeclaredHereNote(msg, param.ty); @@ -9256,7 +9261,7 @@ fn analyzeParameter( return sema.failWithOwnedErrorMsg(msg); } if (!sema.is_generic_instantiation and !this_generic and is_noalias and - !(param.ty.zigTypeTag() == .Pointer or param.ty.isPtrLikeOptional())) + !(param.ty.zigTypeTag(mod) == .Pointer or param.ty.isPtrLikeOptional(mod))) { return sema.fail(block, param_src, "non-pointer parameter declared noalias", .{}); } @@ -9472,13 +9477,14 @@ fn analyzeAs( zir_operand: Zir.Inst.Ref, no_cast_to_comptime_int: bool, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const 
operand = try sema.resolveInst(zir_operand); - if (zir_dest_type == .var_args_param) return operand; + if (zir_dest_type == .var_args_param_type) return operand; const dest_ty = sema.resolveType(block, src, zir_dest_type) catch |err| switch (err) { error.GenericPoison => return operand, else => |e| return e, }; - if (dest_ty.zigTypeTag() == .NoReturn) { + if (dest_ty.zigTypeTag(mod) == .NoReturn) { return sema.fail(block, src, "cannot cast to noreturn", .{}); } const is_ret = if (Zir.refToIndex(zir_dest_type)) |ptr_index| @@ -9495,11 +9501,12 @@ fn zirPtrToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const ptr_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const ptr = try sema.resolveInst(inst_data.operand); const ptr_ty = sema.typeOf(ptr); - if (!ptr_ty.isPtrAtRuntime()) { + if (!ptr_ty.isPtrAtRuntime(mod)) { return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ty.fmt(sema.mod)}); } if (try sema.resolveMaybeUndefValIntable(ptr)) |ptr_val| { @@ -9586,25 +9593,25 @@ fn intCast( operand_src: LazySrcLoc, runtime_safety: bool, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const operand_ty = sema.typeOf(operand); const dest_scalar_ty = try sema.checkIntOrVectorAllowComptime(block, dest_ty, dest_ty_src); const operand_scalar_ty = try sema.checkIntOrVectorAllowComptime(block, operand_ty, operand_src); if (try sema.isComptimeKnown(operand)) { return sema.coerce(block, dest_ty, operand, operand_src); - } else if (dest_scalar_ty.zigTypeTag() == .ComptimeInt) { + } else if (dest_scalar_ty.zigTypeTag(mod) == .ComptimeInt) { return sema.fail(block, operand_src, "unable to cast runtime value to 'comptime_int'", .{}); } try sema.checkVectorizableBinaryOperands(block, operand_src, dest_ty, operand_ty, dest_ty_src, operand_src); - const is_vector = dest_ty.zigTypeTag() == .Vector; + const is_vector = dest_ty.zigTypeTag(mod) == .Vector; if ((try sema.typeHasOnePossibleValue(dest_ty))) |opv| { // requirement: intCast(u0, input) iff input == 0 if (runtime_safety and block.wantSafety()) { try sema.requireRuntimeBlock(block, src, operand_src); - const target = sema.mod.getTarget(); - const wanted_info = dest_scalar_ty.intInfo(target); + const wanted_info = dest_scalar_ty.intInfo(mod); const wanted_bits = wanted_info.bits; if (wanted_bits == 0) { @@ -9631,9 +9638,8 @@ fn intCast( try sema.requireRuntimeBlock(block, src, operand_src); if (runtime_safety and block.wantSafety()) { - const target = sema.mod.getTarget(); - const actual_info = operand_scalar_ty.intInfo(target); - const wanted_info = dest_scalar_ty.intInfo(target); + const actual_info = operand_scalar_ty.intInfo(mod); + const wanted_info = dest_scalar_ty.intInfo(mod); const actual_bits = actual_info.bits; const wanted_bits = wanted_info.bits; const actual_value_bits = actual_bits - @boolToInt(actual_info.signedness == .signed); @@ -9642,7 +9648,7 @@ fn intCast( // range shrinkage // requirement: int value fits into target type if (wanted_value_bits < actual_value_bits) { - const dest_max_val_scalar = try dest_scalar_ty.maxInt(sema.arena, target); + const dest_max_val_scalar = try dest_scalar_ty.maxInt(sema.arena, mod); const dest_max_val = if (is_vector) try Value.Tag.repeated.create(sema.arena, dest_max_val_scalar) else @@ -9653,7 +9659,7 @@ fn intCast( if (actual_info.signedness == .signed) { // Reinterpret the sign-bit as part of 
the value. This will make // negative differences (`operand` > `dest_max`) appear too big. - const unsigned_operand_ty = try Type.Tag.int_unsigned.create(sema.arena, actual_bits); + const unsigned_operand_ty = try mod.intType(.unsigned, actual_bits); const diff_unsigned = try block.addBitCast(unsigned_operand_ty, diff); // If the destination type is signed, then we need to double its @@ -9727,6 +9733,7 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; @@ -9735,7 +9742,7 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const dest_ty = try sema.resolveType(block, dest_ty_src, extra.lhs); const operand = try sema.resolveInst(extra.rhs); const operand_ty = sema.typeOf(operand); - switch (dest_ty.zigTypeTag()) { + switch (dest_ty.zigTypeTag(mod)) { .AnyFrame, .ComptimeFloat, .ComptimeInt, @@ -9757,7 +9764,7 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const msg = msg: { const msg = try sema.errMsg(block, dest_ty_src, "cannot @bitCast to '{}'", .{dest_ty.fmt(sema.mod)}); errdefer msg.destroy(sema.gpa); - switch (operand_ty.zigTypeTag()) { + switch (operand_ty.zigTypeTag(mod)) { .Int, .ComptimeInt => try sema.errNote(block, dest_ty_src, msg, "use @intToEnum to cast from '{}'", .{operand_ty.fmt(sema.mod)}), else => {}, } @@ -9771,7 +9778,7 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const msg = msg: { const msg = try sema.errMsg(block, dest_ty_src, "cannot @bitCast to '{}'", .{dest_ty.fmt(sema.mod)}); errdefer msg.destroy(sema.gpa); - switch (operand_ty.zigTypeTag()) { + switch (operand_ty.zigTypeTag(mod)) { .Int, .ComptimeInt => try sema.errNote(block, dest_ty_src, msg, "use @intToPtr to cast from '{}'", .{operand_ty.fmt(sema.mod)}), .Pointer => try sema.errNote(block, dest_ty_src, msg, "use @ptrCast to cast from '{}'", .{operand_ty.fmt(sema.mod)}), else => {}, @@ -9782,7 +9789,7 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air return sema.failWithOwnedErrorMsg(msg); }, .Struct, .Union => if (dest_ty.containerLayout() == .Auto) { - const container = switch (dest_ty.zigTypeTag()) { + const container = switch (dest_ty.zigTypeTag(mod)) { .Struct => "struct", .Union => "union", else => unreachable, @@ -9799,7 +9806,7 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air .Vector, => {}, } - switch (operand_ty.zigTypeTag()) { + switch (operand_ty.zigTypeTag(mod)) { .AnyFrame, .ComptimeFloat, .ComptimeInt, @@ -9821,7 +9828,7 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const msg = msg: { const msg = try sema.errMsg(block, operand_src, "cannot @bitCast from '{}'", .{operand_ty.fmt(sema.mod)}); errdefer msg.destroy(sema.gpa); - switch (dest_ty.zigTypeTag()) { + switch (dest_ty.zigTypeTag(mod)) { .Int, .ComptimeInt => try sema.errNote(block, operand_src, msg, "use @enumToInt to cast to '{}'", .{dest_ty.fmt(sema.mod)}), else => {}, } @@ -9834,7 +9841,7 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const msg = msg: { const msg = try sema.errMsg(block, operand_src, "cannot @bitCast from '{}'", 
.{operand_ty.fmt(sema.mod)}); errdefer msg.destroy(sema.gpa); - switch (dest_ty.zigTypeTag()) { + switch (dest_ty.zigTypeTag(mod)) { .Int, .ComptimeInt => try sema.errNote(block, operand_src, msg, "use @ptrToInt to cast to '{}'", .{dest_ty.fmt(sema.mod)}), .Pointer => try sema.errNote(block, operand_src, msg, "use @ptrCast to cast to '{}'", .{dest_ty.fmt(sema.mod)}), else => {}, @@ -9845,7 +9852,7 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air return sema.failWithOwnedErrorMsg(msg); }, .Struct, .Union => if (operand_ty.containerLayout() == .Auto) { - const container = switch (operand_ty.zigTypeTag()) { + const container = switch (operand_ty.zigTypeTag(mod)) { .Struct => "struct", .Union => "union", else => unreachable, @@ -9869,6 +9876,7 @@ fn zirFloatCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; @@ -9878,7 +9886,7 @@ fn zirFloatCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A const operand = try sema.resolveInst(extra.rhs); const target = sema.mod.getTarget(); - const dest_is_comptime_float = switch (dest_ty.zigTypeTag()) { + const dest_is_comptime_float = switch (dest_ty.zigTypeTag(mod)) { .ComptimeFloat => true, .Float => false, else => return sema.fail( @@ -9890,7 +9898,7 @@ fn zirFloatCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A }; const operand_ty = sema.typeOf(operand); - switch (operand_ty.zigTypeTag()) { + switch (operand_ty.zigTypeTag(mod)) { .ComptimeFloat, .Float, .ComptimeInt => {}, else => return sema.fail( block, @@ -9944,20 +9952,21 @@ fn zirElemPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const array_ptr = try sema.resolveInst(extra.lhs); const elem_index = try sema.resolveInst(extra.rhs); const indexable_ty = sema.typeOf(array_ptr); - if (indexable_ty.zigTypeTag() != .Pointer) { + if (indexable_ty.zigTypeTag(mod) != .Pointer) { const capture_src: LazySrcLoc = .{ .for_capture_from_input = inst_data.src_node }; const msg = msg: { const msg = try sema.errMsg(block, capture_src, "pointer capture of non pointer type '{}'", .{ indexable_ty.fmt(sema.mod), }); errdefer msg.destroy(sema.gpa); - if (indexable_ty.zigTypeTag() == .Array) { + if (indexable_ty.zigTypeTag(mod) == .Array) { try sema.errNote(block, src, msg, "consider using '&' here", .{}); } break :msg msg; @@ -10076,6 +10085,7 @@ fn zirSwitchCapture( const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const zir_datas = sema.code.instructions.items(.data); const capture_info = zir_datas[inst].switch_capture; const switch_info = zir_datas[capture_info.switch_inst].pl_node; @@ -10091,7 +10101,7 @@ fn zirSwitchCapture( if (block.inline_case_capture != .none) { const item_val = sema.resolveConstValue(block, .unneeded, block.inline_case_capture, undefined) catch unreachable; - if (operand_ty.zigTypeTag() == .Union) { + if (operand_ty.zigTypeTag(mod) == .Union) { const field_index = @intCast(u32, 
operand_ty.unionTagFieldIndex(item_val, sema.mod).?); const union_obj = operand_ty.cast(Type.Payload.Union).?.data; const field_ty = union_obj.fields.values()[field_index].ty; @@ -10144,7 +10154,7 @@ fn zirSwitchCapture( return operand_ptr; } - switch (operand_ty.zigTypeTag()) { + switch (operand_ty.zigTypeTag(mod)) { .ErrorSet => if (block.switch_else_err_ty) |some| { return sema.bitCast(block, some, operand, operand_src, null); } else { @@ -10162,7 +10172,7 @@ fn zirSwitchCapture( switch_extra.data.getScalarProng(sema.code, switch_extra.end, capture_info.prong_index).item, }; - switch (operand_ty.zigTypeTag()) { + switch (operand_ty.zigTypeTag(mod)) { .Union => { const union_obj = operand_ty.cast(Type.Payload.Union).?.data; const first_item = try sema.resolveInst(items[0]); @@ -10269,6 +10279,7 @@ fn zirSwitchCapture( } fn zirSwitchCaptureTag(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const zir_datas = sema.code.instructions.items(.data); const inst_data = zir_datas[inst].un_tok; const src = inst_data.src(); @@ -10280,7 +10291,7 @@ fn zirSwitchCaptureTag(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compile const operand_ptr_ty = sema.typeOf(operand_ptr); const operand_ty = if (is_ref) operand_ptr_ty.childType() else operand_ptr_ty; - if (operand_ty.zigTypeTag() != .Union) { + if (operand_ty.zigTypeTag(mod) != .Union) { const msg = msg: { const msg = try sema.errMsg(block, src, "cannot capture tag of non-union type '{}'", .{ operand_ty.fmt(sema.mod), @@ -10301,6 +10312,7 @@ fn zirSwitchCond( inst: Zir.Inst.Index, is_ref: bool, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_switch_operand = inst_data.src_node }; @@ -10311,7 +10323,7 @@ fn zirSwitchCond( operand_ptr; const operand_ty = sema.typeOf(operand); - switch (operand_ty.zigTypeTag()) { + switch (operand_ty.zigTypeTag(mod)) { .Type, .Void, .Bool, @@ -10371,6 +10383,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const gpa = sema.gpa; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); @@ -10415,7 +10428,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const target_ty = sema.typeOf(raw_operand); break :blk if (zir_tags[cond_index] == .switch_cond_ref) target_ty.elemType() else target_ty; }; - const union_originally = maybe_union_ty.zigTypeTag() == .Union; + const union_originally = maybe_union_ty.zigTypeTag(mod) == .Union; // Duplicate checking variables later also used for `inline else`. var seen_enum_fields: []?Module.SwitchProngSrc = &.{}; @@ -10433,7 +10446,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError var empty_enum = false; const operand_ty = sema.typeOf(operand); - const err_set = operand_ty.zigTypeTag() == .ErrorSet; + const err_set = operand_ty.zigTypeTag(mod) == .ErrorSet; var else_error_ty: ?Type = null; @@ -10459,10 +10472,8 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError return sema.failWithOwnedErrorMsg(msg); } - const target = sema.mod.getTarget(); - // Validate for duplicate items, missing else prong, and invalid range. 
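As a model of the integer-exhaustiveness rule applied further down (the min_int/max_int span test): a switch over an integer type may omit `else` only when its case ranges cover [minInt(T), maxInt(T)] with no gap. This helper is illustrative, not the compiler's implementation:

    const std = @import("std");

    /// Assumes `ranges` is sorted by start and non-overlapping, as RangeSet ensures.
    fn coversWholeType(comptime T: type, ranges: []const [2]T) bool {
        if (ranges.len == 0) return false;
        if (ranges[0][0] != std.math.minInt(T)) return false;
        var prev_last = ranges[0][1];
        for (ranges[1..]) |r| {
            if (prev_last == std.math.maxInt(T)) return false; // further ranges cannot extend coverage
            if (r[0] != prev_last + 1) return false; // gap: an `else` prong is required
            prev_last = r[1];
        }
        return prev_last == std.math.maxInt(T);
    }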
- switch (operand_ty.zigTypeTag()) { + switch (operand_ty.zigTypeTag(mod)) { .Union => unreachable, // handled in zirSwitchCond .Enum => { seen_enum_fields = try gpa.alloc(?Module.SwitchProngSrc, operand_ty.enumFieldCount()); @@ -10774,12 +10785,12 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError } check_range: { - if (operand_ty.zigTypeTag() == .Int) { + if (operand_ty.zigTypeTag(mod) == .Int) { var arena = std.heap.ArenaAllocator.init(gpa); defer arena.deinit(); - const min_int = try operand_ty.minInt(arena.allocator(), target); - const max_int = try operand_ty.maxInt(arena.allocator(), target); + const min_int = try operand_ty.minInt(arena.allocator(), mod); + const max_int = try operand_ty.maxInt(arena.allocator(), mod); if (try range_set.spans(min_int, max_int, operand_ty)) { if (special_prong == .@"else") { return sema.fail( @@ -11080,7 +11091,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError if (err_set and try sema.maybeErrorUnwrap(block, special.body, operand)) { return Air.Inst.Ref.unreachable_value; } - if (sema.mod.backendSupportsFeature(.is_named_enum_value) and block.wantSafety() and operand_ty.zigTypeTag() == .Enum and + if (sema.mod.backendSupportsFeature(.is_named_enum_value) and block.wantSafety() and operand_ty.zigTypeTag(mod) == .Enum and (!operand_ty.isNonexhaustiveEnum() or union_originally)) { try sema.zirDbgStmt(block, cond_dbg_node_index); @@ -11135,7 +11146,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const analyze_body = if (union_originally) blk: { const item_val = sema.resolveConstValue(block, .unneeded, item, "") catch unreachable; const field_ty = maybe_union_ty.unionFieldType(item_val, sema.mod); - break :blk field_ty.zigTypeTag() != .NoReturn; + break :blk field_ty.zigTypeTag(mod) != .NoReturn; } else true; if (err_set and try sema.maybeErrorUnwrap(&case_block, body, operand)) { @@ -11242,7 +11253,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const analyze_body = if (union_originally) blk: { const item_val = sema.resolveConstValue(block, .unneeded, item, undefined) catch unreachable; const field_ty = maybe_union_ty.unionFieldType(item_val, sema.mod); - break :blk field_ty.zigTypeTag() != .NoReturn; + break :blk field_ty.zigTypeTag(mod) != .NoReturn; } else true; if (emit_bb) sema.emitBackwardBranch(block, .unneeded) catch |err| switch (err) { @@ -11286,7 +11297,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const item = try sema.resolveInst(item_ref); const item_val = sema.resolveConstValue(block, .unneeded, item, "") catch unreachable; const field_ty = maybe_union_ty.unionFieldType(item_val, sema.mod); - if (field_ty.zigTypeTag() != .NoReturn) break true; + if (field_ty.zigTypeTag(mod) != .NoReturn) break true; } else false else true; @@ -11409,7 +11420,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError var final_else_body: []const Air.Inst.Index = &.{}; if (special.body.len != 0 or !is_first or case_block.wantSafety()) { var emit_bb = false; - if (special.is_inline) switch (operand_ty.zigTypeTag()) { + if (special.is_inline) switch (operand_ty.zigTypeTag(mod)) { .Enum => { if (operand_ty.isNonexhaustiveEnum() and !union_originally) { return sema.fail(block, special_prong_src, "cannot enumerate values of type '{}' for 'inline else'", .{ @@ -11429,7 +11440,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) 
@@ -11429,7 +11440,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
                     const analyze_body = if (union_originally) blk: {
                         const field_ty = maybe_union_ty.unionFieldType(item_val, sema.mod);
-                        break :blk field_ty.zigTypeTag() != .NoReturn;
+                        break :blk field_ty.zigTypeTag(mod) != .NoReturn;
                     } else true;
 
                     if (emit_bb) try sema.emitBackwardBranch(block, special_prong_src);
@@ -11551,7 +11562,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
         case_block.inline_case_capture = .none;
 
         if (sema.mod.backendSupportsFeature(.is_named_enum_value) and special.body.len != 0 and block.wantSafety() and
-            operand_ty.zigTypeTag() == .Enum and (!operand_ty.isNonexhaustiveEnum() or union_originally))
+            operand_ty.zigTypeTag(mod) == .Enum and (!operand_ty.isNonexhaustiveEnum() or union_originally))
         {
             try sema.zirDbgStmt(&case_block, cond_dbg_node_index);
             const ok = try case_block.addUnOp(.is_named_enum_value, operand);
@@ -11563,7 +11574,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
                     if (seen_field != null) continue;
                     const union_obj = maybe_union_ty.cast(Type.Payload.Union).?.data;
                     const field_ty = union_obj.fields.values()[index].ty;
-                    if (field_ty.zigTypeTag() != .NoReturn) break true;
+                    if (field_ty.zigTypeTag(mod) != .NoReturn) break true;
                 } else false
             else
                 true;
@@ -11629,9 +11640,9 @@ const RangeSetUnhandledIterator = struct {
     first: bool = true,
 
     fn init(sema: *Sema, ty: Type, range_set: RangeSet) !RangeSetUnhandledIterator {
-        const target = sema.mod.getTarget();
-        const min = try ty.minInt(sema.arena, target);
-        const max = try ty.maxInt(sema.arena, target);
+        const mod = sema.mod;
+        const min = try ty.minInt(sema.arena, mod);
+        const max = try ty.maxInt(sema.arena, mod);
 
         return RangeSetUnhandledIterator{
             .sema = sema,
@@ -11931,18 +11942,19 @@ fn maybeErrorUnwrap(sema: *Sema, block: *Block, body: []const Zir.Inst.Index, op
 }
 
 fn maybeErrorUnwrapCondbr(sema: *Sema, block: *Block, body: []const Zir.Inst.Index, cond: Zir.Inst.Ref, cond_src: LazySrcLoc) !void {
+    const mod = sema.mod;
     const index = Zir.refToIndex(cond) orelse return;
     if (sema.code.instructions.items(.tag)[index] != .is_non_err) return;
 
     const err_inst_data = sema.code.instructions.items(.data)[index].un_node;
     const err_operand = try sema.resolveInst(err_inst_data.operand);
     const operand_ty = sema.typeOf(err_operand);
-    if (operand_ty.zigTypeTag() == .ErrorSet) {
+    if (operand_ty.zigTypeTag(mod) == .ErrorSet) {
         try sema.maybeErrorUnwrapComptime(block, body, err_operand);
         return;
     }
     if (try sema.resolveDefinedValue(block, cond_src, err_operand)) |val| {
-        if (!operand_ty.isError()) return;
+        if (!operand_ty.isError(mod)) return;
         if (val.getError() == null) return;
         try sema.maybeErrorUnwrapComptime(block, body, err_operand);
     }
@@ -11972,6 +11984,7 @@ fn maybeErrorUnwrapComptime(sema: *Sema, block: *Block, body: []const Zir.Inst.I
 }
 
 fn zirHasField(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
     const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
@@ -11995,7 +12008,7 @@ fn zirHasField(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
             const field_index = std.fmt.parseUnsigned(u32, field_name, 10) catch break :hf false;
             break :hf field_index < ty.structFieldCount();
         }
-        break :hf switch (ty.zigTypeTag()) {
+        break :hf switch (ty.zigTypeTag(mod)) {
            .Struct => ty.structFields().contains(field_name),
            .Union => ty.unionFields().contains(field_name),
            .Enum => ty.enumFields().contains(field_name),
@@ -12126,6 +12139,7 @@ fn zirShl(
     const tracy = trace(@src());
     defer tracy.end();
 
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     sema.src = src;
@@ -12136,11 +12150,10 @@ fn zirShl(
     const rhs = try sema.resolveInst(extra.rhs);
     const lhs_ty = sema.typeOf(lhs);
     const rhs_ty = sema.typeOf(rhs);
-    const target = sema.mod.getTarget();
     try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
 
-    const scalar_ty = lhs_ty.scalarType();
-    const scalar_rhs_ty = rhs_ty.scalarType();
+    const scalar_ty = lhs_ty.scalarType(mod);
+    const scalar_rhs_ty = rhs_ty.scalarType(mod);
 
     // TODO coerce rhs if air_tag is not shl_sat
     const rhs_is_comptime_int = try sema.checkIntType(block, rhs_src, scalar_rhs_ty);
@@ -12156,18 +12169,18 @@ fn zirShl(
             if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
                 return lhs;
             }
-            if (scalar_ty.zigTypeTag() != .ComptimeInt and air_tag != .shl_sat) {
+            if (scalar_ty.zigTypeTag(mod) != .ComptimeInt and air_tag != .shl_sat) {
                 var bits_payload = Value.Payload.U64{
                     .base = .{ .tag = .int_u64 },
-                    .data = scalar_ty.intInfo(target).bits,
+                    .data = scalar_ty.intInfo(mod).bits,
                 };
                 const bit_value = Value.initPayload(&bits_payload.base);
-                if (rhs_ty.zigTypeTag() == .Vector) {
+                if (rhs_ty.zigTypeTag(mod) == .Vector) {
                     var i: usize = 0;
                     while (i < rhs_ty.vectorLen()) : (i += 1) {
                         var elem_value_buf: Value.ElemValueBuffer = undefined;
                         const rhs_elem = rhs_val.elemValueBuffer(sema.mod, i, &elem_value_buf);
-                        if (rhs_elem.compareHetero(.gte, bit_value, target)) {
+                        if (rhs_elem.compareHetero(.gte, bit_value, mod)) {
                             return sema.fail(block, rhs_src, "shift amount '{}' at index '{d}' is too large for operand type '{}'", .{
                                 rhs_elem.fmtValue(scalar_ty, sema.mod),
                                 i,
@@ -12175,26 +12188,26 @@ fn zirShl(
                             });
                         }
                     }
-                } else if (rhs_val.compareHetero(.gte, bit_value, target)) {
+                } else if (rhs_val.compareHetero(.gte, bit_value, mod)) {
                     return sema.fail(block, rhs_src, "shift amount '{}' is too large for operand type '{}'", .{
                         rhs_val.fmtValue(scalar_ty, sema.mod),
                         scalar_ty.fmt(sema.mod),
                     });
                 }
             }
-            if (rhs_ty.zigTypeTag() == .Vector) {
+            if (rhs_ty.zigTypeTag(mod) == .Vector) {
                 var i: usize = 0;
                 while (i < rhs_ty.vectorLen()) : (i += 1) {
                     var elem_value_buf: Value.ElemValueBuffer = undefined;
                     const rhs_elem = rhs_val.elemValueBuffer(sema.mod, i, &elem_value_buf);
-                    if (rhs_elem.compareHetero(.lt, Value.zero, target)) {
+                    if (rhs_elem.compareHetero(.lt, Value.zero, mod)) {
                         return sema.fail(block, rhs_src, "shift by negative amount '{}' at index '{d}'", .{
                             rhs_elem.fmtValue(scalar_ty, sema.mod),
                             i,
                         });
                     }
                }
-            } else if (rhs_val.compareHetero(.lt, Value.zero, target)) {
+            } else if (rhs_val.compareHetero(.lt, Value.zero, mod)) {
                 return sema.fail(block, rhs_src, "shift by negative amount '{}'", .{
                     rhs_val.fmtValue(scalar_ty, sema.mod),
                 });
@@ -12204,7 +12217,7 @@ fn zirShl(
     const runtime_src = if (maybe_lhs_val) |lhs_val| rs: {
         if (lhs_val.isUndef()) return sema.addConstUndef(lhs_ty);
         const rhs_val = maybe_rhs_val orelse {
-            if (scalar_ty.zigTypeTag() == .ComptimeInt) {
+            if (scalar_ty.zigTypeTag(mod) == .ComptimeInt) {
                 return sema.fail(block, src, "LHS of shift must be a fixed-width integer type, or RHS must be comptime-known", .{});
             }
             break :rs rhs_src;
@@ -12213,7 +12226,7 @@ fn zirShl(
         const val = switch (air_tag) {
             .shl_exact => val: {
                const shifted = try lhs_val.shlWithOverflow(rhs_val, lhs_ty, sema.arena, sema.mod);
-                if (scalar_ty.zigTypeTag() == .ComptimeInt) {
+                if (scalar_ty.zigTypeTag(mod) == .ComptimeInt) {
                     break :val shifted.wrapped_result;
                 }
                 if (shifted.overflow_bit.compareAllWithZero(.eq, sema.mod)) {
@@ -12222,12 +12235,12 @@ fn zirShl(
                 return sema.fail(block, src, "operation caused overflow", .{});
             },
 
-            .shl_sat => if (scalar_ty.zigTypeTag() == .ComptimeInt)
+            .shl_sat => if (scalar_ty.zigTypeTag(mod) == .ComptimeInt)
                 try lhs_val.shl(rhs_val, lhs_ty, sema.arena, sema.mod)
             else
                 try lhs_val.shlSat(rhs_val, lhs_ty, sema.arena, sema.mod),
 
-            .shl => if (scalar_ty.zigTypeTag() == .ComptimeInt)
+            .shl => if (scalar_ty.zigTypeTag(mod) == .ComptimeInt)
                 try lhs_val.shl(rhs_val, lhs_ty, sema.arena, sema.mod)
             else
                 try lhs_val.shlTrunc(rhs_val, lhs_ty, sema.arena, sema.mod),
@@ -12241,11 +12254,11 @@ fn zirShl(
     const new_rhs = if (air_tag == .shl_sat) rhs: {
         // Limit the RHS type for saturating shl to be an integer as small as the LHS.
         if (rhs_is_comptime_int or
-            scalar_rhs_ty.intInfo(target).bits > scalar_ty.intInfo(target).bits)
+            scalar_rhs_ty.intInfo(mod).bits > scalar_ty.intInfo(mod).bits)
         {
             const max_int = try sema.addConstant(
                 lhs_ty,
-                try lhs_ty.maxInt(sema.arena, target),
+                try lhs_ty.maxInt(sema.arena, mod),
             );
             const rhs_limited = try sema.analyzeMinMax(block, rhs_src, .min, &.{ rhs, max_int }, &.{ rhs_src, rhs_src });
             break :rhs try sema.intCast(block, src, lhs_ty, rhs_src, rhs_limited, rhs_src, false);
@@ -12256,11 +12269,11 @@ fn zirShl(
     try sema.requireRuntimeBlock(block, src, runtime_src);
     if (block.wantSafety()) {
-        const bit_count = scalar_ty.intInfo(target).bits;
+        const bit_count = scalar_ty.intInfo(mod).bits;
         if (!std.math.isPowerOfTwo(bit_count)) {
             const bit_count_val = try Value.Tag.int_u64.create(sema.arena, bit_count);
 
-            const ok = if (rhs_ty.zigTypeTag() == .Vector) ok: {
+            const ok = if (rhs_ty.zigTypeTag(mod) == .Vector) ok: {
                 const bit_count_inst = try sema.addConstant(rhs_ty, try Value.Tag.repeated.create(sema.arena, bit_count_val));
                 const lt = try block.addCmpVector(rhs, bit_count_inst, .lt);
                 break :ok try block.addInst(.{
@@ -12290,7 +12303,7 @@ fn zirShl(
            } },
        });
        const ov_bit = try sema.tupleFieldValByIndex(block, src, op_ov, 1, op_ov_tuple_ty);
-        const any_ov_bit = if (lhs_ty.zigTypeTag() == .Vector)
+        const any_ov_bit = if (lhs_ty.zigTypeTag(mod) == .Vector)
            try block.addInst(.{
                .tag = if (block.float_mode == .Optimized) .reduce_optimized else .reduce,
                .data = .{ .reduce = .{
@@ -12319,6 +12332,7 @@ fn zirShr(
     const tracy = trace(@src());
     defer tracy.end();
 
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     sema.src = src;
@@ -12330,8 +12344,7 @@ fn zirShr(
     const lhs_ty = sema.typeOf(lhs);
     const rhs_ty = sema.typeOf(rhs);
     try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
-    const target = sema.mod.getTarget();
-    const scalar_ty = lhs_ty.scalarType();
+    const scalar_ty = lhs_ty.scalarType(mod);
 
     const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(lhs);
     const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(rhs);
@@ -12344,18 +12357,18 @@ fn zirShr(
             if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
                 return lhs;
             }
-            if (scalar_ty.zigTypeTag() != .ComptimeInt) {
+            if (scalar_ty.zigTypeTag(mod) != .ComptimeInt) {
                 var bits_payload = Value.Payload.U64{
                     .base = .{ .tag = .int_u64 },
-                    .data = scalar_ty.intInfo(target).bits,
+                    .data = scalar_ty.intInfo(mod).bits,
                 };
                const bit_value = Value.initPayload(&bits_payload.base);
-                if (rhs_ty.zigTypeTag() == .Vector) {
+                if (rhs_ty.zigTypeTag(mod) == .Vector) {
                     var i: usize = 0;
                     while (i < rhs_ty.vectorLen()) : (i += 1) {
                         var elem_value_buf: Value.ElemValueBuffer = undefined;
                         const rhs_elem = rhs_val.elemValueBuffer(sema.mod, i, &elem_value_buf);
-                        if (rhs_elem.compareHetero(.gte, bit_value, target)) {
+                        if (rhs_elem.compareHetero(.gte, bit_value, mod)) {
                             return sema.fail(block, rhs_src, "shift amount '{}' at index '{d}' is too large for operand type '{}'", .{
                                 rhs_elem.fmtValue(scalar_ty, sema.mod),
                                 i,
@@ -12363,26 +12376,26 @@ fn zirShr(
                             });
                         }
                     }
-                } else if (rhs_val.compareHetero(.gte, bit_value, target)) {
+                } else if (rhs_val.compareHetero(.gte, bit_value, mod)) {
                     return sema.fail(block, rhs_src, "shift amount '{}' is too large for operand type '{}'", .{
                         rhs_val.fmtValue(scalar_ty, sema.mod),
                         scalar_ty.fmt(sema.mod),
                     });
                 }
             }
-            if (rhs_ty.zigTypeTag() == .Vector) {
+            if (rhs_ty.zigTypeTag(mod) == .Vector) {
                 var i: usize = 0;
                 while (i < rhs_ty.vectorLen()) : (i += 1) {
                     var elem_value_buf: Value.ElemValueBuffer = undefined;
                     const rhs_elem = rhs_val.elemValueBuffer(sema.mod, i, &elem_value_buf);
-                    if (rhs_elem.compareHetero(.lt, Value.zero, target)) {
+                    if (rhs_elem.compareHetero(.lt, Value.zero, mod)) {
                         return sema.fail(block, rhs_src, "shift by negative amount '{}' at index '{d}'", .{
                             rhs_elem.fmtValue(scalar_ty, sema.mod),
                             i,
                         });
                     }
                }
-            } else if (rhs_val.compareHetero(.lt, Value.zero, target)) {
+            } else if (rhs_val.compareHetero(.lt, Value.zero, mod)) {
                 return sema.fail(block, rhs_src, "shift by negative amount '{}'", .{
                     rhs_val.fmtValue(scalar_ty, sema.mod),
                 });
@@ -12405,18 +12418,18 @@ fn zirShr(
         }
     } else rhs_src;
 
-    if (maybe_rhs_val == null and scalar_ty.zigTypeTag() == .ComptimeInt) {
+    if (maybe_rhs_val == null and scalar_ty.zigTypeTag(mod) == .ComptimeInt) {
         return sema.fail(block, src, "LHS of shift must be a fixed-width integer type, or RHS must be comptime-known", .{});
     }
     try sema.requireRuntimeBlock(block, src, runtime_src);
     const result = try block.addBinOp(air_tag, lhs, rhs);
     if (block.wantSafety()) {
-        const bit_count = scalar_ty.intInfo(target).bits;
+        const bit_count = scalar_ty.intInfo(mod).bits;
         if (!std.math.isPowerOfTwo(bit_count)) {
             const bit_count_val = try Value.Tag.int_u64.create(sema.arena, bit_count);
 
-            const ok = if (rhs_ty.zigTypeTag() == .Vector) ok: {
+            const ok = if (rhs_ty.zigTypeTag(mod) == .Vector) ok: {
                 const bit_count_inst = try sema.addConstant(rhs_ty, try Value.Tag.repeated.create(sema.arena, bit_count_val));
                 const lt = try block.addCmpVector(rhs, bit_count_inst, .lt);
                 break :ok try block.addInst(.{
@@ -12436,7 +12449,7 @@ fn zirShr(
     if (air_tag == .shr_exact) {
         const back = try block.addBinOp(.shl, result, rhs);
 
-        const ok = if (rhs_ty.zigTypeTag() == .Vector) ok: {
+        const ok = if (rhs_ty.zigTypeTag(mod) == .Vector) ok: {
             const eql = try block.addCmpVector(lhs, back, .eq);
             break :ok try block.addInst(.{
                 .tag = if (block.float_mode == .Optimized) .reduce_optimized else .reduce,
@@ -12461,6 +12474,7 @@ fn zirBitwise(
     const tracy = trace(@src());
     defer tracy.end();
 
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
     sema.src = src;
@@ -12475,8 +12489,8 @@ fn zirBitwise(
     const instructions = &[_]Air.Inst.Ref{ lhs, rhs };
     const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{ .override = &[_]?LazySrcLoc{ lhs_src, rhs_src } });
-    const scalar_type = resolved_type.scalarType();
-    const scalar_tag = scalar_type.zigTypeTag();
+    const scalar_type = resolved_type.scalarType(mod);
+    const scalar_tag = scalar_type.zigTypeTag(mod);
 
     const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
     const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);
@@ -12484,7 +12498,7 @@ fn zirBitwise(
     const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;
 
     if (!is_int) {
-        return sema.fail(block, src, "invalid operands to binary bitwise expression: '{s}' and '{s}'", .{ @tagName(lhs_ty.zigTypeTag()), @tagName(rhs_ty.zigTypeTag()) });
+        return sema.fail(block, src, "invalid operands to binary bitwise expression: '{s}' and '{s}'", .{ @tagName(lhs_ty.zigTypeTag(mod)), @tagName(rhs_ty.zigTypeTag(mod)) });
     }
 
     const runtime_src = runtime: {
@@ -12515,15 +12529,16 @@ fn zirBitNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
     const tracy = trace(@src());
     defer tracy.end();
 
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     const operand_src: LazySrcLoc = .{ .node_offset_un_op = inst_data.src_node };
 
     const operand = try sema.resolveInst(inst_data.operand);
     const operand_type = sema.typeOf(operand);
-    const scalar_type = operand_type.scalarType();
+    const scalar_type = operand_type.scalarType(mod);
 
-    if (scalar_type.zigTypeTag() != .Int) {
+    if (scalar_type.zigTypeTag(mod) != .Int) {
         return sema.fail(block, src, "unable to perform binary not operation on type '{}'", .{
             operand_type.fmt(sema.mod),
         });
@@ -12532,7 +12547,7 @@ fn zirBitNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
     if (try sema.resolveMaybeUndefVal(operand)) |val| {
         if (val.isUndef()) {
             return sema.addConstUndef(operand_type);
-        } else if (operand_type.zigTypeTag() == .Vector) {
+        } else if (operand_type.zigTypeTag(mod) == .Vector) {
             const vec_len = try sema.usizeCast(block, operand_src, operand_type.vectorLen());
             var elem_val_buf: Value.ElemValueBuffer = undefined;
             const elems = try sema.arena.alloc(Value, vec_len);
@@ -12728,18 +12743,19 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     };
     const result_ty = try Type.array(sema.arena, result_len, res_sent_val, resolved_elem_ty, sema.mod);
 
+    const mod = sema.mod;
     const ptr_addrspace = p: {
-        if (lhs_ty.zigTypeTag() == .Pointer) break :p lhs_ty.ptrAddressSpace();
-        if (rhs_ty.zigTypeTag() == .Pointer) break :p rhs_ty.ptrAddressSpace();
+        if (lhs_ty.zigTypeTag(mod) == .Pointer) break :p lhs_ty.ptrAddressSpace();
+        if (rhs_ty.zigTypeTag(mod) == .Pointer) break :p rhs_ty.ptrAddressSpace();
         break :p null;
     };
 
-    const runtime_src = if (switch (lhs_ty.zigTypeTag()) {
+    const runtime_src = if (switch (lhs_ty.zigTypeTag(mod)) {
         .Array, .Struct => try sema.resolveMaybeUndefVal(lhs),
         .Pointer => try sema.resolveDefinedValue(block, lhs_src, lhs),
         else => unreachable,
     }) |lhs_val| rs: {
-        if (switch (rhs_ty.zigTypeTag()) {
+        if (switch (rhs_ty.zigTypeTag(mod)) {
             .Array, .Struct => try sema.resolveMaybeUndefVal(rhs),
             .Pointer => try sema.resolveDefinedValue(block, rhs_src, rhs),
             else => unreachable,
@@ -12841,8 +12857,9 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
 }
 
 fn getArrayCatInfo(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Inst.Ref, peer_ty: Type) !?Type.ArrayInfo {
+    const mod = sema.mod;
     const operand_ty = sema.typeOf(operand);
-    switch (operand_ty.zigTypeTag()) {
+    switch (operand_ty.zigTypeTag(mod)) {
        .Array => return operand_ty.arrayInfo(),
        .Pointer => {
            const ptr_info = operand_ty.ptrInfo().data;
@@ -12859,7 +12876,7 @@ fn getArrayCatInfo(
                     };
                 },
                 .One => {
-                    if (ptr_info.pointee_type.zigTypeTag() == .Array) {
+                    if (ptr_info.pointee_type.zigTypeTag(mod) == .Array) {
                         return ptr_info.pointee_type.arrayInfo();
                     }
                 },
@@ -12867,10 +12884,10 @@ fn getArrayCatInfo(
             }
         },
        .Struct => {
-            if (operand_ty.isTuple() and peer_ty.isIndexable()) {
+            if (operand_ty.isTuple() and peer_ty.isIndexable(mod)) {
                 assert(!peer_ty.isTuple());
                 return .{
-                    .elem_type = peer_ty.elemType2(),
+                    .elem_type = peer_ty.elemType2(mod),
                     .sentinel = null,
                     .len = operand_ty.arrayLen(),
                 };
@@ -12970,11 +12987,12 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     }
 
     // Analyze the lhs first, to catch the case that someone tried to do exponentiation
+    const mod = sema.mod;
     const lhs_info = try sema.getArrayCatInfo(block, lhs_src, lhs, lhs_ty) orelse {
         const msg = msg: {
             const msg = try sema.errMsg(block, lhs_src, "expected indexable; found '{}'", .{lhs_ty.fmt(sema.mod)});
             errdefer msg.destroy(sema.gpa);
-            switch (lhs_ty.zigTypeTag()) {
+            switch (lhs_ty.zigTypeTag(mod)) {
                 .Int, .Float, .ComptimeFloat, .ComptimeInt, .Vector => {
                     try sema.errNote(block, operator_src, msg, "this operator multiplies arrays; use std.math.pow for exponentiation", .{});
                 },
@@ -12994,7 +13012,7 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
 
     const result_ty = try Type.array(sema.arena, result_len, lhs_info.sentinel, lhs_info.elem_type, sema.mod);
 
-    const ptr_addrspace = if (lhs_ty.zigTypeTag() == .Pointer) lhs_ty.ptrAddressSpace() else null;
+    const ptr_addrspace = if (lhs_ty.zigTypeTag(mod) == .Pointer) lhs_ty.ptrAddressSpace() else null;
     const lhs_len = try sema.usizeCast(block, lhs_src, lhs_info.len);
 
     if (try sema.resolveDefinedValue(block, lhs_src, lhs)) |lhs_val| {
@@ -13082,6 +13100,7 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
 }
 
 fn zirNegate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     const lhs_src = src;
@@ -13089,9 +13108,9 @@ fn zirNegate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
     const rhs = try sema.resolveInst(inst_data.operand);
     const rhs_ty = sema.typeOf(rhs);
-    const rhs_scalar_ty = rhs_ty.scalarType();
+    const rhs_scalar_ty = rhs_ty.scalarType(mod);
 
-    if (rhs_scalar_ty.isUnsignedInt() or switch (rhs_scalar_ty.zigTypeTag()) {
+    if (rhs_scalar_ty.isUnsignedInt(mod) or switch (rhs_scalar_ty.zigTypeTag(mod)) {
         .Int, .ComptimeInt, .Float, .ComptimeFloat => false,
         else => true,
     }) {
@@ -13108,7 +13127,7 @@ fn zirNegate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
         return block.addUnOp(if (block.float_mode == .Optimized) .neg_optimized else .neg, rhs);
     }
 
-    const lhs = if (rhs_ty.zigTypeTag() == .Vector)
+    const lhs = if (rhs_ty.zigTypeTag(mod) == .Vector)
         try sema.addConstant(rhs_ty, try Value.Tag.repeated.create(sema.arena, Value.zero))
     else
         try sema.resolveInst(.zero);
@@ -13117,6 +13136,7 @@
 }
 
 fn zirNegateWrap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     const lhs_src = src;
@@ -13124,14 +13144,14 @@ fn zirNegateWrap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
     const rhs = try sema.resolveInst(inst_data.operand);
     const rhs_ty = sema.typeOf(rhs);
-    const rhs_scalar_ty = rhs_ty.scalarType();
+    const rhs_scalar_ty = rhs_ty.scalarType(mod);
 
-    switch (rhs_scalar_ty.zigTypeTag()) {
+    switch (rhs_scalar_ty.zigTypeTag(mod)) {
         .Int, .ComptimeInt, .Float, .ComptimeFloat => {},
         else => return sema.fail(block, src, "negation of type '{}'", .{rhs_ty.fmt(sema.mod)}),
     }
 
-    const lhs = if (rhs_ty.zigTypeTag() == .Vector)
+    const lhs = if (rhs_ty.zigTypeTag(mod) == .Vector)
         try sema.addConstant(rhs_ty, try Value.Tag.repeated.create(sema.arena, Value.zero))
     else
         try sema.resolveInst(.zero);
@@ -13161,6 +13181,7 @@ fn zirArithmetic(
 }
 
 fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
     sema.src = src;
@@ -13171,8 +13192,8 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
     const rhs = try sema.resolveInst(extra.rhs);
     const lhs_ty = sema.typeOf(lhs);
     const rhs_ty = sema.typeOf(rhs);
-    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison();
-    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison();
+    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod);
+    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod);
     try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
     try sema.checkInvalidPtrArithmetic(block, src, lhs_ty);
@@ -13181,25 +13202,24 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
         .override = &[_]?LazySrcLoc{ lhs_src, rhs_src },
     });
 
-    const is_vector = resolved_type.zigTypeTag() == .Vector;
+    const is_vector = resolved_type.zigTypeTag(mod) == .Vector;
 
     const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
     const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);
 
-    const lhs_scalar_ty = lhs_ty.scalarType();
-    const rhs_scalar_ty = rhs_ty.scalarType();
-    const scalar_tag = resolved_type.scalarType().zigTypeTag();
+    const lhs_scalar_ty = lhs_ty.scalarType(mod);
+    const rhs_scalar_ty = rhs_ty.scalarType(mod);
+    const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod);
 
     const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;
 
     try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .div);
 
-    const mod = sema.mod;
     const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs);
     const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs);
 
-    if ((lhs_ty.zigTypeTag() == .ComptimeFloat and rhs_ty.zigTypeTag() == .ComptimeInt) or
-        (lhs_ty.zigTypeTag() == .ComptimeInt and rhs_ty.zigTypeTag() == .ComptimeFloat))
+    if ((lhs_ty.zigTypeTag(mod) == .ComptimeFloat and rhs_ty.zigTypeTag(mod) == .ComptimeInt) or
+        (lhs_ty.zigTypeTag(mod) == .ComptimeInt and rhs_ty.zigTypeTag(mod) == .ComptimeFloat))
     {
         // If it makes a difference whether we coerce to ints or floats before doing the division, error.
         // If lhs % rhs is 0, it doesn't matter.
@@ -13268,7 +13288,7 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
     const runtime_src = rs: {
         if (maybe_lhs_val) |lhs_val| {
             if (lhs_val.isUndef()) {
-                if (lhs_scalar_ty.isSignedInt() and rhs_scalar_ty.isSignedInt()) {
+                if (lhs_scalar_ty.isSignedInt(mod) and rhs_scalar_ty.isSignedInt(mod)) {
                     if (maybe_rhs_val) |rhs_val| {
                         if (try sema.compareAll(rhs_val, .neq, Value.negative_one, resolved_type)) {
                             return sema.addConstUndef(resolved_type);
@@ -13309,7 +13329,7 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
     }
 
     const air_tag = if (is_int) blk: {
-        if (lhs_ty.isSignedInt() or rhs_ty.isSignedInt()) {
+        if (lhs_ty.isSignedInt(mod) or rhs_ty.isSignedInt(mod)) {
             return sema.fail(block, src, "division with '{s}' and '{s}': signed integers must use @divTrunc, @divFloor, or @divExact", .{ @tagName(lhs_ty.tag()), @tagName(rhs_ty.tag()) });
         }
         break :blk Air.Inst.Tag.div_trunc;
@@ -13321,6 +13341,7 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
 }
 
 fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
     sema.src = src;
@@ -13331,8 +13352,8 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     const rhs = try sema.resolveInst(extra.rhs);
     const lhs_ty = sema.typeOf(lhs);
     const rhs_ty = sema.typeOf(rhs);
-    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison();
-    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison();
+    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod);
+    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod);
     try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
     try sema.checkInvalidPtrArithmetic(block, src, lhs_ty);
@@ -13341,19 +13362,18 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
         .override = &[_]?LazySrcLoc{ lhs_src, rhs_src },
     });
 
-    const is_vector = resolved_type.zigTypeTag() == .Vector;
+    const is_vector = resolved_type.zigTypeTag(mod) == .Vector;
 
     const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
     const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);
 
-    const lhs_scalar_ty = lhs_ty.scalarType();
-    const scalar_tag = resolved_type.scalarType().zigTypeTag();
+    const lhs_scalar_ty = lhs_ty.scalarType(mod);
+    const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod);
 
     const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;
 
     try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .div_exact);
 
-    const mod = sema.mod;
     const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs);
     const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs);
 
@@ -13437,7 +13457,7 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     const ok = if (!is_int) ok: {
         const floored = try block.addUnOp(.floor, result);
 
-        if (resolved_type.zigTypeTag() == .Vector) {
+        if (resolved_type.zigTypeTag(mod) == .Vector) {
             const eql = try block.addCmpVector(result, floored, .eq);
             break :ok try block.addInst(.{
                 .tag = switch (block.float_mode) {
@@ -13459,7 +13479,7 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     } else ok: {
         const remainder = try block.addBinOp(.rem, casted_lhs, casted_rhs);
 
-        if (resolved_type.zigTypeTag() == .Vector) {
+        if (resolved_type.zigTypeTag(mod) == .Vector) {
            const zero_val = try Value.Tag.repeated.create(sema.arena, Value.zero);
            const zero = try sema.addConstant(resolved_type, zero_val);
            const eql = try block.addCmpVector(remainder, zero, .eq);
@@ -13484,6 +13504,7 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
 }
 
 fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
     sema.src = src;
@@ -13494,8 +13515,8 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     const rhs = try sema.resolveInst(extra.rhs);
     const lhs_ty = sema.typeOf(lhs);
     const rhs_ty = sema.typeOf(rhs);
-    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison();
-    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison();
+    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod);
+    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod);
     try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
     try sema.checkInvalidPtrArithmetic(block, src, lhs_ty);
@@ -13504,20 +13525,19 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
         .override = &[_]?LazySrcLoc{ lhs_src, rhs_src },
     });
 
-    const is_vector = resolved_type.zigTypeTag() == .Vector;
+    const is_vector = resolved_type.zigTypeTag(mod) == .Vector;
 
     const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
     const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);
 
-    const lhs_scalar_ty = lhs_ty.scalarType();
-    const rhs_scalar_ty = rhs_ty.scalarType();
-    const scalar_tag = resolved_type.scalarType().zigTypeTag();
+    const lhs_scalar_ty = lhs_ty.scalarType(mod);
+    const rhs_scalar_ty = rhs_ty.scalarType(mod);
+    const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod);
 
     const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;
 
     try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .div_floor);
 
-    const mod = sema.mod;
     const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs);
     const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs);
 
@@ -13562,7 +13582,7 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     }
     if (maybe_lhs_val) |lhs_val| {
         if (lhs_val.isUndef()) {
-            if (lhs_scalar_ty.isSignedInt() and rhs_scalar_ty.isSignedInt()) {
+            if (lhs_scalar_ty.isSignedInt(mod) and rhs_scalar_ty.isSignedInt(mod)) {
                 if (maybe_rhs_val) |rhs_val| {
                     if (try sema.compareAll(rhs_val, .neq, Value.negative_one, resolved_type)) {
                         return sema.addConstUndef(resolved_type);
@@ -13600,6 +13620,7 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
 }
 
 fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
     sema.src = src;
@@ -13610,8 +13631,8 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     const rhs = try sema.resolveInst(extra.rhs);
     const lhs_ty = sema.typeOf(lhs);
     const rhs_ty = sema.typeOf(rhs);
-    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison();
+    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod);
-    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison();
+    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod);
     try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
     try sema.checkInvalidPtrArithmetic(block, src, lhs_ty);
@@ -13620,20 +13641,19 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
         .override = &[_]?LazySrcLoc{ lhs_src, rhs_src },
     });
 
-    const is_vector = resolved_type.zigTypeTag() == .Vector;
+    const is_vector = resolved_type.zigTypeTag(mod) == .Vector;
 
     const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
     const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);
 
-    const lhs_scalar_ty = lhs_ty.scalarType();
-    const rhs_scalar_ty = rhs_ty.scalarType();
-    const scalar_tag = resolved_type.scalarType().zigTypeTag();
+    const lhs_scalar_ty = lhs_ty.scalarType(mod);
+    const rhs_scalar_ty = rhs_ty.scalarType(mod);
+    const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod);
 
     const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;
 
     try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .div_trunc);
 
-    const mod = sema.mod;
     const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs);
     const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs);
 
@@ -13677,7 +13697,7 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     }
     if (maybe_lhs_val) |lhs_val| {
         if (lhs_val.isUndef()) {
-            if (lhs_scalar_ty.isSignedInt() and rhs_scalar_ty.isSignedInt()) {
+            if (lhs_scalar_ty.isSignedInt(mod) and rhs_scalar_ty.isSignedInt(mod)) {
                 if (maybe_rhs_val) |rhs_val| {
                     if (try sema.compareAll(rhs_val, .neq, Value.negative_one, resolved_type)) {
                         return sema.addConstUndef(resolved_type);
@@ -13727,22 +13747,20 @@ fn addDivIntOverflowSafety(
     casted_rhs: Air.Inst.Ref,
     is_int: bool,
 ) CompileError!void {
+    const mod = sema.mod;
     if (!is_int) return;
 
     // If the LHS is unsigned, it cannot cause overflow.
-    if (!lhs_scalar_ty.isSignedInt()) return;
-
-    const mod = sema.mod;
-    const target = mod.getTarget();
+    if (!lhs_scalar_ty.isSignedInt(mod)) return;
 
     // If the LHS is widened to a larger integer type, no overflow is possible.
-    if (lhs_scalar_ty.intInfo(target).bits < resolved_type.intInfo(target).bits) {
+    if (lhs_scalar_ty.intInfo(mod).bits < resolved_type.intInfo(mod).bits) {
         return;
     }
 
-    const min_int = try resolved_type.minInt(sema.arena, target);
+    const min_int = try resolved_type.minInt(sema.arena, mod);
     const neg_one_scalar = try Value.Tag.int_i64.create(sema.arena, -1);
-    const neg_one = if (resolved_type.zigTypeTag() == .Vector)
+    const neg_one = if (resolved_type.zigTypeTag(mod) == .Vector)
         try Value.Tag.repeated.create(sema.arena, neg_one_scalar)
     else
         neg_one_scalar;
@@ -13759,7 +13777,7 @@ fn addDivIntOverflowSafety(
     }
 
     var ok: Air.Inst.Ref = .none;
-    if (resolved_type.zigTypeTag() == .Vector) {
+    if (resolved_type.zigTypeTag(mod) == .Vector) {
         if (maybe_lhs_val == null) {
             const min_int_ref = try sema.addConstant(resolved_type, min_int);
             ok = try block.addCmpVector(casted_lhs, min_int_ref, .neq);
@@ -13815,7 +13833,8 @@ fn addDivByZeroSafety(
     // emitted above.
     if (maybe_rhs_val != null) return;
 
-    const ok = if (resolved_type.zigTypeTag() == .Vector) ok: {
+    const mod = sema.mod;
+    const ok = if (resolved_type.zigTypeTag(mod) == .Vector) ok: {
         const zero_val = try Value.Tag.repeated.create(sema.arena, Value.zero);
         const zero = try sema.addConstant(resolved_type, zero_val);
         const ok = try block.addCmpVector(casted_rhs, zero, .neq);
@@ -13842,6 +13861,7 @@ fn airTag(block: *Block, is_int: bool, normal: Air.Inst.Tag, optimized: Air.Inst
 }
 
 fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
     sema.src = src;
@@ -13852,8 +13872,8 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
     const rhs = try sema.resolveInst(extra.rhs);
     const lhs_ty = sema.typeOf(lhs);
     const rhs_ty = sema.typeOf(rhs);
-    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison();
-    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison();
+    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod);
+    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod);
     try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
     try sema.checkInvalidPtrArithmetic(block, src, lhs_ty);
@@ -13862,20 +13882,19 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
         .override = &[_]?LazySrcLoc{ lhs_src, rhs_src },
     });
 
-    const is_vector = resolved_type.zigTypeTag() == .Vector;
+    const is_vector = resolved_type.zigTypeTag(mod) == .Vector;
 
     const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
     const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);
 
-    const lhs_scalar_ty = lhs_ty.scalarType();
-    const rhs_scalar_ty = rhs_ty.scalarType();
-    const scalar_tag = resolved_type.scalarType().zigTypeTag();
+    const lhs_scalar_ty = lhs_ty.scalarType(mod);
+    const rhs_scalar_ty = rhs_ty.scalarType(mod);
+    const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod);
 
     const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;
 
     try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .mod_rem);
 
-    const mod = sema.mod;
     const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs);
     const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs);
 
@@ -13904,7 +13923,7 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
             } else Value.zero;
             return sema.addConstant(resolved_type, zero_val);
         }
-    } else if (lhs_scalar_ty.isSignedInt()) {
+    } else if (lhs_scalar_ty.isSignedInt(mod)) {
         return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty);
     }
     if (maybe_rhs_val) |rhs_val| {
@@ -13929,7 +13948,7 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
             return sema.addConstant(resolved_type, rem_result);
         }
         break :rs lhs_src;
-    } else if (rhs_scalar_ty.isSignedInt()) {
+    } else if (rhs_scalar_ty.isSignedInt(mod)) {
         return sema.failWithModRemNegative(block, rhs_src, lhs_ty, rhs_ty);
     } else {
         break :rs rhs_src;
@@ -13978,7 +13997,8 @@ fn intRem(
     lhs: Value,
     rhs: Value,
 ) CompileError!Value {
-    if (ty.zigTypeTag() == .Vector) {
+    const mod = sema.mod;
+    if (ty.zigTypeTag(mod) == .Vector) {
         const result_data = try sema.arena.alloc(Value, ty.vectorLen());
        for (result_data, 0..) |*scalar, i| {
            var lhs_buf: Value.ElemValueBuffer = undefined;
@@ -13997,13 +14017,13 @@ fn intRemScalar(
     lhs: Value,
     rhs: Value,
 ) CompileError!Value {
-    const target = sema.mod.getTarget();
+    const mod = sema.mod;
     // TODO is this a performance issue? maybe we should try the operation without
     // resorting to BigInt first.
     var lhs_space: Value.BigIntSpace = undefined;
     var rhs_space: Value.BigIntSpace = undefined;
-    const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, target, sema);
-    const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, target, sema);
+    const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, sema);
+    const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, sema);
     const limbs_q = try sema.arena.alloc(
         math.big.Limb,
         lhs_bigint.limbs.len,
@@ -14025,6 +14045,7 @@ fn intRemScalar(
 }
 
 fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
     sema.src = src;
@@ -14035,8 +14056,8 @@ fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
     const rhs = try sema.resolveInst(extra.rhs);
     const lhs_ty = sema.typeOf(lhs);
     const rhs_ty = sema.typeOf(rhs);
-    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison();
-    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison();
+    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod);
+    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod);
     try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
     try sema.checkInvalidPtrArithmetic(block, src, lhs_ty);
@@ -14048,13 +14069,12 @@ fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
     const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
     const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);
 
-    const scalar_tag = resolved_type.scalarType().zigTypeTag();
+    const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod);
 
     const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;
 
     try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .mod);
 
-    const mod = sema.mod;
     const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs);
     const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs);
 
@@ -14127,6 +14147,7 @@ fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
 }
 
 fn zirRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
     sema.src = src;
@@ -14137,8 +14158,8 @@ fn zirRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
     const rhs = try sema.resolveInst(extra.rhs);
     const lhs_ty = sema.typeOf(lhs);
     const rhs_ty = sema.typeOf(rhs);
-    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison();
-    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison();
+    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod);
+    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod);
     try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
     try sema.checkInvalidPtrArithmetic(block, src, lhs_ty);
@@ -14150,13 +14171,12 @@ fn zirRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
     const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
     const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);
 
-    const scalar_tag = resolved_type.scalarType().zigTypeTag();
+    const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod);
 
     const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;
 
     try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .rem);
 
-    const mod = sema.mod;
     const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs);
     const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs);
 
@@ -14268,7 +14288,7 @@ fn zirOverflowArithmetic(
     const lhs = try sema.coerce(block, dest_ty, uncasted_lhs, lhs_src);
     const rhs = try sema.coerce(block, rhs_dest_ty, uncasted_rhs, rhs_src);
 
-    if (dest_ty.scalarType().zigTypeTag() != .Int) {
+    if (dest_ty.scalarType(mod).zigTypeTag(mod) != .Int) {
         return sema.fail(block, src, "expected vector of integers or integer tag type, found '{}'", .{dest_ty.fmt(mod)});
     }
 
@@ -14434,12 +14454,14 @@ fn zirOverflowArithmetic(
 }
 
 fn maybeRepeated(sema: *Sema, ty: Type, val: Value) !Value {
-    if (ty.zigTypeTag() != .Vector) return val;
+    const mod = sema.mod;
+    if (ty.zigTypeTag(mod) != .Vector) return val;
     return Value.Tag.repeated.create(sema.arena, val);
 }
 
 fn overflowArithmeticTupleType(sema: *Sema, ty: Type) !Type {
-    const ov_ty = if (ty.zigTypeTag() == .Vector) try Type.vector(sema.arena, ty.vectorLen(), Type.u1) else Type.u1;
+    const mod = sema.mod;
+    const ov_ty = if (ty.zigTypeTag(mod) == .Vector) try Type.vector(sema.arena, ty.vectorLen(), Type.u1) else Type.u1;
 
     const types = try sema.arena.alloc(Type, 2);
     const values = try sema.arena.alloc(Value, 2);
@@ -14468,10 +14490,11 @@ fn analyzeArithmetic(
     rhs_src: LazySrcLoc,
     want_safety: bool,
 ) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const lhs_ty = sema.typeOf(lhs);
     const rhs_ty = sema.typeOf(rhs);
-    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison();
-    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison();
+    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod);
+    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod);
     try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
 
     if (lhs_zig_ty_tag == .Pointer) switch (lhs_ty.ptrSize()) {
@@ -14491,18 +14514,17 @@ fn analyzeArithmetic(
         .override = &[_]?LazySrcLoc{ lhs_src, rhs_src },
     });
 
-    const is_vector = resolved_type.zigTypeTag() == .Vector;
+    const is_vector = resolved_type.zigTypeTag(mod) == .Vector;
 
     const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
     const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);
 
-    const scalar_tag = resolved_type.scalarType().zigTypeTag();
+    const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod);
 
     const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;
 
     try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, zir_tag);
 
-    const mod = sema.mod;
     const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs);
     const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs);
     const rs: struct { src: LazySrcLoc, air_tag: Air.Inst.Tag } = rs: {
@@ -14910,7 +14932,7 @@ fn analyzeArithmetic(
            } },
        });
        const ov_bit = try sema.tupleFieldValByIndex(block, src, op_ov, 1, op_ov_tuple_ty);
-        const any_ov_bit = if (resolved_type.zigTypeTag() == .Vector)
+        const any_ov_bit = if (resolved_type.zigTypeTag(mod) == .Vector)
            try block.addInst(.{
                .tag = if (block.float_mode == .Optimized) .reduce_optimized else .reduce,
                .data = .{ .reduce = .{
@@ -14944,12 +14966,12 @@ fn analyzePtrArithmetic(
     // TODO if the operand is comptime-known to be negative, or is a negative int,
     // coerce to isize instead of usize.
     const offset = try sema.coerce(block, Type.usize, uncasted_offset, offset_src);
-    const target = sema.mod.getTarget();
+    const mod = sema.mod;
     const opt_ptr_val = try sema.resolveMaybeUndefVal(ptr);
     const opt_off_val = try sema.resolveDefinedValue(block, offset_src, offset);
     const ptr_ty = sema.typeOf(ptr);
     const ptr_info = ptr_ty.ptrInfo().data;
-    const elem_ty = if (ptr_info.size == .One and ptr_info.pointee_type.zigTypeTag() == .Array)
+    const elem_ty = if (ptr_info.size == .One and ptr_info.pointee_type.zigTypeTag(mod) == .Array)
         ptr_info.pointee_type.childType()
     else
         ptr_info.pointee_type;
@@ -14963,9 +14985,9 @@ fn analyzePtrArithmetic(
         }
         // If the addend is not a comptime-known value we can still count on
         // it being a multiple of the type size.
-        const elem_size = elem_ty.abiSize(target);
+        const elem_size = elem_ty.abiSize(mod);
         const addend = if (opt_off_val) |off_val| a: {
-            const off_int = try sema.usizeCast(block, offset_src, off_val.toUnsignedInt(target));
+            const off_int = try sema.usizeCast(block, offset_src, off_val.toUnsignedInt(mod));
             break :a elem_size * off_int;
         } else elem_size;
 
@@ -14991,10 +15013,10 @@ fn analyzePtrArithmetic(
         if (opt_off_val) |offset_val| {
             if (ptr_val.isUndef()) return sema.addConstUndef(new_ptr_ty);
 
-            const offset_int = try sema.usizeCast(block, offset_src, offset_val.toUnsignedInt(target));
+            const offset_int = try sema.usizeCast(block, offset_src, offset_val.toUnsignedInt(mod));
             if (offset_int == 0) return ptr;
-            if (try ptr_val.getUnsignedIntAdvanced(target, sema)) |addr| {
-                const elem_size = elem_ty.abiSize(target);
+            if (try ptr_val.getUnsignedIntAdvanced(mod, sema)) |addr| {
+                const elem_size = elem_ty.abiSize(mod);
                 const new_addr = switch (air_tag) {
                     .ptr_add => addr + elem_size * offset_int,
                     .ptr_sub => addr - elem_size * offset_int,
@@ -15116,6 +15138,7 @@ fn zirAsm(
     const args = try sema.arena.alloc(Air.Inst.Ref, inputs_len);
     const inputs = try sema.arena.alloc(ConstraintName, inputs_len);
 
+    const mod = sema.mod;
     for (args, 0..) |*arg, arg_i| {
         const input = sema.code.extraData(Zir.Inst.Asm.Input, extra_i);
@@ -15123,7 +15146,7 @@ fn zirAsm(
         const uncasted_arg = try sema.resolveInst(input.data.operand);
         const uncasted_arg_ty = sema.typeOf(uncasted_arg);
-        switch (uncasted_arg_ty.zigTypeTag()) {
+        switch (uncasted_arg_ty.zigTypeTag(mod)) {
             .ComptimeInt => arg.* = try sema.coerce(block, Type.initTag(.usize), uncasted_arg, src),
             .ComptimeFloat => arg.* = try sema.coerce(block, Type.initTag(.f64), uncasted_arg, src),
             else => {
@@ -15205,6 +15228,7 @@ fn zirCmpEq(
     const tracy = trace(@src());
     defer tracy.end();
 
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
     const src: LazySrcLoc = inst_data.src();
@@ -15215,8 +15239,8 @@ fn zirCmpEq(
     const lhs_ty = sema.typeOf(lhs);
     const rhs_ty = sema.typeOf(rhs);
-    const lhs_ty_tag = lhs_ty.zigTypeTag();
-    const rhs_ty_tag = rhs_ty.zigTypeTag();
+    const lhs_ty_tag = lhs_ty.zigTypeTag(mod);
+    const rhs_ty_tag = rhs_ty.zigTypeTag(mod);
     if (lhs_ty_tag == .Null and rhs_ty_tag == .Null) {
         // null == null, null != null
         if (op == .eq) {
@@ -15295,6 +15319,7 @@ fn analyzeCmpUnionTag(
     tag_src: LazySrcLoc,
     op: std.math.CompareOperator,
 ) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const union_ty = try sema.resolveTypeFields(sema.typeOf(un));
     const union_tag_ty = union_ty.unionTagType() orelse {
         const msg = msg: {
@@ -15313,7 +15338,7 @@ fn analyzeCmpUnionTag(
     if (try sema.resolveMaybeUndefVal(coerced_tag)) |enum_val| {
         if (enum_val.isUndef()) return sema.addConstUndef(Type.bool);
         const field_ty = union_ty.unionFieldType(enum_val, sema.mod);
-        if (field_ty.zigTypeTag() == .NoReturn) {
+        if (field_ty.zigTypeTag(mod) == .NoReturn) {
             return Air.Inst.Ref.bool_false;
         }
     }
@@ -15352,32 +15377,33 @@ fn analyzeCmp(
     rhs_src: LazySrcLoc,
     is_equality_cmp: bool,
 ) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const lhs_ty = sema.typeOf(lhs);
     const rhs_ty = sema.typeOf(rhs);
-    if (lhs_ty.zigTypeTag() != .Optional and rhs_ty.zigTypeTag() != .Optional) {
+    if (lhs_ty.zigTypeTag(mod) != .Optional and rhs_ty.zigTypeTag(mod) != .Optional) {
         try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
     }
 
-    if (lhs_ty.zigTypeTag() == .Vector and rhs_ty.zigTypeTag() == .Vector) {
+    if (lhs_ty.zigTypeTag(mod) == .Vector and rhs_ty.zigTypeTag(mod) == .Vector) {
         return sema.cmpVector(block, src, lhs, rhs, op, lhs_src, rhs_src);
     }
-    if (lhs_ty.isNumeric() and rhs_ty.isNumeric()) {
+    if (lhs_ty.isNumeric(mod) and rhs_ty.isNumeric(mod)) {
         // This operation allows any combination of integer and float types, regardless of the
         // signed-ness, comptime-ness, and bit-width. So peer type resolution is incorrect for
         // numeric types.
         return sema.cmpNumeric(block, src, lhs, rhs, op, lhs_src, rhs_src);
     }
-    if (is_equality_cmp and lhs_ty.zigTypeTag() == .ErrorUnion and rhs_ty.zigTypeTag() == .ErrorSet) {
+    if (is_equality_cmp and lhs_ty.zigTypeTag(mod) == .ErrorUnion and rhs_ty.zigTypeTag(mod) == .ErrorSet) {
         const casted_lhs = try sema.analyzeErrUnionCode(block, lhs_src, lhs);
         return sema.cmpSelf(block, src, casted_lhs, rhs, op, lhs_src, rhs_src);
     }
-    if (is_equality_cmp and lhs_ty.zigTypeTag() == .ErrorSet and rhs_ty.zigTypeTag() == .ErrorUnion) {
+    if (is_equality_cmp and lhs_ty.zigTypeTag(mod) == .ErrorSet and rhs_ty.zigTypeTag(mod) == .ErrorUnion) {
         const casted_rhs = try sema.analyzeErrUnionCode(block, rhs_src, rhs);
         return sema.cmpSelf(block, src, lhs, casted_rhs, op, lhs_src, rhs_src);
     }
     const instructions = &[_]Air.Inst.Ref{ lhs, rhs };
     const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{ .override = &[_]?LazySrcLoc{ lhs_src, rhs_src } });
-    if (!resolved_type.isSelfComparable(is_equality_cmp)) {
+    if (!resolved_type.isSelfComparable(mod, is_equality_cmp)) {
         return sema.fail(block, src, "operator {s} not allowed for type '{}'", .{
             compareOperatorName(op), resolved_type.fmt(sema.mod),
         });
@@ -15408,6 +15434,7 @@ fn cmpSelf(
     lhs_src: LazySrcLoc,
     rhs_src: LazySrcLoc,
 ) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const resolved_type = sema.typeOf(casted_lhs);
     const runtime_src: LazySrcLoc = src: {
         if (try sema.resolveMaybeUndefVal(casted_lhs)) |lhs_val| {
@@ -15415,7 +15442,7 @@ fn cmpSelf(
             if (try sema.resolveMaybeUndefVal(casted_rhs)) |rhs_val| {
                 if (rhs_val.isUndef()) return sema.addConstUndef(Type.bool);
 
-                if (resolved_type.zigTypeTag() == .Vector) {
+                if (resolved_type.zigTypeTag(mod) == .Vector) {
                     const result_ty = try Type.vector(sema.arena, resolved_type.vectorLen(), Type.bool);
                     const cmp_val = try sema.compareVector(lhs_val, op, rhs_val, resolved_type);
                     return sema.addConstant(result_ty, cmp_val);
@@ -15427,7 +15454,7 @@ fn cmpSelf(
                     return Air.Inst.Ref.bool_false;
                 }
             } else {
-                if (resolved_type.zigTypeTag() == .Bool) {
+                if (resolved_type.zigTypeTag(mod) == .Bool) {
                     // We can lower bool eq/neq more efficiently.
                     return sema.runtimeBoolCmp(block, src, op, casted_rhs, lhs_val.toBool(), rhs_src);
                 }
@@ -15436,7 +15463,7 @@ fn cmpSelf(
         } else {
             // For bools, we still check the other operand, because we can lower
             // bool eq/neq more efficiently.
-            if (resolved_type.zigTypeTag() == .Bool) {
+            if (resolved_type.zigTypeTag(mod) == .Bool) {
                 if (try sema.resolveMaybeUndefVal(casted_rhs)) |rhs_val| {
                     if (rhs_val.isUndef()) return sema.addConstUndef(Type.bool);
                     return sema.runtimeBoolCmp(block, src, op, casted_lhs, rhs_val.toBool(), lhs_src);
@@ -15446,7 +15473,7 @@ fn cmpSelf(
         }
     };
     try sema.requireRuntimeBlock(block, src, runtime_src);
-    if (resolved_type.zigTypeTag() == .Vector) {
+    if (resolved_type.zigTypeTag(mod) == .Vector) {
         return block.addCmpVector(casted_lhs, casted_rhs, op);
     }
     const tag = Air.Inst.Tag.fromCmpOp(op, block.float_mode == .Optimized);
@@ -15475,10 +15502,11 @@ fn runtimeBoolCmp(
 }
 
 fn zirSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
     const ty = try sema.resolveType(block, operand_src, inst_data.operand);
-    switch (ty.zigTypeTag()) {
+    switch (ty.zigTypeTag(mod)) {
         .Fn,
         .NoReturn,
         .Undefined,
@@ -15509,8 +15537,7 @@ fn zirSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
         .AnyFrame,
         => {},
     }
-    const target = sema.mod.getTarget();
-    const val = try ty.lazyAbiSize(target, sema.arena);
+    const val = try ty.lazyAbiSize(mod, sema.arena);
     if (val.tag() == .lazy_size) {
         try sema.queueFullTypeResolution(ty);
     }
@@ -15518,10 +15545,11 @@ fn zirSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
 }
 
 fn zirBitSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
     const operand_ty = try sema.resolveType(block, operand_src, inst_data.operand);
-    switch (operand_ty.zigTypeTag()) {
+    switch (operand_ty.zigTypeTag(mod)) {
         .Fn,
         .NoReturn,
         .Undefined,
@@ -15552,8 +15580,7 @@ fn zirBitSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
         .AnyFrame,
         => {},
     }
-    const target = sema.mod.getTarget();
-    const bit_size = try operand_ty.bitSizeAdvanced(target, sema);
+    const bit_size = try operand_ty.bitSizeAdvanced(mod, sema);
     return sema.addIntUnsigned(Type.comptime_int, bit_size);
 }
 
@@ -15765,13 +15792,13 @@ fn zirBuiltinSrc(
 }
 
 fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     const ty = try sema.resolveType(block, src, inst_data.operand);
     const type_info_ty = try sema.getBuiltinType("Type");
-    const target = sema.mod.getTarget();
 
-    switch (ty.zigTypeTag()) {
+    switch (ty.zigTypeTag(mod)) {
         .Type => return sema.addConstant(
             type_info_ty,
             try Value.Tag.@"union".create(sema.arena, .{
@@ -15881,8 +15908,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
             try sema.mod.declareDeclDependency(sema.owner_decl_index, fn_info_decl_index);
             try sema.ensureDeclAnalyzed(fn_info_decl_index);
             const fn_info_decl = sema.mod.declPtr(fn_info_decl_index);
-            var fn_ty_buffer: Value.ToTypeBuffer = undefined;
-            const fn_ty = fn_info_decl.val.toType(&fn_ty_buffer);
+            const fn_ty = fn_info_decl.val.toType();
             const param_info_decl_index = (try sema.namespaceLookup(
                 block,
                 src,
@@ -15892,8 +15918,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
            try sema.mod.declareDeclDependency(sema.owner_decl_index, param_info_decl_index);
             try sema.ensureDeclAnalyzed(param_info_decl_index);
             const param_info_decl = sema.mod.declPtr(param_info_decl_index);
-            var param_buffer: Value.ToTypeBuffer = undefined;
-            const param_ty = param_info_decl.val.toType(&param_buffer);
+            const param_ty = param_info_decl.val.toType();
             const new_decl = try params_anon_decl.finish(
                 try Type.Tag.array.create(params_anon_decl.arena(), .{
                     .len = param_vals.len,
@@ -15924,7 +15949,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 // calling_convention: CallingConvention,
                 try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(info.cc)),
                 // alignment: comptime_int,
-                try Value.Tag.int_u64.create(sema.arena, ty.abiAlignment(target)),
+                try Value.Tag.int_u64.create(sema.arena, ty.abiAlignment(mod)),
                 // is_generic: bool,
                 Value.makeBool(info.is_generic),
                 // is_var_args: bool,
@@ -15944,7 +15969,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
             );
         },
         .Int => {
-            const info = ty.intInfo(target);
+            const info = ty.intInfo(mod);
             const field_values = try sema.arena.alloc(Value, 2);
             // signedness: Signedness,
             field_values[0] = try Value.Tag.enum_field_index.create(
@@ -15965,7 +15990,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
         .Float => {
             const field_values = try sema.arena.alloc(Value, 1);
             // bits: comptime_int,
-            field_values[0] = try Value.Tag.int_u64.create(sema.arena, ty.bitSize(target));
+            field_values[0] = try Value.Tag.int_u64.create(sema.arena, ty.bitSize(mod));
 
             return sema.addConstant(
                 type_info_ty,
@@ -15980,7 +16005,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
             const alignment = if (info.@"align" != 0)
                 try Value.Tag.int_u64.create(sema.arena, info.@"align")
             else
-                try info.pointee_type.lazyAbiAlignment(target, sema.arena);
+                try info.pointee_type.lazyAbiAlignment(mod, sema.arena);
 
             const field_values = try sema.arena.create([8]Value);
             field_values.* = .{
@@ -16072,8 +16097,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
             try sema.mod.declareDeclDependency(sema.owner_decl_index, set_field_ty_decl_index);
             try sema.ensureDeclAnalyzed(set_field_ty_decl_index);
             const set_field_ty_decl = sema.mod.declPtr(set_field_ty_decl_index);
-            var buffer: Value.ToTypeBuffer = undefined;
-            break :t try set_field_ty_decl.val.toType(&buffer).copy(fields_anon_decl.arena());
+            break :t try set_field_ty_decl.val.toType().copy(fields_anon_decl.arena());
         };
         try sema.queueFullTypeResolution(try error_field_ty.copy(sema.arena));
 
@@ -16164,8 +16188,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
         },
         .Enum => {
             // TODO: look into memoizing this result.
- var int_tag_type_buffer: Type.Payload.Bits = undefined; - const int_tag_ty = try ty.intTagType(&int_tag_type_buffer).copy(sema.arena); + const int_tag_ty = try ty.intTagType().copy(sema.arena); const is_exhaustive = Value.makeBool(!ty.isNonexhaustiveEnum()); @@ -16182,8 +16205,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai try sema.mod.declareDeclDependency(sema.owner_decl_index, enum_field_ty_decl_index); try sema.ensureDeclAnalyzed(enum_field_ty_decl_index); const enum_field_ty_decl = sema.mod.declPtr(enum_field_ty_decl_index); - var buffer: Value.ToTypeBuffer = undefined; - break :t try enum_field_ty_decl.val.toType(&buffer).copy(fields_anon_decl.arena()); + break :t try enum_field_ty_decl.val.toType().copy(fields_anon_decl.arena()); }; const enum_fields = ty.enumFields(); @@ -16275,8 +16297,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai try sema.mod.declareDeclDependency(sema.owner_decl_index, union_field_ty_decl_index); try sema.ensureDeclAnalyzed(union_field_ty_decl_index); const union_field_ty_decl = sema.mod.declPtr(union_field_ty_decl_index); - var buffer: Value.ToTypeBuffer = undefined; - break :t try union_field_ty_decl.val.toType(&buffer).copy(fields_anon_decl.arena()); + break :t try union_field_ty_decl.val.toType().copy(fields_anon_decl.arena()); }; const union_ty = try sema.resolveTypeFields(ty); @@ -16383,8 +16404,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai try sema.mod.declareDeclDependency(sema.owner_decl_index, struct_field_ty_decl_index); try sema.ensureDeclAnalyzed(struct_field_ty_decl_index); const struct_field_ty_decl = sema.mod.declPtr(struct_field_ty_decl_index); - var buffer: Value.ToTypeBuffer = undefined; - break :t try struct_field_ty_decl.val.toType(&buffer).copy(fields_anon_decl.arena()); + break :t try struct_field_ty_decl.val.toType().copy(fields_anon_decl.arena()); }; const struct_ty = try sema.resolveTypeFields(ty); try sema.resolveTypeLayout(ty); // Getting alignment requires type layout @@ -16430,7 +16450,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // is_comptime: bool, Value.makeBool(is_comptime), // alignment: comptime_int, - try field_ty.lazyAbiAlignment(target, fields_anon_decl.arena()), + try field_ty.lazyAbiAlignment(mod, fields_anon_decl.arena()), }; struct_field_val.* = try Value.Tag.aggregate.create(fields_anon_decl.arena(), struct_field_fields); } @@ -16463,7 +16483,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai else field.default_val; const default_val_ptr = try sema.optRefValue(block, field.ty, opt_default_val); - const alignment = field.alignment(target, layout); + const alignment = field.alignment(mod, layout); struct_field_fields.* = .{ // name: []const u8, @@ -16506,7 +16526,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai if (layout == .Packed) { const struct_obj = struct_ty.castTag(.@"struct").?.data; assert(struct_obj.haveLayout()); - assert(struct_obj.backing_int_ty.isInt()); + assert(struct_obj.backing_int_ty.isInt(mod)); const backing_int_ty_val = try Value.Tag.ty.create(sema.arena, struct_obj.backing_int_ty); break :blk try Value.Tag.opt_payload.create(sema.arena, backing_int_ty_val); } else { @@ -16584,8 +16604,7 @@ fn typeInfoDecls( try sema.mod.declareDeclDependency(sema.owner_decl_index, declaration_ty_decl_index); try sema.ensureDeclAnalyzed(declaration_ty_decl_index); const 
declaration_ty_decl = sema.mod.declPtr(declaration_ty_decl_index); - var buffer: Value.ToTypeBuffer = undefined; - break :t try declaration_ty_decl.val.toType(&buffer).copy(decls_anon_decl.arena()); + break :t try declaration_ty_decl.val.toType().copy(decls_anon_decl.arena()); }; try sema.queueFullTypeResolution(try declaration_ty.copy(sema.arena)); @@ -16632,8 +16651,7 @@ fn typeInfoNamespaceDecls( if (decl.kind == .@"usingnamespace") { if (decl.analysis == .in_progress) continue; try sema.mod.ensureDeclAnalyzed(decl_index); - var buf: Value.ToTypeBuffer = undefined; - const new_ns = decl.val.toType(&buf).getNamespace().?; + const new_ns = decl.val.toType().getNamespace().?; try sema.typeInfoNamespaceDecls(block, decls_anon_decl, new_ns, decl_vals, seen_namespaces); continue; } @@ -16709,10 +16727,11 @@ fn zirTypeofLog2IntType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compil } fn log2IntType(sema: *Sema, block: *Block, operand: Type, src: LazySrcLoc) CompileError!Type { - switch (operand.zigTypeTag()) { + const mod = sema.mod; + switch (operand.zigTypeTag(mod)) { .ComptimeInt => return Type.comptime_int, .Int => { - const bits = operand.bitSize(sema.mod.getTarget()); + const bits = operand.bitSize(mod); const count = if (bits == 0) 0 else blk: { @@ -16723,10 +16742,10 @@ fn log2IntType(sema: *Sema, block: *Block, operand: Type, src: LazySrcLoc) Compi } break :blk count; }; - return Module.makeIntType(sema.arena, .unsigned, count); + return mod.intType(.unsigned, count); }, .Vector => { - const elem_ty = operand.elemType2(); + const elem_ty = operand.elemType2(mod); const log2_elem_ty = try sema.log2IntType(block, elem_ty, src); return Type.Tag.vector.create(sema.arena, .{ .len = operand.vectorLen(), @@ -16920,9 +16939,10 @@ fn finishCondBr( } fn checkNullableType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void { - switch (ty.zigTypeTag()) { + const mod = sema.mod; + switch (ty.zigTypeTag(mod)) { .Optional, .Null, .Undefined => return, - .Pointer => if (ty.isPtrLikeOptional()) return, + .Pointer => if (ty.isPtrLikeOptional(mod)) return, else => {}, } return sema.failWithExpectedOptionalType(block, src, ty); @@ -16951,10 +16971,11 @@ fn zirIsNonNullPtr( const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const ptr = try sema.resolveInst(inst_data.operand); - try sema.checkNullableType(block, src, sema.typeOf(ptr).elemType2()); + try sema.checkNullableType(block, src, sema.typeOf(ptr).elemType2(mod)); if ((try sema.resolveMaybeUndefVal(ptr)) == null) { return block.addUnOp(.is_non_null_ptr, ptr); } @@ -16963,7 +16984,8 @@ fn zirIsNonNullPtr( } fn checkErrorType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void { - switch (ty.zigTypeTag()) { + const mod = sema.mod; + switch (ty.zigTypeTag(mod)) { .ErrorSet, .ErrorUnion, .Undefined => return, else => return sema.fail(block, src, "expected error union type, found '{}'", .{ ty.fmt(sema.mod), @@ -16986,10 +17008,11 @@ fn zirIsNonErrPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const ptr = try sema.resolveInst(inst_data.operand); - try sema.checkErrorType(block, src, sema.typeOf(ptr).elemType2()); + try sema.checkErrorType(block, src, sema.typeOf(ptr).elemType2(mod)); const loaded = try sema.analyzeLoad(block, 
src, ptr, src); return sema.analyzeIsNonErr(block, src, loaded); } @@ -17012,6 +17035,7 @@ fn zirCondbr( const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const cond_src: LazySrcLoc = .{ .node_offset_if_cond = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.CondBr, inst_data.payload_index); @@ -17052,7 +17076,7 @@ fn zirCondbr( const err_inst_data = sema.code.instructions.items(.data)[index].un_node; const err_operand = try sema.resolveInst(err_inst_data.operand); const operand_ty = sema.typeOf(err_operand); - assert(operand_ty.zigTypeTag() == .ErrorUnion); + assert(operand_ty.zigTypeTag(mod) == .ErrorUnion); const result_ty = operand_ty.errorUnionSet(); break :blk try sub_block.addTyOp(.unwrap_errunion_err, result_ty, err_operand); }; @@ -17079,7 +17103,7 @@ fn zirCondbr( return always_noreturn; } -fn zirTry(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Ref { +fn zirTry(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; @@ -17087,7 +17111,8 @@ fn zirTry(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError! const body = sema.code.extra[extra.end..][0..extra.data.body_len]; const err_union = try sema.resolveInst(extra.data.operand); const err_union_ty = sema.typeOf(err_union); - if (err_union_ty.zigTypeTag() != .ErrorUnion) { + const mod = sema.mod; + if (err_union_ty.zigTypeTag(mod) != .ErrorUnion) { return sema.fail(parent_block, operand_src, "expected error union type, found '{}'", .{ err_union_ty.fmt(sema.mod), }); @@ -17124,7 +17149,7 @@ fn zirTry(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError! 
return try_inst; } -fn zirTryPtr(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Ref { +fn zirTryPtr(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; @@ -17133,7 +17158,8 @@ fn zirTryPtr(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileErr const operand = try sema.resolveInst(extra.data.operand); const err_union = try sema.analyzeLoad(parent_block, src, operand, operand_src); const err_union_ty = sema.typeOf(err_union); - if (err_union_ty.zigTypeTag() != .ErrorUnion) { + const mod = sema.mod; + if (err_union_ty.zigTypeTag(mod) != .ErrorUnion) { return sema.fail(parent_block, operand_src, "expected error union type, found '{}'", .{ err_union_ty.fmt(sema.mod), }); @@ -17275,16 +17301,17 @@ fn zirRetImplicit( const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_tok; const operand = try sema.resolveInst(inst_data.operand); const r_brace_src = inst_data.src(); const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = 0 }; - const base_tag = sema.fn_ret_ty.baseZigTypeTag(); + const base_tag = sema.fn_ret_ty.baseZigTypeTag(mod); if (base_tag == .NoReturn) { const msg = msg: { const msg = try sema.errMsg(block, ret_ty_src, "function declared '{}' implicitly returns", .{ - sema.fn_ret_ty.fmt(sema.mod), + sema.fn_ret_ty.fmt(mod), }); errdefer msg.destroy(sema.gpa); try sema.errNote(block, r_brace_src, msg, "control flow reaches end of body here", .{}); @@ -17294,7 +17321,7 @@ fn zirRetImplicit( } else if (base_tag != .Void) { const msg = msg: { const msg = try sema.errMsg(block, ret_ty_src, "function with non-void return type '{}' implicitly returns", .{ - sema.fn_ret_ty.fmt(sema.mod), + sema.fn_ret_ty.fmt(mod), }); errdefer msg.destroy(sema.gpa); try sema.errNote(block, r_brace_src, msg, "control flow reaches end of body here", .{}); @@ -17397,17 +17424,19 @@ fn retWithErrTracing( } fn wantErrorReturnTracing(sema: *Sema, fn_ret_ty: Type) bool { - if (!sema.mod.backendSupportsFeature(.error_return_trace)) return false; + const mod = sema.mod; + if (!mod.backendSupportsFeature(.error_return_trace)) return false; - return fn_ret_ty.isError() and - sema.mod.comp.bin_file.options.error_return_tracing; + return fn_ret_ty.isError(mod) and + mod.comp.bin_file.options.error_return_tracing; } fn zirSaveErrRetIndex(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].save_err_ret_index; - if (!sema.mod.backendSupportsFeature(.error_return_trace)) return; - if (!sema.mod.comp.bin_file.options.error_return_tracing) return; + if (!mod.backendSupportsFeature(.error_return_trace)) return; + if (!mod.comp.bin_file.options.error_return_tracing) return; // This is only relevant at runtime. 
if (block.is_comptime or block.is_typeof) return; @@ -17415,7 +17444,7 @@ fn zirSaveErrRetIndex(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE const save_index = inst_data.operand == .none or b: { const operand = try sema.resolveInst(inst_data.operand); const operand_ty = sema.typeOf(operand); - break :b operand_ty.isError(); + break :b operand_ty.isError(mod); }; if (save_index) @@ -17467,11 +17496,12 @@ fn zirRestoreErrRetIndex(sema: *Sema, start_block: *Block, inst: Zir.Inst.Index) } fn addToInferredErrorSet(sema: *Sema, uncasted_operand: Air.Inst.Ref) !void { - assert(sema.fn_ret_ty.zigTypeTag() == .ErrorUnion); + const mod = sema.mod; + assert(sema.fn_ret_ty.zigTypeTag(mod) == .ErrorUnion); if (sema.fn_ret_ty.errorUnionSet().castTag(.error_set_inferred)) |payload| { const op_ty = sema.typeOf(uncasted_operand); - switch (op_ty.zigTypeTag()) { + switch (op_ty.zigTypeTag(mod)) { .ErrorSet => { try payload.data.addErrorSet(sema.gpa, op_ty); }, @@ -17492,7 +17522,8 @@ fn analyzeRet( // Special case for returning an error to an inferred error set; we need to // add the error tag to the inferred error set of the in-scope function, so // that the coercion below works correctly. - if (sema.fn_ret_ty.zigTypeTag() == .ErrorUnion) { + const mod = sema.mod; + if (sema.fn_ret_ty.zigTypeTag(mod) == .ErrorUnion) { try sema.addToInferredErrorSet(uncasted_operand); } const operand = sema.coerceExtra(block, sema.fn_ret_ty, uncasted_operand, src, .{ .is_ret = true }) catch |err| switch (err) { @@ -17540,6 +17571,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].ptr_type; const extra = sema.code.extraData(Zir.Inst.PtrType, inst_data.payload_index); const elem_ty_src: LazySrcLoc = .{ .node_offset_ptr_elem = extra.data.src_node }; @@ -17582,7 +17614,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air break :blk 0; } } - const abi_align = @intCast(u32, (try val.getUnsignedIntAdvanced(target, sema)).?); + const abi_align = @intCast(u32, (try val.getUnsignedIntAdvanced(mod, sema)).?); try sema.validateAlign(block, align_src, abi_align); break :blk abi_align; } else 0; @@ -17591,7 +17623,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_i]); extra_i += 1; break :blk try sema.analyzeAddressSpace(block, addrspace_src, ref, .pointer); - } else if (elem_ty.zigTypeTag() == .Fn and target.cpu.arch == .avr) .flash else .generic; + } else if (elem_ty.zigTypeTag(mod) == .Fn and target.cpu.arch == .avr) .flash else .generic; const bit_offset = if (inst_data.flags.has_bit_range) blk: { const ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_i]); @@ -17611,9 +17643,9 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air return sema.fail(block, bitoffset_src, "bit offset starts after end of host integer", .{}); } - if (elem_ty.zigTypeTag() == .NoReturn) { + if (elem_ty.zigTypeTag(mod) == .NoReturn) { return sema.fail(block, elem_ty_src, "pointer to noreturn not allowed", .{}); - } else if (elem_ty.zigTypeTag() == .Fn) { + } else if (elem_ty.zigTypeTag(mod) == .Fn) { if (inst_data.size != .One) { return sema.fail(block, elem_ty_src, "function pointers must be single pointers", .{}); } @@ -17623,7 +17655,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air { 
return sema.fail(block, align_src, "function pointer alignment disagrees with function alignment", .{}); } - } else if (inst_data.size == .Many and elem_ty.zigTypeTag() == .Opaque) { + } else if (inst_data.size == .Many and elem_ty.zigTypeTag(mod) == .Opaque) { return sema.fail(block, elem_ty_src, "unknown-length pointer to opaque not allowed", .{}); } else if (inst_data.size == .C) { if (!try sema.validateExternType(elem_ty, .other)) { @@ -17639,7 +17671,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air }; return sema.failWithOwnedErrorMsg(msg); } - if (elem_ty.zigTypeTag() == .Opaque) { + if (elem_ty.zigTypeTag(mod) == .Opaque) { return sema.fail(block, elem_ty_src, "C pointers cannot point to opaque types", .{}); } } @@ -17666,8 +17698,9 @@ fn zirStructInitEmpty(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const obj_ty = try sema.resolveType(block, src, inst_data.operand); + const mod = sema.mod; - switch (obj_ty.zigTypeTag()) { + switch (obj_ty.zigTypeTag(mod)) { .Struct => return sema.structInitEmpty(block, obj_ty, src, src), .Array, .Vector => return sema.arrayInitEmpty(block, src, obj_ty), .Void => return sema.addConstant(obj_ty, Value.void), @@ -17696,9 +17729,10 @@ fn structInitEmpty( } fn arrayInitEmpty(sema: *Sema, block: *Block, src: LazySrcLoc, obj_ty: Type) CompileError!Air.Inst.Ref { + const mod = sema.mod; const arr_len = obj_ty.arrayLen(); if (arr_len != 0) { - if (obj_ty.zigTypeTag() == .Array) { + if (obj_ty.zigTypeTag(mod) == .Array) { return sema.fail(block, src, "expected {d} array elements; found 0", .{arr_len}); } else { return sema.fail(block, src, "expected {d} vector elements; found 0", .{arr_len}); @@ -17766,13 +17800,14 @@ fn zirStructInit( const extra = sema.code.extraData(Zir.Inst.StructInit, inst_data.payload_index); const src = inst_data.src(); + const mod = sema.mod; const first_item = sema.code.extraData(Zir.Inst.StructInit.Item, extra.end).data; const first_field_type_data = zir_datas[first_item.field_type].pl_node; const first_field_type_extra = sema.code.extraData(Zir.Inst.FieldType, first_field_type_data.payload_index).data; const resolved_ty = try sema.resolveType(block, src, first_field_type_extra.container_type); try sema.resolveTypeLayout(resolved_ty); - if (resolved_ty.zigTypeTag() == .Struct) { + if (resolved_ty.zigTypeTag(mod) == .Struct) { // This logic must be synchronized with that in `zirStructInitEmpty`. // Maps field index to field_type index of where it was already initialized. 
@@ -17815,7 +17850,7 @@ fn zirStructInit( } found_fields[field_index] = item.data.field_type; field_inits[field_index] = try sema.resolveInst(item.data.init); - if (!is_packed) if (resolved_ty.structFieldValueComptime(field_index)) |default_value| { + if (!is_packed) if (resolved_ty.structFieldValueComptime(mod, field_index)) |default_value| { const init_val = (try sema.resolveMaybeUndefVal(field_inits[field_index])) orelse { return sema.failWithNeededComptime(block, field_src, "value stored in comptime field must be comptime-known"); }; @@ -17827,7 +17862,7 @@ fn zirStructInit( } return sema.finishStructInit(block, src, src, field_inits, resolved_ty, is_ref); - } else if (resolved_ty.zigTypeTag() == .Union) { + } else if (resolved_ty.zigTypeTag(mod) == .Union) { if (extra.data.fields_len != 1) { return sema.fail(block, src, "union initialization expects exactly one field", .{}); } @@ -18014,6 +18049,7 @@ fn zirStructInitAnon( inst: Zir.Inst.Index, is_ref: bool, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const extra = sema.code.extraData(Zir.Inst.StructInitAnon, inst_data.payload_index); @@ -18050,7 +18086,7 @@ fn zirStructInitAnon( const init = try sema.resolveInst(item.data.init); field_ty.* = sema.typeOf(init); - if (types[i].zigTypeTag() == .Opaque) { + if (types[i].zigTypeTag(mod) == .Opaque) { const msg = msg: { const decl = sema.mod.declPtr(block.src_decl); const field_src = Module.initSrc(src.node_offset.x, sema.gpa, decl, i); @@ -18148,15 +18184,16 @@ fn zirArrayInit( const array_ty = try sema.resolveType(block, src, args[0]); const sentinel_val = array_ty.sentinel(); + const mod = sema.mod; const resolved_args = try gpa.alloc(Air.Inst.Ref, args.len - 1 + @boolToInt(sentinel_val != null)); defer gpa.free(resolved_args); for (args[1..], 0..) |arg, i| { const resolved_arg = try sema.resolveInst(arg); - const elem_ty = if (array_ty.zigTypeTag() == .Struct) + const elem_ty = if (array_ty.zigTypeTag(mod) == .Struct) array_ty.structFieldType(i) else - array_ty.elemType2(); + array_ty.elemType2(mod); resolved_args[i] = sema.coerce(block, elem_ty, resolved_arg, .unneeded) catch |err| switch (err) { error.NeededSourceLocation => { const decl = sema.mod.declPtr(block.src_decl); @@ -18169,7 +18206,7 @@ fn zirArrayInit( } if (sentinel_val) |some| { - resolved_args[resolved_args.len - 1] = try sema.addConstant(array_ty.elemType2(), some); + resolved_args[resolved_args.len - 1] = try sema.addConstant(array_ty.elemType2(mod), some); } const opt_runtime_index: ?u32 = for (resolved_args, 0..) 
|arg, i| { @@ -18227,7 +18264,7 @@ fn zirArrayInit( const elem_ptr_ty = try Type.ptr(sema.arena, sema.mod, .{ .mutable = true, .@"addrspace" = target_util.defaultAddressSpace(target, .local), - .pointee_type = array_ty.elemType2(), + .pointee_type = array_ty.elemType2(mod), }); const elem_ptr_ty_ref = try sema.addType(elem_ptr_ty); @@ -18252,6 +18289,7 @@ fn zirArrayInitAnon( const src = inst_data.src(); const extra = sema.code.extraData(Zir.Inst.MultiOp, inst_data.payload_index); const operands = sema.code.refSlice(extra.end, extra.data.operands_len); + const mod = sema.mod; const types = try sema.arena.alloc(Type, operands.len); const values = try sema.arena.alloc(Value, operands.len); @@ -18262,7 +18300,7 @@ fn zirArrayInitAnon( const operand_src = src; // TODO better source location const elem = try sema.resolveInst(operand); types[i] = sema.typeOf(elem); - if (types[i].zigTypeTag() == .Opaque) { + if (types[i].zigTypeTag(mod) == .Opaque) { const msg = msg: { const msg = try sema.errMsg(block, operand_src, "opaque types have unknown size and therefore cannot be directly embedded in structs", .{}); errdefer msg.destroy(sema.gpa); @@ -18379,11 +18417,12 @@ fn fieldType( field_src: LazySrcLoc, ty_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; var cur_ty = aggregate_ty; while (true) { const resolved_ty = try sema.resolveTypeFields(cur_ty); cur_ty = resolved_ty; - switch (cur_ty.zigTypeTag()) { + switch (cur_ty.zigTypeTag(mod)) { .Struct => { if (cur_ty.isAnonStruct()) { const field_index = try sema.anonStructFieldIndex(block, cur_ty, field_name, field_src); @@ -18449,14 +18488,14 @@ fn zirFrame( } fn zirAlignOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const ty = try sema.resolveType(block, operand_src, inst_data.operand); if (ty.isNoReturn()) { return sema.fail(block, operand_src, "no align available for type '{}'", .{ty.fmt(sema.mod)}); } - const target = sema.mod.getTarget(); - const val = try ty.lazyAbiAlignment(target, sema.arena); + const val = try ty.lazyAbiAlignment(mod, sema.arena); if (val.tag() == .lazy_align) { try sema.queueFullTypeResolution(ty); } @@ -18499,16 +18538,17 @@ fn zirUnaryMath( const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const operand = try sema.resolveInst(inst_data.operand); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const operand_ty = sema.typeOf(operand); - switch (operand_ty.zigTypeTag()) { + switch (operand_ty.zigTypeTag(mod)) { .ComptimeFloat, .Float => {}, .Vector => { - const scalar_ty = operand_ty.scalarType(); - switch (scalar_ty.zigTypeTag()) { + const scalar_ty = operand_ty.scalarType(mod); + switch (scalar_ty.zigTypeTag(mod)) { .ComptimeFloat, .Float => {}, else => return sema.fail(block, operand_src, "expected vector of floats or float type, found '{}'", .{scalar_ty.fmt(sema.mod)}), } @@ -18516,9 +18556,9 @@ fn zirUnaryMath( else => return sema.fail(block, operand_src, "expected vector of floats or float type, found '{}'", .{operand_ty.fmt(sema.mod)}), } - switch (operand_ty.zigTypeTag()) { + switch (operand_ty.zigTypeTag(mod)) { .Vector => { - const scalar_ty = operand_ty.scalarType(); + const scalar_ty = operand_ty.scalarType(mod); const vec_len = 
operand_ty.vectorLen(); const result_ty = try Type.vector(sema.arena, vec_len, scalar_ty); if (try sema.resolveMaybeUndefVal(operand)) |val| { @@ -18564,7 +18604,7 @@ fn zirTagName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const mod = sema.mod; try sema.resolveTypeLayout(operand_ty); - const enum_ty = switch (operand_ty.zigTypeTag()) { + const enum_ty = switch (operand_ty.zigTypeTag(mod)) { .EnumLiteral => { const val = try sema.resolveConstValue(block, .unneeded, operand, ""); const bytes = val.castTag(.enum_literal).?.data; @@ -18654,11 +18694,8 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in const bits_val = struct_val[1]; const signedness = signedness_val.toEnum(std.builtin.Signedness); - const bits = @intCast(u16, bits_val.toUnsignedInt(target)); - const ty = switch (signedness) { - .signed => try Type.Tag.int_signed.create(sema.arena, bits), - .unsigned => try Type.Tag.int_unsigned.create(sema.arena, bits), - }; + const bits = @intCast(u16, bits_val.toUnsignedInt(mod)); + const ty = try mod.intType(signedness, bits); return sema.addType(ty); }, .Vector => { @@ -18667,9 +18704,8 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in const len_val = struct_val[0]; const child_val = struct_val[1]; - const len = len_val.toUnsignedInt(target); - var buffer: Value.ToTypeBuffer = undefined; - const child_ty = child_val.toType(&buffer); + const len = len_val.toUnsignedInt(mod); + const child_ty = child_val.toType(); try sema.checkVectorElemType(block, src, child_ty); @@ -18682,7 +18718,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in // bits: comptime_int, const bits_val = struct_val[0]; - const bits = @intCast(u16, bits_val.toUnsignedInt(target)); + const bits = @intCast(u16, bits_val.toUnsignedInt(mod)); const ty = switch (bits) { 16 => Type.f16, 32 => Type.f32, @@ -18708,10 +18744,9 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in if (!try sema.intFitsInType(alignment_val, Type.u32, null)) { return sema.fail(block, src, "alignment must fit in 'u32'", .{}); } - const abi_align = @intCast(u29, (try alignment_val.getUnsignedIntAdvanced(target, sema)).?); + const abi_align = @intCast(u29, (try alignment_val.getUnsignedIntAdvanced(mod, sema)).?); - var buffer: Value.ToTypeBuffer = undefined; - const unresolved_elem_ty = child_val.toType(&buffer); + const unresolved_elem_ty = child_val.toType(); const elem_ty = if (abi_align == 0) unresolved_elem_ty else t: { @@ -18723,7 +18758,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in const ptr_size = size_val.toEnum(std.builtin.Type.Pointer.Size); var actual_sentinel: ?Value = null; - if (!sentinel_val.isNull()) { + if (!sentinel_val.isNull(mod)) { if (ptr_size == .One or ptr_size == .C) { return sema.fail(block, src, "sentinels are only allowed on slices and unknown-length pointers", .{}); } @@ -18735,9 +18770,9 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in actual_sentinel = (try sema.pointerDeref(block, src, sentinel_ptr_val, ptr_ty)).?; } - if (elem_ty.zigTypeTag() == .NoReturn) { + if (elem_ty.zigTypeTag(mod) == .NoReturn) { return sema.fail(block, src, "pointer to noreturn not allowed", .{}); - } else if (elem_ty.zigTypeTag() == .Fn) { + } else if (elem_ty.zigTypeTag(mod) == .Fn) { if (ptr_size != .One) { return sema.fail(block, src, "function pointers must be single pointers", .{}); } @@ -18747,7 +18782,7 @@ fn 
zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in { return sema.fail(block, src, "function pointer alignment disagrees with function alignment", .{}); } - } else if (ptr_size == .Many and elem_ty.zigTypeTag() == .Opaque) { + } else if (ptr_size == .Many and elem_ty.zigTypeTag(mod) == .Opaque) { return sema.fail(block, src, "unknown-length pointer to opaque not allowed", .{}); } else if (ptr_size == .C) { if (!try sema.validateExternType(elem_ty, .other)) { @@ -18763,7 +18798,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in }; return sema.failWithOwnedErrorMsg(msg); } - if (elem_ty.zigTypeTag() == .Opaque) { + if (elem_ty.zigTypeTag(mod) == .Opaque) { return sema.fail(block, src, "C pointers cannot point to opaque types", .{}); } } @@ -18790,9 +18825,8 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in // sentinel: ?*const anyopaque, const sentinel_val = struct_val[2]; - const len = len_val.toUnsignedInt(target); - var buffer: Value.ToTypeBuffer = undefined; - const child_ty = try child_val.toType(&buffer).copy(sema.arena); + const len = len_val.toUnsignedInt(mod); + const child_ty = try child_val.toType().copy(sema.arena); const sentinel = if (sentinel_val.castTag(.opt_payload)) |p| blk: { const ptr_ty = try Type.ptr(sema.arena, mod, .{ .@"addrspace" = .generic, @@ -18810,8 +18844,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in // child: type, const child_val = struct_val[0]; - var buffer: Value.ToTypeBuffer = undefined; - const child_ty = try child_val.toType(&buffer).copy(sema.arena); + const child_ty = try child_val.toType().copy(sema.arena); const ty = try Type.optional(sema.arena, child_ty); return sema.addType(ty); @@ -18824,11 +18857,10 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in // payload: type, const payload_val = struct_val[1]; - var buffer: Value.ToTypeBuffer = undefined; - const error_set_ty = try error_set_val.toType(&buffer).copy(sema.arena); - const payload_ty = try payload_val.toType(&buffer).copy(sema.arena); + const error_set_ty = try error_set_val.toType().copy(sema.arena); + const payload_ty = try payload_val.toType().copy(sema.arena); - if (error_set_ty.zigTypeTag() != .ErrorSet) { + if (error_set_ty.zigTypeTag(mod) != .ErrorSet) { return sema.fail(block, src, "Type.ErrorUnion.error_set must be an error set type", .{}); } @@ -18839,11 +18871,11 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in return sema.addType(ty); }, .ErrorSet => { - const payload_val = union_val.val.optionalValue() orelse + const payload_val = union_val.val.optionalValue(mod) orelse return sema.addType(Type.initTag(.anyerror)); const slice_val = payload_val.castTag(.slice).?.data; - const len = try sema.usizeCast(block, src, slice_val.len.toUnsignedInt(mod.getTarget())); + const len = try sema.usizeCast(block, src, slice_val.len.toUnsignedInt(mod)); var names: Module.ErrorSet.NameMap = .{}; try names.ensureUnusedCapacity(sema.arena, len); var i: usize = 0; @@ -18890,7 +18922,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in return sema.fail(block, src, "reified structs must have no decls", .{}); } - if (layout != .Packed and !backing_int_val.isNull()) { + if (layout != .Packed and !backing_int_val.isNull(mod)) { return sema.fail(block, src, "non-packed struct does not support backing integer type", .{}); } @@ -18954,10 +18986,9 @@ fn zirReify(sema: *Sema, 
block: *Block, extended: Zir.Inst.Extended.InstData, in }; // Enum tag type - var buffer: Value.ToTypeBuffer = undefined; - const int_tag_ty = try tag_type_val.toType(&buffer).copy(new_decl_arena_allocator); + const int_tag_ty = try tag_type_val.toType().copy(new_decl_arena_allocator); - if (int_tag_ty.zigTypeTag() != .Int) { + if (int_tag_ty.zigTypeTag(mod) != .Int) { return sema.fail(block, src, "Type.Enum.tag_type must be an integer type", .{}); } enum_obj.tag_ty = int_tag_ty; @@ -19090,7 +19121,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in const new_decl_arena_allocator = new_decl_arena.allocator(); const union_obj = try new_decl_arena_allocator.create(Module.Union); - const type_tag = if (!tag_type_val.isNull()) + const type_tag = if (!tag_type_val.isNull(mod)) Type.Tag.union_tagged else if (layout != .Auto) Type.Tag.@"union" @@ -19130,11 +19161,10 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in var tag_ty_field_names: ?Module.EnumFull.NameMap = null; var enum_field_names: ?*Module.EnumNumbered.NameMap = null; const fields_len = try sema.usizeCast(block, src, fields_val.sliceLen(mod)); - if (tag_type_val.optionalValue()) |payload_val| { - var buffer: Value.ToTypeBuffer = undefined; - union_obj.tag_ty = try payload_val.toType(&buffer).copy(new_decl_arena_allocator); + if (tag_type_val.optionalValue(mod)) |payload_val| { + union_obj.tag_ty = try payload_val.toType().copy(new_decl_arena_allocator); - if (union_obj.tag_ty.zigTypeTag() != .Enum) { + if (union_obj.tag_ty.zigTypeTag(mod) != .Enum) { return sema.fail(block, src, "Type.Union.tag_type must be an enum type", .{}); } tag_ty_field_names = try union_obj.tag_ty.enumFields().clone(sema.arena); @@ -19187,14 +19217,13 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in return sema.fail(block, src, "duplicate union field {s}", .{field_name}); } - var buffer: Value.ToTypeBuffer = undefined; - const field_ty = try type_val.toType(&buffer).copy(new_decl_arena_allocator); + const field_ty = try type_val.toType().copy(new_decl_arena_allocator); gop.value_ptr.* = .{ .ty = field_ty, - .abi_align = @intCast(u32, (try alignment_val.getUnsignedIntAdvanced(target, sema)).?), + .abi_align = @intCast(u32, (try alignment_val.getUnsignedIntAdvanced(mod, sema)).?), }; - if (field_ty.zigTypeTag() == .Opaque) { + if (field_ty.zigTypeTag(mod) == .Opaque) { const msg = msg: { const msg = try sema.errMsg(block, src, "opaque types have unknown size and therefore cannot be directly embedded in unions", .{}); errdefer msg.destroy(sema.gpa); @@ -19216,7 +19245,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); - } else if (union_obj.layout == .Packed and !(validatePackedType(field_ty))) { + } else if (union_obj.layout == .Packed and !(validatePackedType(field_ty, mod))) { const msg = msg: { const msg = try sema.errMsg(block, src, "packed unions cannot contain fields of type '{}'", .{field_ty.fmt(sema.mod)}); errdefer msg.destroy(sema.gpa); @@ -19280,20 +19309,18 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in if (!try sema.intFitsInType(alignment_val, Type.u32, null)) { return sema.fail(block, src, "alignment must fit in 'u32'", .{}); } - const alignment = @intCast(u29, alignment_val.toUnsignedInt(target)); + const alignment = @intCast(u29, alignment_val.toUnsignedInt(mod)); if (alignment == 
target_util.defaultFunctionAlignment(target)) { break :alignment 0; } else { break :alignment alignment; } }; - const return_type = return_type_val.optionalValue() orelse + const return_type = return_type_val.optionalValue(mod) orelse return sema.fail(block, src, "Type.Fn.return_type must be non-null for @Type", .{}); - var buf: Value.ToTypeBuffer = undefined; - const args_slice_val = args_val.castTag(.slice).?.data; - const args_len = try sema.usizeCast(block, src, args_slice_val.len.toUnsignedInt(mod.getTarget())); + const args_len = try sema.usizeCast(block, src, args_slice_val.len.toUnsignedInt(mod)); const param_types = try sema.arena.alloc(Type, args_len); const comptime_params = try sema.arena.alloc(bool, args_len); @@ -19316,12 +19343,12 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in return sema.fail(block, src, "Type.Fn.Param.is_generic must be false for @Type", .{}); } - const param_type_val = param_type_opt_val.optionalValue() orelse + const param_type_val = param_type_opt_val.optionalValue(mod) orelse return sema.fail(block, src, "Type.Fn.Param.arg_type must be non-null for @Type", .{}); - const param_type = try param_type_val.toType(&buf).copy(sema.arena); + const param_type = try param_type_val.toType().copy(sema.arena); if (arg_is_noalias) { - if (!param_type.isPtrAtRuntime()) { + if (!param_type.isPtrAtRuntime(mod)) { return sema.fail(block, src, "non-pointer parameter declared noalias", .{}); } noalias_bits |= @as(u32, 1) << (std.math.cast(u5, i) orelse @@ -19336,7 +19363,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in .param_types = param_types, .comptime_params = comptime_params.ptr, .noalias_bits = noalias_bits, - .return_type = try return_type.toType(&buf).copy(sema.arena), + .return_type = try return_type.toType().copy(sema.arena), .alignment = alignment, .cc = cc, .is_var_args = is_var_args, @@ -19396,8 +19423,6 @@ fn reifyStruct( }, }; - const target = mod.getTarget(); - // Fields const fields_len = try sema.usizeCast(block, src, fields_val.sliceLen(mod)); try struct_obj.fields.ensureTotalCapacity(new_decl_arena_allocator, fields_len); @@ -19420,7 +19445,7 @@ fn reifyStruct( if (!try sema.intFitsInType(alignment_val, Type.u32, null)) { return sema.fail(block, src, "alignment must fit in 'u32'", .{}); } - const abi_align = @intCast(u29, (try alignment_val.getUnsignedIntAdvanced(target, sema)).?); + const abi_align = @intCast(u29, (try alignment_val.getUnsignedIntAdvanced(mod, sema)).?); if (layout == .Packed) { if (abi_align != 0) return sema.fail(block, src, "alignment in a packed struct field must be set to 0", .{}); @@ -19461,7 +19486,7 @@ fn reifyStruct( return sema.fail(block, src, "duplicate struct field {s}", .{field_name}); } - const default_val = if (default_value_val.optionalValue()) |opt_val| blk: { + const default_val = if (default_value_val.optionalValue(mod)) |opt_val| blk: { const payload_val = if (opt_val.pointerDecl()) |opt_decl| mod.declPtr(opt_decl).val else @@ -19472,8 +19497,7 @@ fn reifyStruct( return sema.fail(block, src, "comptime field without default initialization value", .{}); } - var buffer: Value.ToTypeBuffer = undefined; - const field_ty = try type_val.toType(&buffer).copy(new_decl_arena_allocator); + const field_ty = try type_val.toType().copy(new_decl_arena_allocator); gop.value_ptr.* = .{ .ty = field_ty, .abi_align = abi_align, @@ -19482,7 +19506,7 @@ fn reifyStruct( .offset = undefined, }; - if (field_ty.zigTypeTag() == .Opaque) { + if 
(field_ty.zigTypeTag(mod) == .Opaque) { const msg = msg: { const msg = try sema.errMsg(block, src, "opaque types have unknown size and therefore cannot be directly embedded in structs", .{}); errdefer msg.destroy(sema.gpa); @@ -19492,7 +19516,7 @@ fn reifyStruct( }; return sema.failWithOwnedErrorMsg(msg); } - if (field_ty.zigTypeTag() == .NoReturn) { + if (field_ty.zigTypeTag(mod) == .NoReturn) { const msg = msg: { const msg = try sema.errMsg(block, src, "struct fields cannot be 'noreturn'", .{}); errdefer msg.destroy(sema.gpa); @@ -19514,7 +19538,7 @@ fn reifyStruct( break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); - } else if (struct_obj.layout == .Packed and !(validatePackedType(field_ty))) { + } else if (struct_obj.layout == .Packed and !(validatePackedType(field_ty, mod))) { const msg = msg: { const msg = try sema.errMsg(block, src, "packed structs cannot contain fields of type '{}'", .{field_ty.fmt(sema.mod)}); errdefer msg.destroy(sema.gpa); @@ -19545,20 +19569,15 @@ fn reifyStruct( var fields_bit_sum: u64 = 0; for (struct_obj.fields.values()) |field| { - fields_bit_sum += field.ty.bitSize(target); + fields_bit_sum += field.ty.bitSize(mod); } - if (backing_int_val.optionalValue()) |payload| { - var buf: Value.ToTypeBuffer = undefined; - const backing_int_ty = payload.toType(&buf); + if (backing_int_val.optionalValue(mod)) |payload| { + const backing_int_ty = payload.toType(); try sema.checkBackingIntType(block, src, backing_int_ty, fields_bit_sum); struct_obj.backing_int_ty = try backing_int_ty.copy(new_decl_arena_allocator); } else { - var buf: Type.Payload.Bits = .{ - .base = .{ .tag = .int_unsigned }, - .data = @intCast(u16, fields_bit_sum), - }; - struct_obj.backing_int_ty = try Type.initPayload(&buf.base).copy(new_decl_arena_allocator); + struct_obj.backing_int_ty = try mod.intType(.unsigned, @intCast(u16, fields_bit_sum)); } struct_obj.status = .have_layout; @@ -19569,6 +19588,7 @@ fn reifyStruct( } fn zirAddrSpaceCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref { + const mod = sema.mod; const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data; const src = LazySrcLoc.nodeOffset(extra.node); const addrspace_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node }; @@ -19594,7 +19614,7 @@ fn zirAddrSpaceCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Inst ptr_info.@"addrspace" = dest_addrspace; const dest_ptr_ty = try Type.ptr(sema.arena, sema.mod, ptr_info); - const dest_ty = if (ptr_ty.zigTypeTag() == .Optional) + const dest_ty = if (ptr_ty.zigTypeTag(mod) == .Optional) try Type.optional(sema.arena, dest_ptr_ty) else dest_ptr_ty; @@ -19716,6 +19736,7 @@ fn zirFrameSize(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A } fn zirFloatToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; @@ -19730,12 +19751,12 @@ fn zirFloatToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! 
if (try sema.resolveMaybeUndefVal(operand)) |val| { const result_val = try sema.floatToInt(block, operand_src, val, operand_ty, dest_ty); return sema.addConstant(dest_ty, result_val); - } else if (dest_ty.zigTypeTag() == .ComptimeInt) { + } else if (dest_ty.zigTypeTag(mod) == .ComptimeInt) { return sema.failWithNeededComptime(block, operand_src, "value being casted to 'comptime_int' must be comptime-known"); } try sema.requireRuntimeBlock(block, inst_data.src(), operand_src); - if (dest_ty.intInfo(sema.mod.getTarget()).bits == 0) { + if (dest_ty.intInfo(mod).bits == 0) { if (block.wantSafety()) { const ok = try block.addBinOp(if (block.float_mode == .Optimized) .cmp_eq_optimized else .cmp_eq, operand, try sema.addConstant(operand_ty, Value.zero)); try sema.addSafetyCheck(block, ok, .integer_part_out_of_bounds); @@ -19755,6 +19776,7 @@ fn zirFloatToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! } fn zirIntToFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; @@ -19769,7 +19791,7 @@ fn zirIntToFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! if (try sema.resolveMaybeUndefVal(operand)) |val| { const result_val = try val.intToFloatAdvanced(sema.arena, operand_ty, dest_ty, sema.mod, sema); return sema.addConstant(dest_ty, result_val); - } else if (dest_ty.zigTypeTag() == .ComptimeFloat) { + } else if (dest_ty.zigTypeTag(mod) == .ComptimeFloat) { return sema.failWithNeededComptime(block, operand_src, "value being casted to 'comptime_float' must be comptime-known"); } @@ -19778,6 +19800,7 @@ fn zirIntToFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! 
} fn zirIntToPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); @@ -19790,9 +19813,8 @@ fn zirIntToPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const type_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const ptr_ty = try sema.resolveType(block, src, extra.lhs); try sema.checkPtrType(block, type_src, ptr_ty); - const elem_ty = ptr_ty.elemType2(); - const target = sema.mod.getTarget(); - const ptr_align = try ptr_ty.ptrAlignmentAdvanced(target, sema); + const elem_ty = ptr_ty.elemType2(mod); + const ptr_align = try ptr_ty.ptrAlignmentAdvanced(mod, sema); if (ptr_ty.isSlice()) { const msg = msg: { @@ -19805,8 +19827,8 @@ fn zirIntToPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } if (try sema.resolveDefinedValue(block, operand_src, operand_coerced)) |val| { - const addr = val.toUnsignedInt(target); - if (!ptr_ty.isAllowzeroPtr() and addr == 0) + const addr = val.toUnsignedInt(mod); + if (!ptr_ty.isAllowzeroPtr(mod) and addr == 0) return sema.fail(block, operand_src, "pointer type '{}' does not allow address zero", .{ptr_ty.fmt(sema.mod)}); if (addr != 0 and ptr_align != 0 and addr % ptr_align != 0) return sema.fail(block, operand_src, "pointer type '{}' requires aligned address", .{ptr_ty.fmt(sema.mod)}); @@ -19820,8 +19842,8 @@ fn zirIntToPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } try sema.requireRuntimeBlock(block, src, operand_src); - if (block.wantSafety() and (try sema.typeHasRuntimeBits(elem_ty) or elem_ty.zigTypeTag() == .Fn)) { - if (!ptr_ty.isAllowzeroPtr()) { + if (block.wantSafety() and (try sema.typeHasRuntimeBits(elem_ty) or elem_ty.zigTypeTag(mod) == .Fn)) { + if (!ptr_ty.isAllowzeroPtr(mod)) { const is_non_zero = try block.addBinOp(.cmp_neq, operand_coerced, .zero_usize); try sema.addSafetyCheck(block, is_non_zero, .cast_to_null); } @@ -19926,6 +19948,7 @@ fn zirErrSetCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat } fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; @@ -19934,7 +19957,6 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const dest_ty = try sema.resolveType(block, dest_ty_src, extra.lhs); const operand = try sema.resolveInst(extra.rhs); const operand_ty = sema.typeOf(operand); - const target = sema.mod.getTarget(); try sema.checkPtrType(block, dest_ty_src, dest_ty); try sema.checkPtrOperand(block, operand_src, operand_ty); @@ -19982,18 +20004,18 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air else operand; - const dest_elem_ty = dest_ty.elemType2(); + const dest_elem_ty = dest_ty.elemType2(mod); try sema.resolveTypeLayout(dest_elem_ty); - const dest_align = dest_ty.ptrAlignment(target); + const dest_align = dest_ty.ptrAlignment(mod); - const operand_elem_ty = operand_ty.elemType2(); + const operand_elem_ty = operand_ty.elemType2(mod); try sema.resolveTypeLayout(operand_elem_ty); - const operand_align = operand_ty.ptrAlignment(target); + const operand_align = operand_ty.ptrAlignment(mod); // If the destination is less aligned than the source, preserve the source alignment 
const aligned_dest_ty = if (operand_align <= dest_align) dest_ty else blk: { // Unwrap the pointer (or pointer-like optional) type, set alignment, and re-wrap into result - if (dest_ty.zigTypeTag() == .Optional) { + if (dest_ty.zigTypeTag(mod) == .Optional) { var buf: Type.Payload.ElemType = undefined; var dest_ptr_info = dest_ty.optionalChild(&buf).ptrInfo().data; dest_ptr_info.@"align" = operand_align; @@ -20006,8 +20028,8 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air }; if (dest_is_slice) { - const operand_elem_size = operand_elem_ty.abiSize(target); - const dest_elem_size = dest_elem_ty.abiSize(target); + const operand_elem_size = operand_elem_ty.abiSize(mod); + const dest_elem_size = dest_elem_ty.abiSize(mod); if (operand_elem_size != dest_elem_size) { return sema.fail(block, dest_ty_src, "TODO: implement @ptrCast between slices changing the length", .{}); } @@ -20032,21 +20054,21 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air } if (try sema.resolveMaybeUndefVal(ptr)) |operand_val| { - if (!dest_ty.ptrAllowsZero() and operand_val.isUndef()) { + if (!dest_ty.ptrAllowsZero(mod) and operand_val.isUndef()) { return sema.failWithUseOfUndef(block, operand_src); } - if (!dest_ty.ptrAllowsZero() and operand_val.isNull()) { + if (!dest_ty.ptrAllowsZero(mod) and operand_val.isNull(mod)) { return sema.fail(block, operand_src, "null pointer casted to type '{}'", .{dest_ty.fmt(sema.mod)}); } - if (dest_ty.zigTypeTag() == .Optional and sema.typeOf(ptr).zigTypeTag() != .Optional) { + if (dest_ty.zigTypeTag(mod) == .Optional and sema.typeOf(ptr).zigTypeTag(mod) != .Optional) { return sema.addConstant(dest_ty, try Value.Tag.opt_payload.create(sema.arena, operand_val)); } return sema.addConstant(aligned_dest_ty, operand_val); } try sema.requireRuntimeBlock(block, src, null); - if (block.wantSafety() and operand_ty.ptrAllowsZero() and !dest_ty.ptrAllowsZero() and - (try sema.typeHasRuntimeBits(dest_ty.elemType2()) or dest_ty.elemType2().zigTypeTag() == .Fn)) + if (block.wantSafety() and operand_ty.ptrAllowsZero(mod) and !dest_ty.ptrAllowsZero(mod) and + (try sema.typeHasRuntimeBits(dest_ty.elemType2(mod)) or dest_ty.elemType2(mod).zigTypeTag(mod) == .Fn)) { const ptr_int = try block.addUnOp(.ptrtoint, ptr); const is_non_zero = try block.addBinOp(.cmp_neq, ptr_int, .zero_usize); @@ -20102,6 +20124,7 @@ fn zirVolatileCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstD } fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; @@ -20112,7 +20135,7 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const dest_is_comptime_int = try sema.checkIntType(block, dest_ty_src, dest_scalar_ty); const operand_ty = sema.typeOf(operand); const operand_scalar_ty = try sema.checkIntOrVectorAllowComptime(block, operand_ty, operand_src); - const is_vector = operand_ty.zigTypeTag() == .Vector; + const is_vector = operand_ty.zigTypeTag(mod) == .Vector; const dest_ty = if (is_vector) try Type.vector(sema.arena, operand_ty.vectorLen(), dest_scalar_ty) else @@ -20122,15 +20145,14 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai return sema.coerce(block, dest_ty, operand, operand_src); } - const target = sema.mod.getTarget(); - 
const dest_info = dest_scalar_ty.intInfo(target); + const dest_info = dest_scalar_ty.intInfo(mod); if (try sema.typeHasOnePossibleValue(dest_ty)) |val| { return sema.addConstant(dest_ty, val); } - if (operand_scalar_ty.zigTypeTag() != .ComptimeInt) { - const operand_info = operand_ty.intInfo(target); + if (operand_scalar_ty.zigTypeTag(mod) != .ComptimeInt) { + const operand_info = operand_ty.intInfo(mod); if (try sema.typeHasOnePossibleValue(operand_ty)) |val| { return sema.addConstant(operand_ty, val); } @@ -20186,6 +20208,7 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } fn zirAlignCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const align_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; @@ -20199,12 +20222,12 @@ fn zirAlignCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A var ptr_info = ptr_ty.ptrInfo().data; ptr_info.@"align" = dest_align; var dest_ty = try Type.ptr(sema.arena, sema.mod, ptr_info); - if (ptr_ty.zigTypeTag() == .Optional) { + if (ptr_ty.zigTypeTag(mod) == .Optional) { dest_ty = try Type.Tag.optional.create(sema.arena, dest_ty); } if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |val| { - if (try val.getUnsignedIntAdvanced(sema.mod.getTarget(), null)) |addr| { + if (try val.getUnsignedIntAdvanced(mod, null)) |addr| { if (addr % dest_align != 0) { return sema.fail(block, ptr_src, "pointer address 0x{X} is not aligned to {d} bytes", .{ addr, dest_align }); } @@ -20247,23 +20270,23 @@ fn zirBitCount( block: *Block, inst: Zir.Inst.Index, air_tag: Air.Inst.Tag, - comptime comptimeOp: fn (val: Value, ty: Type, target: std.Target) u64, + comptime comptimeOp: fn (val: Value, ty: Type, mod: *const Module) u64, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const operand = try sema.resolveInst(inst_data.operand); const operand_ty = sema.typeOf(operand); _ = try sema.checkIntOrVector(block, operand, operand_src); - const target = sema.mod.getTarget(); - const bits = operand_ty.intInfo(target).bits; + const bits = operand_ty.intInfo(mod).bits; if (try sema.typeHasOnePossibleValue(operand_ty)) |val| { return sema.addConstant(operand_ty, val); } - const result_scalar_ty = try Type.smallestUnsignedInt(sema.arena, bits); - switch (operand_ty.zigTypeTag()) { + const result_scalar_ty = try mod.smallestUnsignedInt(bits); + switch (operand_ty.zigTypeTag(mod)) { .Vector => { const vec_len = operand_ty.vectorLen(); const result_ty = try Type.vector(sema.arena, vec_len, result_scalar_ty); @@ -20272,10 +20295,10 @@ fn zirBitCount( var elem_buf: Value.ElemValueBuffer = undefined; const elems = try sema.arena.alloc(Value, vec_len); - const scalar_ty = operand_ty.scalarType(); + const scalar_ty = operand_ty.scalarType(mod); for (elems, 0..) 
|*elem, i| { const elem_val = val.elemValueBuffer(sema.mod, i, &elem_buf); - const count = comptimeOp(elem_val, scalar_ty, target); + const count = comptimeOp(elem_val, scalar_ty, mod); elem.* = try Value.Tag.int_u64.create(sema.arena, count); } return sema.addConstant( @@ -20291,7 +20314,7 @@ fn zirBitCount( if (try sema.resolveMaybeUndefVal(operand)) |val| { if (val.isUndef()) return sema.addConstUndef(result_scalar_ty); try sema.resolveLazyValue(val); - return sema.addIntUnsigned(result_scalar_ty, comptimeOp(val, operand_ty, target)); + return sema.addIntUnsigned(result_scalar_ty, comptimeOp(val, operand_ty, mod)); } else { try sema.requireRuntimeBlock(block, src, operand_src); return block.addTyOp(air_tag, result_scalar_ty, operand); @@ -20302,14 +20325,14 @@ fn zirBitCount( } fn zirByteSwap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const operand = try sema.resolveInst(inst_data.operand); const operand_ty = sema.typeOf(operand); const scalar_ty = try sema.checkIntOrVector(block, operand, operand_src); - const target = sema.mod.getTarget(); - const bits = scalar_ty.intInfo(target).bits; + const bits = scalar_ty.intInfo(mod).bits; if (bits % 8 != 0) { return sema.fail( block, @@ -20323,11 +20346,11 @@ fn zirByteSwap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai return sema.addConstant(operand_ty, val); } - switch (operand_ty.zigTypeTag()) { + switch (operand_ty.zigTypeTag(mod)) { .Int => { const runtime_src = if (try sema.resolveMaybeUndefVal(operand)) |val| { if (val.isUndef()) return sema.addConstUndef(operand_ty); - const result_val = try val.byteSwap(operand_ty, target, sema.arena); + const result_val = try val.byteSwap(operand_ty, mod, sema.arena); return sema.addConstant(operand_ty, result_val); } else operand_src; @@ -20344,7 +20367,7 @@ fn zirByteSwap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const elems = try sema.arena.alloc(Value, vec_len); for (elems, 0..) |*elem, i| { const elem_val = val.elemValueBuffer(sema.mod, i, &elem_buf); - elem.* = try elem_val.byteSwap(operand_ty, target, sema.arena); + elem.* = try elem_val.byteSwap(operand_ty, mod, sema.arena); } return sema.addConstant( operand_ty, @@ -20371,12 +20394,12 @@ fn zirBitReverse(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! return sema.addConstant(operand_ty, val); } - const target = sema.mod.getTarget(); - switch (operand_ty.zigTypeTag()) { + const mod = sema.mod; + switch (operand_ty.zigTypeTag(mod)) { .Int => { const runtime_src = if (try sema.resolveMaybeUndefVal(operand)) |val| { if (val.isUndef()) return sema.addConstUndef(operand_ty); - const result_val = try val.bitReverse(operand_ty, target, sema.arena); + const result_val = try val.bitReverse(operand_ty, mod, sema.arena); return sema.addConstant(operand_ty, result_val); } else operand_src; @@ -20393,7 +20416,7 @@ fn zirBitReverse(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! const elems = try sema.arena.alloc(Value, vec_len); for (elems, 0..) 
|*elem, i| { const elem_val = val.elemValueBuffer(sema.mod, i, &elem_buf); - elem.* = try elem_val.bitReverse(scalar_ty, target, sema.arena); + elem.* = try elem_val.bitReverse(scalar_ty, mod, sema.arena); } return sema.addConstant( operand_ty, @@ -20429,10 +20452,10 @@ fn bitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!u6 const ty = try sema.resolveType(block, lhs_src, extra.lhs); const field_name = try sema.resolveConstString(block, rhs_src, extra.rhs, "name of field must be comptime-known"); - const target = sema.mod.getTarget(); + const mod = sema.mod; try sema.resolveTypeLayout(ty); - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Struct => {}, else => { const msg = msg: { @@ -20464,15 +20487,16 @@ fn bitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!u6 if (i == field_index) { return bit_sum; } - bit_sum += field.ty.bitSize(target); + bit_sum += field.ty.bitSize(mod); } else unreachable; }, - else => return ty.structFieldOffset(field_index, target) * 8, + else => return ty.structFieldOffset(field_index, mod) * 8, } } fn checkNamespaceType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!void { - switch (ty.zigTypeTag()) { + const mod = sema.mod; + switch (ty.zigTypeTag(mod)) { .Struct, .Enum, .Union, .Opaque => return, else => return sema.fail(block, src, "expected struct, enum, union, or opaque; found '{}'", .{ty.fmt(sema.mod)}), } @@ -20480,7 +20504,8 @@ fn checkNamespaceType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) Com /// Returns `true` if the type was a comptime_int. fn checkIntType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!bool { - switch (try ty.zigTypeTagOrPoison()) { + const mod = sema.mod; + switch (try ty.zigTypeTagOrPoison(mod)) { .ComptimeInt => return true, .Int => return false, else => return sema.fail(block, src, "expected integer type, found '{}'", .{ty.fmt(sema.mod)}), @@ -20493,7 +20518,8 @@ fn checkInvalidPtrArithmetic( src: LazySrcLoc, ty: Type, ) CompileError!void { - switch (try ty.zigTypeTagOrPoison()) { + const mod = sema.mod; + switch (try ty.zigTypeTagOrPoison(mod)) { .Pointer => switch (ty.ptrSize()) { .One, .Slice => return, .Many, .C => return sema.fail( @@ -20532,7 +20558,8 @@ fn checkPtrOperand( ty_src: LazySrcLoc, ty: Type, ) CompileError!void { - switch (ty.zigTypeTag()) { + const mod = sema.mod; + switch (ty.zigTypeTag(mod)) { .Pointer => return, .Fn => { const msg = msg: { @@ -20550,7 +20577,7 @@ fn checkPtrOperand( }; return sema.failWithOwnedErrorMsg(msg); }, - .Optional => if (ty.isPtrLikeOptional()) return, + .Optional => if (ty.isPtrLikeOptional(mod)) return, else => {}, } return sema.fail(block, ty_src, "expected pointer type, found '{}'", .{ty.fmt(sema.mod)}); @@ -20562,7 +20589,8 @@ fn checkPtrType( ty_src: LazySrcLoc, ty: Type, ) CompileError!void { - switch (ty.zigTypeTag()) { + const mod = sema.mod; + switch (ty.zigTypeTag(mod)) { .Pointer => return, .Fn => { const msg = msg: { @@ -20580,7 +20608,7 @@ fn checkPtrType( }; return sema.failWithOwnedErrorMsg(msg); }, - .Optional => if (ty.isPtrLikeOptional()) return, + .Optional => if (ty.isPtrLikeOptional(mod)) return, else => {}, } return sema.fail(block, ty_src, "expected pointer type, found '{}'", .{ty.fmt(sema.mod)}); @@ -20592,9 +20620,10 @@ fn checkVectorElemType( ty_src: LazySrcLoc, ty: Type, ) CompileError!void { - switch (ty.zigTypeTag()) { + const mod = sema.mod; + switch (ty.zigTypeTag(mod)) { .Int, .Float, .Bool => return, - else => if (ty.isPtrAtRuntime()) 
return, + else => if (ty.isPtrAtRuntime(mod)) return, } return sema.fail(block, ty_src, "expected integer, float, bool, or pointer for the vector element type; found '{}'", .{ty.fmt(sema.mod)}); } @@ -20605,7 +20634,8 @@ fn checkFloatType( ty_src: LazySrcLoc, ty: Type, ) CompileError!void { - switch (ty.zigTypeTag()) { + const mod = sema.mod; + switch (ty.zigTypeTag(mod)) { .ComptimeInt, .ComptimeFloat, .Float => {}, else => return sema.fail(block, ty_src, "expected float type, found '{}'", .{ty.fmt(sema.mod)}), } @@ -20617,9 +20647,10 @@ fn checkNumericType( ty_src: LazySrcLoc, ty: Type, ) CompileError!void { - switch (ty.zigTypeTag()) { + const mod = sema.mod; + switch (ty.zigTypeTag(mod)) { .ComptimeFloat, .Float, .ComptimeInt, .Int => {}, - .Vector => switch (ty.childType().zigTypeTag()) { + .Vector => switch (ty.childType().zigTypeTag(mod)) { .ComptimeFloat, .Float, .ComptimeInt, .Int => {}, else => |t| return sema.fail(block, ty_src, "expected number, found '{}'", .{t}), }, @@ -20637,9 +20668,9 @@ fn checkAtomicPtrOperand( ptr_src: LazySrcLoc, ptr_const: bool, ) CompileError!Air.Inst.Ref { - const target = sema.mod.getTarget(); - var diag: target_util.AtomicPtrAlignmentDiagnostics = .{}; - const alignment = target_util.atomicPtrAlignment(target, elem_ty, &diag) catch |err| switch (err) { + const mod = sema.mod; + var diag: Module.AtomicPtrAlignmentDiagnostics = .{}; + const alignment = mod.atomicPtrAlignment(elem_ty, &diag) catch |err| switch (err) { error.FloatTooBig => return sema.fail( block, elem_ty_src, @@ -20668,7 +20699,7 @@ fn checkAtomicPtrOperand( }; const ptr_ty = sema.typeOf(ptr); - const ptr_data = switch (try ptr_ty.zigTypeTagOrPoison()) { + const ptr_data = switch (try ptr_ty.zigTypeTagOrPoison(mod)) { .Pointer => ptr_ty.ptrInfo().data, else => { const wanted_ptr_ty = try Type.ptr(sema.arena, sema.mod, wanted_ptr_data); @@ -20735,12 +20766,13 @@ fn checkIntOrVector( operand: Air.Inst.Ref, operand_src: LazySrcLoc, ) CompileError!Type { + const mod = sema.mod; const operand_ty = sema.typeOf(operand); - switch (try operand_ty.zigTypeTagOrPoison()) { + switch (try operand_ty.zigTypeTagOrPoison(mod)) { .Int => return operand_ty, .Vector => { const elem_ty = operand_ty.childType(); - switch (try elem_ty.zigTypeTagOrPoison()) { + switch (try elem_ty.zigTypeTagOrPoison(mod)) { .Int => return elem_ty, else => return sema.fail(block, operand_src, "expected vector of integers; found vector of '{}'", .{ elem_ty.fmt(sema.mod), @@ -20759,11 +20791,12 @@ fn checkIntOrVectorAllowComptime( operand_ty: Type, operand_src: LazySrcLoc, ) CompileError!Type { - switch (try operand_ty.zigTypeTagOrPoison()) { + const mod = sema.mod; + switch (try operand_ty.zigTypeTagOrPoison(mod)) { .Int, .ComptimeInt => return operand_ty, .Vector => { const elem_ty = operand_ty.childType(); - switch (try elem_ty.zigTypeTagOrPoison()) { + switch (try elem_ty.zigTypeTagOrPoison(mod)) { .Int, .ComptimeInt => return elem_ty, else => return sema.fail(block, operand_src, "expected vector of integers; found vector of '{}'", .{ elem_ty.fmt(sema.mod), @@ -20777,7 +20810,8 @@ fn checkIntOrVectorAllowComptime( } fn checkErrorSetType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!void { - switch (ty.zigTypeTag()) { + const mod = sema.mod; + switch (ty.zigTypeTag(mod)) { .ErrorSet => return, else => return sema.fail(block, src, "expected error set type, found '{}'", .{ty.fmt(sema.mod)}), } @@ -20805,11 +20839,12 @@ fn checkSimdBinOp( lhs_src: LazySrcLoc, rhs_src: LazySrcLoc, ) CompileError!SimdBinOp 
{ + const mod = sema.mod; const lhs_ty = sema.typeOf(uncasted_lhs); const rhs_ty = sema.typeOf(uncasted_rhs); try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src); - var vec_len: ?usize = if (lhs_ty.zigTypeTag() == .Vector) lhs_ty.vectorLen() else null; + var vec_len: ?usize = if (lhs_ty.zigTypeTag(mod) == .Vector) lhs_ty.vectorLen() else null; const result_ty = try sema.resolvePeerTypes(block, src, &.{ uncasted_lhs, uncasted_rhs }, .{ .override = &[_]?LazySrcLoc{ lhs_src, rhs_src }, }); @@ -20823,7 +20858,7 @@ fn checkSimdBinOp( .lhs_val = try sema.resolveMaybeUndefVal(lhs), .rhs_val = try sema.resolveMaybeUndefVal(rhs), .result_ty = result_ty, - .scalar_ty = result_ty.scalarType(), + .scalar_ty = result_ty.scalarType(mod), }; } @@ -20836,8 +20871,9 @@ fn checkVectorizableBinaryOperands( lhs_src: LazySrcLoc, rhs_src: LazySrcLoc, ) CompileError!void { - const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(); - const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(); + const mod = sema.mod; + const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod); + const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod); if (lhs_zig_ty_tag != .Vector and rhs_zig_ty_tag != .Vector) return; const lhs_is_vector = switch (lhs_zig_ty_tag) { @@ -20892,6 +20928,7 @@ fn resolveExportOptions( src: LazySrcLoc, zir_ref: Zir.Inst.Ref, ) CompileError!std.builtin.ExportOptions { + const mod = sema.mod; const export_options_ty = try sema.getBuiltinType("ExportOptions"); const air_ref = try sema.resolveInst(zir_ref); const options = try sema.coerce(block, export_options_ty, air_ref, src); @@ -20904,7 +20941,7 @@ fn resolveExportOptions( const name_operand = try sema.fieldVal(block, src, options, "name", name_src); const name_val = try sema.resolveConstValue(block, name_src, name_operand, "name of exported value must be comptime-known"); const name_ty = Type.initTag(.const_slice_u8); - const name = try name_val.toAllocatedBytes(name_ty, sema.arena, sema.mod); + const name = try name_val.toAllocatedBytes(name_ty, sema.arena, mod); const linkage_operand = try sema.fieldVal(block, src, options, "linkage", linkage_src); const linkage_val = try sema.resolveConstValue(block, linkage_src, linkage_operand, "linkage of exported value must be comptime-known"); @@ -20913,8 +20950,8 @@ fn resolveExportOptions( const section_operand = try sema.fieldVal(block, src, options, "section", section_src); const section_opt_val = try sema.resolveConstValue(block, section_src, section_operand, "linksection of exported value must be comptime-known"); const section_ty = Type.initTag(.const_slice_u8); - const section = if (section_opt_val.optionalValue()) |section_val| - try section_val.toAllocatedBytes(section_ty, sema.arena, sema.mod) + const section = if (section_opt_val.optionalValue(mod)) |section_val| + try section_val.toAllocatedBytes(section_ty, sema.arena, mod) else null; @@ -20979,6 +21016,7 @@ fn zirCmpxchg( block: *Block, extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const extra = sema.code.extraData(Zir.Inst.Cmpxchg, extended.operand).data; const air_tag: Air.Inst.Tag = switch (extended.small) { 0 => .cmpxchg_weak, @@ -20996,7 +21034,7 @@ fn zirCmpxchg( // zig fmt: on const expected_value = try sema.resolveInst(extra.expected_value); const elem_ty = sema.typeOf(expected_value); - if (elem_ty.zigTypeTag() == .Float) { + if (elem_ty.zigTypeTag(mod) == .Float) { return sema.fail( block, elem_ty_src, @@ -21102,26 +21140,26 @@ fn zirReduce(sema: *Sema, 
block: *Block, inst: Zir.Inst.Index) CompileError!Air. const operation = try sema.resolveBuiltinEnum(block, op_src, extra.lhs, "ReduceOp", "@reduce operation must be comptime-known"); const operand = try sema.resolveInst(extra.rhs); const operand_ty = sema.typeOf(operand); - const target = sema.mod.getTarget(); + const mod = sema.mod; - if (operand_ty.zigTypeTag() != .Vector) { - return sema.fail(block, operand_src, "expected vector, found '{}'", .{operand_ty.fmt(sema.mod)}); + if (operand_ty.zigTypeTag(mod) != .Vector) { + return sema.fail(block, operand_src, "expected vector, found '{}'", .{operand_ty.fmt(mod)}); } const scalar_ty = operand_ty.childType(); // Type-check depending on operation. switch (operation) { - .And, .Or, .Xor => switch (scalar_ty.zigTypeTag()) { + .And, .Or, .Xor => switch (scalar_ty.zigTypeTag(mod)) { .Int, .Bool => {}, else => return sema.fail(block, operand_src, "@reduce operation '{s}' requires integer or boolean operand; found '{}'", .{ - @tagName(operation), operand_ty.fmt(sema.mod), + @tagName(operation), operand_ty.fmt(mod), }), }, - .Min, .Max, .Add, .Mul => switch (scalar_ty.zigTypeTag()) { + .Min, .Max, .Add, .Mul => switch (scalar_ty.zigTypeTag(mod)) { .Int, .Float => {}, else => return sema.fail(block, operand_src, "@reduce operation '{s}' requires integer or float operand; found '{}'", .{ - @tagName(operation), operand_ty.fmt(sema.mod), + @tagName(operation), operand_ty.fmt(mod), }), }, } @@ -21136,19 +21174,19 @@ fn zirReduce(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. if (try sema.resolveMaybeUndefVal(operand)) |operand_val| { if (operand_val.isUndef()) return sema.addConstUndef(scalar_ty); - var accum: Value = try operand_val.elemValue(sema.mod, sema.arena, 0); + var accum: Value = try operand_val.elemValue(mod, sema.arena, 0); var elem_buf: Value.ElemValueBuffer = undefined; var i: u32 = 1; while (i < vec_len) : (i += 1) { - const elem_val = operand_val.elemValueBuffer(sema.mod, i, &elem_buf); + const elem_val = operand_val.elemValueBuffer(mod, i, &elem_buf); switch (operation) { - .And => accum = try accum.bitwiseAnd(elem_val, scalar_ty, sema.arena, sema.mod), - .Or => accum = try accum.bitwiseOr(elem_val, scalar_ty, sema.arena, sema.mod), - .Xor => accum = try accum.bitwiseXor(elem_val, scalar_ty, sema.arena, sema.mod), - .Min => accum = accum.numberMin(elem_val, target), - .Max => accum = accum.numberMax(elem_val, target), + .And => accum = try accum.bitwiseAnd(elem_val, scalar_ty, sema.arena, mod), + .Or => accum = try accum.bitwiseOr(elem_val, scalar_ty, sema.arena, mod), + .Xor => accum = try accum.bitwiseXor(elem_val, scalar_ty, sema.arena, mod), + .Min => accum = accum.numberMin(elem_val, mod), + .Max => accum = accum.numberMax(elem_val, mod), .Add => accum = try sema.numberAddWrapScalar(accum, elem_val, scalar_ty), - .Mul => accum = try accum.numberMulWrap(elem_val, scalar_ty, sema.arena, sema.mod), + .Mul => accum = try accum.numberMulWrap(elem_val, scalar_ty, sema.arena, mod), } } return sema.addConstant(scalar_ty, accum); @@ -21165,6 +21203,7 @@ fn zirReduce(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. 
} fn zirShuffle(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Shuffle, inst_data.payload_index).data; const elem_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; @@ -21177,7 +21216,7 @@ fn zirShuffle(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air var mask = try sema.resolveInst(extra.mask); var mask_ty = sema.typeOf(mask); - const mask_len = switch (sema.typeOf(mask).zigTypeTag()) { + const mask_len = switch (sema.typeOf(mask).zigTypeTag(mod)) { .Array, .Vector => sema.typeOf(mask).arrayLen(), else => return sema.fail(block, mask_src, "expected vector or array, found '{}'", .{sema.typeOf(mask).fmt(sema.mod)}), }; @@ -21200,6 +21239,7 @@ fn analyzeShuffle( mask: Value, mask_len: u32, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const a_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = src_node }; const b_src: LazySrcLoc = .{ .node_offset_builtin_call_arg2 = src_node }; const mask_src: LazySrcLoc = .{ .node_offset_builtin_call_arg3 = src_node }; @@ -21211,7 +21251,7 @@ fn analyzeShuffle( .elem_type = elem_ty, }); - var maybe_a_len = switch (sema.typeOf(a).zigTypeTag()) { + var maybe_a_len = switch (sema.typeOf(a).zigTypeTag(mod)) { .Array, .Vector => sema.typeOf(a).arrayLen(), .Undefined => null, else => return sema.fail(block, a_src, "expected vector or array with element type '{}', found '{}'", .{ @@ -21219,7 +21259,7 @@ fn analyzeShuffle( sema.typeOf(a).fmt(sema.mod), }), }; - var maybe_b_len = switch (sema.typeOf(b).zigTypeTag()) { + var maybe_b_len = switch (sema.typeOf(b).zigTypeTag(mod)) { .Array, .Vector => sema.typeOf(b).arrayLen(), .Undefined => null, else => return sema.fail(block, b_src, "expected vector or array with element type '{}', found '{}'", .{ @@ -21255,7 +21295,7 @@ fn analyzeShuffle( var buf: Value.ElemValueBuffer = undefined; const elem = mask.elemValueBuffer(sema.mod, i, &buf); if (elem.isUndef()) continue; - const int = elem.toSignedInt(sema.mod.getTarget()); + const int = elem.toSignedInt(mod); var unsigned: u32 = undefined; var chosen: u32 = undefined; if (int >= 0) { @@ -21297,7 +21337,7 @@ fn analyzeShuffle( values[i] = Value.undef; continue; } - const int = mask_elem_val.toSignedInt(sema.mod.getTarget()); + const int = mask_elem_val.toSignedInt(mod); const unsigned = if (int >= 0) @intCast(u32, int) else @intCast(u32, ~int); if (int >= 0) { values[i] = try a_val.elemValue(sema.mod, sema.arena, unsigned); @@ -21356,6 +21396,7 @@ fn analyzeShuffle( } fn zirSelect(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref { + const mod = sema.mod; const extra = sema.code.extraData(Zir.Inst.Select, extended.operand).data; const src = LazySrcLoc.nodeOffset(extra.node); @@ -21369,7 +21410,7 @@ fn zirSelect(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) C const pred_uncoerced = try sema.resolveInst(extra.pred); const pred_ty = sema.typeOf(pred_uncoerced); - const vec_len_u64 = switch (try pred_ty.zigTypeTagOrPoison()) { + const vec_len_u64 = switch (try pred_ty.zigTypeTagOrPoison(mod)) { .Vector, .Array => pred_ty.arrayLen(), else => return sema.fail(block, pred_src, "expected vector or array, found '{}'", .{pred_ty.fmt(sema.mod)}), }; @@ -21489,6 +21530,7 @@ fn zirAtomicLoad(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! 
} fn zirAtomicRmw(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.AtomicRmw, inst_data.payload_index).data; const src = inst_data.src(); @@ -21505,7 +21547,7 @@ fn zirAtomicRmw(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A const ptr = try sema.checkAtomicPtrOperand(block, elem_ty, elem_ty_src, uncasted_ptr, ptr_src, false); const op = try sema.resolveAtomicRmwOp(block, op_src, extra.operation); - switch (elem_ty.zigTypeTag()) { + switch (elem_ty.zigTypeTag(mod)) { .Enum => if (op != .Xchg) { return sema.fail(block, op_src, "@atomicRmw with enum only allowed with .Xchg", .{}); }, @@ -21536,7 +21578,6 @@ fn zirAtomicRmw(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A break :rs operand_src; }; if (ptr_val.isComptimeMutablePtr()) { - const target = sema.mod.getTarget(); const ptr_ty = sema.typeOf(ptr); const stored_val = (try sema.pointerDeref(block, ptr_src, ptr_val, ptr_ty)) orelse break :rs ptr_src; const new_val = switch (op) { @@ -21544,12 +21585,12 @@ fn zirAtomicRmw(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A .Xchg => operand_val, .Add => try sema.numberAddWrapScalar(stored_val, operand_val, elem_ty), .Sub => try sema.numberSubWrapScalar(stored_val, operand_val, elem_ty), - .And => try stored_val.bitwiseAnd (operand_val, elem_ty, sema.arena, sema.mod), - .Nand => try stored_val.bitwiseNand (operand_val, elem_ty, sema.arena, sema.mod), - .Or => try stored_val.bitwiseOr (operand_val, elem_ty, sema.arena, sema.mod), - .Xor => try stored_val.bitwiseXor (operand_val, elem_ty, sema.arena, sema.mod), - .Max => stored_val.numberMax (operand_val, target), - .Min => stored_val.numberMin (operand_val, target), + .And => try stored_val.bitwiseAnd (operand_val, elem_ty, sema.arena, mod), + .Nand => try stored_val.bitwiseNand (operand_val, elem_ty, sema.arena, mod), + .Or => try stored_val.bitwiseOr (operand_val, elem_ty, sema.arena, mod), + .Xor => try stored_val.bitwiseXor (operand_val, elem_ty, sema.arena, mod), + .Max => stored_val.numberMax (operand_val, mod), + .Min => stored_val.numberMin (operand_val, mod), // zig fmt: on }; try sema.storePtrVal(block, src, ptr_val, new_val, elem_ty); @@ -21623,8 +21664,9 @@ fn zirMulAdd(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. 
const maybe_mulend1 = try sema.resolveMaybeUndefVal(mulend1);
     const maybe_mulend2 = try sema.resolveMaybeUndefVal(mulend2);
     const maybe_addend = try sema.resolveMaybeUndefVal(addend);
+    const mod = sema.mod;
 
-    switch (ty.zigTypeTag()) {
+    switch (ty.zigTypeTag(mod)) {
         .ComptimeFloat, .Float, .Vector => {},
         else => return sema.fail(block, src, "expected vector of floats or float type, found '{}'", .{ty.fmt(sema.mod)}),
     }
@@ -21743,7 +21785,6 @@ fn zirBuiltinCall(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
     const callee_ty = sema.typeOf(func);
     const func_ty = try sema.checkCallArgumentCount(block, func, func_src, callee_ty, resolved_args.len, false);
-
     const ensure_result_used = extra.flags.ensure_result_used;
     return sema.analyzeCall(block, func, func_ty, func_src, call_src, modifier, ensure_result_used, resolved_args, null, null);
 }
@@ -21760,13 +21801,14 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr
     const field_name = try sema.resolveConstString(block, name_src, extra.field_name, "field name must be comptime-known");
     const field_ptr = try sema.resolveInst(extra.field_ptr);
     const field_ptr_ty = sema.typeOf(field_ptr);
+    const mod = sema.mod;
 
-    if (parent_ty.zigTypeTag() != .Struct and parent_ty.zigTypeTag() != .Union) {
+    if (parent_ty.zigTypeTag(mod) != .Struct and parent_ty.zigTypeTag(mod) != .Union) {
         return sema.fail(block, ty_src, "expected struct or union type, found '{}'", .{parent_ty.fmt(sema.mod)});
     }
     try sema.resolveTypeLayout(parent_ty);
 
-    const field_index = switch (parent_ty.zigTypeTag()) {
+    const field_index = switch (parent_ty.zigTypeTag(mod)) {
         .Struct => blk: {
             if (parent_ty.isTuple()) {
                 if (mem.eql(u8, field_name, "len")) {
@@ -21781,7 +21823,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr
         else => unreachable,
     };
 
-    if (parent_ty.zigTypeTag() == .Struct and parent_ty.structFieldIsComptime(field_index)) {
+    if (parent_ty.zigTypeTag(mod) == .Struct and parent_ty.structFieldIsComptime(field_index)) {
         return sema.fail(block, src, "cannot get @fieldParentPtr of a comptime field", .{});
     }
 
@@ -21913,15 +21955,14 @@ fn analyzeMinMax(
 ) CompileError!Air.Inst.Ref {
     assert(operands.len == operand_srcs.len);
     assert(operands.len > 0);
+    const mod = sema.mod;
 
     if (operands.len == 1) return operands[0];
 
-    const mod = sema.mod;
-    const target = mod.getTarget();
 
     const opFunc = switch (air_tag) {
         .min => Value.numberMin,
         .max => Value.numberMax,
         else => unreachable,
     };
 
     // First, find all comptime-known arguments, and get their min/max
@@ -21949,7 +21990,7 @@ fn analyzeMinMax(
             try sema.resolveLazyValue(operand_val);
 
             const vec_len = simd_op.len orelse {
-                const result_val = opFunc(cur_val, operand_val, target);
+                const result_val = opFunc(cur_val, operand_val, mod);
                 cur_minmax = try sema.addConstant(simd_op.result_ty, result_val);
                 continue;
             };
@@ -21959,7 +22000,7 @@ fn analyzeMinMax(
             for (elems, 0..)
|*elem, i| { const lhs_elem_val = cur_val.elemValueBuffer(mod, i, &lhs_buf); const rhs_elem_val = operand_val.elemValueBuffer(mod, i, &rhs_buf); - elem.* = opFunc(lhs_elem_val, rhs_elem_val, target); + elem.* = opFunc(lhs_elem_val, rhs_elem_val, mod); } cur_minmax = try sema.addConstant( simd_op.result_ty, @@ -21984,7 +22025,7 @@ fn analyzeMinMax( break :refined orig_ty; } - const refined_ty = if (orig_ty.zigTypeTag() == .Vector) blk: { + const refined_ty = if (orig_ty.zigTypeTag(mod) == .Vector) blk: { const elem_ty = orig_ty.childType(); const len = orig_ty.vectorLen(); @@ -21996,16 +22037,16 @@ fn analyzeMinMax( for (1..len) |idx| { const elem_val = try val.elemValue(mod, sema.arena, idx); if (elem_val.isUndef()) break :blk orig_ty; // can't refine undef - if (Value.order(elem_val, cur_min, target).compare(.lt)) cur_min = elem_val; - if (Value.order(elem_val, cur_max, target).compare(.gt)) cur_max = elem_val; + if (Value.order(elem_val, cur_min, mod).compare(.lt)) cur_min = elem_val; + if (Value.order(elem_val, cur_max, mod).compare(.gt)) cur_max = elem_val; } - const refined_elem_ty = try Type.intFittingRange(target, sema.arena, cur_min, cur_max); + const refined_elem_ty = try mod.intFittingRange(cur_min, cur_max); break :blk try Type.vector(sema.arena, len, refined_elem_ty); } else blk: { if (orig_ty.isAnyFloat()) break :blk orig_ty; // can't refine floats if (val.isUndef()) break :blk orig_ty; // can't refine undef - break :blk try Type.intFittingRange(target, sema.arena, val, val); + break :blk try mod.intFittingRange(val, val); }; // Apply the refined type to the current value - this isn't strictly necessary in the @@ -22061,7 +22102,7 @@ fn analyzeMinMax( // Finally, refine the type based on the comptime-known bound. if (known_undef) break :refine; // can't refine undef const unrefined_ty = sema.typeOf(cur_minmax.?); - const is_vector = unrefined_ty.zigTypeTag() == .Vector; + const is_vector = unrefined_ty.zigTypeTag(mod) == .Vector; const comptime_elem_ty = if (is_vector) comptime_ty.childType() else comptime_ty; const unrefined_elem_ty = if (is_vector) unrefined_ty.childType() else unrefined_ty; @@ -22069,18 +22110,18 @@ fn analyzeMinMax( // Compute the final bounds based on the runtime type and the comptime-known bound type const min_val = switch (air_tag) { - .min => try unrefined_elem_ty.minInt(sema.arena, target), - .max => try comptime_elem_ty.minInt(sema.arena, target), // @max(ct, rt) >= ct + .min => try unrefined_elem_ty.minInt(sema.arena, mod), + .max => try comptime_elem_ty.minInt(sema.arena, mod), // @max(ct, rt) >= ct else => unreachable, }; const max_val = switch (air_tag) { - .min => try comptime_elem_ty.maxInt(sema.arena, target), // @min(ct, rt) <= ct - .max => try unrefined_elem_ty.maxInt(sema.arena, target), + .min => try comptime_elem_ty.maxInt(sema.arena, mod), // @min(ct, rt) <= ct + .max => try unrefined_elem_ty.maxInt(sema.arena, mod), else => unreachable, }; // Find the smallest type which can contain these bounds - const final_elem_ty = try Type.intFittingRange(target, sema.arena, min_val, max_val); + const final_elem_ty = try mod.intFittingRange(min_val, max_val); const final_ty = if (is_vector) try Type.vector(sema.arena, unrefined_ty.vectorLen(), final_elem_ty) @@ -22132,6 +22173,7 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void const dest_len = try indexablePtrLenOrNone(sema, block, dest_src, dest_ptr); const src_len = try indexablePtrLenOrNone(sema, block, src_src, src_ptr); const target = sema.mod.getTarget(); + 
const mod = sema.mod; if (dest_ty.isConstPtr()) { return sema.fail(block, dest_src, "cannot memcpy to constant pointer", .{}); @@ -22196,7 +22238,7 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void const runtime_src = if (try sema.resolveDefinedValue(block, dest_src, dest_ptr)) |dest_ptr_val| rs: { if (!dest_ptr_val.isComptimeMutablePtr()) break :rs dest_src; if (try sema.resolveDefinedValue(block, src_src, src_ptr)) |_| { - const len_u64 = (try len_val.?.getUnsignedIntAdvanced(target, sema)).?; + const len_u64 = (try len_val.?.getUnsignedIntAdvanced(mod, sema)).?; const len = try sema.usizeCast(block, dest_src, len_u64); for (0..len) |i| { const elem_index = try sema.addIntUnsigned(Type.usize, i); @@ -22239,12 +22281,12 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void // lowering. The AIR instruction requires pointers with element types of // equal ABI size. - if (dest_ty.zigTypeTag() != .Pointer or src_ty.zigTypeTag() != .Pointer) { + if (dest_ty.zigTypeTag(mod) != .Pointer or src_ty.zigTypeTag(mod) != .Pointer) { return sema.fail(block, src, "TODO: lower @memcpy to a for loop because the source or destination iterable is a tuple", .{}); } - const dest_elem_ty = dest_ty.elemType2(); - const src_elem_ty = src_ty.elemType2(); + const dest_elem_ty = dest_ty.elemType2(mod); + const src_elem_ty = src_ty.elemType2(mod); if (.ok != try sema.coerceInMemoryAllowed(block, dest_elem_ty, src_elem_ty, true, target, dest_src, src_src)) { return sema.fail(block, src, "TODO: lower @memcpy to a for loop because the element types have different ABI sizes", .{}); } @@ -22255,7 +22297,7 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void var new_dest_ptr = dest_ptr; var new_src_ptr = src_ptr; if (len_val) |val| { - const len = val.toUnsignedInt(target); + const len = val.toUnsignedInt(mod); if (len == 0) { // This AIR instruction guarantees length > 0 if it is comptime-known. return; @@ -22320,6 +22362,7 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void } fn zirMemset(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const src = inst_data.src(); @@ -22334,14 +22377,13 @@ fn zirMemset(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void return sema.fail(block, dest_src, "cannot memset constant pointer", .{}); } - const dest_elem_ty = dest_ptr_ty.elemType2(); - const target = sema.mod.getTarget(); + const dest_elem_ty = dest_ptr_ty.elemType2(mod); const runtime_src = if (try sema.resolveDefinedValue(block, dest_src, dest_ptr)) |ptr_val| rs: { const len_air_ref = try sema.fieldVal(block, src, dest_ptr, "len", dest_src); const len_val = (try sema.resolveDefinedValue(block, dest_src, len_air_ref)) orelse break :rs dest_src; - const len_u64 = (try len_val.getUnsignedIntAdvanced(target, sema)).?; + const len_u64 = (try len_val.getUnsignedIntAdvanced(mod, sema)).?; const len = try sema.usizeCast(block, dest_src, len_u64); if (len == 0) { // This AIR instruction guarantees length > 0 if it is comptime-known. 
@@ -22499,9 +22541,10 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.FuncFancy, inst_data.payload_index); - const target = sema.mod.getTarget(); + const target = mod.getTarget(); const align_src: LazySrcLoc = .{ .node_offset_fn_type_align = inst_data.src_node }; const addrspace_src: LazySrcLoc = .{ .node_offset_fn_type_addrspace = inst_data.src_node }; @@ -22535,7 +22578,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A if (val.tag() == .generic_poison) { break :blk null; } - const alignment = @intCast(u32, val.toUnsignedInt(target)); + const alignment = @intCast(u32, val.toUnsignedInt(mod)); try sema.validateAlign(block, align_src, alignment); if (alignment == target_util.defaultFunctionAlignment(target)) { break :blk 0; @@ -22551,7 +22594,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A }, else => |e| return e, }; - const alignment = @intCast(u32, align_tv.val.toUnsignedInt(target)); + const alignment = @intCast(u32, align_tv.val.toUnsignedInt(mod)); try sema.validateAlign(block, align_src, alignment); if (alignment == target_util.defaultFunctionAlignment(target)) { break :blk 0; @@ -22642,8 +22685,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A extra_index += body.len; const val = try sema.resolveGenericBody(block, ret_src, body, inst, Type.type, "return type must be comptime-known"); - var buffer: Value.ToTypeBuffer = undefined; - const ty = try val.toType(&buffer).copy(sema.arena); + const ty = try val.toType().copy(sema.arena); break :blk ty; } else if (extra.data.bits.has_ret_ty_ref) blk: { const ret_ty_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]); @@ -22654,8 +22696,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A }, else => |e| return e, }; - var buffer: Value.ToTypeBuffer = undefined; - const ty = try ret_ty_tv.val.toType(&buffer).copy(sema.arena); + const ty = try ret_ty_tv.val.toType().copy(sema.arena); break :blk ty; } else Type.void; @@ -22727,13 +22768,14 @@ fn zirCDefine( block: *Block, extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data; const name_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node }; const val_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = extra.node }; const name = try sema.resolveConstString(block, name_src, extra.lhs, "name of macro being undefined must be comptime-known"); const rhs = try sema.resolveInst(extra.rhs); - if (sema.typeOf(rhs).zigTypeTag() != .Void) { + if (sema.typeOf(rhs).zigTypeTag(mod) != .Void) { const value = try sema.resolveConstString(block, val_src, extra.rhs, "value of macro being undefined must be comptime-known"); try block.c_import_buf.?.writer().print("#define {s} {s}\n", .{ name, value }); } else { @@ -22799,9 +22841,9 @@ fn resolvePrefetchOptions( src: LazySrcLoc, zir_ref: Zir.Inst.Ref, ) CompileError!std.builtin.PrefetchOptions { + const mod = sema.mod; const options_ty = try sema.getBuiltinType("PrefetchOptions"); const options = try sema.coerce(block, options_ty, try sema.resolveInst(zir_ref), src); - const target = sema.mod.getTarget(); const rw_src = sema.maybeOptionsSrc(block, src, "rw"); const 
locality_src = sema.maybeOptionsSrc(block, src, "locality"); @@ -22818,7 +22860,7 @@ fn resolvePrefetchOptions( return std.builtin.PrefetchOptions{ .rw = rw_val.toEnum(std.builtin.PrefetchOptions.Rw), - .locality = @intCast(u2, locality_val.toUnsignedInt(target)), + .locality = @intCast(u2, locality_val.toUnsignedInt(mod)), .cache = cache_val.toEnum(std.builtin.PrefetchOptions.Cache), }; } @@ -22887,7 +22929,7 @@ fn resolveExternOptions( const is_thread_local = try sema.fieldVal(block, src, options, "is_thread_local", thread_local_src); const is_thread_local_val = try sema.resolveConstValue(block, thread_local_src, is_thread_local, "threadlocality of the extern symbol must be comptime-known"); - const library_name = if (!library_name_val.isNull()) blk: { + const library_name = if (!library_name_val.isNull(mod)) blk: { const payload = library_name_val.castTag(.opt_payload).?.data; const library_name = try payload.toAllocatedBytes(Type.initTag(.const_slice_u8), sema.arena, mod); if (library_name.len == 0) { @@ -22917,17 +22959,17 @@ fn zirBuiltinExtern( block: *Block, extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data; const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node }; const options_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = extra.node }; var ty = try sema.resolveType(block, ty_src, extra.lhs); - if (!ty.isPtrAtRuntime()) { + if (!ty.isPtrAtRuntime(mod)) { return sema.fail(block, ty_src, "expected (optional) pointer", .{}); } if (!try sema.validateExternType(ty.childType(), .other)) { const msg = msg: { - const mod = sema.mod; const msg = try sema.errMsg(block, ty_src, "extern symbol cannot have type '{}'", .{ty.fmt(mod)}); errdefer msg.destroy(sema.gpa); const src_decl = sema.mod.declPtr(block.src_decl); @@ -22945,7 +22987,7 @@ fn zirBuiltinExtern( else => |e| return e, }; - if (options.linkage == .Weak and !ty.ptrAllowsZero()) { + if (options.linkage == .Weak and !ty.ptrAllowsZero(mod)) { ty = try Type.optional(sema.arena, ty); } @@ -23087,7 +23129,7 @@ fn validateVarType( const src_decl = mod.declPtr(block.src_decl); try sema.explainWhyTypeIsComptime(msg, src.toSrcLoc(src_decl), var_ty); - if (var_ty.zigTypeTag() == .ComptimeInt or var_ty.zigTypeTag() == .ComptimeFloat) { + if (var_ty.zigTypeTag(mod) == .ComptimeInt or var_ty.zigTypeTag(mod) == .ComptimeFloat) { try sema.errNote(block, src, msg, "to modify this variable at runtime, it must be given an explicit fixed-size number type", .{}); } @@ -23101,8 +23143,9 @@ fn validateRunTimeType( var_ty: Type, is_extern: bool, ) CompileError!bool { + const mod = sema.mod; var ty = var_ty; - while (true) switch (ty.zigTypeTag()) { + while (true) switch (ty.zigTypeTag(mod)) { .Bool, .Int, .Float, @@ -23126,9 +23169,9 @@ fn validateRunTimeType( .Pointer => { const elem_ty = ty.childType(); - switch (elem_ty.zigTypeTag()) { + switch (elem_ty.zigTypeTag(mod)) { .Opaque => return true, - .Fn => return elem_ty.isFnOrHasRuntimeBits(), + .Fn => return elem_ty.isFnOrHasRuntimeBits(mod), else => ty = elem_ty, } }, @@ -23174,7 +23217,7 @@ fn explainWhyTypeIsComptimeInner( type_set: *TypeSet, ) CompileError!void { const mod = sema.mod; - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Bool, .Int, .Float, @@ -23211,8 +23254,8 @@ fn explainWhyTypeIsComptimeInner( try sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty.elemType(), type_set); }, .Pointer => { - const elem_ty = ty.elemType2(); - if 
(elem_ty.zigTypeTag() == .Fn) { + const elem_ty = ty.elemType2(mod); + if (elem_ty.zigTypeTag(mod) == .Fn) { const fn_info = elem_ty.fnInfo(); if (fn_info.is_generic) { try mod.errNoteNonLazy(src_loc, msg, "function is generic", .{}); @@ -23221,7 +23264,7 @@ fn explainWhyTypeIsComptimeInner( .Inline => try mod.errNoteNonLazy(src_loc, msg, "function has inline calling convention", .{}), else => {}, } - if (fn_info.return_type.comptimeOnly()) { + if (fn_info.return_type.comptimeOnly(mod)) { try mod.errNoteNonLazy(src_loc, msg, "function has a comptime-only return type", .{}); } return; @@ -23295,7 +23338,8 @@ fn validateExternType( ty: Type, position: ExternPosition, ) !bool { - switch (ty.zigTypeTag()) { + const mod = sema.mod; + switch (ty.zigTypeTag(mod)) { .Type, .ComptimeFloat, .ComptimeInt, @@ -23314,7 +23358,7 @@ fn validateExternType( .AnyFrame, => return true, .Pointer => return !(ty.isSlice() or try sema.typeRequiresComptime(ty)), - .Int => switch (ty.intInfo(sema.mod.getTarget()).bits) { + .Int => switch (ty.intInfo(mod).bits) { 8, 16, 32, 64, 128 => return true, else => return false, }, @@ -23329,14 +23373,12 @@ fn validateExternType( return !Type.fnCallingConventionAllowsZigTypes(target, ty.fnCallingConvention()); }, .Enum => { - var buf: Type.Payload.Bits = undefined; - return sema.validateExternType(ty.intTagType(&buf), position); + return sema.validateExternType(ty.intTagType(), position); }, .Struct, .Union => switch (ty.containerLayout()) { .Extern => return true, .Packed => { - const target = sema.mod.getTarget(); - const bit_size = try ty.bitSizeAdvanced(target, sema); + const bit_size = try ty.bitSizeAdvanced(mod, sema); switch (bit_size) { 8, 16, 32, 64, 128 => return true, else => return false, @@ -23346,10 +23388,10 @@ fn validateExternType( }, .Array => { if (position == .ret_ty or position == .param_ty) return false; - return sema.validateExternType(ty.elemType2(), .element); + return sema.validateExternType(ty.elemType2(mod), .element); }, - .Vector => return sema.validateExternType(ty.elemType2(), .element), - .Optional => return ty.isPtrLikeOptional(), + .Vector => return sema.validateExternType(ty.elemType2(mod), .element), + .Optional => return ty.isPtrLikeOptional(mod), } } @@ -23361,7 +23403,7 @@ fn explainWhyTypeIsNotExtern( position: ExternPosition, ) CompileError!void { const mod = sema.mod; - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Opaque, .Bool, .Float, @@ -23390,7 +23432,7 @@ fn explainWhyTypeIsNotExtern( }, .Void => try mod.errNoteNonLazy(src_loc, msg, "'void' is a zero bit type; for C 'void' use 'anyopaque'", .{}), .NoReturn => try mod.errNoteNonLazy(src_loc, msg, "'noreturn' is only allowed as a return type", .{}), - .Int => if (!std.math.isPowerOfTwo(ty.intInfo(sema.mod.getTarget()).bits)) { + .Int => if (!std.math.isPowerOfTwo(ty.intInfo(mod).bits)) { try mod.errNoteNonLazy(src_loc, msg, "only integers with power of two bits are extern compatible", .{}); } else { try mod.errNoteNonLazy(src_loc, msg, "only integers with 8, 16, 32, 64 and 128 bits are extern compatible", .{}); @@ -23409,8 +23451,7 @@ fn explainWhyTypeIsNotExtern( } }, .Enum => { - var buf: Type.Payload.Bits = undefined; - const tag_ty = ty.intTagType(&buf); + const tag_ty = ty.intTagType(); try mod.errNoteNonLazy(src_loc, msg, "enum tag type '{}' is not extern compatible", .{tag_ty.fmt(sema.mod)}); try sema.explainWhyTypeIsNotExtern(msg, src_loc, tag_ty, position); }, @@ -23422,17 +23463,17 @@ fn explainWhyTypeIsNotExtern( } else if (position == .param_ty) { 
return mod.errNoteNonLazy(src_loc, msg, "arrays are not allowed as a parameter type", .{}); } - try sema.explainWhyTypeIsNotExtern(msg, src_loc, ty.elemType2(), .element); + try sema.explainWhyTypeIsNotExtern(msg, src_loc, ty.elemType2(mod), .element); }, - .Vector => try sema.explainWhyTypeIsNotExtern(msg, src_loc, ty.elemType2(), .element), + .Vector => try sema.explainWhyTypeIsNotExtern(msg, src_loc, ty.elemType2(mod), .element), .Optional => try mod.errNoteNonLazy(src_loc, msg, "only pointer like optionals are extern compatible", .{}), } } /// Returns true if `ty` is allowed in packed types. /// Does *NOT* require `ty` to be resolved in any way. -fn validatePackedType(ty: Type) bool { - switch (ty.zigTypeTag()) { +fn validatePackedType(ty: Type, mod: *const Module) bool { + switch (ty.zigTypeTag(mod)) { .Type, .ComptimeFloat, .ComptimeInt, @@ -23448,7 +23489,7 @@ fn validatePackedType(ty: Type) bool { .Fn, .Array, => return false, - .Optional => return ty.isPtrLikeOptional(), + .Optional => return ty.isPtrLikeOptional(mod), .Void, .Bool, .Float, @@ -23468,7 +23509,7 @@ fn explainWhyTypeIsNotPacked( ty: Type, ) CompileError!void { const mod = sema.mod; - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Void, .Bool, .Float, @@ -23731,6 +23772,7 @@ fn panicSentinelMismatch( sentinel_index: Air.Inst.Ref, ) !void { assert(!parent_block.is_comptime); + const mod = sema.mod; const expected_sentinel_val = maybe_sentinel orelse return; const expected_sentinel = try sema.addConstant(sentinel_ty, expected_sentinel_val); @@ -23743,7 +23785,7 @@ fn panicSentinelMismatch( break :blk try parent_block.addTyOp(.load, sentinel_ty, sentinel_ptr); }; - const ok = if (sentinel_ty.zigTypeTag() == .Vector) ok: { + const ok = if (sentinel_ty.zigTypeTag(mod) == .Vector) ok: { const eql = try parent_block.addCmpVector(expected_sentinel, actual_sentinel, .eq); break :ok try parent_block.addInst(.{ @@ -23753,7 +23795,7 @@ fn panicSentinelMismatch( .operation = .And, } }, }); - } else if (sentinel_ty.isSelfComparable(true)) + } else if (sentinel_ty.isSelfComparable(mod, true)) try parent_block.addBinOp(.cmp_eq, expected_sentinel, actual_sentinel) else { const panic_fn = try sema.getBuiltin("checkNonScalarSentinel"); @@ -23848,6 +23890,7 @@ fn fieldVal( // When editing this function, note that there is corresponding logic to be edited // in `fieldPtr`. This function takes a value and returns a value. 
+ const mod = sema.mod; const arena = sema.arena; const object_src = src; // TODO better source location const object_ty = sema.typeOf(object); @@ -23862,7 +23905,7 @@ fn fieldVal( else object_ty; - switch (inner_ty.zigTypeTag()) { + switch (inner_ty.zigTypeTag(mod)) { .Array => { if (mem.eql(u8, field_name, "len")) { return sema.addConstant( @@ -23926,10 +23969,9 @@ fn fieldVal( object; const val = (try sema.resolveDefinedValue(block, object_src, dereffed_type)).?; - var to_type_buffer: Value.ToTypeBuffer = undefined; - const child_type = val.toType(&to_type_buffer); + const child_type = val.toType(); - switch (try child_type.zigTypeTagOrPoison()) { + switch (try child_type.zigTypeTagOrPoison(mod)) { .ErrorSet => { const name: []const u8 = if (child_type.castTag(.error_set)) |payload| blk: { if (payload.data.names.getEntry(field_name)) |entry| { @@ -23997,7 +24039,7 @@ fn fieldVal( const msg = try sema.errMsg(block, src, "type '{}' has no members", .{child_type.fmt(sema.mod)}); errdefer msg.destroy(sema.gpa); if (child_type.isSlice()) try sema.errNote(block, src, msg, "slice values have 'len' and 'ptr' members", .{}); - if (child_type.zigTypeTag() == .Array) try sema.errNote(block, src, msg, "array values have 'len' member", .{}); + if (child_type.zigTypeTag(mod) == .Array) try sema.errNote(block, src, msg, "array values have 'len' member", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); @@ -24035,9 +24077,10 @@ fn fieldPtr( // When editing this function, note that there is corresponding logic to be edited // in `fieldVal`. This function takes a pointer and returns a pointer. + const mod = sema.mod; const object_ptr_src = src; // TODO better source location const object_ptr_ty = sema.typeOf(object_ptr); - const object_ty = switch (object_ptr_ty.zigTypeTag()) { + const object_ty = switch (object_ptr_ty.zigTypeTag(mod)) { .Pointer => object_ptr_ty.elemType(), else => return sema.fail(block, object_ptr_src, "expected pointer, found '{}'", .{object_ptr_ty.fmt(sema.mod)}), }; @@ -24052,7 +24095,7 @@ fn fieldPtr( else object_ty; - switch (inner_ty.zigTypeTag()) { + switch (inner_ty.zigTypeTag(mod)) { .Array => { if (mem.eql(u8, field_name, "len")) { var anon_decl = try block.startAnonDecl(); @@ -24142,10 +24185,9 @@ fn fieldPtr( result; const val = (sema.resolveDefinedValue(block, src, inner) catch unreachable).?; - var to_type_buffer: Value.ToTypeBuffer = undefined; - const child_type = val.toType(&to_type_buffer); + const child_type = val.toType(); - switch (child_type.zigTypeTag()) { + switch (child_type.zigTypeTag(mod)) { .ErrorSet => { // TODO resolve inferred error sets const name: []const u8 = if (child_type.castTag(.error_set)) |payload| blk: { @@ -24258,15 +24300,16 @@ fn fieldCallBind( // When editing this function, note that there is corresponding logic to be edited // in `fieldVal`. This function takes a pointer and returns a pointer. + const mod = sema.mod; const raw_ptr_src = src; // TODO better source location const raw_ptr_ty = sema.typeOf(raw_ptr); - const inner_ty = if (raw_ptr_ty.zigTypeTag() == .Pointer and (raw_ptr_ty.ptrSize() == .One or raw_ptr_ty.ptrSize() == .C)) + const inner_ty = if (raw_ptr_ty.zigTypeTag(mod) == .Pointer and (raw_ptr_ty.ptrSize() == .One or raw_ptr_ty.ptrSize() == .C)) raw_ptr_ty.childType() else return sema.fail(block, raw_ptr_src, "expected single pointer, found '{}'", .{raw_ptr_ty.fmt(sema.mod)}); // Optionally dereference a second pointer to get the concrete type. 
- const is_double_ptr = inner_ty.zigTypeTag() == .Pointer and inner_ty.ptrSize() == .One; + const is_double_ptr = inner_ty.zigTypeTag(mod) == .Pointer and inner_ty.ptrSize() == .One; const concrete_ty = if (is_double_ptr) inner_ty.childType() else inner_ty; const ptr_ty = if (is_double_ptr) inner_ty else raw_ptr_ty; const object_ptr = if (is_double_ptr) @@ -24275,7 +24318,7 @@ fn fieldCallBind( raw_ptr; find_field: { - switch (concrete_ty.zigTypeTag()) { + switch (concrete_ty.zigTypeTag(mod)) { .Struct => { const struct_ty = try sema.resolveTypeFields(concrete_ty); if (struct_ty.castTag(.@"struct")) |struct_obj| { @@ -24321,21 +24364,21 @@ fn fieldCallBind( } // If we get here, we need to look for a decl in the struct type instead. - const found_decl = switch (concrete_ty.zigTypeTag()) { + const found_decl = switch (concrete_ty.zigTypeTag(mod)) { .Struct, .Opaque, .Union, .Enum => found_decl: { if (concrete_ty.getNamespace()) |namespace| { if (try sema.namespaceLookup(block, src, namespace, field_name)) |decl_idx| { try sema.addReferencedBy(block, src, decl_idx); const decl_val = try sema.analyzeDeclVal(block, src, decl_idx); const decl_type = sema.typeOf(decl_val); - if (decl_type.zigTypeTag() == .Fn and + if (decl_type.zigTypeTag(mod) == .Fn and decl_type.fnParamLen() >= 1) { const first_param_type = decl_type.fnParamType(0); const first_param_tag = first_param_type.tag(); // zig fmt: off if (first_param_tag == .generic_poison or ( - first_param_type.zigTypeTag() == .Pointer and + first_param_type.zigTypeTag(mod) == .Pointer and (first_param_type.ptrSize() == .One or first_param_type.ptrSize() == .C) and first_param_type.childType().eql(concrete_ty, sema.mod))) @@ -24356,7 +24399,7 @@ fn fieldCallBind( .func_inst = decl_val, .arg0_inst = deref, } }; - } else if (first_param_type.zigTypeTag() == .Optional) { + } else if (first_param_type.zigTypeTag(mod) == .Optional) { var opt_buf: Type.Payload.ElemType = undefined; const child = first_param_type.optionalChild(&opt_buf); if (child.eql(concrete_ty, sema.mod)) { @@ -24365,7 +24408,7 @@ fn fieldCallBind( .func_inst = decl_val, .arg0_inst = deref, } }; - } else if (child.zigTypeTag() == .Pointer and + } else if (child.zigTypeTag(mod) == .Pointer and child.ptrSize() == .One and child.childType().eql(concrete_ty, sema.mod)) { @@ -24374,7 +24417,7 @@ fn fieldCallBind( .arg0_inst = object_ptr, } }; } - } else if (first_param_type.zigTypeTag() == .ErrorUnion and + } else if (first_param_type.zigTypeTag(mod) == .ErrorUnion and first_param_type.errorUnionPayload().eql(concrete_ty, sema.mod)) { const deref = try sema.analyzeLoad(block, src, object_ptr, src); @@ -24421,9 +24464,10 @@ fn finishFieldCallBind( .@"addrspace" = ptr_ty.ptrAddressSpace(), }); + const mod = sema.mod; const container_ty = ptr_ty.childType(); - if (container_ty.zigTypeTag() == .Struct) { - if (container_ty.structFieldValueComptime(field_index)) |default_val| { + if (container_ty.zigTypeTag(mod) == .Struct) { + if (container_ty.structFieldValueComptime(mod, field_index)) |default_val| { return .{ .direct = try sema.addConstant(field_ty, default_val) }; } } @@ -24504,7 +24548,8 @@ fn structFieldPtr( unresolved_struct_ty: Type, initializing: bool, ) CompileError!Air.Inst.Ref { - assert(unresolved_struct_ty.zigTypeTag() == .Struct); + const mod = sema.mod; + assert(unresolved_struct_ty.zigTypeTag(mod) == .Struct); const struct_ty = try sema.resolveTypeFields(unresolved_struct_ty); try sema.resolveStructLayout(struct_ty); @@ -24544,6 +24589,7 @@ fn structFieldPtrByIndex( return 
sema.tupleFieldPtr(block, src, struct_ptr, field_src, field_index, initializing); } + const mod = sema.mod; const struct_obj = struct_ty.castTag(.@"struct").?.data; const field = struct_obj.fields.values()[field_index]; const struct_ptr_ty = sema.typeOf(struct_ptr); @@ -24568,7 +24614,7 @@ fn structFieldPtrByIndex( if (i == field_index) { ptr_ty_data.bit_offset = running_bits; } - running_bits += @intCast(u16, f.ty.bitSize(target)); + running_bits += @intCast(u16, f.ty.bitSize(mod)); } ptr_ty_data.host_size = (running_bits + 7) / 8; @@ -24582,7 +24628,7 @@ fn structFieldPtrByIndex( const parent_align = if (struct_ptr_ty_info.@"align" != 0) struct_ptr_ty_info.@"align" else - struct_ptr_ty_info.pointee_type.abiAlignment(target); + struct_ptr_ty_info.pointee_type.abiAlignment(mod); ptr_ty_data.@"align" = parent_align; // If the field happens to be byte-aligned, simplify the pointer type. @@ -24596,8 +24642,8 @@ fn structFieldPtrByIndex( if (parent_align != 0 and ptr_ty_data.bit_offset % 8 == 0 and target.cpu.arch.endian() == .Little) { - const elem_size_bytes = ptr_ty_data.pointee_type.abiSize(target); - const elem_size_bits = ptr_ty_data.pointee_type.bitSize(target); + const elem_size_bytes = ptr_ty_data.pointee_type.abiSize(mod); + const elem_size_bits = ptr_ty_data.pointee_type.bitSize(mod); if (elem_size_bytes * 8 == elem_size_bits) { const byte_offset = ptr_ty_data.bit_offset / 8; const new_align = @as(u32, 1) << @intCast(u5, @ctz(byte_offset | parent_align)); @@ -24644,7 +24690,8 @@ fn structFieldVal( field_name_src: LazySrcLoc, unresolved_struct_ty: Type, ) CompileError!Air.Inst.Ref { - assert(unresolved_struct_ty.zigTypeTag() == .Struct); + const mod = sema.mod; + assert(unresolved_struct_ty.zigTypeTag(mod) == .Struct); const struct_ty = try sema.resolveTypeFields(unresolved_struct_ty); switch (struct_ty.tag()) { @@ -24728,9 +24775,10 @@ fn tupleFieldValByIndex( field_index: u32, tuple_ty: Type, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const field_ty = tuple_ty.structFieldType(field_index); - if (tuple_ty.structFieldValueComptime(field_index)) |default_value| { + if (tuple_ty.structFieldValueComptime(mod, field_index)) |default_value| { return sema.addConstant(field_ty, default_value); } @@ -24743,7 +24791,7 @@ fn tupleFieldValByIndex( return sema.addConstant(field_ty, field_values[field_index]); } - if (tuple_ty.structFieldValueComptime(field_index)) |default_val| { + if (tuple_ty.structFieldValueComptime(mod, field_index)) |default_val| { return sema.addConstant(field_ty, default_val); } @@ -24762,7 +24810,9 @@ fn unionFieldPtr( initializing: bool, ) CompileError!Air.Inst.Ref { const arena = sema.arena; - assert(unresolved_union_ty.zigTypeTag() == .Union); + const mod = sema.mod; + + assert(unresolved_union_ty.zigTypeTag(mod) == .Union); const union_ptr_ty = sema.typeOf(union_ptr); const union_ty = try sema.resolveTypeFields(unresolved_union_ty); @@ -24777,7 +24827,7 @@ fn unionFieldPtr( }); const enum_field_index = @intCast(u32, union_obj.tag_ty.enumFieldIndex(field_name).?); - if (initializing and field.ty.zigTypeTag() == .NoReturn) { + if (initializing and field.ty.zigTypeTag(mod) == .NoReturn) { const msg = msg: { const msg = try sema.errMsg(block, src, "cannot initialize 'noreturn' field of union", .{}); errdefer msg.destroy(sema.gpa); @@ -24839,7 +24889,7 @@ fn unionFieldPtr( const active_tag = try block.addTyOp(.get_union_tag, union_obj.tag_ty, union_val); try sema.panicInactiveUnionField(block, active_tag, wanted_tag); } - if (field.ty.zigTypeTag() == .NoReturn) 
{ + if (field.ty.zigTypeTag(mod) == .NoReturn) { _ = try block.addNoOp(.unreach); return Air.Inst.Ref.unreachable_value; } @@ -24855,7 +24905,8 @@ fn unionFieldVal( field_name_src: LazySrcLoc, unresolved_union_ty: Type, ) CompileError!Air.Inst.Ref { - assert(unresolved_union_ty.zigTypeTag() == .Union); + const mod = sema.mod; + assert(unresolved_union_ty.zigTypeTag(mod) == .Union); const union_ty = try sema.resolveTypeFields(unresolved_union_ty); const union_obj = union_ty.cast(Type.Payload.Union).?.data; @@ -24911,7 +24962,7 @@ fn unionFieldVal( const active_tag = try block.addTyOp(.get_union_tag, union_obj.tag_ty, union_byval); try sema.panicInactiveUnionField(block, active_tag, wanted_tag); } - if (field.ty.zigTypeTag() == .NoReturn) { + if (field.ty.zigTypeTag(mod) == .NoReturn) { _ = try block.addNoOp(.unreach); return Air.Inst.Ref.unreachable_value; } @@ -24928,22 +24979,22 @@ fn elemPtr( init: bool, oob_safety: bool, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const indexable_ptr_src = src; // TODO better source location const indexable_ptr_ty = sema.typeOf(indexable_ptr); - const target = sema.mod.getTarget(); - const indexable_ty = switch (indexable_ptr_ty.zigTypeTag()) { + const indexable_ty = switch (indexable_ptr_ty.zigTypeTag(mod)) { .Pointer => indexable_ptr_ty.elemType(), else => return sema.fail(block, indexable_ptr_src, "expected pointer, found '{}'", .{indexable_ptr_ty.fmt(sema.mod)}), }; try checkIndexable(sema, block, src, indexable_ty); - switch (indexable_ty.zigTypeTag()) { + switch (indexable_ty.zigTypeTag(mod)) { .Array, .Vector => return sema.elemPtrArray(block, src, indexable_ptr_src, indexable_ptr, elem_index_src, elem_index, init, oob_safety), .Struct => { // Tuple field access. const index_val = try sema.resolveConstValue(block, elem_index_src, elem_index, "tuple field access index must be comptime-known"); - const index = @intCast(u32, index_val.toUnsignedInt(target)); + const index = @intCast(u32, index_val.toUnsignedInt(mod)); return sema.tupleFieldPtr(block, src, indexable_ptr, elem_index_src, index, init); }, else => { @@ -24966,7 +25017,7 @@ fn elemPtrOneLayerOnly( ) CompileError!Air.Inst.Ref { const indexable_src = src; // TODO better source location const indexable_ty = sema.typeOf(indexable); - const target = sema.mod.getTarget(); + const mod = sema.mod; try checkIndexable(sema, block, src, indexable_ty); @@ -24978,7 +25029,7 @@ fn elemPtrOneLayerOnly( const runtime_src = rs: { const ptr_val = maybe_ptr_val orelse break :rs indexable_src; const index_val = maybe_index_val orelse break :rs elem_index_src; - const index = @intCast(usize, index_val.toUnsignedInt(target)); + const index = @intCast(usize, index_val.toUnsignedInt(mod)); const elem_ptr = try ptr_val.elemPtr(indexable_ty, sema.arena, index, sema.mod); const result_ty = try sema.elemPtrType(indexable_ty, index); return sema.addConstant(result_ty, elem_ptr); @@ -24989,7 +25040,7 @@ fn elemPtrOneLayerOnly( return block.addPtrElemPtr(indexable, elem_index, result_ty); }, .One => { - assert(indexable_ty.childType().zigTypeTag() == .Array); // Guaranteed by checkIndexable + assert(indexable_ty.childType().zigTypeTag(mod) == .Array); // Guaranteed by checkIndexable return sema.elemPtrArray(block, src, indexable_src, indexable, elem_index_src, elem_index, init, oob_safety); }, } @@ -25006,7 +25057,7 @@ fn elemVal( ) CompileError!Air.Inst.Ref { const indexable_src = src; // TODO better source location const indexable_ty = sema.typeOf(indexable); - const target = sema.mod.getTarget(); + const 
mod = sema.mod; try checkIndexable(sema, block, src, indexable_ty); @@ -25014,7 +25065,7 @@ fn elemVal( // index is a scalar or vector instead of unconditionally casting to usize. const elem_index = try sema.coerce(block, Type.usize, elem_index_uncasted, elem_index_src); - switch (indexable_ty.zigTypeTag()) { + switch (indexable_ty.zigTypeTag(mod)) { .Pointer => switch (indexable_ty.ptrSize()) { .Slice => return sema.elemValSlice(block, src, indexable_src, indexable, elem_index_src, elem_index, oob_safety), .Many, .C => { @@ -25024,10 +25075,10 @@ fn elemVal( const runtime_src = rs: { const indexable_val = maybe_indexable_val orelse break :rs indexable_src; const index_val = maybe_index_val orelse break :rs elem_index_src; - const index = @intCast(usize, index_val.toUnsignedInt(target)); + const index = @intCast(usize, index_val.toUnsignedInt(mod)); const elem_ptr_val = try indexable_val.elemPtr(indexable_ty, sema.arena, index, sema.mod); if (try sema.pointerDeref(block, indexable_src, elem_ptr_val, indexable_ty)) |elem_val| { - return sema.addConstant(indexable_ty.elemType2(), elem_val); + return sema.addConstant(indexable_ty.elemType2(mod), elem_val); } break :rs indexable_src; }; @@ -25036,7 +25087,7 @@ fn elemVal( return block.addBinOp(.ptr_elem_val, indexable, elem_index); }, .One => { - assert(indexable_ty.childType().zigTypeTag() == .Array); // Guaranteed by checkIndexable + assert(indexable_ty.childType().zigTypeTag(mod) == .Array); // Guaranteed by checkIndexable const elem_ptr = try sema.elemPtr(block, indexable_src, indexable, elem_index, elem_index_src, false, oob_safety); return sema.analyzeLoad(block, indexable_src, elem_ptr, elem_index_src); }, @@ -25049,7 +25100,7 @@ fn elemVal( .Struct => { // Tuple field access. const index_val = try sema.resolveConstValue(block, elem_index_src, elem_index, "tuple field access index must be comptime-known"); - const index = @intCast(u32, index_val.toUnsignedInt(target)); + const index = @intCast(u32, index_val.toUnsignedInt(mod)); return sema.tupleField(block, indexable_src, indexable, elem_index_src, index); }, else => unreachable, @@ -25093,6 +25144,7 @@ fn tupleFieldPtr( field_index: u32, init: bool, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const tuple_ptr_ty = sema.typeOf(tuple_ptr); const tuple_ty = tuple_ptr_ty.childType(); _ = try sema.resolveTypeFields(tuple_ty); @@ -25116,7 +25168,7 @@ fn tupleFieldPtr( .@"addrspace" = tuple_ptr_ty.ptrAddressSpace(), }); - if (tuple_ty.structFieldValueComptime(field_index)) |default_val| { + if (tuple_ty.structFieldValueComptime(mod, field_index)) |default_val| { const val = try Value.Tag.comptime_field_ptr.create(sema.arena, .{ .field_ty = field_ty, .field_val = default_val, @@ -25151,6 +25203,7 @@ fn tupleField( field_index_src: LazySrcLoc, field_index: u32, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const tuple_ty = try sema.resolveTypeFields(sema.typeOf(tuple)); const field_count = tuple_ty.structFieldCount(); @@ -25166,13 +25219,13 @@ fn tupleField( const field_ty = tuple_ty.structFieldType(field_index); - if (tuple_ty.structFieldValueComptime(field_index)) |default_value| { + if (tuple_ty.structFieldValueComptime(mod, field_index)) |default_value| { return sema.addConstant(field_ty, default_value); // comptime field } if (try sema.resolveMaybeUndefVal(tuple)) |tuple_val| { if (tuple_val.isUndef()) return sema.addConstUndef(field_ty); - return sema.addConstant(field_ty, tuple_val.fieldValue(tuple_ty, field_index)); + return sema.addConstant(field_ty, 
tuple_val.fieldValue(tuple_ty, mod, field_index)); } try sema.validateRuntimeElemAccess(block, field_index_src, field_ty, tuple_ty, tuple_src); @@ -25191,6 +25244,7 @@ fn elemValArray( elem_index: Air.Inst.Ref, oob_safety: bool, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const array_ty = sema.typeOf(array); const array_sent = array_ty.sentinel(); const array_len = array_ty.arrayLen(); @@ -25204,10 +25258,9 @@ fn elemValArray( const maybe_undef_array_val = try sema.resolveMaybeUndefVal(array); // index must be defined since it can access out of bounds const maybe_index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index); - const target = sema.mod.getTarget(); if (maybe_index_val) |index_val| { - const index = @intCast(usize, index_val.toUnsignedInt(target)); + const index = @intCast(usize, index_val.toUnsignedInt(mod)); if (array_sent) |s| { if (index == array_len) { return sema.addConstant(elem_ty, s); @@ -25223,7 +25276,7 @@ fn elemValArray( return sema.addConstUndef(elem_ty); } if (maybe_index_val) |index_val| { - const index = @intCast(usize, index_val.toUnsignedInt(target)); + const index = @intCast(usize, index_val.toUnsignedInt(mod)); const elem_val = try array_val.elemValue(sema.mod, sema.arena, index); return sema.addConstant(elem_ty, elem_val); } @@ -25255,7 +25308,7 @@ fn elemPtrArray( init: bool, oob_safety: bool, ) CompileError!Air.Inst.Ref { - const target = sema.mod.getTarget(); + const mod = sema.mod; const array_ptr_ty = sema.typeOf(array_ptr); const array_ty = array_ptr_ty.childType(); const array_sent = array_ty.sentinel() != null; @@ -25269,7 +25322,7 @@ fn elemPtrArray( const maybe_undef_array_ptr_val = try sema.resolveMaybeUndefVal(array_ptr); // The index must not be undefined since it can be out of bounds. 
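    // (For instance, indexing a `[2:0]u8` through a pointer: indices 0 and 1 address elements and
    // index 2 addresses the sentinel, so the bound checked below is `array_len_s` and only an index
    // of 3 or more fails, with the "+1 (sentinel)" hint appended to the error message.)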
const offset: ?usize = if (try sema.resolveDefinedValue(block, elem_index_src, elem_index)) |index_val| o: {
-        const index = try sema.usizeCast(block, elem_index_src, index_val.toUnsignedInt(target));
+        const index = try sema.usizeCast(block, elem_index_src, index_val.toUnsignedInt(mod));
         if (index >= array_len_s) {
             const sentinel_label: []const u8 = if (array_sent) " +1 (sentinel)" else "";
             return sema.fail(block, elem_index_src, "index {d} outside array of length {d}{s}", .{ index, array_len, sentinel_label });
@@ -25290,7 +25343,7 @@ fn elemPtrArray(
     }
 
     if (!init) {
-        try sema.validateRuntimeElemAccess(block, elem_index_src, array_ty.elemType2(), array_ty, array_ptr_src);
+        try sema.validateRuntimeElemAccess(block, elem_index_src, array_ty.elemType2(mod), array_ty, array_ptr_src);
     }
 
     const runtime_src = if (maybe_undef_array_ptr_val != null) elem_index_src else array_ptr_src;
@@ -25316,16 +25369,16 @@ fn elemValSlice(
     elem_index: Air.Inst.Ref,
     oob_safety: bool,
 ) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const slice_ty = sema.typeOf(slice);
     const slice_sent = slice_ty.sentinel() != null;
-    const elem_ty = slice_ty.elemType2();
+    const elem_ty = slice_ty.elemType2(mod);
     var runtime_src = slice_src;
 
     // slice must be defined since it can be dereferenced as null
     const maybe_slice_val = try sema.resolveDefinedValue(block, slice_src, slice);
     // index must be defined since it can index out of bounds
     const maybe_index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index);
-    const target = sema.mod.getTarget();
 
     if (maybe_slice_val) |slice_val| {
         runtime_src = elem_index_src;
@@ -25335,7 +25388,7 @@ fn elemValSlice(
             return sema.fail(block, slice_src, "indexing into empty slice is not allowed", .{});
         }
         if (maybe_index_val) |index_val| {
-            const index = @intCast(usize, index_val.toUnsignedInt(target));
+            const index = @intCast(usize, index_val.toUnsignedInt(mod));
             if (index >= slice_len_s) {
                 const sentinel_label: []const u8 = if (slice_sent) " +1 (sentinel)" else "";
                 return sema.fail(block, elem_index_src, "index {d} outside slice of length {d}{s}", .{ index, slice_len, sentinel_label });
@@ -25373,14 +25426,14 @@ fn elemPtrSlice(
     elem_index: Air.Inst.Ref,
     oob_safety: bool,
 ) CompileError!Air.Inst.Ref {
-    const target = sema.mod.getTarget();
+    const mod = sema.mod;
     const slice_ty = sema.typeOf(slice);
     const slice_sent = slice_ty.sentinel() != null;
 
     const maybe_undef_slice_val = try sema.resolveMaybeUndefVal(slice);
     // The index must not be undefined since it can be out of bounds.
     const offset: ?usize = if (try sema.resolveDefinedValue(block, elem_index_src, elem_index)) |index_val| o: {
-        const index = try sema.usizeCast(block, elem_index_src, index_val.toUnsignedInt(target));
+        const index = try sema.usizeCast(block, elem_index_src, index_val.toUnsignedInt(mod));
         break :o index;
     } else null;
 
@@ -25484,6 +25537,7 @@ fn coerceExtra(
     const dest_ty_src = inst_src; // TODO better source location
     const dest_ty = try sema.resolveTypeFields(dest_ty_unresolved);
     const inst_ty = try sema.resolveTypeFields(sema.typeOf(inst));
+    const mod = sema.mod;
     const target = sema.mod.getTarget();
 
     // If the types are the same, we can return the operand.
if (dest_ty.eql(inst_ty, sema.mod)) @@ -25502,9 +25556,9 @@ fn coerceExtra( return block.addBitCast(dest_ty, inst); } - const is_undef = inst_ty.zigTypeTag() == .Undefined; + const is_undef = inst_ty.zigTypeTag(mod) == .Undefined; - switch (dest_ty.zigTypeTag()) { + switch (dest_ty.zigTypeTag(mod)) { .Optional => optional: { // undefined sets the optional bit also to undefined. if (is_undef) { @@ -25512,18 +25566,18 @@ fn coerceExtra( } // null to ?T - if (inst_ty.zigTypeTag() == .Null) { + if (inst_ty.zigTypeTag(mod) == .Null) { return sema.addConstant(dest_ty, Value.null); } // cast from ?*T and ?[*]T to ?*anyopaque // but don't do it if the source type is a double pointer - if (dest_ty.isPtrLikeOptional() and dest_ty.elemType2().tag() == .anyopaque and - inst_ty.isPtrAtRuntime()) + if (dest_ty.isPtrLikeOptional(mod) and dest_ty.elemType2(mod).tag() == .anyopaque and + inst_ty.isPtrAtRuntime(mod)) anyopaque_check: { if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :optional; - const elem_ty = inst_ty.elemType2(); - if (elem_ty.zigTypeTag() == .Pointer or elem_ty.isPtrLikeOptional()) { + const elem_ty = inst_ty.elemType2(mod); + if (elem_ty.zigTypeTag(mod) == .Pointer or elem_ty.isPtrLikeOptional(mod)) { in_memory_result = .{ .double_ptr_to_anyopaque = .{ .actual = inst_ty, .wanted = dest_ty, @@ -25532,7 +25586,7 @@ fn coerceExtra( } // Let the logic below handle wrapping the optional now that // it has been checked to correctly coerce. - if (!inst_ty.isPtrLikeOptional()) break :anyopaque_check; + if (!inst_ty.isPtrLikeOptional(mod)) break :anyopaque_check; return sema.coerceCompatiblePtrs(block, dest_ty, inst, inst_src); } @@ -25554,7 +25608,7 @@ fn coerceExtra( const dest_info = dest_ty.ptrInfo().data; // Function body to function pointer. 
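    // (A sketch of this path: coercing `fn f() void {}` to `*const fn () void` resolves the
    // comptime-known function value to its Decl, takes the Decl's address via analyzeDeclRef,
    // then re-coerces the resulting pointer to the destination type.)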
- if (inst_ty.zigTypeTag() == .Fn) { + if (inst_ty.zigTypeTag(mod) == .Fn) { const fn_val = try sema.resolveConstValue(block, .unneeded, inst, ""); const fn_decl = fn_val.pointerDecl().?; const inst_as_ptr = try sema.analyzeDeclRef(fn_decl); @@ -25568,7 +25622,7 @@ fn coerceExtra( if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :pointer; const ptr_elem_ty = inst_ty.childType(); const array_ty = dest_info.pointee_type; - if (array_ty.zigTypeTag() != .Array) break :single_item; + if (array_ty.zigTypeTag(mod) != .Array) break :single_item; const array_elem_ty = array_ty.childType(); if (array_ty.arrayLen() != 1) break :single_item; const dest_is_mut = dest_info.mutable; @@ -25584,7 +25638,7 @@ fn coerceExtra( if (!inst_ty.isSinglePointer()) break :src_array_ptr; if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :pointer; const array_ty = inst_ty.childType(); - if (array_ty.zigTypeTag() != .Array) break :src_array_ptr; + if (array_ty.zigTypeTag(mod) != .Array) break :src_array_ptr; const array_elem_type = array_ty.childType(); const dest_is_mut = dest_info.mutable; @@ -25656,10 +25710,10 @@ fn coerceExtra( // cast from *T and [*]T to *anyopaque // but don't do it if the source type is a double pointer - if (dest_info.pointee_type.tag() == .anyopaque and inst_ty.zigTypeTag() == .Pointer) to_anyopaque: { + if (dest_info.pointee_type.tag() == .anyopaque and inst_ty.zigTypeTag(mod) == .Pointer) to_anyopaque: { if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :pointer; - const elem_ty = inst_ty.elemType2(); - if (elem_ty.zigTypeTag() == .Pointer or elem_ty.isPtrLikeOptional()) { + const elem_ty = inst_ty.elemType2(mod); + if (elem_ty.zigTypeTag(mod) == .Pointer or elem_ty.isPtrLikeOptional(mod)) { in_memory_result = .{ .double_ptr_to_anyopaque = .{ .actual = inst_ty, .wanted = dest_ty, @@ -25679,7 +25733,7 @@ fn coerceExtra( switch (dest_info.size) { // coercion to C pointer - .C => switch (inst_ty.zigTypeTag()) { + .C => switch (inst_ty.zigTypeTag(mod)) { .Null => { return sema.addConstant(dest_ty, Value.null); }, @@ -25691,7 +25745,7 @@ fn coerceExtra( return try sema.coerceCompatiblePtrs(block, dest_ty, addr, inst_src); }, .Int => { - const ptr_size_ty = switch (inst_ty.intInfo(target).signedness) { + const ptr_size_ty = switch (inst_ty.intInfo(mod).signedness) { .signed => Type.isize, .unsigned => Type.usize, }; @@ -25733,7 +25787,7 @@ fn coerceExtra( }, else => {}, }, - .One => switch (dest_info.pointee_type.zigTypeTag()) { + .One => switch (dest_info.pointee_type.zigTypeTag(mod)) { .Union => { // pointer to anonymous struct to pointer to union if (inst_ty.isSinglePointer() and @@ -25767,7 +25821,7 @@ fn coerceExtra( else => {}, }, .Slice => to_slice: { - if (inst_ty.zigTypeTag() == .Array) { + if (inst_ty.zigTypeTag(mod) == .Array) { return sema.fail( block, inst_src, @@ -25789,7 +25843,7 @@ fn coerceExtra( .ptr = if (dest_info.@"align" != 0) try Value.Tag.int_u64.create(sema.arena, dest_info.@"align") else - try dest_info.pointee_type.lazyAbiAlignment(target, sema.arena), + try dest_info.pointee_type.lazyAbiAlignment(mod, sema.arena), .len = Value.zero, }); return sema.addConstant(dest_ty, slice_val); @@ -25834,13 +25888,13 @@ fn coerceExtra( }, } }, - .Int, .ComptimeInt => switch (inst_ty.zigTypeTag()) { + .Int, .ComptimeInt => switch (inst_ty.zigTypeTag(mod)) { .Float, .ComptimeFloat => float: { if (is_undef) { return sema.addConstUndef(dest_ty); } const val = (try sema.resolveMaybeUndefVal(inst)) orelse { - if 
(dest_ty.zigTypeTag() == .ComptimeInt) { + if (dest_ty.zigTypeTag(mod) == .ComptimeInt) { if (!opts.report_err) return error.NotCoercible; return sema.failWithNeededComptime(block, inst_src, "value being casted to 'comptime_int' must be comptime-known"); } @@ -25870,15 +25924,15 @@ fn coerceExtra( } return try sema.addConstant(dest_ty, val); } - if (dest_ty.zigTypeTag() == .ComptimeInt) { + if (dest_ty.zigTypeTag(mod) == .ComptimeInt) { if (!opts.report_err) return error.NotCoercible; if (opts.no_cast_to_comptime_int) return inst; return sema.failWithNeededComptime(block, inst_src, "value being casted to 'comptime_int' must be comptime-known"); } // integer widening - const dst_info = dest_ty.intInfo(target); - const src_info = inst_ty.intInfo(target); + const dst_info = dest_ty.intInfo(mod); + const src_info = inst_ty.intInfo(mod); if ((src_info.signedness == dst_info.signedness and dst_info.bits >= src_info.bits) or // small enough unsigned ints can get casted to large enough signed ints (dst_info.signedness == .signed and dst_info.bits > src_info.bits)) @@ -25892,7 +25946,7 @@ fn coerceExtra( }, else => {}, }, - .Float, .ComptimeFloat => switch (inst_ty.zigTypeTag()) { + .Float, .ComptimeFloat => switch (inst_ty.zigTypeTag(mod)) { .ComptimeFloat => { const val = try sema.resolveConstValue(block, .unneeded, inst, ""); const result_val = try val.floatCast(sema.arena, dest_ty, target); @@ -25913,7 +25967,7 @@ fn coerceExtra( ); } return try sema.addConstant(dest_ty, result_val); - } else if (dest_ty.zigTypeTag() == .ComptimeFloat) { + } else if (dest_ty.zigTypeTag(mod) == .ComptimeFloat) { if (!opts.report_err) return error.NotCoercible; return sema.failWithNeededComptime(block, inst_src, "value being casted to 'comptime_float' must be comptime-known"); } @@ -25931,7 +25985,7 @@ fn coerceExtra( return sema.addConstUndef(dest_ty); } const val = (try sema.resolveMaybeUndefVal(inst)) orelse { - if (dest_ty.zigTypeTag() == .ComptimeFloat) { + if (dest_ty.zigTypeTag(mod) == .ComptimeFloat) { if (!opts.report_err) return error.NotCoercible; return sema.failWithNeededComptime(block, inst_src, "value being casted to 'comptime_float' must be comptime-known"); } @@ -25955,7 +26009,7 @@ fn coerceExtra( }, else => {}, }, - .Enum => switch (inst_ty.zigTypeTag()) { + .Enum => switch (inst_ty.zigTypeTag(mod)) { .EnumLiteral => { // enum literal to enum const val = try sema.resolveConstValue(block, .unneeded, inst, ""); @@ -25991,7 +26045,7 @@ fn coerceExtra( }, else => {}, }, - .ErrorUnion => switch (inst_ty.zigTypeTag()) { + .ErrorUnion => switch (inst_ty.zigTypeTag(mod)) { .ErrorUnion => eu: { if (maybe_inst_val) |inst_val| { switch (inst_val.tag()) { @@ -26031,7 +26085,7 @@ fn coerceExtra( }; }, }, - .Union => switch (inst_ty.zigTypeTag()) { + .Union => switch (inst_ty.zigTypeTag(mod)) { .Enum, .EnumLiteral => return sema.coerceEnumToUnion(block, dest_ty, dest_ty_src, inst, inst_src), .Struct => { if (inst_ty.isAnonStruct()) { @@ -26043,7 +26097,7 @@ fn coerceExtra( }, else => {}, }, - .Array => switch (inst_ty.zigTypeTag()) { + .Array => switch (inst_ty.zigTypeTag(mod)) { .Vector => return sema.coerceArrayLike(block, dest_ty, dest_ty_src, inst, inst_src), .Struct => { if (inst == .empty_struct) { @@ -26058,7 +26112,7 @@ fn coerceExtra( }, else => {}, }, - .Vector => switch (inst_ty.zigTypeTag()) { + .Vector => switch (inst_ty.zigTypeTag(mod)) { .Array, .Vector => return sema.coerceArrayLike(block, dest_ty, dest_ty_src, inst, inst_src), .Struct => { if (inst_ty.isTuple()) { @@ -26093,7 +26147,7 @@ fn 
coerceExtra( if (!opts.report_err) return error.NotCoercible; - if (opts.is_ret and dest_ty.zigTypeTag() == .NoReturn) { + if (opts.is_ret and dest_ty.zigTypeTag(mod) == .NoReturn) { const msg = msg: { const msg = try sema.errMsg(block, inst_src, "function declared 'noreturn' returns", .{}); errdefer msg.destroy(sema.gpa); @@ -26111,7 +26165,7 @@ fn coerceExtra( errdefer msg.destroy(sema.gpa); // E!T to T - if (inst_ty.zigTypeTag() == .ErrorUnion and + if (inst_ty.zigTypeTag(mod) == .ErrorUnion and (try sema.coerceInMemoryAllowed(block, inst_ty.errorUnionPayload(), dest_ty, false, target, dest_ty_src, inst_src)) == .ok) { try sema.errNote(block, inst_src, msg, "cannot convert error union to payload type", .{}); @@ -26120,7 +26174,7 @@ fn coerceExtra( // ?T to T var buf: Type.Payload.ElemType = undefined; - if (inst_ty.zigTypeTag() == .Optional and + if (inst_ty.zigTypeTag(mod) == .Optional and (try sema.coerceInMemoryAllowed(block, inst_ty.optionalChild(&buf), dest_ty, false, target, dest_ty_src, inst_src)) == .ok) { try sema.errNote(block, inst_src, msg, "cannot convert optional to payload type", .{}); @@ -26133,7 +26187,7 @@ fn coerceExtra( if (opts.is_ret and sema.mod.test_functions.get(sema.func.?.owner_decl) == null) { const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = 0 }; const src_decl = sema.mod.declPtr(sema.func.?.owner_decl); - if (inst_ty.isError() and !dest_ty.isError()) { + if (inst_ty.isError(mod) and !dest_ty.isError(mod)) { try sema.mod.errNoteNonLazy(ret_ty_src.toSrcLoc(src_decl), msg, "function cannot return an error", .{}); } else { try sema.mod.errNoteNonLazy(ret_ty_src.toSrcLoc(src_decl), msg, "function return type declared here", .{}); @@ -26264,6 +26318,7 @@ const InMemoryCoercionResult = union(enum) { } fn report(res: *const InMemoryCoercionResult, sema: *Sema, block: *Block, src: LazySrcLoc, msg: *Module.ErrorMsg) !void { + const mod = sema.mod; var cur = res; while (true) switch (cur.*) { .ok => unreachable, @@ -26445,8 +26500,8 @@ const InMemoryCoercionResult = union(enum) { break; }, .ptr_allowzero => |pair| { - const wanted_allow_zero = pair.wanted.ptrAllowsZero(); - const actual_allow_zero = pair.actual.ptrAllowsZero(); + const wanted_allow_zero = pair.wanted.ptrAllowsZero(mod); + const actual_allow_zero = pair.actual.ptrAllowsZero(mod); if (actual_allow_zero and !wanted_allow_zero) { try sema.errNote(block, src, msg, "'{}' could have null values which are illegal in type '{}'", .{ pair.actual.fmt(sema.mod), pair.wanted.fmt(sema.mod), @@ -26522,13 +26577,15 @@ fn coerceInMemoryAllowed( dest_src: LazySrcLoc, src_src: LazySrcLoc, ) CompileError!InMemoryCoercionResult { - if (dest_ty.eql(src_ty, sema.mod)) + const mod = sema.mod; + + if (dest_ty.eql(src_ty, mod)) return .ok; // Differently-named integers with the same number of bits. - if (dest_ty.zigTypeTag() == .Int and src_ty.zigTypeTag() == .Int) { - const dest_info = dest_ty.intInfo(target); - const src_info = src_ty.intInfo(target); + if (dest_ty.zigTypeTag(mod) == .Int and src_ty.zigTypeTag(mod) == .Int) { + const dest_info = dest_ty.intInfo(mod); + const src_info = src_ty.intInfo(mod); if (dest_info.signedness == src_info.signedness and dest_info.bits == src_info.bits) @@ -26551,7 +26608,7 @@ fn coerceInMemoryAllowed( } // Differently-named floats with the same number of bits. 
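    // (E.g. `f80` and `c_longdouble` are distinct types that may share a bit width on some
    // targets; when their floatBits match they are in-memory coercible, mirroring the
    // same-width integer case above, such as `c_int` vs. `i32`.)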
- if (dest_ty.zigTypeTag() == .Float and src_ty.zigTypeTag() == .Float) { + if (dest_ty.zigTypeTag(mod) == .Float and src_ty.zigTypeTag(mod) == .Float) { const dest_bits = dest_ty.floatBits(target); const src_bits = src_ty.floatBits(target); if (dest_bits == src_bits) { @@ -26575,8 +26632,8 @@ fn coerceInMemoryAllowed( return try sema.coerceInMemoryAllowedPtrs(block, dest_ty, src_ty, dest_ty, src_ty, dest_is_mut, target, dest_src, src_src); } - const dest_tag = dest_ty.zigTypeTag(); - const src_tag = src_ty.zigTypeTag(); + const dest_tag = dest_ty.zigTypeTag(mod); + const src_tag = src_ty.zigTypeTag(mod); // Functions if (dest_tag == .Fn and src_tag == .Fn) { @@ -26624,7 +26681,7 @@ fn coerceInMemoryAllowed( } const ok_sent = dest_info.sentinel == null or (src_info.sentinel != null and - dest_info.sentinel.?.eql(src_info.sentinel.?, dest_info.elem_type, sema.mod)); + dest_info.sentinel.?.eql(src_info.sentinel.?, dest_info.elem_type, mod)); if (!ok_sent) { return InMemoryCoercionResult{ .array_sentinel = .{ .actual = src_info.sentinel orelse Value.initTag(.unreachable_value), @@ -26646,8 +26703,8 @@ fn coerceInMemoryAllowed( } }; } - const dest_elem_ty = dest_ty.scalarType(); - const src_elem_ty = src_ty.scalarType(); + const dest_elem_ty = dest_ty.scalarType(mod); + const src_elem_ty = src_ty.scalarType(mod); const child = try sema.coerceInMemoryAllowed(block, dest_elem_ty, src_elem_ty, dest_is_mut, target, dest_src, src_src); if (child != .ok) { return InMemoryCoercionResult{ .vector_elem = .{ @@ -26923,6 +26980,7 @@ fn coerceInMemoryAllowedPtrs( dest_src: LazySrcLoc, src_src: LazySrcLoc, ) !InMemoryCoercionResult { + const mod = sema.mod; const dest_info = dest_ptr_ty.ptrInfo().data; const src_info = src_ptr_ty.ptrInfo().data; @@ -26964,8 +27022,8 @@ fn coerceInMemoryAllowedPtrs( } }; } - const dest_allow_zero = dest_ty.ptrAllowsZero(); - const src_allow_zero = src_ty.ptrAllowsZero(); + const dest_allow_zero = dest_ty.ptrAllowsZero(mod); + const src_allow_zero = src_ty.ptrAllowsZero(mod); const ok_allows_zero = (dest_allow_zero and (src_allow_zero or !dest_is_mut)) or @@ -27013,12 +27071,12 @@ fn coerceInMemoryAllowedPtrs( const src_align = if (src_info.@"align" != 0) src_info.@"align" else - src_info.pointee_type.abiAlignment(target); + src_info.pointee_type.abiAlignment(mod); const dest_align = if (dest_info.@"align" != 0) dest_info.@"align" else - dest_info.pointee_type.abiAlignment(target); + dest_info.pointee_type.abiAlignment(mod); if (dest_align > src_align) { return InMemoryCoercionResult{ .ptr_alignment = .{ @@ -27041,8 +27099,9 @@ fn coerceVarArgParam( ) !Air.Inst.Ref { if (block.is_typeof) return inst; + const mod = sema.mod; const uncasted_ty = sema.typeOf(inst); - const coerced = switch (uncasted_ty.zigTypeTag()) { + const coerced = switch (uncasted_ty.zigTypeTag(mod)) { // TODO consider casting to c_int/f64 if they fit .ComptimeInt, .ComptimeFloat => return sema.fail( block, @@ -27124,7 +27183,8 @@ fn storePtr2( // this code does not handle tuple-to-struct coercion which requires dealing with missing // fields. const operand_ty = sema.typeOf(uncasted_operand); - if (operand_ty.isTuple() and elem_ty.zigTypeTag() == .Array) { + const mod = sema.mod; + if (operand_ty.isTuple() and elem_ty.zigTypeTag(mod) == .Array) { const field_count = operand_ty.structFieldCount(); var i: u32 = 0; while (i < field_count) : (i += 1) { @@ -27225,7 +27285,8 @@ fn storePtr2( /// lengths match. 
fn obtainBitCastedVectorPtr(sema: *Sema, ptr: Air.Inst.Ref) ?Air.Inst.Ref { const array_ty = sema.typeOf(ptr).childType(); - if (array_ty.zigTypeTag() != .Array) return null; + const mod = sema.mod; + if (array_ty.zigTypeTag(mod) != .Array) return null; var ptr_inst = Air.refToIndex(ptr) orelse return null; const air_datas = sema.air_instructions.items(.data); const air_tags = sema.air_instructions.items(.tag); @@ -27237,7 +27298,7 @@ fn obtainBitCastedVectorPtr(sema: *Sema, ptr: Air.Inst.Ref) ?Air.Inst.Ref { .pointer => prev_ptr_ty.castTag(.pointer).?.data.pointee_type, else => return null, }; - if (prev_ptr_child_ty.zigTypeTag() == .Vector) break prev_ptr; + if (prev_ptr_child_ty.zigTypeTag(mod) == .Vector) break prev_ptr; ptr_inst = Air.refToIndex(prev_ptr) orelse return null; } else return null; @@ -27263,6 +27324,7 @@ fn storePtrVal( operand_val: Value, operand_ty: Type, ) !void { + const mod = sema.mod; var mut_kit = try sema.beginComptimePtrMutation(block, src, ptr_val, operand_ty); try sema.checkComptimeVarStore(block, src, mut_kit.decl_ref_mut); @@ -27281,8 +27343,7 @@ fn storePtrVal( val_ptr.* = try operand_val.copy(arena); }, .reinterpret => |reinterpret| { - const target = sema.mod.getTarget(); - const abi_size = try sema.usizeCast(block, src, mut_kit.ty.abiSize(target)); + const abi_size = try sema.usizeCast(block, src, mut_kit.ty.abiSize(mod)); const buffer = try sema.gpa.alloc(u8, abi_size); defer sema.gpa.free(buffer); reinterpret.val_ptr.*.writeToMemory(mut_kit.ty, sema.mod, buffer) catch |err| switch (err) { @@ -27354,7 +27415,7 @@ fn beginComptimePtrMutation( ptr_val: Value, ptr_elem_ty: Type, ) CompileError!ComptimePtrMutationKit { - const target = sema.mod.getTarget(); + const mod = sema.mod; switch (ptr_val.tag()) { .decl_ref_mut => { const decl_ref_mut = ptr_val.castTag(.decl_ref_mut).?.data; @@ -27375,7 +27436,7 @@ fn beginComptimePtrMutation( var parent = try sema.beginComptimePtrMutation(block, src, elem_ptr.array_ptr, elem_ptr.elem_ty); switch (parent.pointee) { - .direct => |val_ptr| switch (parent.ty.zigTypeTag()) { + .direct => |val_ptr| switch (parent.ty.zigTypeTag(mod)) { .Array, .Vector => { const check_len = parent.ty.arrayLenIncludingSentinel(); if (elem_ptr.index >= check_len) { @@ -27570,7 +27631,7 @@ fn beginComptimePtrMutation( }, }, .reinterpret => |reinterpret| { - if (!elem_ptr.elem_ty.hasWellDefinedLayout()) { + if (!elem_ptr.elem_ty.hasWellDefinedLayout(mod)) { // Even though the parent value type has well-defined memory layout, our // pointer type does not. 
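                        // (For example, when `elem_ptr.elem_ty` is an auto-layout struct: its field
                        // order and padding are unspecified, so the element cannot be reinterpreted
                        // from the parent value's bytes at comptime.)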
return ComptimePtrMutationKit{ @@ -27608,7 +27669,7 @@ fn beginComptimePtrMutation( const arena = parent.beginArena(sema.mod); defer parent.finishArena(sema.mod); - switch (parent.ty.zigTypeTag()) { + switch (parent.ty.zigTypeTag(mod)) { .Struct => { const fields = try arena.alloc(Value, parent.ty.structFieldCount()); @memset(fields, Value.undef); @@ -27746,7 +27807,7 @@ fn beginComptimePtrMutation( else => unreachable, }, .reinterpret => |reinterpret| { - const field_offset_u64 = field_ptr.container_ty.structFieldOffset(field_index, target); + const field_offset_u64 = field_ptr.container_ty.structFieldOffset(field_index, mod); const field_offset = try sema.usizeCast(block, src, field_offset_u64); return ComptimePtrMutationKit{ .decl_ref_mut = parent.decl_ref_mut, @@ -27872,7 +27933,8 @@ fn beginComptimePtrMutationInner( ptr_elem_ty: Type, decl_ref_mut: Value.Payload.DeclRefMut.Data, ) CompileError!ComptimePtrMutationKit { - const target = sema.mod.getTarget(); + const mod = sema.mod; + const target = mod.getTarget(); const coerce_ok = (try sema.coerceInMemoryAllowed(block, ptr_elem_ty, decl_ty, true, target, src, src)) == .ok; if (coerce_ok) { return ComptimePtrMutationKit{ @@ -27883,7 +27945,7 @@ fn beginComptimePtrMutationInner( } // Handle the case that the decl is an array and we're actually trying to point to an element. - if (decl_ty.isArrayOrVector()) { + if (decl_ty.isArrayOrVector(mod)) { const decl_elem_ty = decl_ty.childType(); if ((try sema.coerceInMemoryAllowed(block, ptr_elem_ty, decl_elem_ty, true, target, src, src)) == .ok) { return ComptimePtrMutationKit{ @@ -27894,14 +27956,14 @@ fn beginComptimePtrMutationInner( } } - if (!decl_ty.hasWellDefinedLayout()) { + if (!decl_ty.hasWellDefinedLayout(mod)) { return ComptimePtrMutationKit{ .decl_ref_mut = decl_ref_mut, .pointee = .{ .bad_decl_ty = {} }, .ty = decl_ty, }; } - if (!ptr_elem_ty.hasWellDefinedLayout()) { + if (!ptr_elem_ty.hasWellDefinedLayout(mod)) { return ComptimePtrMutationKit{ .decl_ref_mut = decl_ref_mut, .pointee = .{ .bad_ptr_ty = {} }, @@ -27951,6 +28013,7 @@ fn beginComptimePtrLoad( ptr_val: Value, maybe_array_ty: ?Type, ) ComptimePtrLoadError!ComptimePtrLoadKit { + const mod = sema.mod; const target = sema.mod.getTarget(); var deref: ComptimePtrLoadKit = switch (ptr_val.tag()) { .decl_ref, @@ -27966,7 +28029,7 @@ fn beginComptimePtrLoad( const decl_tv = try decl.typedValue(); if (decl_tv.val.tag() == .variable) return error.RuntimeLoad; - const layout_defined = decl.ty.hasWellDefinedLayout(); + const layout_defined = decl.ty.hasWellDefinedLayout(mod); break :blk ComptimePtrLoadKit{ .parent = if (layout_defined) .{ .tv = decl_tv, .byte_offset = 0 } else null, .pointee = decl_tv, @@ -27988,7 +28051,7 @@ fn beginComptimePtrLoad( } if (elem_ptr.index != 0) { - if (elem_ty.hasWellDefinedLayout()) { + if (elem_ty.hasWellDefinedLayout(mod)) { if (deref.parent) |*parent| { // Update the byte offset (in-place) const elem_size = try sema.typeAbiSize(elem_ty); @@ -28003,7 +28066,7 @@ fn beginComptimePtrLoad( // If we're loading an elem_ptr that was derived from a different type // than the true type of the underlying decl, we cannot deref directly - const ty_matches = if (deref.pointee != null and deref.pointee.?.ty.isArrayOrVector()) x: { + const ty_matches = if (deref.pointee != null and deref.pointee.?.ty.isArrayOrVector(mod)) x: { const deref_elem_ty = deref.pointee.?.ty.childType(); break :x (try sema.coerceInMemoryAllowed(block, deref_elem_ty, elem_ty, false, target, src, src)) == .ok or (try 
sema.coerceInMemoryAllowed(block, elem_ty, deref_elem_ty, false, target, src, src)) == .ok; @@ -28018,7 +28081,7 @@ fn beginComptimePtrLoad( if (maybe_array_ty) |load_ty| { // It's possible that we're loading a [N]T, in which case we'd like to slice // the pointee array directly from our parent array. - if (load_ty.isArrayOrVector() and load_ty.childType().eql(elem_ty, sema.mod)) { + if (load_ty.isArrayOrVector(mod) and load_ty.childType().eql(elem_ty, sema.mod)) { const N = try sema.usizeCast(block, src, load_ty.arrayLenIncludingSentinel()); deref.pointee = if (elem_ptr.index + N <= check_len) TypedValue{ .ty = try Type.array(sema.arena, N, null, elem_ty, sema.mod), @@ -28058,7 +28121,7 @@ fn beginComptimePtrLoad( const field_index = @intCast(u32, field_ptr.field_index); var deref = try sema.beginComptimePtrLoad(block, src, field_ptr.container_ptr, field_ptr.container_ty); - if (field_ptr.container_ty.hasWellDefinedLayout()) { + if (field_ptr.container_ty.hasWellDefinedLayout(mod)) { const struct_ty = field_ptr.container_ty.castTag(.@"struct"); if (struct_ty != null and struct_ty.?.data.layout == .Packed) { // packed structs are not byte addressable @@ -28066,7 +28129,7 @@ fn beginComptimePtrLoad( } else if (deref.parent) |*parent| { // Update the byte offset (in-place) try sema.resolveTypeLayout(field_ptr.container_ty); - const field_offset = field_ptr.container_ty.structFieldOffset(field_index, target); + const field_offset = field_ptr.container_ty.structFieldOffset(field_index, mod); parent.byte_offset = try sema.usizeCast(block, src, parent.byte_offset + field_offset); } } else { @@ -28103,7 +28166,7 @@ fn beginComptimePtrLoad( const field_ty = field_ptr.container_ty.structFieldType(field_index); deref.pointee = TypedValue{ .ty = field_ty, - .val = tv.val.fieldValue(tv.ty, field_index), + .val = tv.val.fieldValue(tv.ty, mod, field_index), }; } break :blk deref; @@ -28146,7 +28209,7 @@ fn beginComptimePtrLoad( return sema.fail(block, src, "attempt to unwrap error: {s}", .{tv.val.castTag(.@"error").?.data.name}); }, .opt_payload_ptr => if (tv.val.castTag(.opt_payload)) |some| some.data else opt: { - if (tv.val.isNull()) return sema.fail(block, src, "attempt to use null value", .{}); + if (tv.val.isNull(mod)) return sema.fail(block, src, "attempt to use null value", .{}); break :opt tv.val; }, else => unreachable, @@ -28181,7 +28244,7 @@ fn beginComptimePtrLoad( }; if (deref.pointee) |tv| { - if (deref.parent == null and tv.ty.hasWellDefinedLayout()) { + if (deref.parent == null and tv.ty.hasWellDefinedLayout(mod)) { deref.parent = .{ .tv = tv, .byte_offset = 0 }; } } @@ -28196,15 +28259,15 @@ fn bitCast( inst_src: LazySrcLoc, operand_src: ?LazySrcLoc, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const dest_ty = try sema.resolveTypeFields(dest_ty_unresolved); try sema.resolveTypeLayout(dest_ty); const old_ty = try sema.resolveTypeFields(sema.typeOf(inst)); try sema.resolveTypeLayout(old_ty); - const target = sema.mod.getTarget(); - const dest_bits = dest_ty.bitSize(target); - const old_bits = old_ty.bitSize(target); + const dest_bits = dest_ty.bitSize(mod); + const old_bits = old_ty.bitSize(mod); if (old_bits != dest_bits) { return sema.fail(block, inst_src, "@bitCast size mismatch: destination type '{}' has {d} bits but source type '{}' has {d} bits", .{ @@ -28233,20 +28296,20 @@ fn bitCastVal( new_ty: Type, buffer_offset: usize, ) !?Value { - const target = sema.mod.getTarget(); - if (old_ty.eql(new_ty, sema.mod)) return val; + const mod = sema.mod; + if (old_ty.eql(new_ty, 
mod)) return val;
 
     // For types with well-defined memory layouts, we serialize them into a byte buffer,
     // then deserialize to the new type.
-    const abi_size = try sema.usizeCast(block, src, old_ty.abiSize(target));
+    const abi_size = try sema.usizeCast(block, src, old_ty.abiSize(mod));
     const buffer = try sema.gpa.alloc(u8, abi_size);
     defer sema.gpa.free(buffer);
-    val.writeToMemory(old_ty, sema.mod, buffer) catch |err| switch (err) {
+    val.writeToMemory(old_ty, mod, buffer) catch |err| switch (err) {
         error.ReinterpretDeclRef => return null,
         error.IllDefinedMemoryLayout => unreachable, // Sema was supposed to emit a compile error already
-        error.Unimplemented => return sema.fail(block, src, "TODO: implement writeToMemory for type '{}'", .{old_ty.fmt(sema.mod)}),
+        error.Unimplemented => return sema.fail(block, src, "TODO: implement writeToMemory for type '{}'", .{old_ty.fmt(mod)}),
     };
-    return try Value.readFromMemory(new_ty, sema.mod, buffer[buffer_offset..], sema.arena);
+    return try Value.readFromMemory(new_ty, mod, buffer[buffer_offset..], sema.arena);
 }
 
 fn coerceArrayPtrToSlice(
@@ -28272,7 +28335,8 @@ fn coerceArrayPtrToSlice(
 fn checkPtrAttributes(sema: *Sema, dest_ty: Type, inst_ty: Type, in_memory_result: *InMemoryCoercionResult) bool {
     const dest_info = dest_ty.ptrInfo().data;
     const inst_info = inst_ty.ptrInfo().data;
-    const len0 = (inst_info.pointee_type.zigTypeTag() == .Array and (inst_info.pointee_type.arrayLenIncludingSentinel() == 0 or
+    const mod = sema.mod;
+    const len0 = (inst_info.pointee_type.zigTypeTag(mod) == .Array and (inst_info.pointee_type.arrayLenIncludingSentinel() == 0 or
         (inst_info.pointee_type.arrayLen() == 0 and dest_info.sentinel == null and dest_info.size != .C and dest_info.size != .Many))) or
         (inst_info.pointee_type.isTuple() and inst_info.pointee_type.structFieldCount() == 0);
 
@@ -28298,17 +28362,16 @@ fn checkPtrAttributes(sema: *Sema, dest_ty: Type, inst_ty: Type, in_memory_resul
     }
     if (inst_info.@"align" == 0 and dest_info.@"align" == 0) return true;
     if (len0) return true;
-    const target = sema.mod.getTarget();
 
     const inst_align = if (inst_info.@"align" != 0)
         inst_info.@"align"
     else
-        inst_info.pointee_type.abiAlignment(target);
+        inst_info.pointee_type.abiAlignment(mod);
 
     const dest_align = if (dest_info.@"align" != 0)
         dest_info.@"align"
     else
-        dest_info.pointee_type.abiAlignment(target);
+        dest_info.pointee_type.abiAlignment(mod);
 
     if (dest_align > inst_align) {
         in_memory_result.* = .{ .ptr_alignment = .{
@@ -28327,18 +28390,19 @@ fn coerceCompatiblePtrs(
     inst: Air.Inst.Ref,
     inst_src: LazySrcLoc,
 ) !Air.Inst.Ref {
+    const mod = sema.mod;
     const inst_ty = sema.typeOf(inst);
     if (try sema.resolveMaybeUndefVal(inst)) |val| {
-        if (!val.isUndef() and val.isNull() and !dest_ty.isAllowzeroPtr()) {
+        if (!val.isUndef() and val.isNull(mod) and !dest_ty.isAllowzeroPtr(mod)) {
             return sema.fail(block, inst_src, "null pointer casted to type '{}'", .{dest_ty.fmt(sema.mod)});
         }
         // The comptime Value representation is compatible with both types.
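        // (A comptime pointer Value is represented symbolically, e.g. as a decl_ref or
        // elem_ptr, rather than as raw address bits, so the same Value can be reused
        // under the destination pointer type.)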
return sema.addConstant(dest_ty, val); } try sema.requireRuntimeBlock(block, inst_src, null); - const inst_allows_zero = inst_ty.zigTypeTag() != .Pointer or inst_ty.ptrAllowsZero(); - if (block.wantSafety() and inst_allows_zero and !dest_ty.ptrAllowsZero() and - (try sema.typeHasRuntimeBits(dest_ty.elemType2()) or dest_ty.elemType2().zigTypeTag() == .Fn)) + const inst_allows_zero = inst_ty.zigTypeTag(mod) != .Pointer or inst_ty.ptrAllowsZero(mod); + if (block.wantSafety() and inst_allows_zero and !dest_ty.ptrAllowsZero(mod) and + (try sema.typeHasRuntimeBits(dest_ty.elemType2(mod)) or dest_ty.elemType2(mod).zigTypeTag(mod) == .Fn)) { const actual_ptr = if (inst_ty.isSlice()) try sema.analyzeSlicePtr(block, inst_src, inst, inst_ty) @@ -28364,6 +28428,7 @@ fn coerceEnumToUnion( inst: Air.Inst.Ref, inst_src: LazySrcLoc, ) !Air.Inst.Ref { + const mod = sema.mod; const inst_ty = sema.typeOf(inst); const tag_ty = union_ty.unionTagType() orelse { @@ -28396,7 +28461,7 @@ fn coerceEnumToUnion( const union_obj = union_ty.cast(Type.Payload.Union).?.data; const field = union_obj.fields.values()[field_index]; const field_ty = try sema.resolveTypeFields(field.ty); - if (field_ty.zigTypeTag() == .NoReturn) { + if (field_ty.zigTypeTag(mod) == .NoReturn) { const msg = msg: { const msg = try sema.errMsg(block, inst_src, "cannot initialize 'noreturn' field of union", .{}); errdefer msg.destroy(sema.gpa); @@ -28449,7 +28514,7 @@ fn coerceEnumToUnion( errdefer if (msg) |some| some.destroy(sema.gpa); for (union_obj.fields.values(), 0..) |field, i| { - if (field.ty.zigTypeTag() == .NoReturn) { + if (field.ty.zigTypeTag(mod) == .NoReturn) { const err_msg = msg orelse try sema.errMsg( block, inst_src, @@ -28469,7 +28534,7 @@ fn coerceEnumToUnion( } // If the union has all fields 0 bits, the union value is just the enum value. 
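    // (E.g. for `union(enum) { a: void, b: void }` every payload is zero-bit, so the union
    // value carries only the tag and the coercion below is a plain bitcast.)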
- if (union_ty.unionHasAllZeroBitFieldTypes()) { + if (union_ty.unionHasAllZeroBitFieldTypes(mod)) { return block.addBitCast(union_ty, enum_tag); } @@ -28487,7 +28552,7 @@ fn coerceEnumToUnion( while (it.next()) |field| : (field_index += 1) { const field_name = field.key_ptr.*; const field_ty = field.value_ptr.ty; - if (!field_ty.hasRuntimeBits()) continue; + if (!(try sema.typeHasRuntimeBits(field_ty))) continue; try sema.addFieldErrNote(union_ty, field_index, msg, "field '{s}' has type '{}'", .{ field_name, field_ty.fmt(sema.mod) }); } try sema.addDeclaredHereNote(msg, union_ty); @@ -29066,12 +29131,13 @@ fn analyzeDeclRefInner(sema: *Sema, decl_index: Decl.Index, analyze_fn_body: boo } fn maybeQueueFuncBodyAnalysis(sema: *Sema, decl_index: Decl.Index) !void { - const decl = sema.mod.declPtr(decl_index); + const mod = sema.mod; + const decl = mod.declPtr(decl_index); const tv = try decl.typedValue(); - if (tv.ty.zigTypeTag() != .Fn) return; + if (tv.ty.zigTypeTag(mod) != .Fn) return; if (!try sema.fnHasRuntimeBits(tv.ty)) return; const func = tv.val.castTag(.function) orelse return; // undef or extern_fn - try sema.mod.ensureFuncBodyAnalysisQueued(func.data); + try mod.ensureFuncBodyAnalysisQueued(func.data); } fn analyzeRef( @@ -29124,8 +29190,9 @@ fn analyzeLoad( ptr: Air.Inst.Ref, ptr_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const ptr_ty = sema.typeOf(ptr); - const elem_ty = switch (ptr_ty.zigTypeTag()) { + const elem_ty = switch (ptr_ty.zigTypeTag(mod)) { .Pointer => ptr_ty.childType(), else => return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ty.fmt(sema.mod)}), }; @@ -29196,12 +29263,13 @@ fn analyzeIsNull( operand: Air.Inst.Ref, invert_logic: bool, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const result_ty = Type.bool; if (try sema.resolveMaybeUndefVal(operand)) |opt_val| { if (opt_val.isUndef()) { return sema.addConstUndef(result_ty); } - const is_null = opt_val.isNull(); + const is_null = opt_val.isNull(mod); const bool_value = if (invert_logic) !is_null else is_null; if (bool_value) { return Air.Inst.Ref.bool_true; @@ -29213,10 +29281,10 @@ fn analyzeIsNull( const inverted_non_null_res = if (invert_logic) Air.Inst.Ref.bool_true else Air.Inst.Ref.bool_false; const operand_ty = sema.typeOf(operand); var buf: Type.Payload.ElemType = undefined; - if (operand_ty.zigTypeTag() == .Optional and operand_ty.optionalChild(&buf).zigTypeTag() == .NoReturn) { + if (operand_ty.zigTypeTag(mod) == .Optional and operand_ty.optionalChild(&buf).zigTypeTag(mod) == .NoReturn) { return inverted_non_null_res; } - if (operand_ty.zigTypeTag() != .Optional and !operand_ty.isPtrLikeOptional()) { + if (operand_ty.zigTypeTag(mod) != .Optional and !operand_ty.isPtrLikeOptional(mod)) { return inverted_non_null_res; } try sema.requireRuntimeBlock(block, src, null); @@ -29230,11 +29298,12 @@ fn analyzePtrIsNonErrComptimeOnly( src: LazySrcLoc, operand: Air.Inst.Ref, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const ptr_ty = sema.typeOf(operand); - assert(ptr_ty.zigTypeTag() == .Pointer); + assert(ptr_ty.zigTypeTag(mod) == .Pointer); const child_ty = ptr_ty.childType(); - const child_tag = child_ty.zigTypeTag(); + const child_tag = child_ty.zigTypeTag(mod); if (child_tag != .ErrorSet and child_tag != .ErrorUnion) return Air.Inst.Ref.bool_true; if (child_tag == .ErrorSet) return Air.Inst.Ref.bool_false; assert(child_tag == .ErrorUnion); @@ -29251,14 +29320,15 @@ fn analyzeIsNonErrComptimeOnly( src: LazySrcLoc, operand: Air.Inst.Ref, ) 
CompileError!Air.Inst.Ref { + const mod = sema.mod; const operand_ty = sema.typeOf(operand); - const ot = operand_ty.zigTypeTag(); + const ot = operand_ty.zigTypeTag(mod); if (ot != .ErrorSet and ot != .ErrorUnion) return Air.Inst.Ref.bool_true; if (ot == .ErrorSet) return Air.Inst.Ref.bool_false; assert(ot == .ErrorUnion); const payload_ty = operand_ty.errorUnionPayload(); - if (payload_ty.zigTypeTag() == .NoReturn) { + if (payload_ty.zigTypeTag(mod) == .NoReturn) { return Air.Inst.Ref.bool_false; } @@ -29375,22 +29445,21 @@ fn analyzeSlice( end_src: LazySrcLoc, by_length: bool, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; // Slice expressions can operate on a variable whose type is an array. This requires // the slice operand to be a pointer. In the case of a non-array, it will be a double pointer. const ptr_ptr_ty = sema.typeOf(ptr_ptr); - const target = sema.mod.getTarget(); - const ptr_ptr_child_ty = switch (ptr_ptr_ty.zigTypeTag()) { + const ptr_ptr_child_ty = switch (ptr_ptr_ty.zigTypeTag(mod)) { .Pointer => ptr_ptr_ty.elemType(), else => return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ptr_ty.fmt(sema.mod)}), }; - const mod = sema.mod; var array_ty = ptr_ptr_child_ty; var slice_ty = ptr_ptr_ty; var ptr_or_slice = ptr_ptr; var elem_ty: Type = undefined; var ptr_sentinel: ?Value = null; - switch (ptr_ptr_child_ty.zigTypeTag()) { + switch (ptr_ptr_child_ty.zigTypeTag(mod)) { .Array => { ptr_sentinel = ptr_ptr_child_ty.sentinel(); elem_ty = ptr_ptr_child_ty.childType(); @@ -29398,7 +29467,7 @@ fn analyzeSlice( .Pointer => switch (ptr_ptr_child_ty.ptrSize()) { .One => { const double_child_ty = ptr_ptr_child_ty.childType(); - if (double_child_ty.zigTypeTag() == .Array) { + if (double_child_ty.zigTypeTag(mod) == .Array) { ptr_sentinel = double_child_ty.sentinel(); ptr_or_slice = try sema.analyzeLoad(block, src, ptr_ptr, ptr_src); slice_ty = ptr_ptr_child_ty; @@ -29417,7 +29486,7 @@ fn analyzeSlice( if (ptr_ptr_child_ty.ptrSize() == .C) { if (try sema.resolveDefinedValue(block, ptr_src, ptr_or_slice)) |ptr_val| { - if (ptr_val.isNull()) { + if (ptr_val.isNull(mod)) { return sema.fail(block, src, "slice of null pointer", .{}); } } @@ -29448,7 +29517,7 @@ fn analyzeSlice( // we might learn of the length because it is a comptime-known slice value. 
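    // (E.g. `s[1..]` where `s` is a comptime-known slice of length 4: the omitted end bound
    // is resolved to 4 from the slice value rather than left as a runtime length.)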
var end_is_len = uncasted_end_opt == .none; const end = e: { - if (array_ty.zigTypeTag() == .Array) { + if (array_ty.zigTypeTag(mod) == .Array) { const len_val = try Value.Tag.int_u64.create(sema.arena, array_ty.arrayLen()); if (!end_is_len) { @@ -29587,8 +29656,8 @@ fn analyzeSlice( } if (try sema.resolveMaybeUndefVal(new_ptr)) |ptr_val| sentinel_check: { const expected_sentinel = sentinel orelse break :sentinel_check; - const start_int = start_val.getUnsignedInt(sema.mod.getTarget()).?; - const end_int = end_val.getUnsignedInt(sema.mod.getTarget()).?; + const start_int = start_val.getUnsignedInt(mod).?; + const end_int = end_val.getUnsignedInt(mod).?; const sentinel_index = try sema.usizeCast(block, end_src, end_int - start_int); const elem_ptr = try ptr_val.elemPtr(sema.typeOf(new_ptr), sema.arena, sentinel_index, sema.mod); @@ -29641,7 +29710,7 @@ fn analyzeSlice( const new_allowzero = new_ptr_ty_info.@"allowzero" and sema.typeOf(ptr).ptrSize() != .C; if (opt_new_len_val) |new_len_val| { - const new_len_int = new_len_val.toUnsignedInt(target); + const new_len_int = new_len_val.toUnsignedInt(mod); const return_ty = try Type.ptr(sema.arena, mod, .{ .pointee_type = try Type.array(sema.arena, new_len_int, sentinel, elem_ty, mod), @@ -29724,7 +29793,7 @@ fn analyzeSlice( } // requirement: end <= len - const opt_len_inst = if (array_ty.zigTypeTag() == .Array) + const opt_len_inst = if (array_ty.zigTypeTag(mod) == .Array) try sema.addIntUnsigned(Type.usize, array_ty.arrayLenIncludingSentinel()) else if (slice_ty.isSlice()) blk: { if (try sema.resolveDefinedValue(block, src, ptr_or_slice)) |slice_val| { @@ -29778,14 +29847,15 @@ fn cmpNumeric( lhs_src: LazySrcLoc, rhs_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const lhs_ty = sema.typeOf(uncasted_lhs); const rhs_ty = sema.typeOf(uncasted_rhs); - assert(lhs_ty.isNumeric()); - assert(rhs_ty.isNumeric()); + assert(lhs_ty.isNumeric(mod)); + assert(rhs_ty.isNumeric(mod)); - const lhs_ty_tag = lhs_ty.zigTypeTag(); - const rhs_ty_tag = rhs_ty.zigTypeTag(); + const lhs_ty_tag = lhs_ty.zigTypeTag(mod); + const rhs_ty_tag = rhs_ty.zigTypeTag(mod); const target = sema.mod.getTarget(); // One exception to heterogeneous comparison: comptime_float needs to @@ -29805,14 +29875,14 @@ fn cmpNumeric( if (try sema.resolveMaybeUndefVal(lhs)) |lhs_val| { if (try sema.resolveMaybeUndefVal(rhs)) |rhs_val| { // Compare ints: const vs. 
undefined (or vice versa) - if (!lhs_val.isUndef() and (lhs_ty.isInt() or lhs_ty_tag == .ComptimeInt) and rhs_ty.isInt() and rhs_val.isUndef()) { + if (!lhs_val.isUndef() and (lhs_ty.isInt(mod) or lhs_ty_tag == .ComptimeInt) and rhs_ty.isInt(mod) and rhs_val.isUndef()) { try sema.resolveLazyValue(lhs_val); - if (sema.compareIntsOnlyPossibleResult(target, lhs_val, op, rhs_ty)) |res| { + if (try sema.compareIntsOnlyPossibleResult(lhs_val, op, rhs_ty)) |res| { return if (res) Air.Inst.Ref.bool_true else Air.Inst.Ref.bool_false; } - } else if (!rhs_val.isUndef() and (rhs_ty.isInt() or rhs_ty_tag == .ComptimeInt) and lhs_ty.isInt() and lhs_val.isUndef()) { + } else if (!rhs_val.isUndef() and (rhs_ty.isInt(mod) or rhs_ty_tag == .ComptimeInt) and lhs_ty.isInt(mod) and lhs_val.isUndef()) { try sema.resolveLazyValue(rhs_val); - if (sema.compareIntsOnlyPossibleResult(target, rhs_val, op.reverse(), lhs_ty)) |res| { + if (try sema.compareIntsOnlyPossibleResult(rhs_val, op.reverse(), lhs_ty)) |res| { return if (res) Air.Inst.Ref.bool_true else Air.Inst.Ref.bool_false; } } @@ -29827,16 +29897,16 @@ fn cmpNumeric( return Air.Inst.Ref.bool_false; } } - if (try Value.compareHeteroAdvanced(lhs_val, op, rhs_val, target, sema)) { + if (try Value.compareHeteroAdvanced(lhs_val, op, rhs_val, mod, sema)) { return Air.Inst.Ref.bool_true; } else { return Air.Inst.Ref.bool_false; } } else { - if (!lhs_val.isUndef() and (lhs_ty.isInt() or lhs_ty_tag == .ComptimeInt) and rhs_ty.isInt()) { + if (!lhs_val.isUndef() and (lhs_ty.isInt(mod) or lhs_ty_tag == .ComptimeInt) and rhs_ty.isInt(mod)) { // Compare ints: const vs. var try sema.resolveLazyValue(lhs_val); - if (sema.compareIntsOnlyPossibleResult(target, lhs_val, op, rhs_ty)) |res| { + if (try sema.compareIntsOnlyPossibleResult(lhs_val, op, rhs_ty)) |res| { return if (res) Air.Inst.Ref.bool_true else Air.Inst.Ref.bool_false; } } @@ -29844,10 +29914,10 @@ fn cmpNumeric( } } else { if (try sema.resolveMaybeUndefVal(rhs)) |rhs_val| { - if (!rhs_val.isUndef() and (rhs_ty.isInt() or rhs_ty_tag == .ComptimeInt) and lhs_ty.isInt()) { + if (!rhs_val.isUndef() and (rhs_ty.isInt(mod) or rhs_ty_tag == .ComptimeInt) and lhs_ty.isInt(mod)) { // Compare ints: var vs. 
const try sema.resolveLazyValue(rhs_val); - if (sema.compareIntsOnlyPossibleResult(target, rhs_val, op.reverse(), lhs_ty)) |res| { + if (try sema.compareIntsOnlyPossibleResult(rhs_val, op.reverse(), lhs_ty)) |res| { return if (res) Air.Inst.Ref.bool_true else Air.Inst.Ref.bool_false; } } @@ -29901,11 +29971,11 @@ fn cmpNumeric( const lhs_is_signed = if (try sema.resolveDefinedValue(block, lhs_src, lhs)) |lhs_val| !(try lhs_val.compareAllWithZeroAdvanced(.gte, sema)) else - (lhs_ty.isRuntimeFloat() or lhs_ty.isSignedInt()); + (lhs_ty.isRuntimeFloat() or lhs_ty.isSignedInt(mod)); const rhs_is_signed = if (try sema.resolveDefinedValue(block, rhs_src, rhs)) |rhs_val| !(try rhs_val.compareAllWithZeroAdvanced(.gte, sema)) else - (rhs_ty.isRuntimeFloat() or rhs_ty.isSignedInt()); + (rhs_ty.isRuntimeFloat() or rhs_ty.isSignedInt(mod)); const dest_int_is_signed = lhs_is_signed or rhs_is_signed; var dest_float_type: ?Type = null; @@ -29926,7 +29996,7 @@ fn cmpNumeric( .lt, .lte => return if (lhs_val.isNegativeInf()) Air.Inst.Ref.bool_true else Air.Inst.Ref.bool_false, }; if (!rhs_is_signed) { - switch (lhs_val.orderAgainstZero()) { + switch (lhs_val.orderAgainstZero(mod)) { .gt => {}, .eq => switch (op) { // LHS = 0, RHS is unsigned .lte => return Air.Inst.Ref.bool_true, @@ -29959,13 +30029,13 @@ fn cmpNumeric( } lhs_bits = bigint.toConst().bitCountTwosComp(); } else { - lhs_bits = lhs_val.intBitCountTwosComp(target); + lhs_bits = lhs_val.intBitCountTwosComp(mod); } lhs_bits += @boolToInt(!lhs_is_signed and dest_int_is_signed); } else if (lhs_is_float) { dest_float_type = lhs_ty; } else { - const int_info = lhs_ty.intInfo(target); + const int_info = lhs_ty.intInfo(mod); lhs_bits = int_info.bits + @boolToInt(int_info.signedness == .unsigned and dest_int_is_signed); } @@ -29985,7 +30055,7 @@ fn cmpNumeric( .lt, .lte => return if (rhs_val.isNegativeInf()) Air.Inst.Ref.bool_false else Air.Inst.Ref.bool_true, }; if (!lhs_is_signed) { - switch (rhs_val.orderAgainstZero()) { + switch (rhs_val.orderAgainstZero(mod)) { .gt => {}, .eq => switch (op) { // RHS = 0, LHS is unsigned .gte => return Air.Inst.Ref.bool_true, @@ -30018,13 +30088,13 @@ fn cmpNumeric( } rhs_bits = bigint.toConst().bitCountTwosComp(); } else { - rhs_bits = rhs_val.intBitCountTwosComp(target); + rhs_bits = rhs_val.intBitCountTwosComp(mod); } rhs_bits += @boolToInt(!rhs_is_signed and dest_int_is_signed); } else if (rhs_is_float) { dest_float_type = rhs_ty; } else { - const int_info = rhs_ty.intInfo(target); + const int_info = rhs_ty.intInfo(mod); rhs_bits = int_info.bits + @boolToInt(int_info.signedness == .unsigned and dest_int_is_signed); } @@ -30032,7 +30102,7 @@ fn cmpNumeric( const max_bits = std.math.max(lhs_bits, rhs_bits); const casted_bits = std.math.cast(u16, max_bits) orelse return sema.fail(block, src, "{d} exceeds maximum integer bit count", .{max_bits}); const signedness: std.builtin.Signedness = if (dest_int_is_signed) .signed else .unsigned; - break :blk try Module.makeIntType(sema.arena, signedness, casted_bits); + break :blk try mod.intType(signedness, casted_bits); }; const casted_lhs = try sema.coerce(block, dest_ty, lhs, lhs_src); const casted_rhs = try sema.coerce(block, dest_ty, rhs, rhs_src); @@ -30040,13 +30110,20 @@ fn cmpNumeric( return block.addBinOp(Air.Inst.Tag.fromCmpOp(op, block.float_mode == .Optimized), casted_lhs, casted_rhs); } -/// Asserts that LHS value is an int or comptime int and not undefined, and that RHS type is an int. 
-/// Given a const LHS and an unknown RHS, attempt to determine whether `op` has a guaranteed result. +/// Asserts that LHS value is an int or comptime int and not undefined, and +/// that RHS type is an int. Given a const LHS and an unknown RHS, attempt to +/// determine whether `op` has a guaranteed result. /// If it cannot be determined, returns null. /// Otherwise returns a bool for the guaranteed comparison operation. -fn compareIntsOnlyPossibleResult(sema: *Sema, target: std.Target, lhs_val: Value, op: std.math.CompareOperator, rhs_ty: Type) ?bool { - const rhs_info = rhs_ty.intInfo(target); - const vs_zero = lhs_val.orderAgainstZeroAdvanced(sema) catch unreachable; +fn compareIntsOnlyPossibleResult( + sema: *Sema, + lhs_val: Value, + op: std.math.CompareOperator, + rhs_ty: Type, +) Allocator.Error!?bool { + const mod = sema.mod; + const rhs_info = rhs_ty.intInfo(mod); + const vs_zero = lhs_val.orderAgainstZeroAdvanced(mod, sema) catch unreachable; const is_zero = vs_zero == .eq; const is_negative = vs_zero == .lt; const is_positive = vs_zero == .gt; @@ -30078,7 +30155,7 @@ fn compareIntsOnlyPossibleResult(sema: *Sema, target: std.Target, lhs_val: Value }; const sign_adj = @boolToInt(!is_negative and rhs_info.signedness == .signed); - const req_bits = lhs_val.intBitCountTwosComp(target) + sign_adj; + const req_bits = lhs_val.intBitCountTwosComp(mod) + sign_adj; // No sized type can have more than 65535 bits. // The RHS type operand is either a runtime value or sized (but undefined) constant. @@ -30111,12 +30188,11 @@ fn compareIntsOnlyPossibleResult(sema: *Sema, target: std.Target, lhs_val: Value .max = false, }; - var ty_buffer: Type.Payload.Bits = .{ - .base = .{ .tag = if (is_negative) .int_signed else .int_unsigned }, - .data = @intCast(u16, req_bits), - }; - const ty = Type.initPayload(&ty_buffer.base); - const pop_count = lhs_val.popCount(ty, target); + const ty = try mod.intType( + if (is_negative) .signed else .unsigned, + @intCast(u16, req_bits), + ); + const pop_count = lhs_val.popCount(ty, mod); if (is_negative) { break :edge .{ @@ -30152,10 +30228,11 @@ fn cmpVector( lhs_src: LazySrcLoc, rhs_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const lhs_ty = sema.typeOf(lhs); const rhs_ty = sema.typeOf(rhs); - assert(lhs_ty.zigTypeTag() == .Vector); - assert(rhs_ty.zigTypeTag() == .Vector); + assert(lhs_ty.zigTypeTag(mod) == .Vector); + assert(rhs_ty.zigTypeTag(mod) == .Vector); try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src); const resolved_ty = try sema.resolvePeerTypes(block, src, &.{ lhs, rhs }, .{ .override = &.{ lhs_src, rhs_src } }); @@ -30296,16 +30373,17 @@ fn resolvePeerTypes( instructions: []const Air.Inst.Ref, candidate_srcs: Module.PeerTypeCandidateSrc, ) !Type { + const mod = sema.mod; switch (instructions.len) { 0 => return Type.initTag(.noreturn), 1 => return sema.typeOf(instructions[0]), else => {}, } - const target = sema.mod.getTarget(); + const target = mod.getTarget(); var chosen = instructions[0]; - // If this is non-null then it does the following thing, depending on the chosen zigTypeTag(). + // If this is non-null then it does the following thing, depending on the chosen zigTypeTag(mod). 
// * ErrorSet: this is an override // * ErrorUnion: this is an override of the error set only // * other: at the end we make an ErrorUnion with the other thing and this @@ -30318,8 +30396,8 @@ fn resolvePeerTypes( const candidate_ty = sema.typeOf(candidate); const chosen_ty = sema.typeOf(chosen); - const candidate_ty_tag = try candidate_ty.zigTypeTagOrPoison(); - const chosen_ty_tag = try chosen_ty.zigTypeTagOrPoison(); + const candidate_ty_tag = try candidate_ty.zigTypeTagOrPoison(mod); + const chosen_ty_tag = try chosen_ty.zigTypeTagOrPoison(mod); // If the candidate can coerce into our chosen type, we're done. // If the chosen type can coerce into the candidate, use that. @@ -30347,8 +30425,8 @@ fn resolvePeerTypes( continue; }, .Int => { - const chosen_info = chosen_ty.intInfo(target); - const candidate_info = candidate_ty.intInfo(target); + const chosen_info = chosen_ty.intInfo(mod); + const candidate_info = candidate_ty.intInfo(mod); if (chosen_info.bits < candidate_info.bits) { chosen = candidate; @@ -30537,7 +30615,7 @@ fn resolvePeerTypes( // *[N]T to []T if ((cand_info.size == .Many or cand_info.size == .Slice) and chosen_info.size == .One and - chosen_info.pointee_type.zigTypeTag() == .Array) + chosen_info.pointee_type.zigTypeTag(mod) == .Array) { // In case we see i.e.: `*[1]T`, `*[2]T`, `[*]T` convert_to_slice = false; @@ -30546,7 +30624,7 @@ fn resolvePeerTypes( continue; } if (cand_info.size == .One and - cand_info.pointee_type.zigTypeTag() == .Array and + cand_info.pointee_type.zigTypeTag(mod) == .Array and (chosen_info.size == .Many or chosen_info.size == .Slice)) { // In case we see i.e.: `*[1]T`, `*[2]T`, `[*]T` @@ -30559,8 +30637,8 @@ fn resolvePeerTypes( // Keep the one whose element type can be coerced into. if (chosen_info.size == .One and cand_info.size == .One and - chosen_info.pointee_type.zigTypeTag() == .Array and - cand_info.pointee_type.zigTypeTag() == .Array) + chosen_info.pointee_type.zigTypeTag(mod) == .Array and + cand_info.pointee_type.zigTypeTag(mod) == .Array) { const chosen_elem_ty = chosen_info.pointee_type.childType(); const cand_elem_ty = cand_info.pointee_type.childType(); @@ -30631,7 +30709,7 @@ fn resolvePeerTypes( .Optional => { var opt_child_buf: Type.Payload.ElemType = undefined; const chosen_ptr_ty = chosen_ty.optionalChild(&opt_child_buf); - if (chosen_ptr_ty.zigTypeTag() == .Pointer) { + if (chosen_ptr_ty.zigTypeTag(mod) == .Pointer) { const chosen_info = chosen_ptr_ty.ptrInfo().data; seen_const = seen_const or !chosen_info.mutable or !cand_info.mutable; @@ -30639,7 +30717,7 @@ fn resolvePeerTypes( // *[N]T to ?![*]T // *[N]T to ?![]T if (cand_info.size == .One and - cand_info.pointee_type.zigTypeTag() == .Array and + cand_info.pointee_type.zigTypeTag(mod) == .Array and (chosen_info.size == .Many or chosen_info.size == .Slice)) { continue; @@ -30648,7 +30726,7 @@ fn resolvePeerTypes( }, .ErrorUnion => { const chosen_ptr_ty = chosen_ty.errorUnionPayload(); - if (chosen_ptr_ty.zigTypeTag() == .Pointer) { + if (chosen_ptr_ty.zigTypeTag(mod) == .Pointer) { const chosen_info = chosen_ptr_ty.ptrInfo().data; seen_const = seen_const or !chosen_info.mutable or !cand_info.mutable; @@ -30656,7 +30734,7 @@ fn resolvePeerTypes( // *[N]T to E![*]T // *[N]T to E![]T if (cand_info.size == .One and - cand_info.pointee_type.zigTypeTag() == .Array and + cand_info.pointee_type.zigTypeTag(mod) == .Array and (chosen_info.size == .Many or chosen_info.size == .Slice)) { continue; @@ -30664,7 +30742,7 @@ fn resolvePeerTypes( } }, .Fn => { - if (!cand_info.mutable 
and cand_info.pointee_type.zigTypeTag() == .Fn and .ok == try sema.coerceInMemoryAllowedFns(block, chosen_ty, cand_info.pointee_type, target, src, src)) { + if (!cand_info.mutable and cand_info.pointee_type.zigTypeTag(mod) == .Fn and .ok == try sema.coerceInMemoryAllowedFns(block, chosen_ty, cand_info.pointee_type, target, src, src)) { chosen = candidate; chosen_i = candidate_i + 1; continue; @@ -30697,16 +30775,16 @@ fn resolvePeerTypes( const chosen_child_ty = chosen_ty.childType(); const candidate_child_ty = candidate_ty.childType(); - if (chosen_child_ty.zigTypeTag() == .Int and candidate_child_ty.zigTypeTag() == .Int) { - const chosen_info = chosen_child_ty.intInfo(target); - const candidate_info = candidate_child_ty.intInfo(target); + if (chosen_child_ty.zigTypeTag(mod) == .Int and candidate_child_ty.zigTypeTag(mod) == .Int) { + const chosen_info = chosen_child_ty.intInfo(mod); + const candidate_info = candidate_child_ty.intInfo(mod); if (chosen_info.bits < candidate_info.bits) { chosen = candidate; chosen_i = candidate_i + 1; } continue; } - if (chosen_child_ty.zigTypeTag() == .Float and candidate_child_ty.zigTypeTag() == .Float) { + if (chosen_child_ty.zigTypeTag(mod) == .Float and candidate_child_ty.zigTypeTag(mod) == .Float) { if (chosen_ty.floatBits(target) < candidate_ty.floatBits(target)) { chosen = candidate; chosen_i = candidate_i + 1; @@ -30725,7 +30803,7 @@ fn resolvePeerTypes( .Vector => continue, else => {}, }, - .Fn => if (chosen_ty.isSinglePointer() and chosen_ty.isConstPtr() and chosen_ty.childType().zigTypeTag() == .Fn) { + .Fn => if (chosen_ty.isSinglePointer() and chosen_ty.isConstPtr() and chosen_ty.childType().zigTypeTag(mod) == .Fn) { if (.ok == try sema.coerceInMemoryAllowedFns(block, chosen_ty.childType(), candidate_ty, target, src, src)) { continue; } @@ -30790,27 +30868,27 @@ fn resolvePeerTypes( // the source locations. 
const chosen_src = candidate_srcs.resolve( sema.gpa, - sema.mod.declPtr(block.src_decl), + mod.declPtr(block.src_decl), chosen_i, ); const candidate_src = candidate_srcs.resolve( sema.gpa, - sema.mod.declPtr(block.src_decl), + mod.declPtr(block.src_decl), candidate_i + 1, ); const msg = msg: { const msg = try sema.errMsg(block, src, "incompatible types: '{}' and '{}'", .{ - chosen_ty.fmt(sema.mod), - candidate_ty.fmt(sema.mod), + chosen_ty.fmt(mod), + candidate_ty.fmt(mod), }); errdefer msg.destroy(sema.gpa); if (chosen_src) |src_loc| - try sema.errNote(block, src_loc, msg, "type '{}' here", .{chosen_ty.fmt(sema.mod)}); + try sema.errNote(block, src_loc, msg, "type '{}' here", .{chosen_ty.fmt(mod)}); if (candidate_src) |src_loc| - try sema.errNote(block, src_loc, msg, "type '{}' here", .{candidate_ty.fmt(sema.mod)}); + try sema.errNote(block, src_loc, msg, "type '{}' here", .{candidate_ty.fmt(mod)}); break :msg msg; }; @@ -30826,72 +30904,73 @@ fn resolvePeerTypes( info.data.sentinel = chosen_child_ty.sentinel(); info.data.size = .Slice; info.data.mutable = !(seen_const or chosen_child_ty.isConstPtr()); - info.data.pointee_type = chosen_child_ty.elemType2(); + info.data.pointee_type = chosen_child_ty.elemType2(mod); - const new_ptr_ty = try Type.ptr(sema.arena, sema.mod, info.data); + const new_ptr_ty = try Type.ptr(sema.arena, mod, info.data); const opt_ptr_ty = if (any_are_null) try Type.optional(sema.arena, new_ptr_ty) else new_ptr_ty; const set_ty = err_set_ty orelse return opt_ptr_ty; - return try Type.errorUnion(sema.arena, set_ty, opt_ptr_ty, sema.mod); + return try Type.errorUnion(sema.arena, set_ty, opt_ptr_ty, mod); } if (seen_const) { // turn []T => []const T - switch (chosen_ty.zigTypeTag()) { + switch (chosen_ty.zigTypeTag(mod)) { .ErrorUnion => { const ptr_ty = chosen_ty.errorUnionPayload(); var info = ptr_ty.ptrInfo(); info.data.mutable = false; - const new_ptr_ty = try Type.ptr(sema.arena, sema.mod, info.data); + const new_ptr_ty = try Type.ptr(sema.arena, mod, info.data); const opt_ptr_ty = if (any_are_null) try Type.optional(sema.arena, new_ptr_ty) else new_ptr_ty; const set_ty = err_set_ty orelse chosen_ty.errorUnionSet(); - return try Type.errorUnion(sema.arena, set_ty, opt_ptr_ty, sema.mod); + return try Type.errorUnion(sema.arena, set_ty, opt_ptr_ty, mod); }, .Pointer => { var info = chosen_ty.ptrInfo(); info.data.mutable = false; - const new_ptr_ty = try Type.ptr(sema.arena, sema.mod, info.data); + const new_ptr_ty = try Type.ptr(sema.arena, mod, info.data); const opt_ptr_ty = if (any_are_null) try Type.optional(sema.arena, new_ptr_ty) else new_ptr_ty; const set_ty = err_set_ty orelse return opt_ptr_ty; - return try Type.errorUnion(sema.arena, set_ty, opt_ptr_ty, sema.mod); + return try Type.errorUnion(sema.arena, set_ty, opt_ptr_ty, mod); }, else => return chosen_ty, } } if (any_are_null) { - const opt_ty = switch (chosen_ty.zigTypeTag()) { + const opt_ty = switch (chosen_ty.zigTypeTag(mod)) { .Null, .Optional => chosen_ty, else => try Type.optional(sema.arena, chosen_ty), }; const set_ty = err_set_ty orelse return opt_ty; - return try Type.errorUnion(sema.arena, set_ty, opt_ty, sema.mod); + return try Type.errorUnion(sema.arena, set_ty, opt_ty, mod); } - if (err_set_ty) |ty| switch (chosen_ty.zigTypeTag()) { + if (err_set_ty) |ty| switch (chosen_ty.zigTypeTag(mod)) { .ErrorSet => return ty, .ErrorUnion => { const payload_ty = chosen_ty.errorUnionPayload(); - return try Type.errorUnion(sema.arena, ty, payload_ty, sema.mod); + return try Type.errorUnion(sema.arena, 
ty, payload_ty, mod);
 },
- else => return try Type.errorUnion(sema.arena, ty, chosen_ty, sema.mod),
+ else => return try Type.errorUnion(sema.arena, ty, chosen_ty, mod),
 };
 return chosen_ty;
 }
 pub fn resolveFnTypes(sema: *Sema, fn_info: Type.Payload.Function.Data) CompileError!void {
+ const mod = sema.mod;
 try sema.resolveTypeFully(fn_info.return_type);
- if (sema.mod.comp.bin_file.options.error_return_tracing and fn_info.return_type.isError()) {
+ if (mod.comp.bin_file.options.error_return_tracing and fn_info.return_type.isError(mod)) {
 // Ensure the type exists so that backends can assume that.
 _ = try sema.getBuiltinType("StackTrace");
 }
@@ -30943,7 +31022,8 @@ fn resolveLazyValue(sema: *Sema, val: Value) CompileError!void {
 }
 pub fn resolveTypeLayout(sema: *Sema, ty: Type) CompileError!void {
- switch (ty.zigTypeTag()) {
+ const mod = sema.mod;
+ switch (ty.zigTypeTag(mod)) {
 .Struct => return sema.resolveStructLayout(ty),
 .Union => return sema.resolveUnionLayout(ty),
 .Array => {
@@ -31021,7 +31101,7 @@ fn resolveStructLayout(sema: *Sema, ty: Type) CompileError!void {
 struct_obj.status = .have_layout;
 _ = try sema.resolveTypeRequiresComptime(resolved_ty);
- if (struct_obj.assumed_runtime_bits and !resolved_ty.hasRuntimeBits()) {
+ if (struct_obj.assumed_runtime_bits and !(try sema.typeHasRuntimeBits(resolved_ty))) {
 const msg = try Module.ErrorMsg.create(
 sema.gpa,
 struct_obj.srcLoc(sema.mod),
@@ -31043,7 +31123,7 @@ fn resolveStructLayout(sema: *Sema, ty: Type) CompileError!void {
 };
 for (struct_obj.fields.values(), 0..) |field, i| {
- optimized_order[i] = if (field.ty.hasRuntimeBits())
+ optimized_order[i] = if (try sema.typeHasRuntimeBits(field.ty))
 @intCast(u32, i)
 else
 Module.Struct.omitted_field;
@@ -31054,11 +31134,11 @@ fn resolveStructLayout(sema: *Sema, ty: Type) CompileError!void {
 sema: *Sema,
 fn lessThan(ctx: @This(), a: u32, b: u32) bool {
+ const m = ctx.sema.mod;
 if (a == Module.Struct.omitted_field) return false;
 if (b == Module.Struct.omitted_field) return true;
- const target = ctx.sema.mod.getTarget();
- return ctx.struct_obj.fields.values()[a].ty.abiAlignment(target) >
- ctx.struct_obj.fields.values()[b].ty.abiAlignment(target);
+ return ctx.struct_obj.fields.values()[a].ty.abiAlignment(m) >
+ ctx.struct_obj.fields.values()[b].ty.abiAlignment(m);
 }
 };
 mem.sort(u32, optimized_order, AlignSortContext{
@@ -31073,11 +31153,10 @@ fn resolveStructLayout(sema: *Sema, ty: Type) CompileError!void {
 fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!void {
 const gpa = mod.gpa;
- const target = mod.getTarget();
 var fields_bit_sum: u64 = 0;
 for (struct_obj.fields.values()) |field| {
- fields_bit_sum += field.ty.bitSize(target);
+ fields_bit_sum += field.ty.bitSize(mod);
 }
 const decl_index = struct_obj.owner_decl;
@@ -31178,32 +31257,29 @@ fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!voi
 };
 return sema.fail(&block, LazySrcLoc.nodeOffset(0), "size of packed struct '{d}' exceeds maximum bit width of 65535", .{fields_bit_sum});
 }
- var buf: Type.Payload.Bits = .{
- .base = .{ .tag = .int_unsigned },
- .data = @intCast(u16, fields_bit_sum),
- };
- struct_obj.backing_int_ty = try Type.initPayload(&buf.base).copy(decl_arena_allocator);
+ struct_obj.backing_int_ty = try mod.intType(.unsigned, @intCast(u16, fields_bit_sum));
 }
 }
 fn checkBackingIntType(sema: *Sema, block: *Block, src: LazySrcLoc, backing_int_ty: Type, fields_bit_sum: u64) CompileError!void {
- const target = sema.mod.getTarget();
+ const mod = sema.mod;
- if 
(!backing_int_ty.isInt()) { + if (!backing_int_ty.isInt(mod)) { return sema.fail(block, src, "expected backing integer type, found '{}'", .{backing_int_ty.fmt(sema.mod)}); } - if (backing_int_ty.bitSize(target) != fields_bit_sum) { + if (backing_int_ty.bitSize(mod) != fields_bit_sum) { return sema.fail( block, src, "backing integer type '{}' has bit size {} but the struct fields have a total bit size of {}", - .{ backing_int_ty.fmt(sema.mod), backing_int_ty.bitSize(target), fields_bit_sum }, + .{ backing_int_ty.fmt(sema.mod), backing_int_ty.bitSize(mod), fields_bit_sum }, ); } } fn checkIndexable(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void { - if (!ty.isIndexable()) { + const mod = sema.mod; + if (!ty.isIndexable(mod)) { const msg = msg: { const msg = try sema.errMsg(block, src, "type '{}' does not support indexing", .{ty.fmt(sema.mod)}); errdefer msg.destroy(sema.gpa); @@ -31215,12 +31291,13 @@ fn checkIndexable(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void { } fn checkMemOperand(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void { - if (ty.zigTypeTag() == .Pointer) { + const mod = sema.mod; + if (ty.zigTypeTag(mod) == .Pointer) { switch (ty.ptrSize()) { .Slice, .Many, .C => return, .One => { const elem_ty = ty.childType(); - if (elem_ty.zigTypeTag() == .Array) return; + if (elem_ty.zigTypeTag(mod) == .Array) return; // TODO https://github.com/ziglang/zig/issues/15479 // if (elem_ty.isTuple()) return; }, @@ -31270,7 +31347,7 @@ fn resolveUnionLayout(sema: *Sema, ty: Type) CompileError!void { union_obj.status = .have_layout; _ = try sema.resolveTypeRequiresComptime(resolved_ty); - if (union_obj.assumed_runtime_bits and !resolved_ty.hasRuntimeBits()) { + if (union_obj.assumed_runtime_bits and !(try sema.typeHasRuntimeBits(resolved_ty))) { const msg = try Module.ErrorMsg.create( sema.gpa, union_obj.srcLoc(sema.mod), @@ -31285,6 +31362,23 @@ fn resolveUnionLayout(sema: *Sema, ty: Type) CompileError!void { // for hasRuntimeBits() of each field, so we need "requires comptime" // to be known already before this function returns. pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { + const mod = sema.mod; + + if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .int_type => return false, + .ptr_type => @panic("TODO"), + .array_type => @panic("TODO"), + .vector_type => @panic("TODO"), + .optional_type => @panic("TODO"), + .error_union_type => @panic("TODO"), + .simple_type => @panic("TODO"), + .struct_type => @panic("TODO"), + .simple_value => unreachable, + .extern_func => unreachable, + .int => unreachable, + .enum_tag => unreachable, // it's a value, not a type + }; + return switch (ty.tag()) { .u1, .u8, @@ -31349,8 +31443,6 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .generic_poison, .array_u8, .array_u8_sentinel_0, - .int_signed, - .int_unsigned, .enum_simple, => false, @@ -31360,11 +31452,6 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .comptime_float, .enum_literal, .type_info, - // These are function bodies, not function pointers. 
- .fn_noreturn_no_args, - .fn_void_no_args, - .fn_naked_noreturn_no_args, - .fn_ccc_void_no_args, .function, => true, @@ -31387,7 +31474,7 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .mut_slice, => { const child_ty = ty.childType(); - if (child_ty.zigTypeTag() == .Fn) { + if (child_ty.zigTypeTag(mod) == .Fn) { return child_ty.fnInfo().is_generic; } else { return sema.resolveTypeRequiresComptime(child_ty); @@ -31474,7 +31561,8 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { /// Returns `error.AnalysisFail` if any of the types (recursively) failed to /// be resolved. pub fn resolveTypeFully(sema: *Sema, ty: Type) CompileError!void { - switch (ty.zigTypeTag()) { + const mod = sema.mod; + switch (ty.zigTypeTag(mod)) { .Pointer => { const child_ty = try sema.resolveTypeFields(ty.childType()); return sema.resolveTypeFully(child_ty); @@ -31840,7 +31928,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void type_body_len: u32 = 0, align_body_len: u32 = 0, init_body_len: u32 = 0, - type_ref: Air.Inst.Ref = .none, + type_ref: Zir.Inst.Ref = .none, }; const fields = try sema.arena.alloc(Field, fields_len); var any_inits = false; @@ -31967,7 +32055,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void const field = &struct_obj.fields.values()[field_i]; field.ty = try field_ty.copy(decl_arena_allocator); - if (field_ty.zigTypeTag() == .Opaque) { + if (field_ty.zigTypeTag(mod) == .Opaque) { const msg = msg: { const ty_src = struct_obj.fieldSrcLoc(sema.mod, .{ .index = field_i, @@ -31981,7 +32069,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void }; return sema.failWithOwnedErrorMsg(msg); } - if (field_ty.zigTypeTag() == .NoReturn) { + if (field_ty.zigTypeTag(mod) == .NoReturn) { const msg = msg: { const ty_src = struct_obj.fieldSrcLoc(sema.mod, .{ .index = field_i, @@ -32010,7 +32098,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); - } else if (struct_obj.layout == .Packed and !(validatePackedType(field.ty))) { + } else if (struct_obj.layout == .Packed and !(validatePackedType(field.ty, mod))) { const msg = msg: { const ty_src = struct_obj.fieldSrcLoc(sema.mod, .{ .index = field_i, @@ -32191,7 +32279,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { if (small.auto_enum_tag) { // The provided type is an integer type and we must construct the enum tag type here. int_tag_ty = provided_ty; - if (int_tag_ty.zigTypeTag() != .Int and int_tag_ty.zigTypeTag() != .ComptimeInt) { + if (int_tag_ty.zigTypeTag(mod) != .Int and int_tag_ty.zigTypeTag(mod) != .ComptimeInt) { return sema.fail(&block_scope, tag_ty_src, "expected integer tag type, found '{}'", .{int_tag_ty.fmt(sema.mod)}); } @@ -32220,7 +32308,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { } else { // The provided type is the enum tag type. union_obj.tag_ty = try provided_ty.copy(decl_arena_allocator); - if (union_obj.tag_ty.zigTypeTag() != .Enum) { + if (union_obj.tag_ty.zigTypeTag(mod) != .Enum) { return sema.fail(&block_scope, tag_ty_src, "expected enum tag type, found '{}'", .{union_obj.tag_ty.fmt(sema.mod)}); } // The fields of the union must match the enum exactly. 
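The two branches above correspond to the two source forms of a tagged union; a minimal sketch of each (hypothetical types, not taken from this change):

    const E = enum { a, b };
    const U1 = union(E) { a: u32, b: void }; // provided type is the enum tag type
    const U2 = union(enum(u8)) { a: u32, b: void }; // integer type; the tag enum is constructed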
@@ -32281,7 +32369,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { break :blk align_ref; } else .none; - const tag_ref: Zir.Inst.Ref = if (has_tag) blk: { + const tag_ref: Air.Inst.Ref = if (has_tag) blk: { const tag_ref = @intToEnum(Zir.Inst.Ref, zir.extra[extra_index]); extra_index += 1; break :blk try sema.resolveInst(tag_ref); @@ -32391,7 +32479,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { } } - if (field_ty.zigTypeTag() == .Opaque) { + if (field_ty.zigTypeTag(mod) == .Opaque) { const msg = msg: { const ty_src = union_obj.fieldSrcLoc(sema.mod, .{ .index = field_i, @@ -32420,7 +32508,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); - } else if (union_obj.layout == .Packed and !(validatePackedType(field_ty))) { + } else if (union_obj.layout == .Packed and !(validatePackedType(field_ty, mod))) { const msg = msg: { const ty_src = union_obj.fieldSrcLoc(sema.mod, .{ .index = field_i, @@ -32673,6 +32761,29 @@ fn getBuiltinType(sema: *Sema, name: []const u8) CompileError!Type { /// that the types are already resolved. /// TODO assert the return value matches `ty.onePossibleValue` pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { + const mod = sema.mod; + + if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .int_type => |int_type| { + if (int_type.bits == 0) { + return Value.zero; + } else { + return null; + } + }, + .ptr_type => @panic("TODO"), + .array_type => @panic("TODO"), + .vector_type => @panic("TODO"), + .optional_type => @panic("TODO"), + .error_union_type => @panic("TODO"), + .simple_type => @panic("TODO"), + .struct_type => @panic("TODO"), + .simple_value => unreachable, + .extern_func => unreachable, + .int => unreachable, + .enum_tag => unreachable, // it's a value, not a type + }; + switch (ty.tag()) { .f16, .f32, @@ -32712,10 +32823,6 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .error_set, .error_set_merged, .error_union, - .fn_noreturn_no_args, - .fn_void_no_args, - .fn_naked_noreturn_no_args, - .fn_ccc_void_no_args, .function, .single_const_pointer_to_comptime_int, .array_sentinel, @@ -32803,7 +32910,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { const resolved_ty = try sema.resolveTypeFields(ty); const enum_obj = resolved_ty.castTag(.enum_numbered).?.data; // An explicit tag type is always provided for enum_numbered. 
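 // Only a zero-bit tag type can make such an enum single-valued: e.g.
 // (illustrative) `const E = enum(u0) { only };` has exactly one possible
 // value, while a tag type with runtime bits can represent more than one
 // tag, which is why the check below returns null in that case.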
- if (enum_obj.tag_ty.hasRuntimeBits()) {
+ if (try sema.typeHasRuntimeBits(enum_obj.tag_ty)) {
 return null;
 }
 if (enum_obj.fields.count() == 1) {
@@ -32819,7 +32926,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
 .enum_full => {
 const resolved_ty = try sema.resolveTypeFields(ty);
 const enum_obj = resolved_ty.castTag(.enum_full).?.data;
- if (enum_obj.tag_ty.hasRuntimeBits()) {
+ if (try sema.typeHasRuntimeBits(enum_obj.tag_ty)) {
 return null;
 }
 switch (enum_obj.fields.count()) {
@@ -32843,7 +32950,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
 },
 .enum_nonexhaustive => {
 const tag_ty = ty.castTag(.enum_nonexhaustive).?.data.tag_ty;
- if (tag_ty.zigTypeTag() != .ComptimeInt and !(try sema.typeHasRuntimeBits(tag_ty))) {
+ if (tag_ty.zigTypeTag(mod) != .ComptimeInt and !(try sema.typeHasRuntimeBits(tag_ty))) {
 return Value.zero;
 } else {
 return null;
@@ -32883,13 +32990,6 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
 .null => return Value.null,
 .undefined => return Value.initTag(.undef),
- .int_unsigned, .int_signed => {
- if (ty.cast(Type.Payload.Bits).?.data == 0) {
- return Value.zero;
- } else {
- return null;
- }
- },
 .vector, .array, .array_u8 => {
 if (ty.arrayLen() == 0)
 return Value.initTag(.empty_array);
@@ -32919,6 +33019,89 @@ pub fn getTmpAir(sema: Sema) Air {
 }
 pub fn addType(sema: *Sema, ty: Type) !Air.Inst.Ref {
+ switch (ty.ip_index) {
+ .u1_type => return .u1_type,
+ .u8_type => return .u8_type,
+ .i8_type => return .i8_type,
+ .u16_type => return .u16_type,
+ .i16_type => return .i16_type,
+ .u29_type => return .u29_type,
+ .u32_type => return .u32_type,
+ .i32_type => return .i32_type,
+ .u64_type => return .u64_type,
+ .i64_type => return .i64_type,
+ .u80_type => return .u80_type,
+ .u128_type => return .u128_type,
+ .i128_type => return .i128_type,
+ .usize_type => return .usize_type,
+ .isize_type => return .isize_type,
+ .c_char_type => return .c_char_type,
+ .c_short_type => return .c_short_type,
+ .c_ushort_type => return .c_ushort_type,
+ .c_int_type => return .c_int_type,
+ .c_uint_type => return .c_uint_type,
+ .c_long_type => return .c_long_type,
+ .c_ulong_type => return .c_ulong_type,
+ .c_longlong_type => return .c_longlong_type,
+ .c_ulonglong_type => return .c_ulonglong_type,
+ .c_longdouble_type => return .c_longdouble_type,
+ .f16_type => return .f16_type,
+ .f32_type => return .f32_type,
+ .f64_type => return .f64_type,
+ .f80_type => return .f80_type,
+ .f128_type => return .f128_type,
+ .anyopaque_type => return .anyopaque_type,
+ .bool_type => return .bool_type,
+ .void_type => return .void_type,
+ .type_type => return .type_type,
+ .anyerror_type => return .anyerror_type,
+ .comptime_int_type => return .comptime_int_type,
+ .comptime_float_type => return .comptime_float_type,
+ .noreturn_type => return .noreturn_type,
+ .anyframe_type => return .anyframe_type,
+ .null_type => return .null_type,
+ .undefined_type => return .undefined_type,
+ .enum_literal_type => return .enum_literal_type,
+ .atomic_order_type => return .atomic_order_type,
+ .atomic_rmw_op_type => return .atomic_rmw_op_type,
+ .calling_convention_type => return .calling_convention_type,
+ .address_space_type => return .address_space_type,
+ .float_mode_type => return .float_mode_type,
+ .reduce_op_type => return .reduce_op_type,
+ .call_modifier_type => return .call_modifier_type,
+ .prefetch_options_type => return .prefetch_options_type,
+ .export_options_type => return 
.export_options_type, + .extern_options_type => return .extern_options_type, + .type_info_type => return .type_info_type, + .manyptr_u8_type => return .manyptr_u8_type, + .manyptr_const_u8_type => return .manyptr_const_u8_type, + .single_const_pointer_to_comptime_int_type => return .single_const_pointer_to_comptime_int_type, + .const_slice_u8_type => return .const_slice_u8_type, + .anyerror_void_error_union_type => return .anyerror_void_error_union_type, + .generic_poison_type => return .generic_poison_type, + .var_args_param_type => return .var_args_param_type, + .empty_struct_type => return .empty_struct_type, + + // values + .undef => unreachable, + .zero => unreachable, + .zero_usize => unreachable, + .one => unreachable, + .one_usize => unreachable, + .calling_convention_c => unreachable, + .calling_convention_inline => unreachable, + .void_value => unreachable, + .unreachable_value => unreachable, + .null_value => unreachable, + .bool_true => unreachable, + .bool_false => unreachable, + .empty_struct => unreachable, + .generic_poison => unreachable, + + _ => {}, + + .none => unreachable, + } switch (ty.tag()) { .u1 => return .u1_type, .u8 => return .u8_type, @@ -32934,6 +33117,7 @@ pub fn addType(sema: *Sema, ty: Type) !Air.Inst.Ref { .i128 => return .i128_type, .usize => return .usize_type, .isize => return .isize_type, + .c_char => return .c_char_type, .c_short => return .c_short_type, .c_ushort => return .c_ushort_type, .c_int => return .c_int_type, @@ -32966,17 +33150,13 @@ pub fn addType(sema: *Sema, ty: Type) !Air.Inst.Ref { .address_space => return .address_space_type, .float_mode => return .float_mode_type, .reduce_op => return .reduce_op_type, - .modifier => return .modifier_type, + .modifier => return .call_modifier_type, .prefetch_options => return .prefetch_options_type, .export_options => return .export_options_type, .extern_options => return .extern_options_type, .type_info => return .type_info_type, .manyptr_u8 => return .manyptr_u8_type, .manyptr_const_u8 => return .manyptr_const_u8_type, - .fn_noreturn_no_args => return .fn_noreturn_no_args_type, - .fn_void_no_args => return .fn_void_no_args_type, - .fn_naked_noreturn_no_args => return .fn_naked_noreturn_no_args_type, - .fn_ccc_void_no_args => return .fn_ccc_void_no_args_type, .single_const_pointer_to_comptime_int => return .single_const_pointer_to_comptime_int_type, .const_slice_u8 => return .const_slice_u8_type, .anyerror_void_error_union => return .anyerror_void_error_union_type, @@ -33186,7 +33366,8 @@ const DerefResult = union(enum) { }; fn pointerDerefExtra(sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value, load_ty: Type, want_mutable: bool) CompileError!DerefResult { - const target = sema.mod.getTarget(); + const mod = sema.mod; + const target = mod.getTarget(); const deref = sema.beginComptimePtrLoad(block, src, ptr_val, load_ty) catch |err| switch (err) { error.RuntimeLoad => return DerefResult{ .runtime_load = {} }, else => |e| return e, @@ -33211,7 +33392,7 @@ fn pointerDerefExtra(sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value // The type is not in-memory coercible or the direct dereference failed, so it must // be bitcast according to the pointer type we are performing the load through. 
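 // A bitcast-style load only makes sense when the destination type has a
 // well-defined in-memory layout: e.g. (illustrative) integers, packed and
 // extern structs qualify, while an auto-layout `struct { a: u8 }` does not,
 // and the `needed_well_defined` result below reports exactly that case.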
- if (!load_ty.hasWellDefinedLayout()) { + if (!load_ty.hasWellDefinedLayout(mod)) { return DerefResult{ .needed_well_defined = load_ty }; } @@ -33253,6 +33434,7 @@ fn typePtrOrOptionalPtrTy( ty: Type, buf: *Type.Payload.ElemType, ) !?Type { + const mod = sema.mod; switch (ty.tag()) { .optional_single_const_pointer, .optional_single_mut_pointer, @@ -33281,7 +33463,7 @@ fn typePtrOrOptionalPtrTy( .optional => { const child_type = ty.optionalChild(buf); - if (child_type.zigTypeTag() != .Pointer) return null; + if (child_type.zigTypeTag(mod) != .Pointer) return null; const info = child_type.ptrInfo().data; switch (info.size) { @@ -33310,6 +33492,23 @@ fn typePtrOrOptionalPtrTy( /// TODO merge these implementations together with the "advanced"/opt_sema pattern seen /// elsewhere in value.zig pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { + const mod = sema.mod; + if (ty.ip_index != .none) { + switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .int_type => return false, + .ptr_type => @panic("TODO"), + .array_type => @panic("TODO"), + .vector_type => @panic("TODO"), + .optional_type => @panic("TODO"), + .error_union_type => @panic("TODO"), + .simple_type => @panic("TODO"), + .struct_type => @panic("TODO"), + .simple_value => unreachable, + .extern_func => unreachable, + .int => unreachable, + .enum_tag => unreachable, // it's a value, not a type + } + } return switch (ty.tag()) { .u1, .u8, @@ -33374,8 +33573,6 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .generic_poison, .array_u8, .array_u8_sentinel_0, - .int_signed, - .int_unsigned, .enum_simple, => false, @@ -33385,11 +33582,6 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .comptime_float, .enum_literal, .type_info, - // These are function bodies, not function pointers. - .fn_noreturn_no_args, - .fn_void_no_args, - .fn_naked_noreturn_no_args, - .fn_ccc_void_no_args, .function, => true, @@ -33412,7 +33604,7 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .mut_slice, => { const child_ty = ty.childType(); - if (child_ty.zigTypeTag() == .Fn) { + if (child_ty.zigTypeTag(mod) == .Fn) { return child_ty.fnInfo().is_generic; } else { return sema.typeRequiresComptime(child_ty); @@ -33504,7 +33696,8 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { } pub fn typeHasRuntimeBits(sema: *Sema, ty: Type) CompileError!bool { - return ty.hasRuntimeBitsAdvanced(false, .{ .sema = sema }) catch |err| switch (err) { + const mod = sema.mod; + return ty.hasRuntimeBitsAdvanced(mod, false, .{ .sema = sema }) catch |err| switch (err) { error.NeedLazy => unreachable, else => |e| return e, }; @@ -33512,19 +33705,18 @@ pub fn typeHasRuntimeBits(sema: *Sema, ty: Type) CompileError!bool { fn typeAbiSize(sema: *Sema, ty: Type) !u64 { try sema.resolveTypeLayout(ty); - const target = sema.mod.getTarget(); - return ty.abiSize(target); + return ty.abiSize(sema.mod); } fn typeAbiAlignment(sema: *Sema, ty: Type) CompileError!u32 { - const target = sema.mod.getTarget(); - return (try ty.abiAlignmentAdvanced(target, .{ .sema = sema })).scalar; + return (try ty.abiAlignmentAdvanced(sema.mod, .{ .sema = sema })).scalar; } /// Not valid to call for packed unions. /// Keep implementation in sync with `Module.Union.Field.normalAlignment`. 
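/// As a summary of the body that follows: a `noreturn` field contributes
/// alignment 0, a field with `abi_align == 0` uses the natural ABI alignment
/// of its type, and an explicitly aligned field uses its `align(N)` value.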
fn unionFieldAlignment(sema: *Sema, field: Module.Union.Field) !u32 { - if (field.ty.zigTypeTag() == .NoReturn) { + const mod = sema.mod; + if (field.ty.zigTypeTag(mod) == .NoReturn) { return @as(u32, 0); } else if (field.abi_align == 0) { return sema.typeAbiAlignment(field.ty); @@ -33605,13 +33797,14 @@ fn queueFullTypeResolution(sema: *Sema, ty: Type) !void { } fn intAdd(sema: *Sema, lhs: Value, rhs: Value, ty: Type) !Value { - if (ty.zigTypeTag() == .Vector) { + const mod = sema.mod; + if (ty.zigTypeTag(mod) == .Vector) { const result_data = try sema.arena.alloc(Value, ty.vectorLen()); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf); + const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); + const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); scalar.* = try sema.intAddScalar(lhs_elem, rhs_elem); } return Value.Tag.aggregate.create(sema.arena, result_data); @@ -33620,13 +33813,13 @@ fn intAdd(sema: *Sema, lhs: Value, rhs: Value, ty: Type) !Value { } fn intAddScalar(sema: *Sema, lhs: Value, rhs: Value) !Value { + const mod = sema.mod; // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const target = sema.mod.getTarget(); - const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, target, sema); - const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, target, sema); + const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, sema); + const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, sema); const limbs = try sema.arena.alloc( std.math.big.Limb, std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1, @@ -33645,7 +33838,8 @@ fn numberAddWrapScalar( ) !Value { if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef); - if (ty.zigTypeTag() == .ComptimeInt) { + const mod = sema.mod; + if (ty.zigTypeTag(mod) == .ComptimeInt) { return sema.intAdd(lhs, rhs, ty); } @@ -33663,7 +33857,8 @@ fn intSub( rhs: Value, ty: Type, ) !Value { - if (ty.zigTypeTag() == .Vector) { + const mod = sema.mod; + if (ty.zigTypeTag(mod) == .Vector) { const result_data = try sema.arena.alloc(Value, ty.vectorLen()); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; @@ -33678,13 +33873,13 @@ fn intSub( } fn intSubScalar(sema: *Sema, lhs: Value, rhs: Value) !Value { + const mod = sema.mod; // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. 
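 // One possible shape for the fast path suggested by the TODO above (a
 // commented-out sketch, assuming both operands carry the small `.int_u64`
 // payload used elsewhere in this file):
 //
 //     if (lhs.castTag(.int_u64)) |l| {
 //         if (rhs.castTag(.int_u64)) |r| {
 //             if (l.data >= r.data)
 //                 return try Value.Tag.int_u64.create(sema.arena, l.data - r.data);
 //         }
 //     }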
var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const target = sema.mod.getTarget(); - const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, target, sema); - const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, target, sema); + const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, sema); + const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, sema); const limbs = try sema.arena.alloc( std.math.big.Limb, std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1, @@ -33703,7 +33898,8 @@ fn numberSubWrapScalar( ) !Value { if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef); - if (ty.zigTypeTag() == .ComptimeInt) { + const mod = sema.mod; + if (ty.zigTypeTag(mod) == .ComptimeInt) { return sema.intSub(lhs, rhs, ty); } @@ -33721,14 +33917,15 @@ fn floatAdd( rhs: Value, float_type: Type, ) !Value { - if (float_type.zigTypeTag() == .Vector) { + const mod = sema.mod; + if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try sema.arena.alloc(Value, float_type.vectorLen()); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf); - scalar.* = try sema.floatAddScalar(lhs_elem, rhs_elem, float_type.scalarType()); + scalar.* = try sema.floatAddScalar(lhs_elem, rhs_elem, float_type.scalarType(mod)); } return Value.Tag.aggregate.create(sema.arena, result_data); } @@ -33778,14 +33975,15 @@ fn floatSub( rhs: Value, float_type: Type, ) !Value { - if (float_type.zigTypeTag() == .Vector) { + const mod = sema.mod; + if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try sema.arena.alloc(Value, float_type.vectorLen()); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf); - scalar.* = try sema.floatSubScalar(lhs_elem, rhs_elem, float_type.scalarType()); + scalar.* = try sema.floatSubScalar(lhs_elem, rhs_elem, float_type.scalarType(mod)); } return Value.Tag.aggregate.create(sema.arena, result_data); } @@ -33835,7 +34033,8 @@ fn intSubWithOverflow( rhs: Value, ty: Type, ) !Value.OverflowArithmeticResult { - if (ty.zigTypeTag() == .Vector) { + const mod = sema.mod; + if (ty.zigTypeTag(mod) == .Vector) { const overflowed_data = try sema.arena.alloc(Value, ty.vectorLen()); const result_data = try sema.arena.alloc(Value, ty.vectorLen()); for (result_data, 0..) 
|*scalar, i| { @@ -33843,7 +34042,7 @@ fn intSubWithOverflow( var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf); - const of_math_result = try sema.intSubWithOverflowScalar(lhs_elem, rhs_elem, ty.scalarType()); + const of_math_result = try sema.intSubWithOverflowScalar(lhs_elem, rhs_elem, ty.scalarType(mod)); overflowed_data[i] = of_math_result.overflow_bit; scalar.* = of_math_result.wrapped_result; } @@ -33861,13 +34060,13 @@ fn intSubWithOverflowScalar( rhs: Value, ty: Type, ) !Value.OverflowArithmeticResult { - const target = sema.mod.getTarget(); - const info = ty.intInfo(target); + const mod = sema.mod; + const info = ty.intInfo(mod); var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, target, sema); - const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, target, sema); + const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, sema); + const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, sema); const limbs = try sema.arena.alloc( std.math.big.Limb, std.math.big.int.calcTwosCompLimbCount(info.bits), @@ -33889,13 +34088,14 @@ fn floatToInt( float_ty: Type, int_ty: Type, ) CompileError!Value { - if (float_ty.zigTypeTag() == .Vector) { + const mod = sema.mod; + if (float_ty.zigTypeTag(mod) == .Vector) { const elem_ty = float_ty.childType(); const result_data = try sema.arena.alloc(Value, float_ty.vectorLen()); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(sema.mod, i, &buf); - scalar.* = try sema.floatToIntScalar(block, src, elem_val, elem_ty, int_ty.scalarType()); + scalar.* = try sema.floatToIntScalar(block, src, elem_val, elem_ty, int_ty.scalarType(mod)); } return Value.Tag.aggregate.create(sema.arena, result_data); } @@ -33976,7 +34176,8 @@ fn intFitsInType( ty: Type, vector_index: ?*usize, ) CompileError!bool { - const target = sema.mod.getTarget(); + const mod = sema.mod; + const target = mod.getTarget(); switch (val.tag()) { .zero, .undef, @@ -33985,9 +34186,9 @@ fn intFitsInType( .one, .bool_true, - => switch (ty.zigTypeTag()) { + => switch (ty.zigTypeTag(mod)) { .Int => { - const info = ty.intInfo(target); + const info = ty.intInfo(mod); return switch (info.signedness) { .signed => info.bits >= 2, .unsigned => info.bits >= 1, @@ -33997,9 +34198,9 @@ fn intFitsInType( else => unreachable, }, - .lazy_align => switch (ty.zigTypeTag()) { + .lazy_align => switch (ty.zigTypeTag(mod)) { .Int => { - const info = ty.intInfo(target); + const info = ty.intInfo(mod); const max_needed_bits = @as(u16, 16) + @boolToInt(info.signedness == .signed); // If it is u16 or bigger we know the alignment fits without resolving it. if (info.bits >= max_needed_bits) return true; @@ -34011,9 +34212,9 @@ fn intFitsInType( .ComptimeInt => return true, else => unreachable, }, - .lazy_size => switch (ty.zigTypeTag()) { + .lazy_size => switch (ty.zigTypeTag(mod)) { .Int => { - const info = ty.intInfo(target); + const info = ty.intInfo(mod); const max_needed_bits = @as(u16, 64) + @boolToInt(info.signedness == .signed); // If it is u64 or bigger we know the size fits without resolving it. 
if (info.bits >= max_needed_bits) return true; @@ -34026,41 +34227,41 @@ fn intFitsInType( else => unreachable, }, - .int_u64 => switch (ty.zigTypeTag()) { + .int_u64 => switch (ty.zigTypeTag(mod)) { .Int => { const x = val.castTag(.int_u64).?.data; if (x == 0) return true; - const info = ty.intInfo(target); + const info = ty.intInfo(mod); const needed_bits = std.math.log2(x) + 1 + @boolToInt(info.signedness == .signed); return info.bits >= needed_bits; }, .ComptimeInt => return true, else => unreachable, }, - .int_i64 => switch (ty.zigTypeTag()) { + .int_i64 => switch (ty.zigTypeTag(mod)) { .Int => { const x = val.castTag(.int_i64).?.data; if (x == 0) return true; - const info = ty.intInfo(target); + const info = ty.intInfo(mod); if (info.signedness == .unsigned and x < 0) return false; var buffer: Value.BigIntSpace = undefined; - return (try val.toBigIntAdvanced(&buffer, target, sema)).fitsInTwosComp(info.signedness, info.bits); + return (try val.toBigIntAdvanced(&buffer, mod, sema)).fitsInTwosComp(info.signedness, info.bits); }, .ComptimeInt => return true, else => unreachable, }, - .int_big_positive => switch (ty.zigTypeTag()) { + .int_big_positive => switch (ty.zigTypeTag(mod)) { .Int => { - const info = ty.intInfo(target); + const info = ty.intInfo(mod); return val.castTag(.int_big_positive).?.asBigInt().fitsInTwosComp(info.signedness, info.bits); }, .ComptimeInt => return true, else => unreachable, }, - .int_big_negative => switch (ty.zigTypeTag()) { + .int_big_negative => switch (ty.zigTypeTag(mod)) { .Int => { - const info = ty.intInfo(target); + const info = ty.intInfo(mod); return val.castTag(.int_big_negative).?.asBigInt().fitsInTwosComp(info.signedness, info.bits); }, .ComptimeInt => return true, @@ -34068,7 +34269,7 @@ fn intFitsInType( }, .the_only_possible_value => { - assert(ty.intInfo(target).bits == 0); + assert(ty.intInfo(mod).bits == 0); return true; }, @@ -34077,9 +34278,9 @@ fn intFitsInType( .decl_ref, .function, .variable, - => switch (ty.zigTypeTag()) { + => switch (ty.zigTypeTag(mod)) { .Int => { - const info = ty.intInfo(target); + const info = ty.intInfo(mod); const ptr_bits = target.ptrBitWidth(); return switch (info.signedness) { .signed => info.bits > ptr_bits, @@ -34091,9 +34292,9 @@ fn intFitsInType( }, .aggregate => { - assert(ty.zigTypeTag() == .Vector); + assert(ty.zigTypeTag(mod) == .Vector); for (val.castTag(.aggregate).?.data, 0..) |elem, i| { - if (!(try sema.intFitsInType(elem, ty.scalarType(), null))) { + if (!(try sema.intFitsInType(elem, ty.scalarType(mod), null))) { if (vector_index) |some| some.* = i; return false; } @@ -34122,11 +34323,8 @@ fn intInRange( } /// Asserts the type is an enum. 
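/// For `enum_simple` (no explicit tag type) the tag is modeled below as the
/// smallest unsigned integer that can index the fields (e.g., illustratively,
/// five fields infer `u3`), and the int must also be less than the field count.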
-fn enumHasInt( - sema: *Sema, - ty: Type, - int: Value, -) CompileError!bool { +fn enumHasInt(sema: *Sema, ty: Type, int: Value) CompileError!bool { + const mod = sema.mod; switch (ty.tag()) { .enum_nonexhaustive => unreachable, .enum_full => { @@ -34157,11 +34355,7 @@ fn enumHasInt( const enum_simple = ty.castTag(.enum_simple).?.data; const fields_len = enum_simple.fields.count(); const bits = std.math.log2_int_ceil(usize, fields_len); - var buffer: Type.Payload.Bits = .{ - .base = .{ .tag = .int_unsigned }, - .data = bits, - }; - const tag_ty = Type.initPayload(&buffer.base); + const tag_ty = try mod.intType(.unsigned, bits); return sema.intInRange(tag_ty, int, fields_len); }, .atomic_order, @@ -34186,7 +34380,8 @@ fn intAddWithOverflow( rhs: Value, ty: Type, ) !Value.OverflowArithmeticResult { - if (ty.zigTypeTag() == .Vector) { + const mod = sema.mod; + if (ty.zigTypeTag(mod) == .Vector) { const overflowed_data = try sema.arena.alloc(Value, ty.vectorLen()); const result_data = try sema.arena.alloc(Value, ty.vectorLen()); for (result_data, 0..) |*scalar, i| { @@ -34194,7 +34389,7 @@ fn intAddWithOverflow( var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf); - const of_math_result = try sema.intAddWithOverflowScalar(lhs_elem, rhs_elem, ty.scalarType()); + const of_math_result = try sema.intAddWithOverflowScalar(lhs_elem, rhs_elem, ty.scalarType(mod)); overflowed_data[i] = of_math_result.overflow_bit; scalar.* = of_math_result.wrapped_result; } @@ -34212,13 +34407,13 @@ fn intAddWithOverflowScalar( rhs: Value, ty: Type, ) !Value.OverflowArithmeticResult { - const target = sema.mod.getTarget(); - const info = ty.intInfo(target); + const mod = sema.mod; + const info = ty.intInfo(mod); var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, target, sema); - const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, target, sema); + const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, sema); + const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, sema); const limbs = try sema.arena.alloc( std.math.big.Limb, std.math.big.int.calcTwosCompLimbCount(info.bits), @@ -34243,14 +34438,15 @@ fn compareAll( rhs: Value, ty: Type, ) CompileError!bool { - if (ty.zigTypeTag() == .Vector) { + const mod = sema.mod; + if (ty.zigTypeTag(mod) == .Vector) { var i: usize = 0; while (i < ty.vectorLen()) : (i += 1) { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf); - if (!(try sema.compareScalar(lhs_elem, op, rhs_elem, ty.scalarType()))) { + if (!(try sema.compareScalar(lhs_elem, op, rhs_elem, ty.scalarType(mod)))) { return false; } } @@ -34270,7 +34466,7 @@ fn compareScalar( switch (op) { .eq => return sema.valuesEqual(lhs, rhs, ty), .neq => return !(try sema.valuesEqual(lhs, rhs, ty)), - else => return Value.compareHeteroAdvanced(lhs, op, rhs, sema.mod.getTarget(), sema), + else => return Value.compareHeteroAdvanced(lhs, op, rhs, sema.mod, sema), } } @@ -34291,14 +34487,15 @@ fn compareVector( rhs: Value, ty: Type, ) !Value { - assert(ty.zigTypeTag() == .Vector); + const mod = sema.mod; + assert(ty.zigTypeTag(mod) == .Vector); const result_data = try sema.arena.alloc(Value, ty.vectorLen()); for (result_data, 0..) 
|*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf); - const res_bool = try sema.compareScalar(lhs_elem, op, rhs_elem, ty.scalarType()); + const res_bool = try sema.compareScalar(lhs_elem, op, rhs_elem, ty.scalarType(mod)); scalar.* = Value.makeBool(res_bool); } return Value.Tag.aggregate.create(sema.arena, result_data); @@ -34312,10 +34509,10 @@ fn compareVector( /// Handles const-ness and address spaces in particular. /// This code is duplicated in `analyzePtrArithmetic`. fn elemPtrType(sema: *Sema, ptr_ty: Type, offset: ?usize) !Type { + const mod = sema.mod; const ptr_info = ptr_ty.ptrInfo().data; - const elem_ty = ptr_ty.elemType2(); + const elem_ty = ptr_ty.elemType2(mod); const allow_zero = ptr_info.@"allowzero" and (offset orelse 0) == 0; - const target = sema.mod.getTarget(); const parent_ty = ptr_ty.childType(); const VI = Type.Payload.Pointer.Data.VectorIndex; @@ -34325,14 +34522,14 @@ fn elemPtrType(sema: *Sema, ptr_ty: Type, offset: ?usize) !Type { alignment: u32 = 0, vector_index: VI = .none, } = if (parent_ty.tag() == .vector and ptr_info.size == .One) blk: { - const elem_bits = elem_ty.bitSize(target); + const elem_bits = elem_ty.bitSize(mod); if (elem_bits == 0) break :blk .{}; const is_packed = elem_bits < 8 or !std.math.isPowerOfTwo(elem_bits); if (!is_packed) break :blk .{}; break :blk .{ .host_size = @intCast(u16, parent_ty.arrayLen()), - .alignment = @intCast(u16, parent_ty.abiAlignment(target)), + .alignment = @intCast(u16, parent_ty.abiAlignment(mod)), .vector_index = if (offset) |some| @intToEnum(VI, some) else .runtime, }; } else .{}; diff --git a/src/TypedValue.zig b/src/TypedValue.zig index d74fbda93e..dc556942c3 100644 --- a/src/TypedValue.zig +++ b/src/TypedValue.zig @@ -71,7 +71,6 @@ pub fn print( level: u8, mod: *Module, ) @TypeOf(writer).Error!void { - const target = mod.getTarget(); var val = tv.val; var ty = tv.ty; if (val.isVariable(mod)) @@ -117,10 +116,6 @@ pub fn print( .noreturn_type => return writer.writeAll("noreturn"), .null_type => return writer.writeAll("@Type(.Null)"), .undefined_type => return writer.writeAll("@Type(.Undefined)"), - .fn_noreturn_no_args_type => return writer.writeAll("fn() noreturn"), - .fn_void_no_args_type => return writer.writeAll("fn() void"), - .fn_naked_noreturn_no_args_type => return writer.writeAll("fn() callconv(.Naked) noreturn"), - .fn_ccc_void_no_args_type => return writer.writeAll("fn() callconv(.C) void"), .single_const_pointer_to_comptime_int_type => return writer.writeAll("*const comptime_int"), .anyframe_type => return writer.writeAll("anyframe"), .const_slice_u8_type => return writer.writeAll("[]const u8"), @@ -147,7 +142,7 @@ pub fn print( if (level == 0) { return writer.writeAll(".{ ... 
}"); } - if (ty.zigTypeTag() == .Struct) { + if (ty.zigTypeTag(mod) == .Struct) { try writer.writeAll(".{"); const max_len = std.math.min(ty.structFieldCount(), max_aggregate_items); @@ -160,7 +155,7 @@ pub fn print( } try print(.{ .ty = ty.structFieldType(i), - .val = val.fieldValue(ty, i), + .val = val.fieldValue(ty, mod, i), }, writer, level - 1, mod); } if (ty.structFieldCount() > max_aggregate_items) { @@ -168,7 +163,7 @@ pub fn print( } return writer.writeAll("}"); } else { - const elem_ty = ty.elemType2(); + const elem_ty = ty.elemType2(mod); const len = ty.arrayLen(); if (elem_ty.eql(Type.u8, mod)) str: { @@ -177,9 +172,9 @@ pub fn print( var i: u32 = 0; while (i < max_len) : (i += 1) { - const elem = val.fieldValue(ty, i); + const elem = val.fieldValue(ty, mod, i); if (elem.isUndef()) break :str; - buf[i] = std.math.cast(u8, elem.toUnsignedInt(target)) orelse break :str; + buf[i] = std.math.cast(u8, elem.toUnsignedInt(mod)) orelse break :str; } const truncated = if (len > max_string_len) " (truncated)" else ""; @@ -194,7 +189,7 @@ pub fn print( if (i != 0) try writer.writeAll(", "); try print(.{ .ty = elem_ty, - .val = val.fieldValue(ty, i), + .val = val.fieldValue(ty, mod, i), }, writer, level - 1, mod); } if (len > max_aggregate_items) { @@ -232,25 +227,18 @@ pub fn print( .bool_true => return writer.writeAll("true"), .bool_false => return writer.writeAll("false"), .ty => return val.castTag(.ty).?.data.print(writer, mod), - .int_type => { - const int_type = val.castTag(.int_type).?.data; - return writer.print("{s}{d}", .{ - if (int_type.signed) "s" else "u", - int_type.bits, - }); - }, .int_u64 => return std.fmt.formatIntValue(val.castTag(.int_u64).?.data, "", .{}, writer), .int_i64 => return std.fmt.formatIntValue(val.castTag(.int_i64).?.data, "", .{}, writer), .int_big_positive => return writer.print("{}", .{val.castTag(.int_big_positive).?.asBigInt()}), .int_big_negative => return writer.print("{}", .{val.castTag(.int_big_negative).?.asBigInt()}), .lazy_align => { const sub_ty = val.castTag(.lazy_align).?.data; - const x = sub_ty.abiAlignment(target); + const x = sub_ty.abiAlignment(mod); return writer.print("{d}", .{x}); }, .lazy_size => { const sub_ty = val.castTag(.lazy_size).?.data; - const x = sub_ty.abiSize(target); + const x = sub_ty.abiSize(mod); return writer.print("{d}", .{x}); }, .function => return writer.print("(function '{s}')", .{ @@ -315,7 +303,7 @@ pub fn print( }, writer, level - 1, mod); } - if (field_ptr.container_ty.zigTypeTag() == .Struct) { + if (field_ptr.container_ty.zigTypeTag(mod) == .Struct) { switch (field_ptr.container_ty.tag()) { .tuple => return writer.print(".@\"{d}\"", .{field_ptr.field_index}), else => { @@ -323,7 +311,7 @@ pub fn print( return writer.print(".{s}", .{field_name}); }, } - } else if (field_ptr.container_ty.zigTypeTag() == .Union) { + } else if (field_ptr.container_ty.zigTypeTag(mod) == .Union) { const field_name = field_ptr.container_ty.unionFields().keys()[field_ptr.field_index]; return writer.print(".{s}", .{field_name}); } else if (field_ptr.container_ty.isSlice()) { @@ -352,7 +340,7 @@ pub fn print( var i: u32 = 0; try writer.writeAll(".{ "); const elem_tv = TypedValue{ - .ty = ty.elemType2(), + .ty = ty.elemType2(mod), .val = val.castTag(.repeated).?.data, }; const len = ty.arrayLen(); @@ -372,7 +360,7 @@ pub fn print( } try writer.writeAll(".{ "); try print(.{ - .ty = ty.elemType2(), + .ty = ty.elemType2(mod), .val = ty.sentinel().?, }, writer, level - 1, mod); return writer.writeAll(" }"); @@ -382,8 +370,8 @@ pub fn 
print( return writer.writeAll(".{ ... }");
 }
 const payload = val.castTag(.slice).?.data;
- const elem_ty = ty.elemType2();
- const len = payload.len.toUnsignedInt(target);
+ const elem_ty = ty.elemType2(mod);
+ const len = payload.len.toUnsignedInt(mod);
 if (elem_ty.eql(Type.u8, mod)) str: {
 const max_len = @intCast(usize, std.math.min(len, max_string_len));
 var buf: [max_string_len]u8 = undefined;
 var i: u32 = 0;
 while (i < max_len) : (i += 1) {
 var elem_buf: Value.ElemValueBuffer = undefined;
 const elem_val = payload.ptr.elemValueBuffer(mod, i, &elem_buf);
 if (elem_val.isUndef()) break :str;
- buf[i] = std.math.cast(u8, elem_val.toUnsignedInt(target)) orelse break :str;
+ buf[i] = std.math.cast(u8, elem_val.toUnsignedInt(mod)) orelse break :str;
 }
 // TODO would be nice if this had a bit of unicode awareness.
diff --git a/src/Zir.zig b/src/Zir.zig
index 2bd5b21f79..1063377fc7 100644
--- a/src/Zir.zig
+++ b/src/Zir.zig
@@ -19,6 +19,7 @@ const BigIntConst = std.math.big.int.Const;
 const BigIntMutable = std.math.big.int.Mutable;
 const Ast = std.zig.Ast;
+const InternPool = @import("InternPool.zig");
 const Zir = @This();
 const Type = @import("type.zig").Type;
 const Value = @import("value.zig").Value;
@@ -2041,448 +2042,95 @@ pub const Inst = struct {
 /// The position of a ZIR instruction within the `Zir` instructions array.
 pub const Index = u32;
- /// A reference to a TypedValue or ZIR instruction.
+ /// A reference to a ZIR instruction, or to an InternPool index, or neither.
 ///
- /// If the Ref has a tag in this enum, it refers to a TypedValue.
- ///
- /// If the value of a Ref does not have a tag, it refers to a ZIR instruction.
- ///
- /// The first values after the the last tag refer to ZIR instructions which may
- /// be derived by subtracting `typed_value_map.len`.
- ///
- /// When adding a tag to this enum, consider adding a corresponding entry to
- /// `primitives` in astgen.
+ /// If the integer tag value is < InternPool.static_len, then it
+ /// corresponds to an InternPool index. Otherwise, this refers to a ZIR
+ /// instruction.
 ///
 /// The tag type is specified so that it is safe to bitcast between `[]u32`
 /// and `[]Ref`. 
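 /// (An illustrative consequence of this encoding: with `ref_start_index`
 /// equal to `InternPool.static_len`, `indexToRef(i)` yields
 /// `@intToEnum(Ref, InternPool.static_len + i)`, so a Ref names a ZIR
 /// instruction exactly when its integer value is at least
 /// `InternPool.static_len` and it is not `none`.)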
pub const Ref = enum(u32) { + u1_type = @enumToInt(InternPool.Index.u1_type), + u8_type = @enumToInt(InternPool.Index.u8_type), + i8_type = @enumToInt(InternPool.Index.i8_type), + u16_type = @enumToInt(InternPool.Index.u16_type), + i16_type = @enumToInt(InternPool.Index.i16_type), + u29_type = @enumToInt(InternPool.Index.u29_type), + u32_type = @enumToInt(InternPool.Index.u32_type), + i32_type = @enumToInt(InternPool.Index.i32_type), + u64_type = @enumToInt(InternPool.Index.u64_type), + i64_type = @enumToInt(InternPool.Index.i64_type), + u80_type = @enumToInt(InternPool.Index.u80_type), + u128_type = @enumToInt(InternPool.Index.u128_type), + i128_type = @enumToInt(InternPool.Index.i128_type), + usize_type = @enumToInt(InternPool.Index.usize_type), + isize_type = @enumToInt(InternPool.Index.isize_type), + c_char_type = @enumToInt(InternPool.Index.c_char_type), + c_short_type = @enumToInt(InternPool.Index.c_short_type), + c_ushort_type = @enumToInt(InternPool.Index.c_ushort_type), + c_int_type = @enumToInt(InternPool.Index.c_int_type), + c_uint_type = @enumToInt(InternPool.Index.c_uint_type), + c_long_type = @enumToInt(InternPool.Index.c_long_type), + c_ulong_type = @enumToInt(InternPool.Index.c_ulong_type), + c_longlong_type = @enumToInt(InternPool.Index.c_longlong_type), + c_ulonglong_type = @enumToInt(InternPool.Index.c_ulonglong_type), + c_longdouble_type = @enumToInt(InternPool.Index.c_longdouble_type), + f16_type = @enumToInt(InternPool.Index.f16_type), + f32_type = @enumToInt(InternPool.Index.f32_type), + f64_type = @enumToInt(InternPool.Index.f64_type), + f80_type = @enumToInt(InternPool.Index.f80_type), + f128_type = @enumToInt(InternPool.Index.f128_type), + anyopaque_type = @enumToInt(InternPool.Index.anyopaque_type), + bool_type = @enumToInt(InternPool.Index.bool_type), + void_type = @enumToInt(InternPool.Index.void_type), + type_type = @enumToInt(InternPool.Index.type_type), + anyerror_type = @enumToInt(InternPool.Index.anyerror_type), + comptime_int_type = @enumToInt(InternPool.Index.comptime_int_type), + comptime_float_type = @enumToInt(InternPool.Index.comptime_float_type), + noreturn_type = @enumToInt(InternPool.Index.noreturn_type), + anyframe_type = @enumToInt(InternPool.Index.anyframe_type), + null_type = @enumToInt(InternPool.Index.null_type), + undefined_type = @enumToInt(InternPool.Index.undefined_type), + enum_literal_type = @enumToInt(InternPool.Index.enum_literal_type), + atomic_order_type = @enumToInt(InternPool.Index.atomic_order_type), + atomic_rmw_op_type = @enumToInt(InternPool.Index.atomic_rmw_op_type), + calling_convention_type = @enumToInt(InternPool.Index.calling_convention_type), + address_space_type = @enumToInt(InternPool.Index.address_space_type), + float_mode_type = @enumToInt(InternPool.Index.float_mode_type), + reduce_op_type = @enumToInt(InternPool.Index.reduce_op_type), + call_modifier_type = @enumToInt(InternPool.Index.call_modifier_type), + prefetch_options_type = @enumToInt(InternPool.Index.prefetch_options_type), + export_options_type = @enumToInt(InternPool.Index.export_options_type), + extern_options_type = @enumToInt(InternPool.Index.extern_options_type), + type_info_type = @enumToInt(InternPool.Index.type_info_type), + manyptr_u8_type = @enumToInt(InternPool.Index.manyptr_u8_type), + manyptr_const_u8_type = @enumToInt(InternPool.Index.manyptr_const_u8_type), + single_const_pointer_to_comptime_int_type = @enumToInt(InternPool.Index.single_const_pointer_to_comptime_int_type), + const_slice_u8_type = 
@enumToInt(InternPool.Index.const_slice_u8_type), + anyerror_void_error_union_type = @enumToInt(InternPool.Index.anyerror_void_error_union_type), + generic_poison_type = @enumToInt(InternPool.Index.generic_poison_type), + var_args_param_type = @enumToInt(InternPool.Index.var_args_param_type), + empty_struct_type = @enumToInt(InternPool.Index.empty_struct_type), + undef = @enumToInt(InternPool.Index.undef), + zero = @enumToInt(InternPool.Index.zero), + zero_usize = @enumToInt(InternPool.Index.zero_usize), + one = @enumToInt(InternPool.Index.one), + one_usize = @enumToInt(InternPool.Index.one_usize), + calling_convention_c = @enumToInt(InternPool.Index.calling_convention_c), + calling_convention_inline = @enumToInt(InternPool.Index.calling_convention_inline), + void_value = @enumToInt(InternPool.Index.void_value), + unreachable_value = @enumToInt(InternPool.Index.unreachable_value), + null_value = @enumToInt(InternPool.Index.null_value), + bool_true = @enumToInt(InternPool.Index.bool_true), + bool_false = @enumToInt(InternPool.Index.bool_false), + empty_struct = @enumToInt(InternPool.Index.empty_struct), + generic_poison = @enumToInt(InternPool.Index.generic_poison), + /// This Ref does not correspond to any ZIR instruction or constant /// value and may instead be used as a sentinel to indicate null. - none, - - u1_type, - u8_type, - i8_type, - u16_type, - i16_type, - u29_type, - u32_type, - i32_type, - u64_type, - i64_type, - u128_type, - i128_type, - usize_type, - isize_type, - c_char_type, - c_short_type, - c_ushort_type, - c_int_type, - c_uint_type, - c_long_type, - c_ulong_type, - c_longlong_type, - c_ulonglong_type, - c_longdouble_type, - f16_type, - f32_type, - f64_type, - f80_type, - f128_type, - anyopaque_type, - bool_type, - void_type, - type_type, - anyerror_type, - comptime_int_type, - comptime_float_type, - noreturn_type, - anyframe_type, - null_type, - undefined_type, - enum_literal_type, - atomic_order_type, - atomic_rmw_op_type, - calling_convention_type, - address_space_type, - float_mode_type, - reduce_op_type, - modifier_type, - prefetch_options_type, - export_options_type, - extern_options_type, - type_info_type, - manyptr_u8_type, - manyptr_const_u8_type, - fn_noreturn_no_args_type, - fn_void_no_args_type, - fn_naked_noreturn_no_args_type, - fn_ccc_void_no_args_type, - single_const_pointer_to_comptime_int_type, - const_slice_u8_type, - anyerror_void_error_union_type, - generic_poison_type, - - /// `undefined` (untyped) - undef, - /// `0` (comptime_int) - zero, - /// `1` (comptime_int) - one, - /// `{}` - void_value, - /// `unreachable` (noreturn type) - unreachable_value, - /// `null` (untyped) - null_value, - /// `true` - bool_true, - /// `false` - bool_false, - /// `.{}` (untyped) - empty_struct, - /// `0` (usize) - zero_usize, - /// `1` (usize) - one_usize, - /// `std.builtin.CallingConvention.C` - calling_convention_c, - /// `std.builtin.CallingConvention.Inline` - calling_convention_inline, - /// Used for generic parameters where the type and value - /// is not known until generic function instantiation. - generic_poison, - /// This is a special type for variadic parameters of a function call. - /// Casts to it will validate that the type can be passed to a c - /// calling convention function. 
- var_args_param, - + none = std.math.maxInt(u32), _, - - pub const typed_value_map = std.enums.directEnumArray(Ref, TypedValue, 0, .{ - .none = undefined, - - .u1_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.u1_type), - }, - .u8_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.u8_type), - }, - .i8_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.i8_type), - }, - .u16_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.u16_type), - }, - .i16_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.i16_type), - }, - .u29_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.u29_type), - }, - .u32_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.u32_type), - }, - .i32_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.i32_type), - }, - .u64_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.u64_type), - }, - .i64_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.i64_type), - }, - .u128_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.u128_type), - }, - .i128_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.i128_type), - }, - .usize_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.usize_type), - }, - .isize_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.isize_type), - }, - .c_char_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.c_char_type), - }, - .c_short_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.c_short_type), - }, - .c_ushort_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.c_ushort_type), - }, - .c_int_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.c_int_type), - }, - .c_uint_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.c_uint_type), - }, - .c_long_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.c_long_type), - }, - .c_ulong_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.c_ulong_type), - }, - .c_longlong_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.c_longlong_type), - }, - .c_ulonglong_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.c_ulonglong_type), - }, - .c_longdouble_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.c_longdouble_type), - }, - .f16_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.f16_type), - }, - .f32_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.f32_type), - }, - .f64_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.f64_type), - }, - .f80_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.f80_type), - }, - .f128_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.f128_type), - }, - .anyopaque_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.anyopaque_type), - }, - .bool_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.bool_type), - }, - .void_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.void_type), - }, - .type_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.type_type), - }, - .anyerror_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.anyerror_type), - }, - .comptime_int_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.comptime_int_type), - }, - .comptime_float_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.comptime_float_type), - }, - .noreturn_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.noreturn_type), - }, - .anyframe_type = .{ - .ty = 
-            .ty = Type.initTag(.type),
-            .val = Value.initTag(.anyframe_type),
-        },
-        .null_type = .{
-            .ty = Type.initTag(.type),
-            .val = Value.initTag(.null_type),
-        },
-        .undefined_type = .{
-            .ty = Type.initTag(.type),
-            .val = Value.initTag(.undefined_type),
-        },
-        .fn_noreturn_no_args_type = .{
-            .ty = Type.initTag(.type),
-            .val = Value.initTag(.fn_noreturn_no_args_type),
-        },
-        .fn_void_no_args_type = .{
-            .ty = Type.initTag(.type),
-            .val = Value.initTag(.fn_void_no_args_type),
-        },
-        .fn_naked_noreturn_no_args_type = .{
-            .ty = Type.initTag(.type),
-            .val = Value.initTag(.fn_naked_noreturn_no_args_type),
-        },
-        .fn_ccc_void_no_args_type = .{
-            .ty = Type.initTag(.type),
-            .val = Value.initTag(.fn_ccc_void_no_args_type),
-        },
-        .single_const_pointer_to_comptime_int_type = .{
-            .ty = Type.initTag(.type),
-            .val = Value.initTag(.single_const_pointer_to_comptime_int_type),
-        },
-        .const_slice_u8_type = .{
-            .ty = Type.initTag(.type),
-            .val = Value.initTag(.const_slice_u8_type),
-        },
-        .anyerror_void_error_union_type = .{
-            .ty = Type.initTag(.type),
-            .val = Value.initTag(.anyerror_void_error_union_type),
-        },
-        .generic_poison_type = .{
-            .ty = Type.initTag(.type),
-            .val = Value.initTag(.generic_poison_type),
-        },
-        .enum_literal_type = .{
-            .ty = Type.initTag(.type),
-            .val = Value.initTag(.enum_literal_type),
-        },
-        .manyptr_u8_type = .{
-            .ty = Type.initTag(.type),
-            .val = Value.initTag(.manyptr_u8_type),
-        },
-        .manyptr_const_u8_type = .{
-            .ty = Type.initTag(.type),
-            .val = Value.initTag(.manyptr_const_u8_type),
-        },
-        .atomic_order_type = .{
-            .ty = Type.initTag(.type),
-            .val = Value.initTag(.atomic_order_type),
-        },
-        .atomic_rmw_op_type = .{
-            .ty = Type.initTag(.type),
-            .val = Value.initTag(.atomic_rmw_op_type),
-        },
-        .calling_convention_type = .{
-            .ty = Type.initTag(.type),
-            .val = Value.initTag(.calling_convention_type),
-        },
-        .address_space_type = .{
-            .ty = Type.initTag(.type),
-            .val = Value.initTag(.address_space_type),
-        },
-        .float_mode_type = .{
-            .ty = Type.initTag(.type),
-            .val = Value.initTag(.float_mode_type),
-        },
-        .reduce_op_type = .{
-            .ty = Type.initTag(.type),
-            .val = Value.initTag(.reduce_op_type),
-        },
-        .modifier_type = .{
-            .ty = Type.initTag(.type),
-            .val = Value.initTag(.modifier_type),
-        },
-        .prefetch_options_type = .{
-            .ty = Type.initTag(.type),
-            .val = Value.initTag(.prefetch_options_type),
-        },
-        .export_options_type = .{
-            .ty = Type.initTag(.type),
-            .val = Value.initTag(.export_options_type),
-        },
-        .extern_options_type = .{
-            .ty = Type.initTag(.type),
-            .val = Value.initTag(.extern_options_type),
-        },
-        .type_info_type = .{
-            .ty = Type.initTag(.type),
-            .val = Value.initTag(.type_info_type),
-        },
-
-        .undef = .{
-            .ty = Type.initTag(.undefined),
-            .val = Value.initTag(.undef),
-        },
-        .zero = .{
-            .ty = Type.initTag(.comptime_int),
-            .val = Value.initTag(.zero),
-        },
-        .zero_usize = .{
-            .ty = Type.initTag(.usize),
-            .val = Value.initTag(.zero),
-        },
-        .one = .{
-            .ty = Type.initTag(.comptime_int),
-            .val = Value.initTag(.one),
-        },
-        .one_usize = .{
-            .ty = Type.initTag(.usize),
-            .val = Value.initTag(.one),
-        },
-        .void_value = .{
-            .ty = Type.initTag(.void),
-            .val = Value.initTag(.void_value),
-        },
-        .unreachable_value = .{
-            .ty = Type.initTag(.noreturn),
-            .val = Value.initTag(.unreachable_value),
-        },
-        .null_value = .{
-            .ty = Type.initTag(.null),
-            .val = Value.initTag(.null_value),
-        },
-        .bool_true = .{
-            .ty = Type.initTag(.bool),
-            .val = Value.initTag(.bool_true),
-        },
-        .bool_false = .{
-            .ty = Type.initTag(.bool),
-            .val = Value.initTag(.bool_false),
-        },
-        .empty_struct = .{
-            .ty = Type.initTag(.empty_struct_literal),
-            .val = Value.initTag(.empty_struct_value),
-        },
-        .calling_convention_c = .{
-            .ty = Type.initTag(.calling_convention),
-            .val = .{ .ptr_otherwise = &calling_convention_c_payload.base },
-        },
-        .calling_convention_inline = .{
-            .ty = Type.initTag(.calling_convention),
-            .val = .{ .ptr_otherwise = &calling_convention_inline_payload.base },
-        },
-        .generic_poison = .{
-            .ty = Type.initTag(.generic_poison),
-            .val = Value.initTag(.generic_poison),
-        },
-        .var_args_param = undefined,
-    });
-};
-
-/// We would like this to be const but `Value` wants a mutable pointer for
-/// its payload field. Nothing should mutate this though.
-var calling_convention_c_payload: Value.Payload.U32 = .{
-    .base = .{ .tag = .enum_field_index },
-    .data = @enumToInt(std.builtin.CallingConvention.C),
-};
-
-/// We would like this to be const but `Value` wants a mutable pointer for
-/// its payload field. Nothing should mutate this though.
-var calling_convention_inline_payload: Value.Payload.U32 = .{
-    .base = .{ .tag = .enum_field_index },
-    .data = @enumToInt(std.builtin.CallingConvention.Inline),
 };
 
 /// All instructions have an 8-byte payload, which is contained within
@@ -4163,7 +3811,7 @@ pub fn getFnInfo(zir: Zir, fn_inst: Inst.Index) FnInfo {
     };
 }
 
-const ref_start_index: u32 = Inst.Ref.typed_value_map.len;
+const ref_start_index: u32 = InternPool.static_len;
 
 pub fn indexToRef(inst: Inst.Index) Inst.Ref {
     return @intToEnum(Inst.Ref, ref_start_index + inst);
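With typed_value_map gone, every Inst.Ref value below InternPool.static_len now names a statically interned constant, and instruction indices begin immediately after that reserved range. A minimal sketch of the resulting mapping, in Zig; indexToRef is taken from the hunk above, while refToIndex is shown only as the assumed inverse:

    pub fn indexToRef(inst: Inst.Index) Inst.Ref {
        // Instruction 0 maps to the first Ref past the reserved constants.
        return @intToEnum(Inst.Ref, ref_start_index + inst);
    }

    pub fn refToIndex(ref: Inst.Ref) ?Inst.Index {
        const ref_int = @enumToInt(ref);
        // Refs below ref_start_index are interned constants, not instructions.
        if (ref_int < ref_start_index) return null;
        return ref_int - ref_start_index;
    }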
diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig
index 971ed4749d..4370977272 100644
--- a/src/arch/aarch64/CodeGen.zig
+++ b/src/arch/aarch64/CodeGen.zig
@@ -471,6 +471,7 @@ pub fn addExtraAssumeCapacity(self: *Self, extra: anytype) u32 {
 }
 
 fn gen(self: *Self) !void {
+    const mod = self.bin_file.options.module.?;
     const cc = self.fn_type.fnCallingConvention();
     if (cc != .Naked) {
         // stp fp, lr, [sp, #-16]!
@@ -522,8 +523,8 @@ fn gen(self: *Self) !void {
 
                     const ty = self.air.typeOfIndex(inst);
 
-                    const abi_size = @intCast(u32, ty.abiSize(self.target.*));
-                    const abi_align = ty.abiAlignment(self.target.*);
+                    const abi_size = @intCast(u32, ty.abiSize(mod));
+                    const abi_align = ty.abiAlignment(mod);
                     const stack_offset = try self.allocMem(abi_size, abi_align, inst);
                     try self.genSetStack(ty, stack_offset, MCValue{ .register = reg });
 
@@ -951,8 +952,8 @@ fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Live
         tomb_bits >>= 1;
         if (!dies) continue;
         const op_int = @enumToInt(op);
-        if (op_int < Air.Inst.Ref.typed_value_map.len) continue;
-        const op_index = @intCast(Air.Inst.Index, op_int - Air.Inst.Ref.typed_value_map.len);
+        if (op_int < Air.ref_start_index) continue;
+        const op_index = @intCast(Air.Inst.Index, op_int - Air.ref_start_index);
         self.processDeath(op_index);
     }
     const is_used = @truncate(u1, tomb_bits) == 0;
@@ -1026,31 +1027,31 @@ fn allocMem(
 
 /// Use a pointer instruction as the basis for allocating stack memory.
 fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
+    const mod = self.bin_file.options.module.?;
     const elem_ty = self.air.typeOfIndex(inst).elemType();
 
-    if (!elem_ty.hasRuntimeBits()) {
+    if (!elem_ty.hasRuntimeBits(mod)) {
         // return the stack offset 0. Stack offset 0 will be where all
         // zero-sized stack allocations live as non-zero-sized
         // allocations will always have an offset > 0.
         return @as(u32, 0);
     }
 
-    const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) orelse {
-        const mod = self.bin_file.options.module.?;
+    const abi_size = math.cast(u32, elem_ty.abiSize(mod)) orelse {
         return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)});
     };
     // TODO swap this for inst.ty.ptrAlign
-    const abi_align = elem_ty.abiAlignment(self.target.*);
+    const abi_align = elem_ty.abiAlignment(mod);
     return self.allocMem(abi_size, abi_align, inst);
 }
 
 fn allocRegOrMem(self: *Self, elem_ty: Type, reg_ok: bool, maybe_inst: ?Air.Inst.Index) !MCValue {
-    const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) orelse {
-        const mod = self.bin_file.options.module.?;
+    const mod = self.bin_file.options.module.?;
+    const abi_size = math.cast(u32, elem_ty.abiSize(mod)) orelse {
        return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)});
    };
-    const abi_align = elem_ty.abiAlignment(self.target.*);
+    const abi_align = elem_ty.abiAlignment(mod);
 
     if (reg_ok) {
         // Make sure the type can fit in a register before we try to allocate one.
@@ -1177,13 +1178,14 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void {
     if (self.liveness.isUnused(inst))
         return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none });
 
+    const mod = self.bin_file.options.module.?;
     const operand = ty_op.operand;
     const operand_mcv = try self.resolveInst(operand);
     const operand_ty = self.air.typeOf(operand);
-    const operand_info = operand_ty.intInfo(self.target.*);
+    const operand_info = operand_ty.intInfo(mod);
 
     const dest_ty = self.air.typeOfIndex(inst);
-    const dest_info = dest_ty.intInfo(self.target.*);
+    const dest_info = dest_ty.intInfo(mod);
 
     const result: MCValue = result: {
         const operand_lock: ?RegisterLock = switch (operand_mcv) {
@@ -1257,8 +1259,9 @@ fn trunc(
     operand_ty: Type,
     dest_ty: Type,
 ) !MCValue {
-    const info_a = operand_ty.intInfo(self.target.*);
-    const info_b = dest_ty.intInfo(self.target.*);
+    const mod = self.bin_file.options.module.?;
+    const info_a = operand_ty.intInfo(mod);
+    const info_b = dest_ty.intInfo(mod);
 
     if (info_b.bits <= 64) {
         const operand_reg = switch (operand) {
@@ -1319,6 +1322,7 @@ fn airBoolToInt(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airNot(self: *Self, inst: Air.Inst.Index) !void {
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+    const mod = self.bin_file.options.module.?;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const operand = try self.resolveInst(ty_op.operand);
         const operand_ty = self.air.typeOf(ty_op.operand);
@@ -1327,7 +1331,7 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void {
             .unreach => unreachable,
             .compare_flags => |cond| break :result MCValue{ .compare_flags = cond.negate() },
             else => {
-                switch (operand_ty.zigTypeTag()) {
+                switch (operand_ty.zigTypeTag(mod)) {
                     .Bool => {
                         // TODO convert this to mvn + and
                         const op_reg = switch (operand) {
@@ -1361,7 +1365,7 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void {
                     },
                     .Vector => return self.fail("TODO bitwise not for vectors", .{}),
                     .Int => {
-                        const int_info = operand_ty.intInfo(self.target.*);
+                        const int_info = operand_ty.intInfo(mod);
                         if (int_info.bits <= 64) {
                             const op_reg = switch (operand) {
                                 .register => |r| r,
@@ -1413,13 +1417,13 @@ fn minMax(
     rhs_ty: Type,
     maybe_inst: ?Air.Inst.Index,
 ) !MCValue {
-    switch (lhs_ty.zigTypeTag()) {
+    const mod = self.bin_file.options.module.?;
+    switch (lhs_ty.zigTypeTag(mod)) {
         .Float => return self.fail("TODO ARM min/max on floats", .{}),
return self.fail("TODO ARM min/max on vectors", .{}), .Int => { - const mod = self.bin_file.options.module.?; assert(lhs_ty.eql(rhs_ty, mod)); - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 64) { var lhs_reg: Register = undefined; var rhs_reg: Register = undefined; @@ -1907,12 +1911,12 @@ fn addSub( maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue { const mod = self.bin_file.options.module.?; - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Float => return self.fail("TODO binary operations on floats", .{}), .Vector => return self.fail("TODO binary operations on vectors", .{}), .Int => { assert(lhs_ty.eql(rhs_ty, mod)); - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 64) { const lhs_immediate = try lhs_bind.resolveToImmediate(self); const rhs_immediate = try rhs_bind.resolveToImmediate(self); @@ -1968,11 +1972,11 @@ fn mul( maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue { const mod = self.bin_file.options.module.?; - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO binary operations on vectors", .{}), .Int => { assert(lhs_ty.eql(rhs_ty, mod)); - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 64) { // TODO add optimisations for multiplication // with immediates, for example a * 2 can be @@ -1999,7 +2003,8 @@ fn divFloat( _ = rhs_ty; _ = maybe_inst; - switch (lhs_ty.zigTypeTag()) { + const mod = self.bin_file.options.module.?; + switch (lhs_ty.zigTypeTag(mod)) { .Float => return self.fail("TODO div_float", .{}), .Vector => return self.fail("TODO div_float on vectors", .{}), else => unreachable, @@ -2015,12 +2020,12 @@ fn divTrunc( maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue { const mod = self.bin_file.options.module.?; - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Float => return self.fail("TODO div on floats", .{}), .Vector => return self.fail("TODO div on vectors", .{}), .Int => { assert(lhs_ty.eql(rhs_ty, mod)); - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 64) { switch (int_info.signedness) { .signed => { @@ -2049,12 +2054,12 @@ fn divFloor( maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue { const mod = self.bin_file.options.module.?; - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Float => return self.fail("TODO div on floats", .{}), .Vector => return self.fail("TODO div on vectors", .{}), .Int => { assert(lhs_ty.eql(rhs_ty, mod)); - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 64) { switch (int_info.signedness) { .signed => { @@ -2082,12 +2087,12 @@ fn divExact( maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue { const mod = self.bin_file.options.module.?; - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Float => return self.fail("TODO div on floats", .{}), .Vector => return self.fail("TODO div on vectors", .{}), .Int => { assert(lhs_ty.eql(rhs_ty, mod)); - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 64) { switch (int_info.signedness) { .signed => { @@ -2118,12 +2123,12 @@ fn rem( _ = maybe_inst; const mod = self.bin_file.options.module.?; - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Float => return self.fail("TODO rem/mod on floats", 
         .Float => return self.fail("TODO rem/mod on floats", .{}),
         .Vector => return self.fail("TODO rem/mod on vectors", .{}),
         .Int => {
             assert(lhs_ty.eql(rhs_ty, mod));
-            const int_info = lhs_ty.intInfo(self.target.*);
+            const int_info = lhs_ty.intInfo(mod);
             if (int_info.bits <= 64) {
                 var lhs_reg: Register = undefined;
                 var rhs_reg: Register = undefined;
@@ -2188,7 +2193,8 @@ fn modulo(
     _ = rhs_ty;
     _ = maybe_inst;
 
-    switch (lhs_ty.zigTypeTag()) {
+    const mod = self.bin_file.options.module.?;
+    switch (lhs_ty.zigTypeTag(mod)) {
         .Float => return self.fail("TODO mod on floats", .{}),
         .Vector => return self.fail("TODO mod on vectors", .{}),
         .Int => return self.fail("TODO mod on ints", .{}),
@@ -2205,10 +2211,11 @@ fn wrappingArithmetic(
     rhs_ty: Type,
     maybe_inst: ?Air.Inst.Index,
 ) InnerError!MCValue {
-    switch (lhs_ty.zigTypeTag()) {
+    const mod = self.bin_file.options.module.?;
+    switch (lhs_ty.zigTypeTag(mod)) {
         .Vector => return self.fail("TODO binary operations on vectors", .{}),
         .Int => {
-            const int_info = lhs_ty.intInfo(self.target.*);
+            const int_info = lhs_ty.intInfo(mod);
             if (int_info.bits <= 64) {
                 // Generate an add/sub/mul
                 const result: MCValue = switch (tag) {
@@ -2240,11 +2247,11 @@ fn bitwise(
     maybe_inst: ?Air.Inst.Index,
 ) InnerError!MCValue {
     const mod = self.bin_file.options.module.?;
-    switch (lhs_ty.zigTypeTag()) {
+    switch (lhs_ty.zigTypeTag(mod)) {
         .Vector => return self.fail("TODO binary operations on vectors", .{}),
         .Int => {
             assert(lhs_ty.eql(rhs_ty, mod));
-            const int_info = lhs_ty.intInfo(self.target.*);
+            const int_info = lhs_ty.intInfo(mod);
             if (int_info.bits <= 64) {
                 // TODO implement bitwise operations with immediates
                 const mir_tag: Mir.Inst.Tag = switch (tag) {
@@ -2274,10 +2281,11 @@ fn shiftExact(
 ) InnerError!MCValue {
     _ = rhs_ty;
 
-    switch (lhs_ty.zigTypeTag()) {
+    const mod = self.bin_file.options.module.?;
+    switch (lhs_ty.zigTypeTag(mod)) {
         .Vector => return self.fail("TODO binary operations on vectors", .{}),
         .Int => {
-            const int_info = lhs_ty.intInfo(self.target.*);
+            const int_info = lhs_ty.intInfo(mod);
             if (int_info.bits <= 64) {
                 const rhs_immediate = try rhs_bind.resolveToImmediate(self);
 
@@ -2323,10 +2331,11 @@ fn shiftNormal(
     rhs_ty: Type,
     maybe_inst: ?Air.Inst.Index,
 ) InnerError!MCValue {
-    switch (lhs_ty.zigTypeTag()) {
+    const mod = self.bin_file.options.module.?;
+    switch (lhs_ty.zigTypeTag(mod)) {
         .Vector => return self.fail("TODO binary operations on vectors", .{}),
         .Int => {
-            const int_info = lhs_ty.intInfo(self.target.*);
+            const int_info = lhs_ty.intInfo(mod);
             if (int_info.bits <= 64) {
                 // Generate a shl_exact/shr_exact
                 const result: MCValue = switch (tag) {
@@ -2362,7 +2371,8 @@ fn booleanOp(
     rhs_ty: Type,
     maybe_inst: ?Air.Inst.Index,
 ) InnerError!MCValue {
-    switch (lhs_ty.zigTypeTag()) {
+    const mod = self.bin_file.options.module.?;
+    switch (lhs_ty.zigTypeTag(mod)) {
         .Bool => {
             assert((try lhs_bind.resolveToImmediate(self)) == null); // should have been handled by Sema
             assert((try rhs_bind.resolveToImmediate(self)) == null); // should have been handled by Sema
@@ -2388,9 +2398,9 @@ fn ptrArithmetic(
     rhs_ty: Type,
     maybe_inst: ?Air.Inst.Index,
 ) InnerError!MCValue {
-    switch (lhs_ty.zigTypeTag()) {
+    const mod = self.bin_file.options.module.?;
+    switch (lhs_ty.zigTypeTag(mod)) {
         .Pointer => {
-            const mod = self.bin_file.options.module.?;
             assert(rhs_ty.eql(Type.usize, mod));
 
             const ptr_ty = lhs_ty;
@@ -2398,7 +2408,7 @@ fn ptrArithmetic(
                 .One => ptr_ty.childType().childType(), // ptr to array, so get array element type
                 else => ptr_ty.childType(),
             };
-            const elem_size = elem_ty.abiSize(self.target.*);
+            const elem_size = elem_ty.abiSize(mod);
 
             const base_tag: Air.Inst.Tag = switch (tag) {
                 .ptr_add => .add,
@@ -2511,6 +2521,7 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void {
     const tag = self.air.instructions.items(.tag)[inst];
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
     const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
+    const mod = self.bin_file.options.module.?;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const lhs_bind: ReadArg.Bind = .{ .inst = extra.lhs };
         const rhs_bind: ReadArg.Bind = .{ .inst = extra.rhs };
@@ -2518,16 +2529,15 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void {
         const rhs_ty = self.air.typeOf(extra.rhs);
 
         const tuple_ty = self.air.typeOfIndex(inst);
-        const tuple_size = @intCast(u32, tuple_ty.abiSize(self.target.*));
-        const tuple_align = tuple_ty.abiAlignment(self.target.*);
-        const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, self.target.*));
+        const tuple_size = @intCast(u32, tuple_ty.abiSize(mod));
+        const tuple_align = tuple_ty.abiAlignment(mod);
+        const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, mod));
 
-        switch (lhs_ty.zigTypeTag()) {
+        switch (lhs_ty.zigTypeTag(mod)) {
             .Vector => return self.fail("TODO implement add_with_overflow/sub_with_overflow for vectors", .{}),
             .Int => {
-                const mod = self.bin_file.options.module.?;
                 assert(lhs_ty.eql(rhs_ty, mod));
-                const int_info = lhs_ty.intInfo(self.target.*);
+                const int_info = lhs_ty.intInfo(mod);
                 switch (int_info.bits) {
                     1...31, 33...63 => {
                         const stack_offset = try self.allocMem(tuple_size, tuple_align, inst);
@@ -2639,24 +2649,23 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
     const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
     if (self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ extra.lhs, extra.rhs, .none });
+    const mod = self.bin_file.options.module.?;
     const result: MCValue = result: {
-        const mod = self.bin_file.options.module.?;
-
         const lhs_bind: ReadArg.Bind = .{ .inst = extra.lhs };
         const rhs_bind: ReadArg.Bind = .{ .inst = extra.rhs };
         const lhs_ty = self.air.typeOf(extra.lhs);
         const rhs_ty = self.air.typeOf(extra.rhs);
 
         const tuple_ty = self.air.typeOfIndex(inst);
-        const tuple_size = @intCast(u32, tuple_ty.abiSize(self.target.*));
-        const tuple_align = tuple_ty.abiAlignment(self.target.*);
-        const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, self.target.*));
+        const tuple_size = @intCast(u32, tuple_ty.abiSize(mod));
+        const tuple_align = tuple_ty.abiAlignment(mod);
+        const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, mod));
 
-        switch (lhs_ty.zigTypeTag()) {
+        switch (lhs_ty.zigTypeTag(mod)) {
             .Vector => return self.fail("TODO implement mul_with_overflow for vectors", .{}),
             .Int => {
                 assert(lhs_ty.eql(rhs_ty, mod));
-                const int_info = lhs_ty.intInfo(self.target.*);
+                const int_info = lhs_ty.intInfo(mod);
                 if (int_info.bits <= 32) {
                     const stack_offset = try self.allocMem(tuple_size, tuple_align, inst);
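The overflow intrinsics above all share one shape: the result is a two-field tuple of (wrapped value, overflow bit), and its stack slot is computed with the module-aware layout queries. A condensed sketch of that computation, assembled from the hunks above (field index 1 is the u1 overflow flag):

    const tuple_ty = self.air.typeOfIndex(inst);
    // Size and alignment of the whole (value, overflow-bit) tuple.
    const tuple_size = @intCast(u32, tuple_ty.abiSize(mod));
    const tuple_align = tuple_ty.abiAlignment(mod);
    // Byte offset of field 1, the overflow flag, within the tuple.
    const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, mod));
    const stack_offset = try self.allocMem(tuple_size, tuple_align, inst);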
@@ -2864,6 +2873,7 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
     const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
     if (self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ extra.lhs, extra.rhs, .none });
+    const mod = self.bin_file.options.module.?;
     const result: MCValue = result: {
         const lhs_bind: ReadArg.Bind = .{ .inst = extra.lhs };
         const rhs_bind: ReadArg.Bind = .{ .inst = extra.rhs };
@@ -2871,14 +2881,14 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
         const rhs_ty = self.air.typeOf(extra.rhs);
 
         const tuple_ty = self.air.typeOfIndex(inst);
-        const tuple_size = @intCast(u32, tuple_ty.abiSize(self.target.*));
-        const tuple_align = tuple_ty.abiAlignment(self.target.*);
-        const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, self.target.*));
+        const tuple_size = @intCast(u32, tuple_ty.abiSize(mod));
+        const tuple_align = tuple_ty.abiAlignment(mod);
+        const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, mod));
 
-        switch (lhs_ty.zigTypeTag()) {
+        switch (lhs_ty.zigTypeTag(mod)) {
             .Vector => return self.fail("TODO implement shl_with_overflow for vectors", .{}),
             .Int => {
-                const int_info = lhs_ty.intInfo(self.target.*);
+                const int_info = lhs_ty.intInfo(mod);
                 if (int_info.bits <= 64) {
                     const stack_offset = try self.allocMem(tuple_size, tuple_align, inst);
@@ -3011,10 +3021,11 @@ fn airOptionalPayload(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn optionalPayload(self: *Self, inst: Air.Inst.Index, mcv: MCValue, optional_ty: Type) !MCValue {
+    const mod = self.bin_file.options.module.?;
     var opt_buf: Type.Payload.ElemType = undefined;
     const payload_ty = optional_ty.optionalChild(&opt_buf);
-    if (!payload_ty.hasRuntimeBits()) return MCValue.none;
-    if (optional_ty.isPtrLikeOptional()) {
+    if (!payload_ty.hasRuntimeBits(mod)) return MCValue.none;
+    if (optional_ty.isPtrLikeOptional(mod)) {
         // TODO should we reuse the operand here?
         const raw_reg = try self.register_manager.allocReg(inst, gp);
         const reg = self.registerAlias(raw_reg, payload_ty);
@@ -3055,16 +3066,17 @@ fn errUnionErr(
     error_union_ty: Type,
     maybe_inst: ?Air.Inst.Index,
 ) !MCValue {
+    const mod = self.bin_file.options.module.?;
     const err_ty = error_union_ty.errorUnionSet();
     const payload_ty = error_union_ty.errorUnionPayload();
     if (err_ty.errorSetIsEmpty()) {
         return MCValue{ .immediate = 0 };
     }
 
-    if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+    if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
         return try error_union_bind.resolveToMcv(self);
     }
 
-    const err_offset = @intCast(u32, errUnionErrorOffset(payload_ty, self.target.*));
+    const err_offset = @intCast(u32, errUnionErrorOffset(payload_ty, mod));
     switch (try error_union_bind.resolveToMcv(self)) {
         .register => {
             var operand_reg: Register = undefined;
@@ -3086,7 +3098,7 @@ fn errUnionErr(
             );
 
             const err_bit_offset = err_offset * 8;
-            const err_bit_size = @intCast(u32, err_ty.abiSize(self.target.*)) * 8;
+            const err_bit_size = @intCast(u32, err_ty.abiSize(mod)) * 8;
 
             _ = try self.addInst(.{
                 .tag = .ubfx, // errors are unsigned integers
@@ -3134,16 +3146,17 @@ fn errUnionPayload(
     error_union_ty: Type,
     maybe_inst: ?Air.Inst.Index,
 ) !MCValue {
+    const mod = self.bin_file.options.module.?;
     const err_ty = error_union_ty.errorUnionSet();
     const payload_ty = error_union_ty.errorUnionPayload();
     if (err_ty.errorSetIsEmpty()) {
         return try error_union_bind.resolveToMcv(self);
     }
 
-    if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+    if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
         return MCValue.none;
     }
 
-    const payload_offset = @intCast(u32, errUnionPayloadOffset(payload_ty, self.target.*));
+    const payload_offset = @intCast(u32, errUnionPayloadOffset(payload_ty, mod));
     switch (try error_union_bind.resolveToMcv(self)) {
         .register => {
             var operand_reg: Register = undefined;
@@ -3165,10 +3178,10 @@ fn errUnionPayload(
             );
 
             const payload_bit_offset = payload_offset * 8;
-            const payload_bit_size = @intCast(u32, payload_ty.abiSize(self.target.*)) * 8;
+            const payload_bit_size = @intCast(u32, payload_ty.abiSize(mod)) * 8;
 
             _ = try self.addInst(.{
-                .tag = if (payload_ty.isSignedInt()) Mir.Inst.Tag.sbfx else .ubfx,
+                .tag = if (payload_ty.isSignedInt(mod)) Mir.Inst.Tag.sbfx else .ubfx,
                 .data = .{
                     .rr_lsb_width = .{
                         // Set both registers to the X variant to get the full width
@@ -3245,6 +3258,7 @@ fn airSaveErrReturnTraceIndex(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
 
     if (self.liveness.isUnused(inst)) {
@@ -3253,7 +3267,7 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
 
     const result: MCValue = result: {
         const payload_ty = self.air.typeOf(ty_op.operand);
-        if (!payload_ty.hasRuntimeBits()) {
+        if (!payload_ty.hasRuntimeBits(mod)) {
             break :result MCValue{ .immediate = 1 };
         }
 
@@ -3265,7 +3279,7 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
         };
         defer if (operand_lock) |lock| self.register_manager.unlockReg(lock);
 
-        if (optional_ty.isPtrLikeOptional()) {
+        if (optional_ty.isPtrLikeOptional(mod)) {
             // TODO should we check if we can reuse the operand?
             const raw_reg = try self.register_manager.allocReg(inst, gp);
             const reg = self.registerAlias(raw_reg, payload_ty);
@@ -3273,9 +3287,9 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
             break :result MCValue{ .register = reg };
         }
 
-        const optional_abi_size = @intCast(u32, optional_ty.abiSize(self.target.*));
-        const optional_abi_align = optional_ty.abiAlignment(self.target.*);
-        const offset = @intCast(u32, payload_ty.abiSize(self.target.*));
+        const optional_abi_size = @intCast(u32, optional_ty.abiSize(mod));
+        const optional_abi_align = optional_ty.abiAlignment(mod);
+        const offset = @intCast(u32, payload_ty.abiSize(mod));
 
         const stack_offset = try self.allocMem(optional_abi_size, optional_abi_align, inst);
         try self.genSetStack(payload_ty, stack_offset, operand);
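Error unions place the error code and the payload in an order that depends on the payload's alignment, which is why the wrap routines below never hard-code an order and instead ask the two offset helpers. A sketch of the store sequence, mirroring the surrounding hunks:

    const payload_off = errUnionPayloadOffset(payload_ty, mod);
    const err_off = errUnionErrorOffset(payload_ty, mod);
    // Write each component into its own sub-slot of the stack allocation.
    try self.genSetStack(payload_ty, stack_offset - @intCast(u32, payload_off), operand);
    try self.genSetStack(error_ty, stack_offset - @intCast(u32, err_off), .{ .immediate = 0 });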
@@ -3289,19 +3303,20 @@
 
 /// T to E!T
 fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const error_union_ty = self.air.getRefType(ty_op.ty);
         const error_ty = error_union_ty.errorUnionSet();
         const payload_ty = error_union_ty.errorUnionPayload();
         const operand = try self.resolveInst(ty_op.operand);
-        if (!payload_ty.hasRuntimeBitsIgnoreComptime()) break :result operand;
+        if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result operand;
 
-        const abi_size = @intCast(u32, error_union_ty.abiSize(self.target.*));
-        const abi_align = error_union_ty.abiAlignment(self.target.*);
+        const abi_size = @intCast(u32, error_union_ty.abiSize(mod));
+        const abi_align = error_union_ty.abiAlignment(mod);
         const stack_offset = try self.allocMem(abi_size, abi_align, inst);
-        const payload_off = errUnionPayloadOffset(payload_ty, self.target.*);
-        const err_off = errUnionErrorOffset(payload_ty, self.target.*);
+        const payload_off = errUnionPayloadOffset(payload_ty, mod);
+        const err_off = errUnionErrorOffset(payload_ty, mod);
         try self.genSetStack(payload_ty, stack_offset - @intCast(u32, payload_off), operand);
         try self.genSetStack(error_ty, stack_offset - @intCast(u32, err_off), .{ .immediate = 0 });
@@ -3314,17 +3329,18 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void {
 fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void {
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
+        const mod = self.bin_file.options.module.?;
         const error_union_ty = self.air.getRefType(ty_op.ty);
         const error_ty = error_union_ty.errorUnionSet();
         const payload_ty = error_union_ty.errorUnionPayload();
         const operand = try self.resolveInst(ty_op.operand);
-        if (!payload_ty.hasRuntimeBitsIgnoreComptime()) break :result operand;
+        if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result operand;
 
-        const abi_size = @intCast(u32, error_union_ty.abiSize(self.target.*));
-        const abi_align = error_union_ty.abiAlignment(self.target.*);
+        const abi_size = @intCast(u32, error_union_ty.abiSize(mod));
+        const abi_align = error_union_ty.abiAlignment(mod);
         const stack_offset = try self.allocMem(abi_size, abi_align, inst);
-        const payload_off = errUnionPayloadOffset(payload_ty, self.target.*);
-        const err_off = errUnionErrorOffset(payload_ty, self.target.*);
+        const payload_off = errUnionPayloadOffset(payload_ty, mod);
+        const err_off = errUnionErrorOffset(payload_ty, mod);
         try self.genSetStack(error_ty, stack_offset - @intCast(u32, err_off), operand);
         try self.genSetStack(payload_ty, stack_offset - @intCast(u32, payload_off), .undef);
@@ -3440,8 +3456,9 @@ fn ptrElemVal(
     ptr_ty: Type,
     maybe_inst: ?Air.Inst.Index,
 ) !MCValue {
+    const mod = self.bin_file.options.module.?;
     const elem_ty = ptr_ty.childType();
-    const elem_size = @intCast(u32, elem_ty.abiSize(self.target.*));
+    const elem_size = @intCast(u32, elem_ty.abiSize(mod));
 
     // TODO optimize for elem_sizes of 1, 2, 4, 8
     switch (elem_size) {
@@ -3597,8 +3614,9 @@ fn reuseOperand(
 }
 
 fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!void {
+    const mod = self.bin_file.options.module.?;
     const elem_ty = ptr_ty.elemType();
-    const elem_size = elem_ty.abiSize(self.target.*);
+    const elem_size = elem_ty.abiSize(mod);
 
     switch (ptr) {
         .none => unreachable,
@@ -3846,9 +3864,10 @@ fn genInlineMemsetCode(
 
 fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
     const elem_ty = self.air.typeOfIndex(inst);
-    const elem_size = elem_ty.abiSize(self.target.*);
+    const mod = self.bin_file.options.module.?;
+    const elem_size = elem_ty.abiSize(mod);
     const result: MCValue = result: {
-        if (!elem_ty.hasRuntimeBits())
+        if (!elem_ty.hasRuntimeBits(mod))
             break :result MCValue.none;
 
         const ptr = try self.resolveInst(ty_op.operand);
@@ -3874,11 +3893,12 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn genLdrRegister(self: *Self, value_reg: Register, addr_reg: Register, ty: Type) !void {
-    const abi_size = ty.abiSize(self.target.*);
+    const mod = self.bin_file.options.module.?;
+    const abi_size = ty.abiSize(mod);
 
     const tag: Mir.Inst.Tag = switch (abi_size) {
-        1 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsb_immediate else .ldrb_immediate,
-        2 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsh_immediate else .ldrh_immediate,
+        1 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsb_immediate else .ldrb_immediate,
+        2 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsh_immediate else .ldrh_immediate,
         4 => .ldr_immediate,
         8 => .ldr_immediate,
         3, 5, 6, 7 => return self.fail("TODO: genLdrRegister for more abi_sizes", .{}),
@@ -3896,7 +3916,8 @@ fn genLdrRegister(self: *Self, value_reg: Register, addr_reg: Register, ty: Type
 }
 
 fn genStrRegister(self: *Self, value_reg: Register, addr_reg: Register, ty: Type) !void {
-    const abi_size = ty.abiSize(self.target.*);
+    const mod = self.bin_file.options.module.?;
+    const abi_size = ty.abiSize(mod);
 
     const tag: Mir.Inst.Tag = switch (abi_size) {
         1 => .strb_immediate,
@@ -3917,8 +3938,9 @@ fn genStrRegister(self: *Self, value_reg: Register, addr_reg: Register, ty: Type
 }
 
 fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type) InnerError!void {
+    const mod = self.bin_file.options.module.?;
     log.debug("store: storing {} to {}", .{ value, ptr });
-    const abi_size = value_ty.abiSize(self.target.*);
+    const abi_size = value_ty.abiSize(mod);
 
     switch (ptr) {
         .none => unreachable,
@@ -4069,10 +4091,11 @@ fn airStructFieldPtrIndex(self: *Self, inst: Air.Inst.Index, index: u8) !void {
 
 fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32) !MCValue {
     return if (self.liveness.isUnused(inst)) .dead else result: {
+        const mod = self.bin_file.options.module.?;
         const mcv = try self.resolveInst(operand);
         const ptr_ty = self.air.typeOf(operand);
         const struct_ty = ptr_ty.childType();
-        const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, self.target.*));
+        const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod));
         switch (mcv) {
             .ptr_stack_offset => |off| {
                 break :result MCValue{ .ptr_stack_offset = off - struct_field_offset };
@@ -4093,10 +4116,11 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
     const operand = extra.struct_operand;
     const index = extra.field_index;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
+        const mod = self.bin_file.options.module.?;
         const mcv = try self.resolveInst(operand);
         const struct_ty = self.air.typeOf(operand);
         const struct_field_ty = struct_ty.structFieldType(index);
-        const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, self.target.*));
+        const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod));
 
         switch (mcv) {
             .dead, .unreach => unreachable,
@@ -4142,12 +4166,13 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
     const extra = self.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const field_ptr = try self.resolveInst(extra.field_ptr);
         const struct_ty = self.air.getRefType(ty_pl.ty).childType();
-        const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(extra.field_index, self.target.*));
+        const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(extra.field_index, mod));
         switch (field_ptr) {
             .ptr_stack_offset => |off| {
                 break :result MCValue{ .ptr_stack_offset = off + struct_field_offset };
@@ -4223,8 +4248,9 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
     const extra = self.air.extraData(Air.Call, pl_op.payload);
     const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]);
     const ty = self.air.typeOf(callee);
+    const mod = self.bin_file.options.module.?;
 
-    const fn_ty = switch (ty.zigTypeTag()) {
+    const fn_ty = switch (ty.zigTypeTag(mod)) {
         .Fn => ty,
         .Pointer => ty.childType(),
         else => unreachable,
@@ -4246,8 +4272,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
     if (info.return_value == .stack_offset) {
         log.debug("airCall: return by reference", .{});
         const ret_ty = fn_ty.fnReturnType();
-        const ret_abi_size = @intCast(u32, ret_ty.abiSize(self.target.*));
-        const ret_abi_align = @intCast(u32, ret_ty.abiAlignment(self.target.*));
+        const ret_abi_size = @intCast(u32, ret_ty.abiSize(mod));
+        const ret_abi_align = @intCast(u32, ret_ty.abiAlignment(mod));
         const stack_offset = try self.allocMem(ret_abi_size, ret_abi_align, inst);
 
         const ret_ptr_reg = self.registerAlias(.x0, Type.usize);
@@ -4289,8 +4315,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
 
     // Due to incremental compilation, how function calls are generated depends
     // on linking.
-    const mod = self.bin_file.options.module.?;
-    if (self.air.value(callee)) |func_value| {
+    if (self.air.value(callee, mod)) |func_value| {
         if (func_value.castTag(.function)) |func_payload| {
             const func = func_payload.data;
 
@@ -4369,7 +4394,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
             return self.fail("TODO implement calling bitcasted functions", .{});
         }
     } else {
-        assert(ty.zigTypeTag() == .Pointer);
+        assert(ty.zigTypeTag(mod) == .Pointer);
         const mcv = try self.resolveInst(callee);
         try self.genSetReg(ty, .x30, mcv);
@@ -4410,11 +4435,12 @@ fn airRet(self: *Self, inst: Air.Inst.Index) !void {
     const un_op = self.air.instructions.items(.data)[inst].un_op;
     const operand = try self.resolveInst(un_op);
     const ret_ty = self.fn_type.fnReturnType();
+    const mod = self.bin_file.options.module.?;
 
     switch (self.ret_mcv) {
         .none => {},
         .immediate => {
-            assert(ret_ty.isError());
+            assert(ret_ty.isError(mod));
         },
         .register => |reg| {
             // Return result by value
@@ -4465,8 +4491,9 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void {
         // location.
         const op_inst = Air.refToIndex(un_op).?;
         if (self.air.instructions.items(.tag)[op_inst] != .ret_ptr) {
-            const abi_size = @intCast(u32, ret_ty.abiSize(self.target.*));
-            const abi_align = ret_ty.abiAlignment(self.target.*);
+            const mod = self.bin_file.options.module.?;
+            const abi_size = @intCast(u32, ret_ty.abiSize(mod));
+            const abi_align = ret_ty.abiAlignment(mod);
 
             const offset = try self.allocMem(abi_size, abi_align, null);
@@ -4501,21 +4528,21 @@ fn cmp(
     lhs_ty: Type,
     op: math.CompareOperator,
 ) !MCValue {
-    var int_buffer: Type.Payload.Bits = undefined;
-    const int_ty = switch (lhs_ty.zigTypeTag()) {
+    const mod = self.bin_file.options.module.?;
+    const int_ty = switch (lhs_ty.zigTypeTag(mod)) {
         .Optional => blk: {
             var opt_buffer: Type.Payload.ElemType = undefined;
             const payload_ty = lhs_ty.optionalChild(&opt_buffer);
-            if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+            if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
                 break :blk Type.initTag(.u1);
-            } else if (lhs_ty.isPtrLikeOptional()) {
+            } else if (lhs_ty.isPtrLikeOptional(mod)) {
                 break :blk Type.usize;
             } else {
                 return self.fail("TODO ARM cmp non-pointer optionals", .{});
             }
         },
         .Float => return self.fail("TODO ARM cmp floats", .{}),
-        .Enum => lhs_ty.intTagType(&int_buffer),
+        .Enum => lhs_ty.intTagType(),
         .Int => lhs_ty,
         .Bool => Type.initTag(.u1),
         .Pointer => Type.usize,
@@ -4523,7 +4550,7 @@ fn cmp(
         else => unreachable,
     };
 
-    const int_info = int_ty.intInfo(self.target.*);
+    const int_info = int_ty.intInfo(mod);
     if (int_info.bits <= 64) {
         try self.spillCompareFlagsIfOccupied();
 
@@ -4687,8 +4714,8 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
     // whether it needs to be spilled in the branches
     if (self.liveness.operandDies(inst, 0)) {
         const op_int = @enumToInt(pl_op.operand);
-        if (op_int >= Air.Inst.Ref.typed_value_map.len) {
-            const op_index = @intCast(Air.Inst.Index, op_int - Air.Inst.Ref.typed_value_map.len);
+        if (op_int >= Air.ref_start_index) {
+            const op_index = @intCast(Air.Inst.Index, op_int - Air.ref_start_index);
             self.processDeath(op_index);
         }
     }
@@ -4819,13 +4846,14 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn isNull(self: *Self, operand_bind: ReadArg.Bind, operand_ty: Type) !MCValue {
-    const sentinel: struct { ty: Type, bind: ReadArg.Bind } = if (!operand_ty.isPtrLikeOptional()) blk: {
+    const mod = self.bin_file.options.module.?;
+    const sentinel: struct { ty: Type, bind: ReadArg.Bind } = if (!operand_ty.isPtrLikeOptional(mod)) blk: {
         var buf: Type.Payload.ElemType = undefined;
         const payload_ty = operand_ty.optionalChild(&buf);
-        if (!payload_ty.hasRuntimeBitsIgnoreComptime())
+        if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod))
             break :blk .{ .ty = operand_ty, .bind = operand_bind };
 
-        const offset = @intCast(u32, payload_ty.abiSize(self.target.*));
+        const offset = @intCast(u32, payload_ty.abiSize(mod));
         const operand_mcv = try operand_bind.resolveToMcv(self);
         const new_mcv: MCValue = switch (operand_mcv) {
             .register => |source_reg| new: {
@@ -4838,7 +4866,7 @@ fn isNull(self: *Self, operand_bind: ReadArg.Bind, operand_ty: Type) !MCValue {
                     try self.genSetReg(payload_ty, dest_reg, operand_mcv);
                 } else {
                     _ = try self.addInst(.{
-                        .tag = if (payload_ty.isSignedInt())
+                        .tag = if (payload_ty.isSignedInt(mod))
                             Mir.Inst.Tag.asr_immediate
                         else
                             Mir.Inst.Tag.lsr_immediate,
@@ -5210,9 +5238,10 @@ fn airBr(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void {
+    const mod = self.bin_file.options.module.?;
     const block_data = self.blocks.getPtr(block).?;
 
-    if (self.air.typeOf(operand).hasRuntimeBits()) {
+    if (self.air.typeOf(operand).hasRuntimeBits(mod)) {
         const operand_mcv = try self.resolveInst(operand);
         const block_mcv = block_data.mcv;
         if (block_mcv == .none) {
@@ -5386,7 +5415,8 @@ fn setRegOrMem(self: *Self, ty: Type, loc: MCValue, val: MCValue) !void {
 }
 
 fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void {
-    const abi_size = @intCast(u32, ty.abiSize(self.target.*));
+    const mod = self.bin_file.options.module.?;
+    const abi_size = @intCast(u32, ty.abiSize(mod));
     switch (mcv) {
         .dead => unreachable,
         .unreach, .none => return, // Nothing to do.
@@ -5445,7 +5475,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
                 try self.genSetStack(wrapped_ty, stack_offset, .{ .register = rwo.reg });
 
                 const overflow_bit_ty = ty.structFieldType(1);
-                const overflow_bit_offset = @intCast(u32, ty.structFieldOffset(1, self.target.*));
+                const overflow_bit_offset = @intCast(u32, ty.structFieldOffset(1, mod));
                 const raw_cond_reg = try self.register_manager.allocReg(null, gp);
                 const cond_reg = self.registerAlias(raw_cond_reg, overflow_bit_ty);
 
@@ -5559,6 +5589,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
 }
 
 fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void {
+    const mod = self.bin_file.options.module.?;
     switch (mcv) {
         .dead => unreachable,
         .unreach, .none => return, // Nothing to do.
@@ -5669,13 +5700,13 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
             try self.genLdrRegister(reg, reg.toX(), ty);
         },
         .stack_offset => |off| {
-            const abi_size = ty.abiSize(self.target.*);
+            const abi_size = ty.abiSize(mod);
 
             switch (abi_size) {
                 1, 2, 4, 8 => {
                     const tag: Mir.Inst.Tag = switch (abi_size) {
-                        1 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsb_stack else .ldrb_stack,
-                        2 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsh_stack else .ldrh_stack,
+                        1 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsb_stack else .ldrb_stack,
+                        2 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsh_stack else .ldrh_stack,
                         4, 8 => .ldr_stack,
                         else => unreachable, // unexpected abi size
                     };
@@ -5693,13 +5724,13 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
             }
         },
         .stack_argument_offset => |off| {
-            const abi_size = ty.abiSize(self.target.*);
+            const abi_size = ty.abiSize(mod);
 
             switch (abi_size) {
                 1, 2, 4, 8 => {
                     const tag: Mir.Inst.Tag = switch (abi_size) {
-                        1 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsb_stack_argument else .ldrb_stack_argument,
-                        2 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsh_stack_argument else .ldrh_stack_argument,
+                        1 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsb_stack_argument else .ldrb_stack_argument,
+                        2 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsh_stack_argument else .ldrh_stack_argument,
                         4, 8 => .ldr_stack_argument,
                         else => unreachable, // unexpected abi size
                     };
@@ -5720,7 +5751,8 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
 }
 
 fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void {
-    const abi_size = @intCast(u32, ty.abiSize(self.target.*));
+    const mod = self.bin_file.options.module.?;
+    const abi_size = @intCast(u32, ty.abiSize(mod));
     switch (mcv) {
         .dead => unreachable,
         .none, .unreach => return,
             if (!self.wantSafety()) return; // The already existing value will do just fine.
             // TODO Upgrade this to a memset call when we have that available.
-            switch (ty.abiSize(self.target.*)) {
+            switch (ty.abiSize(mod)) {
                 1 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaa }),
                 2 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaa }),
                 4 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaaaaaa }),
@@ -6087,14 +6119,15 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airTry(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const pl_op = self.air.instructions.items(.data)[inst].pl_op;
     const extra = self.air.extraData(Air.Try, pl_op.payload);
     const body = self.air.extra[extra.end..][0..extra.data.body_len];
     const result: MCValue = result: {
         const error_union_bind: ReadArg.Bind = .{ .inst = pl_op.operand };
         const error_union_ty = self.air.typeOf(pl_op.operand);
-        const error_union_size = @intCast(u32, error_union_ty.abiSize(self.target.*));
-        const error_union_align = error_union_ty.abiAlignment(self.target.*);
+        const error_union_size = @intCast(u32, error_union_ty.abiSize(mod));
+        const error_union_align = error_union_ty.abiAlignment(mod);
 
         // The error union will die in the body. However, we need the
         // error union after the body in order to extract the payload
@@ -6123,22 +6156,18 @@ fn airTryPtr(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue {
-    // First section of indexes correspond to a set number of constant values.
-    const ref_int = @enumToInt(inst);
-    if (ref_int < Air.Inst.Ref.typed_value_map.len) {
-        const tv = Air.Inst.Ref.typed_value_map[ref_int];
-        if (!tv.ty.hasRuntimeBitsIgnoreComptime() and !tv.ty.isError()) {
-            return MCValue{ .none = {} };
-        }
-        return self.genTypedValue(tv);
-    }
+    const mod = self.bin_file.options.module.?;
 
     // If the type has no codegen bits, no need to store it.
     const inst_ty = self.air.typeOf(inst);
-    if (!inst_ty.hasRuntimeBitsIgnoreComptime() and !inst_ty.isError())
+    if (!inst_ty.hasRuntimeBitsIgnoreComptime(mod) and !inst_ty.isError(mod))
         return MCValue{ .none = {} };
 
-    const inst_index = @intCast(Air.Inst.Index, ref_int - Air.Inst.Ref.typed_value_map.len);
+    const inst_index = Air.refToIndex(inst) orelse return self.genTypedValue(.{
+        .ty = inst_ty,
+        .val = self.air.value(inst, mod).?,
+    });
+
     switch (self.air.instructions.items(.tag)[inst_index]) {
         .constant => {
             // Constants have static lifetimes, so they are always memoized in the outer most table.
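After this hunk a backend no longer indexes into a constant table: any Ref that does not decode to an instruction index is an interned value and is materialized on the spot. A condensed sketch of the new flow; getResolvedInstValue stands in here for the backend's existing per-instruction lookup and is an assumption, not part of the hunk:

    fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue {
        const mod = self.bin_file.options.module.?;
        const ty = self.air.typeOf(ref);
        // Comptime-only values produce no machine code at all.
        if (!ty.hasRuntimeBitsIgnoreComptime(mod) and !ty.isError(mod))
            return MCValue{ .none = {} };
        // Not an instruction index => an interned constant; lower it now.
        const inst_index = Air.refToIndex(ref) orelse return self.genTypedValue(.{
            .ty = ty,
            .val = self.air.value(ref, mod).?,
        });
        // Otherwise reuse the MCValue computed when the instruction ran.
        return self.getResolvedInstValue(inst_index);
    }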
@@ -6222,6 +6251,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
     errdefer self.gpa.free(result.args);
 
     const ret_ty = fn_ty.fnReturnType();
+    const mod = self.bin_file.options.module.?;
 
     switch (cc) {
         .Naked => {
@@ -6236,14 +6266,14 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
             var ncrn: usize = 0; // Next Core Register Number
             var nsaa: u32 = 0; // Next stacked argument address
 
-            if (ret_ty.zigTypeTag() == .NoReturn) {
+            if (ret_ty.zigTypeTag(mod) == .NoReturn) {
                 result.return_value = .{ .unreach = {} };
-            } else if (!ret_ty.hasRuntimeBitsIgnoreComptime() and !ret_ty.isError()) {
+            } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod) and !ret_ty.isError(mod)) {
                 result.return_value = .{ .none = {} };
             } else {
-                const ret_ty_size = @intCast(u32, ret_ty.abiSize(self.target.*));
+                const ret_ty_size = @intCast(u32, ret_ty.abiSize(mod));
                 if (ret_ty_size == 0) {
-                    assert(ret_ty.isError());
+                    assert(ret_ty.isError(mod));
                     result.return_value = .{ .immediate = 0 };
                 } else if (ret_ty_size <= 8) {
                     result.return_value = .{ .register = self.registerAlias(c_abi_int_return_regs[0], ret_ty) };
@@ -6253,7 +6283,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
             }
 
             for (param_types, 0..) |ty, i| {
-                const param_size = @intCast(u32, ty.abiSize(self.target.*));
+                const param_size = @intCast(u32, ty.abiSize(mod));
                 if (param_size == 0) {
                     result.args[i] = .{ .none = {} };
                     continue;
@@ -6261,7 +6291,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
 
                 // We round up NCRN only for non-Apple platforms which allow the 16-byte aligned
                 // values to spread across odd-numbered registers.
-                if (ty.abiAlignment(self.target.*) == 16 and !self.target.isDarwin()) {
+                if (ty.abiAlignment(mod) == 16 and !self.target.isDarwin()) {
                     // Round up NCRN to the next even number
                     ncrn += ncrn % 2;
                 }
@@ -6279,7 +6309,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
                     ncrn = 8;
                     // TODO Apple allows the arguments on the stack to be non-8-byte aligned provided
                     // that the entire stack space consumed by the arguments is 8-byte aligned.
-                    if (ty.abiAlignment(self.target.*) == 8) {
+                    if (ty.abiAlignment(mod) == 8) {
                         if (nsaa % 8 != 0) {
                             nsaa += 8 - (nsaa % 8);
                         }
@@ -6294,14 +6324,14 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
             result.stack_align = 16;
         },
         .Unspecified => {
-            if (ret_ty.zigTypeTag() == .NoReturn) {
+            if (ret_ty.zigTypeTag(mod) == .NoReturn) {
                 result.return_value = .{ .unreach = {} };
-            } else if (!ret_ty.hasRuntimeBitsIgnoreComptime() and !ret_ty.isError()) {
+            } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod) and !ret_ty.isError(mod)) {
                 result.return_value = .{ .none = {} };
             } else {
-                const ret_ty_size = @intCast(u32, ret_ty.abiSize(self.target.*));
+                const ret_ty_size = @intCast(u32, ret_ty.abiSize(mod));
                 if (ret_ty_size == 0) {
-                    assert(ret_ty.isError());
+                    assert(ret_ty.isError(mod));
                     result.return_value = .{ .immediate = 0 };
                 } else if (ret_ty_size <= 8) {
                     result.return_value = .{ .register = self.registerAlias(.x0, ret_ty) };
@@ -6318,9 +6348,9 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
 
             var stack_offset: u32 = 0;
             for (param_types, 0..) |ty, i| {
-                if (ty.abiSize(self.target.*) > 0) {
-                    const param_size = @intCast(u32, ty.abiSize(self.target.*));
-                    const param_alignment = ty.abiAlignment(self.target.*);
+                if (ty.abiSize(mod) > 0) {
+                    const param_size = @intCast(u32, ty.abiSize(mod));
+                    const param_alignment = ty.abiAlignment(mod);
 
                     stack_offset = std.mem.alignForwardGeneric(u32, stack_offset, param_alignment);
                     result.args[i] = .{ .stack_argument_offset = stack_offset };
@@ -6371,7 +6401,8 @@ fn parseRegName(name: []const u8) ?Register {
 }
 
 fn registerAlias(self: *Self, reg: Register, ty: Type) Register {
-    const abi_size = ty.abiSize(self.target.*);
+    const mod = self.bin_file.options.module.?;
+    const abi_size = ty.abiSize(mod);
 
     switch (reg.class()) {
         .general_purpose => {
diff --git a/src/arch/aarch64/abi.zig b/src/arch/aarch64/abi.zig
index 0c48f33ea1..cbfd6a1171 100644
--- a/src/arch/aarch64/abi.zig
+++ b/src/arch/aarch64/abi.zig
@@ -4,6 +4,7 @@
 const bits = @import("bits.zig");
 const Register = bits.Register;
 const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager;
 const Type = @import("../../type.zig").Type;
+const Module = @import("../../Module.zig");
 
 pub const Class = union(enum) {
     memory,
@@ -14,40 +15,40 @@
 };
 
 /// For `float_array` the second element will be the amount of floats.
-pub fn classifyType(ty: Type, target: std.Target) Class {
-    std.debug.assert(ty.hasRuntimeBitsIgnoreComptime());
+pub fn classifyType(ty: Type, mod: *const Module) Class {
+    std.debug.assert(ty.hasRuntimeBitsIgnoreComptime(mod));
 
     var maybe_float_bits: ?u16 = null;
-    switch (ty.zigTypeTag()) {
+    switch (ty.zigTypeTag(mod)) {
         .Struct => {
             if (ty.containerLayout() == .Packed) return .byval;
-            const float_count = countFloats(ty, target, &maybe_float_bits);
+            const float_count = countFloats(ty, mod, &maybe_float_bits);
             if (float_count <= sret_float_count) return .{ .float_array = float_count };
 
-            const bit_size = ty.bitSize(target);
+            const bit_size = ty.bitSize(mod);
             if (bit_size > 128) return .memory;
             if (bit_size > 64) return .double_integer;
             return .integer;
         },
         .Union => {
             if (ty.containerLayout() == .Packed) return .byval;
-            const float_count = countFloats(ty, target, &maybe_float_bits);
+            const float_count = countFloats(ty, mod, &maybe_float_bits);
             if (float_count <= sret_float_count) return .{ .float_array = float_count };
 
-            const bit_size = ty.bitSize(target);
+            const bit_size = ty.bitSize(mod);
             if (bit_size > 128) return .memory;
             if (bit_size > 64) return .double_integer;
             return .integer;
         },
         .Int, .Enum, .ErrorSet, .Float, .Bool => return .byval,
         .Vector => {
-            const bit_size = ty.bitSize(target);
+            const bit_size = ty.bitSize(mod);
             // TODO is this controlled by a cpu feature?
             if (bit_size > 128) return .memory;
             return .byval;
         },
         .Optional => {
-            std.debug.assert(ty.isPtrLikeOptional());
+            std.debug.assert(ty.isPtrLikeOptional(mod));
             return .byval;
         },
         .Pointer => {
@@ -73,14 +74,15 @@ pub fn classifyType(ty: Type, target: std.Target) Class {
 }
 
 const sret_float_count = 4;
-fn countFloats(ty: Type, target: std.Target, maybe_float_bits: *?u16) u8 {
+fn countFloats(ty: Type, mod: *const Module, maybe_float_bits: *?u16) u8 {
+    const target = mod.getTarget();
     const invalid = std.math.maxInt(u8);
-    switch (ty.zigTypeTag()) {
+    switch (ty.zigTypeTag(mod)) {
         .Union => {
             const fields = ty.unionFields();
             var max_count: u8 = 0;
             for (fields.values()) |field| {
-                const field_count = countFloats(field.ty, target, maybe_float_bits);
+                const field_count = countFloats(field.ty, mod, maybe_float_bits);
                 if (field_count == invalid) return invalid;
                 if (field_count > max_count) max_count = field_count;
                 if (max_count > sret_float_count) return invalid;
@@ -93,7 +95,7 @@ fn countFloats(ty: Type, target: std.Target, maybe_float_bits: *?u16) u8 {
             var i: u32 = 0;
             while (i < fields_len) : (i += 1) {
                 const field_ty = ty.structFieldType(i);
-                const field_count = countFloats(field_ty, target, maybe_float_bits);
+                const field_count = countFloats(field_ty, mod, maybe_float_bits);
                 if (field_count == invalid) return invalid;
                 count += field_count;
                 if (count > sret_float_count) return invalid;
@@ -113,12 +115,12 @@ fn countFloats(ty: Type, target: std.Target, maybe_float_bits: *?u16) u8 {
     }
 }
 
-pub fn getFloatArrayType(ty: Type) ?Type {
-    switch (ty.zigTypeTag()) {
+pub fn getFloatArrayType(ty: Type, mod: *const Module) ?Type {
+    switch (ty.zigTypeTag(mod)) {
         .Union => {
             const fields = ty.unionFields();
             for (fields.values()) |field| {
-                if (getFloatArrayType(field.ty)) |some| return some;
+                if (getFloatArrayType(field.ty, mod)) |some| return some;
             }
             return null;
         },
@@ -127,7 +129,7 @@ pub fn getFloatArrayType(ty: Type) ?Type {
             var i: u32 = 0;
             while (i < fields_len) : (i += 1) {
                 const field_ty = ty.structFieldType(i);
-                if (getFloatArrayType(field_ty)) |some| return some;
+                if (getFloatArrayType(field_ty, mod)) |some| return some;
             }
             return null;
         },
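The abi.zig change is the same migration in miniature: classification now receives the Module, and the raw std.Target is recovered only where a query still needs it (countFloats does exactly that via mod.getTarget()). A hypothetical call site, for illustration only; arg_ty and mod are assumed to be in scope:

    // Decide how an argument of type arg_ty travels through the C ABI.
    const class = abi.classifyType(arg_ty, mod);
    switch (class) {
        .memory => {}, // caller passes a pointer to a stack copy
        .byval, .integer, .double_integer => {}, // passed directly in registers
        .float_array => |n| _ = n, // up to four floats in vector registers
    }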
diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig
index bdc1627bd6..4c7151cd47 100644
--- a/src/arch/arm/CodeGen.zig
+++ b/src/arch/arm/CodeGen.zig
@@ -520,8 +520,9 @@ fn gen(self: *Self) !void {
 
                     const ty = self.air.typeOfIndex(inst);
 
-                    const abi_size = @intCast(u32, ty.abiSize(self.target.*));
-                    const abi_align = ty.abiAlignment(self.target.*);
+                    const mod = self.bin_file.options.module.?;
+                    const abi_size = @intCast(u32, ty.abiSize(mod));
+                    const abi_align = ty.abiAlignment(mod);
                     const stack_offset = try self.allocMem(abi_size, abi_align, inst);
                     try self.genSetStack(ty, stack_offset, MCValue{ .register = reg });
 
@@ -937,8 +938,8 @@ fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Live
         tomb_bits >>= 1;
         if (!dies) continue;
         const op_int = @enumToInt(op);
-        if (op_int < Air.Inst.Ref.typed_value_map.len) continue;
-        const op_index = @intCast(Air.Inst.Index, op_int - Air.Inst.Ref.typed_value_map.len);
+        if (op_int < Air.ref_start_index) continue;
+        const op_index = @intCast(Air.Inst.Index, op_int - Air.ref_start_index);
         self.processDeath(op_index);
     }
     const is_used = @truncate(u1, tomb_bits) == 0;
@@ -1006,9 +1007,10 @@ fn allocMem(
 
 /// Use a pointer instruction as the basis for allocating stack memory.
 fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
+    const mod = self.bin_file.options.module.?;
     const elem_ty = self.air.typeOfIndex(inst).elemType();
 
-    if (!elem_ty.hasRuntimeBits()) {
+    if (!elem_ty.hasRuntimeBits(mod)) {
         // As this stack item will never be dereferenced at runtime,
         // return the stack offset 0. Stack offset 0 will be where all
        // zero-sized stack allocations live as non-zero-sized
@@ -1016,22 +1018,21 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
         return @as(u32, 0);
     }
 
-    const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) orelse {
-        const mod = self.bin_file.options.module.?;
+    const abi_size = math.cast(u32, elem_ty.abiSize(mod)) orelse {
        return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)});
    };
    // TODO swap this for inst.ty.ptrAlign
-    const abi_align = elem_ty.abiAlignment(self.target.*);
+    const abi_align = elem_ty.abiAlignment(mod);
    return self.allocMem(abi_size, abi_align, inst);
}

 fn allocRegOrMem(self: *Self, elem_ty: Type, reg_ok: bool, maybe_inst: ?Air.Inst.Index) !MCValue {
-    const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) orelse {
-        const mod = self.bin_file.options.module.?;
+    const mod = self.bin_file.options.module.?;
+    const abi_size = math.cast(u32, elem_ty.abiSize(mod)) orelse {
         return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)});
     };
-    const abi_align = elem_ty.abiAlignment(self.target.*);
+    const abi_align = elem_ty.abiAlignment(mod);
 
     if (reg_ok) {
         // Make sure the type can fit in a register before we try to allocate one.
@@ -1158,10 +1159,11 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void {
     const operand_ty = self.air.typeOf(ty_op.operand);
     const dest_ty = self.air.typeOfIndex(inst);
 
-    const operand_abi_size = operand_ty.abiSize(self.target.*);
-    const dest_abi_size = dest_ty.abiSize(self.target.*);
-    const info_a = operand_ty.intInfo(self.target.*);
-    const info_b = dest_ty.intInfo(self.target.*);
+    const mod = self.bin_file.options.module.?;
+    const operand_abi_size = operand_ty.abiSize(mod);
+    const dest_abi_size = dest_ty.abiSize(mod);
+    const info_a = operand_ty.intInfo(mod);
+    const info_b = dest_ty.intInfo(mod);
 
     const dst_mcv: MCValue = blk: {
         if (info_a.bits == info_b.bits) {
@@ -1215,8 +1217,9 @@ fn trunc(
     operand_ty: Type,
     dest_ty: Type,
 ) !MCValue {
-    const info_a = operand_ty.intInfo(self.target.*);
-    const info_b = dest_ty.intInfo(self.target.*);
+    const mod = self.bin_file.options.module.?;
+    const info_a = operand_ty.intInfo(mod);
+    const info_b = dest_ty.intInfo(mod);
 
     if (info_b.bits <= 32) {
         if (info_a.bits > 32) {
@@ -1278,6 +1281,7 @@ fn airBoolToInt(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airNot(self: *Self, inst: Air.Inst.Index) !void {
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+    const mod = self.bin_file.options.module.?;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const operand_bind: ReadArg.Bind = .{ .inst = ty_op.operand };
         const operand_ty = self.air.typeOf(ty_op.operand);
@@ -1286,7 +1290,7 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void {
             .unreach => unreachable,
             .cpsr_flags => |cond| break :result MCValue{ .cpsr_flags = cond.negate() },
             else => {
-                switch (operand_ty.zigTypeTag()) {
+                switch (operand_ty.zigTypeTag(mod)) {
                     .Bool => {
                         var op_reg: Register = undefined;
                         var dest_reg: Register = undefined;
@@ -1319,7 +1323,7 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void {
                     },
                     .Vector => return self.fail("TODO bitwise not for vectors", .{}),
.Int => { - const int_info = operand_ty.intInfo(self.target.*); + const int_info = operand_ty.intInfo(mod); if (int_info.bits <= 32) { var op_reg: Register = undefined; var dest_reg: Register = undefined; @@ -1373,13 +1377,13 @@ fn minMax( rhs_ty: Type, maybe_inst: ?Air.Inst.Index, ) !MCValue { - switch (lhs_ty.zigTypeTag()) { + const mod = self.bin_file.options.module.?; + switch (lhs_ty.zigTypeTag(mod)) { .Float => return self.fail("TODO ARM min/max on floats", .{}), .Vector => return self.fail("TODO ARM min/max on vectors", .{}), .Int => { - const mod = self.bin_file.options.module.?; assert(lhs_ty.eql(rhs_ty, mod)); - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 32) { var lhs_reg: Register = undefined; var rhs_reg: Register = undefined; @@ -1582,6 +1586,7 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void { const tag = self.air.instructions.items(.tag)[inst]; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; + const mod = self.bin_file.options.module.?; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const lhs_bind: ReadArg.Bind = .{ .inst = extra.lhs }; const rhs_bind: ReadArg.Bind = .{ .inst = extra.rhs }; @@ -1589,16 +1594,15 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void { const rhs_ty = self.air.typeOf(extra.rhs); const tuple_ty = self.air.typeOfIndex(inst); - const tuple_size = @intCast(u32, tuple_ty.abiSize(self.target.*)); - const tuple_align = tuple_ty.abiAlignment(self.target.*); - const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, self.target.*)); + const tuple_size = @intCast(u32, tuple_ty.abiSize(mod)); + const tuple_align = tuple_ty.abiAlignment(mod); + const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, mod)); - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO implement add_with_overflow/sub_with_overflow for vectors", .{}), .Int => { - const mod = self.bin_file.options.module.?; assert(lhs_ty.eql(rhs_ty, mod)); - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits < 32) { const stack_offset = try self.allocMem(tuple_size, tuple_align, inst); @@ -1695,6 +1699,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; if (self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ extra.lhs, extra.rhs, .none }); + const mod = self.bin_file.options.module.?; const result: MCValue = result: { const lhs_bind: ReadArg.Bind = .{ .inst = extra.lhs }; const rhs_bind: ReadArg.Bind = .{ .inst = extra.rhs }; @@ -1702,16 +1707,15 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const rhs_ty = self.air.typeOf(extra.rhs); const tuple_ty = self.air.typeOfIndex(inst); - const tuple_size = @intCast(u32, tuple_ty.abiSize(self.target.*)); - const tuple_align = tuple_ty.abiAlignment(self.target.*); - const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, self.target.*)); + const tuple_size = @intCast(u32, tuple_ty.abiSize(mod)); + const tuple_align = tuple_ty.abiAlignment(mod); + const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, mod)); - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO implement 
mul_with_overflow for vectors", .{}), .Int => { - const mod = self.bin_file.options.module.?; assert(lhs_ty.eql(rhs_ty, mod)); - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 16) { const stack_offset = try self.allocMem(tuple_size, tuple_align, inst); @@ -1859,19 +1863,20 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; if (self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ extra.lhs, extra.rhs, .none }); + const mod = self.bin_file.options.module.?; const result: MCValue = result: { const lhs_ty = self.air.typeOf(extra.lhs); const rhs_ty = self.air.typeOf(extra.rhs); const tuple_ty = self.air.typeOfIndex(inst); - const tuple_size = @intCast(u32, tuple_ty.abiSize(self.target.*)); - const tuple_align = tuple_ty.abiAlignment(self.target.*); - const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, self.target.*)); + const tuple_size = @intCast(u32, tuple_ty.abiSize(mod)); + const tuple_align = tuple_ty.abiAlignment(mod); + const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, mod)); - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO implement shl_with_overflow for vectors", .{}), .Int => { - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 32) { const stack_offset = try self.allocMem(tuple_size, tuple_align, inst); @@ -2017,7 +2022,8 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const optional_ty = self.air.typeOfIndex(inst); - const abi_size = @intCast(u32, optional_ty.abiSize(self.target.*)); + const mod = self.bin_file.options.module.?; + const abi_size = @intCast(u32, optional_ty.abiSize(mod)); // Optional with a zero-bit payload type is just a boolean true if (abi_size == 1) { @@ -2036,16 +2042,17 @@ fn errUnionErr( error_union_ty: Type, maybe_inst: ?Air.Inst.Index, ) !MCValue { + const mod = self.bin_file.options.module.?; const err_ty = error_union_ty.errorUnionSet(); const payload_ty = error_union_ty.errorUnionPayload(); if (err_ty.errorSetIsEmpty()) { return MCValue{ .immediate = 0 }; } - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return try error_union_bind.resolveToMcv(self); } - const err_offset = @intCast(u32, errUnionErrorOffset(payload_ty, self.target.*)); + const err_offset = @intCast(u32, errUnionErrorOffset(payload_ty, mod)); switch (try error_union_bind.resolveToMcv(self)) { .register => { var operand_reg: Register = undefined; @@ -2067,7 +2074,7 @@ fn errUnionErr( ); const err_bit_offset = err_offset * 8; - const err_bit_size = @intCast(u32, err_ty.abiSize(self.target.*)) * 8; + const err_bit_size = @intCast(u32, err_ty.abiSize(mod)) * 8; _ = try self.addInst(.{ .tag = .ubfx, // errors are unsigned integers @@ -2112,16 +2119,17 @@ fn errUnionPayload( error_union_ty: Type, maybe_inst: ?Air.Inst.Index, ) !MCValue { + const mod = self.bin_file.options.module.?; const err_ty = error_union_ty.errorUnionSet(); const payload_ty = error_union_ty.errorUnionPayload(); if (err_ty.errorSetIsEmpty()) { return try error_union_bind.resolveToMcv(self); } - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + 
if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return MCValue.none; } - const payload_offset = @intCast(u32, errUnionPayloadOffset(payload_ty, self.target.*)); + const payload_offset = @intCast(u32, errUnionPayloadOffset(payload_ty, mod)); switch (try error_union_bind.resolveToMcv(self)) { .register => { var operand_reg: Register = undefined; @@ -2143,10 +2151,10 @@ fn errUnionPayload( ); const payload_bit_offset = payload_offset * 8; - const payload_bit_size = @intCast(u32, payload_ty.abiSize(self.target.*)) * 8; + const payload_bit_size = @intCast(u32, payload_ty.abiSize(mod)) * 8; _ = try self.addInst(.{ - .tag = if (payload_ty.isSignedInt()) Mir.Inst.Tag.sbfx else .ubfx, + .tag = if (payload_ty.isSignedInt(mod)) Mir.Inst.Tag.sbfx else .ubfx, .data = .{ .rr_lsb_width = .{ .rd = dest_reg, .rn = operand_reg, @@ -2221,19 +2229,20 @@ fn airSaveErrReturnTraceIndex(self: *Self, inst: Air.Inst.Index) !void { /// T to E!T fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const error_union_ty = self.air.getRefType(ty_op.ty); const error_ty = error_union_ty.errorUnionSet(); const payload_ty = error_union_ty.errorUnionPayload(); const operand = try self.resolveInst(ty_op.operand); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) break :result operand; + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result operand; - const abi_size = @intCast(u32, error_union_ty.abiSize(self.target.*)); - const abi_align = error_union_ty.abiAlignment(self.target.*); + const abi_size = @intCast(u32, error_union_ty.abiSize(mod)); + const abi_align = error_union_ty.abiAlignment(mod); const stack_offset = @intCast(u32, try self.allocMem(abi_size, abi_align, inst)); - const payload_off = errUnionPayloadOffset(payload_ty, self.target.*); - const err_off = errUnionErrorOffset(payload_ty, self.target.*); + const payload_off = errUnionPayloadOffset(payload_ty, mod); + const err_off = errUnionErrorOffset(payload_ty, mod); try self.genSetStack(payload_ty, stack_offset - @intCast(u32, payload_off), operand); try self.genSetStack(error_ty, stack_offset - @intCast(u32, err_off), .{ .immediate = 0 }); @@ -2244,19 +2253,20 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void { /// E to E!T fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const error_union_ty = self.air.getRefType(ty_op.ty); const error_ty = error_union_ty.errorUnionSet(); const payload_ty = error_union_ty.errorUnionPayload(); const operand = try self.resolveInst(ty_op.operand); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) break :result operand; + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result operand; - const abi_size = @intCast(u32, error_union_ty.abiSize(self.target.*)); - const abi_align = error_union_ty.abiAlignment(self.target.*); + const abi_size = @intCast(u32, error_union_ty.abiSize(mod)); + const abi_align = error_union_ty.abiAlignment(mod); const stack_offset = @intCast(u32, try self.allocMem(abi_size, abi_align, inst)); - const payload_off = errUnionPayloadOffset(payload_ty, self.target.*); - const err_off = errUnionErrorOffset(payload_ty, self.target.*); + const payload_off = 
errUnionPayloadOffset(payload_ty, mod); + const err_off = errUnionErrorOffset(payload_ty, mod); try self.genSetStack(error_ty, stack_offset - @intCast(u32, err_off), operand); try self.genSetStack(payload_ty, stack_offset - @intCast(u32, payload_off), .undef); @@ -2361,7 +2371,8 @@ fn ptrElemVal( maybe_inst: ?Air.Inst.Index, ) !MCValue { const elem_ty = ptr_ty.childType(); - const elem_size = @intCast(u32, elem_ty.abiSize(self.target.*)); + const mod = self.bin_file.options.module.?; + const elem_size = @intCast(u32, elem_ty.abiSize(mod)); switch (elem_size) { 1, 4 => { @@ -2647,7 +2658,8 @@ fn reuseOperand( fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!void { const elem_ty = ptr_ty.elemType(); - const elem_size = @intCast(u32, elem_ty.abiSize(self.target.*)); + const mod = self.bin_file.options.module.?; + const elem_size = @intCast(u32, elem_ty.abiSize(mod)); switch (ptr) { .none => unreachable, @@ -2722,10 +2734,11 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo } fn airLoad(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const elem_ty = self.air.typeOfIndex(inst); const result: MCValue = result: { - if (!elem_ty.hasRuntimeBits()) + if (!elem_ty.hasRuntimeBits(mod)) break :result MCValue.none; const ptr = try self.resolveInst(ty_op.operand); @@ -2734,7 +2747,7 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void { break :result MCValue.dead; const dest_mcv: MCValue = blk: { - const ptr_fits_dest = elem_ty.abiSize(self.target.*) <= 4; + const ptr_fits_dest = elem_ty.abiSize(mod) <= 4; if (ptr_fits_dest and self.reuseOperand(inst, ty_op.operand, 0, ptr)) { // The MCValue that holds the pointer can be re-used as the value. 
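            // (Reuse is only attempted when the element fits in a
            // pointer-sized register, per the ptr_fits_dest check above, and
            // reuseOperand only agrees when the operand dies at this
            // instruction.)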
break :blk ptr; @@ -2750,7 +2763,8 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void { } fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type) InnerError!void { - const elem_size = @intCast(u32, value_ty.abiSize(self.target.*)); + const mod = self.bin_file.options.module.?; + const elem_size = @intCast(u32, value_ty.abiSize(mod)); switch (ptr) { .none => unreachable, @@ -2869,10 +2883,11 @@ fn airStructFieldPtrIndex(self: *Self, inst: Air.Inst.Index, index: u8) !void { fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32) !MCValue { return if (self.liveness.isUnused(inst)) .dead else result: { + const mod = self.bin_file.options.module.?; const mcv = try self.resolveInst(operand); const ptr_ty = self.air.typeOf(operand); const struct_ty = ptr_ty.childType(); - const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, self.target.*)); + const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod)); switch (mcv) { .ptr_stack_offset => |off| { break :result MCValue{ .ptr_stack_offset = off - struct_field_offset }; @@ -2892,10 +2907,11 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { const extra = self.air.extraData(Air.StructField, ty_pl.payload).data; const operand = extra.struct_operand; const index = extra.field_index; + const mod = self.bin_file.options.module.?; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const mcv = try self.resolveInst(operand); const struct_ty = self.air.typeOf(operand); - const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, self.target.*)); + const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod)); const struct_field_ty = struct_ty.structFieldType(index); switch (mcv) { @@ -2959,10 +2975,10 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { ); const field_bit_offset = struct_field_offset * 8; - const field_bit_size = @intCast(u32, struct_field_ty.abiSize(self.target.*)) * 8; + const field_bit_size = @intCast(u32, struct_field_ty.abiSize(mod)) * 8; _ = try self.addInst(.{ - .tag = if (struct_field_ty.isSignedInt()) Mir.Inst.Tag.sbfx else .ubfx, + .tag = if (struct_field_ty.isSignedInt(mod)) Mir.Inst.Tag.sbfx else .ubfx, .data = .{ .rr_lsb_width = .{ .rd = dest_reg, .rn = operand_reg, @@ -2981,17 +2997,18 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { } fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.FieldParentPtr, ty_pl.payload).data; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const field_ptr = try self.resolveInst(extra.field_ptr); const struct_ty = self.air.getRefType(ty_pl.ty).childType(); - if (struct_ty.zigTypeTag() == .Union) { + if (struct_ty.zigTypeTag(mod) == .Union) { return self.fail("TODO implement @fieldParentPtr codegen for unions", .{}); } - const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(extra.field_index, self.target.*)); + const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(extra.field_index, mod)); switch (field_ptr) { .ptr_stack_offset => |off| { break :result MCValue{ .ptr_stack_offset = off + struct_field_offset }; @@ -3375,12 +3392,12 @@ fn addSub( maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue { const mod = self.bin_file.options.module.?; - switch 
(lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Float => return self.fail("TODO ARM binary operations on floats", .{}), .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), .Int => { assert(lhs_ty.eql(rhs_ty, mod)); - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 32) { const lhs_immediate = try lhs_bind.resolveToImmediate(self); const rhs_immediate = try rhs_bind.resolveToImmediate(self); @@ -3431,12 +3448,12 @@ fn mul( maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue { const mod = self.bin_file.options.module.?; - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Float => return self.fail("TODO ARM binary operations on floats", .{}), .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), .Int => { assert(lhs_ty.eql(rhs_ty, mod)); - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 32) { // TODO add optimisations for multiplication // with immediates, for example a * 2 can be @@ -3463,7 +3480,8 @@ fn divFloat( _ = rhs_ty; _ = maybe_inst; - switch (lhs_ty.zigTypeTag()) { + const mod = self.bin_file.options.module.?; + switch (lhs_ty.zigTypeTag(mod)) { .Float => return self.fail("TODO ARM binary operations on floats", .{}), .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), else => unreachable, @@ -3479,12 +3497,12 @@ fn divTrunc( maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue { const mod = self.bin_file.options.module.?; - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Float => return self.fail("TODO ARM binary operations on floats", .{}), .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), .Int => { assert(lhs_ty.eql(rhs_ty, mod)); - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 32) { switch (int_info.signedness) { .signed => { @@ -3522,12 +3540,12 @@ fn divFloor( maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue { const mod = self.bin_file.options.module.?; - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Float => return self.fail("TODO ARM binary operations on floats", .{}), .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), .Int => { assert(lhs_ty.eql(rhs_ty, mod)); - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 32) { switch (int_info.signedness) { .signed => { @@ -3569,7 +3587,8 @@ fn divExact( _ = rhs_ty; _ = maybe_inst; - switch (lhs_ty.zigTypeTag()) { + const mod = self.bin_file.options.module.?; + switch (lhs_ty.zigTypeTag(mod)) { .Float => return self.fail("TODO ARM binary operations on floats", .{}), .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), .Int => return self.fail("TODO ARM div_exact", .{}), @@ -3586,12 +3605,12 @@ fn rem( maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue { const mod = self.bin_file.options.module.?; - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Float => return self.fail("TODO ARM binary operations on floats", .{}), .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), .Int => { assert(lhs_ty.eql(rhs_ty, mod)); - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 32) { switch (int_info.signedness) { .signed => { @@ -3654,7 +3673,8 @@ fn modulo( _ = rhs_ty; _ = maybe_inst; - switch 
(lhs_ty.zigTypeTag()) { + const mod = self.bin_file.options.module.?; + switch (lhs_ty.zigTypeTag(mod)) { .Float => return self.fail("TODO ARM binary operations on floats", .{}), .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), .Int => return self.fail("TODO ARM mod", .{}), @@ -3671,10 +3691,11 @@ fn wrappingArithmetic( rhs_ty: Type, maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue { - switch (lhs_ty.zigTypeTag()) { + const mod = self.bin_file.options.module.?; + switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), .Int => { - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 32) { // Generate an add/sub/mul const result: MCValue = switch (tag) { @@ -3708,12 +3729,12 @@ fn bitwise( rhs_ty: Type, maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue { - switch (lhs_ty.zigTypeTag()) { + const mod = self.bin_file.options.module.?; + switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), .Int => { - const mod = self.bin_file.options.module.?; assert(lhs_ty.eql(rhs_ty, mod)); - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 32) { const lhs_immediate = try lhs_bind.resolveToImmediate(self); const rhs_immediate = try rhs_bind.resolveToImmediate(self); @@ -3753,16 +3774,17 @@ fn shiftExact( rhs_ty: Type, maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue { - switch (lhs_ty.zigTypeTag()) { + const mod = self.bin_file.options.module.?; + switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), .Int => { - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 32) { const rhs_immediate = try rhs_bind.resolveToImmediate(self); const mir_tag: Mir.Inst.Tag = switch (tag) { .shl_exact => .lsl, - .shr_exact => switch (lhs_ty.intInfo(self.target.*).signedness) { + .shr_exact => switch (lhs_ty.intInfo(mod).signedness) { .signed => Mir.Inst.Tag.asr, .unsigned => Mir.Inst.Tag.lsr, }, @@ -3791,10 +3813,11 @@ fn shiftNormal( rhs_ty: Type, maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue { - switch (lhs_ty.zigTypeTag()) { + const mod = self.bin_file.options.module.?; + switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), .Int => { - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 32) { // Generate a shl_exact/shr_exact const result: MCValue = switch (tag) { @@ -3833,7 +3856,8 @@ fn booleanOp( rhs_ty: Type, maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue { - switch (lhs_ty.zigTypeTag()) { + const mod = self.bin_file.options.module.?; + switch (lhs_ty.zigTypeTag(mod)) { .Bool => { const lhs_immediate = try lhs_bind.resolveToImmediate(self); const rhs_immediate = try rhs_bind.resolveToImmediate(self); @@ -3866,9 +3890,9 @@ fn ptrArithmetic( rhs_ty: Type, maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue { - switch (lhs_ty.zigTypeTag()) { + const mod = self.bin_file.options.module.?; + switch (lhs_ty.zigTypeTag(mod)) { .Pointer => { - const mod = self.bin_file.options.module.?; assert(rhs_ty.eql(Type.usize, mod)); const ptr_ty = lhs_ty; @@ -3876,7 +3900,7 @@ fn ptrArithmetic( .One => ptr_ty.childType().childType(), // ptr to array, so get array element type else => ptr_ty.childType(), }; - const elem_size = @intCast(u32, 
elem_ty.abiSize(self.target.*)); + const elem_size = @intCast(u32, elem_ty.abiSize(mod)); const base_tag: Air.Inst.Tag = switch (tag) { .ptr_add => .add, @@ -3903,11 +3927,12 @@ fn ptrArithmetic( } fn genLdrRegister(self: *Self, dest_reg: Register, addr_reg: Register, ty: Type) !void { - const abi_size = ty.abiSize(self.target.*); + const mod = self.bin_file.options.module.?; + const abi_size = ty.abiSize(mod); const tag: Mir.Inst.Tag = switch (abi_size) { - 1 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsb else .ldrb, - 2 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsh else .ldrh, + 1 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsb else .ldrb, + 2 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsh else .ldrh, 3, 4 => .ldr, else => unreachable, }; @@ -3924,7 +3949,7 @@ fn genLdrRegister(self: *Self, dest_reg: Register, addr_reg: Register, ty: Type) } }; const data: Mir.Inst.Data = switch (abi_size) { - 1 => if (ty.isSignedInt()) rr_extra_offset else rr_offset, + 1 => if (ty.isSignedInt(mod)) rr_extra_offset else rr_offset, 2 => rr_extra_offset, 3, 4 => rr_offset, else => unreachable, @@ -3937,7 +3962,8 @@ fn genLdrRegister(self: *Self, dest_reg: Register, addr_reg: Register, ty: Type) } fn genStrRegister(self: *Self, source_reg: Register, addr_reg: Register, ty: Type) !void { - const abi_size = ty.abiSize(self.target.*); + const mod = self.bin_file.options.module.?; + const abi_size = ty.abiSize(mod); const tag: Mir.Inst.Tag = switch (abi_size) { 1 => .strb, @@ -4197,8 +4223,9 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const extra = self.air.extraData(Air.Call, pl_op.payload); const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]); const ty = self.air.typeOf(callee); + const mod = self.bin_file.options.module.?; - const fn_ty = switch (ty.zigTypeTag()) { + const fn_ty = switch (ty.zigTypeTag(mod)) { .Fn => ty, .Pointer => ty.childType(), else => unreachable, @@ -4226,8 +4253,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const r0_lock: ?RegisterLock = if (info.return_value == .stack_offset) blk: { log.debug("airCall: return by reference", .{}); const ret_ty = fn_ty.fnReturnType(); - const ret_abi_size = @intCast(u32, ret_ty.abiSize(self.target.*)); - const ret_abi_align = @intCast(u32, ret_ty.abiAlignment(self.target.*)); + const ret_abi_size = @intCast(u32, ret_ty.abiSize(mod)); + const ret_abi_align = @intCast(u32, ret_ty.abiAlignment(mod)); const stack_offset = try self.allocMem(ret_abi_size, ret_abi_align, inst); var ptr_ty_payload: Type.Payload.ElemType = .{ @@ -4270,7 +4297,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier // Due to incremental compilation, how function calls are generated depends // on linking. 
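    // If the callee is a comptime-known function value, a direct call against
    // its symbol can be emitted; otherwise the callee must be a runtime
    // function pointer and the call goes through a register (see the else
    // branch below). Resolving that value now also requires the module, hence
    // the switch to self.air.value(callee, mod).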
- if (self.air.value(callee)) |func_value| { + if (self.air.value(callee, mod)) |func_value| { if (func_value.castTag(.function)) |func_payload| { const func = func_payload.data; @@ -4294,7 +4321,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier return self.fail("TODO implement calling bitcasted functions", .{}); } } else { - assert(ty.zigTypeTag() == .Pointer); + assert(ty.zigTypeTag(mod) == .Pointer); const mcv = try self.resolveInst(callee); try self.genSetReg(Type.initTag(.usize), .lr, mcv); @@ -4356,11 +4383,12 @@ fn airRet(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); const ret_ty = self.fn_type.fnReturnType(); + const mod = self.bin_file.options.module.?; switch (self.ret_mcv) { .none => {}, .immediate => { - assert(ret_ty.isError()); + assert(ret_ty.isError(mod)); }, .register => |reg| { // Return result by value @@ -4411,8 +4439,9 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void { // location. const op_inst = Air.refToIndex(un_op).?; if (self.air.instructions.items(.tag)[op_inst] != .ret_ptr) { - const abi_size = @intCast(u32, ret_ty.abiSize(self.target.*)); - const abi_align = ret_ty.abiAlignment(self.target.*); + const mod = self.bin_file.options.module.?; + const abi_size = @intCast(u32, ret_ty.abiSize(mod)); + const abi_align = ret_ty.abiAlignment(mod); const offset = try self.allocMem(abi_size, abi_align, null); @@ -4448,21 +4477,21 @@ fn cmp( lhs_ty: Type, op: math.CompareOperator, ) !MCValue { - var int_buffer: Type.Payload.Bits = undefined; - const int_ty = switch (lhs_ty.zigTypeTag()) { + const mod = self.bin_file.options.module.?; + const int_ty = switch (lhs_ty.zigTypeTag(mod)) { .Optional => blk: { var opt_buffer: Type.Payload.ElemType = undefined; const payload_ty = lhs_ty.optionalChild(&opt_buffer); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { break :blk Type.initTag(.u1); - } else if (lhs_ty.isPtrLikeOptional()) { + } else if (lhs_ty.isPtrLikeOptional(mod)) { break :blk Type.usize; } else { return self.fail("TODO ARM cmp non-pointer optionals", .{}); } }, .Float => return self.fail("TODO ARM cmp floats", .{}), - .Enum => lhs_ty.intTagType(&int_buffer), + .Enum => lhs_ty.intTagType(), .Int => lhs_ty, .Bool => Type.initTag(.u1), .Pointer => Type.usize, @@ -4470,7 +4499,7 @@ fn cmp( else => unreachable, }; - const int_info = int_ty.intInfo(self.target.*); + const int_info = int_ty.intInfo(mod); if (int_info.bits <= 32) { try self.spillCompareFlagsIfOccupied(); @@ -4636,8 +4665,8 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void { // whether it needs to be spilled in the branches if (self.liveness.operandDies(inst, 0)) { const op_int = @enumToInt(pl_op.operand); - if (op_int >= Air.Inst.Ref.typed_value_map.len) { - const op_index = @intCast(Air.Inst.Index, op_int - Air.Inst.Ref.typed_value_map.len); + if (op_int >= Air.ref_start_index) { + const op_index = @intCast(Air.Inst.Index, op_int - Air.ref_start_index); self.processDeath(op_index); } } @@ -4772,8 +4801,9 @@ fn isNull( operand_bind: ReadArg.Bind, operand_ty: Type, ) !MCValue { - if (operand_ty.isPtrLikeOptional()) { - assert(operand_ty.abiSize(self.target.*) == 4); + const mod = self.bin_file.options.module.?; + if (operand_ty.isPtrLikeOptional(mod)) { + assert(operand_ty.abiSize(mod) == 4); const imm_bind: ReadArg.Bind = .{ .mcv = .{ .immediate = 0 } }; return self.cmp(operand_bind, imm_bind, 
Type.usize, .eq); @@ -5131,9 +5161,10 @@ fn airBr(self: *Self, inst: Air.Inst.Index) !void { } fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void { + const mod = self.bin_file.options.module.?; const block_data = self.blocks.getPtr(block).?; - if (self.air.typeOf(operand).hasRuntimeBits()) { + if (self.air.typeOf(operand).hasRuntimeBits(mod)) { const operand_mcv = try self.resolveInst(operand); const block_mcv = block_data.mcv; if (block_mcv == .none) { @@ -5301,7 +5332,8 @@ fn setRegOrMem(self: *Self, ty: Type, loc: MCValue, val: MCValue) !void { } fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void { - const abi_size = @intCast(u32, ty.abiSize(self.target.*)); + const mod = self.bin_file.options.module.?; + const abi_size = @intCast(u32, ty.abiSize(mod)); switch (mcv) { .dead => unreachable, .unreach, .none => return, // Nothing to do. @@ -5382,7 +5414,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro try self.genSetStack(wrapped_ty, stack_offset, .{ .register = reg }); const overflow_bit_ty = ty.structFieldType(1); - const overflow_bit_offset = @intCast(u32, ty.structFieldOffset(1, self.target.*)); + const overflow_bit_offset = @intCast(u32, ty.structFieldOffset(1, mod)); const cond_reg = try self.register_manager.allocReg(null, gp); // C flag: movcs reg, #1 @@ -5466,6 +5498,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro } fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void { + const mod = self.bin_file.options.module.?; switch (mcv) { .dead => unreachable, .unreach, .none => return, // Nothing to do. @@ -5640,17 +5673,17 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void }, .stack_offset => |off| { // TODO: maybe addressing from sp instead of fp - const abi_size = @intCast(u32, ty.abiSize(self.target.*)); + const abi_size = @intCast(u32, ty.abiSize(mod)); const tag: Mir.Inst.Tag = switch (abi_size) { - 1 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsb else .ldrb, - 2 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsh else .ldrh, + 1 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsb else .ldrb, + 2 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsh else .ldrh, 3, 4 => .ldr, else => unreachable, }; const extra_offset = switch (abi_size) { - 1 => ty.isSignedInt(), + 1 => ty.isSignedInt(mod), 2 => true, 3, 4 => false, else => unreachable, @@ -5691,11 +5724,11 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void } }, .stack_argument_offset => |off| { - const abi_size = ty.abiSize(self.target.*); + const abi_size = ty.abiSize(mod); const tag: Mir.Inst.Tag = switch (abi_size) { - 1 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsb_stack_argument else .ldrb_stack_argument, - 2 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsh_stack_argument else .ldrh_stack_argument, + 1 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsb_stack_argument else .ldrb_stack_argument, + 2 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsh_stack_argument else .ldrh_stack_argument, 3, 4 => .ldr_stack_argument, else => unreachable, }; @@ -5712,7 +5745,8 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void } fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void { - const abi_size = @intCast(u32, ty.abiSize(self.target.*)); + const mod = self.bin_file.options.module.?; + const abi_size = @intCast(u32, ty.abiSize(mod)); switch (mcv) { .dead => unreachable, .none, 
.unreach => return, @@ -6039,8 +6073,9 @@ fn airTry(self: *Self, inst: Air.Inst.Index) !void { const result: MCValue = result: { const error_union_bind: ReadArg.Bind = .{ .inst = pl_op.operand }; const error_union_ty = self.air.typeOf(pl_op.operand); - const error_union_size = @intCast(u32, error_union_ty.abiSize(self.target.*)); - const error_union_align = error_union_ty.abiAlignment(self.target.*); + const mod = self.bin_file.options.module.?; + const error_union_size = @intCast(u32, error_union_ty.abiSize(mod)); + const error_union_align = error_union_ty.abiAlignment(mod); // The error union will die in the body. However, we need the // error union after the body in order to extract the payload @@ -6069,22 +6104,18 @@ fn airTryPtr(self: *Self, inst: Air.Inst.Index) !void { } fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue { - // First section of indexes correspond to a set number of constant values. - const ref_int = @enumToInt(inst); - if (ref_int < Air.Inst.Ref.typed_value_map.len) { - const tv = Air.Inst.Ref.typed_value_map[ref_int]; - if (!tv.ty.hasRuntimeBitsIgnoreComptime() and !tv.ty.isError()) { - return MCValue{ .none = {} }; - } - return self.genTypedValue(tv); - } + const mod = self.bin_file.options.module.?; // If the type has no codegen bits, no need to store it. const inst_ty = self.air.typeOf(inst); - if (!inst_ty.hasRuntimeBitsIgnoreComptime() and !inst_ty.isError()) + if (!inst_ty.hasRuntimeBitsIgnoreComptime(mod) and !inst_ty.isError(mod)) return MCValue{ .none = {} }; - const inst_index = @intCast(Air.Inst.Index, ref_int - Air.Inst.Ref.typed_value_map.len); + const inst_index = Air.refToIndex(inst) orelse return self.genTypedValue(.{ + .ty = inst_ty, + .val = self.air.value(inst, mod).?, + }); + switch (self.air.instructions.items(.tag)[inst_index]) { .constant => { // Constants have static lifetimes, so they are always memoized in the outer most table. @@ -6166,6 +6197,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { errdefer self.gpa.free(result.args); const ret_ty = fn_ty.fnReturnType(); + const mod = self.bin_file.options.module.?; switch (cc) { .Naked => { @@ -6180,12 +6212,12 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { var ncrn: usize = 0; // Next Core Register Number var nsaa: u32 = 0; // Next stacked argument address - if (ret_ty.zigTypeTag() == .NoReturn) { + if (ret_ty.zigTypeTag(mod) == .NoReturn) { result.return_value = .{ .unreach = {} }; - } else if (!ret_ty.hasRuntimeBitsIgnoreComptime()) { + } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) { result.return_value = .{ .none = {} }; } else { - const ret_ty_size = @intCast(u32, ret_ty.abiSize(self.target.*)); + const ret_ty_size = @intCast(u32, ret_ty.abiSize(mod)); // TODO handle cases where multiple registers are used if (ret_ty_size <= 4) { result.return_value = .{ .register = c_abi_int_return_regs[0] }; @@ -6200,10 +6232,10 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { } for (param_types, 0..) 
|ty, i| { - if (ty.abiAlignment(self.target.*) == 8) + if (ty.abiAlignment(mod) == 8) ncrn = std.mem.alignForwardGeneric(usize, ncrn, 2); - const param_size = @intCast(u32, ty.abiSize(self.target.*)); + const param_size = @intCast(u32, ty.abiSize(mod)); if (std.math.divCeil(u32, param_size, 4) catch unreachable <= 4 - ncrn) { if (param_size <= 4) { result.args[i] = .{ .register = c_abi_int_param_regs[ncrn] }; @@ -6215,7 +6247,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { return self.fail("TODO MCValues split between registers and stack", .{}); } else { ncrn = 4; - if (ty.abiAlignment(self.target.*) == 8) + if (ty.abiAlignment(mod) == 8) nsaa = std.mem.alignForwardGeneric(u32, nsaa, 8); result.args[i] = .{ .stack_argument_offset = nsaa }; @@ -6227,14 +6259,14 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { result.stack_align = 8; }, .Unspecified => { - if (ret_ty.zigTypeTag() == .NoReturn) { + if (ret_ty.zigTypeTag(mod) == .NoReturn) { result.return_value = .{ .unreach = {} }; - } else if (!ret_ty.hasRuntimeBitsIgnoreComptime() and !ret_ty.isError()) { + } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod) and !ret_ty.isError(mod)) { result.return_value = .{ .none = {} }; } else { - const ret_ty_size = @intCast(u32, ret_ty.abiSize(self.target.*)); + const ret_ty_size = @intCast(u32, ret_ty.abiSize(mod)); if (ret_ty_size == 0) { - assert(ret_ty.isError()); + assert(ret_ty.isError(mod)); result.return_value = .{ .immediate = 0 }; } else if (ret_ty_size <= 4) { result.return_value = .{ .register = .r0 }; @@ -6250,9 +6282,9 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { var stack_offset: u32 = 0; for (param_types, 0..) |ty, i| { - if (ty.abiSize(self.target.*) > 0) { - const param_size = @intCast(u32, ty.abiSize(self.target.*)); - const param_alignment = ty.abiAlignment(self.target.*); + if (ty.abiSize(mod) > 0) { + const param_size = @intCast(u32, ty.abiSize(mod)); + const param_alignment = ty.abiAlignment(mod); stack_offset = std.mem.alignForwardGeneric(u32, stack_offset, param_alignment); result.args[i] = .{ .stack_argument_offset = stack_offset }; diff --git a/src/arch/arm/abi.zig b/src/arch/arm/abi.zig index 8b9ec45e24..ca7fff7d08 100644 --- a/src/arch/arm/abi.zig +++ b/src/arch/arm/abi.zig @@ -1,8 +1,10 @@ const std = @import("std"); +const assert = std.debug.assert; const bits = @import("bits.zig"); const Register = bits.Register; const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager; const Type = @import("../../type.zig").Type; +const Module = @import("../../Module.zig"); pub const Class = union(enum) { memory, @@ -22,28 +24,28 @@ pub const Class = union(enum) { pub const Context = enum { ret, arg }; -pub fn classifyType(ty: Type, target: std.Target, ctx: Context) Class { - std.debug.assert(ty.hasRuntimeBitsIgnoreComptime()); +pub fn classifyType(ty: Type, mod: *const Module, ctx: Context) Class { + assert(ty.hasRuntimeBitsIgnoreComptime(mod)); var maybe_float_bits: ?u16 = null; const max_byval_size = 512; - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Struct => { - const bit_size = ty.bitSize(target); + const bit_size = ty.bitSize(mod); if (ty.containerLayout() == .Packed) { if (bit_size > 64) return .memory; return .byval; } if (bit_size > max_byval_size) return .memory; - const float_count = countFloats(ty, target, &maybe_float_bits); + const float_count = countFloats(ty, mod, &maybe_float_bits); if (float_count <= byval_float_count) return 
.byval; const fields = ty.structFieldCount(); var i: u32 = 0; while (i < fields) : (i += 1) { const field_ty = ty.structFieldType(i); - const field_alignment = ty.structFieldAlign(i, target); - const field_size = field_ty.bitSize(target); + const field_alignment = ty.structFieldAlign(i, mod); + const field_size = field_ty.bitSize(mod); if (field_size > 32 or field_alignment > 32) { return Class.arrSize(bit_size, 64); } @@ -51,17 +53,17 @@ pub fn classifyType(ty: Type, target: std.Target, ctx: Context) Class { return Class.arrSize(bit_size, 32); }, .Union => { - const bit_size = ty.bitSize(target); + const bit_size = ty.bitSize(mod); if (ty.containerLayout() == .Packed) { if (bit_size > 64) return .memory; return .byval; } if (bit_size > max_byval_size) return .memory; - const float_count = countFloats(ty, target, &maybe_float_bits); + const float_count = countFloats(ty, mod, &maybe_float_bits); if (float_count <= byval_float_count) return .byval; for (ty.unionFields().values()) |field| { - if (field.ty.bitSize(target) > 32 or field.normalAlignment(target) > 32) { + if (field.ty.bitSize(mod) > 32 or field.normalAlignment(mod) > 32) { return Class.arrSize(bit_size, 64); } } @@ -71,28 +73,28 @@ pub fn classifyType(ty: Type, target: std.Target, ctx: Context) Class { .Int => { // TODO this is incorrect for _BitInt(128) but implementing // this correctly makes implementing compiler-rt impossible. - // const bit_size = ty.bitSize(target); + // const bit_size = ty.bitSize(mod); // if (bit_size > 64) return .memory; return .byval; }, .Enum, .ErrorSet => { - const bit_size = ty.bitSize(target); + const bit_size = ty.bitSize(mod); if (bit_size > 64) return .memory; return .byval; }, .Vector => { - const bit_size = ty.bitSize(target); + const bit_size = ty.bitSize(mod); // TODO is this controlled by a cpu feature? 
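            // (128 bits is the width of a NEON quadword register, which is
            // plausibly what the TODO above is asking about; treat that
            // reading as unverified.)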
if (ctx == .ret and bit_size > 128) return .memory; if (bit_size > 512) return .memory; return .byval; }, .Optional => { - std.debug.assert(ty.isPtrLikeOptional()); + assert(ty.isPtrLikeOptional(mod)); return .byval; }, .Pointer => { - std.debug.assert(!ty.isSlice()); + assert(!ty.isSlice()); return .byval; }, .ErrorUnion, @@ -114,14 +116,15 @@ pub fn classifyType(ty: Type, target: std.Target, ctx: Context) Class { } const byval_float_count = 4; -fn countFloats(ty: Type, target: std.Target, maybe_float_bits: *?u16) u32 { +fn countFloats(ty: Type, mod: *const Module, maybe_float_bits: *?u16) u32 { + const target = mod.getTarget(); const invalid = std.math.maxInt(u32); - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Union => { const fields = ty.unionFields(); var max_count: u32 = 0; for (fields.values()) |field| { - const field_count = countFloats(field.ty, target, maybe_float_bits); + const field_count = countFloats(field.ty, mod, maybe_float_bits); if (field_count == invalid) return invalid; if (field_count > max_count) max_count = field_count; if (max_count > byval_float_count) return invalid; @@ -134,7 +137,7 @@ fn countFloats(ty: Type, target: std.Target, maybe_float_bits: *?u16) u32 { var i: u32 = 0; while (i < fields_len) : (i += 1) { const field_ty = ty.structFieldType(i); - const field_count = countFloats(field_ty, target, maybe_float_bits); + const field_count = countFloats(field_ty, mod, maybe_float_bits); if (field_count == invalid) return invalid; count += field_count; if (count > byval_float_count) return invalid; diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index 5fb07c5fdc..75d5a87bf2 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -755,8 +755,8 @@ fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Live tomb_bits >>= 1; if (!dies) continue; const op_int = @enumToInt(op); - if (op_int < Air.Inst.Ref.typed_value_map.len) continue; - const op_index = @intCast(Air.Inst.Index, op_int - Air.Inst.Ref.typed_value_map.len); + if (op_int < Air.ref_start_index) continue; + const op_index = @intCast(Air.Inst.Index, op_int - Air.ref_start_index); self.processDeath(op_index); } const is_used = @truncate(u1, tomb_bits) == 0; @@ -805,22 +805,22 @@ fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: u32) !u /// Use a pointer instruction as the basis for allocating stack memory. 
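/// The allocation is sized and aligned for the pointee type; the returned
/// u32 is the offset of the new slot in the stack frame.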
fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 { const elem_ty = self.air.typeOfIndex(inst).elemType(); - const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) orelse { - const mod = self.bin_file.options.module.?; + const mod = self.bin_file.options.module.?; + const abi_size = math.cast(u32, elem_ty.abiSize(mod)) orelse { return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)}); }; // TODO swap this for inst.ty.ptrAlign - const abi_align = elem_ty.abiAlignment(self.target.*); + const abi_align = elem_ty.abiAlignment(mod); return self.allocMem(inst, abi_size, abi_align); } fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue { const elem_ty = self.air.typeOfIndex(inst); - const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) orelse { - const mod = self.bin_file.options.module.?; + const mod = self.bin_file.options.module.?; + const abi_size = math.cast(u32, elem_ty.abiSize(mod)) orelse { return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)}); }; - const abi_align = elem_ty.abiAlignment(self.target.*); + const abi_align = elem_ty.abiAlignment(mod); if (abi_align > self.stack_align) self.stack_align = abi_align; @@ -893,10 +893,11 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void { if (self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none }); + const mod = self.bin_file.options.module.?; const operand_ty = self.air.typeOf(ty_op.operand); const operand = try self.resolveInst(ty_op.operand); - const info_a = operand_ty.intInfo(self.target.*); - const info_b = self.air.typeOfIndex(inst).intInfo(self.target.*); + const info_a = operand_ty.intInfo(mod); + const info_b = self.air.typeOfIndex(inst).intInfo(mod); if (info_a.signedness != info_b.signedness) return self.fail("TODO gen intcast sign safety in semantic analysis", .{}); @@ -1068,18 +1069,18 @@ fn binOp( lhs_ty: Type, rhs_ty: Type, ) InnerError!MCValue { + const mod = self.bin_file.options.module.?; switch (tag) { // Arithmetic operations on integers and floats .add, .sub, => { - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Float => return self.fail("TODO binary operations on floats", .{}), .Vector => return self.fail("TODO binary operations on vectors", .{}), .Int => { - const mod = self.bin_file.options.module.?; assert(lhs_ty.eql(rhs_ty, mod)); - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 64) { // TODO immediate operands return try self.binOpRegister(tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty); @@ -1093,14 +1094,14 @@ fn binOp( .ptr_add, .ptr_sub, => { - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Pointer => { const ptr_ty = lhs_ty; const elem_ty = switch (ptr_ty.ptrSize()) { .One => ptr_ty.childType().childType(), // ptr to array, so get array element type else => ptr_ty.childType(), }; - const elem_size = elem_ty.abiSize(self.target.*); + const elem_size = elem_ty.abiSize(mod); if (elem_size == 1) { const base_tag: Air.Inst.Tag = switch (tag) { @@ -1331,10 +1332,11 @@ fn airSaveErrReturnTraceIndex(self: *Self, inst: Air.Inst.Index) !void { fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const mod = self.bin_file.options.module.?; const optional_ty = self.air.typeOfIndex(inst); // Optional with a zero-bit payload type 
is just a boolean true - if (optional_ty.abiSize(self.target.*) == 1) + if (optional_ty.abiSize(mod) == 1) break :result MCValue{ .immediate = 1 }; return self.fail("TODO implement wrap optional for {}", .{self.target.cpu.arch}); @@ -1526,7 +1528,8 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const elem_ty = self.air.typeOfIndex(inst); const result: MCValue = result: { - if (!elem_ty.hasRuntimeBits()) + const mod = self.bin_file.options.module.?; + if (!elem_ty.hasRuntimeBits(mod)) break :result MCValue.none; const ptr = try self.resolveInst(ty_op.operand); @@ -1698,6 +1701,7 @@ fn airFence(self: *Self) !void { } fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier) !void { + const mod = self.bin_file.options.module.?; if (modifier == .always_tail) return self.fail("TODO implement tail calls for riscv64", .{}); const pl_op = self.air.instructions.items(.data)[inst].pl_op; const fn_ty = self.air.typeOf(pl_op.operand); @@ -1736,7 +1740,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier } } - if (self.air.value(callee)) |func_value| { + if (self.air.value(callee, mod)) |func_value| { if (func_value.castTag(.function)) |func_payload| { const func = func_payload.data; const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl); @@ -1828,7 +1832,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { const ty = self.air.typeOf(bin_op.lhs); const mod = self.bin_file.options.module.?; assert(ty.eql(self.air.typeOf(bin_op.rhs), mod)); - if (ty.zigTypeTag() == .ErrorSet) + if (ty.zigTypeTag(mod) == .ErrorSet) return self.fail("TODO implement cmp for errors", .{}); const lhs = try self.resolveInst(bin_op.lhs); @@ -2107,7 +2111,8 @@ fn airBoolOp(self: *Self, inst: Air.Inst.Index) !void { fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void { const block_data = self.blocks.getPtr(block).?; - if (self.air.typeOf(operand).hasRuntimeBits()) { + const mod = self.bin_file.options.module.?; + if (self.air.typeOf(operand).hasRuntimeBits(mod)) { const operand_mcv = try self.resolveInst(operand); const block_mcv = block_data.mcv; if (block_mcv == .none) { @@ -2533,22 +2538,18 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void { } fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue { - // First section of indexes correspond to a set number of constant values. - const ref_int = @enumToInt(inst); - if (ref_int < Air.Inst.Ref.typed_value_map.len) { - const tv = Air.Inst.Ref.typed_value_map[ref_int]; - if (!tv.ty.hasRuntimeBits()) { - return MCValue{ .none = {} }; - } - return self.genTypedValue(tv); - } + const mod = self.bin_file.options.module.?; // If the type has no codegen bits, no need to store it. const inst_ty = self.air.typeOf(inst); - if (!inst_ty.hasRuntimeBits()) + if (!inst_ty.hasRuntimeBits(mod)) return MCValue{ .none = {} }; - const inst_index = @intCast(Air.Inst.Index, ref_int - Air.Inst.Ref.typed_value_map.len); + const inst_index = Air.refToIndex(inst) orelse return self.genTypedValue(.{ + .ty = inst_ty, + .val = self.air.value(inst, mod).?, + }); + switch (self.air.instructions.items(.tag)[inst_index]) { .constant => { // Constants have static lifetimes, so they are always memoized in the outer most table. 
@@ -2630,6 +2631,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { errdefer self.gpa.free(result.args); const ret_ty = fn_ty.fnReturnType(); + const mod = self.bin_file.options.module.?; switch (cc) { .Naked => { @@ -2650,7 +2652,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { const argument_registers = [_]Register{ .a0, .a1, .a2, .a3, .a4, .a5, .a6, .a7 }; for (param_types, 0..) |ty, i| { - const param_size = @intCast(u32, ty.abiSize(self.target.*)); + const param_size = @intCast(u32, ty.abiSize(mod)); if (param_size <= 8) { if (next_register < argument_registers.len) { result.args[i] = .{ .register = argument_registers[next_register] }; @@ -2680,14 +2682,14 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { else => return self.fail("TODO implement function parameters for {} on riscv64", .{cc}), } - if (ret_ty.zigTypeTag() == .NoReturn) { + if (ret_ty.zigTypeTag(mod) == .NoReturn) { result.return_value = .{ .unreach = {} }; - } else if (!ret_ty.hasRuntimeBits()) { + } else if (!ret_ty.hasRuntimeBits(mod)) { result.return_value = .{ .none = {} }; } else switch (cc) { .Naked => unreachable, .Unspecified, .C => { - const ret_ty_size = @intCast(u32, ret_ty.abiSize(self.target.*)); + const ret_ty_size = @intCast(u32, ret_ty.abiSize(mod)); if (ret_ty_size <= 8) { result.return_value = .{ .register = .a0 }; } else if (ret_ty_size <= 16) { diff --git a/src/arch/riscv64/abi.zig b/src/arch/riscv64/abi.zig index bec1b49a4e..c9e0873bce 100644 --- a/src/arch/riscv64/abi.zig +++ b/src/arch/riscv64/abi.zig @@ -3,16 +3,18 @@ const bits = @import("bits.zig"); const Register = bits.Register; const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager; const Type = @import("../../type.zig").Type; +const Module = @import("../../Module.zig"); pub const Class = enum { memory, byval, integer, double_integer }; -pub fn classifyType(ty: Type, target: std.Target) Class { - std.debug.assert(ty.hasRuntimeBitsIgnoreComptime()); +pub fn classifyType(ty: Type, mod: *const Module) Class { + const target = mod.getTarget(); + std.debug.assert(ty.hasRuntimeBitsIgnoreComptime(mod)); const max_byval_size = target.ptrBitWidth() * 2; - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Struct => { - const bit_size = ty.bitSize(target); + const bit_size = ty.bitSize(mod); if (ty.containerLayout() == .Packed) { if (bit_size > max_byval_size) return .memory; return .byval; @@ -23,7 +25,7 @@ pub fn classifyType(ty: Type, target: std.Target) Class { return .integer; }, .Union => { - const bit_size = ty.bitSize(target); + const bit_size = ty.bitSize(mod); if (ty.containerLayout() == .Packed) { if (bit_size > max_byval_size) return .memory; return .byval; @@ -36,17 +38,17 @@ pub fn classifyType(ty: Type, target: std.Target) Class { .Bool => return .integer, .Float => return .byval, .Int, .Enum, .ErrorSet => { - const bit_size = ty.bitSize(target); + const bit_size = ty.bitSize(mod); if (bit_size > max_byval_size) return .memory; return .byval; }, .Vector => { - const bit_size = ty.bitSize(target); + const bit_size = ty.bitSize(mod); if (bit_size > max_byval_size) return .memory; return .integer; }, .Optional => { - std.debug.assert(ty.isPtrLikeOptional()); + std.debug.assert(ty.isPtrLikeOptional(mod)); return .byval; }, .Pointer => { diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index b70bc0f73d..63b604857e 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ 
-758,18 +758,18 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const tag = self.air.instructions.items(.tag)[inst]; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; + const mod = self.bin_file.options.module.?; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const lhs = try self.resolveInst(extra.lhs); const rhs = try self.resolveInst(extra.rhs); const lhs_ty = self.air.typeOf(extra.lhs); const rhs_ty = self.air.typeOf(extra.rhs); - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO implement add_with_overflow/sub_with_overflow for vectors", .{}), .Int => { - const mod = self.bin_file.options.module.?; assert(lhs_ty.eql(rhs_ty, mod)); - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); switch (int_info.bits) { 32, 64 => { // Only say yes if the operation is @@ -1018,7 +1018,7 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void { switch (arg) { .stack_offset => |off| { const mod = self.bin_file.options.module.?; - const abi_size = math.cast(u32, ty.abiSize(self.target.*)) orelse { + const abi_size = math.cast(u32, ty.abiSize(mod)) orelse { return self.fail("type '{}' too big to fit into stack frame", .{ty.fmt(mod)}); }; const offset = off + abi_size; @@ -1203,6 +1203,7 @@ fn airBreakpoint(self: *Self) !void { } fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; // We have hardware byteswapper in SPARCv9, don't let mainstream compilers mislead you. @@ -1218,14 +1219,14 @@ fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void { const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand = try self.resolveInst(ty_op.operand); const operand_ty = self.air.typeOf(ty_op.operand); - switch (operand_ty.zigTypeTag()) { + switch (operand_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO byteswap for vectors", .{}), .Int => { - const int_info = operand_ty.intInfo(self.target.*); + const int_info = operand_ty.intInfo(mod); if (int_info.bits == 8) break :result operand; const abi_size = int_info.bits >> 3; - const abi_align = operand_ty.abiAlignment(self.target.*); + const abi_align = operand_ty.abiAlignment(mod); const opposite_endian_asi = switch (self.target.cpu.arch.endian()) { Endian.Big => ASI.asi_primary_little, Endian.Little => ASI.asi_primary, @@ -1294,7 +1295,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const extra = self.air.extraData(Air.Call, pl_op.payload); const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end .. extra.end + extra.data.args_len]); const ty = self.air.typeOf(callee); - const fn_ty = switch (ty.zigTypeTag()) { + const mod = self.bin_file.options.module.?; + const fn_ty = switch (ty.zigTypeTag(mod)) { .Fn => ty, .Pointer => ty.childType(), else => unreachable, @@ -1337,7 +1339,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier // Due to incremental compilation, how function calls are generated depends // on linking. 
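    // Note that this backend can only emit direct calls into ELF output for
    // now (the non-ELF path below is still a panic); a callee that is only
    // known at runtime is instead loaded into o7 and called indirectly.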
- if (self.air.value(callee)) |func_value| { + if (self.air.value(callee, mod)) |func_value| { if (self.bin_file.tag == link.File.Elf.base_tag) { if (func_value.castTag(.function)) |func_payload| { const func = func_payload.data; @@ -1374,7 +1376,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier } } else @panic("TODO SPARCv9 currently does not support non-ELF binaries"); } else { - assert(ty.zigTypeTag() == .Pointer); + assert(ty.zigTypeTag(mod) == .Pointer); const mcv = try self.resolveInst(callee); try self.genSetReg(ty, .o7, mcv); @@ -1422,15 +1424,15 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void { fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; + const mod = self.bin_file.options.module.?; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); const lhs_ty = self.air.typeOf(bin_op.lhs); - var int_buffer: Type.Payload.Bits = undefined; - const int_ty = switch (lhs_ty.zigTypeTag()) { + const int_ty = switch (lhs_ty.zigTypeTag(mod)) { .Vector => unreachable, // Handled by cmp_vector. - .Enum => lhs_ty.intTagType(&int_buffer), + .Enum => lhs_ty.intTagType(), .Int => lhs_ty, .Bool => Type.initTag(.u1), .Pointer => Type.usize, @@ -1438,9 +1440,9 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { .Optional => blk: { var opt_buffer: Type.Payload.ElemType = undefined; const payload_ty = lhs_ty.optionalChild(&opt_buffer); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { break :blk Type.initTag(.u1); - } else if (lhs_ty.isPtrLikeOptional()) { + } else if (lhs_ty.isPtrLikeOptional(mod)) { break :blk Type.usize; } else { return self.fail("TODO SPARCv9 cmp non-pointer optionals", .{}); @@ -1450,7 +1452,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { else => unreachable, }; - const int_info = int_ty.intInfo(self.target.*); + const int_info = int_ty.intInfo(mod); if (int_info.bits <= 64) { _ = try self.binOp(.cmp_eq, lhs, rhs, int_ty, int_ty, BinOpMetadata{ .lhs = bin_op.lhs, @@ -1512,8 +1514,8 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void { // whether it needs to be spilled in the branches if (self.liveness.operandDies(inst, 0)) { const op_int = @enumToInt(pl_op.operand); - if (op_int >= Air.Inst.Ref.typed_value_map.len) { - const op_index = @intCast(Air.Inst.Index, op_int - Air.Inst.Ref.typed_value_map.len); + if (op_int >= Air.ref_start_index) { + const op_index = @intCast(Air.Inst.Index, op_int - Air.ref_start_index); self.processDeath(op_index); } } @@ -1752,10 +1754,11 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void { if (self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none }); + const mod = self.bin_file.options.module.?; const operand_ty = self.air.typeOf(ty_op.operand); const operand = try self.resolveInst(ty_op.operand); - const info_a = operand_ty.intInfo(self.target.*); - const info_b = self.air.typeOfIndex(inst).intInfo(self.target.*); + const info_a = operand_ty.intInfo(mod); + const info_b = self.air.typeOfIndex(inst).intInfo(mod); if (info_a.signedness != info_b.signedness) return self.fail("TODO gen intcast sign safety in semantic analysis", .{}); @@ -1814,9 +1817,10 @@ fn airIsNonNull(self: *Self, inst: Air.Inst.Index) !void { fn airLoad(self: 
*Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const elem_ty = self.air.typeOfIndex(inst); - const elem_size = elem_ty.abiSize(self.target.*); + const mod = self.bin_file.options.module.?; + const elem_size = elem_ty.abiSize(mod); const result: MCValue = result: { - if (!elem_ty.hasRuntimeBits()) + if (!elem_ty.hasRuntimeBits(mod)) break :result MCValue.none; const ptr = try self.resolveInst(ty_op.operand); @@ -2037,18 +2041,18 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { //const tag = self.air.instructions.items(.tag)[inst]; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; + const mod = self.bin_file.options.module.?; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const lhs = try self.resolveInst(extra.lhs); const rhs = try self.resolveInst(extra.rhs); const lhs_ty = self.air.typeOf(extra.lhs); const rhs_ty = self.air.typeOf(extra.rhs); - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO implement mul_with_overflow for vectors", .{}), .Int => { - const mod = self.bin_file.options.module.?; assert(lhs_ty.eql(rhs_ty, mod)); - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); switch (int_info.bits) { 1...32 => { try self.spillConditionFlagsIfOccupied(); @@ -2101,6 +2105,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { fn airNot(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const mod = self.bin_file.options.module.?; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand = try self.resolveInst(ty_op.operand); const operand_ty = self.air.typeOf(ty_op.operand); @@ -2116,7 +2121,7 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void { }; }, else => { - switch (operand_ty.zigTypeTag()) { + switch (operand_ty.zigTypeTag(mod)) { .Bool => { const op_reg = switch (operand) { .register => |r| r, @@ -2150,7 +2155,7 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void { }, .Vector => return self.fail("TODO bitwise not for vectors", .{}), .Int => { - const int_info = operand_ty.intInfo(self.target.*); + const int_info = operand_ty.intInfo(mod); if (int_info.bits <= 64) { const op_reg = switch (operand) { .register => |r| r, @@ -2332,16 +2337,17 @@ fn airShlSat(self: *Self, inst: Air.Inst.Index) !void { fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; + const mod = self.bin_file.options.module.?; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const lhs = try self.resolveInst(extra.lhs); const rhs = try self.resolveInst(extra.rhs); const lhs_ty = self.air.typeOf(extra.lhs); const rhs_ty = self.air.typeOf(extra.rhs); - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO implement mul_with_overflow for vectors", .{}), .Int => { - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 64) { try self.spillConditionFlagsIfOccupied(); @@ -2449,7 +2455,8 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void { const slice_ty = self.air.typeOf(bin_op.lhs); const elem_ty = slice_ty.childType(); - const elem_size = elem_ty.abiSize(self.target.*); + 
const mod = self.bin_file.options.module.?; + const elem_size = elem_ty.abiSize(mod); var buf: Type.SlicePtrFieldTypeBuffer = undefined; const slice_ptr_field_type = slice_ty.slicePtrFieldType(&buf); @@ -2564,9 +2571,10 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { const operand = extra.struct_operand; const index = extra.field_index; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const mod = self.bin_file.options.module.?; const mcv = try self.resolveInst(operand); const struct_ty = self.air.typeOf(operand); - const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, self.target.*)); + const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod)); switch (mcv) { .dead, .unreach => unreachable, @@ -2701,7 +2709,8 @@ fn airUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !void { const error_union_ty = self.air.typeOf(ty_op.operand); const payload_ty = error_union_ty.errorUnionPayload(); const mcv = try self.resolveInst(ty_op.operand); - if (!payload_ty.hasRuntimeBits()) break :result mcv; + const mod = self.bin_file.options.module.?; + if (!payload_ty.hasRuntimeBits(mod)) break :result mcv; return self.fail("TODO implement unwrap error union error for non-empty payloads", .{}); }; @@ -2713,7 +2722,8 @@ fn airUnwrapErrPayload(self: *Self, inst: Air.Inst.Index) !void { const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const error_union_ty = self.air.typeOf(ty_op.operand); const payload_ty = error_union_ty.errorUnionPayload(); - if (!payload_ty.hasRuntimeBits()) break :result MCValue.none; + const mod = self.bin_file.options.module.?; + if (!payload_ty.hasRuntimeBits(mod)) break :result MCValue.none; return self.fail("TODO implement unwrap error union payload for non-empty payloads", .{}); }; @@ -2727,7 +2737,8 @@ fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void { const error_union_ty = self.air.getRefType(ty_op.ty); const payload_ty = error_union_ty.errorUnionPayload(); const mcv = try self.resolveInst(ty_op.operand); - if (!payload_ty.hasRuntimeBits()) break :result mcv; + const mod = self.bin_file.options.module.?; + if (!payload_ty.hasRuntimeBits(mod)) break :result mcv; return self.fail("TODO implement wrap errunion error for non-empty payloads", .{}); }; @@ -2747,7 +2758,8 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void { const optional_ty = self.air.typeOfIndex(inst); // Optional with a zero-bit payload type is just a boolean true - if (optional_ty.abiSize(self.target.*) == 1) + const mod = self.bin_file.options.module.?; + if (optional_ty.abiSize(mod) == 1) break :result MCValue{ .immediate = 1 }; return self.fail("TODO implement wrap optional for {}", .{self.target.cpu.arch}); @@ -2784,7 +2796,8 @@ fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: u32) !u fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 { const elem_ty = self.air.typeOfIndex(inst).elemType(); - if (!elem_ty.hasRuntimeBits()) { + const mod = self.bin_file.options.module.?; + if (!elem_ty.hasRuntimeBits(mod)) { // As this stack item will never be dereferenced at runtime, // return the stack offset 0. 
Stack offset 0 will be where all // zero-sized stack allocations live as non-zero-sized @@ -2792,22 +2805,21 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 { return @as(u32, 0); } - const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) orelse { - const mod = self.bin_file.options.module.?; + const abi_size = math.cast(u32, elem_ty.abiSize(mod)) orelse { return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)}); }; // TODO swap this for inst.ty.ptrAlign - const abi_align = elem_ty.abiAlignment(self.target.*); + const abi_align = elem_ty.abiAlignment(mod); return self.allocMem(inst, abi_size, abi_align); } fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue { const elem_ty = self.air.typeOfIndex(inst); - const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) orelse { - const mod = self.bin_file.options.module.?; + const mod = self.bin_file.options.module.?; + const abi_size = math.cast(u32, elem_ty.abiSize(mod)) orelse { return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)}); }; - const abi_align = elem_ty.abiAlignment(self.target.*); + const abi_align = elem_ty.abiAlignment(mod); if (abi_align > self.stack_align) self.stack_align = abi_align; @@ -2860,12 +2872,12 @@ fn binOp( .xor, .cmp_eq, => { - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Float => return self.fail("TODO binary operations on floats", .{}), .Vector => return self.fail("TODO binary operations on vectors", .{}), .Int => { assert(lhs_ty.eql(rhs_ty, mod)); - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 64) { // Only say yes if the operation is // commutative, i.e. we can swap both of the @@ -2934,10 +2946,10 @@ fn binOp( const result = try self.binOp(base_tag, lhs, rhs, lhs_ty, rhs_ty, metadata); // Truncate if necessary - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO binary operations on vectors", .{}), .Int => { - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 64) { const result_reg = result.register; try self.truncRegister(result_reg, result_reg, int_info.signedness, int_info.bits); @@ -2951,11 +2963,11 @@ fn binOp( }, .div_trunc => { - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO binary operations on vectors", .{}), .Int => { assert(lhs_ty.eql(rhs_ty, mod)); - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 64) { const rhs_immediate_ok = switch (tag) { .div_trunc => rhs == .immediate and rhs.immediate <= std.math.maxInt(u12), @@ -2984,14 +2996,14 @@ fn binOp( }, .ptr_add => { - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Pointer => { const ptr_ty = lhs_ty; const elem_ty = switch (ptr_ty.ptrSize()) { .One => ptr_ty.childType().childType(), // ptr to array, so get array element type else => ptr_ty.childType(), }; - const elem_size = elem_ty.abiSize(self.target.*); + const elem_size = elem_ty.abiSize(mod); if (elem_size == 1) { const base_tag: Mir.Inst.Tag = switch (tag) { @@ -3016,7 +3028,7 @@ fn binOp( .bool_and, .bool_or, => { - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Bool => { assert(lhs != .immediate); // should have been handled by Sema assert(rhs != .immediate); // should have been handled by Sema @@ -3046,10 +3058,10 @@ fn binOp( const result = try 
self.binOp(base_tag, lhs, rhs, lhs_ty, rhs_ty, metadata); // Truncate if necessary - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO binary operations on vectors", .{}), .Int => { - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 64) { // 32 and 64 bit operands doesn't need truncating if (int_info.bits == 32 or int_info.bits == 64) return result; @@ -3068,10 +3080,10 @@ fn binOp( .shl_exact, .shr_exact, => { - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO binary operations on vectors", .{}), .Int => { - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 64) { const rhs_immediate_ok = rhs == .immediate; @@ -3393,7 +3405,8 @@ fn binOpRegister( fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void { const block_data = self.blocks.getPtr(block).?; - if (self.air.typeOf(operand).hasRuntimeBits()) { + const mod = self.bin_file.options.module.?; + if (self.air.typeOf(operand).hasRuntimeBits(mod)) { const operand_mcv = try self.resolveInst(operand); const block_mcv = block_data.mcv; if (block_mcv == .none) { @@ -3512,16 +3525,17 @@ fn ensureProcessDeathCapacity(self: *Self, additional_count: usize) !void { /// Given an error union, returns the payload fn errUnionPayload(self: *Self, error_union_mcv: MCValue, error_union_ty: Type) !MCValue { + const mod = self.bin_file.options.module.?; const err_ty = error_union_ty.errorUnionSet(); const payload_ty = error_union_ty.errorUnionPayload(); if (err_ty.errorSetIsEmpty()) { return error_union_mcv; } - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return MCValue.none; } - const payload_offset = @intCast(u32, errUnionPayloadOffset(payload_ty, self.target.*)); + const payload_offset = @intCast(u32, errUnionPayloadOffset(payload_ty, mod)); switch (error_union_mcv) { .register => return self.fail("TODO errUnionPayload for registers", .{}), .stack_offset => |off| { @@ -3555,8 +3569,8 @@ fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Live tomb_bits >>= 1; if (!dies) continue; const op_int = @enumToInt(op); - if (op_int < Air.Inst.Ref.typed_value_map.len) continue; - const op_index = @intCast(Air.Inst.Index, op_int - Air.Inst.Ref.typed_value_map.len); + if (op_int < Air.ref_start_index) continue; + const op_index = @intCast(Air.Inst.Index, op_int - Air.ref_start_index); self.processDeath(op_index); } const is_used = @truncate(u1, tomb_bits) == 0; @@ -3730,6 +3744,7 @@ fn genLoadASI(self: *Self, value_reg: Register, addr_reg: Register, off_reg: Reg } fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void { + const mod = self.bin_file.options.module.?; switch (mcv) { .dead => unreachable, .unreach, .none => return, // Nothing to do. @@ -3928,19 +3943,20 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void // The value is in memory at a hard-coded address. // If the type is a pointer, it means the pointer address is at this memory location. 
try self.genSetReg(ty, reg, .{ .immediate = addr }); - try self.genLoad(reg, reg, i13, 0, ty.abiSize(self.target.*)); + try self.genLoad(reg, reg, i13, 0, ty.abiSize(mod)); }, .stack_offset => |off| { const real_offset = realStackOffset(off); const simm13 = math.cast(i13, real_offset) orelse return self.fail("TODO larger stack offsets: {}", .{real_offset}); - try self.genLoad(reg, .sp, i13, simm13, ty.abiSize(self.target.*)); + try self.genLoad(reg, .sp, i13, simm13, ty.abiSize(mod)); }, } } fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void { - const abi_size = ty.abiSize(self.target.*); + const mod = self.bin_file.options.module.?; + const abi_size = ty.abiSize(mod); switch (mcv) { .dead => unreachable, .unreach, .none => return, // Nothing to do. @@ -3948,7 +3964,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro if (!self.wantSafety()) return; // The already existing value will do just fine. // TODO Upgrade this to a memset call when we have that available. - switch (ty.abiSize(self.target.*)) { + switch (ty.abiSize(mod)) { 1 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaa }), 2 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaa }), 4 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaaaaaa }), @@ -3978,7 +3994,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro try self.genSetStack(wrapped_ty, stack_offset, .{ .register = rwo.reg }); const overflow_bit_ty = ty.structFieldType(1); - const overflow_bit_offset = @intCast(u32, ty.structFieldOffset(1, self.target.*)); + const overflow_bit_offset = @intCast(u32, ty.structFieldOffset(1, mod)); const cond_reg = try self.register_manager.allocReg(null, gp); // TODO handle floating point CCRs @@ -4152,13 +4168,14 @@ fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) MCValue { } fn isErr(self: *Self, ty: Type, operand: MCValue) !MCValue { + const mod = self.bin_file.options.module.?; const error_type = ty.errorUnionSet(); const payload_type = ty.errorUnionPayload(); - if (!error_type.hasRuntimeBits()) { + if (!error_type.hasRuntimeBits(mod)) { return MCValue{ .immediate = 0 }; // always false - } else if (!payload_type.hasRuntimeBits()) { - if (error_type.abiSize(self.target.*) <= 8) { + } else if (!payload_type.hasRuntimeBits(mod)) { + if (error_type.abiSize(mod) <= 8) { const reg_mcv: MCValue = switch (operand) { .register => operand, else => .{ .register = try self.copyToTmpRegister(error_type, operand) }, @@ -4249,8 +4266,9 @@ fn jump(self: *Self, inst: Mir.Inst.Index) !void { } fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!void { + const mod = self.bin_file.options.module.?; const elem_ty = ptr_ty.elemType(); - const elem_size = elem_ty.abiSize(self.target.*); + const elem_size = elem_ty.abiSize(mod); switch (ptr) { .none => unreachable, @@ -4321,11 +4339,11 @@ fn minMax( ) InnerError!MCValue { const mod = self.bin_file.options.module.?; assert(lhs_ty.eql(rhs_ty, mod)); - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Float => return self.fail("TODO min/max on floats", .{}), .Vector => return self.fail("TODO min/max on vectors", .{}), .Int => { - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 64) { // TODO skip register setting when one of the operands // is a small (fits in i13) immediate. 
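Both `airCondBr` and `finishAir` above now split the `Ref` space at `Air.ref_start_index` rather than `Air.Inst.Ref.typed_value_map.len`: values below the threshold are interned constants with no body instruction, while everything at or above it maps one-to-one onto an instruction index. A sketch of the conversion both call sites perform inline (effectively what `Air.refToIndex`, used by the new `resolveInst` below, does; the function name here is illustrative):

// Refs below ref_start_index denote interned constants; the rest are
// instruction indices offset by ref_start_index.
fn refToIndexSketch(ref: Air.Inst.Ref) ?Air.Inst.Index {
    const int = @enumToInt(ref);
    if (int < Air.ref_start_index) return null; // interned constant, nothing to process
    return @intCast(Air.Inst.Index, int - Air.ref_start_index);
}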
@@ -4455,6 +4473,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView) errdefer self.gpa.free(result.args); const ret_ty = fn_ty.fnReturnType(); + const mod = self.bin_file.options.module.?; switch (cc) { .Naked => { @@ -4478,7 +4497,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView) }; for (param_types, 0..) |ty, i| { - const param_size = @intCast(u32, ty.abiSize(self.target.*)); + const param_size = @intCast(u32, ty.abiSize(mod)); if (param_size <= 8) { if (next_register < argument_registers.len) { result.args[i] = .{ .register = argument_registers[next_register] }; @@ -4505,12 +4524,12 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView) result.stack_byte_count = next_stack_offset; result.stack_align = 16; - if (ret_ty.zigTypeTag() == .NoReturn) { + if (ret_ty.zigTypeTag(mod) == .NoReturn) { result.return_value = .{ .unreach = {} }; - } else if (!ret_ty.hasRuntimeBits()) { + } else if (!ret_ty.hasRuntimeBits(mod)) { result.return_value = .{ .none = {} }; } else { - const ret_ty_size = @intCast(u32, ret_ty.abiSize(self.target.*)); + const ret_ty_size = @intCast(u32, ret_ty.abiSize(mod)); // The callee puts the return values in %i0-%i3, which becomes %o0-%o3 inside the caller. if (ret_ty_size <= 8) { result.return_value = switch (role) { @@ -4528,40 +4547,37 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView) return result; } -fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue { - // First section of indexes correspond to a set number of constant values. - const ref_int = @enumToInt(inst); - if (ref_int < Air.Inst.Ref.typed_value_map.len) { - const tv = Air.Inst.Ref.typed_value_map[ref_int]; - if (!tv.ty.hasRuntimeBitsIgnoreComptime() and !tv.ty.isError()) { - return MCValue{ .none = {} }; - } - return self.genTypedValue(tv); - } +fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue { + const mod = self.bin_file.options.module.?; + const ty = self.air.typeOf(ref); // If the type has no codegen bits, no need to store it. - const inst_ty = self.air.typeOf(inst); - if (!inst_ty.hasRuntimeBitsIgnoreComptime() and !inst_ty.isError()) - return MCValue{ .none = {} }; - - const inst_index = @intCast(Air.Inst.Index, ref_int - Air.Inst.Ref.typed_value_map.len); - switch (self.air.instructions.items(.tag)[inst_index]) { - .constant => { - // Constants have static lifetimes, so they are always memoized in the outer most table. - const branch = &self.branch_stack.items[0]; - const gop = try branch.inst_table.getOrPut(self.gpa, inst_index); - if (!gop.found_existing) { - const ty_pl = self.air.instructions.items(.data)[inst_index].ty_pl; - gop.value_ptr.* = try self.genTypedValue(.{ - .ty = inst_ty, - .val = self.air.values[ty_pl.payload], - }); - } - return gop.value_ptr.*; - }, - .const_ty => unreachable, - else => return self.getResolvedInstValue(inst_index), + if (!ty.hasRuntimeBitsIgnoreComptime(mod)) return .none; + + if (Air.refToIndex(ref)) |inst| { + switch (self.air.instructions.items(.tag)[inst]) { + .constant => { + // Constants have static lifetimes, so they are always memoized in the outer most table. 
+ const branch = &self.branch_stack.items[0]; + const gop = try branch.inst_table.getOrPut(self.gpa, inst); + if (!gop.found_existing) { + const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; + gop.value_ptr.* = try self.genTypedValue(.{ + .ty = ty, + .val = self.air.values[ty_pl.payload], + }); + } + return gop.value_ptr.*; + }, + .const_ty => unreachable, + else => return self.getResolvedInstValue(inst), + } } + + return self.genTypedValue(.{ + .ty = ty, + .val = self.air.value(ref, mod).?, + }); } fn ret(self: *Self, mcv: MCValue) !void { @@ -4666,7 +4682,8 @@ pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void } fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type) InnerError!void { - const abi_size = value_ty.abiSize(self.target.*); + const mod = self.bin_file.options.module.?; + const abi_size = value_ty.abiSize(mod); switch (ptr) { .none => unreachable, @@ -4707,10 +4724,11 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32) !MCValue { return if (self.liveness.isUnused(inst)) .dead else result: { + const mod = self.bin_file.options.module.?; const mcv = try self.resolveInst(operand); const ptr_ty = self.air.typeOf(operand); const struct_ty = ptr_ty.childType(); - const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, self.target.*)); + const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod)); switch (mcv) { .ptr_stack_offset => |off| { break :result MCValue{ .ptr_stack_offset = off - struct_field_offset }; @@ -4748,8 +4766,9 @@ fn trunc( operand_ty: Type, dest_ty: Type, ) !MCValue { - const info_a = operand_ty.intInfo(self.target.*); - const info_b = dest_ty.intInfo(self.target.*); + const mod = self.bin_file.options.module.?; + const info_a = operand_ty.intInfo(mod); + const info_b = dest_ty.intInfo(mod); if (info_b.bits <= 64) { const operand_reg = switch (operand) { diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index d4be9bf139..b592ffcb2a 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -788,9 +788,10 @@ fn resolveInst(func: *CodeGen, ref: Air.Inst.Ref) InnerError!WValue { const gop = try func.branches.items[0].values.getOrPut(func.gpa, ref); assert(!gop.found_existing); - const val = func.air.value(ref).?; + const mod = func.bin_file.base.options.module.?; + const val = func.air.value(ref, mod).?; const ty = func.air.typeOf(ref); - if (!ty.hasRuntimeBitsIgnoreComptime() and !ty.isInt() and !ty.isError()) { + if (!ty.hasRuntimeBitsIgnoreComptime(mod) and !ty.isInt(mod) and !ty.isError(mod)) { gop.value_ptr.* = WValue{ .none = {} }; return gop.value_ptr.*; } @@ -801,7 +802,7 @@ fn resolveInst(func: *CodeGen, ref: Air.Inst.Ref) InnerError!WValue { // // In the other cases, we will simply lower the constant to a value that fits // into a single local (such as a pointer, integer, bool, etc). 
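Both backends memoize lowered constants so that a value referenced many times is materialized only once: sparc64 keys the outermost branch table by instruction index (see `resolveInst` above), while the wasm backend keys `branches.items[0].values` by `Ref`. The caching pattern, reduced to a self-contained sketch with stand-in key and value types:

const std = @import("std");

// Look up first, lower only on a miss, cache for the function's lifetime.
// u32/u64 stand in for the compiler's real key and machine-value types.
fn memoizedLower(
    gpa: std.mem.Allocator,
    table: *std.AutoHashMapUnmanaged(u32, u64),
    key: u32,
) !u64 {
    const gop = try table.getOrPut(gpa, key);
    if (!gop.found_existing) gop.value_ptr.* = lowerOnce(key);
    return gop.value_ptr.*;
}

fn lowerOnce(key: u32) u64 {
    return @as(u64, key) +% 1; // placeholder for genTypedValue / lowerConstant
}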
- const result = if (isByRef(ty, func.target)) blk: { + const result = if (isByRef(ty, mod)) blk: { const sym_index = try func.bin_file.lowerUnnamedConst(.{ .ty = ty, .val = val }, func.decl_index); break :blk WValue{ .memory = sym_index }; } else try func.lowerConstant(val, ty); @@ -987,8 +988,9 @@ fn addExtraAssumeCapacity(func: *CodeGen, extra: anytype) error{OutOfMemory}!u32 } /// Using a given `Type`, returns the corresponding type -fn typeToValtype(ty: Type, target: std.Target) wasm.Valtype { - return switch (ty.zigTypeTag()) { +fn typeToValtype(ty: Type, mod: *Module) wasm.Valtype { + const target = mod.getTarget(); + return switch (ty.zigTypeTag(mod)) { .Float => blk: { const bits = ty.floatBits(target); if (bits == 16) return wasm.Valtype.i32; // stored/loaded as u16 @@ -998,7 +1000,7 @@ fn typeToValtype(ty: Type, target: std.Target) wasm.Valtype { return wasm.Valtype.i32; // represented as pointer to stack }, .Int, .Enum => blk: { - const info = ty.intInfo(target); + const info = ty.intInfo(mod); if (info.bits <= 32) break :blk wasm.Valtype.i32; if (info.bits > 32 and info.bits <= 128) break :blk wasm.Valtype.i64; break :blk wasm.Valtype.i32; // represented as pointer to stack @@ -1006,22 +1008,18 @@ fn typeToValtype(ty: Type, target: std.Target) wasm.Valtype { .Struct => switch (ty.containerLayout()) { .Packed => { const struct_obj = ty.castTag(.@"struct").?.data; - return typeToValtype(struct_obj.backing_int_ty, target); + return typeToValtype(struct_obj.backing_int_ty, mod); }, else => wasm.Valtype.i32, }, - .Vector => switch (determineSimdStoreStrategy(ty, target)) { + .Vector => switch (determineSimdStoreStrategy(ty, mod)) { .direct => wasm.Valtype.v128, .unrolled => wasm.Valtype.i32, }, .Union => switch (ty.containerLayout()) { .Packed => { - var int_ty_payload: Type.Payload.Bits = .{ - .base = .{ .tag = .int_unsigned }, - .data = @intCast(u16, ty.bitSize(target)), - }; - const int_ty = Type.initPayload(&int_ty_payload.base); - return typeToValtype(int_ty, target); + const int_ty = mod.intType(.unsigned, @intCast(u16, ty.bitSize(mod))) catch @panic("out of memory"); + return typeToValtype(int_ty, mod); }, else => wasm.Valtype.i32, }, @@ -1030,17 +1028,17 @@ fn typeToValtype(ty: Type, target: std.Target) wasm.Valtype { } /// Using a given `Type`, returns the byte representation of its wasm value type -fn genValtype(ty: Type, target: std.Target) u8 { - return wasm.valtype(typeToValtype(ty, target)); +fn genValtype(ty: Type, mod: *Module) u8 { + return wasm.valtype(typeToValtype(ty, mod)); } /// Using a given `Type`, returns the corresponding wasm value type /// Differently from `genValtype` this also allows `void` to create a block /// with no return type -fn genBlockType(ty: Type, target: std.Target) u8 { +fn genBlockType(ty: Type, mod: *Module) u8 { return switch (ty.tag()) { .void, .noreturn => wasm.block_empty, - else => genValtype(ty, target), + else => genValtype(ty, mod), }; } @@ -1101,7 +1099,8 @@ fn getResolvedInst(func: *CodeGen, ref: Air.Inst.Ref) *WValue { /// Creates one locals for a given `Type`. 
/// Returns a corresponding `Wvalue` with `local` as active tag fn allocLocal(func: *CodeGen, ty: Type) InnerError!WValue { - const valtype = typeToValtype(ty, func.target); + const mod = func.bin_file.base.options.module.?; + const valtype = typeToValtype(ty, mod); switch (valtype) { .i32 => if (func.free_locals_i32.popOrNull()) |index| { log.debug("reusing local ({d}) of type {}", .{ index, valtype }); @@ -1132,7 +1131,8 @@ fn allocLocal(func: *CodeGen, ty: Type) InnerError!WValue { /// Ensures a new local will be created. This is useful when it's useful /// to use a zero-initialized local. fn ensureAllocLocal(func: *CodeGen, ty: Type) InnerError!WValue { - try func.locals.append(func.gpa, genValtype(ty, func.target)); + const mod = func.bin_file.base.options.module.?; + try func.locals.append(func.gpa, genValtype(ty, mod)); const initial_index = func.local_index; func.local_index += 1; return WValue{ .local = .{ .value = initial_index, .references = 1 } }; @@ -1140,48 +1140,54 @@ fn ensureAllocLocal(func: *CodeGen, ty: Type) InnerError!WValue { /// Generates a `wasm.Type` from a given function type. /// Memory is owned by the caller. -fn genFunctype(gpa: Allocator, cc: std.builtin.CallingConvention, params: []const Type, return_type: Type, target: std.Target) !wasm.Type { +fn genFunctype( + gpa: Allocator, + cc: std.builtin.CallingConvention, + params: []const Type, + return_type: Type, + mod: *Module, +) !wasm.Type { var temp_params = std.ArrayList(wasm.Valtype).init(gpa); defer temp_params.deinit(); var returns = std.ArrayList(wasm.Valtype).init(gpa); defer returns.deinit(); - if (firstParamSRet(cc, return_type, target)) { + if (firstParamSRet(cc, return_type, mod)) { try temp_params.append(.i32); // memory address is always a 32-bit handle - } else if (return_type.hasRuntimeBitsIgnoreComptime()) { + } else if (return_type.hasRuntimeBitsIgnoreComptime(mod)) { if (cc == .C) { - const res_classes = abi.classifyType(return_type, target); + const res_classes = abi.classifyType(return_type, mod); assert(res_classes[0] == .direct and res_classes[1] == .none); - const scalar_type = abi.scalarType(return_type, target); - try returns.append(typeToValtype(scalar_type, target)); + const scalar_type = abi.scalarType(return_type, mod); + try returns.append(typeToValtype(scalar_type, mod)); } else { - try returns.append(typeToValtype(return_type, target)); + try returns.append(typeToValtype(return_type, mod)); } - } else if (return_type.isError()) { + } else if (return_type.isError(mod)) { try returns.append(.i32); } // param types for (params) |param_type| { - if (!param_type.hasRuntimeBitsIgnoreComptime()) continue; + if (!param_type.hasRuntimeBitsIgnoreComptime(mod)) continue; switch (cc) { .C => { - const param_classes = abi.classifyType(param_type, target); + const param_classes = abi.classifyType(param_type, mod); for (param_classes) |class| { if (class == .none) continue; if (class == .direct) { - const scalar_type = abi.scalarType(param_type, target); - try temp_params.append(typeToValtype(scalar_type, target)); + const scalar_type = abi.scalarType(param_type, mod); + try temp_params.append(typeToValtype(scalar_type, mod)); } else { - try temp_params.append(typeToValtype(param_type, target)); + try temp_params.append(typeToValtype(param_type, mod)); } } }, - else => if (isByRef(param_type, target)) + else => if (isByRef(param_type, mod)) try temp_params.append(.i32) else - try temp_params.append(typeToValtype(param_type, target)), + try temp_params.append(typeToValtype(param_type, mod)), } 
} @@ -1227,7 +1233,8 @@ pub fn generate( fn genFunc(func: *CodeGen) InnerError!void { const fn_info = func.decl.ty.fnInfo(); - var func_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types, fn_info.return_type, func.target); + const mod = func.bin_file.base.options.module.?; + var func_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types, fn_info.return_type, mod); defer func_type.deinit(func.gpa); _ = try func.bin_file.storeDeclType(func.decl_index, func_type); @@ -1254,7 +1261,7 @@ fn genFunc(func: *CodeGen) InnerError!void { if (func_type.returns.len != 0 and func.air.instructions.len > 0) { const inst = @intCast(u32, func.air.instructions.len - 1); const last_inst_ty = func.air.typeOfIndex(inst); - if (!last_inst_ty.hasRuntimeBitsIgnoreComptime() or last_inst_ty.isNoReturn()) { + if (!last_inst_ty.hasRuntimeBitsIgnoreComptime(mod) or last_inst_ty.isNoReturn()) { try func.addTag(.@"unreachable"); } } @@ -1335,6 +1342,7 @@ const CallWValues = struct { }; fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWValues { + const mod = func.bin_file.base.options.module.?; const cc = fn_ty.fnCallingConvention(); const param_types = try func.gpa.alloc(Type, fn_ty.fnParamLen()); defer func.gpa.free(param_types); @@ -1351,7 +1359,7 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV // Check if we store the result as a pointer to the stack rather than // by value const fn_info = fn_ty.fnInfo(); - if (firstParamSRet(fn_info.cc, fn_info.return_type, func.target)) { + if (firstParamSRet(fn_info.cc, fn_info.return_type, mod)) { // the sret arg will be passed as first argument, therefore we // set the `return_value` before allocating locals for regular args. result.return_value = .{ .local = .{ .value = func.local_index, .references = 1 } }; @@ -1361,7 +1369,7 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV switch (cc) { .Unspecified => { for (param_types) |ty| { - if (!ty.hasRuntimeBitsIgnoreComptime()) { + if (!ty.hasRuntimeBitsIgnoreComptime(mod)) { continue; } @@ -1371,7 +1379,7 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV }, .C => { for (param_types) |ty| { - const ty_classes = abi.classifyType(ty, func.target); + const ty_classes = abi.classifyType(ty, mod); for (ty_classes) |class| { if (class == .none) continue; try args.append(.{ .local = .{ .value = func.local_index, .references = 1 } }); @@ -1385,11 +1393,11 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV return result; } -fn firstParamSRet(cc: std.builtin.CallingConvention, return_type: Type, target: std.Target) bool { +fn firstParamSRet(cc: std.builtin.CallingConvention, return_type: Type, mod: *const Module) bool { switch (cc) { - .Unspecified, .Inline => return isByRef(return_type, target), + .Unspecified, .Inline => return isByRef(return_type, mod), .C => { - const ty_classes = abi.classifyType(return_type, target); + const ty_classes = abi.classifyType(return_type, mod); if (ty_classes[0] == .indirect) return true; if (ty_classes[0] == .direct and ty_classes[1] == .direct) return true; return false; @@ -1405,16 +1413,17 @@ fn lowerArg(func: *CodeGen, cc: std.builtin.CallingConvention, ty: Type, value: return func.lowerToStack(value); } - const ty_classes = abi.classifyType(ty, func.target); + const mod = func.bin_file.base.options.module.?; + const ty_classes = abi.classifyType(ty, mod); assert(ty_classes[0] != .none); - switch (ty.zigTypeTag()) { 
+ switch (ty.zigTypeTag(mod)) { .Struct, .Union => { if (ty_classes[0] == .indirect) { return func.lowerToStack(value); } assert(ty_classes[0] == .direct); - const scalar_type = abi.scalarType(ty, func.target); - const abi_size = scalar_type.abiSize(func.target); + const scalar_type = abi.scalarType(ty, mod); + const abi_size = scalar_type.abiSize(mod); try func.emitWValue(value); // When the value lives in the virtual stack, we must load it onto the actual stack @@ -1422,12 +1431,12 @@ fn lowerArg(func: *CodeGen, cc: std.builtin.CallingConvention, ty: Type, value: const opcode = buildOpcode(.{ .op = .load, .width = @intCast(u8, abi_size), - .signedness = if (scalar_type.isSignedInt()) .signed else .unsigned, - .valtype1 = typeToValtype(scalar_type, func.target), + .signedness = if (scalar_type.isSignedInt(mod)) .signed else .unsigned, + .valtype1 = typeToValtype(scalar_type, mod), }); try func.addMemArg(Mir.Inst.Tag.fromOpcode(opcode), .{ .offset = value.offset(), - .alignment = scalar_type.abiAlignment(func.target), + .alignment = scalar_type.abiAlignment(mod), }); } }, @@ -1436,7 +1445,7 @@ fn lowerArg(func: *CodeGen, cc: std.builtin.CallingConvention, ty: Type, value: return func.lowerToStack(value); } assert(ty_classes[0] == .direct and ty_classes[1] == .direct); - assert(ty.abiSize(func.target) == 16); + assert(ty.abiSize(mod) == 16); // in this case we have an integer or float that must be lowered as 2 i64's. try func.emitWValue(value); try func.addMemArg(.i64_load, .{ .offset = value.offset(), .alignment = 8 }); @@ -1503,18 +1512,18 @@ fn restoreStackPointer(func: *CodeGen) !void { /// /// Asserts Type has codegenbits fn allocStack(func: *CodeGen, ty: Type) !WValue { - assert(ty.hasRuntimeBitsIgnoreComptime()); + const mod = func.bin_file.base.options.module.?; + assert(ty.hasRuntimeBitsIgnoreComptime(mod)); if (func.initial_stack_value == .none) { try func.initializeStack(); } - const abi_size = std.math.cast(u32, ty.abiSize(func.target)) orelse { - const module = func.bin_file.base.options.module.?; + const abi_size = std.math.cast(u32, ty.abiSize(mod)) orelse { return func.fail("Type {} with ABI size of {d} exceeds stack frame size", .{ - ty.fmt(module), ty.abiSize(func.target), + ty.fmt(mod), ty.abiSize(mod), }); }; - const abi_align = ty.abiAlignment(func.target); + const abi_align = ty.abiAlignment(mod); if (abi_align > func.stack_alignment) { func.stack_alignment = abi_align; @@ -1531,6 +1540,7 @@ fn allocStack(func: *CodeGen, ty: Type) !WValue { /// This is different from allocStack where this will use the pointer's alignment /// if it is set, to ensure the stack alignment will be set correctly. fn allocStackPtr(func: *CodeGen, inst: Air.Inst.Index) !WValue { + const mod = func.bin_file.base.options.module.?; const ptr_ty = func.air.typeOfIndex(inst); const pointee_ty = ptr_ty.childType(); @@ -1538,15 +1548,14 @@ fn allocStackPtr(func: *CodeGen, inst: Air.Inst.Index) !WValue { try func.initializeStack(); } - if (!pointee_ty.hasRuntimeBitsIgnoreComptime()) { + if (!pointee_ty.hasRuntimeBitsIgnoreComptime(mod)) { return func.allocStack(Type.usize); // create a value containing just the stack pointer. 
} - const abi_alignment = ptr_ty.ptrAlignment(func.target); - const abi_size = std.math.cast(u32, pointee_ty.abiSize(func.target)) orelse { - const module = func.bin_file.base.options.module.?; + const abi_alignment = ptr_ty.ptrAlignment(mod); + const abi_size = std.math.cast(u32, pointee_ty.abiSize(mod)) orelse { return func.fail("Type {} with ABI size of {d} exceeds stack frame size", .{ - pointee_ty.fmt(module), pointee_ty.abiSize(func.target), + pointee_ty.fmt(mod), pointee_ty.abiSize(mod), }); }; if (abi_alignment > func.stack_alignment) { @@ -1704,8 +1713,9 @@ fn arch(func: *const CodeGen) std.Target.Cpu.Arch { /// For a given `Type`, will return true when the type will be passed /// by reference, rather than by value -fn isByRef(ty: Type, target: std.Target) bool { - switch (ty.zigTypeTag()) { +fn isByRef(ty: Type, mod: *const Module) bool { + const target = mod.getTarget(); + switch (ty.zigTypeTag(mod)) { .Type, .ComptimeInt, .ComptimeFloat, @@ -1726,40 +1736,40 @@ fn isByRef(ty: Type, target: std.Target) bool { .Array, .Frame, - => return ty.hasRuntimeBitsIgnoreComptime(), + => return ty.hasRuntimeBitsIgnoreComptime(mod), .Union => { if (ty.castTag(.@"union")) |union_ty| { if (union_ty.data.layout == .Packed) { - return ty.abiSize(target) > 8; + return ty.abiSize(mod) > 8; } } - return ty.hasRuntimeBitsIgnoreComptime(); + return ty.hasRuntimeBitsIgnoreComptime(mod); }, .Struct => { if (ty.castTag(.@"struct")) |struct_ty| { const struct_obj = struct_ty.data; if (struct_obj.layout == .Packed and struct_obj.haveFieldTypes()) { - return isByRef(struct_obj.backing_int_ty, target); + return isByRef(struct_obj.backing_int_ty, mod); } } - return ty.hasRuntimeBitsIgnoreComptime(); + return ty.hasRuntimeBitsIgnoreComptime(mod); }, - .Vector => return determineSimdStoreStrategy(ty, target) == .unrolled, - .Int => return ty.intInfo(target).bits > 64, + .Vector => return determineSimdStoreStrategy(ty, mod) == .unrolled, + .Int => return ty.intInfo(mod).bits > 64, .Float => return ty.floatBits(target) > 64, .ErrorUnion => { const pl_ty = ty.errorUnionPayload(); - if (!pl_ty.hasRuntimeBitsIgnoreComptime()) { + if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) { return false; } return true; }, .Optional => { - if (ty.isPtrLikeOptional()) return false; + if (ty.isPtrLikeOptional(mod)) return false; var buf: Type.Payload.ElemType = undefined; const pl_type = ty.optionalChild(&buf); - if (pl_type.zigTypeTag() == .ErrorSet) return false; - return pl_type.hasRuntimeBitsIgnoreComptime(); + if (pl_type.zigTypeTag(mod) == .ErrorSet) return false; + return pl_type.hasRuntimeBitsIgnoreComptime(mod); }, .Pointer => { // Slices act like struct and will be passed by reference @@ -1778,10 +1788,11 @@ const SimdStoreStrategy = enum { /// This means when a given type is 128 bits and either the simd128 or relaxed-simd /// features are enabled, the function will return `.direct`. This would allow to store /// it using a instruction, rather than an unrolled version. 
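The decision made by the function below reduces to one size check plus a CPU feature test. A standalone sketch of the same test, taking the target directly instead of through the Module:

const std = @import("std");

// A 128-bit vector can be stored with a single v128 instruction only when
// the simd128 (or relaxed-simd) feature is enabled; anything else falls
// back to an unrolled element-by-element store.
fn canUseV128Store(target: std.Target, bit_size: u64) bool {
    if (bit_size != 128) return false;
    const has = std.Target.wasm.featureSetHas;
    return has(target.cpu.features, .simd128) or
        has(target.cpu.features, .relaxed_simd);
}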
-fn determineSimdStoreStrategy(ty: Type, target: std.Target) SimdStoreStrategy { - std.debug.assert(ty.zigTypeTag() == .Vector); - if (ty.bitSize(target) != 128) return .unrolled; +fn determineSimdStoreStrategy(ty: Type, mod: *const Module) SimdStoreStrategy { + std.debug.assert(ty.zigTypeTag(mod) == .Vector); + if (ty.bitSize(mod) != 128) return .unrolled; const hasFeature = std.Target.wasm.featureSetHas; + const target = mod.getTarget(); const features = target.cpu.features; if (hasFeature(features, .relaxed_simd) or hasFeature(features, .simd128)) { return .direct; @@ -2084,32 +2095,33 @@ fn airRet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const operand = try func.resolveInst(un_op); const fn_info = func.decl.ty.fnInfo(); const ret_ty = fn_info.return_type; + const mod = func.bin_file.base.options.module.?; // result must be stored in the stack and we return a pointer // to the stack instead if (func.return_value != .none) { try func.store(func.return_value, operand, ret_ty, 0); - } else if (fn_info.cc == .C and ret_ty.hasRuntimeBitsIgnoreComptime()) { - switch (ret_ty.zigTypeTag()) { + } else if (fn_info.cc == .C and ret_ty.hasRuntimeBitsIgnoreComptime(mod)) { + switch (ret_ty.zigTypeTag(mod)) { // Aggregate types can be lowered as a singular value .Struct, .Union => { - const scalar_type = abi.scalarType(ret_ty, func.target); + const scalar_type = abi.scalarType(ret_ty, mod); try func.emitWValue(operand); const opcode = buildOpcode(.{ .op = .load, - .width = @intCast(u8, scalar_type.abiSize(func.target) * 8), - .signedness = if (scalar_type.isSignedInt()) .signed else .unsigned, - .valtype1 = typeToValtype(scalar_type, func.target), + .width = @intCast(u8, scalar_type.abiSize(mod) * 8), + .signedness = if (scalar_type.isSignedInt(mod)) .signed else .unsigned, + .valtype1 = typeToValtype(scalar_type, mod), }); try func.addMemArg(Mir.Inst.Tag.fromOpcode(opcode), .{ .offset = operand.offset(), - .alignment = scalar_type.abiAlignment(func.target), + .alignment = scalar_type.abiAlignment(mod), }); }, else => try func.emitWValue(operand), } } else { - if (!ret_ty.hasRuntimeBitsIgnoreComptime() and ret_ty.isError()) { + if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod) and ret_ty.isError(mod)) { try func.addImm32(0); } else { try func.emitWValue(operand); @@ -2123,14 +2135,15 @@ fn airRet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { fn airRetPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const child_type = func.air.typeOfIndex(inst).childType(); + const mod = func.bin_file.base.options.module.?; var result = result: { - if (!child_type.isFnOrHasRuntimeBitsIgnoreComptime()) { + if (!child_type.isFnOrHasRuntimeBitsIgnoreComptime(mod)) { break :result try func.allocStack(Type.usize); // create pointer to void } const fn_info = func.decl.ty.fnInfo(); - if (firstParamSRet(fn_info.cc, fn_info.return_type, func.target)) { + if (firstParamSRet(fn_info.cc, fn_info.return_type, mod)) { break :result func.return_value; } @@ -2141,16 +2154,17 @@ fn airRetPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airRetLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const un_op = func.air.instructions.items(.data)[inst].un_op; const operand = try func.resolveInst(un_op); const ret_ty = func.air.typeOf(un_op).childType(); const fn_info = func.decl.ty.fnInfo(); - if (!ret_ty.hasRuntimeBitsIgnoreComptime()) { - if (ret_ty.isError()) { + if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if 
(ret_ty.isError(mod)) { try func.addImm32(0); } - } else if (!firstParamSRet(fn_info.cc, fn_info.return_type, func.target)) { + } else if (!firstParamSRet(fn_info.cc, fn_info.return_type, mod)) { // leave on the stack _ = try func.load(operand, ret_ty, 0); } @@ -2167,26 +2181,26 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif const args = @ptrCast([]const Air.Inst.Ref, func.air.extra[extra.end..][0..extra.data.args_len]); const ty = func.air.typeOf(pl_op.operand); - const fn_ty = switch (ty.zigTypeTag()) { + const mod = func.bin_file.base.options.module.?; + const fn_ty = switch (ty.zigTypeTag(mod)) { .Fn => ty, .Pointer => ty.childType(), else => unreachable, }; const ret_ty = fn_ty.fnReturnType(); const fn_info = fn_ty.fnInfo(); - const first_param_sret = firstParamSRet(fn_info.cc, fn_info.return_type, func.target); + const first_param_sret = firstParamSRet(fn_info.cc, fn_info.return_type, mod); const callee: ?Decl.Index = blk: { - const func_val = func.air.value(pl_op.operand) orelse break :blk null; - const module = func.bin_file.base.options.module.?; + const func_val = func.air.value(pl_op.operand, mod) orelse break :blk null; if (func_val.castTag(.function)) |function| { _ = try func.bin_file.getOrCreateAtomForDecl(function.data.owner_decl); break :blk function.data.owner_decl; } else if (func_val.castTag(.extern_fn)) |extern_fn| { - const ext_decl = module.declPtr(extern_fn.data.owner_decl); + const ext_decl = mod.declPtr(extern_fn.data.owner_decl); const ext_info = ext_decl.ty.fnInfo(); - var func_type = try genFunctype(func.gpa, ext_info.cc, ext_info.param_types, ext_info.return_type, func.target); + var func_type = try genFunctype(func.gpa, ext_info.cc, ext_info.param_types, ext_info.return_type, mod); defer func_type.deinit(func.gpa); const atom_index = try func.bin_file.getOrCreateAtomForDecl(extern_fn.data.owner_decl); const atom = func.bin_file.getAtomPtr(atom_index); @@ -2215,7 +2229,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif const arg_val = try func.resolveInst(arg); const arg_ty = func.air.typeOf(arg); - if (!arg_ty.hasRuntimeBitsIgnoreComptime()) continue; + if (!arg_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; try func.lowerArg(fn_ty.fnInfo().cc, arg_ty, arg_val); } @@ -2226,11 +2240,11 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif } else { // in this case we call a function pointer // so load its value onto the stack - std.debug.assert(ty.zigTypeTag() == .Pointer); + std.debug.assert(ty.zigTypeTag(mod) == .Pointer); const operand = try func.resolveInst(pl_op.operand); try func.emitWValue(operand); - var fn_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types, fn_info.return_type, func.target); + var fn_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types, fn_info.return_type, mod); defer fn_type.deinit(func.gpa); const fn_type_index = try func.bin_file.putOrGetFuncType(fn_type); @@ -2238,7 +2252,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif } const result_value = result_value: { - if (!ret_ty.hasRuntimeBitsIgnoreComptime() and !ret_ty.isError()) { + if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod) and !ret_ty.isError(mod)) { break :result_value WValue{ .none = {} }; } else if (ret_ty.isNoReturn()) { try func.addTag(.@"unreachable"); @@ -2246,10 +2260,10 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif } else if (first_param_sret) { break :result_value sret; 
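The order of these branches matters: an sret call wins over every case except no-return and zero-bit results, because the callee has already written the result through the pointer passed as the hidden first argument. A simplified caller-side sketch of that protocol, assuming the patch's `allocStack` and `lowerToStack` helpers (argument lowering and the call itself are elided):

// Hidden-sret protocol, caller side: allocate the result slot up front,
// pass its address as the first parameter, then use the slot itself as
// the call's result value.
fn callWithSretSketch(func: *CodeGen, ret_ty: Type) !WValue {
    const slot = try func.allocStack(ret_ty); // caller owns the result memory
    try func.lowerToStack(slot); // becomes the callee's first argument
    // ... lower the remaining arguments and emit the call here ...
    return slot;
}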
// TODO: Make this less fragile and optimize - } else if (fn_ty.fnInfo().cc == .C and ret_ty.zigTypeTag() == .Struct or ret_ty.zigTypeTag() == .Union) { + } else if (fn_ty.fnInfo().cc == .C and ret_ty.zigTypeTag(mod) == .Struct or ret_ty.zigTypeTag(mod) == .Union) { const result_local = try func.allocLocal(ret_ty); try func.addLabel(.local_set, result_local.local.value); - const scalar_type = abi.scalarType(ret_ty, func.target); + const scalar_type = abi.scalarType(ret_ty, mod); const result = try func.allocStack(scalar_type); try func.store(result, result_local, scalar_type, 0); break :result_value result; @@ -2272,6 +2286,7 @@ fn airAlloc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airStore(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void { + const mod = func.bin_file.base.options.module.?; if (safety) { // TODO if the value is undef, write 0xaa bytes to dest } else { @@ -2290,17 +2305,13 @@ fn airStore(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void } else { // at this point we have a non-natural alignment, we must // load the value, and then shift+or the rhs into the result location. - var int_ty_payload: Type.Payload.Bits = .{ - .base = .{ .tag = .int_unsigned }, - .data = ptr_info.host_size * 8, - }; - const int_elem_ty = Type.initPayload(&int_ty_payload.base); + const int_elem_ty = try mod.intType(.unsigned, ptr_info.host_size * 8); - if (isByRef(int_elem_ty, func.target)) { + if (isByRef(int_elem_ty, mod)) { return func.fail("TODO: airStore for pointers to bitfields with backing type larger than 64bits", .{}); } - var mask = @intCast(u64, (@as(u65, 1) << @intCast(u7, ty.bitSize(func.target))) - 1); + var mask = @intCast(u64, (@as(u65, 1) << @intCast(u7, ty.bitSize(mod))) - 1); mask <<= @intCast(u6, ptr_info.bit_offset); mask ^= ~@as(u64, 0); const shift_val = if (ptr_info.host_size <= 4) @@ -2329,11 +2340,12 @@ fn airStore(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerError!void { assert(!(lhs != .stack and rhs == .stack)); - const abi_size = ty.abiSize(func.target); - switch (ty.zigTypeTag()) { + const mod = func.bin_file.base.options.module.?; + const abi_size = ty.abiSize(mod); + switch (ty.zigTypeTag(mod)) { .ErrorUnion => { const pl_ty = ty.errorUnionPayload(); - if (!pl_ty.hasRuntimeBitsIgnoreComptime()) { + if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) { return func.store(lhs, rhs, Type.anyerror, 0); } @@ -2341,26 +2353,26 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE return func.memcpy(lhs, rhs, .{ .imm32 = len }); }, .Optional => { - if (ty.isPtrLikeOptional()) { + if (ty.isPtrLikeOptional(mod)) { return func.store(lhs, rhs, Type.usize, 0); } var buf: Type.Payload.ElemType = undefined; const pl_ty = ty.optionalChild(&buf); - if (!pl_ty.hasRuntimeBitsIgnoreComptime()) { + if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) { return func.store(lhs, rhs, Type.u8, 0); } - if (pl_ty.zigTypeTag() == .ErrorSet) { + if (pl_ty.zigTypeTag(mod) == .ErrorSet) { return func.store(lhs, rhs, Type.anyerror, 0); } const len = @intCast(u32, abi_size); return func.memcpy(lhs, rhs, .{ .imm32 = len }); }, - .Struct, .Array, .Union => if (isByRef(ty, func.target)) { + .Struct, .Array, .Union => if (isByRef(ty, mod)) { const len = @intCast(u32, abi_size); return func.memcpy(lhs, rhs, .{ .imm32 = len }); }, - .Vector => switch (determineSimdStoreStrategy(ty, func.target)) { + .Vector => switch 
(determineSimdStoreStrategy(ty, mod)) { .unrolled => { const len = @intCast(u32, abi_size); return func.memcpy(lhs, rhs, .{ .imm32 = len }); @@ -2374,7 +2386,7 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE try func.mir_extra.appendSlice(func.gpa, &[_]u32{ std.wasm.simdOpcode(.v128_store), offset + lhs.offset(), - ty.abiAlignment(func.target), + ty.abiAlignment(mod), }); return func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } }); }, @@ -2404,7 +2416,7 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE try func.store(.{ .stack = {} }, msb, Type.u64, 8 + lhs.offset()); return; } else if (abi_size > 16) { - try func.memcpy(lhs, rhs, .{ .imm32 = @intCast(u32, ty.abiSize(func.target)) }); + try func.memcpy(lhs, rhs, .{ .imm32 = @intCast(u32, ty.abiSize(mod)) }); }, else => if (abi_size > 8) { return func.fail("TODO: `store` for type `{}` with abisize `{d}`", .{ @@ -2418,7 +2430,7 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE // into lhs, so we calculate that and emit that instead try func.lowerToStack(rhs); - const valtype = typeToValtype(ty, func.target); + const valtype = typeToValtype(ty, mod); const opcode = buildOpcode(.{ .valtype1 = valtype, .width = @intCast(u8, abi_size * 8), @@ -2428,21 +2440,22 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE // store rhs value at stack pointer's location in memory try func.addMemArg( Mir.Inst.Tag.fromOpcode(opcode), - .{ .offset = offset + lhs.offset(), .alignment = ty.abiAlignment(func.target) }, + .{ .offset = offset + lhs.offset(), .alignment = ty.abiAlignment(mod) }, ); } fn airLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; const operand = try func.resolveInst(ty_op.operand); const ty = func.air.getRefType(ty_op.ty); const ptr_ty = func.air.typeOf(ty_op.operand); const ptr_info = ptr_ty.ptrInfo().data; - if (!ty.hasRuntimeBitsIgnoreComptime()) return func.finishAir(inst, .none, &.{ty_op.operand}); + if (!ty.hasRuntimeBitsIgnoreComptime(mod)) return func.finishAir(inst, .none, &.{ty_op.operand}); const result = result: { - if (isByRef(ty, func.target)) { + if (isByRef(ty, mod)) { const new_local = try func.allocStack(ty); try func.store(new_local, operand, ty, 0); break :result new_local; @@ -2455,11 +2468,7 @@ fn airLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { // at this point we have a non-natural alignment, we must // shift the value to obtain the correct bit. - var int_ty_payload: Type.Payload.Bits = .{ - .base = .{ .tag = .int_unsigned }, - .data = ptr_info.host_size * 8, - }; - const int_elem_ty = Type.initPayload(&int_ty_payload.base); + const int_elem_ty = try mod.intType(.unsigned, ptr_info.host_size * 8); const shift_val = if (ptr_info.host_size <= 4) WValue{ .imm32 = ptr_info.bit_offset } else if (ptr_info.host_size <= 8) @@ -2479,25 +2488,26 @@ fn airLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { /// Loads an operand from the linear memory section. /// NOTE: Leaves the value on the stack. 
fn load(func: *CodeGen, operand: WValue, ty: Type, offset: u32) InnerError!WValue { + const mod = func.bin_file.base.options.module.?; // load local's value from memory by its stack position try func.emitWValue(operand); - if (ty.zigTypeTag() == .Vector) { + if (ty.zigTypeTag(mod) == .Vector) { // TODO: Add helper functions for simd opcodes const extra_index = @intCast(u32, func.mir_extra.items.len); // stores as := opcode, offset, alignment (opcode::memarg) try func.mir_extra.appendSlice(func.gpa, &[_]u32{ std.wasm.simdOpcode(.v128_load), offset + operand.offset(), - ty.abiAlignment(func.target), + ty.abiAlignment(mod), }); try func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } }); return WValue{ .stack = {} }; } - const abi_size = @intCast(u8, ty.abiSize(func.target)); + const abi_size = @intCast(u8, ty.abiSize(mod)); const opcode = buildOpcode(.{ - .valtype1 = typeToValtype(ty, func.target), + .valtype1 = typeToValtype(ty, mod), .width = abi_size * 8, .op = .load, .signedness = .unsigned, @@ -2505,7 +2515,7 @@ fn load(func: *CodeGen, operand: WValue, ty: Type, offset: u32) InnerError!WValu try func.addMemArg( Mir.Inst.Tag.fromOpcode(opcode), - .{ .offset = offset + operand.offset(), .alignment = ty.abiAlignment(func.target) }, + .{ .offset = offset + operand.offset(), .alignment = ty.abiAlignment(mod) }, ); return WValue{ .stack = {} }; @@ -2516,8 +2526,9 @@ fn airArg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const arg = func.args[arg_index]; const cc = func.decl.ty.fnInfo().cc; const arg_ty = func.air.typeOfIndex(inst); + const mod = func.bin_file.base.options.module.?; if (cc == .C) { - const arg_classes = abi.classifyType(arg_ty, func.target); + const arg_classes = abi.classifyType(arg_ty, mod); for (arg_classes) |class| { if (class != .none) { func.arg_index += 1; @@ -2527,7 +2538,7 @@ fn airArg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { // When we have an argument that's passed using more than a single parameter, // we combine them into a single stack value if (arg_classes[0] == .direct and arg_classes[1] == .direct) { - if (arg_ty.zigTypeTag() != .Int and arg_ty.zigTypeTag() != .Float) { + if (arg_ty.zigTypeTag(mod) != .Int and arg_ty.zigTypeTag(mod) != .Float) { return func.fail( "TODO: Implement C-ABI argument for type '{}'", .{arg_ty.fmt(func.bin_file.base.options.module.?)}, @@ -2557,6 +2568,7 @@ fn airArg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { + const mod = func.bin_file.base.options.module.?; const bin_op = func.air.instructions.items(.data)[inst].bin_op; const lhs = try func.resolveInst(bin_op.lhs); const rhs = try func.resolveInst(bin_op.rhs); @@ -2570,10 +2582,10 @@ fn airBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { // For big integers we can ignore this as we will call into compiler-rt which handles this. 
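Wasm shift instructions require both operands in the same value type, which is why the `.shl`/`.shr` case below widens the rhs whenever the two operand types round up to different wasm widths. For reference, the `toWasmBits` helper used throughout behaves roughly like this sketch (an assumption, not a verbatim copy):

    const std = @import("std");

    // Round an integer bit count up to the nearest width the backend can
    // represent: a 32-bit local, a 64-bit local, or two 64-bit words.
    fn toWasmBits(bits: u16) ?u16 {
        return inline for ([_]u16{ 32, 64, 128 }) |wasm_bits| {
            if (bits <= wasm_bits) break wasm_bits;
        } else null;
    }

    test "bit counts round up to wasm storage widths" {
        try std.testing.expectEqual(@as(?u16, 32), toWasmBits(1));
        try std.testing.expectEqual(@as(?u16, 64), toWasmBits(33));
        try std.testing.expectEqual(@as(?u16, 128), toWasmBits(65));
        try std.testing.expectEqual(@as(?u16, null), toWasmBits(129));
    }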
const result = switch (op) {
.shr, .shl => res: {
- const lhs_wasm_bits = toWasmBits(@intCast(u16, lhs_ty.bitSize(func.target))) orelse {
+ const lhs_wasm_bits = toWasmBits(@intCast(u16, lhs_ty.bitSize(mod))) orelse {
return func.fail("TODO: implement '{s}' for types larger than 128 bits", .{@tagName(op)});
};
- const rhs_wasm_bits = toWasmBits(@intCast(u16, rhs_ty.bitSize(func.target))).?;
+ const rhs_wasm_bits = toWasmBits(@intCast(u16, rhs_ty.bitSize(mod))).?;
const new_rhs = if (lhs_wasm_bits != rhs_wasm_bits and lhs_wasm_bits != 128) blk: {
const tmp = try func.intcast(rhs, rhs_ty, lhs_ty);
break :blk try tmp.toLocal(func, lhs_ty);
@@ -2593,6 +2605,7 @@ fn airBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
/// Performs a binary operation on the given `WValue`s
/// NOTE: This leaves the value on top of the stack.
fn binOp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!WValue {
+ const mod = func.bin_file.base.options.module.?;
assert(!(lhs != .stack and rhs == .stack));
if (ty.isAnyFloat()) {
@@ -2600,8 +2613,8 @@ fn binOp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!
return func.floatOp(float_op, ty, &.{ lhs, rhs });
}
- if (isByRef(ty, func.target)) {
- if (ty.zigTypeTag() == .Int) {
+ if (isByRef(ty, mod)) {
+ if (ty.zigTypeTag(mod) == .Int) {
return func.binOpBigInt(lhs, rhs, ty, op);
} else {
return func.fail(
@@ -2613,8 +2626,8 @@ fn binOp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!
const opcode: wasm.Opcode = buildOpcode(.{
.op = op,
- .valtype1 = typeToValtype(ty, func.target),
- .signedness = if (ty.isSignedInt()) .signed else .unsigned,
+ .valtype1 = typeToValtype(ty, mod),
+ .signedness = if (ty.isSignedInt(mod)) .signed else .unsigned,
});
try func.emitWValue(lhs);
try func.emitWValue(rhs);
@@ -2625,7 +2638,8 @@
} fn binOpBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!WValue { - if (ty.intInfo(func.target).bits > 128) { + const mod = func.bin_file.base.options.module.?; + if (ty.intInfo(mod).bits > 128) { return func.fail("TODO: Implement binary operation for big integers larger than 128 bits", .{}); } @@ -2763,7 +2777,8 @@ fn airUnaryFloatOp(func: *CodeGen, inst: Air.Inst.Index, op: FloatOp) InnerError } fn floatOp(func: *CodeGen, float_op: FloatOp, ty: Type, args: []const WValue) InnerError!WValue { - if (ty.zigTypeTag() == .Vector) { + const mod = func.bin_file.base.options.module.?; + if (ty.zigTypeTag(mod) == .Vector) { return func.fail("TODO: Implement floatOps for vectors", .{}); } @@ -2773,7 +2788,7 @@ fn floatOp(func: *CodeGen, float_op: FloatOp, ty: Type, args: []const WValue) In for (args) |operand| { try func.emitWValue(operand); } - const opcode = buildOpcode(.{ .op = op, .valtype1 = typeToValtype(ty, func.target) }); + const opcode = buildOpcode(.{ .op = op, .valtype1 = typeToValtype(ty, mod) }); try func.addTag(Mir.Inst.Tag.fromOpcode(opcode)); return .stack; } @@ -2827,6 +2842,7 @@ fn floatOp(func: *CodeGen, float_op: FloatOp, ty: Type, args: []const WValue) In } fn airWrapBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { + const mod = func.bin_file.base.options.module.?; const bin_op = func.air.instructions.items(.data)[inst].bin_op; const lhs = try func.resolveInst(bin_op.lhs); @@ -2834,7 +2850,7 @@ fn airWrapBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { const lhs_ty = func.air.typeOf(bin_op.lhs); const rhs_ty = func.air.typeOf(bin_op.rhs); - if (lhs_ty.zigTypeTag() == .Vector or rhs_ty.zigTypeTag() == .Vector) { + if (lhs_ty.zigTypeTag(mod) == .Vector or rhs_ty.zigTypeTag(mod) == .Vector) { return func.fail("TODO: Implement wrapping arithmetic for vectors", .{}); } @@ -2845,10 +2861,10 @@ fn airWrapBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { // For big integers we can ignore this as we will call into compiler-rt which handles this. const result = switch (op) { .shr, .shl => res: { - const lhs_wasm_bits = toWasmBits(@intCast(u16, lhs_ty.bitSize(func.target))) orelse { + const lhs_wasm_bits = toWasmBits(@intCast(u16, lhs_ty.bitSize(mod))) orelse { return func.fail("TODO: implement '{s}' for types larger than 128 bits", .{@tagName(op)}); }; - const rhs_wasm_bits = toWasmBits(@intCast(u16, rhs_ty.bitSize(func.target))).?; + const rhs_wasm_bits = toWasmBits(@intCast(u16, rhs_ty.bitSize(mod))).?; const new_rhs = if (lhs_wasm_bits != rhs_wasm_bits and lhs_wasm_bits != 128) blk: { const tmp = try func.intcast(rhs, rhs_ty, lhs_ty); break :blk try tmp.toLocal(func, lhs_ty); @@ -2877,8 +2893,9 @@ fn wrapBinOp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerEr /// Asserts `Type` is <= 128 bits. /// NOTE: When the Type is <= 64 bits, leaves the value on top of the stack. 
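`wrapOperand` exists because a narrow integer carried in a 32- or 64-bit wasm local can hold garbage in its high bits after arithmetic; semantically it is just the masking below (a sketch for the unsigned, at-most-64-bit case):

    const std = @import("std");

    // Keep only the low `bits` bits of `value`, as wrapOperand does when a
    // type is narrower than the wasm local carrying it.
    fn wrap(value: u64, bits: u7) u64 {
        if (bits == 64) return value;
        return value & ((@as(u64, 1) << @intCast(u6, bits)) - 1);
    }

    test "a u4 carried in a wider local wraps modulo 16" {
        try std.testing.expectEqual(@as(u64, 0x2), wrap(0x12, 4));
    }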
fn wrapOperand(func: *CodeGen, operand: WValue, ty: Type) InnerError!WValue { - assert(ty.abiSize(func.target) <= 16); - const bitsize = @intCast(u16, ty.bitSize(func.target)); + const mod = func.bin_file.base.options.module.?; + assert(ty.abiSize(mod) <= 16); + const bitsize = @intCast(u16, ty.bitSize(mod)); const wasm_bits = toWasmBits(bitsize) orelse { return func.fail("TODO: Implement wrapOperand for bitsize '{d}'", .{bitsize}); }; @@ -2915,6 +2932,7 @@ fn wrapOperand(func: *CodeGen, operand: WValue, ty: Type) InnerError!WValue { } fn lowerParentPtr(func: *CodeGen, ptr_val: Value, offset: u32) InnerError!WValue { + const mod = func.bin_file.base.options.module.?; switch (ptr_val.tag()) { .decl_ref_mut => { const decl_index = ptr_val.castTag(.decl_ref_mut).?.data.decl_index; @@ -2932,15 +2950,15 @@ fn lowerParentPtr(func: *CodeGen, ptr_val: Value, offset: u32) InnerError!WValue const field_ptr = ptr_val.castTag(.field_ptr).?.data; const parent_ty = field_ptr.container_ty; - const field_offset = switch (parent_ty.zigTypeTag()) { + const field_offset = switch (parent_ty.zigTypeTag(mod)) { .Struct => switch (parent_ty.containerLayout()) { - .Packed => parent_ty.packedStructFieldByteOffset(field_ptr.field_index, func.target), - else => parent_ty.structFieldOffset(field_ptr.field_index, func.target), + .Packed => parent_ty.packedStructFieldByteOffset(field_ptr.field_index, mod), + else => parent_ty.structFieldOffset(field_ptr.field_index, mod), }, .Union => switch (parent_ty.containerLayout()) { .Packed => 0, else => blk: { - const layout: Module.Union.Layout = parent_ty.unionGetLayout(func.target); + const layout: Module.Union.Layout = parent_ty.unionGetLayout(mod); if (layout.payload_size == 0) break :blk 0; if (layout.payload_align > layout.tag_align) break :blk 0; @@ -2964,7 +2982,7 @@ fn lowerParentPtr(func: *CodeGen, ptr_val: Value, offset: u32) InnerError!WValue .elem_ptr => { const elem_ptr = ptr_val.castTag(.elem_ptr).?.data; const index = elem_ptr.index; - const elem_offset = index * elem_ptr.elem_ty.abiSize(func.target); + const elem_offset = index * elem_ptr.elem_ty.abiSize(mod); return func.lowerParentPtr(elem_ptr.array_ptr, offset + @intCast(u32, elem_offset)); }, .opt_payload_ptr => { @@ -2976,9 +2994,9 @@ fn lowerParentPtr(func: *CodeGen, ptr_val: Value, offset: u32) InnerError!WValue } fn lowerParentPtrDecl(func: *CodeGen, ptr_val: Value, decl_index: Module.Decl.Index, offset: u32) InnerError!WValue { - const module = func.bin_file.base.options.module.?; - const decl = module.declPtr(decl_index); - module.markDeclAlive(decl); + const mod = func.bin_file.base.options.module.?; + const decl = mod.declPtr(decl_index); + mod.markDeclAlive(decl); var ptr_ty_payload: Type.Payload.ElemType = .{ .base = .{ .tag = .single_mut_pointer }, .data = decl.ty, @@ -2992,18 +3010,18 @@ fn lowerDeclRefValue(func: *CodeGen, tv: TypedValue, decl_index: Module.Decl.Ind return WValue{ .memory = try func.bin_file.lowerUnnamedConst(tv, decl_index) }; } - const module = func.bin_file.base.options.module.?; - const decl = module.declPtr(decl_index); - if (decl.ty.zigTypeTag() != .Fn and !decl.ty.hasRuntimeBitsIgnoreComptime()) { + const mod = func.bin_file.base.options.module.?; + const decl = mod.declPtr(decl_index); + if (decl.ty.zigTypeTag(mod) != .Fn and !decl.ty.hasRuntimeBitsIgnoreComptime(mod)) { return WValue{ .imm32 = 0xaaaaaaaa }; } - module.markDeclAlive(decl); + mod.markDeclAlive(decl); const atom_index = try func.bin_file.getOrCreateAtomForDecl(decl_index); const atom = 
func.bin_file.getAtom(atom_index); const target_sym_index = atom.sym_index; - if (decl.ty.zigTypeTag() == .Fn) { + if (decl.ty.zigTypeTag(mod) == .Fn) { try func.bin_file.addTableFunction(target_sym_index); return WValue{ .function_index = target_sym_index }; } else if (offset == 0) { @@ -3041,31 +3059,31 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue { const decl_index = decl_ref_mut.data.decl_index; return func.lowerDeclRefValue(.{ .ty = ty, .val = val }, decl_index, 0); } - const target = func.target; - switch (ty.zigTypeTag()) { + const mod = func.bin_file.base.options.module.?; + switch (ty.zigTypeTag(mod)) { .Void => return WValue{ .none = {} }, .Int => { - const int_info = ty.intInfo(func.target); + const int_info = ty.intInfo(mod); switch (int_info.signedness) { .signed => switch (int_info.bits) { 0...32 => return WValue{ .imm32 = @intCast(u32, toTwosComplement( - val.toSignedInt(target), + val.toSignedInt(mod), @intCast(u6, int_info.bits), )) }, 33...64 => return WValue{ .imm64 = toTwosComplement( - val.toSignedInt(target), + val.toSignedInt(mod), @intCast(u7, int_info.bits), ) }, else => unreachable, }, .unsigned => switch (int_info.bits) { - 0...32 => return WValue{ .imm32 = @intCast(u32, val.toUnsignedInt(target)) }, - 33...64 => return WValue{ .imm64 = val.toUnsignedInt(target) }, + 0...32 => return WValue{ .imm32 = @intCast(u32, val.toUnsignedInt(mod)) }, + 33...64 => return WValue{ .imm64 = val.toUnsignedInt(mod) }, else => unreachable, }, } }, - .Bool => return WValue{ .imm32 = @intCast(u32, val.toUnsignedInt(target)) }, + .Bool => return WValue{ .imm32 = @intCast(u32, val.toUnsignedInt(mod)) }, .Float => switch (ty.floatBits(func.target)) { 16 => return WValue{ .imm32 = @bitCast(u16, val.toFloat(f16)) }, 32 => return WValue{ .float32 = val.toFloat(f32) }, @@ -3074,7 +3092,7 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue { }, .Pointer => switch (val.tag()) { .field_ptr, .elem_ptr, .opt_payload_ptr => return func.lowerParentPtr(val, 0), - .int_u64, .one => return WValue{ .imm32 = @intCast(u32, val.toUnsignedInt(target)) }, + .int_u64, .one => return WValue{ .imm32 = @intCast(u32, val.toUnsignedInt(mod)) }, .zero, .null_value => return WValue{ .imm32 = 0 }, else => return func.fail("Wasm TODO: lowerConstant for other const pointer tag {}", .{val.tag()}), }, @@ -3100,8 +3118,7 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue { else => return func.fail("TODO: lowerConstant for enum tag: {}", .{ty.tag()}), } } else { - var int_tag_buffer: Type.Payload.Bits = undefined; - const int_tag_ty = ty.intTagType(&int_tag_buffer); + const int_tag_ty = ty.intTagType(); return func.lowerConstant(val, int_tag_ty); } }, @@ -3115,7 +3132,7 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue { .ErrorUnion => { const error_type = ty.errorUnionSet(); const payload_type = ty.errorUnionPayload(); - if (!payload_type.hasRuntimeBitsIgnoreComptime()) { + if (!payload_type.hasRuntimeBitsIgnoreComptime(mod)) { // We use the error type directly as the type. 
const is_pl = val.errorUnionIsPayload(); const err_val = if (!is_pl) val else Value.initTag(.zero); @@ -3123,12 +3140,12 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue { } return func.fail("Wasm TODO: lowerConstant error union with non-zero-bit payload type", .{}); }, - .Optional => if (ty.optionalReprIsPayload()) { + .Optional => if (ty.optionalReprIsPayload(mod)) { var buf: Type.Payload.ElemType = undefined; const pl_ty = ty.optionalChild(&buf); if (val.castTag(.opt_payload)) |payload| { return func.lowerConstant(payload.data, pl_ty); - } else if (val.isNull()) { + } else if (val.isNull(mod)) { return WValue{ .imm32 = 0 }; } else { return func.lowerConstant(val, pl_ty); @@ -3150,7 +3167,7 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue { return func.lowerConstant(int_val, struct_obj.backing_int_ty); }, .Vector => { - assert(determineSimdStoreStrategy(ty, target) == .direct); + assert(determineSimdStoreStrategy(ty, mod) == .direct); var buf: [16]u8 = undefined; val.writeToMemory(ty, func.bin_file.base.options.module.?, &buf) catch unreachable; return func.storeSimdImmd(buf); @@ -3176,9 +3193,10 @@ fn storeSimdImmd(func: *CodeGen, value: [16]u8) !WValue { } fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue { - switch (ty.zigTypeTag()) { + const mod = func.bin_file.base.options.module.?; + switch (ty.zigTypeTag(mod)) { .Bool, .ErrorSet => return WValue{ .imm32 = 0xaaaaaaaa }, - .Int, .Enum => switch (ty.intInfo(func.target).bits) { + .Int, .Enum => switch (ty.intInfo(mod).bits) { 0...32 => return WValue{ .imm32 = 0xaaaaaaaa }, 33...64 => return WValue{ .imm64 = 0xaaaaaaaaaaaaaaaa }, else => unreachable, @@ -3197,7 +3215,7 @@ fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue { .Optional => { var buf: Type.Payload.ElemType = undefined; const pl_ty = ty.optionalChild(&buf); - if (ty.optionalReprIsPayload()) { + if (ty.optionalReprIsPayload(mod)) { return func.emitUndefined(pl_ty); } return WValue{ .imm32 = 0xaaaaaaaa }; @@ -3210,7 +3228,7 @@ fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue { assert(struct_obj.layout == .Packed); return func.emitUndefined(struct_obj.backing_int_ty); }, - else => return func.fail("Wasm TODO: emitUndefined for type: {}\n", .{ty.zigTypeTag()}), + else => return func.fail("Wasm TODO: emitUndefined for type: {}\n", .{ty.zigTypeTag(mod)}), } } @@ -3218,8 +3236,8 @@ fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue { /// It's illegal to provide a value with a type that cannot be represented /// as an integer value. 
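Both `lowerConstant` above and `valueAsI32` below reduce signed constants to their two's-complement bit pattern before emitting them as immediates. The `toTwosComplement` helper they lean on is essentially the following (a sketch with a hypothetical body, assuming `1 <= bits <= 64`):

    const std = @import("std");

    fn toTwosComplement(value: i64, bits: u7) u64 {
        // reinterpret the sign bit, then drop everything above `bits`
        const mask = ~@as(u64, 0) >> @intCast(u6, 64 - @as(u8, bits));
        return @bitCast(u64, value) & mask;
    }

    test "-1 as an 8-bit immediate is 0xff" {
        try std.testing.expectEqual(@as(u64, 0xff), toTwosComplement(-1, 8));
    }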
fn valueAsI32(func: *const CodeGen, val: Value, ty: Type) i32 { - const target = func.target; - switch (ty.zigTypeTag()) { + const mod = func.bin_file.base.options.module.?; + switch (ty.zigTypeTag(mod)) { .Enum => { if (val.castTag(.enum_field_index)) |field_index| { switch (ty.tag()) { @@ -3239,35 +3257,35 @@ fn valueAsI32(func: *const CodeGen, val: Value, ty: Type) i32 { else => unreachable, } } else { - var int_tag_buffer: Type.Payload.Bits = undefined; - const int_tag_ty = ty.intTagType(&int_tag_buffer); + const int_tag_ty = ty.intTagType(); return func.valueAsI32(val, int_tag_ty); } }, - .Int => switch (ty.intInfo(func.target).signedness) { - .signed => return @truncate(i32, val.toSignedInt(target)), - .unsigned => return @bitCast(i32, @truncate(u32, val.toUnsignedInt(target))), + .Int => switch (ty.intInfo(mod).signedness) { + .signed => return @truncate(i32, val.toSignedInt(mod)), + .unsigned => return @bitCast(i32, @truncate(u32, val.toUnsignedInt(mod))), }, .ErrorSet => { const kv = func.bin_file.base.options.module.?.getErrorValue(val.getError().?) catch unreachable; // passed invalid `Value` to function return @bitCast(i32, kv.value); }, - .Bool => return @intCast(i32, val.toSignedInt(target)), - .Pointer => return @intCast(i32, val.toSignedInt(target)), + .Bool => return @intCast(i32, val.toSignedInt(mod)), + .Pointer => return @intCast(i32, val.toSignedInt(mod)), else => unreachable, // Programmer called this function for an illegal type } } fn airBlock(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_pl = func.air.instructions.items(.data)[inst].ty_pl; const block_ty = func.air.getRefType(ty_pl.ty); - const wasm_block_ty = genBlockType(block_ty, func.target); + const wasm_block_ty = genBlockType(block_ty, mod); const extra = func.air.extraData(Air.Block, ty_pl.payload); const body = func.air.extra[extra.end..][0..extra.data.body_len]; // if wasm_block_ty is non-empty, we create a register to store the temporary value const block_result: WValue = if (wasm_block_ty != wasm.block_empty) blk: { - const ty: Type = if (isByRef(block_ty, func.target)) Type.u32 else block_ty; + const ty: Type = if (isByRef(block_ty, mod)) Type.u32 else block_ty; break :blk try func.ensureAllocLocal(ty); // make sure it's a clean local as it may never get overwritten } else WValue.none; @@ -3379,16 +3397,17 @@ fn airCmp(func: *CodeGen, inst: Air.Inst.Index, op: std.math.CompareOperator) In /// NOTE: This leaves the result on top of the stack, rather than a new local. fn cmp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: std.math.CompareOperator) InnerError!WValue { assert(!(lhs != .stack and rhs == .stack)); - if (ty.zigTypeTag() == .Optional and !ty.optionalReprIsPayload()) { + const mod = func.bin_file.base.options.module.?; + if (ty.zigTypeTag(mod) == .Optional and !ty.optionalReprIsPayload(mod)) { var buf: Type.Payload.ElemType = undefined; const payload_ty = ty.optionalChild(&buf); - if (payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { // When we hit this case, we must check the value of optionals // that are not pointers. 
This means first checking against non-null for
// both lhs and rhs, as well as checking that the payloads of lhs and rhs match
return func.cmpOptionals(lhs, rhs, ty, op);
}
- } else if (isByRef(ty, func.target)) {
+ } else if (isByRef(ty, mod)) {
return func.cmpBigInt(lhs, rhs, ty, op);
} else if (ty.isAnyFloat() and ty.floatBits(func.target) == 16) {
return func.cmpFloat16(lhs, rhs, op);
@@ -3401,13 +3420,13 @@ fn cmp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: std.math.CompareO
const signedness: std.builtin.Signedness = blk: {
// by default we treat the operand type as unsigned (i.e. bools and enum values)
- if (ty.zigTypeTag() != .Int) break :blk .unsigned;
+ if (ty.zigTypeTag(mod) != .Int) break :blk .unsigned;
// in case of an actual integer, we emit the correct signedness
- break :blk ty.intInfo(func.target).signedness;
+ break :blk ty.intInfo(mod).signedness;
};
const opcode: wasm.Opcode = buildOpcode(.{
- .valtype1 = typeToValtype(ty, func.target),
+ .valtype1 = typeToValtype(ty, mod),
.op = switch (op) {
.lt => .lt,
.lte => .le,
@@ -3464,11 +3483,12 @@ fn airCmpLtErrorsLen(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
const br = func.air.instructions.items(.data)[inst].br;
const block = func.blocks.get(br.block_inst).?;
// if operand has codegen bits we should break with a value
- if (func.air.typeOf(br.operand).hasRuntimeBitsIgnoreComptime()) {
+ if (func.air.typeOf(br.operand).hasRuntimeBitsIgnoreComptime(mod)) {
const operand = try func.resolveInst(br.operand);
try func.lowerToStack(operand);
@@ -3490,16 +3510,17 @@ fn airNot(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const operand = try func.resolveInst(ty_op.operand);
const operand_ty = func.air.typeOf(ty_op.operand);
+ const mod = func.bin_file.base.options.module.?;
const result = result: {
- if (operand_ty.zigTypeTag() == .Bool) {
+ if (operand_ty.zigTypeTag(mod) == .Bool) {
try func.emitWValue(operand);
try func.addTag(.i32_eqz);
const not_tmp = try func.allocLocal(operand_ty);
try func.addLabel(.local_set, not_tmp.local.value);
break :result not_tmp;
} else {
- const operand_bits = operand_ty.intInfo(func.target).bits;
+ const operand_bits = operand_ty.intInfo(mod).bits;
const wasm_bits = toWasmBits(operand_bits) orelse {
return func.fail("TODO: Implement binary NOT for integer with bitsize '{d}'", .{operand_bits});
};
@@ -3566,16 +3587,17 @@ fn airBitcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn bitcast(func: *CodeGen, wanted_ty: Type, given_ty: Type, operand: WValue) InnerError!WValue {
+ const mod = func.bin_file.base.options.module.?;
// if we bitcast a float to or from an integer we must use the 'reinterpret' instruction
if (!(wanted_ty.isAnyFloat() or given_ty.isAnyFloat())) return operand;
if (wanted_ty.tag() == .f16 or given_ty.tag() == .f16) return operand;
- if (wanted_ty.bitSize(func.target) > 64) return operand;
- assert((wanted_ty.isInt() and given_ty.isAnyFloat()) or (wanted_ty.isAnyFloat() and given_ty.isInt()));
+ if (wanted_ty.bitSize(mod) > 64) return operand;
+ assert((wanted_ty.isInt(mod) and given_ty.isAnyFloat()) or (wanted_ty.isAnyFloat() and given_ty.isInt(mod)));
const opcode = buildOpcode(.{
.op = .reinterpret,
- .valtype1 = typeToValtype(wanted_ty, func.target),
- .valtype2 = typeToValtype(given_ty, func.target),
+ .valtype1 = typeToValtype(wanted_ty, mod),
+ .valtype2 = typeToValtype(given_ty, mod),
});
try
func.emitWValue(operand); try func.addTag(Mir.Inst.Tag.fromOpcode(opcode)); @@ -3609,19 +3631,20 @@ fn structFieldPtr( struct_ty: Type, index: u32, ) InnerError!WValue { + const mod = func.bin_file.base.options.module.?; const result_ty = func.air.typeOfIndex(inst); const offset = switch (struct_ty.containerLayout()) { - .Packed => switch (struct_ty.zigTypeTag()) { + .Packed => switch (struct_ty.zigTypeTag(mod)) { .Struct => offset: { if (result_ty.ptrInfo().data.host_size != 0) { break :offset @as(u32, 0); } - break :offset struct_ty.packedStructFieldByteOffset(index, func.target); + break :offset struct_ty.packedStructFieldByteOffset(index, mod); }, .Union => 0, else => unreachable, }, - else => struct_ty.structFieldOffset(index, func.target), + else => struct_ty.structFieldOffset(index, mod), }; // save a load and store when we can simply reuse the operand if (offset == 0) { @@ -3636,6 +3659,7 @@ fn structFieldPtr( } fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_pl = func.air.instructions.items(.data)[inst].ty_pl; const struct_field = func.air.extraData(Air.StructField, ty_pl.payload).data; @@ -3643,15 +3667,15 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const operand = try func.resolveInst(struct_field.struct_operand); const field_index = struct_field.field_index; const field_ty = struct_ty.structFieldType(field_index); - if (!field_ty.hasRuntimeBitsIgnoreComptime()) return func.finishAir(inst, .none, &.{struct_field.struct_operand}); + if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) return func.finishAir(inst, .none, &.{struct_field.struct_operand}); const result = switch (struct_ty.containerLayout()) { - .Packed => switch (struct_ty.zigTypeTag()) { + .Packed => switch (struct_ty.zigTypeTag(mod)) { .Struct => result: { const struct_obj = struct_ty.castTag(.@"struct").?.data; - const offset = struct_obj.packedFieldBitOffset(func.target, field_index); + const offset = struct_obj.packedFieldBitOffset(mod, field_index); const backing_ty = struct_obj.backing_int_ty; - const wasm_bits = toWasmBits(backing_ty.intInfo(func.target).bits) orelse { + const wasm_bits = toWasmBits(backing_ty.intInfo(mod).bits) orelse { return func.fail("TODO: airStructFieldVal for packed structs larger than 128 bits", .{}); }; const const_wvalue = if (wasm_bits == 32) @@ -3667,25 +3691,17 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { else try func.binOp(operand, const_wvalue, backing_ty, .shr); - if (field_ty.zigTypeTag() == .Float) { - var payload: Type.Payload.Bits = .{ - .base = .{ .tag = .int_unsigned }, - .data = @intCast(u16, field_ty.bitSize(func.target)), - }; - const int_type = Type.initPayload(&payload.base); + if (field_ty.zigTypeTag(mod) == .Float) { + const int_type = try mod.intType(.unsigned, @intCast(u16, field_ty.bitSize(mod))); const truncated = try func.trunc(shifted_value, int_type, backing_ty); const bitcasted = try func.bitcast(field_ty, int_type, truncated); break :result try bitcasted.toLocal(func, field_ty); - } else if (field_ty.isPtrAtRuntime() and struct_obj.fields.count() == 1) { + } else if (field_ty.isPtrAtRuntime(mod) and struct_obj.fields.count() == 1) { // In this case we do not have to perform any transformations, // we can simply reuse the operand. 
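The shr-plus-truncate sequence above is exactly how packed struct fields are defined to behave at the language level, with fields packed LSB-first into the backing integer. A self-contained illustration (example layout chosen here, not taken from the patch):

    const std = @import("std");

    const Example = packed struct { a: u4, b: u12, c: u16 };

    test "packed field access is a shift plus a truncate" {
        const value = Example{ .a = 0x5, .b = 0x123, .c = 0xbeef };
        const backing = @bitCast(u32, value);
        // field `b` lives at bit offset 4 of the u32 backing integer
        try std.testing.expectEqual(value.b, @truncate(u12, backing >> 4));
    }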
break :result func.reuseOperand(struct_field.struct_operand, operand); - } else if (field_ty.isPtrAtRuntime()) { - var payload: Type.Payload.Bits = .{ - .base = .{ .tag = .int_unsigned }, - .data = @intCast(u16, field_ty.bitSize(func.target)), - }; - const int_type = Type.initPayload(&payload.base); + } else if (field_ty.isPtrAtRuntime(mod)) { + const int_type = try mod.intType(.unsigned, @intCast(u16, field_ty.bitSize(mod))); const truncated = try func.trunc(shifted_value, int_type, backing_ty); break :result try truncated.toLocal(func, field_ty); } @@ -3693,8 +3709,8 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { break :result try truncated.toLocal(func, field_ty); }, .Union => result: { - if (isByRef(struct_ty, func.target)) { - if (!isByRef(field_ty, func.target)) { + if (isByRef(struct_ty, mod)) { + if (!isByRef(field_ty, mod)) { const val = try func.load(operand, field_ty, 0); break :result try val.toLocal(func, field_ty); } else { @@ -3704,26 +3720,14 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } } - var payload: Type.Payload.Bits = .{ - .base = .{ .tag = .int_unsigned }, - .data = @intCast(u16, struct_ty.bitSize(func.target)), - }; - const union_int_type = Type.initPayload(&payload.base); - if (field_ty.zigTypeTag() == .Float) { - var int_payload: Type.Payload.Bits = .{ - .base = .{ .tag = .int_unsigned }, - .data = @intCast(u16, field_ty.bitSize(func.target)), - }; - const int_type = Type.initPayload(&int_payload.base); + const union_int_type = try mod.intType(.unsigned, @intCast(u16, struct_ty.bitSize(mod))); + if (field_ty.zigTypeTag(mod) == .Float) { + const int_type = try mod.intType(.unsigned, @intCast(u16, field_ty.bitSize(mod))); const truncated = try func.trunc(operand, int_type, union_int_type); const bitcasted = try func.bitcast(field_ty, int_type, truncated); break :result try bitcasted.toLocal(func, field_ty); - } else if (field_ty.isPtrAtRuntime()) { - var int_payload: Type.Payload.Bits = .{ - .base = .{ .tag = .int_unsigned }, - .data = @intCast(u16, field_ty.bitSize(func.target)), - }; - const int_type = Type.initPayload(&int_payload.base); + } else if (field_ty.isPtrAtRuntime(mod)) { + const int_type = try mod.intType(.unsigned, @intCast(u16, field_ty.bitSize(mod))); const truncated = try func.trunc(operand, int_type, union_int_type); break :result try truncated.toLocal(func, field_ty); } @@ -3733,11 +3737,10 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { else => unreachable, }, else => result: { - const offset = std.math.cast(u32, struct_ty.structFieldOffset(field_index, func.target)) orelse { - const module = func.bin_file.base.options.module.?; - return func.fail("Field type '{}' too big to fit into stack frame", .{field_ty.fmt(module)}); + const offset = std.math.cast(u32, struct_ty.structFieldOffset(field_index, mod)) orelse { + return func.fail("Field type '{}' too big to fit into stack frame", .{field_ty.fmt(mod)}); }; - if (isByRef(field_ty, func.target)) { + if (isByRef(field_ty, mod)) { switch (operand) { .stack_offset => |stack_offset| { break :result WValue{ .stack_offset = .{ .value = stack_offset.value + offset, .references = 1 } }; @@ -3754,6 +3757,7 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; // result type is always 'noreturn' const blocktype = wasm.block_empty; const pl_op = 
func.air.instructions.items(.data)[inst].pl_op;
@@ -3787,7 +3791,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
errdefer func.gpa.free(values);
for (items, 0..) |ref, i| {
- const item_val = func.air.value(ref).?;
+ const item_val = func.air.value(ref, mod).?;
const int_val = func.valueAsI32(item_val, target_ty);
if (lowest_maybe == null or int_val < lowest_maybe.?) {
lowest_maybe = int_val;
@@ -3810,7 +3814,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
// When the target is an integer size larger than u32, we have no way to use the value
// as an index, therefore we also use an if/else-chain for those cases.
// TODO: Benchmark this to find a proper value, LLVM seems to draw the line at '40~45'.
- const is_sparse = highest - lowest > 50 or target_ty.bitSize(func.target) > 32;
+ const is_sparse = highest - lowest > 50 or target_ty.bitSize(mod) > 32;
const else_body = func.air.extra[extra_index..][0..switch_br.data.else_body_len];
const has_else_body = else_body.len != 0;
@@ -3855,7 +3859,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
// for errors that are not present in any branch. This is fine as this default
// case will never be hit for those cases but we do save runtime cost and size
// by using a jump table for this instead of if-else chains.
- break :blk if (has_else_body or target_ty.zigTypeTag() == .ErrorSet) case_i else unreachable;
+ break :blk if (has_else_body or target_ty.zigTypeTag(mod) == .ErrorSet) case_i else unreachable;
};
func.mir_extra.appendAssumeCapacity(idx);
} else if (has_else_body) {
@@ -3866,10 +3870,10 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const signedness: std.builtin.Signedness = blk: {
// by default we treat the operand type as unsigned (i.e. bools and enum values)
- if (target_ty.zigTypeTag() != .Int) break :blk .unsigned;
+ if (target_ty.zigTypeTag(mod) != .Int) break :blk .unsigned;
// in case of an actual integer, we emit the correct signedness
- break :blk target_ty.intInfo(func.target).signedness;
+ break :blk target_ty.intInfo(mod).signedness;
};
try func.branches.ensureUnusedCapacity(func.gpa, case_list.items.len + @boolToInt(has_else_body));
@@ -3882,7 +3886,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const val = try func.lowerConstant(case.values[0].value, target_ty);
try func.emitWValue(val);
const opcode = buildOpcode(.{
- .valtype1 = typeToValtype(target_ty, func.target),
+ .valtype1 = typeToValtype(target_ty, mod),
.op = .ne, // not equal, because we want to jump out of this block if it does not match the condition.
.signedness = signedness, }); @@ -3896,7 +3900,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const val = try func.lowerConstant(value.value, target_ty); try func.emitWValue(val); const opcode = buildOpcode(.{ - .valtype1 = typeToValtype(target_ty, func.target), + .valtype1 = typeToValtype(target_ty, mod), .op = .eq, .signedness = signedness, }); @@ -3933,6 +3937,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airIsErr(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerError!void { + const mod = func.bin_file.base.options.module.?; const un_op = func.air.instructions.items(.data)[inst].un_op; const operand = try func.resolveInst(un_op); const err_union_ty = func.air.typeOf(un_op); @@ -3948,10 +3953,10 @@ fn airIsErr(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerErro } try func.emitWValue(operand); - if (pl_ty.hasRuntimeBitsIgnoreComptime()) { + if (pl_ty.hasRuntimeBitsIgnoreComptime(mod)) { try func.addMemArg(.i32_load16_u, .{ - .offset = operand.offset() + @intCast(u32, errUnionErrorOffset(pl_ty, func.target)), - .alignment = Type.anyerror.abiAlignment(func.target), + .offset = operand.offset() + @intCast(u32, errUnionErrorOffset(pl_ty, mod)), + .alignment = Type.anyerror.abiAlignment(mod), }); } @@ -3967,6 +3972,7 @@ fn airIsErr(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerErro } fn airUnwrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: bool) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; const operand = try func.resolveInst(ty_op.operand); @@ -3975,15 +3981,15 @@ fn airUnwrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: boo const payload_ty = err_ty.errorUnionPayload(); const result = result: { - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { if (op_is_ptr) { break :result func.reuseOperand(ty_op.operand, operand); } break :result WValue{ .none = {} }; } - const pl_offset = @intCast(u32, errUnionPayloadOffset(payload_ty, func.target)); - if (op_is_ptr or isByRef(payload_ty, func.target)) { + const pl_offset = @intCast(u32, errUnionPayloadOffset(payload_ty, mod)); + if (op_is_ptr or isByRef(payload_ty, mod)) { break :result try func.buildPointerOffset(operand, pl_offset, .new); } @@ -3994,6 +4000,7 @@ fn airUnwrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: boo } fn airUnwrapErrUnionError(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: bool) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; const operand = try func.resolveInst(ty_op.operand); @@ -4006,17 +4013,18 @@ fn airUnwrapErrUnionError(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: bool) break :result WValue{ .imm32 = 0 }; } - if (op_is_ptr or !payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (op_is_ptr or !payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { break :result func.reuseOperand(ty_op.operand, operand); } - const error_val = try func.load(operand, Type.anyerror, @intCast(u32, errUnionErrorOffset(payload_ty, func.target))); + const error_val = try func.load(operand, Type.anyerror, @intCast(u32, errUnionErrorOffset(payload_ty, mod))); break :result try error_val.toLocal(func, Type.anyerror); }; func.finishAir(inst, result, &.{ty_op.operand}); } fn airWrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod 
= func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; const operand = try func.resolveInst(ty_op.operand); @@ -4024,18 +4032,18 @@ fn airWrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void const pl_ty = func.air.typeOf(ty_op.operand); const result = result: { - if (!pl_ty.hasRuntimeBitsIgnoreComptime()) { + if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) { break :result func.reuseOperand(ty_op.operand, operand); } const err_union = try func.allocStack(err_ty); - const payload_ptr = try func.buildPointerOffset(err_union, @intCast(u32, errUnionPayloadOffset(pl_ty, func.target)), .new); + const payload_ptr = try func.buildPointerOffset(err_union, @intCast(u32, errUnionPayloadOffset(pl_ty, mod)), .new); try func.store(payload_ptr, operand, pl_ty, 0); // ensure we also write '0' to the error part, so any present stack value gets overwritten by it. try func.emitWValue(err_union); try func.addImm32(0); - const err_val_offset = @intCast(u32, errUnionErrorOffset(pl_ty, func.target)); + const err_val_offset = @intCast(u32, errUnionErrorOffset(pl_ty, mod)); try func.addMemArg(.i32_store16, .{ .offset = err_union.offset() + err_val_offset, .alignment = 2 }); break :result err_union; }; @@ -4043,6 +4051,7 @@ fn airWrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void } fn airWrapErrUnionErr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; const operand = try func.resolveInst(ty_op.operand); @@ -4050,17 +4059,17 @@ fn airWrapErrUnionErr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const pl_ty = err_ty.errorUnionPayload(); const result = result: { - if (!pl_ty.hasRuntimeBitsIgnoreComptime()) { + if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) { break :result func.reuseOperand(ty_op.operand, operand); } const err_union = try func.allocStack(err_ty); // store error value - try func.store(err_union, operand, Type.anyerror, @intCast(u32, errUnionErrorOffset(pl_ty, func.target))); + try func.store(err_union, operand, Type.anyerror, @intCast(u32, errUnionErrorOffset(pl_ty, mod))); // write 'undefined' to the payload - const payload_ptr = try func.buildPointerOffset(err_union, @intCast(u32, errUnionPayloadOffset(pl_ty, func.target)), .new); - const len = @intCast(u32, err_ty.errorUnionPayload().abiSize(func.target)); + const payload_ptr = try func.buildPointerOffset(err_union, @intCast(u32, errUnionPayloadOffset(pl_ty, mod)), .new); + const len = @intCast(u32, err_ty.errorUnionPayload().abiSize(mod)); try func.memset(Type.u8, payload_ptr, .{ .imm32 = len }, .{ .imm32 = 0xaa }); break :result err_union; @@ -4074,15 +4083,16 @@ fn airIntcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const ty = func.air.getRefType(ty_op.ty); const operand = try func.resolveInst(ty_op.operand); const operand_ty = func.air.typeOf(ty_op.operand); - if (ty.zigTypeTag() == .Vector or operand_ty.zigTypeTag() == .Vector) { + const mod = func.bin_file.base.options.module.?; + if (ty.zigTypeTag(mod) == .Vector or operand_ty.zigTypeTag(mod) == .Vector) { return func.fail("todo Wasm intcast for vectors", .{}); } - if (ty.abiSize(func.target) > 16 or operand_ty.abiSize(func.target) > 16) { + if (ty.abiSize(mod) > 16 or operand_ty.abiSize(mod) > 16) { return func.fail("todo Wasm intcast for bitsize > 128", .{}); } - const op_bits = toWasmBits(@intCast(u16, operand_ty.bitSize(func.target))).?; - const wanted_bits = 
toWasmBits(@intCast(u16, ty.bitSize(func.target))).?; + const op_bits = toWasmBits(@intCast(u16, operand_ty.bitSize(mod))).?; + const wanted_bits = toWasmBits(@intCast(u16, ty.bitSize(mod))).?; const result = if (op_bits == wanted_bits) func.reuseOperand(ty_op.operand, operand) else @@ -4096,8 +4106,9 @@ fn airIntcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { /// Asserts type's bitsize <= 128 /// NOTE: May leave the result on the top of the stack. fn intcast(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerError!WValue { - const given_bitsize = @intCast(u16, given.bitSize(func.target)); - const wanted_bitsize = @intCast(u16, wanted.bitSize(func.target)); + const mod = func.bin_file.base.options.module.?; + const given_bitsize = @intCast(u16, given.bitSize(mod)); + const wanted_bitsize = @intCast(u16, wanted.bitSize(mod)); assert(given_bitsize <= 128); assert(wanted_bitsize <= 128); @@ -4110,7 +4121,7 @@ fn intcast(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerErro try func.addTag(.i32_wrap_i64); } else if (op_bits == 32 and wanted_bits > 32 and wanted_bits <= 64) { try func.emitWValue(operand); - try func.addTag(if (wanted.isSignedInt()) .i64_extend_i32_s else .i64_extend_i32_u); + try func.addTag(if (wanted.isSignedInt(mod)) .i64_extend_i32_s else .i64_extend_i32_u); } else if (wanted_bits == 128) { // for 128bit integers we store the integer in the virtual stack, rather than a local const stack_ptr = try func.allocStack(wanted); @@ -4119,14 +4130,14 @@ fn intcast(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerErro // for 32 bit integers, we first coerce the value into a 64 bit integer before storing it // meaning less store operations are required. const lhs = if (op_bits == 32) blk: { - break :blk try func.intcast(operand, given, if (wanted.isSignedInt()) Type.i64 else Type.u64); + break :blk try func.intcast(operand, given, if (wanted.isSignedInt(mod)) Type.i64 else Type.u64); } else operand; // store msb first try func.store(.{ .stack = {} }, lhs, Type.u64, 0 + stack_ptr.offset()); // For signed integers we shift msb by 63 (64bit integer - 1 sign bit) and store remaining value - if (wanted.isSignedInt()) { + if (wanted.isSignedInt(mod)) { try func.emitWValue(stack_ptr); const shr = try func.binOp(lhs, .{ .imm64 = 63 }, Type.i64, .shr); try func.store(.{ .stack = {} }, shr, Type.u64, 8 + stack_ptr.offset()); @@ -4154,16 +4165,16 @@ fn airIsNull(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode, op_kind: /// For a given type and operand, checks if it's considered `null`. 
/// NOTE: Leaves the result on the stack fn isNull(func: *CodeGen, operand: WValue, optional_ty: Type, opcode: wasm.Opcode) InnerError!WValue { + const mod = func.bin_file.base.options.module.?; try func.emitWValue(operand); var buf: Type.Payload.ElemType = undefined; const payload_ty = optional_ty.optionalChild(&buf); - if (!optional_ty.optionalReprIsPayload()) { + if (!optional_ty.optionalReprIsPayload(mod)) { // When payload is zero-bits, we can treat operand as a value, rather than // a pointer to the stack value - if (payload_ty.hasRuntimeBitsIgnoreComptime()) { - const offset = std.math.cast(u32, payload_ty.abiSize(func.target)) orelse { - const module = func.bin_file.base.options.module.?; - return func.fail("Optional type {} too big to fit into stack frame", .{optional_ty.fmt(module)}); + if (payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + const offset = std.math.cast(u32, payload_ty.abiSize(mod)) orelse { + return func.fail("Optional type {} too big to fit into stack frame", .{optional_ty.fmt(mod)}); }; try func.addMemArg(.i32_load8_u, .{ .offset = operand.offset() + offset, .alignment = 1 }); } @@ -4183,18 +4194,19 @@ fn isNull(func: *CodeGen, operand: WValue, optional_ty: Type, opcode: wasm.Opcod } fn airOptionalPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; const opt_ty = func.air.typeOf(ty_op.operand); const payload_ty = func.air.typeOfIndex(inst); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return func.finishAir(inst, .none, &.{ty_op.operand}); } const result = result: { const operand = try func.resolveInst(ty_op.operand); - if (opt_ty.optionalReprIsPayload()) break :result func.reuseOperand(ty_op.operand, operand); + if (opt_ty.optionalReprIsPayload(mod)) break :result func.reuseOperand(ty_op.operand, operand); - if (isByRef(payload_ty, func.target)) { + if (isByRef(payload_ty, mod)) { break :result try func.buildPointerOffset(operand, 0, .new); } @@ -4209,10 +4221,11 @@ fn airOptionalPayloadPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const operand = try func.resolveInst(ty_op.operand); const opt_ty = func.air.typeOf(ty_op.operand).childType(); + const mod = func.bin_file.base.options.module.?; const result = result: { var buf: Type.Payload.ElemType = undefined; const payload_ty = opt_ty.optionalChild(&buf); - if (!payload_ty.hasRuntimeBitsIgnoreComptime() or opt_ty.optionalReprIsPayload()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod) or opt_ty.optionalReprIsPayload(mod)) { break :result func.reuseOperand(ty_op.operand, operand); } @@ -4222,22 +4235,22 @@ fn airOptionalPayloadPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airOptionalPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; const operand = try func.resolveInst(ty_op.operand); const opt_ty = func.air.typeOf(ty_op.operand).childType(); var buf: Type.Payload.ElemType = undefined; const payload_ty = opt_ty.optionalChild(&buf); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return func.fail("TODO: Implement OptionalPayloadPtrSet for optional with zero-sized type {}", .{payload_ty.fmtDebug()}); } - if (opt_ty.optionalReprIsPayload()) { + if (opt_ty.optionalReprIsPayload(mod)) { return 
func.finishAir(inst, operand, &.{ty_op.operand}); } - const offset = std.math.cast(u32, payload_ty.abiSize(func.target)) orelse { - const module = func.bin_file.base.options.module.?; - return func.fail("Optional type {} too big to fit into stack frame", .{opt_ty.fmt(module)}); + const offset = std.math.cast(u32, payload_ty.abiSize(mod)) orelse { + return func.fail("Optional type {} too big to fit into stack frame", .{opt_ty.fmt(mod)}); }; try func.emitWValue(operand); @@ -4251,9 +4264,10 @@ fn airOptionalPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!voi fn airWrapOptional(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const ty_op = func.air.instructions.items(.data)[inst].ty_op; const payload_ty = func.air.typeOf(ty_op.operand); + const mod = func.bin_file.base.options.module.?; const result = result: { - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { const non_null_bit = try func.allocStack(Type.initTag(.u1)); try func.emitWValue(non_null_bit); try func.addImm32(1); @@ -4263,12 +4277,11 @@ fn airWrapOptional(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const operand = try func.resolveInst(ty_op.operand); const op_ty = func.air.typeOfIndex(inst); - if (op_ty.optionalReprIsPayload()) { + if (op_ty.optionalReprIsPayload(mod)) { break :result func.reuseOperand(ty_op.operand, operand); } - const offset = std.math.cast(u32, payload_ty.abiSize(func.target)) orelse { - const module = func.bin_file.base.options.module.?; - return func.fail("Optional type {} too big to fit into stack frame", .{op_ty.fmt(module)}); + const offset = std.math.cast(u32, payload_ty.abiSize(mod)) orelse { + return func.fail("Optional type {} too big to fit into stack frame", .{op_ty.fmt(mod)}); }; // Create optional type, set the non-null bit, and store the operand inside the optional type @@ -4314,7 +4327,8 @@ fn airSliceElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const slice = try func.resolveInst(bin_op.lhs); const index = try func.resolveInst(bin_op.rhs); const elem_ty = slice_ty.childType(); - const elem_size = elem_ty.abiSize(func.target); + const mod = func.bin_file.base.options.module.?; + const elem_size = elem_ty.abiSize(mod); // load pointer onto stack _ = try func.load(slice, Type.usize, 0); @@ -4328,7 +4342,7 @@ fn airSliceElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const result_ptr = try func.allocLocal(Type.usize); try func.addLabel(.local_set, result_ptr.local.value); - const result = if (!isByRef(elem_ty, func.target)) result: { + const result = if (!isByRef(elem_ty, mod)) result: { const elem_val = try func.load(result_ptr, elem_ty, 0); break :result try elem_val.toLocal(func, elem_ty); } else result_ptr; @@ -4341,7 +4355,8 @@ fn airSliceElemPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const bin_op = func.air.extraData(Air.Bin, ty_pl.payload).data; const elem_ty = func.air.getRefType(ty_pl.ty).childType(); - const elem_size = elem_ty.abiSize(func.target); + const mod = func.bin_file.base.options.module.?; + const elem_size = elem_ty.abiSize(mod); const slice = try func.resolveInst(bin_op.lhs); const index = try func.resolveInst(bin_op.rhs); @@ -4389,13 +4404,14 @@ fn airTrunc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { /// Truncates a given operand to a given type, discarding any overflown bits. /// NOTE: Resulting value is left on the stack. 
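`trunc` composes the `intcast` and `wrapOperand` paths shown earlier; the observable behavior matches the language-level `@truncate`, which keeps only the low bits of the operand:

    const std = @import("std");

    test "truncation keeps only the low bits" {
        const wide: u32 = 0xabcd;
        try std.testing.expectEqual(@as(u8, 0xcd), @truncate(u8, wide));
        try std.testing.expectEqual(@as(u12, 0xbcd), @truncate(u12, wide));
    }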
 fn trunc(func: *CodeGen, operand: WValue, wanted_ty: Type, given_ty: Type) InnerError!WValue {
-    const given_bits = @intCast(u16, given_ty.bitSize(func.target));
+    const mod = func.bin_file.base.options.module.?;
+    const given_bits = @intCast(u16, given_ty.bitSize(mod));
     if (toWasmBits(given_bits) == null) {
         return func.fail("TODO: Implement wasm integer truncation for integer bitsize: {d}", .{given_bits});
     }
 
     var result = try func.intcast(operand, given_ty, wanted_ty);
-    const wanted_bits = @intCast(u16, wanted_ty.bitSize(func.target));
+    const wanted_bits = @intCast(u16, wanted_ty.bitSize(mod));
     const wasm_bits = toWasmBits(wanted_bits).?;
     if (wasm_bits != wanted_bits) {
         result = try func.wrapOperand(result, wanted_ty);
     }
@@ -4412,6 +4428,7 @@ fn airBoolToInt(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 }
 
 fn airArrayToSlice(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const mod = func.bin_file.base.options.module.?;
     const ty_op = func.air.instructions.items(.data)[inst].ty_op;
     const operand = try func.resolveInst(ty_op.operand);
 
@@ -4422,7 +4439,7 @@ fn airArrayToSlice(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const slice_local = try func.allocStack(slice_ty);
 
     // store the array ptr in the slice
-    if (array_ty.hasRuntimeBitsIgnoreComptime()) {
+    if (array_ty.hasRuntimeBitsIgnoreComptime(mod)) {
         try func.store(slice_local, operand, Type.usize, 0);
     }
 
@@ -4454,7 +4471,8 @@ fn airPtrElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const ptr = try func.resolveInst(bin_op.lhs);
     const index = try func.resolveInst(bin_op.rhs);
     const elem_ty = ptr_ty.childType();
-    const elem_size = elem_ty.abiSize(func.target);
+    const mod = func.bin_file.base.options.module.?;
+    const elem_size = elem_ty.abiSize(mod);
 
     // load pointer onto the stack
     if (ptr_ty.isSlice()) {
@@ -4472,7 +4490,7 @@ fn airPtrElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const elem_result = val: {
         var result = try func.allocLocal(Type.usize);
         try func.addLabel(.local_set, result.local.value);
-        if (isByRef(elem_ty, func.target)) {
+        if (isByRef(elem_ty, mod)) {
             break :val result;
         }
         defer result.free(func); // only free if it's not returned like above
@@ -4489,7 +4507,8 @@ fn airPtrElemPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const ptr_ty = func.air.typeOf(bin_op.lhs);
     const elem_ty = func.air.getRefType(ty_pl.ty).childType();
-    const elem_size = elem_ty.abiSize(func.target);
+    const mod = func.bin_file.base.options.module.?;
+    const elem_size = elem_ty.abiSize(mod);
 
     const ptr = try func.resolveInst(bin_op.lhs);
     const index = try func.resolveInst(bin_op.rhs);
@@ -4513,6 +4532,7 @@ fn airPtrElemPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 }
 
 fn airPtrBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
+    const mod = func.bin_file.base.options.module.?;
     const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
     const bin_op = func.air.extraData(Air.Bin, ty_pl.payload).data;
 
@@ -4524,13 +4544,13 @@ fn airPtrBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
         else => ptr_ty.childType(),
     };
 
-    const valtype = typeToValtype(Type.usize, func.target);
+    const valtype = typeToValtype(Type.usize, mod);
     const mul_opcode = buildOpcode(.{ .valtype1 = valtype, .op = .mul });
     const bin_opcode = buildOpcode(.{ .valtype1 = valtype, .op = op });
 
     try func.lowerToStack(ptr);
     try func.emitWValue(offset);
-    try func.addImm32(@bitCast(i32, @intCast(u32, pointee_ty.abiSize(func.target))));
+    try func.addImm32(@bitCast(i32, @intCast(u32, pointee_ty.abiSize(mod))));
     try func.addTag(Mir.Inst.Tag.fromOpcode(mul_opcode));
     try func.addTag(Mir.Inst.Tag.fromOpcode(bin_opcode));
 
@@ -4572,7 +4592,8 @@ fn airMemset(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void
 /// this to wasm's memset instruction. When the feature is not present,
 /// we implement it manually.
 fn memset(func: *CodeGen, elem_ty: Type, ptr: WValue, len: WValue, value: WValue) InnerError!void {
-    const abi_size = @intCast(u32, elem_ty.abiSize(func.target));
+    const mod = func.bin_file.base.options.module.?;
+    const abi_size = @intCast(u32, elem_ty.abiSize(mod));
 
     // When bulk_memory is enabled, we lower it to wasm's memset instruction.
     // If not, we lower it ourselves.
@@ -4666,24 +4687,25 @@ fn airArrayElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const array = try func.resolveInst(bin_op.lhs);
     const index = try func.resolveInst(bin_op.rhs);
     const elem_ty = array_ty.childType();
-    const elem_size = elem_ty.abiSize(func.target);
+    const mod = func.bin_file.base.options.module.?;
+    const elem_size = elem_ty.abiSize(mod);
 
-    if (isByRef(array_ty, func.target)) {
+    if (isByRef(array_ty, mod)) {
         try func.lowerToStack(array);
         try func.emitWValue(index);
         try func.addImm32(@bitCast(i32, @intCast(u32, elem_size)));
         try func.addTag(.i32_mul);
         try func.addTag(.i32_add);
     } else {
-        std.debug.assert(array_ty.zigTypeTag() == .Vector);
+        std.debug.assert(array_ty.zigTypeTag(mod) == .Vector);
 
         switch (index) {
             inline .imm32, .imm64 => |lane| {
-                const opcode: wasm.SimdOpcode = switch (elem_ty.bitSize(func.target)) {
-                    8 => if (elem_ty.isSignedInt()) .i8x16_extract_lane_s else .i8x16_extract_lane_u,
-                    16 => if (elem_ty.isSignedInt()) .i16x8_extract_lane_s else .i16x8_extract_lane_u,
-                    32 => if (elem_ty.isInt()) .i32x4_extract_lane else .f32x4_extract_lane,
-                    64 => if (elem_ty.isInt()) .i64x2_extract_lane else .f64x2_extract_lane,
+                const opcode: wasm.SimdOpcode = switch (elem_ty.bitSize(mod)) {
+                    8 => if (elem_ty.isSignedInt(mod)) .i8x16_extract_lane_s else .i8x16_extract_lane_u,
+                    16 => if (elem_ty.isSignedInt(mod)) .i16x8_extract_lane_s else .i16x8_extract_lane_u,
+                    32 => if (elem_ty.isInt(mod)) .i32x4_extract_lane else .f32x4_extract_lane,
+                    64 => if (elem_ty.isInt(mod)) .i64x2_extract_lane else .f64x2_extract_lane,
                     else => unreachable,
                 };
 
@@ -4715,7 +4737,7 @@ fn airArrayElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 
         var result = try func.allocLocal(Type.usize);
         try func.addLabel(.local_set, result.local.value);
-        if (isByRef(elem_ty, func.target)) {
+        if (isByRef(elem_ty, mod)) {
             break :val result;
         }
         defer result.free(func); // only free if no longer needed and not returned like above
@@ -4733,17 +4755,18 @@ fn airFloatToInt(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const operand = try func.resolveInst(ty_op.operand);
     const dest_ty = func.air.typeOfIndex(inst);
     const op_ty = func.air.typeOf(ty_op.operand);
+    const mod = func.bin_file.base.options.module.?;
 
-    if (op_ty.abiSize(func.target) > 8) {
+    if (op_ty.abiSize(mod) > 8) {
         return func.fail("TODO: floatToInt for integers/floats with bitsize larger than 64 bits", .{});
     }
 
     try func.emitWValue(operand);
     const op = buildOpcode(.{
         .op = .trunc,
-        .valtype1 = typeToValtype(dest_ty, func.target),
-        .valtype2 = typeToValtype(op_ty, func.target),
-        .signedness = if (dest_ty.isSignedInt()) .signed else .unsigned,
+        .valtype1 = typeToValtype(dest_ty, mod),
+        .valtype2 = typeToValtype(op_ty, mod),
+        .signedness = if (dest_ty.isSignedInt(mod)) .signed else .unsigned,
     });
     try func.addTag(Mir.Inst.Tag.fromOpcode(op));
     const wrapped = try func.wrapOperand(.{ .stack = {} }, dest_ty);
@@ -4757,17 +4780,18 @@ fn airIntToFloat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const operand = try func.resolveInst(ty_op.operand);
     const dest_ty = func.air.typeOfIndex(inst);
     const op_ty = func.air.typeOf(ty_op.operand);
+    const mod = func.bin_file.base.options.module.?;
 
-    if (op_ty.abiSize(func.target) > 8) {
+    if (op_ty.abiSize(mod) > 8) {
         return func.fail("TODO: intToFloat for integers/floats with bitsize larger than 64 bits", .{});
     }
 
     try func.emitWValue(operand);
     const op = buildOpcode(.{
         .op = .convert,
-        .valtype1 = typeToValtype(dest_ty, func.target),
-        .valtype2 = typeToValtype(op_ty, func.target),
-        .signedness = if (op_ty.isSignedInt()) .signed else .unsigned,
+        .valtype1 = typeToValtype(dest_ty, mod),
+        .valtype2 = typeToValtype(op_ty, mod),
+        .signedness = if (op_ty.isSignedInt(mod)) .signed else .unsigned,
     });
     try func.addTag(Mir.Inst.Tag.fromOpcode(op));
 
@@ -4777,18 +4801,19 @@ fn airIntToFloat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 }
 
 fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const mod = func.bin_file.base.options.module.?;
     const ty_op = func.air.instructions.items(.data)[inst].ty_op;
     const operand = try func.resolveInst(ty_op.operand);
 
     const ty = func.air.typeOfIndex(inst);
     const elem_ty = ty.childType();
 
-    if (determineSimdStoreStrategy(ty, func.target) == .direct) blk: {
+    if (determineSimdStoreStrategy(ty, mod) == .direct) blk: {
         switch (operand) {
             // when the operand lives in the linear memory section, we can directly
             // load and splat the value at once. Meaning we do not first have to load
             // the scalar value onto the stack.
             .stack_offset, .memory, .memory_offset => {
-                const opcode = switch (elem_ty.bitSize(func.target)) {
+                const opcode = switch (elem_ty.bitSize(mod)) {
                     8 => std.wasm.simdOpcode(.v128_load8_splat),
                     16 => std.wasm.simdOpcode(.v128_load16_splat),
                     32 => std.wasm.simdOpcode(.v128_load32_splat),
@@ -4803,18 +4828,18 @@ fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
                 try func.mir_extra.appendSlice(func.gpa, &[_]u32{
                     opcode,
                     operand.offset(),
-                    elem_ty.abiAlignment(func.target),
+                    elem_ty.abiAlignment(mod),
                 });
                 try func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
                 try func.addLabel(.local_set, result.local.value);
                 return func.finishAir(inst, result, &.{ty_op.operand});
             },
             .local => {
-                const opcode = switch (elem_ty.bitSize(func.target)) {
+                const opcode = switch (elem_ty.bitSize(mod)) {
                     8 => std.wasm.simdOpcode(.i8x16_splat),
                     16 => std.wasm.simdOpcode(.i16x8_splat),
-                    32 => if (elem_ty.isInt()) std.wasm.simdOpcode(.i32x4_splat) else std.wasm.simdOpcode(.f32x4_splat),
-                    64 => if (elem_ty.isInt()) std.wasm.simdOpcode(.i64x2_splat) else std.wasm.simdOpcode(.f64x2_splat),
+                    32 => if (elem_ty.isInt(mod)) std.wasm.simdOpcode(.i32x4_splat) else std.wasm.simdOpcode(.f32x4_splat),
+                    64 => if (elem_ty.isInt(mod)) std.wasm.simdOpcode(.i64x2_splat) else std.wasm.simdOpcode(.f64x2_splat),
                     else => break :blk, // Cannot make use of simd-instructions
                 };
                 const result = try func.allocLocal(ty);
@@ -4828,14 +4853,14 @@ fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
             else => unreachable,
         }
     }
-    const elem_size = elem_ty.bitSize(func.target);
+    const elem_size = elem_ty.bitSize(mod);
     const vector_len = @intCast(usize, ty.vectorLen());
     if ((!std.math.isPowerOfTwo(elem_size) or elem_size % 8 != 0) and vector_len > 1) {
         return func.fail("TODO: WebAssembly `@splat` for arbitrary element bitsize {d}", .{elem_size});
     }
 
     const result = try func.allocStack(ty);
-    const elem_byte_size = @intCast(u32, elem_ty.abiSize(func.target));
+    const elem_byte_size = @intCast(u32, elem_ty.abiSize(mod));
     var index: usize = 0;
     var offset: u32 = 0;
     while (index < vector_len) : (index += 1) {
@@ -4855,6 +4880,7 @@ fn airSelect(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 }
 
 fn airShuffle(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const mod = func.bin_file.base.options.module.?;
     const inst_ty = func.air.typeOfIndex(inst);
     const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
     const extra = func.air.extraData(Air.Shuffle, ty_pl.payload).data;
@@ -4865,16 +4891,15 @@ fn airShuffle(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const mask_len = extra.mask_len;
 
     const child_ty = inst_ty.childType();
-    const elem_size = child_ty.abiSize(func.target);
+    const elem_size = child_ty.abiSize(mod);
 
-    const module = func.bin_file.base.options.module.?;
     // TODO: One of them could be by ref; handle in loop
-    if (isByRef(func.air.typeOf(extra.a), func.target) or isByRef(inst_ty, func.target)) {
+    if (isByRef(func.air.typeOf(extra.a), mod) or isByRef(inst_ty, mod)) {
         const result = try func.allocStack(inst_ty);
 
         for (0..mask_len) |index| {
            var buf: Value.ElemValueBuffer = undefined;
-            const value = mask.elemValueBuffer(module, index, &buf).toSignedInt(func.target);
+            const value = mask.elemValueBuffer(mod, index, &buf).toSignedInt(mod);
 
            try func.emitWValue(result);
 
@@ -4895,7 +4920,7 @@ fn airShuffle(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
         var lanes = std.mem.asBytes(operands[1..]);
         for (0..@intCast(usize, mask_len)) |index| {
             var buf: Value.ElemValueBuffer = undefined;
-            const mask_elem = mask.elemValueBuffer(module, index, &buf).toSignedInt(func.target);
+            const mask_elem = mask.elemValueBuffer(mod, index, &buf).toSignedInt(mod);
             const base_index = if (mask_elem >= 0)
                 @intCast(u8, @intCast(i64, elem_size) * mask_elem)
             else
@@ -4930,13 +4955,14 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const result_ty = func.air.typeOfIndex(inst);
     const len = @intCast(usize, result_ty.arrayLen());
     const elements = @ptrCast([]const Air.Inst.Ref, func.air.extra[ty_pl.payload..][0..len]);
+    const mod = func.bin_file.base.options.module.?;
 
     const result: WValue = result_value: {
-        switch (result_ty.zigTypeTag()) {
+        switch (result_ty.zigTypeTag(mod)) {
             .Array => {
                 const result = try func.allocStack(result_ty);
                 const elem_ty = result_ty.childType();
-                const elem_size = @intCast(u32, elem_ty.abiSize(func.target));
+                const elem_size = @intCast(u32, elem_ty.abiSize(mod));
                 const sentinel = if (result_ty.sentinel()) |sent| blk: {
                     break :blk try func.lowerConstant(sent, elem_ty);
                 } else null;
@@ -4944,7 +4970,7 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
                 // When the element type is by reference, we must copy the entire
                 // value. It is therefore safer to move the offset pointer and store
                 // each value individually, instead of using store offsets.
-                if (isByRef(elem_ty, func.target)) {
+                if (isByRef(elem_ty, mod)) {
                     // copy stack pointer into a temporary local, which is
                     // moved for each element to store each value in the right position.
                     const offset = try func.buildPointerOffset(result, 0, .new);
@@ -4974,7 +5000,7 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
             },
             .Struct => switch (result_ty.containerLayout()) {
                 .Packed => {
-                    if (isByRef(result_ty, func.target)) {
+                    if (isByRef(result_ty, mod)) {
                         return func.fail("TODO: airAggregateInit for packed structs larger than 64 bits", .{});
                     }
                     const struct_obj = result_ty.castTag(.@"struct").?.data;
@@ -4983,7 +5009,7 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 
                     // ensure the result is zero'd
                     const result = try func.allocLocal(backing_type);
-                    if (struct_obj.backing_int_ty.bitSize(func.target) <= 32)
+                    if (struct_obj.backing_int_ty.bitSize(mod) <= 32)
                         try func.addImm32(0)
                     else
                         try func.addImm64(0);
@@ -4992,20 +5018,16 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
                     var current_bit: u16 = 0;
                     for (elements, 0..) |elem, elem_index| {
                         const field = fields[elem_index];
-                        if (!field.ty.hasRuntimeBitsIgnoreComptime()) continue;
+                        if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
 
-                        const shift_val = if (struct_obj.backing_int_ty.bitSize(func.target) <= 32)
+                        const shift_val = if (struct_obj.backing_int_ty.bitSize(mod) <= 32)
                             WValue{ .imm32 = current_bit }
                         else
                             WValue{ .imm64 = current_bit };
 
                         const value = try func.resolveInst(elem);
-                        const value_bit_size = @intCast(u16, field.ty.bitSize(func.target));
-                        var int_ty_payload: Type.Payload.Bits = .{
-                            .base = .{ .tag = .int_unsigned },
-                            .data = value_bit_size,
-                        };
-                        const int_ty = Type.initPayload(&int_ty_payload.base);
+                        const value_bit_size = @intCast(u16, field.ty.bitSize(mod));
+                        const int_ty = try mod.intType(.unsigned, value_bit_size);
 
                         // load our current result on stack so we can perform all transformations
                         // using only stack values. Saving the cost of loads and stores.
@@ -5027,10 +5049,10 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
                     const result = try func.allocStack(result_ty);
                     const offset = try func.buildPointerOffset(result, 0, .new); // pointer to offset
                     for (elements, 0..) |elem, elem_index| {
-                        if (result_ty.structFieldValueComptime(elem_index) != null) continue;
+                        if (result_ty.structFieldValueComptime(mod, elem_index) != null) continue;
 
                         const elem_ty = result_ty.structFieldType(elem_index);
-                        const elem_size = @intCast(u32, elem_ty.abiSize(func.target));
+                        const elem_size = @intCast(u32, elem_ty.abiSize(mod));
                         const value = try func.resolveInst(elem);
                         try func.store(offset, value, elem_ty, 0);
 
@@ -5058,12 +5080,13 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 }
 
 fn airUnionInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const mod = func.bin_file.base.options.module.?;
     const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
     const extra = func.air.extraData(Air.UnionInit, ty_pl.payload).data;
 
     const result = result: {
         const union_ty = func.air.typeOfIndex(inst);
-        const layout = union_ty.unionGetLayout(func.target);
+        const layout = union_ty.unionGetLayout(mod);
         const union_obj = union_ty.cast(Type.Payload.Union).?.data;
         const field = union_obj.fields.values()[extra.field_index];
         const field_name = union_obj.fields.keys()[extra.field_index];
@@ -5082,15 +5105,15 @@ fn airUnionInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
             if (layout.tag_size == 0) {
                 break :result WValue{ .none = {} };
             }
-            assert(!isByRef(union_ty, func.target));
+            assert(!isByRef(union_ty, mod));
             break :result tag_int;
         }
 
-        if (isByRef(union_ty, func.target)) {
+        if (isByRef(union_ty, mod)) {
             const result_ptr = try func.allocStack(union_ty);
             const payload = try func.resolveInst(extra.init);
             if (layout.tag_align >= layout.payload_align) {
-                if (isByRef(field.ty, func.target)) {
+                if (isByRef(field.ty, mod)) {
                     const payload_ptr = try func.buildPointerOffset(result_ptr, layout.tag_size, .new);
                     try func.store(payload_ptr, payload, field.ty, 0);
                 } else {
@@ -5114,26 +5137,14 @@ fn airUnionInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
             break :result result_ptr;
         } else {
             const operand = try func.resolveInst(extra.init);
-            var payload: Type.Payload.Bits = .{
-                .base = .{ .tag = .int_unsigned },
-                .data = @intCast(u16, union_ty.bitSize(func.target)),
-            };
-            const union_int_type = Type.initPayload(&payload.base);
-            if (field.ty.zigTypeTag() == .Float) {
-                var int_payload: Type.Payload.Bits = .{
-                    .base = .{ .tag = .int_unsigned },
-                    .data = @intCast(u16, field.ty.bitSize(func.target)),
-                };
-                const int_type = Type.initPayload(&int_payload.base);
+            const union_int_type = try mod.intType(.unsigned, @intCast(u16, union_ty.bitSize(mod)));
+            if (field.ty.zigTypeTag(mod) == .Float) {
+                const int_type = try mod.intType(.unsigned, @intCast(u16, field.ty.bitSize(mod)));
                 const bitcasted = try func.bitcast(field.ty, int_type, operand);
                 const casted = try func.trunc(bitcasted, int_type, union_int_type);
                 break :result try casted.toLocal(func, field.ty);
-            } else if (field.ty.isPtrAtRuntime()) {
-                var int_payload: Type.Payload.Bits = .{
-                    .base = .{ .tag = .int_unsigned },
-                    .data = @intCast(u16, field.ty.bitSize(func.target)),
-                };
-                const int_type = Type.initPayload(&int_payload.base);
+            } else if (field.ty.isPtrAtRuntime(mod)) {
+                const int_type = try mod.intType(.unsigned, @intCast(u16, field.ty.bitSize(mod)));
                 const casted = try func.intcast(operand, int_type, union_int_type);
                 break :result try casted.toLocal(func, field.ty);
             }
@@ -5171,7 +5182,8 @@ fn airWasmMemoryGrow(func: *CodeGen, inst: Air.Inst.Index) !void {
 }
 
 fn cmpOptionals(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op: std.math.CompareOperator) InnerError!WValue {
-    assert(operand_ty.hasRuntimeBitsIgnoreComptime());
+    const mod = func.bin_file.base.options.module.?;
+    assert(operand_ty.hasRuntimeBitsIgnoreComptime(mod));
     assert(op == .eq or op == .neq);
     var buf: Type.Payload.ElemType = undefined;
     const payload_ty = operand_ty.optionalChild(&buf);
@@ -5189,7 +5201,7 @@ fn cmpOptionals(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op:
             _ = try func.load(lhs, payload_ty, 0);
             _ = try func.load(rhs, payload_ty, 0);
-            const opcode = buildOpcode(.{ .op = .ne, .valtype1 = typeToValtype(payload_ty, func.target) });
+            const opcode = buildOpcode(.{ .op = .ne, .valtype1 = typeToValtype(payload_ty, mod) });
             try func.addTag(Mir.Inst.Tag.fromOpcode(opcode));
             try func.addLabel(.br_if, 0);
 
@@ -5207,10 +5219,11 @@ fn cmpOptionals(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op:
 /// NOTE: Leaves the result of the comparison on top of the stack.
 /// TODO: Lower this to compiler_rt call when bitsize > 128
 fn cmpBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op: std.math.CompareOperator) InnerError!WValue {
-    assert(operand_ty.abiSize(func.target) >= 16);
+    const mod = func.bin_file.base.options.module.?;
+    assert(operand_ty.abiSize(mod) >= 16);
     assert(!(lhs != .stack and rhs == .stack));
-    if (operand_ty.bitSize(func.target) > 128) {
-        return func.fail("TODO: Support cmpBigInt for integer bitsize: '{d}'", .{operand_ty.bitSize(func.target)});
+    if (operand_ty.bitSize(mod) > 128) {
+        return func.fail("TODO: Support cmpBigInt for integer bitsize: '{d}'", .{operand_ty.bitSize(mod)});
     }
 
     var lhs_high_bit = try (try func.load(lhs, Type.u64, 0)).toLocal(func, Type.u64);
@@ -5233,7 +5246,7 @@ fn cmpBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op: std
             }
         },
         else => {
-            const ty = if (operand_ty.isSignedInt()) Type.i64 else Type.u64;
+            const ty = if (operand_ty.isSignedInt(mod)) Type.i64 else Type.u64;
             // leave those values on top of the stack for '.select'
             const lhs_low_bit = try func.load(lhs, Type.u64, 8);
             const rhs_low_bit = try func.load(rhs, Type.u64, 8);
@@ -5248,10 +5261,11 @@ fn cmpBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op: std
 }
 
 fn airSetUnionTag(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const mod = func.bin_file.base.options.module.?;
     const bin_op = func.air.instructions.items(.data)[inst].bin_op;
     const un_ty = func.air.typeOf(bin_op.lhs).childType();
 
     const tag_ty = func.air.typeOf(bin_op.rhs);
-    const layout = un_ty.unionGetLayout(func.target);
+    const layout = un_ty.unionGetLayout(mod);
     if (layout.tag_size == 0) return func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
 
     const union_ptr = try func.resolveInst(bin_op.lhs);
@@ -5271,11 +5285,12 @@ fn airSetUnionTag(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 }
 
 fn airGetUnionTag(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const mod = func.bin_file.base.options.module.?;
     const ty_op = func.air.instructions.items(.data)[inst].ty_op;
 
     const un_ty = func.air.typeOf(ty_op.operand);
     const tag_ty = func.air.typeOfIndex(inst);
-    const layout = un_ty.unionGetLayout(func.target);
+    const layout = un_ty.unionGetLayout(mod);
     if (layout.tag_size == 0) return func.finishAir(inst, .none, &.{ty_op.operand});
 
     const operand = try func.resolveInst(ty_op.operand);
@@ -5375,6 +5390,7 @@ fn fptrunc(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerErro
 }
 
 fn airErrUnionPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const mod = func.bin_file.base.options.module.?;
     const ty_op = func.air.instructions.items(.data)[inst].ty_op;
     const err_set_ty = func.air.typeOf(ty_op.operand).childType();
 
@@ -5386,26 +5402,27 @@ fn airErrUnionPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!voi
         operand,
         .{ .imm32 = 0 },
         Type.anyerror,
-        @intCast(u32, errUnionErrorOffset(payload_ty, func.target)),
+        @intCast(u32, errUnionErrorOffset(payload_ty, mod)),
     );
 
     const result = result: {
-        if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+        if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
             break :result func.reuseOperand(ty_op.operand, operand);
         }
 
-        break :result try func.buildPointerOffset(operand, @intCast(u32, errUnionPayloadOffset(payload_ty, func.target)), .new);
+        break :result try func.buildPointerOffset(operand, @intCast(u32, errUnionPayloadOffset(payload_ty, mod)), .new);
     };
     func.finishAir(inst, result, &.{ty_op.operand});
 }
 
 fn airFieldParentPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const mod = func.bin_file.base.options.module.?;
     const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
     const extra = func.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
 
     const field_ptr = try func.resolveInst(extra.field_ptr);
     const parent_ty = func.air.getRefType(ty_pl.ty).childType();
-    const field_offset = parent_ty.structFieldOffset(extra.field_index, func.target);
+    const field_offset = parent_ty.structFieldOffset(extra.field_index, mod);
 
     const result = if (field_offset != 0) result: {
         const base = try func.buildPointerOffset(field_ptr, 0, .new);
@@ -5428,6 +5445,7 @@ fn sliceOrArrayPtr(func: *CodeGen, ptr: WValue, ptr_ty: Type) InnerError!WValue
 }
 
 fn airMemcpy(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const mod = func.bin_file.base.options.module.?;
     const bin_op = func.air.instructions.items(.data)[inst].bin_op;
     const dst = try func.resolveInst(bin_op.lhs);
     const dst_ty = func.air.typeOf(bin_op.lhs);
@@ -5437,16 +5455,16 @@ fn airMemcpy(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const len = switch (dst_ty.ptrSize()) {
         .Slice => blk: {
             const slice_len = try func.sliceLen(dst);
-            if (ptr_elem_ty.abiSize(func.target) != 1) {
+            if (ptr_elem_ty.abiSize(mod) != 1) {
                 try func.emitWValue(slice_len);
-                try func.emitWValue(.{ .imm32 = @intCast(u32, ptr_elem_ty.abiSize(func.target)) });
+                try func.emitWValue(.{ .imm32 = @intCast(u32, ptr_elem_ty.abiSize(mod)) });
                 try func.addTag(.i32_mul);
                 try func.addLabel(.local_set, slice_len.local.value);
             }
             break :blk slice_len;
         },
         .One => @as(WValue, .{
-            .imm32 = @intCast(u32, ptr_elem_ty.arrayLen() * ptr_elem_ty.childType().abiSize(func.target)),
+            .imm32 = @intCast(u32, ptr_elem_ty.arrayLen() * ptr_elem_ty.childType().abiSize(mod)),
        }),
        .C, .Many => unreachable,
    };
@@ -5472,12 +5490,13 @@ fn airPopcount(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const operand = try func.resolveInst(ty_op.operand);
     const op_ty = func.air.typeOf(ty_op.operand);
     const result_ty = func.air.typeOfIndex(inst);
+    const mod = func.bin_file.base.options.module.?;
 
-    if (op_ty.zigTypeTag() == .Vector) {
+    if (op_ty.zigTypeTag(mod) == .Vector) {
         return func.fail("TODO: Implement @popCount for vectors", .{});
     }
 
-    const int_info = op_ty.intInfo(func.target);
+    const int_info = op_ty.intInfo(mod);
     const bits = int_info.bits;
     const wasm_bits = toWasmBits(bits) orelse {
         return func.fail("TODO: Implement @popCount for integers with bitsize '{d}'", .{bits});
    };
@@ -5527,7 +5546,8 @@ fn airErrorName(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     // to make a copy of the ptr+value but can point towards them directly.
     const error_table_symbol = try func.bin_file.getErrorTableSymbol();
     const name_ty = Type.initTag(.const_slice_u8_sentinel_0);
-    const abi_size = name_ty.abiSize(func.target);
+    const mod = func.bin_file.base.options.module.?;
+    const abi_size = name_ty.abiSize(mod);
 
     const error_name_value: WValue = .{ .memory = error_table_symbol }; // emitting this will create a relocation
     try func.emitWValue(error_name_value);
@@ -5566,12 +5586,13 @@ fn airAddSubWithOverflow(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerErro
     const lhs_op = try func.resolveInst(extra.lhs);
     const rhs_op = try func.resolveInst(extra.rhs);
     const lhs_ty = func.air.typeOf(extra.lhs);
+    const mod = func.bin_file.base.options.module.?;
 
-    if (lhs_ty.zigTypeTag() == .Vector) {
+    if (lhs_ty.zigTypeTag(mod) == .Vector) {
         return func.fail("TODO: Implement overflow arithmetic for vectors", .{});
     }
 
-    const int_info = lhs_ty.intInfo(func.target);
+    const int_info = lhs_ty.intInfo(mod);
     const is_signed = int_info.signedness == .signed;
     const wasm_bits = toWasmBits(int_info.bits) orelse {
         return func.fail("TODO: Implement {{add/sub}}_with_overflow for integer bitsize: {d}", .{int_info.bits});
    };
@@ -5630,15 +5651,16 @@ fn airAddSubWithOverflow(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerErro
 
     const result_ptr = try func.allocStack(func.air.typeOfIndex(inst));
     try func.store(result_ptr, result, lhs_ty, 0);
-    const offset = @intCast(u32, lhs_ty.abiSize(func.target));
+    const offset = @intCast(u32, lhs_ty.abiSize(mod));
     try func.store(result_ptr, overflow_local, Type.initTag(.u1), offset);
 
     func.finishAir(inst, result_ptr, &.{ extra.lhs, extra.rhs });
 }
 
 fn addSubWithOverflowBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, result_ty: Type, op: Op) InnerError!WValue {
+    const mod = func.bin_file.base.options.module.?;
     assert(op == .add or op == .sub);
-    const int_info = ty.intInfo(func.target);
+    const int_info = ty.intInfo(mod);
     const is_signed = int_info.signedness == .signed;
     if (int_info.bits != 128) {
         return func.fail("TODO: Implement @{{add/sub}}WithOverflow for integer bitsize '{d}'", .{int_info.bits});
     }
@@ -5701,6 +5723,7 @@ fn addSubWithOverflowBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type,
 }
 
 fn airShlWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const mod = func.bin_file.base.options.module.?;
     const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
     const extra = func.air.extraData(Air.Bin, ty_pl.payload).data;
 
@@ -5709,11 +5732,11 @@ fn airShlWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const lhs_ty = func.air.typeOf(extra.lhs);
     const rhs_ty = func.air.typeOf(extra.rhs);
 
-    if (lhs_ty.zigTypeTag() == .Vector) {
+    if (lhs_ty.zigTypeTag(mod) == .Vector) {
         return func.fail("TODO: Implement overflow arithmetic for vectors", .{});
     }
 
-    const int_info = lhs_ty.intInfo(func.target);
+    const int_info = lhs_ty.intInfo(mod);
     const is_signed = int_info.signedness == .signed;
     const wasm_bits = toWasmBits(int_info.bits) orelse {
         return func.fail("TODO: Implement shl_with_overflow for integer bitsize: {d}", .{int_info.bits});
@@ -5721,7 +5744,7 @@ fn airShlWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 
     // Ensure rhs is coerced to lhs as they must have the same WebAssembly types
     // before we can perform any binary operation.
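     // For instance (illustrative values only, not part of this change): a
     // `u64 << u6` shift gives the rhs a 32-bit wasm representation while the
     // lhs needs 64 bits, so the rhs is widened with `intcast` first:
     //
     //   const lhs: u64 = 1;
     //   const rhs: u6 = 40;     // lives in an i32 local in wasm
     //   const res = lhs << rhs; // rhs must become an i64 before i64.shl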
-    const rhs_wasm_bits = toWasmBits(rhs_ty.intInfo(func.target).bits).?;
+    const rhs_wasm_bits = toWasmBits(rhs_ty.intInfo(mod).bits).?;
     const rhs_final = if (wasm_bits != rhs_wasm_bits) blk: {
         const rhs_casted = try func.intcast(rhs, rhs_ty, lhs_ty);
         break :blk try rhs_casted.toLocal(func, lhs_ty);
@@ -5750,7 +5773,7 @@ fn airShlWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 
     const result_ptr = try func.allocStack(func.air.typeOfIndex(inst));
     try func.store(result_ptr, result, lhs_ty, 0);
-    const offset = @intCast(u32, lhs_ty.abiSize(func.target));
+    const offset = @intCast(u32, lhs_ty.abiSize(mod));
     try func.store(result_ptr, overflow_local, Type.initTag(.u1), offset);
 
     func.finishAir(inst, result_ptr, &.{ extra.lhs, extra.rhs });
@@ -5763,8 +5786,9 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const lhs = try func.resolveInst(extra.lhs);
     const rhs = try func.resolveInst(extra.rhs);
     const lhs_ty = func.air.typeOf(extra.lhs);
+    const mod = func.bin_file.base.options.module.?;
 
-    if (lhs_ty.zigTypeTag() == .Vector) {
+    if (lhs_ty.zigTypeTag(mod) == .Vector) {
         return func.fail("TODO: Implement overflow arithmetic for vectors", .{});
     }
 
@@ -5773,7 +5797,7 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     var overflow_bit = try func.ensureAllocLocal(Type.initTag(.u1));
     defer overflow_bit.free(func);
 
-    const int_info = lhs_ty.intInfo(func.target);
+    const int_info = lhs_ty.intInfo(mod);
     const wasm_bits = toWasmBits(int_info.bits) orelse {
         return func.fail("TODO: Implement `@mulWithOverflow` for integer bitsize: {d}", .{int_info.bits});
     };
@@ -5924,7 +5948,7 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 
     const result_ptr = try func.allocStack(func.air.typeOfIndex(inst));
     try func.store(result_ptr, bin_op_local, lhs_ty, 0);
-    const offset = @intCast(u32, lhs_ty.abiSize(func.target));
+    const offset = @intCast(u32, lhs_ty.abiSize(mod));
     try func.store(result_ptr, overflow_bit, Type.initTag(.u1), offset);
 
     func.finishAir(inst, result_ptr, &.{ extra.lhs, extra.rhs });
@@ -5934,11 +5958,12 @@ fn airMaxMin(func: *CodeGen, inst: Air.Inst.Index, op: enum { max, min }) InnerE
     const bin_op = func.air.instructions.items(.data)[inst].bin_op;
 
     const ty = func.air.typeOfIndex(inst);
-    if (ty.zigTypeTag() == .Vector) {
+    const mod = func.bin_file.base.options.module.?;
+    if (ty.zigTypeTag(mod) == .Vector) {
         return func.fail("TODO: `@maximum` and `@minimum` for vectors", .{});
     }
 
-    if (ty.abiSize(func.target) > 16) {
+    if (ty.abiSize(mod) > 16) {
         return func.fail("TODO: `@maximum` and `@minimum` for types larger than 16 bytes", .{});
     }
 
@@ -5954,7 +5979,7 @@ fn airMaxMin(func: *CodeGen, inst: Air.Inst.Index, op: enum { max, min }) InnerE
     try func.addTag(.select);
 
     // store result in local
-    const result_ty = if (isByRef(ty, func.target)) Type.u32 else ty;
+    const result_ty = if (isByRef(ty, mod)) Type.u32 else ty;
     const result = try func.allocLocal(result_ty);
     try func.addLabel(.local_set, result.local.value);
     func.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
@@ -5965,7 +5990,8 @@ fn airMulAdd(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const bin_op = func.air.extraData(Air.Bin, pl_op.payload).data;
 
     const ty = func.air.typeOfIndex(inst);
-    if (ty.zigTypeTag() == .Vector) {
+    const mod = func.bin_file.base.options.module.?;
+    if (ty.zigTypeTag(mod) == .Vector) {
         return func.fail("TODO: `@mulAdd` for vectors", .{});
     }
 
@@ -5998,12 +6024,13 @@ fn airClz(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const ty = func.air.typeOf(ty_op.operand);
     const result_ty = func.air.typeOfIndex(inst);
-    if (ty.zigTypeTag() == .Vector) {
+    const mod = func.bin_file.base.options.module.?;
+    if (ty.zigTypeTag(mod) == .Vector) {
         return func.fail("TODO: `@clz` for vectors", .{});
     }
 
     const operand = try func.resolveInst(ty_op.operand);
-    const int_info = ty.intInfo(func.target);
+    const int_info = ty.intInfo(mod);
     const wasm_bits = toWasmBits(int_info.bits) orelse {
         return func.fail("TODO: `@clz` for integers with bitsize '{d}'", .{int_info.bits});
     };
@@ -6051,12 +6078,13 @@ fn airCtz(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const ty = func.air.typeOf(ty_op.operand);
     const result_ty = func.air.typeOfIndex(inst);
 
-    if (ty.zigTypeTag() == .Vector) {
+    const mod = func.bin_file.base.options.module.?;
+    if (ty.zigTypeTag(mod) == .Vector) {
         return func.fail("TODO: `@ctz` for vectors", .{});
     }
 
     const operand = try func.resolveInst(ty_op.operand);
-    const int_info = ty.intInfo(func.target);
+    const int_info = ty.intInfo(mod);
     const wasm_bits = toWasmBits(int_info.bits) orelse {
         return func.fail("TODO: `@ctz` for integers with bitsize '{d}'", .{int_info.bits});
     };
@@ -6174,12 +6202,13 @@ fn lowerTry(
     err_union_ty: Type,
     operand_is_ptr: bool,
 ) InnerError!WValue {
+    const mod = func.bin_file.base.options.module.?;
     if (operand_is_ptr) {
         return func.fail("TODO: lowerTry for pointers", .{});
     }
 
     const pl_ty = err_union_ty.errorUnionPayload();
-    const pl_has_bits = pl_ty.hasRuntimeBitsIgnoreComptime();
+    const pl_has_bits = pl_ty.hasRuntimeBitsIgnoreComptime(mod);
 
     if (!err_union_ty.errorUnionSet().errorSetIsEmpty()) {
         // Block we can jump out of when error is not set
@@ -6188,10 +6217,10 @@ fn lowerTry(
 
         // check if the error tag is set for the error union.
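         // Layout assumed here: the error union keeps its `anyerror` tag (a
         // 16-bit integer in this backend, hence the i32_load16_u below) at
         // errUnionErrorOffset and the payload at errUnionPayloadOffset; a
         // zero tag means "no error", so i32_eqz + br_if skips the error path.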
         try func.emitWValue(err_union);
         if (pl_has_bits) {
-            const err_offset = @intCast(u32, errUnionErrorOffset(pl_ty, func.target));
+            const err_offset = @intCast(u32, errUnionErrorOffset(pl_ty, mod));
             try func.addMemArg(.i32_load16_u, .{
                 .offset = err_union.offset() + err_offset,
-                .alignment = Type.anyerror.abiAlignment(func.target),
+                .alignment = Type.anyerror.abiAlignment(mod),
             });
         }
         try func.addTag(.i32_eqz);
@@ -6213,8 +6242,8 @@ fn lowerTry(
         return WValue{ .none = {} };
     }
 
-    const pl_offset = @intCast(u32, errUnionPayloadOffset(pl_ty, func.target));
-    if (isByRef(pl_ty, func.target)) {
+    const pl_offset = @intCast(u32, errUnionPayloadOffset(pl_ty, mod));
+    if (isByRef(pl_ty, mod)) {
         return buildPointerOffset(func, err_union, pl_offset, .new);
     }
 
     const payload = try func.load(err_union, pl_ty, pl_offset);
@@ -6226,11 +6255,12 @@ fn airByteSwap(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 
     const ty = func.air.typeOfIndex(inst);
     const operand = try func.resolveInst(ty_op.operand);
+    const mod = func.bin_file.base.options.module.?;
 
-    if (ty.zigTypeTag() == .Vector) {
+    if (ty.zigTypeTag(mod) == .Vector) {
         return func.fail("TODO: @byteSwap for vectors", .{});
     }
-    const int_info = ty.intInfo(func.target);
+    const int_info = ty.intInfo(mod);
 
     // bytes are no-op
     if (int_info.bits == 8) {
@@ -6292,13 +6322,14 @@ fn airByteSwap(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 }
 
 fn airDiv(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const mod = func.bin_file.base.options.module.?;
     const bin_op = func.air.instructions.items(.data)[inst].bin_op;
 
     const ty = func.air.typeOfIndex(inst);
     const lhs = try func.resolveInst(bin_op.lhs);
     const rhs = try func.resolveInst(bin_op.rhs);
 
-    const result = if (ty.isSignedInt())
+    const result = if (ty.isSignedInt(mod))
         try func.divSigned(lhs, rhs, ty)
     else
         try (try func.binOp(lhs, rhs, ty, .div)).toLocal(func, ty);
@@ -6306,13 +6337,14 @@ fn airDiv(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 }
 
 fn airDivTrunc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const mod = func.bin_file.base.options.module.?;
     const bin_op = func.air.instructions.items(.data)[inst].bin_op;
 
     const ty = func.air.typeOfIndex(inst);
     const lhs = try func.resolveInst(bin_op.lhs);
     const rhs = try func.resolveInst(bin_op.rhs);
 
-    const div_result = if (ty.isSignedInt())
+    const div_result = if (ty.isSignedInt(mod))
         try func.divSigned(lhs, rhs, ty)
     else
         try (try func.binOp(lhs, rhs, ty, .div)).toLocal(func, ty);
@@ -6328,15 +6360,16 @@ fn airDivTrunc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 
 fn airDivFloor(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const bin_op = func.air.instructions.items(.data)[inst].bin_op;
+    const mod = func.bin_file.base.options.module.?;
 
     const ty = func.air.typeOfIndex(inst);
     const lhs = try func.resolveInst(bin_op.lhs);
     const rhs = try func.resolveInst(bin_op.rhs);
 
-    if (ty.isUnsignedInt()) {
+    if (ty.isUnsignedInt(mod)) {
         const result = try (try func.binOp(lhs, rhs, ty, .div)).toLocal(func, ty);
         return func.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
-    } else if (ty.isSignedInt()) {
-        const int_bits = ty.intInfo(func.target).bits;
+    } else if (ty.isSignedInt(mod)) {
+        const int_bits = ty.intInfo(mod).bits;
         const wasm_bits = toWasmBits(int_bits) orelse {
             return func.fail("TODO: `@divFloor` for signed integers larger than '{d}' bits", .{int_bits});
         };
@@ -6414,7 +6447,8 @@ fn airDivFloor(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 }
 
 fn divSigned(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type) InnerError!WValue {
-    const int_bits = ty.intInfo(func.target).bits;
+    const mod = func.bin_file.base.options.module.?;
+    const int_bits = ty.intInfo(mod).bits;
     const wasm_bits = toWasmBits(int_bits) orelse {
         return func.fail("TODO: Implement signed division for integers with bitsize '{d}'", .{int_bits});
     };
@@ -6441,7 +6475,8 @@ fn divSigned(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type) InnerError!WVal
 
 /// Retrieves the absolute value of a signed integer
 /// NOTE: Leaves the result value on the stack.
 fn signAbsValue(func: *CodeGen, operand: WValue, ty: Type) InnerError!WValue {
-    const int_bits = ty.intInfo(func.target).bits;
+    const mod = func.bin_file.base.options.module.?;
+    const int_bits = ty.intInfo(mod).bits;
     const wasm_bits = toWasmBits(int_bits) orelse {
         return func.fail("TODO: signAbsValue for signed integers larger than '{d}' bits", .{int_bits});
     };
@@ -6476,11 +6511,12 @@ fn airSatBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
     assert(op == .add or op == .sub);
 
     const bin_op = func.air.instructions.items(.data)[inst].bin_op;
+    const mod = func.bin_file.base.options.module.?;
 
     const ty = func.air.typeOfIndex(inst);
     const lhs = try func.resolveInst(bin_op.lhs);
     const rhs = try func.resolveInst(bin_op.rhs);
 
-    const int_info = ty.intInfo(func.target);
+    const int_info = ty.intInfo(mod);
     const is_signed = int_info.signedness == .signed;
 
     if (int_info.bits > 64) {
@@ -6523,7 +6559,8 @@ fn airSatBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
 }
 
 fn signedSat(func: *CodeGen, lhs_operand: WValue, rhs_operand: WValue, ty: Type, op: Op) InnerError!WValue {
-    const int_info = ty.intInfo(func.target);
+    const mod = func.bin_file.base.options.module.?;
+    const int_info = ty.intInfo(mod);
     const wasm_bits = toWasmBits(int_info.bits).?;
     const is_wasm_bits = wasm_bits == int_info.bits;
 
@@ -6588,8 +6625,9 @@ fn signedSat(func: *CodeGen, lhs_operand: WValue, rhs_operand: WValue, ty: Type,
 
 fn airShlSat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const bin_op = func.air.instructions.items(.data)[inst].bin_op;
+    const mod = func.bin_file.base.options.module.?;
 
     const ty = func.air.typeOfIndex(inst);
-    const int_info = ty.intInfo(func.target);
+    const int_info = ty.intInfo(mod);
     const is_signed = int_info.signedness == .signed;
     if (int_info.bits > 64) {
         return func.fail("TODO: Saturating shifting left for integers with bitsize '{d}'", .{int_info.bits});
     }
@@ -6707,12 +6745,13 @@ fn callIntrinsic(
     };
 
     // Always pass over C-ABI
-    var func_type = try genFunctype(func.gpa, .C, param_types, return_type, func.target);
+    const mod = func.bin_file.base.options.module.?;
+    var func_type = try genFunctype(func.gpa, .C, param_types, return_type, mod);
     defer func_type.deinit(func.gpa);
     const func_type_index = try func.bin_file.putOrGetFuncType(func_type);
     try func.bin_file.addOrUpdateImport(name, symbol_index, null, func_type_index);
 
-    const want_sret_param = firstParamSRet(.C, return_type, func.target);
+    const want_sret_param = firstParamSRet(.C, return_type, mod);
     // if we want return as first param, we allocate a pointer to stack,
     // and emit it as our first argument
     const sret = if (want_sret_param) blk: {
@@ -6724,14 +6763,14 @@ fn callIntrinsic(
 
     // Lower all arguments to the stack before we call our function
     for (args, 0..) |arg, arg_i| {
         assert(!(want_sret_param and arg == .stack));
-        assert(param_types[arg_i].hasRuntimeBitsIgnoreComptime());
+        assert(param_types[arg_i].hasRuntimeBitsIgnoreComptime(mod));
         try func.lowerArg(.C, param_types[arg_i], arg);
     }
 
     // Actually call our intrinsic
     try func.addLabel(.call, symbol_index);
 
-    if (!return_type.hasRuntimeBitsIgnoreComptime()) {
+    if (!return_type.hasRuntimeBitsIgnoreComptime(mod)) {
         return WValue.none;
     } else if (return_type.isNoReturn()) {
         try func.addTag(.@"unreachable");
@@ -6759,15 +6798,15 @@ fn airTagName(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 }
 
 fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 {
+    const mod = func.bin_file.base.options.module.?;
     const enum_decl_index = enum_ty.getOwnerDecl();
-    const module = func.bin_file.base.options.module.?;
 
     var arena_allocator = std.heap.ArenaAllocator.init(func.gpa);
     defer arena_allocator.deinit();
     const arena = arena_allocator.allocator();
 
-    const fqn = try module.declPtr(enum_decl_index).getFullyQualifiedName(module);
-    defer module.gpa.free(fqn);
+    const fqn = try mod.declPtr(enum_decl_index).getFullyQualifiedName(mod);
+    defer mod.gpa.free(fqn);
     const func_name = try std.fmt.allocPrintZ(arena, "__zig_tag_name_{s}", .{fqn});
 
     // check if we already generated code for this.
@@ -6775,10 +6814,9 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 {
         return loc.index;
     }
 
-    var int_tag_type_buffer: Type.Payload.Bits = undefined;
-    const int_tag_ty = enum_ty.intTagType(&int_tag_type_buffer);
+    const int_tag_ty = enum_ty.intTagType();
 
-    if (int_tag_ty.bitSize(func.target) > 64) {
+    if (int_tag_ty.bitSize(mod) > 64) {
         return func.fail("TODO: Implement @tagName for enums with tag size larger than 64 bits", .{});
     }
 
@@ -6806,9 +6844,9 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 {
             .data = @intCast(u64, tag_name.len),
         };
         const name_ty = Type.initPayload(&name_ty_payload.base);
-        const string_bytes = &module.string_literal_bytes;
-        try string_bytes.ensureUnusedCapacity(module.gpa, tag_name.len);
-        const gop = try module.string_literal_table.getOrPutContextAdapted(module.gpa, tag_name, Module.StringLiteralAdapter{
+        const string_bytes = &mod.string_literal_bytes;
+        try string_bytes.ensureUnusedCapacity(mod.gpa, tag_name.len);
+        const gop = try mod.string_literal_table.getOrPutContextAdapted(mod.gpa, tag_name, Module.StringLiteralAdapter{
             .bytes = string_bytes,
         }, Module.StringLiteralContext{
             .bytes = string_bytes,
@@ -6929,7 +6967,7 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 {
     try writer.writeByte(std.wasm.opcode(.end));
 
     const slice_ty = Type.initTag(.const_slice_u8_sentinel_0);
-    const func_type = try genFunctype(arena, .Unspecified, &.{int_tag_ty}, slice_ty, func.target);
+    const func_type = try genFunctype(arena, .Unspecified, &.{int_tag_ty}, slice_ty, mod);
     return func.bin_file.createFunction(func_name, func_type, &body_list, &relocs);
 }
 
@@ -6944,11 +6982,11 @@ fn airErrorSetHasValue(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     var values = try std.ArrayList(u32).initCapacity(func.gpa, names.len);
     defer values.deinit();
 
-    const module = func.bin_file.base.options.module.?;
+    const mod = func.bin_file.base.options.module.?;
     var lowest: ?u32 = null;
     var highest: ?u32 = null;
     for (names) |name| {
-        const err_int = module.global_error_set.get(name).?;
+        const err_int = mod.global_error_set.get(name).?;
         if (lowest) |*l| {
             if (err_int < l.*) {
                 l.* = err_int;
@@ -7019,6 +7057,7 @@ inline fn useAtomicFeature(func: *const CodeGen) bool {
 }
 
 fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const mod = func.bin_file.base.options.module.?;
     const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
     const extra = func.air.extraData(Air.Cmpxchg, ty_pl.payload).data;
 
@@ -7037,7 +7076,7 @@ fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
         try func.emitWValue(ptr_operand);
         try func.lowerToStack(expected_val);
         try func.lowerToStack(new_val);
-        try func.addAtomicMemArg(switch (ty.abiSize(func.target)) {
+        try func.addAtomicMemArg(switch (ty.abiSize(mod)) {
             1 => .i32_atomic_rmw8_cmpxchg_u,
             2 => .i32_atomic_rmw16_cmpxchg_u,
             4 => .i32_atomic_rmw_cmpxchg,
@@ -7045,14 +7084,14 @@ fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
             else => |size| return func.fail("TODO: implement `@cmpxchg` for types with abi size '{d}'", .{size}),
         }, .{
             .offset = ptr_operand.offset(),
-            .alignment = ty.abiAlignment(func.target),
+            .alignment = ty.abiAlignment(mod),
         });
         try func.addLabel(.local_tee, val_local.local.value);
         _ = try func.cmp(.stack, expected_val, ty, .eq);
         try func.addLabel(.local_set, cmp_result.local.value);
         break :val val_local;
     } else val: {
-        if (ty.abiSize(func.target) > 8) {
+        if (ty.abiSize(mod) > 8) {
             return func.fail("TODO: Implement `@cmpxchg` for types larger than abi size of 8 bytes", .{});
         }
         const ptr_val = try WValue.toLocal(try func.load(ptr_operand, ty, 0), func, ty);
@@ -7068,7 +7107,7 @@ fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
         break :val ptr_val;
     };
 
-    const result_ptr = if (isByRef(result_ty, func.target)) val: {
+    const result_ptr = if (isByRef(result_ty, mod)) val: {
         try func.emitWValue(cmp_result);
         try func.addImm32(-1);
         try func.addTag(.i32_xor);
@@ -7076,7 +7115,7 @@ fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
         try func.addTag(.i32_and);
         const and_result = try WValue.toLocal(.stack, func, Type.bool);
         const result_ptr = try func.allocStack(result_ty);
-        try func.store(result_ptr, and_result, Type.bool, @intCast(u32, ty.abiSize(func.target)));
+        try func.store(result_ptr, and_result, Type.bool, @intCast(u32, ty.abiSize(mod)));
         try func.store(result_ptr, ptr_val, ty, 0);
         break :val result_ptr;
     } else val: {
@@ -7091,12 +7130,13 @@ fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 }
 
 fn airAtomicLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const mod = func.bin_file.base.options.module.?;
     const atomic_load = func.air.instructions.items(.data)[inst].atomic_load;
     const ptr = try func.resolveInst(atomic_load.ptr);
     const ty = func.air.typeOfIndex(inst);
 
     if (func.useAtomicFeature()) {
-        const tag: wasm.AtomicsOpcode = switch (ty.abiSize(func.target)) {
+        const tag: wasm.AtomicsOpcode = switch (ty.abiSize(mod)) {
             1 => .i32_atomic_load8_u,
             2 => .i32_atomic_load16_u,
             4 => .i32_atomic_load,
@@ -7106,7 +7146,7 @@ fn airAtomicLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
         try func.emitWValue(ptr);
         try func.addAtomicMemArg(tag, .{
             .offset = ptr.offset(),
-            .alignment = ty.abiAlignment(func.target),
+            .alignment = ty.abiAlignment(mod),
         });
     } else {
         _ = try func.load(ptr, ty, 0);
@@ -7117,6 +7157,7 @@ fn airAtomicLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 }
 
 fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const mod = func.bin_file.base.options.module.?;
     const pl_op = func.air.instructions.items(.data)[inst].pl_op;
     const extra = func.air.extraData(Air.AtomicRmw, pl_op.payload).data;
 
@@ -7140,7 +7181,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
             try func.emitWValue(ptr);
             try func.emitWValue(value);
             if (op == .Nand) {
-                const wasm_bits = toWasmBits(@intCast(u16, ty.bitSize(func.target))).?;
+                const wasm_bits = toWasmBits(@intCast(u16, ty.bitSize(mod))).?;
 
                 const and_res = try func.binOp(value, operand, ty, .@"and");
                 if (wasm_bits == 32)
@@ -7157,7 +7198,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
                 try func.addTag(.select);
             }
             try func.addAtomicMemArg(
-                switch (ty.abiSize(func.target)) {
+                switch (ty.abiSize(mod)) {
                     1 => .i32_atomic_rmw8_cmpxchg_u,
                     2 => .i32_atomic_rmw16_cmpxchg_u,
                     4 => .i32_atomic_rmw_cmpxchg,
@@ -7166,7 +7207,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
                 },
                 .{
                     .offset = ptr.offset(),
-                    .alignment = ty.abiAlignment(func.target),
+                    .alignment = ty.abiAlignment(mod),
                 },
             );
             const select_res = try func.allocLocal(ty);
@@ -7185,7 +7226,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
         else => {
             try func.emitWValue(ptr);
             try func.emitWValue(operand);
-            const tag: wasm.AtomicsOpcode = switch (ty.abiSize(func.target)) {
+            const tag: wasm.AtomicsOpcode = switch (ty.abiSize(mod)) {
                 1 => switch (op) {
                     .Xchg => .i32_atomic_rmw8_xchg_u,
                     .Add => .i32_atomic_rmw8_add_u,
@@ -7226,7 +7267,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
             };
             try func.addAtomicMemArg(tag, .{
                 .offset = ptr.offset(),
-                .alignment = ty.abiAlignment(func.target),
+                .alignment = ty.abiAlignment(mod),
             });
             const result = try WValue.toLocal(.stack, func, ty);
             return func.finishAir(inst, result, &.{ pl_op.operand, extra.operand });
@@ -7255,7 +7296,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
                     .Xor => .xor,
                     else => unreachable,
                 });
-                if (ty.isInt() and (op == .Add or op == .Sub)) {
+                if (ty.isInt(mod) and (op == .Add or op == .Sub)) {
                     _ = try func.wrapOperand(.stack, ty);
                 }
                 try func.store(.stack, .stack, ty, ptr.offset());
@@ -7271,7 +7312,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
                 try func.store(.stack, .stack, ty, ptr.offset());
             },
             .Nand => {
-                const wasm_bits = toWasmBits(@intCast(u16, ty.bitSize(func.target))).?;
+                const wasm_bits = toWasmBits(@intCast(u16, ty.bitSize(mod))).?;
 
                 try func.emitWValue(ptr);
 
                 const and_res = try func.binOp(result, operand, ty, .@"and");
@@ -7302,6 +7343,7 @@ fn airFence(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 }
 
 fn airAtomicStore(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const mod = func.bin_file.base.options.module.?;
     const bin_op = func.air.instructions.items(.data)[inst].bin_op;
 
     const ptr = try func.resolveInst(bin_op.lhs);
@@ -7310,7 +7352,7 @@ fn airAtomicStore(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const ty = ptr_ty.childType();
 
     if (func.useAtomicFeature()) {
-        const tag: wasm.AtomicsOpcode = switch (ty.abiSize(func.target)) {
+        const tag: wasm.AtomicsOpcode = switch (ty.abiSize(mod)) {
             1 => .i32_atomic_store8,
             2 => .i32_atomic_store16,
             4 => .i32_atomic_store,
@@ -7321,7 +7363,7 @@ fn airAtomicStore(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
         try func.lowerToStack(operand);
         try func.addAtomicMemArg(tag, .{
             .offset = ptr.offset(),
-            .alignment = ty.abiAlignment(func.target),
+            .alignment = ty.abiAlignment(mod),
         });
     } else {
         try func.store(ptr, operand, ty, 0);
diff --git a/src/arch/wasm/abi.zig b/src/arch/wasm/abi.zig
index 4692f65dd1..7dd4425c01 100644
--- a/src/arch/wasm/abi.zig
+++ b/src/arch/wasm/abi.zig
@@ -5,9 +5,11 @@
 //! Note: Above mentioned document is not an official specification, therefore called a convention.
 
 const std = @import("std");
-const Type = @import("../../type.zig").Type;
 const Target = std.Target;
+const Type = @import("../../type.zig").Type;
+const Module = @import("../../Module.zig");
+
 /// Defines how to pass a type as part of a function signature,
 /// both for parameters as well as return values.
 pub const Class = enum { direct, indirect, none };
@@ -19,12 +21,13 @@ const direct: [2]Class = .{ .direct, .none };
 /// Classifies a given Zig type to determine how they must be passed
 /// or returned as value within a wasm function.
 /// When all elements result in `.none`, no value must be passed in or returned.
-pub fn classifyType(ty: Type, target: Target) [2]Class {
-    if (!ty.hasRuntimeBitsIgnoreComptime()) return none;
-    switch (ty.zigTypeTag()) {
+pub fn classifyType(ty: Type, mod: *const Module) [2]Class {
+    const target = mod.getTarget();
+    if (!ty.hasRuntimeBitsIgnoreComptime(mod)) return none;
+    switch (ty.zigTypeTag(mod)) {
         .Struct => {
             if (ty.containerLayout() == .Packed) {
-                if (ty.bitSize(target) <= 64) return direct;
+                if (ty.bitSize(mod) <= 64) return direct;
                 return .{ .direct, .direct };
             }
             // When the struct type is non-scalar
@@ -32,14 +35,14 @@ pub fn classifyType(ty: Type, target: Target) [2]Class {
             // When the struct's alignment is non-natural
             const field = ty.structFields().values()[0];
             if (field.abi_align != 0) {
-                if (field.abi_align > field.ty.abiAlignment(target)) {
+                if (field.abi_align > field.ty.abiAlignment(mod)) {
                     return memory;
                 }
             }
-            return classifyType(field.ty, target);
+            return classifyType(field.ty, mod);
         },
         .Int, .Enum, .ErrorSet, .Vector => {
-            const int_bits = ty.intInfo(target).bits;
+            const int_bits = ty.intInfo(mod).bits;
             if (int_bits <= 64) return direct;
             if (int_bits <= 128) return .{ .direct, .direct };
             return memory;
@@ -53,7 +56,7 @@ pub fn classifyType(ty: Type, target: Target) [2]Class {
         .Bool => return direct,
         .Array => return memory,
         .Optional => {
-            std.debug.assert(ty.isPtrLikeOptional());
+            std.debug.assert(ty.isPtrLikeOptional(mod));
             return direct;
         },
         .Pointer => {
@@ -62,13 +65,13 @@ pub fn classifyType(ty: Type, target: Target) [2]Class {
         },
         .Union => {
             if (ty.containerLayout() == .Packed) {
-                if (ty.bitSize(target) <= 64) return direct;
+                if (ty.bitSize(mod) <= 64) return direct;
                 return .{ .direct, .direct };
             }
-            const layout = ty.unionGetLayout(target);
+            const layout = ty.unionGetLayout(mod);
             std.debug.assert(layout.tag_size == 0);
             if (ty.unionFields().count() > 1) return memory;
-            return classifyType(ty.unionFields().values()[0].ty, target);
+            return classifyType(ty.unionFields().values()[0].ty, mod);
         },
         .ErrorUnion,
         .Frame,
@@ -90,29 +93,29 @@ pub fn classifyType(ty: Type, target: Target) [2]Class {
 /// Returns the scalar type a given type can represent.
 /// Asserts given type can be represented as scalar, such as
 /// a struct with a single scalar field.
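 /// For example (illustrative): a `packed struct` presumably resolves to its
 /// backing integer, `extern struct { x: u32 }` to `u32`, and a one-field
 /// union to the scalar type of that field.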
-pub fn scalarType(ty: Type, target: std.Target) Type {
-    switch (ty.zigTypeTag()) {
+pub fn scalarType(ty: Type, mod: *const Module) Type {
+    switch (ty.zigTypeTag(mod)) {
         .Struct => {
             switch (ty.containerLayout()) {
                 .Packed => {
                     const struct_obj = ty.castTag(.@"struct").?.data;
-                    return scalarType(struct_obj.backing_int_ty, target);
+                    return scalarType(struct_obj.backing_int_ty, mod);
                 },
                 else => {
                     std.debug.assert(ty.structFieldCount() == 1);
-                    return scalarType(ty.structFieldType(0), target);
+                    return scalarType(ty.structFieldType(0), mod);
                 },
             }
         },
         .Union => {
             if (ty.containerLayout() != .Packed) {
-                const layout = ty.unionGetLayout(target);
+                const layout = ty.unionGetLayout(mod);
                 if (layout.payload_size == 0 and layout.tag_size != 0) {
-                    return scalarType(ty.unionTagTypeSafety().?, target);
+                    return scalarType(ty.unionTagTypeSafety().?, mod);
                 }
                 std.debug.assert(ty.unionFields().count() == 1);
             }
-            return scalarType(ty.unionFields().values()[0].ty, target);
+            return scalarType(ty.unionFields().values()[0].ty, mod);
         },
         else => return ty,
     }
 }
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index b614200e41..826bca2266 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -605,14 +605,14 @@ const FrameAlloc = struct {
             .ref_count = 0,
         };
     }
-    fn initType(ty: Type, target: Target) FrameAlloc {
-        return init(.{ .size = ty.abiSize(target), .alignment = ty.abiAlignment(target) });
+    fn initType(ty: Type, mod: *const Module) FrameAlloc {
+        return init(.{ .size = ty.abiSize(mod), .alignment = ty.abiAlignment(mod) });
     }
 };
 
 const StackAllocation = struct {
     inst: ?Air.Inst.Index,
-    /// TODO do we need size? should be determined by inst.ty.abiSize(self.target.*)
+    /// TODO do we need size? should be determined by inst.ty.abiSize(mod)
     size: u32,
 };
 
@@ -714,12 +714,12 @@ pub fn generate(
     function.args = call_info.args;
     function.ret_mcv = call_info.return_value;
     function.frame_allocs.set(@enumToInt(FrameIndex.ret_addr), FrameAlloc.init(.{
-        .size = Type.usize.abiSize(function.target.*),
-        .alignment = @min(Type.usize.abiAlignment(function.target.*), call_info.stack_align),
+        .size = Type.usize.abiSize(mod),
+        .alignment = @min(Type.usize.abiAlignment(mod), call_info.stack_align),
     }));
     function.frame_allocs.set(@enumToInt(FrameIndex.base_ptr), FrameAlloc.init(.{
-        .size = Type.usize.abiSize(function.target.*),
-        .alignment = @min(Type.usize.abiAlignment(function.target.*) * 2, call_info.stack_align),
+        .size = Type.usize.abiSize(mod),
+        .alignment = @min(Type.usize.abiAlignment(mod) * 2, call_info.stack_align),
     }));
     function.frame_allocs.set(
         @enumToInt(FrameIndex.args_frame),
@@ -1565,6 +1565,7 @@ fn asmMemoryRegisterImmediate(
 }
 
 fn gen(self: *Self) InnerError!void {
+    const mod = self.bin_file.options.module.?;
     const cc = self.fn_type.fnCallingConvention();
     if (cc != .Naked) {
         try self.asmRegister(.{ ._, .push }, .rbp);
@@ -1582,7 +1583,7 @@ fn gen(self: *Self) InnerError!void {
         // register which the callee is free to clobber. Therefore, we purposely
         // spill it to stack immediately.
         const frame_index =
-            try self.allocFrameIndex(FrameAlloc.initType(Type.usize, self.target.*));
+            try self.allocFrameIndex(FrameAlloc.initType(Type.usize, mod));
         try self.genSetMem(
             .{ .frame = frame_index },
             0,
@@ -1999,7 +2000,8 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
 }
 
 fn genLazy(self: *Self, lazy_sym: link.File.LazySymbol) InnerError!void {
-    switch (lazy_sym.ty.zigTypeTag()) {
+    const mod = self.bin_file.options.module.?;
+    switch (lazy_sym.ty.zigTypeTag(mod)) {
         .Enum => {
             const enum_ty = lazy_sym.ty;
             wip_mir_log.debug("{}.@tagName:", .{enum_ty.fmt(self.bin_file.options.module.?)});
@@ -2127,8 +2129,8 @@ fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Live
         tomb_bits >>= 1;
         if (!dies) continue;
         const op_int = @enumToInt(op);
-        if (op_int < Air.Inst.Ref.typed_value_map.len) continue;
-        const op_index = @intCast(Air.Inst.Index, op_int - Air.Inst.Ref.typed_value_map.len);
+        if (op_int < Air.ref_start_index) continue;
+        const op_index = @intCast(Air.Inst.Index, op_int - Air.ref_start_index);
         self.processDeath(op_index);
     }
     self.finishAirResult(inst, result);
@@ -2252,14 +2254,14 @@ fn allocFrameIndex(self: *Self, alloc: FrameAlloc) !FrameIndex {
 
 /// Use a pointer instruction as the basis for allocating stack memory.
 fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !FrameIndex {
+    const mod = self.bin_file.options.module.?;
     const ptr_ty = self.air.typeOfIndex(inst);
     const val_ty = ptr_ty.childType();
     return self.allocFrameIndex(FrameAlloc.init(.{
-        .size = math.cast(u32, val_ty.abiSize(self.target.*)) orelse {
-            const mod = self.bin_file.options.module.?;
+        .size = math.cast(u32, val_ty.abiSize(mod)) orelse {
            return self.fail("type '{}' too big to fit into stack frame", .{val_ty.fmt(mod)});
        },
-        .alignment = @max(ptr_ty.ptrAlignment(self.target.*), 1),
+        .alignment = @max(ptr_ty.ptrAlignment(mod), 1),
    }));
 }
 
@@ -2272,19 +2274,19 @@ fn allocTempRegOrMem(self: *Self, elem_ty: Type, reg_ok: bool) !MCValue {
 }
 
 fn allocRegOrMemAdvanced(self: *Self, ty: Type, inst: ?Air.Inst.Index, reg_ok: bool) !MCValue {
-    const abi_size = math.cast(u32, ty.abiSize(self.target.*)) orelse {
-        const mod = self.bin_file.options.module.?;
+    const mod = self.bin_file.options.module.?;
+    const abi_size = math.cast(u32, ty.abiSize(mod)) orelse {
         return self.fail("type '{}' too big to fit into stack frame", .{ty.fmt(mod)});
     };
 
     if (reg_ok) need_mem: {
-        if (abi_size <= @as(u32, switch (ty.zigTypeTag()) {
+        if (abi_size <= @as(u32, switch (ty.zigTypeTag(mod)) {
             .Float => switch (ty.floatBits(self.target.*)) {
                 16, 32, 64, 128 => 16,
                 80 => break :need_mem,
                 else => unreachable,
             },
-            .Vector => switch (ty.childType().zigTypeTag()) {
+            .Vector => switch (ty.childType().zigTypeTag(mod)) {
                 .Float => switch (ty.childType().floatBits(self.target.*)) {
                     16, 32, 64, 128 => if (self.hasFeature(.avx)) 32 else 16,
                     80 => break :need_mem,
@@ -2294,18 +2296,18 @@ fn allocRegOrMemAdvanced(self: *Self, ty: Type, inst: ?Air.Inst.Index, reg_ok: b
             },
             else => 8,
         })) {
-            if (self.register_manager.tryAllocReg(inst, regClassForType(ty))) |reg| {
+            if (self.register_manager.tryAllocReg(inst, regClassForType(ty, mod))) |reg| {
                 return MCValue{ .register = registerAlias(reg, abi_size) };
             }
         }
     }
 
-    const frame_index = try self.allocFrameIndex(FrameAlloc.initType(ty, self.target.*));
+    const frame_index = try self.allocFrameIndex(FrameAlloc.initType(ty, mod));
     return .{ .load_frame = .{ .index = frame_index } };
 }
 
-fn regClassForType(ty: Type) RegisterManager.RegisterBitSet {
-    return switch (ty.zigTypeTag()) {
+fn regClassForType(ty: Type, mod: *const Module) RegisterManager.RegisterBitSet {
+    return switch (ty.zigTypeTag(mod)) {
         .Float, .Vector => sse,
         else => gp,
     };
@@ -2449,7 +2451,8 @@ pub fn spillRegisters(self: *Self, registers: []const Register) !void {
 /// allocated. A second call to `copyToTmpRegister` may return the same register.
 /// This can have a side effect of spilling instructions to the stack to free up a register.
 fn copyToTmpRegister(self: *Self, ty: Type, mcv: MCValue) !Register {
-    const reg = try self.register_manager.allocReg(null, regClassForType(ty));
+    const mod = self.bin_file.options.module.?;
+    const reg = try self.register_manager.allocReg(null, regClassForType(ty, mod));
     try self.genSetReg(reg, ty, mcv);
     return reg;
 }
@@ -2464,7 +2467,8 @@ fn copyToRegisterWithInstTracking(
     ty: Type,
     mcv: MCValue,
 ) !MCValue {
-    const reg: Register = try self.register_manager.allocReg(reg_owner, regClassForType(ty));
+    const mod = self.bin_file.options.module.?;
+    const reg: Register = try self.register_manager.allocReg(reg_owner, regClassForType(ty, mod));
     try self.genSetReg(reg, ty, mcv);
     return MCValue{ .register = reg };
 }
@@ -2618,14 +2622,15 @@ fn airFpext(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airIntCast(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
 
     const result: MCValue = result: {
         const src_ty = self.air.typeOf(ty_op.operand);
-        const src_int_info = src_ty.intInfo(self.target.*);
+        const src_int_info = src_ty.intInfo(mod);
 
         const dst_ty = self.air.typeOfIndex(inst);
-        const dst_int_info = dst_ty.intInfo(self.target.*);
-        const abi_size = @intCast(u32, dst_ty.abiSize(self.target.*));
+        const dst_int_info = dst_ty.intInfo(mod);
+        const abi_size = @intCast(u32, dst_ty.abiSize(mod));
 
         const min_ty = if (dst_int_info.bits < src_int_info.bits) dst_ty else src_ty;
         const extend = switch (src_int_info.signedness) {
@@ -2670,14 +2675,7 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void {
 
             const high_bits = src_int_info.bits % 64;
             if (high_bits > 0) {
-                var high_pl = Type.Payload.Bits{
-                    .base = .{ .tag = switch (extend) {
-                        .signed => .int_signed,
-                        .unsigned => .int_unsigned,
-                    } },
-                    .data = high_bits,
-                };
-                const high_ty = Type.initPayload(&high_pl.base);
+                const high_ty = try mod.intType(extend, high_bits);
                 try self.truncateRegister(high_ty, high_reg);
                 try self.genCopy(Type.usize, high_mcv, .{ .register = high_reg });
             }
@@ -2706,12 +2704,13 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airTrunc(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
 
     const dst_ty = self.air.typeOfIndex(inst);
-    const dst_abi_size = @intCast(u32, dst_ty.abiSize(self.target.*));
+    const dst_abi_size = @intCast(u32, dst_ty.abiSize(mod));
     const src_ty = self.air.typeOf(ty_op.operand);
-    const src_abi_size = @intCast(u32, src_ty.abiSize(self.target.*));
+    const src_abi_size = @intCast(u32, src_ty.abiSize(mod));
 
     const result = result: {
         const src_mcv = try self.resolveInst(ty_op.operand);
@@ -2724,10 +2723,10 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void {
         else
             try self.copyToRegisterWithInstTracking(inst, dst_ty, src_mcv);
 
-        if (dst_ty.zigTypeTag() == .Vector) {
-            assert(src_ty.zigTypeTag() == .Vector and dst_ty.vectorLen() == src_ty.vectorLen());
-            const dst_info = dst_ty.childType().intInfo(self.target.*);
-            const src_info = src_ty.childType().intInfo(self.target.*);
+        if (dst_ty.zigTypeTag(mod) == .Vector) {
+            assert(src_ty.zigTypeTag(mod) == .Vector and dst_ty.vectorLen() == src_ty.vectorLen());
+            const dst_info = dst_ty.childType().intInfo(mod);
+            const src_info = src_ty.childType().intInfo(mod);
             const mir_tag = if (@as(?Mir.Inst.FixedTag, switch (dst_info.bits) {
                 8 => switch (src_info.bits) {
                     16 => switch (dst_ty.vectorLen()) {
@@ -2775,7 +2774,7 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void {
                 },
             };
             const full_ty = Type.initPayload(&full_pl.base);
-            const full_abi_size = @intCast(u32, full_ty.abiSize(self.target.*));
+            const full_abi_size = @intCast(u32, full_ty.abiSize(mod));
 
             const splat_mcv = try self.genTypedValue(.{ .ty = full_ty, .val = splat_val });
             const splat_addr_mcv: MCValue = switch (splat_mcv) {
@@ -2831,6 +2830,7 @@ fn airBoolToInt(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airSlice(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
     const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
 
@@ -2840,11 +2840,11 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void {
     const len = try self.resolveInst(bin_op.rhs);
     const len_ty = self.air.typeOf(bin_op.rhs);
 
-    const frame_index = try self.allocFrameIndex(FrameAlloc.initType(slice_ty, self.target.*));
+    const frame_index = try self.allocFrameIndex(FrameAlloc.initType(slice_ty, mod));
     try self.genSetMem(.{ .frame = frame_index }, 0, ptr_ty, ptr);
     try self.genSetMem(
         .{ .frame = frame_index },
-        @intCast(i32, ptr_ty.abiSize(self.target.*)),
+        @intCast(i32, ptr_ty.abiSize(mod)),
         len_ty,
         len,
     );
@@ -2873,23 +2873,24 @@ fn airPtrArithmetic(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void
 }
 
 fn activeIntBits(self: *Self, dst_air: Air.Inst.Ref) u16 {
+    const mod = self.bin_file.options.module.?;
     const air_tag = self.air.instructions.items(.tag);
     const air_data = self.air.instructions.items(.data);
 
     const dst_ty = self.air.typeOf(dst_air);
-    const dst_info = dst_ty.intInfo(self.target.*);
+    const dst_info = dst_ty.intInfo(mod);
     if (Air.refToIndex(dst_air)) |inst| {
         switch (air_tag[inst]) {
             .constant => {
                 const src_val = self.air.values[air_data[inst].ty_pl.payload];
                 var space: Value.BigIntSpace = undefined;
-                const src_int = src_val.toBigInt(&space, self.target.*);
+                const src_int = src_val.toBigInt(&space, mod);
                 return @intCast(u16, src_int.bitCountTwosComp()) +
                     @boolToInt(src_int.positive and dst_info.signedness == .signed);
             },
             .intcast => {
                 const src_ty = self.air.typeOf(air_data[inst].ty_op.operand);
-                const src_info = src_ty.intInfo(self.target.*);
+                const src_info = src_ty.intInfo(mod);
                 return @min(switch (src_info.signedness) {
                     .signed => switch (dst_info.signedness) {
                         .signed => src_info.bits,
@@ -2908,20 +2909,18 @@ fn activeIntBits(self: *Self, dst_air: Air.Inst.Ref) u16 {
 }
 
 fn airMulDivBinOp(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const bin_op = self.air.instructions.items(.data)[inst].bin_op;
     const result = result: {
         const tag = self.air.instructions.items(.tag)[inst];
         const dst_ty = self.air.typeOfIndex(inst);
-        switch (dst_ty.zigTypeTag()) {
+        switch (dst_ty.zigTypeTag(mod)) {
             .Float, .Vector => break :result try self.genBinOp(inst, tag, bin_op.lhs, bin_op.rhs),
             else => {},
         }
-        const dst_info = dst_ty.intInfo(self.target.*);
-        var src_pl = Type.Payload.Bits{ .base = .{ .tag = switch (dst_info.signedness) {
-            .signed => .int_signed,
-            .unsigned => .int_unsigned,
-        }
}, .data = switch (tag) { + const dst_info = dst_ty.intInfo(mod); + const src_ty = try mod.intType(dst_info.signedness, switch (tag) { else => unreachable, .mul, .mulwrap => math.max3( self.activeIntBits(bin_op.lhs), @@ -2929,8 +2928,7 @@ fn airMulDivBinOp(self: *Self, inst: Air.Inst.Index) !void { dst_info.bits / 2, ), .div_trunc, .div_floor, .div_exact, .rem, .mod => dst_info.bits, - } }; - const src_ty = Type.initPayload(&src_pl.base); + }); try self.spillEflagsIfOccupied(); try self.spillRegisters(&.{ .rax, .rdx }); @@ -2942,6 +2940,7 @@ fn airMulDivBinOp(self: *Self, inst: Air.Inst.Index) !void { } fn airAddSat(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const ty = self.air.typeOf(bin_op.lhs); @@ -2968,7 +2967,7 @@ fn airAddSat(self: *Self, inst: Air.Inst.Index) !void { const reg_bits = self.regBitSize(ty); const reg_extra_bits = self.regExtraBits(ty); - const cc: Condition = if (ty.isSignedInt()) cc: { + const cc: Condition = if (ty.isSignedInt(mod)) cc: { if (reg_extra_bits > 0) { try self.genShiftBinOpMir(.{ ._l, .sa }, ty, dst_mcv, .{ .immediate = reg_extra_bits }); } @@ -2994,7 +2993,7 @@ fn airAddSat(self: *Self, inst: Air.Inst.Index) !void { break :cc .o; } else cc: { try self.genSetReg(limit_reg, ty, .{ - .immediate = @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - ty.bitSize(self.target.*)), + .immediate = @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - ty.bitSize(mod)), }); try self.genBinOpMir(.{ ._, .add }, ty, dst_mcv, rhs_mcv); @@ -3005,14 +3004,14 @@ fn airAddSat(self: *Self, inst: Air.Inst.Index) !void { break :cc .c; }; - const cmov_abi_size = @max(@intCast(u32, ty.abiSize(self.target.*)), 2); + const cmov_abi_size = @max(@intCast(u32, ty.abiSize(mod)), 2); try self.asmCmovccRegisterRegister( registerAlias(dst_reg, cmov_abi_size), registerAlias(limit_reg, cmov_abi_size), cc, ); - if (reg_extra_bits > 0 and ty.isSignedInt()) { + if (reg_extra_bits > 0 and ty.isSignedInt(mod)) { try self.genShiftBinOpMir(.{ ._r, .sa }, ty, dst_mcv, .{ .immediate = reg_extra_bits }); } @@ -3020,6 +3019,7 @@ fn airAddSat(self: *Self, inst: Air.Inst.Index) !void { } fn airSubSat(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const ty = self.air.typeOf(bin_op.lhs); @@ -3046,7 +3046,7 @@ fn airSubSat(self: *Self, inst: Air.Inst.Index) !void { const reg_bits = self.regBitSize(ty); const reg_extra_bits = self.regExtraBits(ty); - const cc: Condition = if (ty.isSignedInt()) cc: { + const cc: Condition = if (ty.isSignedInt(mod)) cc: { if (reg_extra_bits > 0) { try self.genShiftBinOpMir(.{ ._l, .sa }, ty, dst_mcv, .{ .immediate = reg_extra_bits }); } @@ -3076,14 +3076,14 @@ fn airSubSat(self: *Self, inst: Air.Inst.Index) !void { break :cc .c; }; - const cmov_abi_size = @max(@intCast(u32, ty.abiSize(self.target.*)), 2); + const cmov_abi_size = @max(@intCast(u32, ty.abiSize(mod)), 2); try self.asmCmovccRegisterRegister( registerAlias(dst_reg, cmov_abi_size), registerAlias(limit_reg, cmov_abi_size), cc, ); - if (reg_extra_bits > 0 and ty.isSignedInt()) { + if (reg_extra_bits > 0 and ty.isSignedInt(mod)) { try self.genShiftBinOpMir(.{ ._r, .sa }, ty, dst_mcv, .{ .immediate = reg_extra_bits }); } @@ -3091,6 +3091,7 @@ fn airSubSat(self: *Self, inst: Air.Inst.Index) !void { } fn airMulSat(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const bin_op = 
self.air.instructions.items(.data)[inst].bin_op;
     const ty = self.air.typeOf(bin_op.lhs);
 
@@ -3118,7 +3119,7 @@ fn airMulSat(self: *Self, inst: Air.Inst.Index) !void {
     defer self.register_manager.unlockReg(limit_lock);
 
     const reg_bits = self.regBitSize(ty);
-    const cc: Condition = if (ty.isSignedInt()) cc: {
+    const cc: Condition = if (ty.isSignedInt(mod)) cc: {
         try self.genSetReg(limit_reg, ty, lhs_mcv);
         try self.genBinOpMir(.{ ._, .xor }, ty, limit_mcv, rhs_mcv);
         try self.genShiftBinOpMir(.{ ._r, .sa }, ty, limit_mcv, .{ .immediate = reg_bits - 1 });
@@ -3134,7 +3135,7 @@ fn airMulSat(self: *Self, inst: Air.Inst.Index) !void {
     };
 
     const dst_mcv = try self.genMulDivBinOp(.mul, inst, ty, ty, lhs_mcv, rhs_mcv);
-    const cmov_abi_size = @max(@intCast(u32, ty.abiSize(self.target.*)), 2);
+    const cmov_abi_size = @max(@intCast(u32, ty.abiSize(mod)), 2);
     try self.asmCmovccRegisterRegister(
         registerAlias(dst_mcv.register, cmov_abi_size),
         registerAlias(limit_reg, cmov_abi_size),
@@ -3145,12 +3146,13 @@ fn airMulSat(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
     const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
     const result: MCValue = result: {
         const tag = self.air.instructions.items(.tag)[inst];
         const ty = self.air.typeOf(bin_op.lhs);
-        switch (ty.zigTypeTag()) {
+        switch (ty.zigTypeTag(mod)) {
             .Vector => return self.fail("TODO implement add/sub with overflow for Vector type", .{}),
             .Int => {
                 try self.spillEflagsIfOccupied();
@@ -3160,7 +3162,7 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
                     .sub_with_overflow => .sub,
                     else => unreachable,
                 }, bin_op.lhs, bin_op.rhs);
-                const int_info = ty.intInfo(self.target.*);
+                const int_info = ty.intInfo(mod);
                 const cc: Condition = switch (int_info.signedness) {
                     .unsigned => .c,
                     .signed => .o,
@@ -3177,16 +3179,16 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
                     }
 
                     const frame_index =
-                        try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, self.target.*));
+                        try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, mod));
                     try self.genSetMem(
                         .{ .frame = frame_index },
-                        @intCast(i32, tuple_ty.structFieldOffset(1, self.target.*)),
+                        @intCast(i32, tuple_ty.structFieldOffset(1, mod)),
                         Type.u1,
                         .{ .eflags = cc },
                     );
                     try self.genSetMem(
                         .{ .frame = frame_index },
-                        @intCast(i32, tuple_ty.structFieldOffset(0, self.target.*)),
+                        @intCast(i32, tuple_ty.structFieldOffset(0, mod)),
                         ty,
                         partial_mcv,
                     );
@@ -3194,7 +3196,7 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
                 }
 
                 const frame_index =
-                    try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, self.target.*));
+                    try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, mod));
                 try self.genSetFrameTruncatedOverflowCompare(tuple_ty, frame_index, partial_mcv, cc);
                 break :result .{ .load_frame = .{ .index = frame_index } };
             },
@@ -3205,12 +3207,13 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
     const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
     const result: MCValue = result: {
         const lhs_ty = self.air.typeOf(bin_op.lhs);
         const rhs_ty = self.air.typeOf(bin_op.rhs);
-        switch (lhs_ty.zigTypeTag()) {
+        switch (lhs_ty.zigTypeTag(mod)) {
             .Vector => return self.fail("TODO implement
shl with overflow for Vector type", .{}), .Int => { try self.spillEflagsIfOccupied(); @@ -3219,7 +3222,7 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); const partial_mcv = try self.genShiftBinOp(.shl, null, lhs, rhs, lhs_ty, rhs_ty); const partial_lock = switch (partial_mcv) { @@ -3249,16 +3252,16 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { } const frame_index = - try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, self.target.*)); + try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, mod)); try self.genSetMem( .{ .frame = frame_index }, - @intCast(i32, tuple_ty.structFieldOffset(1, self.target.*)), + @intCast(i32, tuple_ty.structFieldOffset(1, mod)), tuple_ty.structFieldType(1), .{ .eflags = cc }, ); try self.genSetMem( .{ .frame = frame_index }, - @intCast(i32, tuple_ty.structFieldOffset(0, self.target.*)), + @intCast(i32, tuple_ty.structFieldOffset(0, mod)), tuple_ty.structFieldType(0), partial_mcv, ); @@ -3266,7 +3269,7 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { } const frame_index = - try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, self.target.*)); + try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, mod)); try self.genSetFrameTruncatedOverflowCompare(tuple_ty, frame_index, partial_mcv, cc); break :result .{ .load_frame = .{ .index = frame_index } }; }, @@ -3283,6 +3286,7 @@ fn genSetFrameTruncatedOverflowCompare( src_mcv: MCValue, overflow_cc: ?Condition, ) !void { + const mod = self.bin_file.options.module.?; const src_lock = switch (src_mcv) { .register => |reg| self.register_manager.lockReg(reg), else => null, @@ -3290,22 +3294,12 @@ fn genSetFrameTruncatedOverflowCompare( defer if (src_lock) |lock| self.register_manager.unlockReg(lock); const ty = tuple_ty.structFieldType(0); - const int_info = ty.intInfo(self.target.*); + const int_info = ty.intInfo(mod); - var hi_limb_pl = Type.Payload.Bits{ - .base = .{ .tag = switch (int_info.signedness) { - .signed => .int_signed, - .unsigned => .int_unsigned, - } }, - .data = (int_info.bits - 1) % 64 + 1, - }; - const hi_limb_ty = Type.initPayload(&hi_limb_pl.base); + const hi_limb_bits = (int_info.bits - 1) % 64 + 1; + const hi_limb_ty = try mod.intType(int_info.signedness, hi_limb_bits); - var rest_pl = Type.Payload.Bits{ - .base = .{ .tag = .int_unsigned }, - .data = int_info.bits - hi_limb_pl.data, - }; - const rest_ty = Type.initPayload(&rest_pl.base); + const rest_ty = try mod.intType(.unsigned, int_info.bits - hi_limb_bits); const temp_regs = try self.register_manager.allocRegs(3, .{ null, null, null }, gp); const temp_locks = self.register_manager.lockRegsAssumeUnused(3, temp_regs); @@ -3335,7 +3329,7 @@ fn genSetFrameTruncatedOverflowCompare( ); } - const payload_off = @intCast(i32, tuple_ty.structFieldOffset(0, self.target.*)); + const payload_off = @intCast(i32, tuple_ty.structFieldOffset(0, mod)); if (hi_limb_off > 0) try self.genSetMem(.{ .frame = frame_index }, payload_off, rest_ty, src_mcv); try self.genSetMem( .{ .frame = frame_index }, @@ -3345,23 +3339,24 @@ fn genSetFrameTruncatedOverflowCompare( ); try self.genSetMem( .{ .frame = frame_index }, - @intCast(i32, tuple_ty.structFieldOffset(1, self.target.*)), + @intCast(i32, tuple_ty.structFieldOffset(1, mod)), tuple_ty.structFieldType(1), if (overflow_cc) |_| .{ .register = overflow_reg.to8() } else .{ 
.eflags = .ne }, ); } fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const dst_ty = self.air.typeOf(bin_op.lhs); - const result: MCValue = switch (dst_ty.zigTypeTag()) { + const result: MCValue = switch (dst_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO implement mul_with_overflow for Vector type", .{}), .Int => result: { try self.spillEflagsIfOccupied(); try self.spillRegisters(&.{ .rax, .rdx }); - const dst_info = dst_ty.intInfo(self.target.*); + const dst_info = dst_ty.intInfo(mod); const cc: Condition = switch (dst_info.signedness) { .unsigned => .c, .signed => .o, @@ -3369,11 +3364,8 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const lhs_active_bits = self.activeIntBits(bin_op.lhs); const rhs_active_bits = self.activeIntBits(bin_op.rhs); - var src_pl = Type.Payload.Bits{ .base = .{ .tag = switch (dst_info.signedness) { - .signed => .int_signed, - .unsigned => .int_unsigned, - } }, .data = math.max3(lhs_active_bits, rhs_active_bits, dst_info.bits / 2) }; - const src_ty = Type.initPayload(&src_pl.base); + const src_bits = math.max3(lhs_active_bits, rhs_active_bits, dst_info.bits / 2); + const src_ty = try mod.intType(dst_info.signedness, src_bits); const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); @@ -3391,26 +3383,26 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { break :result .{ .register_overflow = .{ .reg = reg, .eflags = cc } }; } else { const frame_index = - try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, self.target.*)); + try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, mod)); try self.genSetFrameTruncatedOverflowCompare(tuple_ty, frame_index, partial_mcv, cc); break :result .{ .load_frame = .{ .index = frame_index } }; }, else => { // For now, this is the only supported multiply that doesn't fit in a register. - assert(dst_info.bits <= 128 and src_pl.data == 64); + assert(dst_info.bits <= 128 and src_bits == 64); const frame_index = - try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, self.target.*)); + try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, mod)); if (dst_info.bits >= lhs_active_bits + rhs_active_bits) { try self.genSetMem( .{ .frame = frame_index }, - @intCast(i32, tuple_ty.structFieldOffset(0, self.target.*)), + @intCast(i32, tuple_ty.structFieldOffset(0, mod)), tuple_ty.structFieldType(0), partial_mcv, ); try self.genSetMem( .{ .frame = frame_index }, - @intCast(i32, tuple_ty.structFieldOffset(1, self.target.*)), + @intCast(i32, tuple_ty.structFieldOffset(1, mod)), tuple_ty.structFieldType(1), .{ .immediate = 0 }, // cc being set is impossible ); @@ -3433,7 +3425,8 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { /// Clobbers .rax and .rdx registers. /// Quotient is saved in .rax and remainder in .rdx. fn genIntMulDivOpMir(self: *Self, tag: Mir.Inst.FixedTag, ty: Type, lhs: MCValue, rhs: MCValue) !void { - const abi_size = @intCast(u32, ty.abiSize(self.target.*)); + const mod = self.bin_file.options.module.?; + const abi_size = @intCast(u32, ty.abiSize(mod)); if (abi_size > 8) { return self.fail("TODO implement genIntMulDivOpMir for ABI size larger than 8", .{}); } @@ -3472,8 +3465,9 @@ fn genIntMulDivOpMir(self: *Self, tag: Mir.Inst.FixedTag, ty: Type, lhs: MCValue /// Always returns a register. /// Clobbers .rax and .rdx registers. 
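// [editor's note] The helper defined next lowers floored division on top of
// the truncating `idiv`. As an illustrative userspace sketch of the
// arithmetic it performs (not the emitted instruction sequence;
// `divFloorSketch` is a hypothetical name): the truncated quotient is
// decremented by one exactly when the remainder is nonzero and the operand
// signs differ.
//
//     fn divFloorSketch(a: i64, b: i64) i64 {
//         const q = @divTrunc(a, b); // idiv leaves the quotient in rax
//         const r = a - q * b; // and the remainder in rdx
//         return if (r != 0 and (r ^ b) < 0) q - 1 else q;
//     }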
fn genInlineIntDivFloor(self: *Self, ty: Type, lhs: MCValue, rhs: MCValue) !MCValue { - const abi_size = @intCast(u32, ty.abiSize(self.target.*)); - const int_info = ty.intInfo(self.target.*); + const mod = self.bin_file.options.module.?; + const abi_size = @intCast(u32, ty.abiSize(mod)); + const int_info = ty.intInfo(mod); const dividend: Register = switch (lhs) { .register => |reg| reg, else => try self.copyToTmpRegister(ty, lhs), @@ -3585,6 +3579,7 @@ fn airOptionalPayloadPtr(self: *Self, inst: Air.Inst.Index) !void { } fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result = result: { const dst_ty = self.air.typeOfIndex(inst); @@ -3592,7 +3587,7 @@ fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void { const opt_ty = src_ty.childType(); const src_mcv = try self.resolveInst(ty_op.operand); - if (opt_ty.optionalReprIsPayload()) { + if (opt_ty.optionalReprIsPayload(mod)) { break :result if (self.liveness.isUnused(inst)) .unreach else if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) @@ -3610,7 +3605,7 @@ fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void { try self.copyToRegisterWithInstTracking(inst, dst_ty, src_mcv); const pl_ty = dst_ty.childType(); - const pl_abi_size = @intCast(i32, pl_ty.abiSize(self.target.*)); + const pl_abi_size = @intCast(i32, pl_ty.abiSize(mod)); try self.genSetMem(.{ .reg = dst_mcv.getReg().? }, pl_abi_size, Type.bool, .{ .immediate = 1 }); break :result if (self.liveness.isUnused(inst)) .unreach else dst_mcv; }; @@ -3618,6 +3613,7 @@ fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void { } fn airUnwrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const err_union_ty = self.air.typeOf(ty_op.operand); const err_ty = err_union_ty.errorUnionSet(); @@ -3629,11 +3625,11 @@ fn airUnwrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void { break :result MCValue{ .immediate = 0 }; } - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { break :result operand; } - const err_off = errUnionErrorOffset(payload_ty, self.target.*); + const err_off = errUnionErrorOffset(payload_ty, mod); switch (operand) { .register => |reg| { // TODO reuse operand @@ -3678,12 +3674,13 @@ fn genUnwrapErrorUnionPayloadMir( err_union_ty: Type, err_union: MCValue, ) !MCValue { + const mod = self.bin_file.options.module.?; const payload_ty = err_union_ty.errorUnionPayload(); const result: MCValue = result: { - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) break :result .none; + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result .none; - const payload_off = errUnionPayloadOffset(payload_ty, self.target.*); + const payload_off = errUnionPayloadOffset(payload_ty, mod); switch (err_union) { .load_frame => |frame_addr| break :result .{ .load_frame = .{ .index = frame_addr.index, @@ -3720,6 +3717,7 @@ fn genUnwrapErrorUnionPayloadMir( // *(E!T) -> E fn airUnwrapErrUnionErrPtr(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const src_ty = self.air.typeOf(ty_op.operand); @@ -3739,8 +3737,8 @@ fn airUnwrapErrUnionErrPtr(self: *Self, inst: Air.Inst.Index) !void { const eu_ty = src_ty.childType(); const pl_ty = 
eu_ty.errorUnionPayload(); const err_ty = eu_ty.errorUnionSet(); - const err_off = @intCast(i32, errUnionErrorOffset(pl_ty, self.target.*)); - const err_abi_size = @intCast(u32, err_ty.abiSize(self.target.*)); + const err_off = @intCast(i32, errUnionErrorOffset(pl_ty, mod)); + const err_abi_size = @intCast(u32, err_ty.abiSize(mod)); try self.asmRegisterMemory( .{ ._, .mov }, registerAlias(dst_reg, err_abi_size), @@ -3755,6 +3753,7 @@ fn airUnwrapErrUnionErrPtr(self: *Self, inst: Air.Inst.Index) !void { // *(E!T) -> *T fn airUnwrapErrUnionPayloadPtr(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const src_ty = self.air.typeOf(ty_op.operand); @@ -3777,8 +3776,8 @@ fn airUnwrapErrUnionPayloadPtr(self: *Self, inst: Air.Inst.Index) !void { const eu_ty = src_ty.childType(); const pl_ty = eu_ty.errorUnionPayload(); - const pl_off = @intCast(i32, errUnionPayloadOffset(pl_ty, self.target.*)); - const dst_abi_size = @intCast(u32, dst_ty.abiSize(self.target.*)); + const pl_off = @intCast(i32, errUnionPayloadOffset(pl_ty, mod)); + const dst_abi_size = @intCast(u32, dst_ty.abiSize(mod)); try self.asmRegisterMemory( .{ ._, .lea }, registerAlias(dst_reg, dst_abi_size), @@ -3789,6 +3788,7 @@ fn airUnwrapErrUnionPayloadPtr(self: *Self, inst: Air.Inst.Index) !void { } fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = result: { const src_ty = self.air.typeOf(ty_op.operand); @@ -3803,8 +3803,8 @@ fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void { const eu_ty = src_ty.childType(); const pl_ty = eu_ty.errorUnionPayload(); const err_ty = eu_ty.errorUnionSet(); - const err_off = @intCast(i32, errUnionErrorOffset(pl_ty, self.target.*)); - const err_abi_size = @intCast(u32, err_ty.abiSize(self.target.*)); + const err_off = @intCast(i32, errUnionErrorOffset(pl_ty, mod)); + const err_abi_size = @intCast(u32, err_ty.abiSize(mod)); try self.asmMemoryImmediate( .{ ._, .mov }, Memory.sib(Memory.PtrSize.fromSize(err_abi_size), .{ @@ -3824,8 +3824,8 @@ fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void { const dst_lock = self.register_manager.lockReg(dst_reg); defer if (dst_lock) |lock| self.register_manager.unlockReg(lock); - const pl_off = @intCast(i32, errUnionPayloadOffset(pl_ty, self.target.*)); - const dst_abi_size = @intCast(u32, dst_ty.abiSize(self.target.*)); + const pl_off = @intCast(i32, errUnionPayloadOffset(pl_ty, mod)); + const dst_abi_size = @intCast(u32, dst_ty.abiSize(mod)); try self.asmRegisterMemory( .{ ._, .lea }, registerAlias(dst_reg, dst_abi_size), @@ -3853,14 +3853,15 @@ fn airSaveErrReturnTraceIndex(self: *Self, inst: Air.Inst.Index) !void { } fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = result: { const pl_ty = self.air.typeOf(ty_op.operand); - if (!pl_ty.hasRuntimeBits()) break :result .{ .immediate = 1 }; + if (!pl_ty.hasRuntimeBits(mod)) break :result .{ .immediate = 1 }; const opt_ty = self.air.typeOfIndex(inst); const pl_mcv = try self.resolveInst(ty_op.operand); - const same_repr = opt_ty.optionalReprIsPayload(); + const same_repr = opt_ty.optionalReprIsPayload(mod); if (same_repr and self.reuseOperand(inst, ty_op.operand, 0, pl_mcv)) break :result pl_mcv; 
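// [editor's note] For optionals that are not represented as just the payload
// (optionalReprIsPayload() == false), the copy and store below assume a
// layout of payload first, then a one-byte "has value" flag. A rough sketch
// of that assumption (hypothetical type names, illustration only):
//
//     const OptionalRepr = extern struct {
//         payload: Payload, // byte offset 0
//         is_some: u8, // byte offset pl_abi_size, set to 1 when wrapping
//     };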
const pl_lock: ?RegisterLock = switch (pl_mcv) { @@ -3873,7 +3874,7 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void { try self.genCopy(pl_ty, opt_mcv, pl_mcv); if (!same_repr) { - const pl_abi_size = @intCast(i32, pl_ty.abiSize(self.target.*)); + const pl_abi_size = @intCast(i32, pl_ty.abiSize(mod)); switch (opt_mcv) { else => unreachable, @@ -3900,6 +3901,7 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void { /// T to E!T fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const eu_ty = self.air.getRefType(ty_op.ty); @@ -3908,11 +3910,11 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void { const operand = try self.resolveInst(ty_op.operand); const result: MCValue = result: { - if (!pl_ty.hasRuntimeBitsIgnoreComptime()) break :result .{ .immediate = 0 }; + if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result .{ .immediate = 0 }; - const frame_index = try self.allocFrameIndex(FrameAlloc.initType(eu_ty, self.target.*)); - const pl_off = @intCast(i32, errUnionPayloadOffset(pl_ty, self.target.*)); - const err_off = @intCast(i32, errUnionErrorOffset(pl_ty, self.target.*)); + const frame_index = try self.allocFrameIndex(FrameAlloc.initType(eu_ty, mod)); + const pl_off = @intCast(i32, errUnionPayloadOffset(pl_ty, mod)); + const err_off = @intCast(i32, errUnionErrorOffset(pl_ty, mod)); try self.genSetMem(.{ .frame = frame_index }, pl_off, pl_ty, operand); try self.genSetMem(.{ .frame = frame_index }, err_off, err_ty, .{ .immediate = 0 }); break :result .{ .load_frame = .{ .index = frame_index } }; @@ -3922,6 +3924,7 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void { /// E to E!T fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const eu_ty = self.air.getRefType(ty_op.ty); @@ -3929,11 +3932,11 @@ fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void { const err_ty = eu_ty.errorUnionSet(); const result: MCValue = result: { - if (!pl_ty.hasRuntimeBitsIgnoreComptime()) break :result try self.resolveInst(ty_op.operand); + if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result try self.resolveInst(ty_op.operand); - const frame_index = try self.allocFrameIndex(FrameAlloc.initType(eu_ty, self.target.*)); - const pl_off = @intCast(i32, errUnionPayloadOffset(pl_ty, self.target.*)); - const err_off = @intCast(i32, errUnionErrorOffset(pl_ty, self.target.*)); + const frame_index = try self.allocFrameIndex(FrameAlloc.initType(eu_ty, mod)); + const pl_off = @intCast(i32, errUnionPayloadOffset(pl_ty, mod)); + const err_off = @intCast(i32, errUnionErrorOffset(pl_ty, mod)); try self.genSetMem(.{ .frame = frame_index }, pl_off, pl_ty, .undef); const operand = try self.resolveInst(ty_op.operand); try self.genSetMem(.{ .frame = frame_index }, err_off, err_ty, operand); @@ -3974,6 +3977,7 @@ fn airSliceLen(self: *Self, inst: Air.Inst.Index) !void { } fn airPtrSliceLenPtr(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const src_ty = self.air.typeOf(ty_op.operand); @@ -3994,7 +3998,7 @@ fn airPtrSliceLenPtr(self: *Self, inst: Air.Inst.Index) !void { const dst_lock = self.register_manager.lockReg(dst_reg); defer if (dst_lock) |lock| self.register_manager.unlockReg(lock); - const 
dst_abi_size = @intCast(u32, dst_ty.abiSize(self.target.*)); + const dst_abi_size = @intCast(u32, dst_ty.abiSize(mod)); try self.asmRegisterMemory( .{ ._, .lea }, registerAlias(dst_reg, dst_abi_size), @@ -4041,6 +4045,7 @@ fn elemOffset(self: *Self, index_ty: Type, index: MCValue, elem_size: u64) !Regi } fn genSliceElemPtr(self: *Self, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref) !MCValue { + const mod = self.bin_file.options.module.?; const slice_ty = self.air.typeOf(lhs); const slice_mcv = try self.resolveInst(lhs); const slice_mcv_lock: ?RegisterLock = switch (slice_mcv) { @@ -4050,7 +4055,7 @@ fn genSliceElemPtr(self: *Self, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref) !MCValue { defer if (slice_mcv_lock) |lock| self.register_manager.unlockReg(lock); const elem_ty = slice_ty.childType(); - const elem_size = elem_ty.abiSize(self.target.*); + const elem_size = elem_ty.abiSize(mod); var buf: Type.SlicePtrFieldTypeBuffer = undefined; const slice_ptr_field_type = slice_ty.slicePtrFieldType(&buf); @@ -4097,6 +4102,7 @@ fn airSliceElemPtr(self: *Self, inst: Air.Inst.Index) !void { } fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const array_ty = self.air.typeOf(bin_op.lhs); @@ -4108,7 +4114,7 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void { defer if (array_lock) |lock| self.register_manager.unlockReg(lock); const elem_ty = array_ty.childType(); - const elem_abi_size = elem_ty.abiSize(self.target.*); + const elem_abi_size = elem_ty.abiSize(mod); const index_ty = self.air.typeOf(bin_op.rhs); const index = try self.resolveInst(bin_op.rhs); @@ -4125,7 +4131,7 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void { const addr_reg = try self.register_manager.allocReg(null, gp); switch (array) { .register => { - const frame_index = try self.allocFrameIndex(FrameAlloc.initType(array_ty, self.target.*)); + const frame_index = try self.allocFrameIndex(FrameAlloc.initType(array_ty, mod)); try self.genSetMem(.{ .frame = frame_index }, 0, array_ty, array); try self.asmRegisterMemory( .{ ._, .lea }, @@ -4162,14 +4168,15 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void { } fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const ptr_ty = self.air.typeOf(bin_op.lhs); // this is identical to the `airPtrElemPtr` codegen expect here an // additional `mov` is needed at the end to get the actual value - const elem_ty = ptr_ty.elemType2(); - const elem_abi_size = @intCast(u32, elem_ty.abiSize(self.target.*)); + const elem_ty = ptr_ty.elemType2(mod); + const elem_abi_size = @intCast(u32, elem_ty.abiSize(mod)); const index_ty = self.air.typeOf(bin_op.rhs); const index_mcv = try self.resolveInst(bin_op.rhs); const index_lock = switch (index_mcv) { @@ -4207,6 +4214,7 @@ fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void { } fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; @@ -4218,8 +4226,8 @@ fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void { }; defer if (ptr_lock) |lock| self.register_manager.unlockReg(lock); - const elem_ty = ptr_ty.elemType2(); - const elem_abi_size = elem_ty.abiSize(self.target.*); + const elem_ty = ptr_ty.elemType2(mod); + const 
elem_abi_size = elem_ty.abiSize(mod); const index_ty = self.air.typeOf(extra.rhs); const index = try self.resolveInst(extra.rhs); const index_lock: ?RegisterLock = switch (index) { @@ -4239,11 +4247,12 @@ fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void { } fn airSetUnionTag(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const ptr_union_ty = self.air.typeOf(bin_op.lhs); const union_ty = ptr_union_ty.childType(); const tag_ty = self.air.typeOf(bin_op.rhs); - const layout = union_ty.unionGetLayout(self.target.*); + const layout = union_ty.unionGetLayout(mod); if (layout.tag_size == 0) { return self.finishAir(inst, .none, .{ bin_op.lhs, bin_op.rhs, .none }); @@ -4284,11 +4293,12 @@ fn airSetUnionTag(self: *Self, inst: Air.Inst.Index) !void { } fn airGetUnionTag(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const tag_ty = self.air.typeOfIndex(inst); const union_ty = self.air.typeOf(ty_op.operand); - const layout = union_ty.unionGetLayout(self.target.*); + const layout = union_ty.unionGetLayout(mod); if (layout.tag_size == 0) { return self.finishAir(inst, .none, .{ ty_op.operand, .none, .none }); @@ -4302,7 +4312,7 @@ fn airGetUnionTag(self: *Self, inst: Air.Inst.Index) !void { }; defer if (operand_lock) |lock| self.register_manager.unlockReg(lock); - const tag_abi_size = tag_ty.abiSize(self.target.*); + const tag_abi_size = tag_ty.abiSize(mod); const dst_mcv: MCValue = blk: { switch (operand) { .load_frame => |frame_addr| { @@ -4337,6 +4347,7 @@ fn airGetUnionTag(self: *Self, inst: Air.Inst.Index) !void { } fn airClz(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result = result: { const dst_ty = self.air.typeOfIndex(inst); @@ -4358,7 +4369,7 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void { const dst_lock = self.register_manager.lockRegAssumeUnused(dst_reg); defer self.register_manager.unlockReg(dst_lock); - const src_bits = src_ty.bitSize(self.target.*); + const src_bits = src_ty.bitSize(mod); if (self.hasFeature(.lzcnt)) { if (src_bits <= 8) { const wide_reg = try self.copyToTmpRegister(src_ty, mat_src_mcv); @@ -4405,7 +4416,7 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void { } if (src_bits > 64) - return self.fail("TODO airClz of {}", .{src_ty.fmt(self.bin_file.options.module.?)}); + return self.fail("TODO airClz of {}", .{src_ty.fmt(mod)}); if (math.isPowerOfTwo(src_bits)) { const imm_reg = try self.copyToTmpRegister(dst_ty, .{ .immediate = src_bits ^ (src_bits - 1), @@ -4422,7 +4433,7 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void { try self.genBinOpMir(.{ ._, .bsr }, Type.u16, dst_mcv, .{ .register = wide_reg }); } else try self.genBinOpMir(.{ ._, .bsr }, src_ty, dst_mcv, mat_src_mcv); - const cmov_abi_size = @max(@intCast(u32, dst_ty.abiSize(self.target.*)), 2); + const cmov_abi_size = @max(@intCast(u32, dst_ty.abiSize(mod)), 2); try self.asmCmovccRegisterRegister( registerAlias(dst_reg, cmov_abi_size), registerAlias(imm_reg, cmov_abi_size), @@ -4449,7 +4460,7 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void { .{ .register = wide_reg }, ); - const cmov_abi_size = @max(@intCast(u32, dst_ty.abiSize(self.target.*)), 2); + const cmov_abi_size = @max(@intCast(u32, dst_ty.abiSize(mod)), 2); try self.asmCmovccRegisterRegister( 
registerAlias(imm_reg, cmov_abi_size), registerAlias(dst_reg, cmov_abi_size), @@ -4465,11 +4476,12 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void { } fn airCtz(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result = result: { const dst_ty = self.air.typeOfIndex(inst); const src_ty = self.air.typeOf(ty_op.operand); - const src_bits = src_ty.bitSize(self.target.*); + const src_bits = src_ty.bitSize(mod); const src_mcv = try self.resolveInst(ty_op.operand); const mat_src_mcv = switch (src_mcv) { @@ -4548,7 +4560,7 @@ fn airCtz(self: *Self, inst: Air.Inst.Index) !void { try self.genBinOpMir(.{ ._, .bsf }, Type.u16, dst_mcv, .{ .register = wide_reg }); } else try self.genBinOpMir(.{ ._, .bsf }, src_ty, dst_mcv, mat_src_mcv); - const cmov_abi_size = @max(@intCast(u32, dst_ty.abiSize(self.target.*)), 2); + const cmov_abi_size = @max(@intCast(u32, dst_ty.abiSize(mod)), 2); try self.asmCmovccRegisterRegister( registerAlias(dst_reg, cmov_abi_size), registerAlias(width_reg, cmov_abi_size), @@ -4560,10 +4572,11 @@ fn airCtz(self: *Self, inst: Air.Inst.Index) !void { } fn airPopcount(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = result: { const src_ty = self.air.typeOf(ty_op.operand); - const src_abi_size = @intCast(u32, src_ty.abiSize(self.target.*)); + const src_abi_size = @intCast(u32, src_ty.abiSize(mod)); const src_mcv = try self.resolveInst(ty_op.operand); if (self.hasFeature(.popcnt)) { @@ -4729,6 +4742,7 @@ fn byteSwap(self: *Self, inst: Air.Inst.Index, src_ty: Type, src_mcv: MCValue, m } fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const src_ty = self.air.typeOf(ty_op.operand); @@ -4738,7 +4752,7 @@ fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void { switch (self.regExtraBits(src_ty)) { 0 => {}, else => |extra| try self.genBinOpMir( - if (src_ty.isSignedInt()) .{ ._r, .sa } else .{ ._r, .sh }, + if (src_ty.isSignedInt(mod)) .{ ._r, .sa } else .{ ._r, .sh }, src_ty, dst_mcv, .{ .immediate = extra }, @@ -4749,10 +4763,11 @@ fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void { } fn airBitReverse(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const src_ty = self.air.typeOf(ty_op.operand); - const src_abi_size = @intCast(u32, src_ty.abiSize(self.target.*)); + const src_abi_size = @intCast(u32, src_ty.abiSize(mod)); const src_mcv = try self.resolveInst(ty_op.operand); const dst_mcv = try self.byteSwap(inst, src_ty, src_mcv, false); @@ -4847,7 +4862,7 @@ fn airBitReverse(self: *Self, inst: Air.Inst.Index) !void { switch (self.regExtraBits(src_ty)) { 0 => {}, else => |extra| try self.genBinOpMir( - if (src_ty.isSignedInt()) .{ ._r, .sa } else .{ ._r, .sh }, + if (src_ty.isSignedInt(mod)) .{ ._r, .sa } else .{ ._r, .sh }, src_ty, dst_mcv, .{ .immediate = extra }, @@ -4858,17 +4873,18 @@ fn airBitReverse(self: *Self, inst: Air.Inst.Index) !void { } fn airFloatSign(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const tag = self.air.instructions.items(.tag)[inst]; const un_op = self.air.instructions.items(.data)[inst].un_op; const ty = self.air.typeOf(un_op); - const abi_size: u32 = switch 
(ty.abiSize(self.target.*)) { + const abi_size: u32 = switch (ty.abiSize(mod)) { 1...16 => 16, 17...32 => 32, else => return self.fail("TODO implement airFloatSign for {}", .{ ty.fmt(self.bin_file.options.module.?), }), }; - const scalar_bits = ty.scalarType().floatBits(self.target.*); + const scalar_bits = ty.scalarType(mod).floatBits(self.target.*); const src_mcv = try self.resolveInst(un_op); const src_lock = if (src_mcv.getReg()) |reg| self.register_manager.lockReg(reg) else null; @@ -4905,21 +4921,17 @@ fn airFloatSign(self: *Self, inst: Air.Inst.Index) !void { var stack align(@alignOf(ExpectedContents)) = std.heap.stackFallback(@sizeOf(ExpectedContents), arena.allocator()); - var int_pl = Type.Payload.Bits{ - .base = .{ .tag = .int_signed }, - .data = scalar_bits, - }; var vec_pl = Type.Payload.Array{ .base = .{ .tag = .vector }, .data = .{ .len = @divExact(abi_size * 8, scalar_bits), - .elem_type = Type.initPayload(&int_pl.base), + .elem_type = try mod.intType(.signed, scalar_bits), }, }; const vec_ty = Type.initPayload(&vec_pl.base); const sign_val = switch (tag) { - .neg => try vec_ty.minInt(stack.get(), self.target.*), - .fabs => try vec_ty.maxInt(stack.get(), self.target.*), + .neg => try vec_ty.minInt(stack.get(), mod), + .fabs => try vec_ty.maxInt(stack.get(), mod), else => unreachable, }; @@ -5008,17 +5020,18 @@ fn airRound(self: *Self, inst: Air.Inst.Index, mode: u4) !void { } fn genRound(self: *Self, ty: Type, dst_reg: Register, src_mcv: MCValue, mode: u4) !void { + const mod = self.bin_file.options.module.?; if (!self.hasFeature(.sse4_1)) return self.fail("TODO implement genRound without sse4_1 feature", .{}); - const mir_tag = if (@as(?Mir.Inst.FixedTag, switch (ty.zigTypeTag()) { + const mir_tag = if (@as(?Mir.Inst.FixedTag, switch (ty.zigTypeTag(mod)) { .Float => switch (ty.floatBits(self.target.*)) { 32 => if (self.hasFeature(.avx)) .{ .v_ss, .round } else .{ ._ss, .round }, 64 => if (self.hasFeature(.avx)) .{ .v_sd, .round } else .{ ._sd, .round }, 16, 80, 128 => null, else => unreachable, }, - .Vector => switch (ty.childType().zigTypeTag()) { + .Vector => switch (ty.childType().zigTypeTag(mod)) { .Float => switch (ty.childType().floatBits(self.target.*)) { 32 => switch (ty.vectorLen()) { 1 => if (self.hasFeature(.avx)) .{ .v_ss, .round } else .{ ._ss, .round }, @@ -5041,7 +5054,7 @@ fn genRound(self: *Self, ty: Type, dst_reg: Register, src_mcv: MCValue, mode: u4 })) |tag| tag else return self.fail("TODO implement genRound for {}", .{ ty.fmt(self.bin_file.options.module.?), }); - const abi_size = @intCast(u32, ty.abiSize(self.target.*)); + const abi_size = @intCast(u32, ty.abiSize(mod)); const dst_alias = registerAlias(dst_reg, abi_size); switch (mir_tag[0]) { .v_ss, .v_sd => if (src_mcv.isMemory()) try self.asmRegisterRegisterMemoryImmediate( @@ -5078,9 +5091,10 @@ fn genRound(self: *Self, ty: Type, dst_reg: Register, src_mcv: MCValue, mode: u4 } fn airSqrt(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const un_op = self.air.instructions.items(.data)[inst].un_op; const ty = self.air.typeOf(un_op); - const abi_size = @intCast(u32, ty.abiSize(self.target.*)); + const abi_size = @intCast(u32, ty.abiSize(mod)); const src_mcv = try self.resolveInst(un_op); const dst_mcv = if (src_mcv.isRegister() and self.reuseOperand(inst, un_op, 0, src_mcv)) @@ -5092,7 +5106,7 @@ fn airSqrt(self: *Self, inst: Air.Inst.Index) !void { defer if (dst_lock) |lock| self.register_manager.unlockReg(lock); const result: MCValue = result: { - const 
mir_tag = if (@as(?Mir.Inst.FixedTag, switch (ty.zigTypeTag()) { + const mir_tag = if (@as(?Mir.Inst.FixedTag, switch (ty.zigTypeTag(mod)) { .Float => switch (ty.floatBits(self.target.*)) { 16 => if (self.hasFeature(.f16c)) { const mat_src_reg = if (src_mcv.isRegister()) @@ -5114,7 +5128,7 @@ fn airSqrt(self: *Self, inst: Air.Inst.Index) !void { 80, 128 => null, else => unreachable, }, - .Vector => switch (ty.childType().zigTypeTag()) { + .Vector => switch (ty.childType().zigTypeTag(mod)) { .Float => switch (ty.childType().floatBits(self.target.*)) { 16 => if (self.hasFeature(.f16c)) switch (ty.vectorLen()) { 1 => { @@ -5186,7 +5200,7 @@ fn airSqrt(self: *Self, inst: Air.Inst.Index) !void { }, else => unreachable, })) |tag| tag else return self.fail("TODO implement airSqrt for {}", .{ - ty.fmt(self.bin_file.options.module.?), + ty.fmt(mod), }); switch (mir_tag[0]) { .v_ss, .v_sd => if (src_mcv.isMemory()) try self.asmRegisterRegisterMemory( @@ -5274,10 +5288,11 @@ fn reuseOperandAdvanced( } fn packedLoad(self: *Self, dst_mcv: MCValue, ptr_ty: Type, ptr_mcv: MCValue) InnerError!void { + const mod = self.bin_file.options.module.?; const ptr_info = ptr_ty.ptrInfo().data; const val_ty = ptr_info.pointee_type; - const val_abi_size = @intCast(u32, val_ty.abiSize(self.target.*)); + const val_abi_size = @intCast(u32, val_ty.abiSize(mod)); const limb_abi_size: u32 = @min(val_abi_size, 8); const limb_abi_bits = limb_abi_size * 8; const val_byte_off = @intCast(i32, ptr_info.bit_offset / limb_abi_bits * limb_abi_size); @@ -5382,20 +5397,21 @@ fn load(self: *Self, dst_mcv: MCValue, ptr_ty: Type, ptr_mcv: MCValue) InnerErro } fn airLoad(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const elem_ty = self.air.typeOfIndex(inst); const result: MCValue = result: { - if (!elem_ty.hasRuntimeBitsIgnoreComptime()) break :result .none; + if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result .none; try self.spillRegisters(&.{ .rdi, .rsi, .rcx }); const reg_locks = self.register_manager.lockRegsAssumeUnused(3, .{ .rdi, .rsi, .rcx }); defer for (reg_locks) |lock| self.register_manager.unlockReg(lock); const ptr_ty = self.air.typeOf(ty_op.operand); - const elem_size = elem_ty.abiSize(self.target.*); + const elem_size = elem_ty.abiSize(mod); - const elem_rc = regClassForType(elem_ty); - const ptr_rc = regClassForType(ptr_ty); + const elem_rc = regClassForType(elem_ty, mod); + const ptr_rc = regClassForType(ptr_ty, mod); const ptr_mcv = try self.resolveInst(ty_op.operand); const dst_mcv = if (elem_size <= 8 and elem_rc.supersetOf(ptr_rc) and @@ -5416,13 +5432,14 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void { } fn packedStore(self: *Self, ptr_ty: Type, ptr_mcv: MCValue, src_mcv: MCValue) InnerError!void { + const mod = self.bin_file.options.module.?; const ptr_info = ptr_ty.ptrInfo().data; const src_ty = ptr_ty.childType(); const limb_abi_size: u16 = @min(ptr_info.host_size, 8); const limb_abi_bits = limb_abi_size * 8; - const src_bit_size = src_ty.bitSize(self.target.*); + const src_bit_size = src_ty.bitSize(mod); const src_byte_off = @intCast(i32, ptr_info.bit_offset / limb_abi_bits * limb_abi_size); const src_bit_off = ptr_info.bit_offset % limb_abi_bits; @@ -5555,14 +5572,15 @@ fn airStructFieldPtrIndex(self: *Self, inst: Air.Inst.Index, index: u8) !void { } fn fieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32) !MCValue { + const mod = self.bin_file.options.module.?; 
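// [editor's note] A field pointer is the parent pointer plus a byte offset
// chosen by the container layout, as computed just below. A hypothetical
// userspace analogue of the .Auto/.Extern case (illustration only):
//
//     fn fieldPtrSketch(base: [*]u8, byte_offset: usize) [*]u8 {
//         return base + byte_offset;
//     }
//
// For packed fields accessed through a host integer (host_size != 0) the
// byte offset is 0; the packed load/store paths handle the bit offset.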
const ptr_field_ty = self.air.typeOfIndex(inst); const ptr_container_ty = self.air.typeOf(operand); const container_ty = ptr_container_ty.childType(); const field_offset = @intCast(i32, switch (container_ty.containerLayout()) { - .Auto, .Extern => container_ty.structFieldOffset(index, self.target.*), - .Packed => if (container_ty.zigTypeTag() == .Struct and + .Auto, .Extern => container_ty.structFieldOffset(index, mod), + .Packed => if (container_ty.zigTypeTag(mod) == .Struct and ptr_field_ty.ptrInfo().data.host_size == 0) - container_ty.packedStructFieldByteOffset(index, self.target.*) + container_ty.packedStructFieldByteOffset(index, mod) else 0, }); @@ -5577,6 +5595,7 @@ fn fieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32 } fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.StructField, ty_pl.payload).data; const result: MCValue = result: { @@ -5584,17 +5603,17 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { const index = extra.field_index; const container_ty = self.air.typeOf(operand); - const container_rc = regClassForType(container_ty); + const container_rc = regClassForType(container_ty, mod); const field_ty = container_ty.structFieldType(index); - if (!field_ty.hasRuntimeBitsIgnoreComptime()) break :result .none; - const field_rc = regClassForType(field_ty); + if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result .none; + const field_rc = regClassForType(field_ty, mod); const field_is_gp = field_rc.supersetOf(gp); const src_mcv = try self.resolveInst(operand); const field_off = switch (container_ty.containerLayout()) { - .Auto, .Extern => @intCast(u32, container_ty.structFieldOffset(index, self.target.*) * 8), + .Auto, .Extern => @intCast(u32, container_ty.structFieldOffset(index, mod) * 8), .Packed => if (container_ty.castTag(.@"struct")) |struct_obj| - struct_obj.data.packedFieldBitOffset(self.target.*, index) + struct_obj.data.packedFieldBitOffset(mod, index) else 0, }; @@ -5611,7 +5630,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { break :result dst_mcv; } - const field_abi_size = @intCast(u32, field_ty.abiSize(self.target.*)); + const field_abi_size = @intCast(u32, field_ty.abiSize(mod)); const limb_abi_size: u32 = @min(field_abi_size, 8); const limb_abi_bits = limb_abi_size * 8; const field_byte_off = @intCast(i32, field_off / limb_abi_bits * limb_abi_size); @@ -5733,12 +5752,13 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { } fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.FieldParentPtr, ty_pl.payload).data; const inst_ty = self.air.typeOfIndex(inst); const parent_ty = inst_ty.childType(); - const field_offset = @intCast(i32, parent_ty.structFieldOffset(extra.field_index, self.target.*)); + const field_offset = @intCast(i32, parent_ty.structFieldOffset(extra.field_index, mod)); const src_mcv = try self.resolveInst(extra.field_ptr); const dst_mcv = if (src_mcv.isRegisterOffset() and @@ -5751,9 +5771,10 @@ fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void { } fn genUnOp(self: *Self, maybe_inst: ?Air.Inst.Index, tag: Air.Inst.Tag, src_air: Air.Inst.Ref) !MCValue { + const mod = self.bin_file.options.module.?; const src_ty = self.air.typeOf(src_air); const 
src_mcv = try self.resolveInst(src_air); - if (src_ty.zigTypeTag() == .Vector) { + if (src_ty.zigTypeTag(mod) == .Vector) { return self.fail("TODO implement genUnOp for {}", .{src_ty.fmt(self.bin_file.options.module.?)}); } @@ -5786,28 +5807,22 @@ fn genUnOp(self: *Self, maybe_inst: ?Air.Inst.Index, tag: Air.Inst.Tag, src_air: switch (tag) { .not => { - const limb_abi_size = @intCast(u16, @min(src_ty.abiSize(self.target.*), 8)); + const limb_abi_size = @intCast(u16, @min(src_ty.abiSize(mod), 8)); const int_info = if (src_ty.tag() == .bool) std.builtin.Type.Int{ .signedness = .unsigned, .bits = 1 } else - src_ty.intInfo(self.target.*); + src_ty.intInfo(mod); var byte_off: i32 = 0; while (byte_off * 8 < int_info.bits) : (byte_off += limb_abi_size) { - var limb_pl = Type.Payload.Bits{ - .base = .{ .tag = switch (int_info.signedness) { - .signed => .int_signed, - .unsigned => .int_unsigned, - } }, - .data = @intCast(u16, @min(int_info.bits - byte_off * 8, limb_abi_size * 8)), - }; - const limb_ty = Type.initPayload(&limb_pl.base); + const limb_bits = @intCast(u16, @min(int_info.bits - byte_off * 8, limb_abi_size * 8)); + const limb_ty = try mod.intType(int_info.signedness, limb_bits); const limb_mcv = switch (byte_off) { 0 => dst_mcv, else => dst_mcv.address().offset(byte_off).deref(), }; - if (limb_pl.base.tag == .int_unsigned and self.regExtraBits(limb_ty) > 0) { - const mask = @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - limb_pl.data); + if (int_info.signedness == .unsigned and self.regExtraBits(limb_ty) > 0) { + const mask = @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - limb_bits); try self.genBinOpMir(.{ ._, .xor }, limb_ty, limb_mcv, .{ .immediate = mask }); } else try self.genUnOpMir(.{ ._, .not }, limb_ty, limb_mcv); } @@ -5819,7 +5834,8 @@ fn genUnOp(self: *Self, maybe_inst: ?Air.Inst.Index, tag: Air.Inst.Tag, src_air: } fn genUnOpMir(self: *Self, mir_tag: Mir.Inst.FixedTag, dst_ty: Type, dst_mcv: MCValue) !void { - const abi_size = @intCast(u32, dst_ty.abiSize(self.target.*)); + const mod = self.bin_file.options.module.?; + const abi_size = @intCast(u32, dst_ty.abiSize(mod)); if (abi_size > 8) return self.fail("TODO implement {} for {}", .{ mir_tag, dst_ty.fmt(self.bin_file.options.module.?), @@ -5866,6 +5882,7 @@ fn genShiftBinOpMir( lhs_mcv: MCValue, shift_mcv: MCValue, ) !void { + const mod = self.bin_file.options.module.?; const rhs_mcv: MCValue = rhs: { switch (shift_mcv) { .immediate => |imm| switch (imm) { @@ -5880,7 +5897,7 @@ fn genShiftBinOpMir( break :rhs .{ .register = .rcx }; }; - const abi_size = @intCast(u32, ty.abiSize(self.target.*)); + const abi_size = @intCast(u32, ty.abiSize(mod)); if (abi_size <= 8) { switch (lhs_mcv) { .register => |lhs_reg| switch (rhs_mcv) { @@ -6099,13 +6116,14 @@ fn genShiftBinOp( lhs_ty: Type, rhs_ty: Type, ) !MCValue { - if (lhs_ty.zigTypeTag() == .Vector) { + const mod = self.bin_file.options.module.?; + if (lhs_ty.zigTypeTag(mod) == .Vector) { return self.fail("TODO implement genShiftBinOp for {}", .{lhs_ty.fmtDebug()}); } - assert(rhs_ty.abiSize(self.target.*) == 1); + assert(rhs_ty.abiSize(mod) == 1); - const lhs_abi_size = lhs_ty.abiSize(self.target.*); + const lhs_abi_size = lhs_ty.abiSize(mod); if (lhs_abi_size > 16) { return self.fail("TODO implement genShiftBinOp for {}", .{lhs_ty.fmtDebug()}); } @@ -6136,7 +6154,7 @@ fn genShiftBinOp( break :dst dst_mcv; }; - const signedness = lhs_ty.intInfo(self.target.*).signedness; + const signedness = lhs_ty.intInfo(mod).signedness; try self.genShiftBinOpMir(switch (air_tag) { .shl, 
.shl_exact => switch (signedness) { .signed => .{ ._l, .sa }, @@ -6163,11 +6181,12 @@ fn genMulDivBinOp( lhs: MCValue, rhs: MCValue, ) !MCValue { - if (dst_ty.zigTypeTag() == .Vector or dst_ty.zigTypeTag() == .Float) { + const mod = self.bin_file.options.module.?; + if (dst_ty.zigTypeTag(mod) == .Vector or dst_ty.zigTypeTag(mod) == .Float) { return self.fail("TODO implement genMulDivBinOp for {}", .{dst_ty.fmtDebug()}); } - const dst_abi_size = @intCast(u32, dst_ty.abiSize(self.target.*)); - const src_abi_size = @intCast(u32, src_ty.abiSize(self.target.*)); + const dst_abi_size = @intCast(u32, dst_ty.abiSize(mod)); + const src_abi_size = @intCast(u32, src_ty.abiSize(mod)); if (switch (tag) { else => unreachable, .mul, .mulwrap => dst_abi_size != src_abi_size and dst_abi_size != src_abi_size * 2, @@ -6184,7 +6203,7 @@ fn genMulDivBinOp( const reg_locks = self.register_manager.lockRegs(2, .{ .rax, .rdx }); defer for (reg_locks) |reg_lock| if (reg_lock) |lock| self.register_manager.unlockReg(lock); - const signedness = ty.intInfo(self.target.*).signedness; + const signedness = ty.intInfo(mod).signedness; switch (tag) { .mul, .mulwrap, @@ -6338,13 +6357,14 @@ fn genBinOp( lhs_air: Air.Inst.Ref, rhs_air: Air.Inst.Ref, ) !MCValue { + const mod = self.bin_file.options.module.?; const lhs_ty = self.air.typeOf(lhs_air); const rhs_ty = self.air.typeOf(rhs_air); - const abi_size = @intCast(u32, lhs_ty.abiSize(self.target.*)); + const abi_size = @intCast(u32, lhs_ty.abiSize(mod)); const maybe_mask_reg = switch (air_tag) { else => null, - .max, .min => if (lhs_ty.scalarType().isRuntimeFloat()) registerAlias( + .max, .min => if (lhs_ty.scalarType(mod).isRuntimeFloat()) registerAlias( if (!self.hasFeature(.avx) and self.hasFeature(.sse4_1)) mask: { try self.register_manager.getReg(.xmm0, null); break :mask .xmm0; @@ -6384,7 +6404,7 @@ fn genBinOp( else => false, }; - const vec_op = switch (lhs_ty.zigTypeTag()) { + const vec_op = switch (lhs_ty.zigTypeTag(mod)) { else => false, .Float, .Vector => true, }; @@ -6456,7 +6476,7 @@ fn genBinOp( const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg); defer self.register_manager.unlockReg(tmp_lock); - const elem_size = lhs_ty.elemType2().abiSize(self.target.*); + const elem_size = lhs_ty.elemType2(mod).abiSize(mod); try self.genIntMulComplexOpMir(rhs_ty, tmp_mcv, .{ .immediate = elem_size }); try self.genBinOpMir( switch (air_tag) { @@ -6506,7 +6526,7 @@ fn genBinOp( try self.genBinOpMir(.{ ._, .cmp }, lhs_ty, dst_mcv, mat_src_mcv); - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); const cc: Condition = switch (int_info.signedness) { .unsigned => switch (air_tag) { .min => .a, @@ -6520,7 +6540,7 @@ fn genBinOp( }, }; - const cmov_abi_size = @max(@intCast(u32, lhs_ty.abiSize(self.target.*)), 2); + const cmov_abi_size = @max(@intCast(u32, lhs_ty.abiSize(mod)), 2); const tmp_reg = switch (dst_mcv) { .register => |reg| reg, else => try self.copyToTmpRegister(lhs_ty, dst_mcv), @@ -6581,7 +6601,7 @@ fn genBinOp( } const dst_reg = registerAlias(dst_mcv.getReg().?, abi_size); - const mir_tag = if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag()) { + const mir_tag = if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(mod)) { else => unreachable, .Float => switch (lhs_ty.floatBits(self.target.*)) { 16 => if (self.hasFeature(.f16c)) { @@ -6657,9 +6677,9 @@ fn genBinOp( 80, 128 => null, else => unreachable, }, - .Vector => switch (lhs_ty.childType().zigTypeTag()) { + .Vector => switch 
(lhs_ty.childType().zigTypeTag(mod)) { else => null, - .Int => switch (lhs_ty.childType().intInfo(self.target.*).bits) { + .Int => switch (lhs_ty.childType().intInfo(mod).bits) { 8 => switch (lhs_ty.vectorLen()) { 1...16 => switch (air_tag) { .add, @@ -6671,7 +6691,7 @@ fn genBinOp( .bit_and => if (self.hasFeature(.avx)) .{ .vp_, .@"and" } else .{ .p_, .@"and" }, .bit_or => if (self.hasFeature(.avx)) .{ .vp_, .@"or" } else .{ .p_, .@"or" }, .xor => if (self.hasFeature(.avx)) .{ .vp_, .xor } else .{ .p_, .xor }, - .min => switch (lhs_ty.childType().intInfo(self.target.*).signedness) { + .min => switch (lhs_ty.childType().intInfo(mod).signedness) { .signed => if (self.hasFeature(.avx)) .{ .vp_b, .mins } else if (self.hasFeature(.sse4_1)) @@ -6685,7 +6705,7 @@ fn genBinOp( else null, }, - .max => switch (lhs_ty.childType().intInfo(self.target.*).signedness) { + .max => switch (lhs_ty.childType().intInfo(mod).signedness) { .signed => if (self.hasFeature(.avx)) .{ .vp_b, .maxs } else if (self.hasFeature(.sse4_1)) @@ -6711,11 +6731,11 @@ fn genBinOp( .bit_and => if (self.hasFeature(.avx2)) .{ .vp_, .@"and" } else null, .bit_or => if (self.hasFeature(.avx2)) .{ .vp_, .@"or" } else null, .xor => if (self.hasFeature(.avx2)) .{ .vp_, .xor } else null, - .min => switch (lhs_ty.childType().intInfo(self.target.*).signedness) { + .min => switch (lhs_ty.childType().intInfo(mod).signedness) { .signed => if (self.hasFeature(.avx2)) .{ .vp_b, .mins } else null, .unsigned => if (self.hasFeature(.avx)) .{ .vp_b, .minu } else null, }, - .max => switch (lhs_ty.childType().intInfo(self.target.*).signedness) { + .max => switch (lhs_ty.childType().intInfo(mod).signedness) { .signed => if (self.hasFeature(.avx2)) .{ .vp_b, .maxs } else null, .unsigned => if (self.hasFeature(.avx2)) .{ .vp_b, .maxu } else null, }, @@ -6737,7 +6757,7 @@ fn genBinOp( .bit_and => if (self.hasFeature(.avx)) .{ .vp_, .@"and" } else .{ .p_, .@"and" }, .bit_or => if (self.hasFeature(.avx)) .{ .vp_, .@"or" } else .{ .p_, .@"or" }, .xor => if (self.hasFeature(.avx)) .{ .vp_, .xor } else .{ .p_, .xor }, - .min => switch (lhs_ty.childType().intInfo(self.target.*).signedness) { + .min => switch (lhs_ty.childType().intInfo(mod).signedness) { .signed => if (self.hasFeature(.avx)) .{ .vp_w, .mins } else @@ -6747,7 +6767,7 @@ fn genBinOp( else .{ .p_w, .minu }, }, - .max => switch (lhs_ty.childType().intInfo(self.target.*).signedness) { + .max => switch (lhs_ty.childType().intInfo(mod).signedness) { .signed => if (self.hasFeature(.avx)) .{ .vp_w, .maxs } else @@ -6772,11 +6792,11 @@ fn genBinOp( .bit_and => if (self.hasFeature(.avx2)) .{ .vp_, .@"and" } else null, .bit_or => if (self.hasFeature(.avx2)) .{ .vp_, .@"or" } else null, .xor => if (self.hasFeature(.avx2)) .{ .vp_, .xor } else null, - .min => switch (lhs_ty.childType().intInfo(self.target.*).signedness) { + .min => switch (lhs_ty.childType().intInfo(mod).signedness) { .signed => if (self.hasFeature(.avx2)) .{ .vp_w, .mins } else null, .unsigned => if (self.hasFeature(.avx)) .{ .vp_w, .minu } else null, }, - .max => switch (lhs_ty.childType().intInfo(self.target.*).signedness) { + .max => switch (lhs_ty.childType().intInfo(mod).signedness) { .signed => if (self.hasFeature(.avx2)) .{ .vp_w, .maxs } else null, .unsigned => if (self.hasFeature(.avx2)) .{ .vp_w, .maxu } else null, }, @@ -6803,7 +6823,7 @@ fn genBinOp( .bit_and => if (self.hasFeature(.avx)) .{ .vp_, .@"and" } else .{ .p_, .@"and" }, .bit_or => if (self.hasFeature(.avx)) .{ .vp_, .@"or" } else .{ .p_, .@"or" }, .xor => if 
(self.hasFeature(.avx)) .{ .vp_, .xor } else .{ .p_, .xor }, - .min => switch (lhs_ty.childType().intInfo(self.target.*).signedness) { + .min => switch (lhs_ty.childType().intInfo(mod).signedness) { .signed => if (self.hasFeature(.avx)) .{ .vp_d, .mins } else if (self.hasFeature(.sse4_1)) @@ -6817,7 +6837,7 @@ fn genBinOp( else null, }, - .max => switch (lhs_ty.childType().intInfo(self.target.*).signedness) { + .max => switch (lhs_ty.childType().intInfo(mod).signedness) { .signed => if (self.hasFeature(.avx)) .{ .vp_d, .maxs } else if (self.hasFeature(.sse4_1)) @@ -6846,11 +6866,11 @@ fn genBinOp( .bit_and => if (self.hasFeature(.avx2)) .{ .vp_, .@"and" } else null, .bit_or => if (self.hasFeature(.avx2)) .{ .vp_, .@"or" } else null, .xor => if (self.hasFeature(.avx2)) .{ .vp_, .xor } else null, - .min => switch (lhs_ty.childType().intInfo(self.target.*).signedness) { + .min => switch (lhs_ty.childType().intInfo(mod).signedness) { .signed => if (self.hasFeature(.avx2)) .{ .vp_d, .mins } else null, .unsigned => if (self.hasFeature(.avx)) .{ .vp_d, .minu } else null, }, - .max => switch (lhs_ty.childType().intInfo(self.target.*).signedness) { + .max => switch (lhs_ty.childType().intInfo(mod).signedness) { .signed => if (self.hasFeature(.avx2)) .{ .vp_d, .maxs } else null, .unsigned => if (self.hasFeature(.avx2)) .{ .vp_d, .maxu } else null, }, @@ -7206,14 +7226,14 @@ fn genBinOp( const rhs_copy_reg = registerAlias(src_mcv.getReg().?, abi_size); try self.asmRegisterRegisterRegisterImmediate( - if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag()) { + if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(mod)) { .Float => switch (lhs_ty.floatBits(self.target.*)) { 32 => .{ .v_ss, .cmp }, 64 => .{ .v_sd, .cmp }, 16, 80, 128 => null, else => unreachable, }, - .Vector => switch (lhs_ty.childType().zigTypeTag()) { + .Vector => switch (lhs_ty.childType().zigTypeTag(mod)) { .Float => switch (lhs_ty.childType().floatBits(self.target.*)) { 32 => switch (lhs_ty.vectorLen()) { 1 => .{ .v_ss, .cmp }, @@ -7240,14 +7260,14 @@ fn genBinOp( Immediate.u(3), // unord ); try self.asmRegisterRegisterRegisterRegister( - if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag()) { + if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(mod)) { .Float => switch (lhs_ty.floatBits(self.target.*)) { 32 => .{ .v_ps, .blendv }, 64 => .{ .v_pd, .blendv }, 16, 80, 128 => null, else => unreachable, }, - .Vector => switch (lhs_ty.childType().zigTypeTag()) { + .Vector => switch (lhs_ty.childType().zigTypeTag(mod)) { .Float => switch (lhs_ty.childType().floatBits(self.target.*)) { 32 => switch (lhs_ty.vectorLen()) { 1...8 => .{ .v_ps, .blendv }, @@ -7274,14 +7294,14 @@ fn genBinOp( } else { const has_blend = self.hasFeature(.sse4_1); try self.asmRegisterRegisterImmediate( - if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag()) { + if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(mod)) { .Float => switch (lhs_ty.floatBits(self.target.*)) { 32 => .{ ._ss, .cmp }, 64 => .{ ._sd, .cmp }, 16, 80, 128 => null, else => unreachable, }, - .Vector => switch (lhs_ty.childType().zigTypeTag()) { + .Vector => switch (lhs_ty.childType().zigTypeTag(mod)) { .Float => switch (lhs_ty.childType().floatBits(self.target.*)) { 32 => switch (lhs_ty.vectorLen()) { 1 => .{ ._ss, .cmp }, @@ -7307,14 +7327,14 @@ fn genBinOp( Immediate.u(if (has_blend) 3 else 7), // unord, ord ); if (has_blend) try self.asmRegisterRegisterRegister( - if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag()) { + if (@as(?Mir.Inst.FixedTag, switch 
(lhs_ty.zigTypeTag(mod)) { .Float => switch (lhs_ty.floatBits(self.target.*)) { 32 => .{ ._ps, .blendv }, 64 => .{ ._pd, .blendv }, 16, 80, 128 => null, else => unreachable, }, - .Vector => switch (lhs_ty.childType().zigTypeTag()) { + .Vector => switch (lhs_ty.childType().zigTypeTag(mod)) { .Float => switch (lhs_ty.childType().floatBits(self.target.*)) { 32 => switch (lhs_ty.vectorLen()) { 1...4 => .{ ._ps, .blendv }, @@ -7338,14 +7358,14 @@ fn genBinOp( mask_reg, ) else { try self.asmRegisterRegister( - if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag()) { + if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(mod)) { .Float => switch (lhs_ty.floatBits(self.target.*)) { 32 => .{ ._ps, .@"and" }, 64 => .{ ._pd, .@"and" }, 16, 80, 128 => null, else => unreachable, }, - .Vector => switch (lhs_ty.childType().zigTypeTag()) { + .Vector => switch (lhs_ty.childType().zigTypeTag(mod)) { .Float => switch (lhs_ty.childType().floatBits(self.target.*)) { 32 => switch (lhs_ty.vectorLen()) { 1...4 => .{ ._ps, .@"and" }, @@ -7368,14 +7388,14 @@ fn genBinOp( mask_reg, ); try self.asmRegisterRegister( - if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag()) { + if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(mod)) { .Float => switch (lhs_ty.floatBits(self.target.*)) { 32 => .{ ._ps, .andn }, 64 => .{ ._pd, .andn }, 16, 80, 128 => null, else => unreachable, }, - .Vector => switch (lhs_ty.childType().zigTypeTag()) { + .Vector => switch (lhs_ty.childType().zigTypeTag(mod)) { .Float => switch (lhs_ty.childType().floatBits(self.target.*)) { 32 => switch (lhs_ty.vectorLen()) { 1...4 => .{ ._ps, .andn }, @@ -7398,14 +7418,14 @@ fn genBinOp( lhs_copy_reg.?, ); try self.asmRegisterRegister( - if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag()) { + if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(mod)) { .Float => switch (lhs_ty.floatBits(self.target.*)) { 32 => .{ ._ps, .@"or" }, 64 => .{ ._pd, .@"or" }, 16, 80, 128 => null, else => unreachable, }, - .Vector => switch (lhs_ty.childType().zigTypeTag()) { + .Vector => switch (lhs_ty.childType().zigTypeTag(mod)) { .Float => switch (lhs_ty.childType().floatBits(self.target.*)) { 32 => switch (lhs_ty.vectorLen()) { 1...4 => .{ ._ps, .@"or" }, @@ -7442,7 +7462,8 @@ fn genBinOpMir( dst_mcv: MCValue, src_mcv: MCValue, ) !void { - const abi_size = @intCast(u32, ty.abiSize(self.target.*)); + const mod = self.bin_file.options.module.?; + const abi_size = @intCast(u32, ty.abiSize(mod)); switch (dst_mcv) { .none, .unreach, @@ -7640,7 +7661,7 @@ fn genBinOpMir( defer if (src_info) |info| self.register_manager.unlockReg(info.addr_lock); const ty_signedness = - if (ty.isAbiInt()) ty.intInfo(self.target.*).signedness else .unsigned; + if (ty.isAbiInt(mod)) ty.intInfo(mod).signedness else .unsigned; const limb_ty = if (abi_size <= 8) ty else switch (ty_signedness) { .signed => Type.usize, .unsigned => Type.isize, @@ -7796,7 +7817,8 @@ fn genBinOpMir( /// Performs multi-operand integer multiplication between dst_mcv and src_mcv, storing the result in dst_mcv. /// Does not support byte-size operands. 
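Everything in the genBinOp and genBinOpMir hunks above is the same mechanical change: Type queries that used to take a copied Target, such as ty.abiSize(self.target.*), ty.zigTypeTag(), and ty.intInfo(self.target.*), now take the Module, resolved once at the top of the function. A minimal sketch of the pattern, using a hypothetical helper name (exampleQuery is illustrative, not part of the patch):

    fn exampleQuery(self: *Self, ty: Type) !u32 {
        // Resolve the Module once; it now owns the type information that the
        // old API recomputed against a Target on every query.
        const mod = self.bin_file.options.module.?;
        if (ty.zigTypeTag(mod) == .Vector)
            return self.fail("TODO implement exampleQuery for vectors", .{});
        return @intCast(u32, ty.abiSize(mod)); // was: ty.abiSize(self.target.*)
    }

Queries that really are about the machine, such as floatBits(self.target.*) and the hasFeature checks, keep their old Target-based signatures throughout the patch.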
fn genIntMulComplexOpMir(self: *Self, dst_ty: Type, dst_mcv: MCValue, src_mcv: MCValue) InnerError!void { - const abi_size = @intCast(u32, dst_ty.abiSize(self.target.*)); + const mod = self.bin_file.options.module.?; + const abi_size = @intCast(u32, dst_ty.abiSize(mod)); switch (dst_mcv) { .none, .unreach, @@ -8022,6 +8044,7 @@ fn airFence(self: *Self, inst: Air.Inst.Index) !void { } fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier) !void { + const mod = self.bin_file.options.module.?; if (modifier == .always_tail) return self.fail("TODO implement tail calls for x86_64", .{}); const pl_op = self.air.instructions.items(.data)[inst].pl_op; const callee = pl_op.operand; @@ -8029,7 +8052,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]); const ty = self.air.typeOf(callee); - const fn_ty = switch (ty.zigTypeTag()) { + const fn_ty = switch (ty.zigTypeTag(mod)) { .Fn => ty, .Pointer => ty.childType(), else => unreachable, @@ -8077,7 +8100,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier .none, .unreach => null, .indirect => |reg_off| lock: { const ret_ty = fn_ty.fnReturnType(); - const frame_index = try self.allocFrameIndex(FrameAlloc.initType(ret_ty, self.target.*)); + const frame_index = try self.allocFrameIndex(FrameAlloc.initType(ret_ty, mod)); try self.genSetReg(reg_off.reg, Type.usize, .{ .lea_frame = .{ .index = frame_index, .off = -reg_off.off }, }); @@ -8100,8 +8123,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier // Due to incremental compilation, how function calls are generated depends // on linking. - const mod = self.bin_file.options.module.?; - if (self.air.value(callee)) |func_value| { + if (self.air.value(callee, mod)) |func_value| { if (if (func_value.castTag(.function)) |func_payload| func_payload.data.owner_decl else if (func_value.castTag(.decl_ref)) |decl_ref_payload| @@ -8178,7 +8200,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier return self.fail("TODO implement calling bitcasted functions", .{}); } } else { - assert(ty.zigTypeTag() == .Pointer); + assert(ty.zigTypeTag(mod) == .Pointer); const mcv = try self.resolveInst(callee); try self.genSetReg(.rax, Type.usize, mcv); try self.asmRegister(.{ ._, .call }, .rax); @@ -8234,6 +8256,7 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void { } fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { + const mod = self.bin_file.options.module.?; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const ty = self.air.typeOf(bin_op.lhs); @@ -8255,9 +8278,9 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock); const result = MCValue{ - .eflags = switch (ty.zigTypeTag()) { + .eflags = switch (ty.zigTypeTag(mod)) { else => result: { - const abi_size = @intCast(u16, ty.abiSize(self.target.*)); + const abi_size = @intCast(u16, ty.abiSize(mod)); const may_flip: enum { may_flip, must_flip, @@ -8290,7 +8313,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { defer if (src_lock) |lock| self.register_manager.unlockReg(lock); break :result Condition.fromCompareOperator( - if (ty.isAbiInt()) ty.intInfo(self.target.*).signedness else .unsigned, + if (ty.isAbiInt(mod)) ty.intInfo(mod).signedness else .unsigned, 
result_op: { const flipped_op = if (flipped) op.reverse() else op; if (abi_size > 8) switch (flipped_op) { @@ -8404,7 +8427,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { try self.asmRegisterRegister(.{ .v_, .movshdup }, tmp2_reg, tmp1_reg); try self.genBinOpMir(.{ ._ss, .ucomi }, ty, tmp1_mcv, tmp2_mcv); } else return self.fail("TODO implement airCmp for {}", .{ - ty.fmt(self.bin_file.options.module.?), + ty.fmt(mod), }), 32 => try self.genBinOpMir( .{ ._ss, .ucomi }, @@ -8419,7 +8442,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { src_mcv, ), else => return self.fail("TODO implement airCmp for {}", .{ - ty.fmt(self.bin_file.options.module.?), + ty.fmt(mod), }), } @@ -8454,7 +8477,7 @@ fn airCmpLtErrorsLen(self: *Self, inst: Air.Inst.Index) !void { self.eflags_inst = inst; const op_ty = self.air.typeOf(un_op); - const op_abi_size = @intCast(u32, op_ty.abiSize(self.target.*)); + const op_abi_size = @intCast(u32, op_ty.abiSize(mod)); const op_mcv = try self.resolveInst(un_op); const dst_reg = switch (op_mcv) { .register => |reg| reg, @@ -8573,7 +8596,8 @@ fn airDbgVar(self: *Self, inst: Air.Inst.Index) !void { } fn genCondBrMir(self: *Self, ty: Type, mcv: MCValue) !u32 { - const abi_size = ty.abiSize(self.target.*); + const mod = self.bin_file.options.module.?; + const abi_size = ty.abiSize(mod); switch (mcv) { .eflags => |cc| { // Here we map the opposites since the jump is to the false branch. @@ -8646,6 +8670,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void { } fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MCValue { + const mod = self.bin_file.options.module.?; switch (opt_mcv) { .register_overflow => |ro| return .{ .eflags = ro.eflags.negate() }, else => {}, @@ -8658,10 +8683,10 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC const pl_ty = opt_ty.optionalChild(&pl_buf); var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined; - const some_info: struct { off: i32, ty: Type } = if (opt_ty.optionalReprIsPayload()) + const some_info: struct { off: i32, ty: Type } = if (opt_ty.optionalReprIsPayload(mod)) .{ .off = 0, .ty = if (pl_ty.isSlice()) pl_ty.slicePtrFieldType(&ptr_buf) else pl_ty } else - .{ .off = @intCast(i32, pl_ty.abiSize(self.target.*)), .ty = Type.bool }; + .{ .off = @intCast(i32, pl_ty.abiSize(mod)), .ty = Type.bool }; switch (opt_mcv) { .none, @@ -8681,14 +8706,14 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC .register => |opt_reg| { if (some_info.off == 0) { - const some_abi_size = @intCast(u32, some_info.ty.abiSize(self.target.*)); + const some_abi_size = @intCast(u32, some_info.ty.abiSize(mod)); const alias_reg = registerAlias(opt_reg, some_abi_size); assert(some_abi_size * 8 == alias_reg.bitSize()); try self.asmRegisterRegister(.{ ._, .@"test" }, alias_reg, alias_reg); return .{ .eflags = .z }; } assert(some_info.ty.tag() == .bool); - const opt_abi_size = @intCast(u32, opt_ty.abiSize(self.target.*)); + const opt_abi_size = @intCast(u32, opt_ty.abiSize(mod)); try self.asmRegisterImmediate( .{ ._, .bt }, registerAlias(opt_reg, opt_abi_size), @@ -8707,7 +8732,7 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC defer self.register_manager.unlockReg(addr_reg_lock); try self.genSetReg(addr_reg, Type.usize, opt_mcv.address()); - const some_abi_size = @intCast(u32, some_info.ty.abiSize(self.target.*)); + const some_abi_size = @intCast(u32, 
some_info.ty.abiSize(mod)); try self.asmMemoryImmediate( .{ ._, .cmp }, Memory.sib(Memory.PtrSize.fromSize(some_abi_size), .{ @@ -8720,7 +8745,7 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC }, .indirect, .load_frame => { - const some_abi_size = @intCast(u32, some_info.ty.abiSize(self.target.*)); + const some_abi_size = @intCast(u32, some_info.ty.abiSize(mod)); try self.asmMemoryImmediate( .{ ._, .cmp }, Memory.sib(Memory.PtrSize.fromSize(some_abi_size), switch (opt_mcv) { @@ -8742,6 +8767,7 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC } fn isNullPtr(self: *Self, inst: Air.Inst.Index, ptr_ty: Type, ptr_mcv: MCValue) !MCValue { + const mod = self.bin_file.options.module.?; try self.spillEflagsIfOccupied(); self.eflags_inst = inst; @@ -8750,10 +8776,10 @@ fn isNullPtr(self: *Self, inst: Air.Inst.Index, ptr_ty: Type, ptr_mcv: MCValue) const pl_ty = opt_ty.optionalChild(&pl_buf); var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined; - const some_info: struct { off: i32, ty: Type } = if (opt_ty.optionalReprIsPayload()) + const some_info: struct { off: i32, ty: Type } = if (opt_ty.optionalReprIsPayload(mod)) .{ .off = 0, .ty = if (pl_ty.isSlice()) pl_ty.slicePtrFieldType(&ptr_buf) else pl_ty } else - .{ .off = @intCast(i32, pl_ty.abiSize(self.target.*)), .ty = Type.bool }; + .{ .off = @intCast(i32, pl_ty.abiSize(mod)), .ty = Type.bool }; const ptr_reg = switch (ptr_mcv) { .register => |reg| reg, @@ -8762,7 +8788,7 @@ fn isNullPtr(self: *Self, inst: Air.Inst.Index, ptr_ty: Type, ptr_mcv: MCValue) const ptr_lock = self.register_manager.lockReg(ptr_reg); defer if (ptr_lock) |lock| self.register_manager.unlockReg(lock); - const some_abi_size = @intCast(u32, some_info.ty.abiSize(self.target.*)); + const some_abi_size = @intCast(u32, some_info.ty.abiSize(mod)); try self.asmMemoryImmediate( .{ ._, .cmp }, Memory.sib(Memory.PtrSize.fromSize(some_abi_size), .{ @@ -8775,6 +8801,7 @@ fn isNullPtr(self: *Self, inst: Air.Inst.Index, ptr_ty: Type, ptr_mcv: MCValue) } fn isErr(self: *Self, maybe_inst: ?Air.Inst.Index, ty: Type, operand: MCValue) !MCValue { + const mod = self.bin_file.options.module.?; const err_type = ty.errorUnionSet(); if (err_type.errorSetIsEmpty()) { @@ -8786,7 +8813,7 @@ fn isErr(self: *Self, maybe_inst: ?Air.Inst.Index, ty: Type, operand: MCValue) ! 
self.eflags_inst = inst; } - const err_off = errUnionErrorOffset(ty.errorUnionPayload(), self.target.*); + const err_off = errUnionErrorOffset(ty.errorUnionPayload(), mod); switch (operand) { .register => |reg| { const eu_lock = self.register_manager.lockReg(reg); @@ -9088,12 +9115,13 @@ fn performReloc(self: *Self, reloc: Mir.Inst.Index) !void { } fn airBr(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const br = self.air.instructions.items(.data)[inst].br; const src_mcv = try self.resolveInst(br.operand); const block_ty = self.air.typeOfIndex(br.block_inst); const block_unused = - !block_ty.hasRuntimeBitsIgnoreComptime() or self.liveness.isUnused(br.block_inst); + !block_ty.hasRuntimeBitsIgnoreComptime(mod) or self.liveness.isUnused(br.block_inst); const block_tracking = self.inst_tracking.getPtr(br.block_inst).?; const block_data = self.blocks.getPtr(br.block_inst).?; const first_br = block_data.relocs.items.len == 0; @@ -9402,7 +9430,8 @@ const MoveStrategy = union(enum) { }; }; fn moveStrategy(self: *Self, ty: Type, aligned: bool) !MoveStrategy { - switch (ty.zigTypeTag()) { + const mod = self.bin_file.options.module.?; + switch (ty.zigTypeTag(mod)) { else => return .{ .move = .{ ._, .mov } }, .Float => switch (ty.floatBits(self.target.*)) { 16 => return if (self.hasFeature(.avx)) .{ .vex_insert_extract = .{ @@ -9419,8 +9448,8 @@ fn moveStrategy(self: *Self, ty: Type, aligned: bool) !MoveStrategy { else if (aligned) .{ ._, .movdqa } else .{ ._, .movdqu } }, else => {}, }, - .Vector => switch (ty.childType().zigTypeTag()) { - .Int => switch (ty.childType().intInfo(self.target.*).bits) { + .Vector => switch (ty.childType().zigTypeTag(mod)) { + .Int => switch (ty.childType().intInfo(mod).bits) { 8 => switch (ty.vectorLen()) { 1 => if (self.hasFeature(.avx)) return .{ .vex_insert_extract = .{ .insert = .{ .vp_b, .insr }, @@ -9647,7 +9676,8 @@ fn genCopy(self: *Self, ty: Type, dst_mcv: MCValue, src_mcv: MCValue) InnerError } fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerError!void { - const abi_size = @intCast(u32, ty.abiSize(self.target.*)); + const mod = self.bin_file.options.module.?; + const abi_size = @intCast(u32, ty.abiSize(mod)); if (abi_size * 8 > dst_reg.bitSize()) return self.fail("genSetReg called with a value larger than dst_reg", .{}); switch (src_mcv) { @@ -9730,7 +9760,7 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr .{ .register = try self.copyToTmpRegister(ty, src_mcv) }, ), .sse => try self.asmRegisterRegister( - if (@as(?Mir.Inst.FixedTag, switch (ty.scalarType().zigTypeTag()) { + if (@as(?Mir.Inst.FixedTag, switch (ty.scalarType(mod).zigTypeTag(mod)) { else => switch (abi_size) { 1...4 => if (self.hasFeature(.avx)) .{ .v_d, .mov } else .{ ._d, .mov }, 5...8 => if (self.hasFeature(.avx)) .{ .v_q, .mov } else .{ ._q, .mov }, @@ -9738,7 +9768,7 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr 17...32 => if (self.hasFeature(.avx)) .{ .v_, .movdqa } else null, else => null, }, - .Float => switch (ty.scalarType().floatBits(self.target.*)) { + .Float => switch (ty.scalarType(mod).floatBits(self.target.*)) { 16, 128 => switch (abi_size) { 2...4 => if (self.hasFeature(.avx)) .{ .v_d, .mov } else .{ ._d, .mov }, 5...8 => if (self.hasFeature(.avx)) .{ .v_q, .mov } else .{ ._q, .mov }, @@ -9789,7 +9819,7 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr .indirect => try self.moveStrategy(ty, false), 
.load_frame => |frame_addr| try self.moveStrategy( ty, - self.getFrameAddrAlignment(frame_addr) >= ty.abiAlignment(self.target.*), + self.getFrameAddrAlignment(frame_addr) >= ty.abiAlignment(mod), ), .lea_frame => .{ .move = .{ ._, .lea } }, else => unreachable, @@ -9821,7 +9851,7 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr switch (try self.moveStrategy(ty, mem.isAlignedGeneric( u32, @bitCast(u32, small_addr), - ty.abiAlignment(self.target.*), + ty.abiAlignment(mod), ))) { .move => |tag| try self.asmRegisterMemory(tag, dst_alias, src_mem), .insert_extract => |ie| try self.asmRegisterMemoryImmediate( @@ -9839,7 +9869,7 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr ), } }, - .load_direct => |sym_index| switch (ty.zigTypeTag()) { + .load_direct => |sym_index| switch (ty.zigTypeTag(mod)) { else => { const atom_index = try self.owner.getSymbolIndex(self); _ = try self.addInst(.{ @@ -9933,7 +9963,8 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr } fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCValue) InnerError!void { - const abi_size = @intCast(u32, ty.abiSize(self.target.*)); + const mod = self.bin_file.options.module.?; + const abi_size = @intCast(u32, ty.abiSize(mod)); const dst_ptr_mcv: MCValue = switch (base) { .none => .{ .immediate = @bitCast(u64, @as(i64, disp)) }, .reg => |base_reg| .{ .register_offset = .{ .reg = base_reg, .off = disp } }, @@ -9945,7 +9976,7 @@ fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCVal try self.genInlineMemset(dst_ptr_mcv, .{ .immediate = 0xaa }, .{ .immediate = abi_size }), .immediate => |imm| switch (abi_size) { 1, 2, 4 => { - const immediate = if (ty.isSignedInt()) + const immediate = if (ty.isSignedInt(mod)) Immediate.s(@truncate(i32, @bitCast(i64, imm))) else Immediate.u(@intCast(u32, imm)); @@ -9967,7 +9998,7 @@ fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCVal while (offset < abi_size) : (offset += 4) try self.asmMemoryImmediate( .{ ._, .mov }, Memory.sib(.dword, .{ .base = base, .disp = disp + offset }), - if (ty.isSignedInt()) + if (ty.isSignedInt(mod)) Immediate.s(@truncate( i32, @bitCast(i64, imm) >> (math.cast(u6, offset * 8) orelse 63), @@ -9991,19 +10022,19 @@ fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCVal .none => mem.isAlignedGeneric( u32, @bitCast(u32, disp), - ty.abiAlignment(self.target.*), + ty.abiAlignment(mod), ), .reg => |reg| switch (reg) { .es, .cs, .ss, .ds => mem.isAlignedGeneric( u32, @bitCast(u32, disp), - ty.abiAlignment(self.target.*), + ty.abiAlignment(mod), ), else => false, }, .frame => |frame_index| self.getFrameAddrAlignment( .{ .index = frame_index, .off = disp }, - ) >= ty.abiAlignment(self.target.*), + ) >= ty.abiAlignment(mod), })) { .move => |tag| try self.asmMemoryRegister(tag, dst_mem, src_alias), .insert_extract, .vex_insert_extract => |ie| try self.asmMemoryRegisterImmediate( @@ -10017,13 +10048,13 @@ fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCVal .register_overflow => |ro| { try self.genSetMem( base, - disp + @intCast(i32, ty.structFieldOffset(0, self.target.*)), + disp + @intCast(i32, ty.structFieldOffset(0, mod)), ty.structFieldType(0), .{ .register = ro.reg }, ); try self.genSetMem( base, - disp + @intCast(i32, ty.structFieldOffset(1, self.target.*)), + disp + @intCast(i32, ty.structFieldOffset(1, mod)), ty.structFieldType(1), .{ .eflags = 
ro.eflags }, ); @@ -10146,13 +10177,14 @@ fn airPtrToInt(self: *Self, inst: Air.Inst.Index) !void { } fn airBitCast(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const dst_ty = self.air.typeOfIndex(inst); const src_ty = self.air.typeOf(ty_op.operand); const result = result: { - const dst_rc = regClassForType(dst_ty); - const src_rc = regClassForType(src_ty); + const dst_rc = regClassForType(dst_ty, mod); + const src_rc = regClassForType(src_ty, mod); const src_mcv = try self.resolveInst(ty_op.operand); const src_lock = if (src_mcv.getReg()) |reg| self.register_manager.lockReg(reg) else null; @@ -10172,13 +10204,13 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void { }; const dst_signedness = - if (dst_ty.isAbiInt()) dst_ty.intInfo(self.target.*).signedness else .unsigned; + if (dst_ty.isAbiInt(mod)) dst_ty.intInfo(mod).signedness else .unsigned; const src_signedness = - if (src_ty.isAbiInt()) src_ty.intInfo(self.target.*).signedness else .unsigned; + if (src_ty.isAbiInt(mod)) src_ty.intInfo(mod).signedness else .unsigned; if (dst_signedness == src_signedness) break :result dst_mcv; - const abi_size = @intCast(u16, dst_ty.abiSize(self.target.*)); - const bit_size = @intCast(u16, dst_ty.bitSize(self.target.*)); + const abi_size = @intCast(u16, dst_ty.abiSize(mod)); + const bit_size = @intCast(u16, dst_ty.bitSize(mod)); if (abi_size * 8 <= bit_size) break :result dst_mcv; const dst_limbs_len = math.divCeil(i32, bit_size, 64) catch unreachable; @@ -10192,14 +10224,7 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void { const high_lock = self.register_manager.lockReg(high_reg); defer if (high_lock) |lock| self.register_manager.unlockReg(lock); - var high_pl = Type.Payload.Bits{ - .base = .{ .tag = switch (dst_signedness) { - .signed => .int_signed, - .unsigned => .int_unsigned, - } }, - .data = bit_size % 64, - }; - const high_ty = Type.initPayload(&high_pl.base); + const high_ty = try mod.intType(dst_signedness, bit_size % 64); try self.truncateRegister(high_ty, high_reg); if (!dst_mcv.isRegister()) try self.genCopy( @@ -10213,6 +10238,7 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void { } fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const slice_ty = self.air.typeOfIndex(inst); @@ -10221,11 +10247,11 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void { const array_ty = ptr_ty.childType(); const array_len = array_ty.arrayLen(); - const frame_index = try self.allocFrameIndex(FrameAlloc.initType(slice_ty, self.target.*)); + const frame_index = try self.allocFrameIndex(FrameAlloc.initType(slice_ty, mod)); try self.genSetMem(.{ .frame = frame_index }, 0, ptr_ty, ptr); try self.genSetMem( .{ .frame = frame_index }, - @intCast(i32, ptr_ty.abiSize(self.target.*)), + @intCast(i32, ptr_ty.abiSize(mod)), Type.usize, .{ .immediate = array_len }, ); @@ -10235,12 +10261,13 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void { } fn airIntToFloat(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const src_ty = self.air.typeOf(ty_op.operand); - const src_bits = @intCast(u32, src_ty.bitSize(self.target.*)); + const src_bits = @intCast(u32, src_ty.bitSize(mod)); const src_signedness = - if (src_ty.isAbiInt()) 
src_ty.intInfo(self.target.*).signedness else .unsigned; + if (src_ty.isAbiInt(mod)) src_ty.intInfo(mod).signedness else .unsigned; const dst_ty = self.air.typeOfIndex(inst); const src_size = math.divCeil(u32, @max(switch (src_signedness) { @@ -10248,7 +10275,7 @@ fn airIntToFloat(self: *Self, inst: Air.Inst.Index) !void { .unsigned => src_bits + 1, }, 32), 8) catch unreachable; if (src_size > 8) return self.fail("TODO implement airIntToFloat from {} to {}", .{ - src_ty.fmt(self.bin_file.options.module.?), dst_ty.fmt(self.bin_file.options.module.?), + src_ty.fmt(mod), dst_ty.fmt(mod), }); const src_mcv = try self.resolveInst(ty_op.operand); @@ -10261,12 +10288,12 @@ fn airIntToFloat(self: *Self, inst: Air.Inst.Index) !void { if (src_bits < src_size * 8) try self.truncateRegister(src_ty, src_reg); - const dst_reg = try self.register_manager.allocReg(inst, regClassForType(dst_ty)); + const dst_reg = try self.register_manager.allocReg(inst, regClassForType(dst_ty, mod)); const dst_mcv = MCValue{ .register = dst_reg }; const dst_lock = self.register_manager.lockRegAssumeUnused(dst_reg); defer self.register_manager.unlockReg(dst_lock); - const mir_tag = if (@as(?Mir.Inst.FixedTag, switch (dst_ty.zigTypeTag()) { + const mir_tag = if (@as(?Mir.Inst.FixedTag, switch (dst_ty.zigTypeTag(mod)) { .Float => switch (dst_ty.floatBits(self.target.*)) { 32 => if (self.hasFeature(.avx)) .{ .v_ss, .cvtsi2 } else .{ ._ss, .cvtsi2 }, 64 => if (self.hasFeature(.avx)) .{ .v_sd, .cvtsi2 } else .{ ._sd, .cvtsi2 }, @@ -10275,7 +10302,7 @@ fn airIntToFloat(self: *Self, inst: Air.Inst.Index) !void { }, else => null, })) |tag| tag else return self.fail("TODO implement airIntToFloat from {} to {}", .{ - src_ty.fmt(self.bin_file.options.module.?), dst_ty.fmt(self.bin_file.options.module.?), + src_ty.fmt(mod), dst_ty.fmt(mod), }); const dst_alias = dst_reg.to128(); const src_alias = registerAlias(src_reg, src_size); @@ -10288,13 +10315,14 @@ fn airIntToFloat(self: *Self, inst: Air.Inst.Index) !void { } fn airFloatToInt(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const src_ty = self.air.typeOf(ty_op.operand); const dst_ty = self.air.typeOfIndex(inst); - const dst_bits = @intCast(u32, dst_ty.bitSize(self.target.*)); + const dst_bits = @intCast(u32, dst_ty.bitSize(mod)); const dst_signedness = - if (dst_ty.isAbiInt()) dst_ty.intInfo(self.target.*).signedness else .unsigned; + if (dst_ty.isAbiInt(mod)) dst_ty.intInfo(mod).signedness else .unsigned; const dst_size = math.divCeil(u32, @max(switch (dst_signedness) { .signed => dst_bits, @@ -10312,13 +10340,13 @@ fn airFloatToInt(self: *Self, inst: Air.Inst.Index) !void { const src_lock = self.register_manager.lockRegAssumeUnused(src_reg); defer self.register_manager.unlockReg(src_lock); - const dst_reg = try self.register_manager.allocReg(inst, regClassForType(dst_ty)); + const dst_reg = try self.register_manager.allocReg(inst, regClassForType(dst_ty, mod)); const dst_mcv = MCValue{ .register = dst_reg }; const dst_lock = self.register_manager.lockRegAssumeUnused(dst_reg); defer self.register_manager.unlockReg(dst_lock); try self.asmRegisterRegister( - if (@as(?Mir.Inst.FixedTag, switch (src_ty.zigTypeTag()) { + if (@as(?Mir.Inst.FixedTag, switch (src_ty.zigTypeTag(mod)) { .Float => switch (src_ty.floatBits(self.target.*)) { 32 => if (self.hasFeature(.avx)) .{ .v_, .cvttss2si } else .{ ._, .cvttss2si }, 64 => if (self.hasFeature(.avx)) .{ .v_, .cvttsd2si } else .{ ._, 
.cvttsd2si }, @@ -10339,12 +10367,13 @@ fn airFloatToInt(self: *Self, inst: Air.Inst.Index) !void { } fn airCmpxchg(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Cmpxchg, ty_pl.payload).data; const ptr_ty = self.air.typeOf(extra.ptr); const val_ty = self.air.typeOf(extra.expected_value); - const val_abi_size = @intCast(u32, val_ty.abiSize(self.target.*)); + const val_abi_size = @intCast(u32, val_ty.abiSize(mod)); try self.spillRegisters(&.{ .rax, .rdx, .rbx, .rcx }); const regs_lock = self.register_manager.lockRegsAssumeUnused(4, .{ .rax, .rdx, .rbx, .rcx }); @@ -10433,6 +10462,7 @@ fn atomicOp( rmw_op: ?std.builtin.AtomicRmwOp, order: std.builtin.AtomicOrder, ) InnerError!MCValue { + const mod = self.bin_file.options.module.?; const ptr_lock = switch (ptr_mcv) { .register => |reg| self.register_manager.lockReg(reg), else => null, @@ -10445,7 +10475,7 @@ fn atomicOp( }; defer if (val_lock) |lock| self.register_manager.unlockReg(lock); - const val_abi_size = @intCast(u32, val_ty.abiSize(self.target.*)); + const val_abi_size = @intCast(u32, val_ty.abiSize(mod)); const ptr_size = Memory.PtrSize.fromSize(val_abi_size); const ptr_mem = switch (ptr_mcv) { .immediate, .register, .register_offset, .lea_frame => ptr_mcv.deref().mem(ptr_size), @@ -10539,8 +10569,8 @@ fn atomicOp( .Or => try self.genBinOpMir(.{ ._, .@"or" }, val_ty, tmp_mcv, val_mcv), .Xor => try self.genBinOpMir(.{ ._, .xor }, val_ty, tmp_mcv, val_mcv), .Min, .Max => { - const cc: Condition = switch (if (val_ty.isAbiInt()) - val_ty.intInfo(self.target.*).signedness + const cc: Condition = switch (if (val_ty.isAbiInt(mod)) + val_ty.intInfo(mod).signedness else .unsigned) { .unsigned => switch (op) { @@ -10728,6 +10758,7 @@ fn airAtomicStore(self: *Self, inst: Air.Inst.Index, order: std.builtin.AtomicOr } fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void { + const mod = self.bin_file.options.module.?; if (safety) { // TODO if the value is undef, write 0xaa bytes to dest } else { @@ -10752,7 +10783,7 @@ fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void { }; defer if (src_val_lock) |lock| self.register_manager.unlockReg(lock); - const elem_abi_size = @intCast(u31, elem_ty.abiSize(self.target.*)); + const elem_abi_size = @intCast(u31, elem_ty.abiSize(mod)); if (elem_abi_size == 1) { const ptr: MCValue = switch (dst_ptr_ty.ptrSize()) { @@ -10897,8 +10928,8 @@ fn airTagName(self: *Self, inst: Air.Inst.Index) !void { // We need a properly aligned and sized call frame to be able to call this function. 
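// The block that follows acts on this comment: the needed call frame is
// rebuilt from the result type's ABI size and alignment (both queried
// through the Module now) and merged into the recorded frame requirements
// before the call is emitted.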
{ const needed_call_frame = FrameAlloc.init(.{ - .size = inst_ty.abiSize(self.target.*), - .alignment = inst_ty.abiAlignment(self.target.*), + .size = inst_ty.abiSize(mod), + .alignment = inst_ty.abiAlignment(mod), }); const frame_allocs_slice = self.frame_allocs.slice(); const stack_frame_size = @@ -11013,14 +11044,15 @@ fn airErrorName(self: *Self, inst: Air.Inst.Index) !void { } fn airSplat(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const vector_ty = self.air.typeOfIndex(inst); - const dst_rc = regClassForType(vector_ty); - const scalar_ty = vector_ty.scalarType(); + const dst_rc = regClassForType(vector_ty, mod); + const scalar_ty = vector_ty.scalarType(mod); const src_mcv = try self.resolveInst(ty_op.operand); const result: MCValue = result: { - switch (scalar_ty.zigTypeTag()) { + switch (scalar_ty.zigTypeTag(mod)) { else => {}, .Float => switch (scalar_ty.floatBits(self.target.*)) { 32 => switch (vector_ty.vectorLen()) { @@ -11233,36 +11265,37 @@ fn airReduce(self: *Self, inst: Air.Inst.Index) !void { } fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const result_ty = self.air.typeOfIndex(inst); const len = @intCast(usize, result_ty.arrayLen()); const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]); const result: MCValue = result: { - switch (result_ty.zigTypeTag()) { + switch (result_ty.zigTypeTag(mod)) { .Struct => { const frame_index = - try self.allocFrameIndex(FrameAlloc.initType(result_ty, self.target.*)); + try self.allocFrameIndex(FrameAlloc.initType(result_ty, mod)); if (result_ty.containerLayout() == .Packed) { const struct_obj = result_ty.castTag(.@"struct").?.data; try self.genInlineMemset( .{ .lea_frame = .{ .index = frame_index } }, .{ .immediate = 0 }, - .{ .immediate = result_ty.abiSize(self.target.*) }, + .{ .immediate = result_ty.abiSize(mod) }, ); for (elements, 0..) |elem, elem_i| { - if (result_ty.structFieldValueComptime(elem_i) != null) continue; + if (result_ty.structFieldValueComptime(mod, elem_i) != null) continue; const elem_ty = result_ty.structFieldType(elem_i); - const elem_bit_size = @intCast(u32, elem_ty.bitSize(self.target.*)); + const elem_bit_size = @intCast(u32, elem_ty.bitSize(mod)); if (elem_bit_size > 64) { return self.fail( "TODO airAggregateInit implement packed structs with large fields", .{}, ); } - const elem_abi_size = @intCast(u32, elem_ty.abiSize(self.target.*)); + const elem_abi_size = @intCast(u32, elem_ty.abiSize(mod)); const elem_abi_bits = elem_abi_size * 8; - const elem_off = struct_obj.packedFieldBitOffset(self.target.*, elem_i); + const elem_off = struct_obj.packedFieldBitOffset(mod, elem_i); const elem_byte_off = @intCast(i32, elem_off / elem_abi_bits * elem_abi_size); const elem_bit_off = elem_off % elem_abi_bits; const elem_mcv = try self.resolveInst(elem); @@ -11322,10 +11355,10 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { } } } else for (elements, 0..) 
|elem, elem_i| { - if (result_ty.structFieldValueComptime(elem_i) != null) continue; + if (result_ty.structFieldValueComptime(mod, elem_i) != null) continue; const elem_ty = result_ty.structFieldType(elem_i); - const elem_off = @intCast(i32, result_ty.structFieldOffset(elem_i, self.target.*)); + const elem_off = @intCast(i32, result_ty.structFieldOffset(elem_i, mod)); const elem_mcv = try self.resolveInst(elem); const mat_elem_mcv = switch (elem_mcv) { .load_tlv => |sym_index| MCValue{ .lea_tlv = sym_index }, @@ -11337,9 +11370,9 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { }, .Array => { const frame_index = - try self.allocFrameIndex(FrameAlloc.initType(result_ty, self.target.*)); + try self.allocFrameIndex(FrameAlloc.initType(result_ty, mod)); const elem_ty = result_ty.childType(); - const elem_size = @intCast(u32, elem_ty.abiSize(self.target.*)); + const elem_size = @intCast(u32, elem_ty.abiSize(mod)); for (elements, 0..) |elem, elem_i| { const elem_mcv = try self.resolveInst(elem); @@ -11374,11 +11407,12 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { } fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.UnionInit, ty_pl.payload).data; const result: MCValue = result: { const union_ty = self.air.typeOfIndex(inst); - const layout = union_ty.unionGetLayout(self.target.*); + const layout = union_ty.unionGetLayout(mod); const src_ty = self.air.typeOf(extra.init); const src_mcv = try self.resolveInst(extra.init); @@ -11400,7 +11434,7 @@ fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void { const tag_val = Value.initPayload(&tag_pl.base); var tag_int_pl: Value.Payload.U64 = undefined; const tag_int_val = tag_val.enumToInt(tag_ty, &tag_int_pl); - const tag_int = tag_int_val.toUnsignedInt(self.target.*); + const tag_int = tag_int_val.toUnsignedInt(mod); const tag_off = if (layout.tag_align < layout.payload_align) @intCast(i32, layout.payload_size) else @@ -11424,6 +11458,7 @@ fn airPrefetch(self: *Self, inst: Air.Inst.Index) !void { } fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const pl_op = self.air.instructions.items(.data)[inst].pl_op; const extra = self.air.extraData(Air.Bin, pl_op.payload).data; const ty = self.air.typeOfIndex(inst); @@ -11466,14 +11501,14 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void { const mir_tag = if (@as( ?Mir.Inst.FixedTag, if (mem.eql(u2, &order, &.{ 1, 3, 2 }) or mem.eql(u2, &order, &.{ 3, 1, 2 })) - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Float => switch (ty.floatBits(self.target.*)) { 32 => .{ .v_ss, .fmadd132 }, 64 => .{ .v_sd, .fmadd132 }, 16, 80, 128 => null, else => unreachable, }, - .Vector => switch (ty.childType().zigTypeTag()) { + .Vector => switch (ty.childType().zigTypeTag(mod)) { .Float => switch (ty.childType().floatBits(self.target.*)) { 32 => switch (ty.vectorLen()) { 1 => .{ .v_ss, .fmadd132 }, @@ -11493,14 +11528,14 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void { else => unreachable, } else if (mem.eql(u2, &order, &.{ 2, 1, 3 }) or mem.eql(u2, &order, &.{ 1, 2, 3 })) - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Float => switch (ty.floatBits(self.target.*)) { 32 => .{ .v_ss, .fmadd213 }, 64 => .{ .v_sd, .fmadd213 }, 16, 80, 128 => null, else => unreachable, }, - .Vector => switch (ty.childType().zigTypeTag()) { + .Vector => switch 
(ty.childType().zigTypeTag(mod)) { .Float => switch (ty.childType().floatBits(self.target.*)) { 32 => switch (ty.vectorLen()) { 1 => .{ .v_ss, .fmadd213 }, @@ -11520,14 +11555,14 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void { else => unreachable, } else if (mem.eql(u2, &order, &.{ 2, 3, 1 }) or mem.eql(u2, &order, &.{ 3, 2, 1 })) - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Float => switch (ty.floatBits(self.target.*)) { 32 => .{ .v_ss, .fmadd231 }, 64 => .{ .v_sd, .fmadd231 }, 16, 80, 128 => null, else => unreachable, }, - .Vector => switch (ty.childType().zigTypeTag()) { + .Vector => switch (ty.childType().zigTypeTag(mod)) { .Float => switch (ty.childType().floatBits(self.target.*)) { 32 => switch (ty.vectorLen()) { 1 => .{ .v_ss, .fmadd231 }, @@ -11555,7 +11590,7 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void { var mops: [3]MCValue = undefined; for (order, mcvs) |mop_index, mcv| mops[mop_index - 1] = mcv; - const abi_size = @intCast(u32, ty.abiSize(self.target.*)); + const abi_size = @intCast(u32, ty.abiSize(mod)); const mop1_reg = registerAlias(mops[0].getReg().?, abi_size); const mop2_reg = registerAlias(mops[1].getReg().?, abi_size); if (mops[2].isRegister()) try self.asmRegisterRegisterRegister( @@ -11573,10 +11608,11 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void { } fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue { + const mod = self.bin_file.options.module.?; const ty = self.air.typeOf(ref); // If the type has no codegen bits, no need to store it. - if (!ty.hasRuntimeBitsIgnoreComptime()) return .none; + if (!ty.hasRuntimeBitsIgnoreComptime(mod)) return .none; if (Air.refToIndex(ref)) |inst| { const mcv = switch (self.air.instructions.items(.tag)[inst]) { @@ -11584,7 +11620,7 @@ fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue { const gop = try self.const_tracking.getOrPut(self.gpa, inst); if (!gop.found_existing) gop.value_ptr.* = InstTracking.init(try self.genTypedValue(.{ .ty = ty, - .val = self.air.value(ref).?, + .val = self.air.value(ref, mod).?, })); break :tracking gop.value_ptr; }, @@ -11597,7 +11633,7 @@ fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue { } } - return self.genTypedValue(.{ .ty = ty, .val = self.air.value(ref).? }); + return self.genTypedValue(.{ .ty = ty, .val = self.air.value(ref, mod).? }); } fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) *InstTracking { @@ -11670,6 +11706,7 @@ fn resolveCallingConventionValues( var_args: []const Air.Inst.Ref, stack_frame_base: FrameIndex, ) !CallMCValues { + const mod = self.bin_file.options.module.?; const cc = fn_ty.fnCallingConvention(); const param_len = fn_ty.fnParamLen(); const param_types = try self.gpa.alloc(Type, param_len + var_args.len); @@ -11702,21 +11739,21 @@ fn resolveCallingConventionValues( switch (self.target.os.tag) { .windows => { // Align the stack to 16bytes before allocating shadow stack space (if any). - result.stack_byte_count += @intCast(u31, 4 * Type.usize.abiSize(self.target.*)); + result.stack_byte_count += @intCast(u31, 4 * Type.usize.abiSize(mod)); }, else => {}, } // Return values - if (ret_ty.zigTypeTag() == .NoReturn) { + if (ret_ty.zigTypeTag(mod) == .NoReturn) { result.return_value = InstTracking.init(.unreach); - } else if (!ret_ty.hasRuntimeBitsIgnoreComptime()) { + } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) { // TODO: is this even possible for C calling convention? 
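// Note that zero-bit return types still get a tracked value (.none) here,
// presumably so that later result handling can treat runtime and
// comptime-only returns uniformly.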
result.return_value = InstTracking.init(.none); } else { const classes = switch (self.target.os.tag) { - .windows => &[1]abi.Class{abi.classifyWindows(ret_ty, self.target.*)}, - else => mem.sliceTo(&abi.classifySystemV(ret_ty, self.target.*, .ret), .none), + .windows => &[1]abi.Class{abi.classifyWindows(ret_ty, mod)}, + else => mem.sliceTo(&abi.classifySystemV(ret_ty, mod, .ret), .none), }; if (classes.len > 1) { return self.fail("TODO handle multiple classes per type", .{}); @@ -11725,7 +11762,7 @@ fn resolveCallingConventionValues( result.return_value = switch (classes[0]) { .integer => InstTracking.init(.{ .register = registerAlias( ret_reg, - @intCast(u32, ret_ty.abiSize(self.target.*)), + @intCast(u32, ret_ty.abiSize(mod)), ) }), .float, .sse => InstTracking.init(.{ .register = .xmm0 }), .memory => ret: { @@ -11744,11 +11781,11 @@ fn resolveCallingConventionValues( // Input params for (param_types, result.args) |ty, *arg| { - assert(ty.hasRuntimeBitsIgnoreComptime()); + assert(ty.hasRuntimeBitsIgnoreComptime(mod)); const classes = switch (self.target.os.tag) { - .windows => &[1]abi.Class{abi.classifyWindows(ty, self.target.*)}, - else => mem.sliceTo(&abi.classifySystemV(ty, self.target.*, .arg), .none), + .windows => &[1]abi.Class{abi.classifyWindows(ty, mod)}, + else => mem.sliceTo(&abi.classifySystemV(ty, mod, .arg), .none), }; if (classes.len > 1) { return self.fail("TODO handle multiple classes per type", .{}); @@ -11783,8 +11820,8 @@ fn resolveCallingConventionValues( }), } - const param_size = @intCast(u31, ty.abiSize(self.target.*)); - const param_align = @intCast(u31, ty.abiAlignment(self.target.*)); + const param_size = @intCast(u31, ty.abiSize(mod)); + const param_align = @intCast(u31, ty.abiAlignment(mod)); result.stack_byte_count = mem.alignForwardGeneric(u31, result.stack_byte_count, param_align); arg.* = .{ .load_frame = .{ @@ -11798,13 +11835,13 @@ fn resolveCallingConventionValues( result.stack_align = 16; // Return values - if (ret_ty.zigTypeTag() == .NoReturn) { + if (ret_ty.zigTypeTag(mod) == .NoReturn) { result.return_value = InstTracking.init(.unreach); - } else if (!ret_ty.hasRuntimeBitsIgnoreComptime()) { + } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) { result.return_value = InstTracking.init(.none); } else { const ret_reg = abi.getCAbiIntReturnRegs(self.target.*)[0]; - const ret_ty_size = @intCast(u31, ret_ty.abiSize(self.target.*)); + const ret_ty_size = @intCast(u31, ret_ty.abiSize(mod)); if (ret_ty_size <= 8 and !ret_ty.isRuntimeFloat()) { const aliased_reg = registerAlias(ret_reg, ret_ty_size); result.return_value = .{ .short = .{ .register = aliased_reg }, .long = .none }; @@ -11819,12 +11856,12 @@ fn resolveCallingConventionValues( // Input params for (param_types, result.args) |ty, *arg| { - if (!ty.hasRuntimeBitsIgnoreComptime()) { + if (!ty.hasRuntimeBitsIgnoreComptime(mod)) { arg.* = .none; continue; } - const param_size = @intCast(u31, ty.abiSize(self.target.*)); - const param_align = @intCast(u31, ty.abiAlignment(self.target.*)); + const param_size = @intCast(u31, ty.abiSize(mod)); + const param_align = @intCast(u31, ty.abiAlignment(mod)); result.stack_byte_count = mem.alignForwardGeneric(u31, result.stack_byte_count, param_align); arg.* = .{ .load_frame = .{ @@ -11908,9 +11945,10 @@ fn registerAlias(reg: Register, size_bytes: u32) Register { /// Truncates the value in the register in place. /// Clobbers any remaining bits. 
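The truncateRegister hunk below gets the same treatment, with int_info now coming from the Module. As a rough model of what the helper computes, assuming the unsigned case and a width below 64 bits (the names and mask value here are illustrative, not taken from the patch):

    // For an unsigned 13-bit integer living in a 64-bit register, truncation
    // amounts to clearing the 51 undefined high bits with an AND mask:
    const bits: u16 = 13;
    const mask = (@as(u64, 1) << @intCast(u6, bits)) - 1; // 0x1fff
    // value & mask keeps only the significant bits; the signed case
    // sign-extends instead of masking.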
fn truncateRegister(self: *Self, ty: Type, reg: Register) !void { - const int_info = if (ty.isAbiInt()) ty.intInfo(self.target.*) else std.builtin.Type.Int{ + const mod = self.bin_file.options.module.?; + const int_info = if (ty.isAbiInt(mod)) ty.intInfo(mod) else std.builtin.Type.Int{ .signedness = .unsigned, - .bits = @intCast(u16, ty.bitSize(self.target.*)), + .bits = @intCast(u16, ty.bitSize(mod)), }; const max_reg_bit_width = Register.rax.bitSize(); switch (int_info.signedness) { @@ -11953,8 +11991,9 @@ fn truncateRegister(self: *Self, ty: Type, reg: Register) !void { } fn regBitSize(self: *Self, ty: Type) u64 { - const abi_size = ty.abiSize(self.target.*); - return switch (ty.zigTypeTag()) { + const mod = self.bin_file.options.module.?; + const abi_size = ty.abiSize(mod); + return switch (ty.zigTypeTag(mod)) { else => switch (abi_size) { 1 => 8, 2 => 16, @@ -11971,7 +12010,8 @@ fn regBitSize(self: *Self, ty: Type) u64 { } fn regExtraBits(self: *Self, ty: Type) u64 { - return self.regBitSize(ty) - ty.bitSize(self.target.*); + const mod = self.bin_file.options.module.?; + return self.regBitSize(ty) - ty.bitSize(mod); } fn hasFeature(self: *Self, feature: Target.x86.Feature) bool { diff --git a/src/arch/x86_64/abi.zig b/src/arch/x86_64/abi.zig index e79424d6d8..c8d20c73fa 100644 --- a/src/arch/x86_64/abi.zig +++ b/src/arch/x86_64/abi.zig @@ -1,10 +1,3 @@ -const std = @import("std"); -const Type = @import("../../type.zig").Type; -const Target = std.Target; -const assert = std.debug.assert; -const Register = @import("bits.zig").Register; -const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager; - pub const Class = enum { integer, sse, @@ -19,7 +12,7 @@ pub const Class = enum { float_combine, }; -pub fn classifyWindows(ty: Type, target: Target) Class { +pub fn classifyWindows(ty: Type, mod: *const Module) Class { // https://docs.microsoft.com/en-gb/cpp/build/x64-calling-convention?view=vs-2017 // "There's a strict one-to-one correspondence between a function call's arguments // and the registers used for those arguments. Any argument that doesn't fit in 8 @@ -28,7 +21,7 @@ pub fn classifyWindows(ty: Type, target: Target) Class { // "All floating point operations are done using the 16 XMM registers." // "Structs and unions of size 8, 16, 32, or 64 bits, and __m64 types, are passed // as if they were integers of the same size." - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Pointer, .Int, .Bool, @@ -43,10 +36,10 @@ pub fn classifyWindows(ty: Type, target: Target) Class { .ErrorUnion, .AnyFrame, .Frame, - => switch (ty.abiSize(target)) { + => switch (ty.abiSize(mod)) { 0 => unreachable, 1, 2, 4, 8 => return .integer, - else => switch (ty.zigTypeTag()) { + else => switch (ty.zigTypeTag(mod)) { .Int => return .win_i128, .Struct, .Union => if (ty.containerLayout() == .Packed) { return .win_i128; @@ -75,13 +68,14 @@ pub const Context = enum { ret, arg, other }; /// There are a maximum of 8 possible return slots. Returned values are in /// the beginning of the array; unused slots are filled with .none. 
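classifySystemV below changes more than its Type queries: the signature swaps Target for *const Module, and the Target is now derived inside via mod.getTarget(). A hedged sketch of a call site, mirroring resolveCallingConventionValues above (ret_ty and the empty arms are placeholders):

    const classes = std.mem.sliceTo(&abi.classifySystemV(ret_ty, mod, .ret), .none);
    if (classes.len > 1) return self.fail("TODO handle multiple classes per type", .{});
    switch (classes[0]) {
        .integer => {}, // small scalar/aggregate: returned in rax
        .float, .sse => {}, // float data: returned in xmm0
        .memory => {}, // too large: caller supplies a hidden result pointer
        else => {},
    }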
-pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class { +pub fn classifySystemV(ty: Type, mod: *const Module, ctx: Context) [8]Class { + const target = mod.getTarget(); const memory_class = [_]Class{ .memory, .none, .none, .none, .none, .none, .none, .none, }; var result = [1]Class{.none} ** 8; - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Pointer => switch (ty.ptrSize()) { .Slice => { result[0] = .integer; @@ -94,7 +88,7 @@ pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class { }, }, .Int, .Enum, .ErrorSet => { - const bits = ty.intInfo(target).bits; + const bits = ty.intInfo(mod).bits; if (bits <= 64) { result[0] = .integer; return result; @@ -165,7 +159,7 @@ pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class { }, .Vector => { const elem_ty = ty.childType(); - const bits = elem_ty.bitSize(target) * ty.arrayLen(); + const bits = elem_ty.bitSize(mod) * ty.arrayLen(); if (bits <= 64) return .{ .sse, .none, .none, .none, .none, .none, .none, .none, @@ -204,7 +198,7 @@ pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class { return memory_class; }, .Optional => { - if (ty.isPtrLikeOptional()) { + if (ty.isPtrLikeOptional(mod)) { result[0] = .integer; return result; } @@ -215,7 +209,7 @@ pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class { // it contains unaligned fields, it has class MEMORY" // "If the size of the aggregate exceeds a single eightbyte, each is classified // separately.". - const ty_size = ty.abiSize(target); + const ty_size = ty.abiSize(mod); if (ty.containerLayout() == .Packed) { assert(ty_size <= 128); result[0] = .integer; @@ -230,12 +224,12 @@ pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class { const fields = ty.structFields(); for (fields.values()) |field| { if (field.abi_align != 0) { - if (field.abi_align < field.ty.abiAlignment(target)) { + if (field.abi_align < field.ty.abiAlignment(mod)) { return memory_class; } } - const field_size = field.ty.abiSize(target); - const field_class_array = classifySystemV(field.ty, target, .other); + const field_size = field.ty.abiSize(mod); + const field_class_array = classifySystemV(field.ty, mod, .other); const field_class = std.mem.sliceTo(&field_class_array, .none); if (byte_i + field_size <= 8) { // Combine this field with the previous one. @@ -334,7 +328,7 @@ pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class { // it contains unaligned fields, it has class MEMORY" // "If the size of the aggregate exceeds a single eightbyte, each is classified // separately.". - const ty_size = ty.abiSize(target); + const ty_size = ty.abiSize(mod); if (ty.containerLayout() == .Packed) { assert(ty_size <= 128); result[0] = .integer; @@ -347,12 +341,12 @@ pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class { const fields = ty.unionFields(); for (fields.values()) |field| { if (field.abi_align != 0) { - if (field.abi_align < field.ty.abiAlignment(target)) { + if (field.abi_align < field.ty.abiAlignment(mod)) { return memory_class; } } // Combine this field with the previous one. - const field_class = classifySystemV(field.ty, target, .other); + const field_class = classifySystemV(field.ty, mod, .other); for (&result, 0..) |*result_item, i| { const field_item = field_class[i]; // "If both classes are equal, this is the resulting class." 
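The merge loop that the hunk above enters follows the SysV eightbyte-merging rules quoted in its comments. Simplified into a hypothetical helper (the real loop also handles the x87 classes, which merge to memory):

    fn mergeClasses(a: Class, b: Class) Class {
        if (a == b) return a; // equal classes keep the class
        if (a == .none) return b; // NO_CLASS loses to anything
        if (b == .none) return a;
        if (a == .memory or b == .memory) return .memory; // MEMORY always wins
        if (a == .integer or b == .integer) return .integer; // then INTEGER
        return .sse; // all remaining combinations collapse to SSE
    }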
@@ -426,7 +420,7 @@ pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class { return result; }, .Array => { - const ty_size = ty.abiSize(target); + const ty_size = ty.abiSize(mod); if (ty_size <= 64) { result[0] = .integer; return result; @@ -527,10 +521,17 @@ pub const RegisterClass = struct { }; }; +const builtin = @import("builtin"); +const std = @import("std"); +const Target = std.Target; +const assert = std.debug.assert; const testing = std.testing; + const Module = @import("../../Module.zig"); +const Register = @import("bits.zig").Register; +const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager; +const Type = @import("../../type.zig").Type; const Value = @import("../../value.zig").Value; -const builtin = @import("builtin"); fn _field(comptime tag: Type.Tag, offset: u32) Module.Struct.Field { return .{ @@ -541,34 +542,3 @@ fn _field(comptime tag: Type.Tag, offset: u32) Module.Struct.Field { .is_comptime = false, }; } - -test "C_C_D" { - var fields = Module.Struct.Fields{}; - // const C_C_D = extern struct { v1: i8, v2: i8, v3: f64 }; - try fields.ensureTotalCapacity(testing.allocator, 3); - defer fields.deinit(testing.allocator); - fields.putAssumeCapacity("v1", _field(.i8, 0)); - fields.putAssumeCapacity("v2", _field(.i8, 1)); - fields.putAssumeCapacity("v3", _field(.f64, 4)); - - var C_C_D_struct = Module.Struct{ - .fields = fields, - .namespace = undefined, - .owner_decl = undefined, - .zir_index = undefined, - .layout = .Extern, - .status = .fully_resolved, - .known_non_opv = true, - .is_tuple = false, - }; - var C_C_D = Type.Payload.Struct{ .data = &C_C_D_struct }; - - try testing.expectEqual( - [_]Class{ .integer, .sse, .none, .none, .none, .none, .none, .none }, - classifySystemV(Type.initPayload(&C_C_D.base), builtin.target, .ret), - ); - try testing.expectEqual( - [_]Class{ .integer, .sse, .none, .none, .none, .none, .none, .none }, - classifySystemV(Type.initPayload(&C_C_D.base), builtin.target, .arg), - ); -} diff --git a/src/codegen.zig b/src/codegen.zig index adce183833..6846bebe6b 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -154,7 +154,7 @@ pub fn generateLazySymbol( } mem.writeInt(u32, code.items[offset..][0..4], @intCast(u32, code.items.len), endian); return Result.ok; - } else if (lazy_sym.ty.zigTypeTag() == .Enum) { + } else if (lazy_sym.ty.zigTypeTag(mod) == .Enum) { alignment.* = 1; for (lazy_sym.ty.enumFields().keys()) |tag_name| { try code.ensureUnusedCapacity(tag_name.len + 1); @@ -186,22 +186,22 @@ pub fn generateSymbol( typed_value.val = rt.data; } - const target = bin_file.options.target; + const mod = bin_file.options.module.?; + const target = mod.getTarget(); const endian = target.cpu.arch.endian(); - const mod = bin_file.options.module.?; log.debug("generateSymbol: ty = {}, val = {}", .{ typed_value.ty.fmt(mod), typed_value.val.fmtValue(typed_value.ty, mod), }); if (typed_value.val.isUndefDeep()) { - const abi_size = math.cast(usize, typed_value.ty.abiSize(target)) orelse return error.Overflow; + const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse return error.Overflow; try code.appendNTimes(0xaa, abi_size); return Result.ok; } - switch (typed_value.ty.zigTypeTag()) { + switch (typed_value.ty.zigTypeTag(mod)) { .Fn => { return Result{ .fail = try ErrorMsg.create( @@ -219,7 +219,7 @@ pub fn generateSymbol( 64 => writeFloat(f64, typed_value.val.toFloat(f64), target, endian, try code.addManyAsArray(8)), 80 => { writeFloat(f80, typed_value.val.toFloat(f80), target, endian, try 
code.addManyAsArray(10)); - const abi_size = math.cast(usize, typed_value.ty.abiSize(target)) orelse return error.Overflow; + const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse return error.Overflow; try code.appendNTimes(0, abi_size - 10); }, 128 => writeFloat(f128, typed_value.val.toFloat(f128), target, endian, try code.addManyAsArray(16)), @@ -242,7 +242,7 @@ pub fn generateSymbol( try code.ensureUnusedCapacity(bytes.len + 1); code.appendSliceAssumeCapacity(bytes); if (typed_value.ty.sentinel()) |sent_val| { - const byte = @intCast(u8, sent_val.toUnsignedInt(target)); + const byte = @intCast(u8, sent_val.toUnsignedInt(mod)); code.appendAssumeCapacity(byte); } return Result.ok; @@ -330,11 +330,11 @@ pub fn generateSymbol( .zero, .one, .int_u64, .int_big_positive => { switch (target.ptrBitWidth()) { 32 => { - const x = typed_value.val.toUnsignedInt(target); + const x = typed_value.val.toUnsignedInt(mod); mem.writeInt(u32, try code.addManyAsArray(4), @intCast(u32, x), endian); }, 64 => { - const x = typed_value.val.toUnsignedInt(target); + const x = typed_value.val.toUnsignedInt(mod); mem.writeInt(u64, try code.addManyAsArray(8), x, endian); }, else => unreachable, @@ -399,19 +399,19 @@ pub fn generateSymbol( }, }, .Int => { - const info = typed_value.ty.intInfo(target); + const info = typed_value.ty.intInfo(mod); if (info.bits <= 8) { const x: u8 = switch (info.signedness) { - .unsigned => @intCast(u8, typed_value.val.toUnsignedInt(target)), - .signed => @bitCast(u8, @intCast(i8, typed_value.val.toSignedInt(target))), + .unsigned => @intCast(u8, typed_value.val.toUnsignedInt(mod)), + .signed => @bitCast(u8, @intCast(i8, typed_value.val.toSignedInt(mod))), }; try code.append(x); return Result.ok; } if (info.bits > 64) { var bigint_buffer: Value.BigIntSpace = undefined; - const bigint = typed_value.val.toBigInt(&bigint_buffer, target); - const abi_size = math.cast(usize, typed_value.ty.abiSize(target)) orelse return error.Overflow; + const bigint = typed_value.val.toBigInt(&bigint_buffer, mod); + const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse return error.Overflow; const start = code.items.len; try code.resize(start + abi_size); bigint.writeTwosComplement(code.items[start..][0..abi_size], endian); @@ -420,25 +420,25 @@ pub fn generateSymbol( switch (info.signedness) { .unsigned => { if (info.bits <= 16) { - const x = @intCast(u16, typed_value.val.toUnsignedInt(target)); + const x = @intCast(u16, typed_value.val.toUnsignedInt(mod)); mem.writeInt(u16, try code.addManyAsArray(2), x, endian); } else if (info.bits <= 32) { - const x = @intCast(u32, typed_value.val.toUnsignedInt(target)); + const x = @intCast(u32, typed_value.val.toUnsignedInt(mod)); mem.writeInt(u32, try code.addManyAsArray(4), x, endian); } else { - const x = typed_value.val.toUnsignedInt(target); + const x = typed_value.val.toUnsignedInt(mod); mem.writeInt(u64, try code.addManyAsArray(8), x, endian); } }, .signed => { if (info.bits <= 16) { - const x = @intCast(i16, typed_value.val.toSignedInt(target)); + const x = @intCast(i16, typed_value.val.toSignedInt(mod)); mem.writeInt(i16, try code.addManyAsArray(2), x, endian); } else if (info.bits <= 32) { - const x = @intCast(i32, typed_value.val.toSignedInt(target)); + const x = @intCast(i32, typed_value.val.toSignedInt(mod)); mem.writeInt(i32, try code.addManyAsArray(4), x, endian); } else { - const x = typed_value.val.toSignedInt(target); + const x = typed_value.val.toSignedInt(mod); mem.writeInt(i64, try code.addManyAsArray(8), x, 
endian); } }, @@ -449,9 +449,9 @@ pub fn generateSymbol( var int_buffer: Value.Payload.U64 = undefined; const int_val = typed_value.enumToInt(&int_buffer); - const info = typed_value.ty.intInfo(target); + const info = typed_value.ty.intInfo(mod); if (info.bits <= 8) { - const x = @intCast(u8, int_val.toUnsignedInt(target)); + const x = @intCast(u8, int_val.toUnsignedInt(mod)); try code.append(x); return Result.ok; } @@ -468,25 +468,25 @@ pub fn generateSymbol( switch (info.signedness) { .unsigned => { if (info.bits <= 16) { - const x = @intCast(u16, int_val.toUnsignedInt(target)); + const x = @intCast(u16, int_val.toUnsignedInt(mod)); mem.writeInt(u16, try code.addManyAsArray(2), x, endian); } else if (info.bits <= 32) { - const x = @intCast(u32, int_val.toUnsignedInt(target)); + const x = @intCast(u32, int_val.toUnsignedInt(mod)); mem.writeInt(u32, try code.addManyAsArray(4), x, endian); } else { - const x = int_val.toUnsignedInt(target); + const x = int_val.toUnsignedInt(mod); mem.writeInt(u64, try code.addManyAsArray(8), x, endian); } }, .signed => { if (info.bits <= 16) { - const x = @intCast(i16, int_val.toSignedInt(target)); + const x = @intCast(i16, int_val.toSignedInt(mod)); mem.writeInt(i16, try code.addManyAsArray(2), x, endian); } else if (info.bits <= 32) { - const x = @intCast(i32, int_val.toSignedInt(target)); + const x = @intCast(i32, int_val.toSignedInt(mod)); mem.writeInt(i32, try code.addManyAsArray(4), x, endian); } else { - const x = int_val.toSignedInt(target); + const x = int_val.toSignedInt(mod); mem.writeInt(i64, try code.addManyAsArray(8), x, endian); } }, @@ -503,7 +503,7 @@ pub fn generateSymbol( const struct_obj = typed_value.ty.castTag(.@"struct").?.data; const fields = struct_obj.fields.values(); const field_vals = typed_value.val.castTag(.aggregate).?.data; - const abi_size = math.cast(usize, typed_value.ty.abiSize(target)) orelse return error.Overflow; + const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse return error.Overflow; const current_pos = code.items.len; try code.resize(current_pos + abi_size); var bits: u16 = 0; @@ -512,8 +512,8 @@ pub fn generateSymbol( const field_ty = fields[index].ty; // pointer may point to a decl which must be marked used // but can also result in a relocation. Therefore we handle those seperately. - if (field_ty.zigTypeTag() == .Pointer) { - const field_size = math.cast(usize, field_ty.abiSize(target)) orelse return error.Overflow; + if (field_ty.zigTypeTag(mod) == .Pointer) { + const field_size = math.cast(usize, field_ty.abiSize(mod)) orelse return error.Overflow; var tmp_list = try std.ArrayList(u8).initCapacity(code.allocator, field_size); defer tmp_list.deinit(); switch (try generateSymbol(bin_file, src_loc, .{ @@ -526,7 +526,7 @@ pub fn generateSymbol( } else { field_val.writeToPackedMemory(field_ty, mod, code.items[current_pos..], bits) catch unreachable; } - bits += @intCast(u16, field_ty.bitSize(target)); + bits += @intCast(u16, field_ty.bitSize(mod)); } return Result.ok; @@ -536,7 +536,7 @@ pub fn generateSymbol( const field_vals = typed_value.val.castTag(.aggregate).?.data; for (field_vals, 0..) 
|field_val, index| { const field_ty = typed_value.ty.structFieldType(index); - if (!field_ty.hasRuntimeBits()) continue; + if (!field_ty.hasRuntimeBits(mod)) continue; switch (try generateSymbol(bin_file, src_loc, .{ .ty = field_ty, @@ -548,7 +548,7 @@ pub fn generateSymbol( const unpadded_field_end = code.items.len - struct_begin; // Pad struct members if required - const padded_field_end = typed_value.ty.structFieldOffset(index + 1, target); + const padded_field_end = typed_value.ty.structFieldOffset(index + 1, mod); const padding = math.cast(usize, padded_field_end - unpadded_field_end) orelse return error.Overflow; if (padding > 0) { @@ -560,7 +560,7 @@ pub fn generateSymbol( }, .Union => { const union_obj = typed_value.val.castTag(.@"union").?.data; - const layout = typed_value.ty.unionGetLayout(target); + const layout = typed_value.ty.unionGetLayout(mod); if (layout.payload_size == 0) { return generateSymbol(bin_file, src_loc, .{ @@ -584,7 +584,7 @@ pub fn generateSymbol( const field_index = typed_value.ty.unionTagFieldIndex(union_obj.tag, mod).?; assert(union_ty.haveFieldTypes()); const field_ty = union_ty.fields.values()[field_index].ty; - if (!field_ty.hasRuntimeBits()) { + if (!field_ty.hasRuntimeBits(mod)) { try code.writer().writeByteNTimes(0xaa, math.cast(usize, layout.payload_size) orelse return error.Overflow); } else { switch (try generateSymbol(bin_file, src_loc, .{ @@ -595,7 +595,7 @@ pub fn generateSymbol( .fail => |em| return Result{ .fail = em }, } - const padding = math.cast(usize, layout.payload_size - field_ty.abiSize(target)) orelse return error.Overflow; + const padding = math.cast(usize, layout.payload_size - field_ty.abiSize(mod)) orelse return error.Overflow; if (padding > 0) { try code.writer().writeByteNTimes(0, padding); } @@ -620,15 +620,15 @@ pub fn generateSymbol( .Optional => { var opt_buf: Type.Payload.ElemType = undefined; const payload_type = typed_value.ty.optionalChild(&opt_buf); - const is_pl = !typed_value.val.isNull(); - const abi_size = math.cast(usize, typed_value.ty.abiSize(target)) orelse return error.Overflow; + const is_pl = !typed_value.val.isNull(mod); + const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse return error.Overflow; - if (!payload_type.hasRuntimeBits()) { + if (!payload_type.hasRuntimeBits(mod)) { try code.writer().writeByteNTimes(@boolToInt(is_pl), abi_size); return Result.ok; } - if (typed_value.ty.optionalReprIsPayload()) { + if (typed_value.ty.optionalReprIsPayload(mod)) { if (typed_value.val.castTag(.opt_payload)) |payload| { switch (try generateSymbol(bin_file, src_loc, .{ .ty = payload_type, @@ -637,7 +637,7 @@ pub fn generateSymbol( .ok => {}, .fail => |em| return Result{ .fail = em }, } - } else if (!typed_value.val.isNull()) { + } else if (!typed_value.val.isNull(mod)) { switch (try generateSymbol(bin_file, src_loc, .{ .ty = payload_type, .val = typed_value.val, @@ -652,7 +652,7 @@ pub fn generateSymbol( return Result.ok; } - const padding = abi_size - (math.cast(usize, payload_type.abiSize(target)) orelse return error.Overflow) - 1; + const padding = abi_size - (math.cast(usize, payload_type.abiSize(mod)) orelse return error.Overflow) - 1; const value = if (typed_value.val.castTag(.opt_payload)) |payload| payload.data else Value.initTag(.undef); switch (try generateSymbol(bin_file, src_loc, .{ .ty = payload_type, @@ -671,7 +671,7 @@ pub fn generateSymbol( const payload_ty = typed_value.ty.errorUnionPayload(); const is_payload = typed_value.val.errorUnionIsPayload(); - if 
(!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { const err_val = if (is_payload) Value.initTag(.zero) else typed_value.val; return generateSymbol(bin_file, src_loc, .{ .ty = error_ty, @@ -679,9 +679,9 @@ pub fn generateSymbol( }, code, debug_output, reloc_info); } - const payload_align = payload_ty.abiAlignment(target); - const error_align = Type.anyerror.abiAlignment(target); - const abi_align = typed_value.ty.abiAlignment(target); + const payload_align = payload_ty.abiAlignment(mod); + const error_align = Type.anyerror.abiAlignment(mod); + const abi_align = typed_value.ty.abiAlignment(mod); // error value first when its type is larger than the error union's payload if (error_align > payload_align) { @@ -743,7 +743,7 @@ pub fn generateSymbol( try code.writer().writeInt(u32, kv.value, endian); }, else => { - try code.writer().writeByteNTimes(0, @intCast(usize, Type.anyerror.abiSize(target))); + try code.writer().writeByteNTimes(0, @intCast(usize, Type.anyerror.abiSize(mod))); }, } return Result.ok; @@ -752,7 +752,7 @@ pub fn generateSymbol( .bytes => { const bytes = typed_value.val.castTag(.bytes).?.data; const len = math.cast(usize, typed_value.ty.arrayLen()) orelse return error.Overflow; - const padding = math.cast(usize, typed_value.ty.abiSize(target) - len) orelse + const padding = math.cast(usize, typed_value.ty.abiSize(mod) - len) orelse return error.Overflow; try code.ensureUnusedCapacity(len + padding); code.appendSliceAssumeCapacity(bytes[0..len]); @@ -763,8 +763,8 @@ pub fn generateSymbol( const elem_vals = typed_value.val.castTag(.aggregate).?.data; const elem_ty = typed_value.ty.elemType(); const len = math.cast(usize, typed_value.ty.arrayLen()) orelse return error.Overflow; - const padding = math.cast(usize, typed_value.ty.abiSize(target) - - (math.divCeil(u64, elem_ty.bitSize(target) * len, 8) catch |err| switch (err) { + const padding = math.cast(usize, typed_value.ty.abiSize(mod) - + (math.divCeil(u64, elem_ty.bitSize(mod) * len, 8) catch |err| switch (err) { error.DivisionByZero => unreachable, else => |e| return e, })) orelse return error.Overflow; @@ -784,8 +784,8 @@ pub fn generateSymbol( const array = typed_value.val.castTag(.repeated).?.data; const elem_ty = typed_value.ty.childType(); const len = typed_value.ty.arrayLen(); - const padding = math.cast(usize, typed_value.ty.abiSize(target) - - (math.divCeil(u64, elem_ty.bitSize(target) * len, 8) catch |err| switch (err) { + const padding = math.cast(usize, typed_value.ty.abiSize(mod) - + (math.divCeil(u64, elem_ty.bitSize(mod) * len, 8) catch |err| switch (err) { error.DivisionByZero => unreachable, else => |e| return e, })) orelse return error.Overflow; @@ -805,7 +805,7 @@ pub fn generateSymbol( .str_lit => { const str_lit = typed_value.val.castTag(.str_lit).?.data; const bytes = mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; - const padding = math.cast(usize, typed_value.ty.abiSize(target) - str_lit.len) orelse + const padding = math.cast(usize, typed_value.ty.abiSize(mod) - str_lit.len) orelse return error.Overflow; try code.ensureUnusedCapacity(str_lit.len + padding); code.appendSliceAssumeCapacity(bytes); @@ -832,7 +832,7 @@ fn lowerParentPtr( debug_output: DebugInfoOutput, reloc_info: RelocInfo, ) CodeGenError!Result { - const target = bin_file.options.target; + const mod = bin_file.options.module.?; switch (parent_ptr.tag()) { .field_ptr => { const field_ptr = parent_ptr.castTag(.field_ptr).?.data; @@ -843,19 +843,19 @@ fn lowerParentPtr( 
field_ptr.container_ptr, code, debug_output, - reloc_info.offset(@intCast(u32, switch (field_ptr.container_ty.zigTypeTag()) { + reloc_info.offset(@intCast(u32, switch (field_ptr.container_ty.zigTypeTag(mod)) { .Pointer => offset: { assert(field_ptr.container_ty.isSlice()); var buf: Type.SlicePtrFieldTypeBuffer = undefined; break :offset switch (field_ptr.field_index) { 0 => 0, - 1 => field_ptr.container_ty.slicePtrFieldType(&buf).abiSize(target), + 1 => field_ptr.container_ty.slicePtrFieldType(&buf).abiSize(mod), else => unreachable, }; }, .Struct, .Union => field_ptr.container_ty.structFieldOffset( field_ptr.field_index, - target, + mod, ), else => return Result{ .fail = try ErrorMsg.create( bin_file.allocator, @@ -875,7 +875,7 @@ fn lowerParentPtr( elem_ptr.array_ptr, code, debug_output, - reloc_info.offset(@intCast(u32, elem_ptr.index * elem_ptr.elem_ty.abiSize(target))), + reloc_info.offset(@intCast(u32, elem_ptr.index * elem_ptr.elem_ty.abiSize(mod))), ); }, .opt_payload_ptr => { @@ -900,7 +900,7 @@ fn lowerParentPtr( eu_payload_ptr.container_ptr, code, debug_output, - reloc_info.offset(@intCast(u32, errUnionPayloadOffset(pl_ty, target))), + reloc_info.offset(@intCast(u32, errUnionPayloadOffset(pl_ty, mod))), ); }, .variable, .decl_ref, .decl_ref_mut => |tag| return lowerDeclRef( @@ -945,7 +945,7 @@ fn lowerDeclRef( reloc_info: RelocInfo, ) CodeGenError!Result { const target = bin_file.options.target; - const module = bin_file.options.module.?; + const mod = bin_file.options.module.?; if (typed_value.ty.isSlice()) { // generate ptr var buf: Type.SlicePtrFieldTypeBuffer = undefined; @@ -961,7 +961,7 @@ fn lowerDeclRef( // generate length var slice_len: Value.Payload.U64 = .{ .base = .{ .tag = .int_u64 }, - .data = typed_value.val.sliceLen(module), + .data = typed_value.val.sliceLen(mod), }; switch (try generateSymbol(bin_file, src_loc, .{ .ty = Type.usize, @@ -975,14 +975,14 @@ fn lowerDeclRef( } const ptr_width = target.ptrBitWidth(); - const decl = module.declPtr(decl_index); - const is_fn_body = decl.ty.zigTypeTag() == .Fn; - if (!is_fn_body and !decl.ty.hasRuntimeBits()) { + const decl = mod.declPtr(decl_index); + const is_fn_body = decl.ty.zigTypeTag(mod) == .Fn; + if (!is_fn_body and !decl.ty.hasRuntimeBits(mod)) { try code.writer().writeByteNTimes(0xaa, @divExact(ptr_width, 8)); return Result.ok; } - module.markDeclAlive(decl); + mod.markDeclAlive(decl); const vaddr = try bin_file.getDeclVAddr(decl_index, .{ .parent_atom_index = reloc_info.parent_atom_index, @@ -1059,16 +1059,16 @@ fn genDeclRef( tv: TypedValue, decl_index: Module.Decl.Index, ) CodeGenError!GenResult { - const module = bin_file.options.module.?; - log.debug("genDeclRef: ty = {}, val = {}", .{ tv.ty.fmt(module), tv.val.fmtValue(tv.ty, module) }); + const mod = bin_file.options.module.?; + log.debug("genDeclRef: ty = {}, val = {}", .{ tv.ty.fmt(mod), tv.val.fmtValue(tv.ty, mod) }); const target = bin_file.options.target; const ptr_bits = target.ptrBitWidth(); const ptr_bytes: u64 = @divExact(ptr_bits, 8); - const decl = module.declPtr(decl_index); + const decl = mod.declPtr(decl_index); - if (!decl.ty.isFnOrHasRuntimeBitsIgnoreComptime()) { + if (!decl.ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) { const imm: u64 = switch (ptr_bytes) { 1 => 0xaa, 2 => 0xaaaa, @@ -1080,20 +1080,20 @@ fn genDeclRef( } // TODO this feels clunky. Perhaps we should check for it in `genTypedValue`? 
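// Hedged illustration of the two early-outs below (names as in this hunk,
// nothing added): a pointer to a generic function, or a pointer whose
// pointee has no runtime bits, has no address to relocate against, so
// genDeclRef substitutes the type's ABI alignment as a dummy immediate:
//
//   fn generic(comptime T: type) T { ... }  // ptr-to-generic-fn case
//   var v: void = {};                       // zero-bit pointee case
//
// Either way the result is GenResult.mcv(.{ .immediate = ...abiAlignment(mod) })
// rather than a relocation against the decl.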
- if (tv.ty.castPtrToFn()) |fn_ty| { + if (tv.ty.castPtrToFn(mod)) |fn_ty| { if (fn_ty.fnInfo().is_generic) { - return GenResult.mcv(.{ .immediate = fn_ty.abiAlignment(target) }); + return GenResult.mcv(.{ .immediate = fn_ty.abiAlignment(mod) }); } - } else if (tv.ty.zigTypeTag() == .Pointer) { - const elem_ty = tv.ty.elemType2(); - if (!elem_ty.hasRuntimeBits()) { - return GenResult.mcv(.{ .immediate = elem_ty.abiAlignment(target) }); + } else if (tv.ty.zigTypeTag(mod) == .Pointer) { + const elem_ty = tv.ty.elemType2(mod); + if (!elem_ty.hasRuntimeBits(mod)) { + return GenResult.mcv(.{ .immediate = elem_ty.abiAlignment(mod) }); } } - module.markDeclAlive(decl); + mod.markDeclAlive(decl); - const is_threadlocal = tv.val.isPtrToThreadLocal(module) and !bin_file.options.single_threaded; + const is_threadlocal = tv.val.isPtrToThreadLocal(mod) and !bin_file.options.single_threaded; if (bin_file.cast(link.File.Elf)) |elf_file| { const atom_index = try elf_file.getOrCreateAtomForDecl(decl_index); @@ -1186,7 +1186,7 @@ pub fn genTypedValue( } } - switch (typed_value.ty.zigTypeTag()) { + switch (typed_value.ty.zigTypeTag(mod)) { .Void => return GenResult.mcv(.none), .Pointer => switch (typed_value.ty.ptrSize()) { .Slice => {}, @@ -1196,18 +1196,18 @@ pub fn genTypedValue( return GenResult.mcv(.{ .immediate = 0 }); }, .int_u64 => { - return GenResult.mcv(.{ .immediate = typed_value.val.toUnsignedInt(target) }); + return GenResult.mcv(.{ .immediate = typed_value.val.toUnsignedInt(mod) }); }, else => {}, } }, }, .Int => { - const info = typed_value.ty.intInfo(target); + const info = typed_value.ty.intInfo(mod); if (info.bits <= ptr_bits) { const unsigned = switch (info.signedness) { - .signed => @bitCast(u64, typed_value.val.toSignedInt(target)), - .unsigned => typed_value.val.toUnsignedInt(target), + .signed => @bitCast(u64, typed_value.val.toSignedInt(mod)), + .unsigned => typed_value.val.toUnsignedInt(mod), }; return GenResult.mcv(.{ .immediate = unsigned }); } @@ -1216,7 +1216,7 @@ pub fn genTypedValue( return GenResult.mcv(.{ .immediate = @boolToInt(typed_value.val.toBool()) }); }, .Optional => { - if (typed_value.ty.isPtrLikeOptional()) { + if (typed_value.ty.isPtrLikeOptional(mod)) { if (typed_value.val.tag() == .null_value) return GenResult.mcv(.{ .immediate = 0 }); var buf: Type.Payload.ElemType = undefined; @@ -1224,8 +1224,8 @@ pub fn genTypedValue( .ty = typed_value.ty.optionalChild(&buf), .val = if (typed_value.val.castTag(.opt_payload)) |pl| pl.data else typed_value.val, }, owner_decl_index); - } else if (typed_value.ty.abiSize(target) == 1) { - return GenResult.mcv(.{ .immediate = @boolToInt(!typed_value.val.isNull()) }); + } else if (typed_value.ty.abiSize(mod) == 1) { + return GenResult.mcv(.{ .immediate = @boolToInt(!typed_value.val.isNull(mod)) }); } }, .Enum => { @@ -1241,9 +1241,8 @@ pub fn genTypedValue( typed_value.ty.cast(Type.Payload.EnumFull).?.data.values; if (enum_values.count() != 0) { const tag_val = enum_values.keys()[field_index.data]; - var buf: Type.Payload.Bits = undefined; return genTypedValue(bin_file, src_loc, .{ - .ty = typed_value.ty.intTagType(&buf), + .ty = typed_value.ty.intTagType(), .val = tag_val, }, owner_decl_index); } else { @@ -1253,8 +1252,7 @@ pub fn genTypedValue( else => unreachable, } } else { - var int_tag_buffer: Type.Payload.Bits = undefined; - const int_tag_ty = typed_value.ty.intTagType(&int_tag_buffer); + const int_tag_ty = typed_value.ty.intTagType(); return genTypedValue(bin_file, src_loc, .{ .ty = int_tag_ty, .val = typed_value.val, @@ 
-1281,7 +1279,7 @@ pub fn genTypedValue( const payload_type = typed_value.ty.errorUnionPayload(); const is_pl = typed_value.val.errorUnionIsPayload(); - if (!payload_type.hasRuntimeBitsIgnoreComptime()) { + if (!payload_type.hasRuntimeBitsIgnoreComptime(mod)) { // We use the error type directly as the type. const err_val = if (!is_pl) typed_value.val else Value.initTag(.zero); return genTypedValue(bin_file, src_loc, .{ @@ -1306,23 +1304,23 @@ pub fn genTypedValue( return genUnnamedConst(bin_file, src_loc, typed_value, owner_decl_index); } -pub fn errUnionPayloadOffset(payload_ty: Type, target: std.Target) u64 { - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) return 0; - const payload_align = payload_ty.abiAlignment(target); - const error_align = Type.anyerror.abiAlignment(target); - if (payload_align >= error_align or !payload_ty.hasRuntimeBitsIgnoreComptime()) { +pub fn errUnionPayloadOffset(payload_ty: Type, mod: *const Module) u64 { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return 0; + const payload_align = payload_ty.abiAlignment(mod); + const error_align = Type.anyerror.abiAlignment(mod); + if (payload_align >= error_align or !payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return 0; } else { - return mem.alignForwardGeneric(u64, Type.anyerror.abiSize(target), payload_align); + return mem.alignForwardGeneric(u64, Type.anyerror.abiSize(mod), payload_align); } } -pub fn errUnionErrorOffset(payload_ty: Type, target: std.Target) u64 { - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) return 0; - const payload_align = payload_ty.abiAlignment(target); - const error_align = Type.anyerror.abiAlignment(target); - if (payload_align >= error_align and payload_ty.hasRuntimeBitsIgnoreComptime()) { - return mem.alignForwardGeneric(u64, payload_ty.abiSize(target), error_align); +pub fn errUnionErrorOffset(payload_ty: Type, mod: *const Module) u64 { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return 0; + const payload_align = payload_ty.abiAlignment(mod); + const error_align = Type.anyerror.abiAlignment(mod); + if (payload_align >= error_align and payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + return mem.alignForwardGeneric(u64, payload_ty.abiSize(mod), error_align); } else { return 0; } diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 86b74b1429..da040a6fbb 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -16,6 +16,7 @@ const trace = @import("../tracy.zig").trace; const LazySrcLoc = Module.LazySrcLoc; const Air = @import("../Air.zig"); const Liveness = @import("../Liveness.zig"); +const InternPool = @import("../InternPool.zig"); const BigIntLimb = std.math.big.Limb; const BigInt = std.math.big.int; @@ -285,10 +286,11 @@ pub const Function = struct { const gop = try f.value_map.getOrPut(inst); if (gop.found_existing) return gop.value_ptr.*; - const val = f.air.value(ref).?; + const mod = f.object.dg.module; + const val = f.air.value(ref, mod).?; const ty = f.air.typeOf(ref); - const result: CValue = if (lowersToArray(ty, f.object.dg.module.getTarget())) result: { + const result: CValue = if (lowersToArray(ty, mod)) result: { const writer = f.object.code_header.writer(); const alignment = 0; const decl_c_value = try f.allocLocalValue(ty, alignment); @@ -318,11 +320,11 @@ pub const Function = struct { /// those which go into `allocs`. This function does not add the resulting local into `allocs`; /// that responsibility lies with the caller. 
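/// A hedged reading of the convention used by callers in this file: passing
/// `alignment == 0` requests the type's natural alignment, and the requested
/// and ABI alignments are recorded together via
/// `CType.AlignAs.init(alignment, ty.abiAlignment(mod))` so that rendering
/// can emit an explicit alignment annotation (e.g. zig_under_align) only
/// when the two differ.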
fn allocLocalValue(f: *Function, ty: Type, alignment: u32) !CValue { + const mod = f.object.dg.module; const gpa = f.object.dg.gpa; - const target = f.object.dg.module.getTarget(); try f.locals.append(gpa, .{ .cty_idx = try f.typeToIndex(ty, .complete), - .alignas = CType.AlignAs.init(alignment, ty.abiAlignment(target)), + .alignas = CType.AlignAs.init(alignment, ty.abiAlignment(mod)), }); return .{ .new_local = @intCast(LocalIndex, f.locals.items.len - 1) }; } @@ -336,10 +338,10 @@ pub const Function = struct { /// Only allocates the local; does not print anything. Will attempt to re-use locals, so should /// not be used for persistent locals (i.e. those in `allocs`). fn allocAlignedLocal(f: *Function, ty: Type, _: CQualifiers, alignment: u32) !CValue { - const target = f.object.dg.module.getTarget(); + const mod = f.object.dg.module; if (f.free_locals_map.getPtr(.{ .cty_idx = try f.typeToIndex(ty, .complete), - .alignas = CType.AlignAs.init(alignment, ty.abiAlignment(target)), + .alignas = CType.AlignAs.init(alignment, ty.abiAlignment(mod)), })) |locals_list| { if (locals_list.popOrNull()) |local_entry| { return .{ .new_local = local_entry.key }; @@ -352,8 +354,9 @@ pub const Function = struct { fn writeCValue(f: *Function, w: anytype, c_value: CValue, location: ValueRenderLocation) !void { switch (c_value) { .constant => |inst| { + const mod = f.object.dg.module; const ty = f.air.typeOf(inst); - const val = f.air.value(inst).?; + const val = f.air.value(inst, mod).?; return f.object.dg.renderValue(w, ty, val, location); }, .undef => |ty| return f.object.dg.renderValue(w, ty, Value.undef, location), @@ -364,8 +367,9 @@ pub const Function = struct { fn writeCValueDeref(f: *Function, w: anytype, c_value: CValue) !void { switch (c_value) { .constant => |inst| { + const mod = f.object.dg.module; const ty = f.air.typeOf(inst); - const val = f.air.value(inst).?; + const val = f.air.value(inst, mod).?; try w.writeAll("(*"); try f.object.dg.renderValue(w, ty, val, .Other); return w.writeByte(')'); @@ -377,8 +381,9 @@ pub const Function = struct { fn writeCValueMember(f: *Function, w: anytype, c_value: CValue, member: CValue) !void { switch (c_value) { .constant => |inst| { + const mod = f.object.dg.module; const ty = f.air.typeOf(inst); - const val = f.air.value(inst).?; + const val = f.air.value(inst, mod).?; try f.object.dg.renderValue(w, ty, val, .Other); try w.writeByte('.'); return f.writeCValue(w, member, .Other); @@ -390,8 +395,9 @@ pub const Function = struct { fn writeCValueDerefMember(f: *Function, w: anytype, c_value: CValue, member: CValue) !void { switch (c_value) { .constant => |inst| { + const mod = f.object.dg.module; const ty = f.air.typeOf(inst); - const val = f.air.value(inst).?; + const val = f.air.value(inst, mod).?; try w.writeByte('('); try f.object.dg.renderValue(w, ty, val, .Other); try w.writeAll(")->"); @@ -522,11 +528,12 @@ pub const DeclGen = struct { decl_index: Decl.Index, location: ValueRenderLocation, ) error{ OutOfMemory, AnalysisFail }!void { - const decl = dg.module.declPtr(decl_index); + const mod = dg.module; + const decl = mod.declPtr(decl_index); assert(decl.has_tv); // Render an undefined pointer if we have a pointer to a zero-bit or comptime type. 
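// Sketch of the case guarded below (no new names introduced): a decl of
// zero-bit or comptime-only type owns no storage in the emitted C, so a
// runtime pointer to it cannot name a real address; rendering
// `.{ .undef = ty }` keeps the surrounding initializer well-formed without
// referencing the decl.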
- if (ty.isPtrAtRuntime() and !decl.ty.isFnOrHasRuntimeBits()) { + if (ty.isPtrAtRuntime(mod) and !decl.ty.isFnOrHasRuntimeBits(mod)) { return dg.writeCValue(writer, .{ .undef = ty }); } @@ -553,7 +560,7 @@ pub const DeclGen = struct { var len_pl: Value.Payload.U64 = .{ .base = .{ .tag = .int_u64 }, - .data = val.sliceLen(dg.module), + .data = val.sliceLen(mod), }; const len_val = Value.initPayload(&len_pl.base); @@ -568,7 +575,7 @@ pub const DeclGen = struct { // them). The analysis until now should ensure that the C function // pointers are compatible. If they are not, then there is a bug // somewhere and we should let the C compiler tell us about it. - const need_typecast = if (ty.castPtrToFn()) |_| false else !ty.eql(decl.ty, dg.module); + const need_typecast = if (ty.castPtrToFn(mod)) |_| false else !ty.eql(decl.ty, mod); if (need_typecast) { try writer.writeAll("(("); try dg.renderType(writer, ty); @@ -584,6 +591,8 @@ pub const DeclGen = struct { // // Used for .elem_ptr, .field_ptr, .opt_payload_ptr, .eu_payload_ptr fn renderParentPtr(dg: *DeclGen, writer: anytype, ptr_val: Value, ptr_ty: Type, location: ValueRenderLocation) error{ OutOfMemory, AnalysisFail }!void { + const mod = dg.module; + if (!ptr_ty.isSlice()) { try writer.writeByte('('); try dg.renderType(writer, ptr_ty); @@ -601,7 +610,6 @@ pub const DeclGen = struct { try dg.renderDeclValue(writer, ptr_ty, ptr_val, decl_index, location); }, .field_ptr => { - const target = dg.module.getTarget(); const field_ptr = ptr_val.castTag(.field_ptr).?.data; // Ensure complete type definition is visible before accessing fields. @@ -615,7 +623,7 @@ pub const DeclGen = struct { field_ptr.container_ty, ptr_ty, @intCast(u32, field_ptr.field_index), - target, + mod, )) { .begin => try dg.renderParentPtr( writer, @@ -714,19 +722,20 @@ pub const DeclGen = struct { if (val.castTag(.runtime_value)) |rt| { val = rt.data; } - const target = dg.module.getTarget(); + const mod = dg.module; + const target = mod.getTarget(); const initializer_type: ValueRenderLocation = switch (location) { .StaticInitializer => .StaticInitializer, else => .Initializer, }; - const safety_on = switch (dg.module.optimizeMode()) { + const safety_on = switch (mod.optimizeMode()) { .Debug, .ReleaseSafe => true, .ReleaseFast, .ReleaseSmall => false, }; if (val.isUndefDeep()) { - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Bool => { if (safety_on) { return writer.writeAll("0xaa"); @@ -737,8 +746,8 @@ pub const DeclGen = struct { .Int, .Enum, .ErrorSet => return writer.print("{x}", .{try dg.fmtIntLiteral(ty, val, location)}), .Float => { const bits = ty.floatBits(target); - var repr_pl = Type.Payload.Bits{ .base = .{ .tag = .int_unsigned }, .data = bits }; - const repr_ty = Type.initPayload(&repr_pl.base); + // All unsigned ints matching float types are pre-allocated. 
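// Put concretely (an inference from the comment above): `bits` comes from
// ty.floatBits(target) and is one of 16/32/64/80/128, widths whose unsigned
// int types the module keeps pre-allocated, so mod.intType cannot fail here
// and `catch unreachable` is sound.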
+ const repr_ty = mod.intType(.unsigned, bits) catch unreachable; try writer.writeAll("zig_cast_"); try dg.renderTypeForBuiltinFnName(writer, ty); @@ -778,11 +787,11 @@ pub const DeclGen = struct { var opt_buf: Type.Payload.ElemType = undefined; const payload_ty = ty.optionalChild(&opt_buf); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return dg.renderValue(writer, Type.bool, val, location); } - if (ty.optionalReprIsPayload()) { + if (ty.optionalReprIsPayload(mod)) { return dg.renderValue(writer, payload_ty, val, location); } @@ -811,7 +820,7 @@ pub const DeclGen = struct { for (0..ty.structFieldCount()) |field_i| { if (ty.structFieldIsComptime(field_i)) continue; const field_ty = ty.structFieldType(field_i); - if (!field_ty.hasRuntimeBits()) continue; + if (!field_ty.hasRuntimeBits(mod)) continue; if (!empty) try writer.writeByte(','); try dg.renderValue(writer, field_ty, val, initializer_type); @@ -832,17 +841,17 @@ pub const DeclGen = struct { try writer.writeByte('{'); if (ty.unionTagTypeSafety()) |tag_ty| { - const layout = ty.unionGetLayout(target); + const layout = ty.unionGetLayout(mod); if (layout.tag_size != 0) { try writer.writeAll(" .tag = "); try dg.renderValue(writer, tag_ty, val, initializer_type); } - if (ty.unionHasAllZeroBitFieldTypes()) return try writer.writeByte('}'); + if (ty.unionHasAllZeroBitFieldTypes(mod)) return try writer.writeByte('}'); if (layout.tag_size != 0) try writer.writeByte(','); try writer.writeAll(" .payload = {"); } for (ty.unionFields().values()) |field| { - if (!field.ty.hasRuntimeBits()) continue; + if (!field.ty.hasRuntimeBits(mod)) continue; try dg.renderValue(writer, field.ty, val, initializer_type); break; } @@ -853,7 +862,7 @@ pub const DeclGen = struct { const payload_ty = ty.errorUnionPayload(); const error_ty = ty.errorUnionSet(); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return dg.renderValue(writer, error_ty, val, location); } @@ -916,7 +925,7 @@ pub const DeclGen = struct { } unreachable; } - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Int => switch (val.tag()) { .field_ptr, .elem_ptr, @@ -931,8 +940,8 @@ pub const DeclGen = struct { const bits = ty.floatBits(target); const f128_val = val.toFloat(f128); - var repr_ty_pl = Type.Payload.Bits{ .base = .{ .tag = .int_unsigned }, .data = bits }; - const repr_ty = Type.initPayload(&repr_ty_pl.base); + // All unsigned ints matching float types are pre-allocated. 
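// Sketch of the path below, under the same pre-allocation assumption: the
// value is widened to f128, its bit pattern is written into a 128-bit
// two's-complement big int (repr_val_limbs), and that integer is then
// rendered as a literal of the matching unsigned repr_ty.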
+ const repr_ty = mod.intType(.unsigned, bits) catch unreachable; assert(bits <= 128); var repr_val_limbs: [BigInt.calcTwosCompLimbCount(128)]BigIntLimb = undefined; @@ -1109,7 +1118,7 @@ pub const DeclGen = struct { }, else => unreachable, }; - const sentinel = if (ty.sentinel()) |sentinel| @intCast(u8, sentinel.toUnsignedInt(target)) else null; + const sentinel = if (ty.sentinel()) |sentinel| @intCast(u8, sentinel.toUnsignedInt(mod)) else null; try writer.print("{s}", .{ fmtStringLiteral(bytes[0..@intCast(usize, ty.arrayLen())], sentinel), }); @@ -1131,11 +1140,11 @@ pub const DeclGen = struct { var index: usize = 0; while (index < ai.len) : (index += 1) { const elem_val = try val.elemValue(dg.module, arena_allocator, index); - const elem_val_u8 = if (elem_val.isUndef()) undefPattern(u8) else @intCast(u8, elem_val.toUnsignedInt(target)); + const elem_val_u8 = if (elem_val.isUndef()) undefPattern(u8) else @intCast(u8, elem_val.toUnsignedInt(mod)); try literal.writeChar(elem_val_u8); } if (ai.sentinel) |s| { - const s_u8 = @intCast(u8, s.toUnsignedInt(target)); + const s_u8 = @intCast(u8, s.toUnsignedInt(mod)); if (s_u8 != 0) try literal.writeChar(s_u8); } try literal.end(); @@ -1145,7 +1154,7 @@ pub const DeclGen = struct { while (index < ai.len) : (index += 1) { if (index != 0) try writer.writeByte(','); const elem_val = try val.elemValue(dg.module, arena_allocator, index); - const elem_val_u8 = if (elem_val.isUndef()) undefPattern(u8) else @intCast(u8, elem_val.toUnsignedInt(target)); + const elem_val_u8 = if (elem_val.isUndef()) undefPattern(u8) else @intCast(u8, elem_val.toUnsignedInt(mod)); try writer.print("'\\x{x}'", .{elem_val_u8}); } if (ai.sentinel) |s| { @@ -1183,10 +1192,10 @@ pub const DeclGen = struct { const payload_ty = ty.optionalChild(&opt_buf); const is_null_val = Value.makeBool(val.tag() == .null_value); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return dg.renderValue(writer, Type.bool, is_null_val, location); - if (ty.optionalReprIsPayload()) { + if (ty.optionalReprIsPayload(mod)) { const payload_val = if (val.castTag(.opt_payload)) |pl| pl.data else val; return dg.renderValue(writer, payload_ty, payload_val, location); } @@ -1218,7 +1227,7 @@ pub const DeclGen = struct { const error_ty = ty.errorUnionSet(); const error_val = if (val.errorUnionIsPayload()) Value.zero else val; - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return dg.renderValue(writer, error_ty, error_val, location); } @@ -1263,8 +1272,7 @@ pub const DeclGen = struct { } }, else => { - var int_tag_ty_buffer: Type.Payload.Bits = undefined; - const int_tag_ty = ty.intTagType(&int_tag_ty_buffer); + const int_tag_ty = ty.intTagType(); return dg.renderValue(writer, int_tag_ty, val, location); }, } @@ -1295,7 +1303,7 @@ pub const DeclGen = struct { for (field_vals, 0..) 
|field_val, field_i| { if (ty.structFieldIsComptime(field_i)) continue; const field_ty = ty.structFieldType(field_i); - if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue; + if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; if (!empty) try writer.writeByte(','); try dg.renderValue(writer, field_ty, field_val, initializer_type); @@ -1306,13 +1314,10 @@ pub const DeclGen = struct { }, .Packed => { const field_vals = val.castTag(.aggregate).?.data; - const int_info = ty.intInfo(target); + const int_info = ty.intInfo(mod); - var bit_offset_ty_pl = Type.Payload.Bits{ - .base = .{ .tag = .int_unsigned }, - .data = Type.smallestUnsignedBits(int_info.bits - 1), - }; - const bit_offset_ty = Type.initPayload(&bit_offset_ty_pl.base); + const bits = Type.smallestUnsignedBits(int_info.bits - 1); + const bit_offset_ty = try mod.intType(.unsigned, bits); var bit_offset_val_pl: Value.Payload.U64 = .{ .base = .{ .tag = .int_u64 }, .data = 0 }; const bit_offset_val = Value.initPayload(&bit_offset_val_pl.base); @@ -1321,7 +1326,7 @@ pub const DeclGen = struct { for (0..field_vals.len) |field_i| { if (ty.structFieldIsComptime(field_i)) continue; const field_ty = ty.structFieldType(field_i); - if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue; + if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; eff_num_fields += 1; } @@ -1330,7 +1335,7 @@ pub const DeclGen = struct { try writer.writeByte('('); try dg.renderValue(writer, ty, Value.undef, initializer_type); try writer.writeByte(')'); - } else if (ty.bitSize(target) > 64) { + } else if (ty.bitSize(mod) > 64) { // zig_or_u128(zig_or_u128(zig_shl_u128(a, a_off), zig_shl_u128(b, b_off)), zig_shl_u128(c, c_off)) var num_or = eff_num_fields - 1; while (num_or > 0) : (num_or -= 1) { @@ -1344,7 +1349,7 @@ pub const DeclGen = struct { for (field_vals, 0..) |field_val, field_i| { if (ty.structFieldIsComptime(field_i)) continue; const field_ty = ty.structFieldType(field_i); - if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue; + if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; const cast_context = IntCastContext{ .value = .{ .value = field_val } }; if (bit_offset_val_pl.data != 0) { @@ -1362,7 +1367,7 @@ pub const DeclGen = struct { if (needs_closing_paren) try writer.writeByte(')'); if (eff_index != eff_num_fields - 1) try writer.writeAll(", "); - bit_offset_val_pl.data += field_ty.bitSize(target); + bit_offset_val_pl.data += field_ty.bitSize(mod); needs_closing_paren = true; eff_index += 1; } @@ -1373,7 +1378,7 @@ pub const DeclGen = struct { for (field_vals, 0..) 
|field_val, field_i| { if (ty.structFieldIsComptime(field_i)) continue; const field_ty = ty.structFieldType(field_i); - if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue; + if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; if (!empty) try writer.writeAll(" | "); try writer.writeByte('('); @@ -1388,7 +1393,7 @@ pub const DeclGen = struct { try dg.renderValue(writer, field_ty, field_val, .Other); } - bit_offset_val_pl.data += field_ty.bitSize(target); + bit_offset_val_pl.data += field_ty.bitSize(mod); empty = false; } try writer.writeByte(')'); @@ -1408,12 +1413,12 @@ pub const DeclGen = struct { const field_ty = ty.unionFields().values()[field_i].ty; const field_name = ty.unionFields().keys()[field_i]; if (ty.containerLayout() == .Packed) { - if (field_ty.hasRuntimeBits()) { - if (field_ty.isPtrAtRuntime()) { + if (field_ty.hasRuntimeBits(mod)) { + if (field_ty.isPtrAtRuntime(mod)) { try writer.writeByte('('); try dg.renderType(writer, ty); try writer.writeByte(')'); - } else if (field_ty.zigTypeTag() == .Float) { + } else if (field_ty.zigTypeTag(mod) == .Float) { try writer.writeByte('('); try dg.renderType(writer, ty); try writer.writeByte(')'); @@ -1427,21 +1432,21 @@ pub const DeclGen = struct { try writer.writeByte('{'); if (ty.unionTagTypeSafety()) |tag_ty| { - const layout = ty.unionGetLayout(target); + const layout = ty.unionGetLayout(mod); if (layout.tag_size != 0) { try writer.writeAll(" .tag = "); try dg.renderValue(writer, tag_ty, union_obj.tag, initializer_type); } - if (ty.unionHasAllZeroBitFieldTypes()) return try writer.writeByte('}'); + if (ty.unionHasAllZeroBitFieldTypes(mod)) return try writer.writeByte('}'); if (layout.tag_size != 0) try writer.writeByte(','); try writer.writeAll(" .payload = {"); } - if (field_ty.hasRuntimeBits()) { + if (field_ty.hasRuntimeBits(mod)) { try writer.print(" .{ } = ", .{fmtIdent(field_name)}); try dg.renderValue(writer, field_ty, union_obj.val, initializer_type); try writer.writeByte(' '); } else for (ty.unionFields().values()) |field| { - if (!field.ty.hasRuntimeBits()) continue; + if (!field.ty.hasRuntimeBits(mod)) continue; try dg.renderValue(writer, field.ty, Value.undef, initializer_type); break; } @@ -1478,9 +1483,9 @@ pub const DeclGen = struct { }, ) !void { const store = &dg.ctypes.set; - const module = dg.module; + const mod = dg.module; - const fn_decl = module.declPtr(fn_decl_index); + const fn_decl = mod.declPtr(fn_decl_index); const fn_cty_idx = try dg.typeToIndex(fn_decl.ty, kind); const fn_info = fn_decl.ty.fnInfo(); @@ -1498,7 +1503,7 @@ pub const DeclGen = struct { const trailing = try renderTypePrefix( dg.decl_index, store.*, - module, + mod, w, fn_cty_idx, .suffix, @@ -1525,7 +1530,7 @@ pub const DeclGen = struct { try renderTypeSuffix( dg.decl_index, store.*, - module, + mod, w, fn_cty_idx, .suffix, @@ -1577,9 +1582,9 @@ pub const DeclGen = struct { fn renderCType(dg: *DeclGen, w: anytype, idx: CType.Index) error{ OutOfMemory, AnalysisFail }!void { const store = &dg.ctypes.set; - const module = dg.module; - _ = try renderTypePrefix(dg.decl_index, store.*, module, w, idx, .suffix, .{}); - try renderTypeSuffix(dg.decl_index, store.*, module, w, idx, .suffix, .{}); + const mod = dg.module; + _ = try renderTypePrefix(dg.decl_index, store.*, mod, w, idx, .suffix, .{}); + try renderTypeSuffix(dg.decl_index, store.*, mod, w, idx, .suffix, .{}); } const IntCastContext = union(enum) { @@ -1619,18 +1624,18 @@ pub const DeclGen = struct { /// | > 64 bit integer | < 64 bit integer | zig_make_(0, src) /// | > 64 bit 
integer | > 64 bit integer | zig_make_(zig_hi_(src), zig_lo_(src)) fn renderIntCast(dg: *DeclGen, w: anytype, dest_ty: Type, context: IntCastContext, src_ty: Type, location: ValueRenderLocation) !void { - const target = dg.module.getTarget(); - const dest_bits = dest_ty.bitSize(target); - const dest_int_info = dest_ty.intInfo(target); + const mod = dg.module; + const dest_bits = dest_ty.bitSize(mod); + const dest_int_info = dest_ty.intInfo(mod); - const src_is_ptr = src_ty.isPtrAtRuntime(); + const src_is_ptr = src_ty.isPtrAtRuntime(mod); const src_eff_ty: Type = if (src_is_ptr) switch (dest_int_info.signedness) { .unsigned => Type.usize, .signed => Type.isize, } else src_ty; - const src_bits = src_eff_ty.bitSize(target); - const src_int_info = if (src_eff_ty.isAbiInt()) src_eff_ty.intInfo(target) else null; + const src_bits = src_eff_ty.bitSize(mod); + const src_int_info = if (src_eff_ty.isAbiInt(mod)) src_eff_ty.intInfo(mod) else null; if (dest_bits <= 64 and src_bits <= 64) { const needs_cast = src_int_info == null or (toCIntBits(dest_int_info.bits) != toCIntBits(src_int_info.?.bits) or @@ -1703,8 +1708,8 @@ pub const DeclGen = struct { alignment: u32, kind: CType.Kind, ) error{ OutOfMemory, AnalysisFail }!void { - const target = dg.module.getTarget(); - const alignas = CType.AlignAs.init(alignment, ty.abiAlignment(target)); + const mod = dg.module; + const alignas = CType.AlignAs.init(alignment, ty.abiAlignment(mod)); try dg.renderCTypeAndName(w, try dg.typeToIndex(ty, kind), name, qualifiers, alignas); } @@ -1717,7 +1722,7 @@ pub const DeclGen = struct { alignas: CType.AlignAs, ) error{ OutOfMemory, AnalysisFail }!void { const store = &dg.ctypes.set; - const module = dg.module; + const mod = dg.module; switch (std.math.order(alignas.@"align", alignas.abi)) { .lt => try w.print("zig_under_align({}) ", .{alignas.getAlign()}), @@ -1726,22 +1731,23 @@ pub const DeclGen = struct { } const trailing = - try renderTypePrefix(dg.decl_index, store.*, module, w, cty_idx, .suffix, qualifiers); + try renderTypePrefix(dg.decl_index, store.*, mod, w, cty_idx, .suffix, qualifiers); try w.print("{}", .{trailing}); try dg.writeCValue(w, name); - try renderTypeSuffix(dg.decl_index, store.*, module, w, cty_idx, .suffix, .{}); + try renderTypeSuffix(dg.decl_index, store.*, mod, w, cty_idx, .suffix, .{}); } fn declIsGlobal(dg: *DeclGen, tv: TypedValue) bool { + const mod = dg.module; switch (tv.val.tag()) { .extern_fn => return true, .function => { const func = tv.val.castTag(.function).?.data; - return dg.module.decl_exports.contains(func.owner_decl); + return mod.decl_exports.contains(func.owner_decl); }, .variable => { const variable = tv.val.castTag(.variable).?.data; - return dg.module.decl_exports.contains(variable.owner_decl); + return mod.decl_exports.contains(variable.owner_decl); }, else => unreachable, } @@ -1838,10 +1844,11 @@ pub const DeclGen = struct { } fn renderDeclName(dg: *DeclGen, writer: anytype, decl_index: Decl.Index, export_index: u32) !void { - const decl = dg.module.declPtr(decl_index); - dg.module.markDeclAlive(decl); + const mod = dg.module; + const decl = mod.declPtr(decl_index); + mod.markDeclAlive(decl); - if (dg.module.decl_exports.get(decl_index)) |exports| { + if (mod.decl_exports.get(decl_index)) |exports| { try writer.writeAll(exports.items[export_index].options.name); } else if (decl.isExtern()) { try writer.writeAll(mem.span(decl.name)); @@ -1850,7 +1857,7 @@ pub const DeclGen = struct { // expand to 3x the length of its input, but let's cut it off at a much shorter 
limit. var name: [100]u8 = undefined; var name_stream = std.io.fixedBufferStream(&name); - decl.renderFullyQualifiedName(dg.module, name_stream.writer()) catch |err| switch (err) { + decl.renderFullyQualifiedName(mod, name_stream.writer()) catch |err| switch (err) { error.NoSpaceLeft => {}, }; try writer.print("{}__{d}", .{ @@ -1894,10 +1901,10 @@ pub const DeclGen = struct { .bits => {}, } - const target = dg.module.getTarget(); - const int_info = if (ty.isAbiInt()) ty.intInfo(target) else std.builtin.Type.Int{ + const mod = dg.module; + const int_info = if (ty.isAbiInt(mod)) ty.intInfo(mod) else std.builtin.Type.Int{ .signedness = .unsigned, - .bits = @intCast(u16, ty.bitSize(target)), + .bits = @intCast(u16, ty.bitSize(mod)), }; if (is_big) try writer.print(", {}", .{int_info.signedness == .signed}); @@ -1916,6 +1923,7 @@ pub const DeclGen = struct { val: Value, loc: ValueRenderLocation, ) !std.fmt.Formatter(formatIntLiteral) { + const mod = dg.module; const kind: CType.Kind = switch (loc) { .FunctionArgument => .parameter, .Initializer, .Other => .complete, @@ -1923,7 +1931,7 @@ pub const DeclGen = struct { }; return std.fmt.Formatter(formatIntLiteral){ .data = .{ .dg = dg, - .int_info = ty.intInfo(dg.module.getTarget()), + .int_info = ty.intInfo(mod), .kind = kind, .cty = try dg.typeToCType(ty, kind), .val = val, @@ -2646,11 +2654,12 @@ pub fn genDecl(o: *Object) !void { const tracy = trace(@src()); defer tracy.end(); + const mod = o.dg.module; const decl = o.dg.decl.?; const decl_c_value = .{ .decl = o.dg.decl_index.unwrap().? }; const tv: TypedValue = .{ .ty = decl.ty, .val = decl.val }; - if (!tv.ty.isFnOrHasRuntimeBitsIgnoreComptime()) return; + if (!tv.ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return; if (tv.val.tag() == .extern_fn) { const fwd_decl_writer = o.dg.fwd_decl.writer(); try fwd_decl_writer.writeAll("zig_extern "); @@ -2704,8 +2713,9 @@ pub fn genHeader(dg: *DeclGen) error{ AnalysisFail, OutOfMemory }!void { .val = dg.decl.?.val, }; const writer = dg.fwd_decl.writer(); + const mod = dg.module; - switch (tv.ty.zigTypeTag()) { + switch (tv.ty.zigTypeTag(mod)) { .Fn => { const is_global = dg.declIsGlobal(tv); if (is_global) { @@ -2791,6 +2801,7 @@ fn genBodyResolveState(f: *Function, inst: Air.Inst.Index, leading_deaths: []con } fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, OutOfMemory }!void { + const mod = f.object.dg.module; const air_tags = f.air.instructions.items(.tag); for (body) |inst| { @@ -2826,10 +2837,10 @@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, .div_trunc, .div_exact => try airBinOp(f, inst, "/", "div_trunc", .none), .rem => blk: { const bin_op = f.air.instructions.items(.data)[inst].bin_op; - const lhs_scalar_ty = f.air.typeOf(bin_op.lhs).scalarType(); + const lhs_scalar_ty = f.air.typeOf(bin_op.lhs).scalarType(mod); // For binary operations @TypeOf(lhs)==@TypeOf(rhs), // so we only check one. 
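// Concretely (both branches are visible in this hunk): integer @rem lowers
// to the C `%` operator, or to a "rem" builtin-call helper for the >64-bit
// case handled by airBinBuiltinCall, while runtime floats take the
// airBinFloatOp path and become an fmod() call.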
- break :blk if (lhs_scalar_ty.isInt()) + break :blk if (lhs_scalar_ty.isInt(mod)) try airBinOp(f, inst, "%", "rem", .none) else try airBinFloatOp(f, inst, "fmod"); @@ -3095,9 +3106,10 @@ fn airSliceField(f: *Function, inst: Air.Inst.Index, is_ptr: bool, field_name: [ } fn airPtrElemVal(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const inst_ty = f.air.typeOfIndex(inst); const bin_op = f.air.instructions.items(.data)[inst].bin_op; - if (!inst_ty.hasRuntimeBitsIgnoreComptime()) { + if (!inst_ty.hasRuntimeBitsIgnoreComptime(mod)) { try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); return .none; } @@ -3120,13 +3132,14 @@ fn airPtrElemVal(f: *Function, inst: Air.Inst.Index) !CValue { } fn airPtrElemPtr(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; const bin_op = f.air.extraData(Air.Bin, ty_pl.payload).data; const inst_ty = f.air.typeOfIndex(inst); const ptr_ty = f.air.typeOf(bin_op.lhs); const elem_ty = ptr_ty.childType(); - const elem_has_bits = elem_ty.hasRuntimeBitsIgnoreComptime(); + const elem_has_bits = elem_ty.hasRuntimeBitsIgnoreComptime(mod); const ptr = try f.resolveInst(bin_op.lhs); const index = try f.resolveInst(bin_op.rhs); @@ -3155,9 +3168,10 @@ fn airPtrElemPtr(f: *Function, inst: Air.Inst.Index) !CValue { } fn airSliceElemVal(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const inst_ty = f.air.typeOfIndex(inst); const bin_op = f.air.instructions.items(.data)[inst].bin_op; - if (!inst_ty.hasRuntimeBitsIgnoreComptime()) { + if (!inst_ty.hasRuntimeBitsIgnoreComptime(mod)) { try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); return .none; } @@ -3180,13 +3194,14 @@ fn airSliceElemVal(f: *Function, inst: Air.Inst.Index) !CValue { } fn airSliceElemPtr(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; const bin_op = f.air.extraData(Air.Bin, ty_pl.payload).data; const inst_ty = f.air.typeOfIndex(inst); const slice_ty = f.air.typeOf(bin_op.lhs); - const elem_ty = slice_ty.elemType2(); - const elem_has_bits = elem_ty.hasRuntimeBitsIgnoreComptime(); + const elem_ty = slice_ty.elemType2(mod); + const elem_has_bits = elem_ty.hasRuntimeBitsIgnoreComptime(mod); const slice = try f.resolveInst(bin_op.lhs); const index = try f.resolveInst(bin_op.rhs); @@ -3209,9 +3224,10 @@ fn airSliceElemPtr(f: *Function, inst: Air.Inst.Index) !CValue { } fn airArrayElemVal(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const bin_op = f.air.instructions.items(.data)[inst].bin_op; const inst_ty = f.air.typeOfIndex(inst); - if (!inst_ty.hasRuntimeBitsIgnoreComptime()) { + if (!inst_ty.hasRuntimeBitsIgnoreComptime(mod)) { try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); return .none; } @@ -3234,14 +3250,14 @@ fn airArrayElemVal(f: *Function, inst: Air.Inst.Index) !CValue { } fn airAlloc(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const inst_ty = f.air.typeOfIndex(inst); const elem_type = inst_ty.elemType(); - if (!elem_type.isFnOrHasRuntimeBitsIgnoreComptime()) return .{ .undef = inst_ty }; + if (!elem_type.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return .{ .undef = inst_ty }; - const target = f.object.dg.module.getTarget(); const local = try f.allocLocalValue( elem_type, - inst_ty.ptrAlignment(target), + inst_ty.ptrAlignment(mod), ); log.debug("%{d}: allocated unfreeable t{d}", .{ inst, local.new_local 
}); const gpa = f.object.dg.module.gpa; @@ -3250,14 +3266,14 @@ fn airAlloc(f: *Function, inst: Air.Inst.Index) !CValue { } fn airRetPtr(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const inst_ty = f.air.typeOfIndex(inst); const elem_ty = inst_ty.elemType(); - if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime()) return .{ .undef = inst_ty }; + if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return .{ .undef = inst_ty }; - const target = f.object.dg.module.getTarget(); const local = try f.allocLocalValue( elem_ty, - inst_ty.ptrAlignment(target), + inst_ty.ptrAlignment(mod), ); log.debug("%{d}: allocated unfreeable t{d}", .{ inst, local.new_local }); const gpa = f.object.dg.module.gpa; @@ -3290,14 +3306,15 @@ fn airArg(f: *Function, inst: Air.Inst.Index) !CValue { } fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const ty_op = f.air.instructions.items(.data)[inst].ty_op; const ptr_ty = f.air.typeOf(ty_op.operand); - const ptr_scalar_ty = ptr_ty.scalarType(); + const ptr_scalar_ty = ptr_ty.scalarType(mod); const ptr_info = ptr_scalar_ty.ptrInfo().data; const src_ty = ptr_info.pointee_type; - if (!src_ty.hasRuntimeBitsIgnoreComptime()) { + if (!src_ty.hasRuntimeBitsIgnoreComptime(mod)) { try reap(f, inst, &.{ty_op.operand}); return .none; } @@ -3306,9 +3323,8 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue { try reap(f, inst, &.{ty_op.operand}); - const target = f.object.dg.module.getTarget(); - const is_aligned = ptr_info.@"align" == 0 or ptr_info.@"align" >= src_ty.abiAlignment(target); - const is_array = lowersToArray(src_ty, target); + const is_aligned = ptr_info.@"align" == 0 or ptr_info.@"align" >= src_ty.abiAlignment(mod); + const is_array = lowersToArray(src_ty, mod); const need_memcpy = !is_aligned or is_array; const writer = f.object.writer(); @@ -3327,17 +3343,10 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue { try f.renderType(writer, src_ty); try writer.writeAll("))"); } else if (ptr_info.host_size > 0 and ptr_info.vector_index == .none) { - var host_pl = Type.Payload.Bits{ - .base = .{ .tag = .int_unsigned }, - .data = ptr_info.host_size * 8, - }; - const host_ty = Type.initPayload(&host_pl.base); + const host_bits: u16 = ptr_info.host_size * 8; + const host_ty = try mod.intType(.unsigned, host_bits); - var bit_offset_ty_pl = Type.Payload.Bits{ - .base = .{ .tag = .int_unsigned }, - .data = Type.smallestUnsignedBits(host_pl.data - 1), - }; - const bit_offset_ty = Type.initPayload(&bit_offset_ty_pl.base); + const bit_offset_ty = try mod.intType(.unsigned, Type.smallestUnsignedBits(host_bits - 1)); var bit_offset_val_pl: Value.Payload.U64 = .{ .base = .{ .tag = .int_u64 }, @@ -3345,11 +3354,7 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue { }; const bit_offset_val = Value.initPayload(&bit_offset_val_pl.base); - var field_pl = Type.Payload.Bits{ - .base = .{ .tag = .int_unsigned }, - .data = @intCast(u16, src_ty.bitSize(target)), - }; - const field_ty = Type.initPayload(&field_pl.base); + const field_ty = try mod.intType(.unsigned, @intCast(u16, src_ty.bitSize(mod))); try f.writeCValue(writer, local, .Other); try v.elem(f, writer); @@ -3360,9 +3365,9 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeAll("(("); try f.renderType(writer, field_ty); try writer.writeByte(')'); - const cant_cast = host_ty.isInt() and host_ty.bitSize(target) > 64; + const cant_cast = host_ty.isInt(mod) and host_ty.bitSize(mod) > 64; if (cant_cast) { - if 
(field_ty.bitSize(target) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{});
+            if (field_ty.bitSize(mod) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{});
             try writer.writeAll("zig_lo_");
             try f.object.dg.renderTypeForBuiltinFnName(writer, host_ty);
             try writer.writeByte('(');
@@ -3390,23 +3395,23 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {
 }
 
 fn airRet(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValue {
+    const mod = f.object.dg.module;
     const un_op = f.air.instructions.items(.data)[inst].un_op;
     const writer = f.object.writer();
-    const target = f.object.dg.module.getTarget();
     const op_inst = Air.refToIndex(un_op);
     const op_ty = f.air.typeOf(un_op);
     const ret_ty = if (is_ptr) op_ty.childType() else op_ty;
     var lowered_ret_buf: LowerFnRetTyBuffer = undefined;
-    const lowered_ret_ty = lowerFnRetTy(ret_ty, &lowered_ret_buf, target);
+    const lowered_ret_ty = lowerFnRetTy(ret_ty, &lowered_ret_buf, mod);
 
     if (op_inst != null and f.air.instructions.items(.tag)[op_inst.?] == .call_always_tail) {
         try reap(f, inst, &.{un_op});
         _ = try airCall(f, op_inst.?, .always_tail);
-    } else if (lowered_ret_ty.hasRuntimeBitsIgnoreComptime()) {
+    } else if (lowered_ret_ty.hasRuntimeBitsIgnoreComptime(mod)) {
         const operand = try f.resolveInst(un_op);
         try reap(f, inst, &.{un_op});
         var deref = is_ptr;
-        const is_array = lowersToArray(ret_ty, target);
+        const is_array = lowersToArray(ret_ty, mod);
         const ret_val = if (is_array) ret_val: {
             const array_local = try f.allocLocal(inst, lowered_ret_ty);
             try writer.writeAll("memcpy(");
@@ -3442,15 +3447,16 @@ fn airRet(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValue {
 }
 
 fn airIntCast(f: *Function, inst: Air.Inst.Index) !CValue {
+    const mod = f.object.dg.module;
     const ty_op = f.air.instructions.items(.data)[inst].ty_op;
     const operand = try f.resolveInst(ty_op.operand);
     try reap(f, inst, &.{ty_op.operand});
 
     const inst_ty = f.air.typeOfIndex(inst);
-    const inst_scalar_ty = inst_ty.scalarType();
+    const inst_scalar_ty = inst_ty.scalarType(mod);
     const operand_ty = f.air.typeOf(ty_op.operand);
-    const scalar_ty = operand_ty.scalarType();
+    const scalar_ty = operand_ty.scalarType(mod);
 
     const writer = f.object.writer();
     const local = try f.allocLocal(inst, inst_ty);
@@ -3467,20 +3473,20 @@ fn airIntCast(f: *Function, inst: Air.Inst.Index) !CValue {
 }
 
 fn airTrunc(f: *Function, inst: Air.Inst.Index) !CValue {
+    const mod = f.object.dg.module;
     const ty_op = f.air.instructions.items(.data)[inst].ty_op;
     const operand = try f.resolveInst(ty_op.operand);
     try reap(f, inst, &.{ty_op.operand});
 
     const inst_ty = f.air.typeOfIndex(inst);
-    const inst_scalar_ty = inst_ty.scalarType();
-    const target = f.object.dg.module.getTarget();
-    const dest_int_info = inst_scalar_ty.intInfo(target);
+    const inst_scalar_ty = inst_ty.scalarType(mod);
+    const dest_int_info = inst_scalar_ty.intInfo(mod);
     const dest_bits = dest_int_info.bits;
     const dest_c_bits = toCIntBits(dest_int_info.bits) orelse
         return f.fail("TODO: C backend: implement integer types larger than 128 bits", .{});
     const operand_ty = f.air.typeOf(ty_op.operand);
-    const scalar_ty = operand_ty.scalarType();
-    const scalar_int_info = scalar_ty.intInfo(target);
+    const scalar_ty = operand_ty.scalarType(mod);
+    const scalar_int_info = scalar_ty.intInfo(mod);
 
     const writer = f.object.writer();
     const local = try f.allocLocal(inst, inst_ty);
@@ -3515,7 +3521,7 @@ fn airTrunc(f: *Function, inst: Air.Inst.Index) !CValue {
             var stack align(@alignOf(ExpectedContents)) =
                 std.heap.stackFallback(@sizeOf(ExpectedContents), arena.allocator());
 
-            const mask_val = try inst_scalar_ty.maxInt(stack.get(), target);
+            const mask_val = try inst_scalar_ty.maxInt(stack.get(), mod);
 
             try writer.writeAll("zig_and_");
             try f.object.dg.renderTypeForBuiltinFnName(writer, scalar_ty);
             try writer.writeByte('(');
@@ -3577,17 +3583,18 @@ fn airBoolToInt(f: *Function, inst: Air.Inst.Index) !CValue {
 }
 
 fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
+    const mod = f.object.dg.module;
     // *a = b;
     const bin_op = f.air.instructions.items(.data)[inst].bin_op;
 
     const ptr_ty = f.air.typeOf(bin_op.lhs);
-    const ptr_scalar_ty = ptr_ty.scalarType();
+    const ptr_scalar_ty = ptr_ty.scalarType(mod);
     const ptr_info = ptr_scalar_ty.ptrInfo().data;
 
     const ptr_val = try f.resolveInst(bin_op.lhs);
 
     const src_ty = f.air.typeOf(bin_op.rhs);
 
-    const val_is_undef = if (f.air.value(bin_op.rhs)) |v| v.isUndefDeep() else false;
+    const val_is_undef = if (f.air.value(bin_op.rhs, mod)) |v| v.isUndefDeep() else false;
 
     if (val_is_undef) {
         try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
@@ -3602,10 +3609,9 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
         return .none;
     }
 
-    const target = f.object.dg.module.getTarget();
     const is_aligned = ptr_info.@"align" == 0 or
-        ptr_info.@"align" >= ptr_info.pointee_type.abiAlignment(target);
-    const is_array = lowersToArray(ptr_info.pointee_type, target);
+        ptr_info.@"align" >= ptr_info.pointee_type.abiAlignment(mod);
+    const is_array = lowersToArray(ptr_info.pointee_type, mod);
     const need_memcpy = !is_aligned or is_array;
 
     const src_val = try f.resolveInst(bin_op.rhs);
@@ -3647,14 +3653,9 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
         }
     } else if (ptr_info.host_size > 0 and ptr_info.vector_index == .none) {
         const host_bits = ptr_info.host_size * 8;
-        var host_pl = Type.Payload.Bits{ .base = .{ .tag = .int_unsigned }, .data = host_bits };
-        const host_ty = Type.initPayload(&host_pl.base);
+        const host_ty = try mod.intType(.unsigned, host_bits);
 
-        var bit_offset_ty_pl = Type.Payload.Bits{
-            .base = .{ .tag = .int_unsigned },
-            .data = Type.smallestUnsignedBits(host_bits - 1),
-        };
-        const bit_offset_ty = Type.initPayload(&bit_offset_ty_pl.base);
+        const bit_offset_ty = try mod.intType(.unsigned, Type.smallestUnsignedBits(host_bits - 1));
 
         var bit_offset_val_pl: Value.Payload.U64 = .{
             .base = .{ .tag = .int_u64 },
            .data = ptr_info.bit_offset,
        };
         const bit_offset_val = Value.initPayload(&bit_offset_val_pl.base);
 
-        const src_bits = src_ty.bitSize(target);
+        const src_bits = src_ty.bitSize(mod);
 
         const ExpectedContents = [BigInt.Managed.default_capacity]BigIntLimb;
         var stack align(@alignOf(ExpectedContents)) =
@@ -3693,9 +3694,9 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
         try writer.print(", {x}), zig_shl_", .{try f.fmtIntLiteral(host_ty, mask_val)});
         try f.object.dg.renderTypeForBuiltinFnName(writer, host_ty);
         try writer.writeByte('(');
-        const cant_cast = host_ty.isInt() and host_ty.bitSize(target) > 64;
+        const cant_cast = host_ty.isInt(mod) and host_ty.bitSize(mod) > 64;
         if (cant_cast) {
-            if (src_ty.bitSize(target) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{});
+            if (src_ty.bitSize(mod) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{});
             try writer.writeAll("zig_make_");
             try f.object.dg.renderTypeForBuiltinFnName(writer, host_ty);
             try writer.writeAll("(0, ");
@@ -3705,7 +3706,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
             try writer.writeByte(')');
         }
 
-        if (src_ty.isPtrAtRuntime()) {
+        if (src_ty.isPtrAtRuntime(mod)) {
             try writer.writeByte('(');
             try f.renderType(writer, Type.usize);
             try writer.writeByte(')');
@@ -3728,6 +3729,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
 }
 
 fn airOverflow(f: *Function, inst: Air.Inst.Index, operation: []const u8, info: BuiltinInfo) !CValue {
+    const mod = f.object.dg.module;
     const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
     const bin_op = f.air.extraData(Air.Bin, ty_pl.payload).data;
 
@@ -3737,7 +3739,7 @@ fn airOverflow(f: *Function, inst: Air.Inst.Index, operation: []const u8, info:
 
     const inst_ty = f.air.typeOfIndex(inst);
     const operand_ty = f.air.typeOf(bin_op.lhs);
-    const scalar_ty = operand_ty.scalarType();
+    const scalar_ty = operand_ty.scalarType(mod);
 
     const w = f.object.writer();
     const local = try f.allocLocal(inst, inst_ty);
@@ -3765,9 +3767,10 @@ fn airOverflow(f: *Function, inst: Air.Inst.Index, operation: []const u8, info:
 }
 
 fn airNot(f: *Function, inst: Air.Inst.Index) !CValue {
+    const mod = f.object.dg.module;
     const ty_op = f.air.instructions.items(.data)[inst].ty_op;
     const operand_ty = f.air.typeOf(ty_op.operand);
-    const scalar_ty = operand_ty.scalarType();
+    const scalar_ty = operand_ty.scalarType(mod);
     if (scalar_ty.tag() != .bool) return try airUnBuiltinCall(f, inst, "not", .bits);
 
     const op = try f.resolveInst(ty_op.operand);
@@ -3797,11 +3800,11 @@ fn airBinOp(
     operation: []const u8,
     info: BuiltinInfo,
 ) !CValue {
+    const mod = f.object.dg.module;
     const bin_op = f.air.instructions.items(.data)[inst].bin_op;
     const operand_ty = f.air.typeOf(bin_op.lhs);
-    const scalar_ty = operand_ty.scalarType();
-    const target = f.object.dg.module.getTarget();
-    if ((scalar_ty.isInt() and scalar_ty.bitSize(target) > 64) or scalar_ty.isRuntimeFloat())
+    const scalar_ty = operand_ty.scalarType(mod);
+    if ((scalar_ty.isInt(mod) and scalar_ty.bitSize(mod) > 64) or scalar_ty.isRuntimeFloat())
         return try airBinBuiltinCall(f, inst, operation, info);
 
     const lhs = try f.resolveInst(bin_op.lhs);
@@ -3835,12 +3838,12 @@ fn airCmpOp(
     data: anytype,
     operator: std.math.CompareOperator,
 ) !CValue {
+    const mod = f.object.dg.module;
     const lhs_ty = f.air.typeOf(data.lhs);
-    const scalar_ty = lhs_ty.scalarType();
+    const scalar_ty = lhs_ty.scalarType(mod);
 
-    const target = f.object.dg.module.getTarget();
-    const scalar_bits = scalar_ty.bitSize(target);
-    if (scalar_ty.isInt() and scalar_bits > 64)
+    const scalar_bits = scalar_ty.bitSize(mod);
+    if (scalar_ty.isInt(mod) and scalar_bits > 64)
         return airCmpBuiltinCall(
             f,
             inst,
@@ -3885,12 +3888,12 @@ fn airEquality(
     inst: Air.Inst.Index,
     operator: std.math.CompareOperator,
 ) !CValue {
+    const mod = f.object.dg.module;
     const bin_op = f.air.instructions.items(.data)[inst].bin_op;
 
     const operand_ty = f.air.typeOf(bin_op.lhs);
-    const target = f.object.dg.module.getTarget();
-    const operand_bits = operand_ty.bitSize(target);
-    if (operand_ty.isInt() and operand_bits > 64)
+    const operand_bits = operand_ty.bitSize(mod);
+    if (operand_ty.isInt(mod) and operand_bits > 64)
         return airCmpBuiltinCall(
             f,
             inst,
@@ -3912,7 +3915,7 @@ fn airEquality(
     try f.writeCValue(writer, local, .Other);
     try writer.writeAll(" = ");
 
-    if (operand_ty.zigTypeTag() == .Optional and !operand_ty.optionalReprIsPayload()) {
+    if (operand_ty.zigTypeTag(mod) == .Optional and !operand_ty.optionalReprIsPayload(mod)) {
         // (A && B)  ||  (C && (A == B))
         // A = lhs.is_null  ;  B = rhs.is_null  ;  C = rhs.payload == lhs.payload
 
@@ -3965,6 +3968,7 @@ fn airCmpLtErrorsLen(f: *Function, inst: Air.Inst.Index) !CValue {
 }
 
 fn airPtrAddSub(f: *Function, inst: Air.Inst.Index, operator: u8) !CValue {
+    const mod = f.object.dg.module;
     const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
     const bin_op = f.air.extraData(Air.Bin, ty_pl.payload).data;
 
@@ -3973,8 +3977,8 @@ fn airPtrAddSub(f: *Function, inst: Air.Inst.Index, operator: u8) !CValue {
     try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
 
     const inst_ty = f.air.typeOfIndex(inst);
-    const inst_scalar_ty = inst_ty.scalarType();
-    const elem_ty = inst_scalar_ty.elemType2();
+    const inst_scalar_ty = inst_ty.scalarType(mod);
+    const elem_ty = inst_scalar_ty.elemType2(mod);
 
     const local = try f.allocLocal(inst, inst_ty);
     const writer = f.object.writer();
@@ -3983,7 +3987,7 @@ fn airPtrAddSub(f: *Function, inst: Air.Inst.Index, operator: u8) !CValue {
         try v.elem(f, writer);
         try writer.writeAll(" = ");
 
-        if (elem_ty.hasRuntimeBitsIgnoreComptime()) {
+        if (elem_ty.hasRuntimeBitsIgnoreComptime(mod)) {
             // We must convert to and from integer types to prevent UB if the operation
             // results in a NULL pointer, or if LHS is NULL. The operation is only UB
             // if the result is NULL and then dereferenced.
@@ -4012,13 +4016,13 @@
 }
 
 fn airMinMax(f: *Function, inst: Air.Inst.Index, operator: u8, operation: []const u8) !CValue {
+    const mod = f.object.dg.module;
     const bin_op = f.air.instructions.items(.data)[inst].bin_op;
 
     const inst_ty = f.air.typeOfIndex(inst);
-    const inst_scalar_ty = inst_ty.scalarType();
+    const inst_scalar_ty = inst_ty.scalarType(mod);
 
-    const target = f.object.dg.module.getTarget();
-    if (inst_scalar_ty.isInt() and inst_scalar_ty.bitSize(target) > 64)
+    if (inst_scalar_ty.isInt(mod) and inst_scalar_ty.bitSize(mod) > 64)
         return try airBinBuiltinCall(f, inst, operation[1..], .none);
     if (inst_scalar_ty.isRuntimeFloat())
         return try airBinFloatOp(f, inst, operation);
@@ -4092,12 +4096,11 @@ fn airCall(
     inst: Air.Inst.Index,
     modifier: std.builtin.CallModifier,
 ) !CValue {
+    const mod = f.object.dg.module;
     // Not even allowed to call panic in a naked function.
     if (f.object.dg.decl) |decl| if (decl.ty.fnCallingConvention() == .Naked) return .none;
 
     const gpa = f.object.dg.gpa;
-    const module = f.object.dg.module;
-    const target = module.getTarget();
     const writer = f.object.writer();
 
     const pl_op = f.air.instructions.items(.data)[inst].pl_op;
@@ -4116,7 +4119,7 @@ fn airCall(
         resolved_arg.* = try f.resolveInst(arg);
         if (arg_cty != try f.typeToIndex(arg_ty, .complete)) {
             var lowered_arg_buf: LowerFnRetTyBuffer = undefined;
-            const lowered_arg_ty = lowerFnRetTy(arg_ty, &lowered_arg_buf, target);
+            const lowered_arg_ty = lowerFnRetTy(arg_ty, &lowered_arg_buf, mod);
 
             const array_local = try f.allocLocal(inst, lowered_arg_ty);
             try writer.writeAll("memcpy(");
@@ -4139,7 +4142,7 @@ fn airCall(
     }
 
     const callee_ty = f.air.typeOf(pl_op.operand);
-    const fn_ty = switch (callee_ty.zigTypeTag()) {
+    const fn_ty = switch (callee_ty.zigTypeTag(mod)) {
         .Fn => callee_ty,
         .Pointer => callee_ty.childType(),
         else => unreachable,
@@ -4147,13 +4150,13 @@ fn airCall(
 
     const ret_ty = fn_ty.fnReturnType();
     var lowered_ret_buf: LowerFnRetTyBuffer = undefined;
-    const lowered_ret_ty = lowerFnRetTy(ret_ty, &lowered_ret_buf, target);
+    const lowered_ret_ty = lowerFnRetTy(ret_ty, &lowered_ret_buf, mod);
 
     const result_local = result: {
         if (modifier == .always_tail) {
             try writer.writeAll("zig_always_tail return ");
             break :result .none;
-        } else if (!lowered_ret_ty.hasRuntimeBitsIgnoreComptime()) {
+        } else if (!lowered_ret_ty.hasRuntimeBitsIgnoreComptime(mod)) {
             break :result .none;
         } else if (f.liveness.isUnused(inst)) {
             try writer.writeByte('(');
@@ -4171,7 +4174,7 @@ fn airCall(
     callee: {
         known: {
             const fn_decl = fn_decl: {
-                const callee_val = f.air.value(pl_op.operand) orelse break :known;
+                const callee_val = f.air.value(pl_op.operand, mod) orelse break :known;
                 break :fn_decl switch (callee_val.tag()) {
                     .extern_fn => callee_val.castTag(.extern_fn).?.data.owner_decl,
                     .function => callee_val.castTag(.function).?.data.owner_decl,
@@ -4181,9 +4184,9 @@ fn airCall(
             };
             switch (modifier) {
                 .auto, .always_tail => try f.object.dg.renderDeclName(writer, fn_decl, 0),
-                inline .never_tail, .never_inline => |mod| try writer.writeAll(try f.getLazyFnName(
-                    @unionInit(LazyFnKey, @tagName(mod), fn_decl),
-                    @unionInit(LazyFnValue.Data, @tagName(mod), {}),
+                inline .never_tail, .never_inline => |m| try writer.writeAll(try f.getLazyFnName(
+                    @unionInit(LazyFnKey, @tagName(m), fn_decl),
+                    @unionInit(LazyFnValue.Data, @tagName(m), {}),
                 )),
                 else => unreachable,
             }
@@ -4211,7 +4214,7 @@ fn airCall(
     try writer.writeAll(");\n");
 
     const result = result: {
-        if (result_local == .none or !lowersToArray(ret_ty, target))
+        if (result_local == .none or !lowersToArray(ret_ty, mod))
             break :result result_local;
 
         const array_local = try f.allocLocal(inst, ret_ty);
@@ -4254,9 +4257,10 @@ fn airDbgInline(f: *Function, inst: Air.Inst.Index) !CValue {
 }
 
 fn airDbgVar(f: *Function, inst: Air.Inst.Index) !CValue {
+    const mod = f.object.dg.module;
     const pl_op = f.air.instructions.items(.data)[inst].pl_op;
     const name = f.air.nullTerminatedString(pl_op.payload);
-    const operand_is_undef = if (f.air.value(pl_op.operand)) |v| v.isUndefDeep() else false;
+    const operand_is_undef = if (f.air.value(pl_op.operand, mod)) |v| v.isUndefDeep() else false;
     if (!operand_is_undef) _ = try f.resolveInst(pl_op.operand);
 
     try reap(f, inst, &.{pl_op.operand});
@@ -4330,12 +4334,13 @@ fn lowerTry(
     err_union_ty: Type,
     is_ptr: bool,
 ) !CValue {
+    const mod = f.object.dg.module;
     const err_union = try f.resolveInst(operand);
     const inst_ty = f.air.typeOfIndex(inst);
     const liveness_condbr = f.liveness.getCondBr(inst);
     const writer = f.object.writer();
     const payload_ty = err_union_ty.errorUnionPayload();
-    const payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime();
+    const payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(mod);
 
     if (!err_union_ty.errorUnionSet().errorSetIsEmpty()) {
         try writer.writeAll("if (");
@@ -4431,6 +4436,8 @@ const LocalResult = struct {
     need_free: bool,
 
     fn move(lr: LocalResult, f: *Function, inst: Air.Inst.Index, dest_ty: Type) !CValue {
+        const mod = f.object.dg.module;
+
        if (lr.need_free) {
            // Move the freshly allocated local to be owned by this instruction,
            // by returning it here instead of freeing it.
@@ -4441,7 +4448,7 @@ const LocalResult = struct {
        try lr.free(f);
        const writer = f.object.writer();
        try f.writeCValue(writer, local, .Other);
-        if (dest_ty.isAbiInt()) {
+        if (dest_ty.isAbiInt(mod)) {
            try writer.writeAll(" = ");
        } else {
            try writer.writeAll(" = (");
@@ -4461,12 +4468,13 @@ const LocalResult = struct {
 };
 
 fn bitcast(f: *Function, dest_ty: Type, operand: CValue, operand_ty: Type) !LocalResult {
-    const target = f.object.dg.module.getTarget();
+    const mod = f.object.dg.module;
+    const target = mod.getTarget();
     const writer = f.object.writer();
 
-    if (operand_ty.isAbiInt() and dest_ty.isAbiInt()) {
-        const src_info = dest_ty.intInfo(target);
-        const dest_info = operand_ty.intInfo(target);
+    if (operand_ty.isAbiInt(mod) and dest_ty.isAbiInt(mod)) {
+        const src_info = dest_ty.intInfo(mod);
+        const dest_info = operand_ty.intInfo(mod);
        if (src_info.signedness == dest_info.signedness and
            src_info.bits == dest_info.bits)
        {
@@ -4477,7 +4485,7 @@ fn bitcast(f: *Function, dest_ty: Type, operand: CValue, operand_ty: Type) !Loca
        }
     }
 
-    if (dest_ty.isPtrAtRuntime() and operand_ty.isPtrAtRuntime()) {
+    if (dest_ty.isPtrAtRuntime(mod) and operand_ty.isPtrAtRuntime(mod)) {
        const local = try f.allocLocal(0, dest_ty);
        try f.writeCValue(writer, local, .Other);
        try writer.writeAll(" = (");
@@ -4494,7 +4502,7 @@ fn bitcast(f: *Function, dest_ty: Type, operand: CValue, operand_ty: Type) !Loca
     const operand_lval = if (operand == .constant) blk: {
        const operand_local = try f.allocLocal(0, operand_ty);
        try f.writeCValue(writer, operand_local, .Other);
-        if (operand_ty.isAbiInt()) {
+        if (operand_ty.isAbiInt(mod)) {
            try writer.writeAll(" = ");
        } else {
            try writer.writeAll(" = (");
@@ -4516,13 +4524,10 @@ fn bitcast(f: *Function, dest_ty: Type, operand: CValue, operand_ty: Type) !Loca
     try writer.writeAll("));\n");
 
     // Ensure padding bits have the expected value.
-    if (dest_ty.isAbiInt()) {
+    if (dest_ty.isAbiInt(mod)) {
        const dest_cty = try f.typeToCType(dest_ty, .complete);
-        const dest_info = dest_ty.intInfo(target);
-        var info_ty_pl = Type.Payload.Bits{ .base = .{ .tag = switch (dest_info.signedness) {
-            .unsigned => .int_unsigned,
-            .signed => .int_signed,
-        } }, .data = dest_info.bits };
+        const dest_info = dest_ty.intInfo(mod);
+        var bits: u16 = dest_info.bits;
        var wrap_cty: ?CType = null;
        var need_bitcasts = false;
 
@@ -4535,9 +4540,9 @@ fn bitcast(f: *Function, dest_ty: Type, operand: CValue, operand_ty: Type) !Loca
            const elem_cty = f.indexToCType(pl.data.elem_type);
            wrap_cty = elem_cty.toSignedness(dest_info.signedness);
            need_bitcasts = wrap_cty.?.tag() == .zig_i128;
-            info_ty_pl.data -= 1;
-            info_ty_pl.data %= @intCast(u16, f.byteSize(elem_cty) * 8);
-            info_ty_pl.data += 1;
+            bits -= 1;
+            bits %= @intCast(u16, f.byteSize(elem_cty) * 8);
+            bits += 1;
        }
        try writer.writeAll(" = ");
        if (need_bitcasts) {
@@ -4546,7 +4551,7 @@ fn bitcast(f: *Function, dest_ty: Type, operand: CValue, operand_ty: Type) !Loca
            try writer.writeByte('(');
        }
        try writer.writeAll("zig_wrap_");
-        const info_ty = Type.initPayload(&info_ty_pl.base);
+        const info_ty = try mod.intType(dest_info.signedness, bits);
        if (wrap_cty) |cty|
            try f.object.dg.renderCTypeForBuiltinFnName(writer, cty)
        else
@@ -4675,6 +4680,7 @@ fn airCondBr(f: *Function, inst: Air.Inst.Index) !CValue {
 }
 
 fn airSwitchBr(f: *Function, inst: Air.Inst.Index) !CValue {
+    const mod = f.object.dg.module;
     const pl_op = f.air.instructions.items(.data)[inst].pl_op;
     const condition = try f.resolveInst(pl_op.operand);
     try reap(f, inst, &.{pl_op.operand});
@@ -4683,11 +4689,11 @@ fn airSwitchBr(f: *Function, inst: Air.Inst.Index) !CValue {
     const writer = f.object.writer();
 
     try writer.writeAll("switch (");
-    if (condition_ty.zigTypeTag() == .Bool) {
+    if (condition_ty.zigTypeTag(mod) == .Bool) {
        try writer.writeByte('(');
        try f.renderType(writer, Type.u1);
        try writer.writeByte(')');
-    } else if (condition_ty.isPtrAtRuntime()) {
+    } else if (condition_ty.isPtrAtRuntime(mod)) {
        try writer.writeByte('(');
        try f.renderType(writer, Type.usize);
        try writer.writeByte(')');
@@ -4714,12 +4720,12 @@ fn airSwitchBr(f: *Function, inst: Air.Inst.Index) !CValue {
        for (items) |item| {
            try f.object.indent_writer.insertNewline();
            try writer.writeAll("case ");
-            if (condition_ty.isPtrAtRuntime()) {
+            if (condition_ty.isPtrAtRuntime(mod)) {
                try writer.writeByte('(');
                try f.renderType(writer, Type.usize);
                try writer.writeByte(')');
            }
-            try f.object.dg.renderValue(writer, condition_ty, f.air.value(item).?, .Other);
+            try f.object.dg.renderValue(writer, condition_ty, f.air.value(item, mod).?, .Other);
            try writer.writeByte(':');
        }
        try writer.writeByte(' ');
@@ -4764,6 +4770,7 @@ fn asmInputNeedsLocal(constraint: []const u8, value: CValue) bool {
 }
 
 fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
+    const mod = f.object.dg.module;
     const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
     const extra = f.air.extraData(Air.Asm, ty_pl.payload);
     const is_volatile = @truncate(u1, extra.data.flags >> 31) != 0;
@@ -4778,7 +4785,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
     const result = result: {
        const writer = f.object.writer();
        const inst_ty = f.air.typeOfIndex(inst);
-        const local = if (inst_ty.hasRuntimeBitsIgnoreComptime()) local: {
+        const local = if (inst_ty.hasRuntimeBitsIgnoreComptime(mod)) local: {
            const local = try f.allocLocal(inst, inst_ty);
            if (f.wantSafety()) {
                try f.writeCValue(writer, local, .Other);
@@ -5025,6 +5032,7 @@ fn airIsNull(
     operator: []const u8,
     is_ptr: bool,
 ) !CValue {
+    const mod = f.object.dg.module;
     const un_op = f.air.instructions.items(.data)[inst].un_op;
 
     const writer = f.object.writer();
@@ -5046,14 +5054,14 @@ fn airIsNull(
     const payload_ty = optional_ty.optionalChild(&payload_buf);
     var slice_ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined;
 
-    const rhs = if (!payload_ty.hasRuntimeBitsIgnoreComptime())
+    const rhs = if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod))
        TypedValue{ .ty = Type.bool, .val = Value.true }
-    else if (optional_ty.isPtrLikeOptional())
+    else if (optional_ty.isPtrLikeOptional(mod))
        // operand is a regular pointer, test `operand !=/== NULL`
        TypedValue{ .ty = optional_ty, .val = Value.null }
-    else if (payload_ty.zigTypeTag() == .ErrorSet)
+    else if (payload_ty.zigTypeTag(mod) == .ErrorSet)
        TypedValue{ .ty = payload_ty, .val = Value.zero }
-    else if (payload_ty.isSlice() and optional_ty.optionalReprIsPayload()) rhs: {
+    else if (payload_ty.isSlice() and optional_ty.optionalReprIsPayload(mod)) rhs: {
        try writer.writeAll(".ptr");
        const slice_ptr_ty = payload_ty.slicePtrFieldType(&slice_ptr_buf);
        break :rhs TypedValue{ .ty = slice_ptr_ty, .val = Value.null };
@@ -5070,6 +5078,7 @@ fn airIsNull(
 }
 
 fn airOptionalPayload(f: *Function, inst: Air.Inst.Index) !CValue {
+    const mod = f.object.dg.module;
     const ty_op = f.air.instructions.items(.data)[inst].ty_op;
 
     const operand = try f.resolveInst(ty_op.operand);
@@ -5079,7 +5088,7 @@ fn airOptionalPayload(f: *Function, inst: Air.Inst.Index) !CValue {
     var buf: Type.Payload.ElemType = undefined;
     const payload_ty = opt_ty.optionalChild(&buf);
 
-    if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+    if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
        return .none;
     }
 
@@ -5087,7 +5096,7 @@ fn airOptionalPayload(f: *Function, inst: Air.Inst.Index) !CValue {
     const writer = f.object.writer();
     const local = try f.allocLocal(inst, inst_ty);
 
-    if (opt_ty.optionalReprIsPayload()) {
+    if (opt_ty.optionalReprIsPayload(mod)) {
        try f.writeCValue(writer, local, .Other);
        try writer.writeAll(" = ");
        try f.writeCValue(writer, operand, .Other);
@@ -5104,6 +5113,7 @@ fn airOptionalPayload(f: *Function, inst: Air.Inst.Index) !CValue {
 }
 
 fn airOptionalPayloadPtr(f: *Function, inst: Air.Inst.Index) !CValue {
+    const mod = f.object.dg.module;
     const ty_op = f.air.instructions.items(.data)[inst].ty_op;
 
     const writer = f.object.writer();
@@ -5113,14 +5123,14 @@ fn airOptionalPayloadPtr(f: *Function, inst: Air.Inst.Index) !CValue {
     const opt_ty = ptr_ty.childType();
     const inst_ty = f.air.typeOfIndex(inst);
 
-    if (!inst_ty.childType().hasRuntimeBitsIgnoreComptime()) {
+    if (!inst_ty.childType().hasRuntimeBitsIgnoreComptime(mod)) {
        return .{ .undef = inst_ty };
     }
 
     const local = try f.allocLocal(inst, inst_ty);
     try f.writeCValue(writer, local, .Other);
 
-    if (opt_ty.optionalReprIsPayload()) {
+    if (opt_ty.optionalReprIsPayload(mod)) {
        // the operand is just a regular pointer, no need to do anything special.
        // *?*T -> **T and ?*T -> *T are **T -> **T and *T -> *T in C
        try writer.writeAll(" = ");
@@ -5134,6 +5144,7 @@ fn airOptionalPayloadPtr(f: *Function, inst: Air.Inst.Index) !CValue {
 }
 
 fn airOptionalPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue {
+    const mod = f.object.dg.module;
     const ty_op = f.air.instructions.items(.data)[inst].ty_op;
     const writer = f.object.writer();
     const operand = try f.resolveInst(ty_op.operand);
@@ -5144,7 +5155,7 @@ fn airOptionalPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue {
 
     const inst_ty = f.air.typeOfIndex(inst);
 
-    if (opt_ty.optionalReprIsPayload()) {
+    if (opt_ty.optionalReprIsPayload(mod)) {
        if (f.liveness.isUnused(inst)) {
            return .none;
        }
@@ -5179,36 +5190,36 @@ fn fieldLocation(
     container_ty: Type,
     field_ptr_ty: Type,
     field_index: u32,
-    target: std.Target,
+    mod: *const Module,
 ) union(enum) {
     begin: void,
     field: CValue,
     byte_offset: u32,
     end: void,
 } {
-    return switch (container_ty.zigTypeTag()) {
+    return switch (container_ty.zigTypeTag(mod)) {
        .Struct => switch (container_ty.containerLayout()) {
            .Auto, .Extern => for (field_index..container_ty.structFieldCount()) |next_field_index| {
                if (container_ty.structFieldIsComptime(next_field_index)) continue;
                const field_ty = container_ty.structFieldType(next_field_index);
-                if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue;
+                if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
                break .{ .field = if (container_ty.isSimpleTuple())
                    .{ .field = next_field_index }
                else
                    .{ .identifier = container_ty.structFieldName(next_field_index) } };
-            } else if (container_ty.hasRuntimeBitsIgnoreComptime()) .end else .begin,
+            } else if (container_ty.hasRuntimeBitsIgnoreComptime(mod)) .end else .begin,
            .Packed => if (field_ptr_ty.ptrInfo().data.host_size == 0)
-                .{ .byte_offset = container_ty.packedStructFieldByteOffset(field_index, target) }
+                .{ .byte_offset = container_ty.packedStructFieldByteOffset(field_index, mod) }
            else
                .begin,
        },
        .Union => switch (container_ty.containerLayout()) {
            .Auto, .Extern => {
                const field_ty = container_ty.structFieldType(field_index);
-                if (!field_ty.hasRuntimeBitsIgnoreComptime())
+                if (!field_ty.hasRuntimeBitsIgnoreComptime(mod))
                    return if (container_ty.unionTagTypeSafety() != null and
-                        !container_ty.unionHasAllZeroBitFieldTypes())
+                        !container_ty.unionHasAllZeroBitFieldTypes(mod))
                        .{ .field = .{ .identifier = "payload" } }
                    else
                        .begin;
@@ -5252,10 +5263,10 @@ fn airStructFieldPtrIndex(f: *Function, inst: Air.Inst.Index, index: u8) !CValue
 }
 
 fn airFieldParentPtr(f: *Function, inst: Air.Inst.Index) !CValue {
+    const mod = f.object.dg.module;
     const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
     const extra = f.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
 
-    const target = f.object.dg.module.getTarget();
     const container_ptr_ty = f.air.typeOfIndex(inst);
     const container_ty = container_ptr_ty.childType();
 
@@ -5270,7 +5281,7 @@ fn airFieldParentPtr(f: *Function, inst: Air.Inst.Index) !CValue {
     try f.renderType(writer, container_ptr_ty);
     try writer.writeByte(')');
 
-    switch (fieldLocation(container_ty, field_ptr_ty, extra.field_index, target)) {
+    switch (fieldLocation(container_ty, field_ptr_ty, extra.field_index, mod)) {
        .begin => try f.writeCValue(writer, field_ptr_val, .Initializer),
        .field => |field| {
            var u8_ptr_pl = field_ptr_ty.ptrInfo();
@@ -5321,7 +5332,7 @@ fn fieldPtr(
     container_ptr_val: CValue,
     field_index: u32,
 ) !CValue {
-    const target = f.object.dg.module.getTarget();
+    const mod = f.object.dg.module;
     const container_ty = container_ptr_ty.elemType();
     const field_ptr_ty = f.air.typeOfIndex(inst);
 
@@ -5335,7 +5346,7 @@ fn fieldPtr(
     try f.renderType(writer, field_ptr_ty);
     try writer.writeByte(')');
 
-    switch (fieldLocation(container_ty, field_ptr_ty, field_index, target)) {
+    switch (fieldLocation(container_ty, field_ptr_ty, field_index, mod)) {
        .begin => try f.writeCValue(writer, container_ptr_val, .Initializer),
        .field => |field| {
            try writer.writeByte('&');
@@ -5370,16 +5381,16 @@ fn fieldPtr(
 }
 
 fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
+    const mod = f.object.dg.module;
     const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
     const extra = f.air.extraData(Air.StructField, ty_pl.payload).data;
 
     const inst_ty = f.air.typeOfIndex(inst);
-    if (!inst_ty.hasRuntimeBitsIgnoreComptime()) {
+    if (!inst_ty.hasRuntimeBitsIgnoreComptime(mod)) {
        try reap(f, inst, &.{extra.struct_operand});
        return .none;
     }
 
-    const target = f.object.dg.module.getTarget();
     const struct_byval = try f.resolveInst(extra.struct_operand);
     try reap(f, inst, &.{extra.struct_operand});
     const struct_ty = f.air.typeOf(extra.struct_operand);
@@ -5396,32 +5407,21 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
            .{ .identifier = struct_ty.structFieldName(extra.field_index) },
        .Packed => {
            const struct_obj = struct_ty.castTag(.@"struct").?.data;
-            const int_info = struct_ty.intInfo(target);
+            const int_info = struct_ty.intInfo(mod);
 
-            var bit_offset_ty_pl = Type.Payload.Bits{
-                .base = .{ .tag = .int_unsigned },
-                .data = Type.smallestUnsignedBits(int_info.bits - 1),
-            };
-            const bit_offset_ty = Type.initPayload(&bit_offset_ty_pl.base);
+            const bit_offset_ty = try mod.intType(.unsigned, Type.smallestUnsignedBits(int_info.bits - 1));
 
            var bit_offset_val_pl: Value.Payload.U64 = .{
                .base = .{ .tag = .int_u64 },
-                .data = struct_obj.packedFieldBitOffset(target, extra.field_index),
+                .data = struct_obj.packedFieldBitOffset(mod, extra.field_index),
            };
            const bit_offset_val = Value.initPayload(&bit_offset_val_pl.base);
 
-            const field_int_signedness = if (inst_ty.isAbiInt())
-                inst_ty.intInfo(target).signedness
+            const field_int_signedness = if (inst_ty.isAbiInt(mod))
+                inst_ty.intInfo(mod).signedness
            else
                .unsigned;
-            var field_int_pl = Type.Payload.Bits{
-                .base = .{ .tag = switch (field_int_signedness) {
-                    .unsigned => .int_unsigned,
-                    .signed => .int_signed,
-                } },
-                .data = @intCast(u16, inst_ty.bitSize(target)),
-            };
-            const field_int_ty = Type.initPayload(&field_int_pl.base);
+            const field_int_ty = try mod.intType(field_int_signedness, @intCast(u16, inst_ty.bitSize(mod)));
 
            const temp_local = try f.allocLocal(inst, field_int_ty);
            try f.writeCValue(writer, temp_local, .Other);
@@ -5432,7 +5432,7 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
            try writer.writeByte(')');
            const cant_cast = int_info.bits > 64;
            if (cant_cast) {
-                if (field_int_ty.bitSize(target) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{});
+                if (field_int_ty.bitSize(mod) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{});
                try writer.writeAll("zig_lo_");
                try f.object.dg.renderTypeForBuiltinFnName(writer, struct_ty);
                try writer.writeByte('(');
@@ -5511,6 +5511,7 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
 /// *(E!T) -> E
 /// Note that the result is never a pointer.
 fn airUnwrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue {
+    const mod = f.object.dg.module;
     const ty_op = f.air.instructions.items(.data)[inst].ty_op;
 
     const inst_ty = f.air.typeOfIndex(inst);
@@ -5518,13 +5519,13 @@ fn airUnwrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue {
     const operand_ty = f.air.typeOf(ty_op.operand);
     try reap(f, inst, &.{ty_op.operand});
 
-    const operand_is_ptr = operand_ty.zigTypeTag() == .Pointer;
+    const operand_is_ptr = operand_ty.zigTypeTag(mod) == .Pointer;
     const error_union_ty = if (operand_is_ptr) operand_ty.childType() else operand_ty;
     const error_ty = error_union_ty.errorUnionSet();
     const payload_ty = error_union_ty.errorUnionPayload();
     const local = try f.allocLocal(inst, inst_ty);
 
-    if (!payload_ty.hasRuntimeBits() and operand == .local and operand.local == local.new_local) {
+    if (!payload_ty.hasRuntimeBits(mod) and operand == .local and operand.local == local.new_local) {
        // The store will be 'x = x'; elide it.
        return local;
     }
@@ -5533,7 +5534,7 @@ fn airUnwrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue {
     try f.writeCValue(writer, local, .Other);
     try writer.writeAll(" = ");
 
-    if (!payload_ty.hasRuntimeBits()) {
+    if (!payload_ty.hasRuntimeBits(mod)) {
        try f.writeCValue(writer, operand, .Other);
     } else {
        if (!error_ty.errorSetIsEmpty())
@@ -5549,6 +5550,7 @@ fn airUnwrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue {
 }
 
 fn airUnwrapErrUnionPay(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValue {
+    const mod = f.object.dg.module;
     const ty_op = f.air.instructions.items(.data)[inst].ty_op;
 
     const inst_ty = f.air.typeOfIndex(inst);
@@ -5558,7 +5560,7 @@ fn airUnwrapErrUnionPay(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValu
     const error_union_ty = if (is_ptr) operand_ty.childType() else operand_ty;
 
     const writer = f.object.writer();
-    if (!error_union_ty.errorUnionPayload().hasRuntimeBits()) {
+    if (!error_union_ty.errorUnionPayload().hasRuntimeBits(mod)) {
        if (!is_ptr) return .none;
 
        const local = try f.allocLocal(inst, inst_ty);
@@ -5584,10 +5586,11 @@ fn airUnwrapErrUnionPay(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValu
 }
 
 fn airWrapOptional(f: *Function, inst: Air.Inst.Index) !CValue {
+    const mod = f.object.dg.module;
     const ty_op = f.air.instructions.items(.data)[inst].ty_op;
 
     const inst_ty = f.air.typeOfIndex(inst);
-    const repr_is_payload = inst_ty.optionalReprIsPayload();
+    const repr_is_payload = inst_ty.optionalReprIsPayload(mod);
     const payload_ty = f.air.typeOf(ty_op.operand);
     const payload = try f.resolveInst(ty_op.operand);
     try reap(f, inst, &.{ty_op.operand});
@@ -5615,11 +5618,12 @@ fn airWrapOptional(f: *Function, inst: Air.Inst.Index) !CValue {
 }
 
 fn airWrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue {
+    const mod = f.object.dg.module;
     const ty_op = f.air.instructions.items(.data)[inst].ty_op;
 
     const inst_ty = f.air.typeOfIndex(inst);
     const payload_ty = inst_ty.errorUnionPayload();
-    const repr_is_err = !payload_ty.hasRuntimeBitsIgnoreComptime();
+    const repr_is_err = !payload_ty.hasRuntimeBitsIgnoreComptime(mod);
     const err_ty = inst_ty.errorUnionSet();
     const err = try f.resolveInst(ty_op.operand);
     try reap(f, inst, &.{ty_op.operand});
@@ -5653,6 +5657,7 @@ fn airWrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue {
 }
 
 fn airErrUnionPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue {
+    const mod = f.object.dg.module;
     const writer = f.object.writer();
     const ty_op = f.air.instructions.items(.data)[inst].ty_op;
     const operand = try f.resolveInst(ty_op.operand);
@@ -5662,7 +5667,7 @@ fn airErrUnionPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue {
     const payload_ty = error_union_ty.errorUnionPayload();
 
     // First, set the non-error value.
-    if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+    if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
        try f.writeCValueDeref(writer, operand);
        try writer.writeAll(" = ");
        try f.object.dg.renderValue(writer, error_ty, Value.zero, .Other);
@@ -5703,12 +5708,13 @@ fn airSaveErrReturnTraceIndex(f: *Function, inst: Air.Inst.Index) !CValue {
 }
 
 fn airWrapErrUnionPay(f: *Function, inst: Air.Inst.Index) !CValue {
+    const mod = f.object.dg.module;
     const ty_op = f.air.instructions.items(.data)[inst].ty_op;
 
     const inst_ty = f.air.typeOfIndex(inst);
     const payload_ty = inst_ty.errorUnionPayload();
     const payload = try f.resolveInst(ty_op.operand);
-    const repr_is_err = !payload_ty.hasRuntimeBitsIgnoreComptime();
+    const repr_is_err = !payload_ty.hasRuntimeBitsIgnoreComptime(mod);
     const err_ty = inst_ty.errorUnionSet();
     try reap(f, inst, &.{ty_op.operand});
 
@@ -5735,6 +5741,7 @@ fn airWrapErrUnionPay(f: *Function, inst: Air.Inst.Index) !CValue {
 }
 
 fn airIsErr(f: *Function, inst: Air.Inst.Index, is_ptr: bool, operator: []const u8) !CValue {
+    const mod = f.object.dg.module;
     const un_op = f.air.instructions.items(.data)[inst].un_op;
 
     const writer = f.object.writer();
@@ -5750,7 +5757,7 @@ fn airIsErr(f: *Function, inst: Air.Inst.Index, is_ptr: bool, operator: []const
     try writer.writeAll(" = ");
 
     if (!error_ty.errorSetIsEmpty())
-        if (payload_ty.hasRuntimeBits())
+        if (payload_ty.hasRuntimeBits(mod))
            if (is_ptr)
                try f.writeCValueDerefMember(writer, operand, .{ .identifier = "error" })
            else
@@ -5768,6 +5775,7 @@ fn airIsErr(f: *Function, inst: Air.Inst.Index, is_ptr: bool, operator: []const
 }
 
 fn airArrayToSlice(f: *Function, inst: Air.Inst.Index) !CValue {
+    const mod = f.object.dg.module;
     const ty_op = f.air.instructions.items(.data)[inst].ty_op;
 
     const operand = try f.resolveInst(ty_op.operand);
@@ -5784,7 +5792,7 @@ fn airArrayToSlice(f: *Function, inst: Air.Inst.Index) !CValue {
     if (operand == .undef) {
        var buf: Type.SlicePtrFieldTypeBuffer = undefined;
        try f.writeCValue(writer, .{ .undef = inst_ty.slicePtrFieldType(&buf) }, .Initializer);
-    } else if (array_ty.hasRuntimeBitsIgnoreComptime()) {
+    } else if (array_ty.hasRuntimeBitsIgnoreComptime(mod)) {
        try writer.writeAll("&(");
        try f.writeCValueDeref(writer, operand);
        try writer.print(")[{}]", .{try f.fmtIntLiteral(Type.usize, Value.zero)});
@@ -5801,6 +5809,7 @@ fn airArrayToSlice(f: *Function, inst: Air.Inst.Index) !CValue {
 }
 
 fn airFloatCast(f: *Function, inst: Air.Inst.Index) !CValue {
+    const mod = f.object.dg.module;
     const ty_op = f.air.instructions.items(.data)[inst].ty_op;
 
     const inst_ty = f.air.typeOfIndex(inst);
@@ -5810,10 +5819,10 @@ fn airFloatCast(f: *Function, inst: Air.Inst.Index) !CValue {
     const target = f.object.dg.module.getTarget();
     const operation = if (inst_ty.isRuntimeFloat() and operand_ty.isRuntimeFloat())
        if (inst_ty.floatBits(target) < operand_ty.floatBits(target)) "trunc" else "extend"
-    else if (inst_ty.isInt() and operand_ty.isRuntimeFloat())
-        if (inst_ty.isSignedInt()) "fix" else "fixuns"
-    else if (inst_ty.isRuntimeFloat() and operand_ty.isInt())
-        if (operand_ty.isSignedInt()) "float" else "floatun"
+    else if (inst_ty.isInt(mod) and operand_ty.isRuntimeFloat())
+        if (inst_ty.isSignedInt(mod)) "fix" else "fixuns"
+    else if (inst_ty.isRuntimeFloat() and operand_ty.isInt(mod))
+        if (operand_ty.isSignedInt(mod)) "float" else "floatun"
     else
        unreachable;
@@ -5822,19 +5831,19 @@ fn airFloatCast(f: *Function, inst: Air.Inst.Index) !CValue {
     try f.writeCValue(writer, local, .Other);
     try writer.writeAll(" = ");
-    if (inst_ty.isInt() and operand_ty.isRuntimeFloat()) {
+    if (inst_ty.isInt(mod) and operand_ty.isRuntimeFloat()) {
        try writer.writeAll("zig_wrap_");
        try f.object.dg.renderTypeForBuiltinFnName(writer, inst_ty);
        try writer.writeByte('(');
     }
     try writer.writeAll("zig_");
     try writer.writeAll(operation);
-    try writer.writeAll(compilerRtAbbrev(operand_ty, target));
-    try writer.writeAll(compilerRtAbbrev(inst_ty, target));
+    try writer.writeAll(compilerRtAbbrev(operand_ty, mod));
+    try writer.writeAll(compilerRtAbbrev(inst_ty, mod));
     try writer.writeByte('(');
     try f.writeCValue(writer, operand, .FunctionArgument);
     try writer.writeByte(')');
-    if (inst_ty.isInt() and operand_ty.isRuntimeFloat()) {
+    if (inst_ty.isInt(mod) and operand_ty.isRuntimeFloat()) {
        try f.object.dg.renderBuiltinInfo(writer, inst_ty, .bits);
        try writer.writeByte(')');
     }
@@ -5871,14 +5880,15 @@ fn airUnBuiltinCall(
     operation: []const u8,
     info: BuiltinInfo,
 ) !CValue {
+    const mod = f.object.dg.module;
     const ty_op = f.air.instructions.items(.data)[inst].ty_op;
 
     const operand = try f.resolveInst(ty_op.operand);
     try reap(f, inst, &.{ty_op.operand});
     const inst_ty = f.air.typeOfIndex(inst);
-    const inst_scalar_ty = inst_ty.scalarType();
+    const inst_scalar_ty = inst_ty.scalarType(mod);
     const operand_ty = f.air.typeOf(ty_op.operand);
-    const scalar_ty = operand_ty.scalarType();
+    const scalar_ty = operand_ty.scalarType(mod);
 
     const inst_scalar_cty = try f.typeToCType(inst_scalar_ty, .complete);
     const ref_ret = inst_scalar_cty.tag() == .array;
@@ -5914,6 +5924,7 @@ fn airBinBuiltinCall(
     operation: []const u8,
     info: BuiltinInfo,
 ) !CValue {
+    const mod = f.object.dg.module;
     const bin_op = f.air.instructions.items(.data)[inst].bin_op;
 
     const operand_ty = f.air.typeOf(bin_op.lhs);
@@ -5925,8 +5936,8 @@ fn airBinBuiltinCall(
     if (!is_big) try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
 
     const inst_ty = f.air.typeOfIndex(inst);
-    const inst_scalar_ty = inst_ty.scalarType();
-    const scalar_ty = operand_ty.scalarType();
+    const inst_scalar_ty = inst_ty.scalarType(mod);
+    const scalar_ty = operand_ty.scalarType(mod);
 
     const inst_scalar_cty = try f.typeToCType(inst_scalar_ty, .complete);
     const ref_ret = inst_scalar_cty.tag() == .array;
@@ -5968,14 +5979,15 @@ fn airCmpBuiltinCall(
     operation: enum { cmp, operator },
     info: BuiltinInfo,
 ) !CValue {
+    const mod = f.object.dg.module;
     const lhs = try f.resolveInst(data.lhs);
     const rhs = try f.resolveInst(data.rhs);
     try reap(f, inst, &.{ data.lhs, data.rhs });
 
     const inst_ty = f.air.typeOfIndex(inst);
-    const inst_scalar_ty = inst_ty.scalarType();
+    const inst_scalar_ty = inst_ty.scalarType(mod);
     const operand_ty = f.air.typeOf(data.lhs);
-    const scalar_ty = operand_ty.scalarType();
+    const scalar_ty = operand_ty.scalarType(mod);
 
     const inst_scalar_cty = try f.typeToCType(inst_scalar_ty, .complete);
     const ref_ret = inst_scalar_cty.tag() == .array;
@@ -6017,6 +6029,7 @@ fn airCmpBuiltinCall(
 }
 
 fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue {
+    const mod = f.object.dg.module;
     const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
     const extra = f.air.extraData(Air.Cmpxchg, ty_pl.payload).data;
     const inst_ty = f.air.typeOfIndex(inst);
@@ -6030,15 +6043,13 @@ fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue
     const new_value_mat = try Materialize.start(f, inst, writer, ty, new_value);
     try reap(f, inst, &.{ extra.ptr, extra.expected_value, extra.new_value });
 
-    const target = f.object.dg.module.getTarget();
-    var repr_pl = Type.Payload.Bits{
-        .base = .{ .tag = .int_unsigned },
-        .data = @intCast(u16, ty.abiSize(target) * 8),
-    };
-    const repr_ty = if (ty.isRuntimeFloat()) Type.initPayload(&repr_pl.base) else ty;
+    const repr_ty = if (ty.isRuntimeFloat())
+        mod.intType(.unsigned, @intCast(u16, ty.abiSize(mod) * 8)) catch unreachable
+    else
+        ty;
 
     const local = try f.allocLocal(inst, inst_ty);
-    if (inst_ty.isPtrLikeOptional()) {
+    if (inst_ty.isPtrLikeOptional(mod)) {
        {
            const a = try Assignment.start(f, writer, ty);
            try f.writeCValue(writer, local, .Other);
@@ -6123,6 +6134,7 @@ fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue
 }
 
 fn airAtomicRmw(f: *Function, inst: Air.Inst.Index) !CValue {
+    const mod = f.object.dg.module;
     const pl_op = f.air.instructions.items(.data)[inst].pl_op;
     const extra = f.air.extraData(Air.AtomicRmw, pl_op.payload).data;
     const inst_ty = f.air.typeOfIndex(inst);
@@ -6135,14 +6147,10 @@ fn airAtomicRmw(f: *Function, inst: Air.Inst.Index) !CValue {
     const operand_mat = try Materialize.start(f, inst, writer, ty, operand);
     try reap(f, inst, &.{ pl_op.operand, extra.operand });
 
-    const target = f.object.dg.module.getTarget();
-    var repr_pl = Type.Payload.Bits{
-        .base = .{ .tag = .int_unsigned },
-        .data = @intCast(u16, ty.abiSize(target) * 8),
-    };
+    const repr_bits = @intCast(u16, ty.abiSize(mod) * 8);
     const is_float = ty.isRuntimeFloat();
-    const is_128 = repr_pl.data == 128;
-    const repr_ty = if (is_float) Type.initPayload(&repr_pl.base) else ty;
+    const is_128 = repr_bits == 128;
+    const repr_ty = if (is_float) mod.intType(.unsigned, repr_bits) catch unreachable else ty;
 
     const local = try f.allocLocal(inst, inst_ty);
     try writer.print("zig_atomicrmw_{s}", .{toAtomicRmwSuffix(extra.op())});
@@ -6181,18 +6189,17 @@ fn airAtomicRmw(f: *Function, inst: Air.Inst.Index) !CValue {
 }
 
 fn airAtomicLoad(f: *Function, inst: Air.Inst.Index) !CValue {
+    const mod = f.object.dg.module;
     const atomic_load = f.air.instructions.items(.data)[inst].atomic_load;
     const ptr = try f.resolveInst(atomic_load.ptr);
     try reap(f, inst, &.{atomic_load.ptr});
     const ptr_ty = f.air.typeOf(atomic_load.ptr);
     const ty = ptr_ty.childType();
 
-    const target = f.object.dg.module.getTarget();
-    var repr_pl = Type.Payload.Bits{
-        .base = .{ .tag = .int_unsigned },
-        .data = @intCast(u16, ty.abiSize(target) * 8),
-    };
-    const repr_ty = if (ty.isRuntimeFloat()) Type.initPayload(&repr_pl.base) else ty;
+    const repr_ty = if (ty.isRuntimeFloat())
+        mod.intType(.unsigned, @intCast(u16, ty.abiSize(mod) * 8)) catch unreachable
+    else
+        ty;
 
     const inst_ty = f.air.typeOfIndex(inst);
     const writer = f.object.writer();
@@ -6218,6 +6225,7 @@ fn airAtomicLoad(f: *Function, inst: Air.Inst.Index) !CValue {
 }
 
 fn airAtomicStore(f: *Function, inst: Air.Inst.Index, order: [*:0]const u8) !CValue {
+    const mod = f.object.dg.module;
     const bin_op = f.air.instructions.items(.data)[inst].bin_op;
     const ptr_ty = f.air.typeOf(bin_op.lhs);
     const ty = ptr_ty.childType();
@@ -6228,12 +6236,10 @@ fn airAtomicStore(f: *Function, inst: Air.Inst.Index, order: [*:0]const u8) !CVa
     const element_mat = try Materialize.start(f, inst, writer, ty, element);
     try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
 
-    const target = f.object.dg.module.getTarget();
-    var repr_pl = Type.Payload.Bits{
-        .base = .{ .tag = .int_unsigned },
-        .data = @intCast(u16, ty.abiSize(target) * 8),
-    };
-    const repr_ty = if (ty.isRuntimeFloat()) Type.initPayload(&repr_pl.base) else ty;
+    const repr_ty = if (ty.isRuntimeFloat())
+        mod.intType(.unsigned, @intCast(u16, ty.abiSize(mod) * 8)) catch unreachable
+    else
+        ty;
 
     try writer.writeAll("zig_atomic_store((zig_atomic(");
     try f.renderType(writer, ty);
@@ -6262,14 +6268,14 @@ fn writeSliceOrPtr(f: *Function, writer: anytype, ptr: CValue, ptr_ty: Type) !vo
 }
 
 fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
+    const mod = f.object.dg.module;
     const bin_op = f.air.instructions.items(.data)[inst].bin_op;
     const dest_ty = f.air.typeOf(bin_op.lhs);
     const dest_slice = try f.resolveInst(bin_op.lhs);
     const value = try f.resolveInst(bin_op.rhs);
     const elem_ty = f.air.typeOf(bin_op.rhs);
-    const target = f.object.dg.module.getTarget();
-    const elem_abi_size = elem_ty.abiSize(target);
-    const val_is_undef = if (f.air.value(bin_op.rhs)) |val| val.isUndefDeep() else false;
+    const elem_abi_size = elem_ty.abiSize(mod);
+    const val_is_undef = if (f.air.value(bin_op.rhs, mod)) |val| val.isUndefDeep() else false;
     const writer = f.object.writer();
 
     if (val_is_undef) {
@@ -6383,12 +6389,12 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
 }
 
 fn airMemcpy(f: *Function, inst: Air.Inst.Index) !CValue {
+    const mod = f.object.dg.module;
     const bin_op = f.air.instructions.items(.data)[inst].bin_op;
     const dest_ptr = try f.resolveInst(bin_op.lhs);
     const src_ptr = try f.resolveInst(bin_op.rhs);
     const dest_ty = f.air.typeOf(bin_op.lhs);
     const src_ty = f.air.typeOf(bin_op.rhs);
-    const target = f.object.dg.module.getTarget();
     const writer = f.object.writer();
 
     try writer.writeAll("memcpy(");
@@ -6399,7 +6405,7 @@ fn airMemcpy(f: *Function, inst: Air.Inst.Index) !CValue {
     switch (dest_ty.ptrSize()) {
        .Slice => {
            const elem_ty = dest_ty.childType();
-            const elem_abi_size = elem_ty.abiSize(target);
+            const elem_abi_size = elem_ty.abiSize(mod);
            try f.writeCValueMember(writer, dest_ptr, .{ .identifier = "len" });
            if (elem_abi_size > 1) {
                try writer.print(" * {d});\n", .{elem_abi_size});
@@ -6410,7 +6416,7 @@ fn airMemcpy(f: *Function, inst: Air.Inst.Index) !CValue {
        .One => {
            const array_ty = dest_ty.childType();
            const elem_ty = array_ty.childType();
-            const elem_abi_size = elem_ty.abiSize(target);
+            const elem_abi_size = elem_ty.abiSize(mod);
            const len = array_ty.arrayLen() * elem_abi_size;
            try writer.print("{d});\n", .{len});
        },
@@ -6422,14 +6428,14 @@ fn airMemcpy(f: *Function, inst: Air.Inst.Index) !CValue {
 }
 
 fn airSetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue {
+    const mod = f.object.dg.module;
     const bin_op = f.air.instructions.items(.data)[inst].bin_op;
     const union_ptr = try f.resolveInst(bin_op.lhs);
     const new_tag = try f.resolveInst(bin_op.rhs);
     try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
 
-    const target = f.object.dg.module.getTarget();
     const union_ty = f.air.typeOf(bin_op.lhs).childType();
-    const layout = union_ty.unionGetLayout(target);
+    const layout = union_ty.unionGetLayout(mod);
     if (layout.tag_size == 0) return .none;
     const tag_ty = union_ty.unionTagTypeSafety().?;
 
@@ -6443,14 +6449,14 @@ fn airSetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue {
 }
 
 fn airGetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue {
+    const mod = f.object.dg.module;
     const ty_op = f.air.instructions.items(.data)[inst].ty_op;
 
     const operand = try f.resolveInst(ty_op.operand);
     try reap(f, inst, &.{ty_op.operand});
 
     const union_ty = f.air.typeOf(ty_op.operand);
-    const target = f.object.dg.module.getTarget();
-    const layout = union_ty.unionGetLayout(target);
+    const layout = union_ty.unionGetLayout(mod);
     if (layout.tag_size == 0) return .none;
 
     const inst_ty = f.air.typeOfIndex(inst);
@@ -6501,13 +6507,14 @@ fn airErrorName(f: *Function, inst: Air.Inst.Index) !CValue {
 }
 
 fn airSplat(f: *Function, inst: Air.Inst.Index) !CValue {
+    const mod = f.object.dg.module;
     const ty_op = f.air.instructions.items(.data)[inst].ty_op;
 
     const operand = try f.resolveInst(ty_op.operand);
     try reap(f, inst, &.{ty_op.operand});
 
     const inst_ty = f.air.typeOfIndex(inst);
-    const inst_scalar_ty = inst_ty.scalarType();
+    const inst_scalar_ty = inst_ty.scalarType(mod);
 
     const writer = f.object.writer();
     const local = try f.allocLocal(inst, inst_ty);
@@ -6555,6 +6562,7 @@ fn airSelect(f: *Function, inst: Air.Inst.Index) !CValue {
 }
 
 fn airShuffle(f: *Function, inst: Air.Inst.Index) !CValue {
+    const mod = f.object.dg.module;
     const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
     const extra = f.air.extraData(Air.Shuffle, ty_pl.payload).data;
 
@@ -6562,8 +6570,6 @@ fn airShuffle(f: *Function, inst: Air.Inst.Index) !CValue {
     const lhs = try f.resolveInst(extra.a);
     const rhs = try f.resolveInst(extra.b);
 
-    const module = f.object.dg.module;
-    const target = module.getTarget();
     const inst_ty = f.air.typeOfIndex(inst);
 
     const writer = f.object.writer();
@@ -6581,7 +6587,7 @@ fn airShuffle(f: *Function, inst: Air.Inst.Index) !CValue {
        try writer.writeAll("] = ");
 
        var buf: Value.ElemValueBuffer = undefined;
-        const mask_elem = mask.elemValueBuffer(module, index, &buf).toSignedInt(target);
+        const mask_elem = mask.elemValueBuffer(mod, index, &buf).toSignedInt(mod);
        var src_pl = Value.Payload.U64{
            .base = .{ .tag = .int_u64 },
            .data = @intCast(u64, mask_elem ^ mask_elem >> 63),
@@ -6597,16 +6603,17 @@ fn airShuffle(f: *Function, inst: Air.Inst.Index) !CValue {
 }
 
 fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue {
+    const mod = f.object.dg.module;
     const reduce = f.air.instructions.items(.data)[inst].reduce;
 
-    const target = f.object.dg.module.getTarget();
+    const target = mod.getTarget();
     const scalar_ty = f.air.typeOfIndex(inst);
     const operand = try f.resolveInst(reduce.operand);
     try reap(f, inst, &.{reduce.operand});
     const operand_ty = f.air.typeOf(reduce.operand);
     const writer = f.object.writer();
 
-    const use_operator = scalar_ty.bitSize(target) <= 64;
+    const use_operator = scalar_ty.bitSize(mod) <= 64;
     const op: union(enum) {
        const Func = struct { operation: []const u8, info: BuiltinInfo = .none };
        float_op: Func,
@@ -6617,28 +6624,28 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue {
        .And => if (use_operator) .{ .infix = " &= " } else .{ .builtin = .{ .operation = "and" } },
        .Or => if (use_operator) .{ .infix = " |= " } else .{ .builtin = .{ .operation = "or" } },
        .Xor => if (use_operator) .{ .infix = " ^= " } else .{ .builtin = .{ .operation = "xor" } },
-        .Min => switch (scalar_ty.zigTypeTag()) {
+        .Min => switch (scalar_ty.zigTypeTag(mod)) {
            .Int => if (use_operator) .{ .ternary = " < " } else .{
                .builtin = .{ .operation = "min" },
            },
            .Float => .{ .float_op = .{ .operation = "fmin" } },
            else => unreachable,
        },
-        .Max => switch (scalar_ty.zigTypeTag()) {
+        .Max => switch (scalar_ty.zigTypeTag(mod)) {
            .Int => if (use_operator) .{ .ternary = " > " } else .{
                .builtin = .{ .operation = "max" },
            },
            .Float => .{ .float_op = .{ .operation = "fmax" } },
            else => unreachable,
        },
-        .Add => switch (scalar_ty.zigTypeTag()) {
+        .Add => switch (scalar_ty.zigTypeTag(mod)) {
            .Int => if (use_operator) .{ .infix = " += " } else .{
                .builtin = .{ .operation = "addw", .info = .bits },
            },
            .Float => .{ .builtin = .{ .operation = "add" } },
            else => unreachable,
        },
-        .Mul => switch (scalar_ty.zigTypeTag()) {
+        .Mul => switch (scalar_ty.zigTypeTag(mod)) {
            .Int => if (use_operator) .{ .infix = " *= " } else .{
                .builtin = .{ .operation = "mulw", .info = .bits },
            },
@@ -6680,22 +6687,22 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue {
 
     try f.object.dg.renderValue(writer, scalar_ty, switch (reduce.operation) {
        .Or, .Xor, .Add => Value.zero,
-        .And => switch (scalar_ty.zigTypeTag()) {
+        .And => switch (scalar_ty.zigTypeTag(mod)) {
            .Bool => Value.one,
-            else => switch (scalar_ty.intInfo(target).signedness) {
-                .unsigned => try scalar_ty.maxInt(stack.get(), target),
+            else => switch (scalar_ty.intInfo(mod).signedness) {
+                .unsigned => try scalar_ty.maxInt(stack.get(), mod),
                .signed => Value.negative_one,
            },
        },
-        .Min => switch (scalar_ty.zigTypeTag()) {
+        .Min => switch (scalar_ty.zigTypeTag(mod)) {
            .Bool => Value.one,
-            .Int => try scalar_ty.maxInt(stack.get(), target),
+            .Int => try scalar_ty.maxInt(stack.get(), mod),
            .Float => try Value.floatToValue(std.math.nan(f128), stack.get(), scalar_ty, target),
            else => unreachable,
        },
-        .Max => switch (scalar_ty.zigTypeTag()) {
+        .Max => switch (scalar_ty.zigTypeTag(mod)) {
            .Bool => Value.zero,
-            .Int => try scalar_ty.minInt(stack.get(), target),
+            .Int => try scalar_ty.minInt(stack.get(), mod),
            .Float => try Value.floatToValue(std.math.nan(f128), stack.get(), scalar_ty, target),
            else => unreachable,
        },
@@ -6753,6 +6760,7 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue {
 }
 
 fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
+    const mod = f.object.dg.module;
     const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
     const inst_ty = f.air.typeOfIndex(inst);
     const len = @intCast(usize, inst_ty.arrayLen());
@@ -6770,11 +6778,9 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
        }
     }
 
-    const target = f.object.dg.module.getTarget();
-
     const writer = f.object.writer();
     const local = try f.allocLocal(inst, inst_ty);
-    switch (inst_ty.zigTypeTag()) {
+    switch (inst_ty.zigTypeTag(mod)) {
        .Array, .Vector => {
            const elem_ty = inst_ty.childType();
            const a = try Assignment.init(f, elem_ty);
@@ -6799,7 +6805,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
            .Auto, .Extern => for (resolved_elements, 0..) |element, field_i| {
                if (inst_ty.structFieldIsComptime(field_i)) continue;
                const field_ty = inst_ty.structFieldType(field_i);
-                if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue;
+                if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
 
                const a = try Assignment.start(f, writer, field_ty);
                try f.writeCValueMember(writer, local, if (inst_ty.isSimpleTuple())
                    .{ .field = field_i }
                else
                    .{ .identifier = inst_ty.structFieldName(field_i) });
@@ -6813,13 +6819,9 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
            .Packed => {
                try f.writeCValue(writer, local, .Other);
                try writer.writeAll(" = ");
-                const int_info = inst_ty.intInfo(target);
+                const int_info = inst_ty.intInfo(mod);
 
-                var bit_offset_ty_pl = Type.Payload.Bits{
-                    .base = .{ .tag = .int_unsigned },
-                    .data = Type.smallestUnsignedBits(int_info.bits - 1),
-                };
-                const bit_offset_ty = Type.initPayload(&bit_offset_ty_pl.base);
+                const bit_offset_ty = try mod.intType(.unsigned, Type.smallestUnsignedBits(int_info.bits - 1));
 
                var bit_offset_val_pl: Value.Payload.U64 = .{ .base = .{ .tag = .int_u64 }, .data = 0 };
                const bit_offset_val = Value.initPayload(&bit_offset_val_pl.base);
@@ -6828,7 +6830,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
                for (0..elements.len) |field_i| {
                    if (inst_ty.structFieldIsComptime(field_i)) continue;
                    const field_ty = inst_ty.structFieldType(field_i);
-                    if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue;
+                    if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
 
                    if (!empty) {
                        try writer.writeAll("zig_or_");
@@ -6841,7 +6843,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
                for (resolved_elements, 0..) |element, field_i| {
                    if (inst_ty.structFieldIsComptime(field_i)) continue;
                    const field_ty = inst_ty.structFieldType(field_i);
-                    if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue;
+                    if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
 
                    if (!empty) try writer.writeAll(", ");
                    // TODO: Skip this entire shift if val is 0?
@@ -6849,13 +6851,13 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
                    try f.object.dg.renderTypeForBuiltinFnName(writer, inst_ty);
                    try writer.writeByte('(');
 
-                    if (inst_ty.isAbiInt() and (field_ty.isAbiInt() or field_ty.isPtrAtRuntime())) {
+                    if (inst_ty.isAbiInt(mod) and (field_ty.isAbiInt(mod) or field_ty.isPtrAtRuntime(mod))) {
                        try f.renderIntCast(writer, inst_ty, element, .{}, field_ty, .FunctionArgument);
                    } else {
                        try writer.writeByte('(');
                        try f.renderType(writer, inst_ty);
                        try writer.writeByte(')');
-                        if (field_ty.isPtrAtRuntime()) {
+                        if (field_ty.isPtrAtRuntime(mod)) {
                            try writer.writeByte('(');
                            try f.renderType(writer, switch (int_info.signedness) {
                                .unsigned => Type.usize,
@@ -6872,7 +6874,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
                    try writer.writeByte(')');
                    if (!empty) try writer.writeByte(')');
 
-                    bit_offset_val_pl.data += field_ty.bitSize(target);
+                    bit_offset_val_pl.data += field_ty.bitSize(mod);
                    empty = false;
                }
 
@@ -6886,11 +6888,11 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
 }
 
 fn airUnionInit(f: *Function, inst: Air.Inst.Index) !CValue {
+    const mod = f.object.dg.module;
     const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
     const extra = f.air.extraData(Air.UnionInit, ty_pl.payload).data;
 
     const union_ty = f.air.typeOfIndex(inst);
-    const target = f.object.dg.module.getTarget();
     const union_obj = union_ty.cast(Type.Payload.Union).?.data;
     const field_name = union_obj.fields.keys()[extra.field_index];
     const payload_ty = f.air.typeOf(extra.init);
@@ -6908,7 +6910,7 @@ fn airUnionInit(f: *Function, inst: Air.Inst.Index) !CValue {
     }
 
     const field: CValue = if (union_ty.unionTagTypeSafety()) |tag_ty| field: {
-        const layout = union_ty.unionGetLayout(target);
+        const layout = union_ty.unionGetLayout(mod);
        if (layout.tag_size != 0) {
            const field_index = tag_ty.enumFieldIndex(field_name).?;
 
@@ -6991,13 +6993,14 @@ fn airWasmMemoryGrow(f: *Function, inst: Air.Inst.Index) !CValue {
 }
 
 fn airFloatNeg(f: *Function, inst: Air.Inst.Index) !CValue {
+    const mod = f.object.dg.module;
     const un_op = f.air.instructions.items(.data)[inst].un_op;
 
     const operand = try f.resolveInst(un_op);
     try reap(f, inst, &.{un_op});
 
     const operand_ty = f.air.typeOf(un_op);
-    const scalar_ty = operand_ty.scalarType();
+    const scalar_ty = operand_ty.scalarType(mod);
 
     const writer = f.object.writer();
     const local = try f.allocLocal(inst, operand_ty);
@@ -7016,13 +7019,14 @@ fn airFloatNeg(f: *Function, inst: Air.Inst.Index) !CValue {
 }
 
 fn airUnFloatOp(f: *Function, inst: Air.Inst.Index, operation: []const u8) !CValue {
+    const mod = f.object.dg.module;
     const un_op = f.air.instructions.items(.data)[inst].un_op;
 
     const operand = try f.resolveInst(un_op);
     try reap(f, inst, &.{un_op});
 
     const inst_ty = f.air.typeOfIndex(inst);
-    const inst_scalar_ty = inst_ty.scalarType();
+    const inst_scalar_ty = inst_ty.scalarType(mod);
 
     const writer = f.object.writer();
     const local = try f.allocLocal(inst, inst_ty);
@@ -7043,6 +7047,7 @@ fn airUnFloatOp(f: *Function, inst: Air.Inst.Index, operation: []const u8) !CVal
 }
 
 fn airBinFloatOp(f: *Function, inst: Air.Inst.Index, operation: []const u8) !CValue {
+    const mod = f.object.dg.module;
     const bin_op = f.air.instructions.items(.data)[inst].bin_op;
 
     const lhs = try f.resolveInst(bin_op.lhs);
@@ -7050,7 +7055,7 @@ fn airBinFloatOp(f: *Function, inst: Air.Inst.Index, operation: []const u8) !CVa
     try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
 
     const inst_ty = f.air.typeOfIndex(inst);
-    const inst_scalar_ty = inst_ty.scalarType();
+    const inst_scalar_ty = inst_ty.scalarType(mod);
 
     const writer = f.object.writer();
     const local = try f.allocLocal(inst, inst_ty);
@@ -7074,6 +7079,7 @@ fn airBinFloatOp(f: *Function, inst: Air.Inst.Index, operation: []const u8) !CVa
 }
 
 fn airMulAdd(f: *Function, inst: Air.Inst.Index) !CValue {
+    const mod = f.object.dg.module;
     const pl_op = f.air.instructions.items(.data)[inst].pl_op;
     const bin_op = f.air.extraData(Air.Bin, pl_op.payload).data;
 
@@ -7083,7 +7089,7 @@ fn airMulAdd(f: *Function, inst: Air.Inst.Index) !CValue {
     try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs, pl_op.operand });
 
     const inst_ty = f.air.typeOfIndex(inst);
-    const inst_scalar_ty = inst_ty.scalarType();
+    const inst_scalar_ty = inst_ty.scalarType(mod);
 
     const writer = f.object.writer();
     const local = try f.allocLocal(inst, inst_ty);
@@ -7279,8 +7285,9 @@ fn signAbbrev(signedness: std.builtin.Signedness) u8 {
     };
 }
 
-fn compilerRtAbbrev(ty: Type, target: std.Target) []const u8 {
-    return if (ty.isInt()) switch (ty.intInfo(target).bits) {
+fn compilerRtAbbrev(ty: Type, mod: *Module) []const u8 {
+    const target = mod.getTarget();
+    return if (ty.isInt(mod)) switch (ty.intInfo(mod).bits) {
        1...32 => "si",
        33...64 => "di",
        65...128 => "ti",
@@ -7407,7 +7414,7 @@ fn undefPattern(comptime IntType: type) IntType {
 
 const FormatIntLiteralContext = struct {
     dg: *DeclGen,
-    int_info: std.builtin.Type.Int,
+    int_info: InternPool.Key.IntType,
     kind: CType.Kind,
     cty: CType,
     val: Value,
@@ -7418,7 +7425,8 @@ fn formatIntLiteral(
     options: std.fmt.FormatOptions,
     writer: anytype,
 ) @TypeOf(writer).Error!void {
-    const target = data.dg.module.getTarget();
+    const mod = data.dg.module;
+    const target = mod.getTarget();
 
     const ExpectedContents = struct {
        const base = 10;
@@ -7449,7 +7457,7 @@ fn formatIntLiteral(
        };
        undef_int.truncate(undef_int.toConst(), data.int_info.signedness, data.int_info.bits);
        break :blk undef_int.toConst();
-    } else data.val.toBigInt(&int_buf, target);
+    } else data.val.toBigInt(&int_buf, mod);
     assert(int.fitsInTwosComp(data.int_info.signedness, data.int_info.bits));
 
     const c_bits = @intCast(usize, data.cty.byteSize(data.dg.ctypes.set, target) * 8);
@@ -7684,7 +7692,8 @@ const Vectorize = struct {
     index: CValue = .none,
 
     pub fn start(f: *Function, inst: Air.Inst.Index, writer: anytype, ty: Type) !Vectorize {
-        return if (ty.zigTypeTag() == .Vector) index: {
+        const mod = f.object.dg.module;
+        return if (ty.zigTypeTag(mod) == .Vector) index: {
            var len_pl = Value.Payload.U64{ .base = .{ .tag = .int_u64 }, .data = ty.vectorLen() };
 
            const local = try f.allocLocal(inst, Type.usize);
@@ -7727,10 +7736,10 @@ const LowerFnRetTyBuffer = struct {
     values: [1]Value,
     payload: Type.Payload.AnonStruct,
 };
-fn lowerFnRetTy(ret_ty: Type, buffer: *LowerFnRetTyBuffer, target: std.Target) Type {
-    if (ret_ty.zigTypeTag() == .NoReturn) return Type.initTag(.noreturn);
+fn lowerFnRetTy(ret_ty: Type, buffer: *LowerFnRetTyBuffer, mod: *const Module) Type {
+    if (ret_ty.zigTypeTag(mod) == .NoReturn) return Type.initTag(.noreturn);
 
-    if (lowersToArray(ret_ty, target)) {
+    if (lowersToArray(ret_ty, mod)) {
        buffer.names = [1][]const u8{"array"};
        buffer.types = [1]Type{ret_ty};
        buffer.values = [1]Value{Value.initTag(.unreachable_value)};
@@ -7742,13 +7751,13 @@ fn lowerFnRetTy(ret_ty: Type, buffer: *LowerFnRetTyBuffer, target: std.Target) T
        return Type.initPayload(&buffer.payload.base);
     }
 
-    return if (ret_ty.hasRuntimeBitsIgnoreComptime()) ret_ty else Type.void;
+    return if (ret_ty.hasRuntimeBitsIgnoreComptime(mod)) ret_ty else Type.void;
 }
 
-fn lowersToArray(ty: Type, target: std.Target) bool {
-    return switch (ty.zigTypeTag()) {
+fn lowersToArray(ty: Type, mod: *const Module) bool {
+    return switch (ty.zigTypeTag(mod)) {
        .Array, .Vector => return true,
-        else => return ty.isAbiInt() and toCIntBits(@intCast(u32, ty.bitSize(target))) == null,
+        else => return ty.isAbiInt(mod) and toCIntBits(@intCast(u32, ty.bitSize(mod))) == null,
     };
 }
 
diff --git a/src/codegen/c/type.zig b/src/codegen/c/type.zig
index 6116d070e6..5064b84b1d 100644
--- a/src/codegen/c/type.zig
+++ b/src/codegen/c/type.zig
@@ -292,19 +292,19 @@ pub const CType = extern union {
                .abi = std.math.log2_int(u32, abi_alignment),
            };
        }
-        pub fn abiAlign(ty: Type, target: Target) AlignAs {
-            const abi_align = ty.abiAlignment(target);
+        pub fn abiAlign(ty: Type, mod: *const Module) AlignAs {
+            const abi_align = ty.abiAlignment(mod);
            return init(abi_align, abi_align);
        }
-        pub fn fieldAlign(struct_ty: Type, field_i: usize, target: Target) AlignAs {
+        pub fn fieldAlign(struct_ty: Type, field_i: usize, mod: *const Module) AlignAs {
            return init(
-                struct_ty.structFieldAlign(field_i, target),
-                struct_ty.structFieldType(field_i).abiAlignment(target),
+                struct_ty.structFieldAlign(field_i, mod),
+                struct_ty.structFieldType(field_i).abiAlignment(mod),
            );
        }
-        pub fn unionPayloadAlign(union_ty: Type, target: Target) AlignAs {
+        pub fn unionPayloadAlign(union_ty: Type, mod: *const Module) AlignAs {
            const union_obj = union_ty.cast(Type.Payload.Union).?.data;
-            const union_payload_align = union_obj.abiAlignment(target, false);
+            const union_payload_align = union_obj.abiAlignment(mod, false);
            return init(union_payload_align, union_payload_align);
        }
 
@@ -344,8 +344,8 @@ pub const CType = extern union {
            return self.map.entries.items(.hash)[index - Tag.no_payload_count];
        }
 
-        pub fn typeToIndex(self: Set, ty: Type, target: Target, kind: Kind) ?Index {
-            const lookup = Convert.Lookup{ .imm = .{ .set = &self, .target = target } };
+        pub fn typeToIndex(self: Set, ty: Type, mod: *Module, kind: Kind) ?Index {
+            const lookup = Convert.Lookup{ .imm = .{ .set = &self, .mod = mod } };
 
            var convert: Convert = undefined;
            convert.initType(ty, kind, lookup) catch unreachable;
@@ -405,7 +405,7 @@
            );
            if (!gop.found_existing) {
                errdefer _ = self.set.map.pop();
-                gop.key_ptr.* = try createFromConvert(self, ty, lookup.getTarget(), kind, convert);
+                gop.key_ptr.* = try createFromConvert(self, ty, lookup.getModule(), kind, convert);
            }
            if (std.debug.runtime_safety) {
                const adapter = TypeAdapter64{
@@ -1236,10 +1236,10 @@ pub const CType = extern union {
        pub const Lookup = union(enum) {
-            fail: Target,
+            fail: *Module,
            imm: struct {
                set: *const Store.Set,
-                target: Target,
+                mod: *Module,
            },
            mut: struct {
                promoted: *Store.Promoted,
@@ -1254,10 +1258,14 @@
            }
 
            pub fn getTarget(self: @This()) Target {
+                return self.getModule().getTarget();
+            }
+
+            pub fn getModule(self: @This()) *Module {
                return switch (self) {
-                    .fail => |target| target,
-                    .imm => |imm| imm.target,
-                    .mut => |mut| mut.mod.getTarget(),
+                    .fail => |mod| mod,
+                    .imm => |imm| imm.mod,
+                    .mut => |mut| mut.mod,
                };
            }
 
@@ -1272,7 +1276,7 @@
            pub fn typeToIndex(self: @This(), ty: Type, kind: Kind) !?Index {
                return switch (self) {
                    .fail => null,
-                    .imm => |imm| imm.set.typeToIndex(ty, imm.target, kind),
+                    .imm => |imm| imm.set.typeToIndex(ty, imm.mod, kind),
                    .mut => |mut| try mut.promoted.typeToIndex(ty, mut.mod, kind),
                };
            }
@@ -1284,7 +1288,7 @@ pub
const CType = extern union { pub fn freeze(self: @This()) @This() { return switch (self) { .fail, .imm => self, - .mut => |mut| .{ .imm = .{ .set = &mut.promoted.set, .target = self.getTarget() } }, + .mut => |mut| .{ .imm = .{ .set = &mut.promoted.set, .mod = mut.mod } }, }; } }; @@ -1338,7 +1342,7 @@ pub const CType = extern union { self.storage.anon.fields[0] = .{ .name = "array", .type = array_idx, - .alignas = AlignAs.abiAlign(ty, lookup.getTarget()), + .alignas = AlignAs.abiAlign(ty, lookup.getModule()), }; self.initAnon(kind, fwd_idx, 1); } else self.init(switch (kind) { @@ -1350,12 +1354,12 @@ pub const CType = extern union { } pub fn initType(self: *@This(), ty: Type, kind: Kind, lookup: Lookup) !void { - const target = lookup.getTarget(); + const mod = lookup.getModule(); self.* = undefined; - if (!ty.isFnOrHasRuntimeBitsIgnoreComptime()) + if (!ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) self.init(.void) - else if (ty.isAbiInt()) switch (ty.tag()) { + else if (ty.isAbiInt(mod)) switch (ty.tag()) { .usize => self.init(.uintptr_t), .isize => self.init(.intptr_t), .c_char => self.init(.char), @@ -1367,13 +1371,13 @@ pub const CType = extern union { .c_ulong => self.init(.@"unsigned long"), .c_longlong => self.init(.@"long long"), .c_ulonglong => self.init(.@"unsigned long long"), - else => switch (tagFromIntInfo(ty.intInfo(target))) { + else => switch (tagFromIntInfo(ty.intInfo(mod))) { .void => unreachable, else => |t| self.init(t), .array => switch (kind) { .forward, .complete, .global => { - const abi_size = ty.abiSize(target); - const abi_align = ty.abiAlignment(target); + const abi_size = ty.abiSize(mod); + const abi_align = ty.abiAlignment(mod); self.storage = .{ .seq = .{ .base = .{ .tag = .array }, .data = .{ .len = @divExact(abi_size, abi_align), .elem_type = tagFromIntInfo(.{ @@ -1389,7 +1393,7 @@ pub const CType = extern union { .payload => unreachable, }, }, - } else switch (ty.zigTypeTag()) { + } else switch (ty.zigTypeTag(mod)) { .Frame => unreachable, .AnyFrame => unreachable, @@ -1434,12 +1438,12 @@ pub const CType = extern union { self.storage.anon.fields[0] = .{ .name = "ptr", .type = ptr_idx, - .alignas = AlignAs.abiAlign(ptr_ty, target), + .alignas = AlignAs.abiAlign(ptr_ty, mod), }; self.storage.anon.fields[1] = .{ .name = "len", .type = Tag.uintptr_t.toIndex(), - .alignas = AlignAs.abiAlign(Type.usize, target), + .alignas = AlignAs.abiAlign(Type.usize, mod), }; self.initAnon(kind, fwd_idx, 2); } else self.init(switch (kind) { @@ -1462,12 +1466,8 @@ pub const CType = extern union { }, }; - var host_int_pl = Type.Payload.Bits{ - .base = .{ .tag = .int_unsigned }, - .data = info.host_size * 8, - }; const pointee_ty = if (info.host_size > 0 and info.vector_index == .none) - Type.initPayload(&host_int_pl.base) + try mod.intType(.unsigned, info.host_size * 8) else info.pointee_type; @@ -1490,11 +1490,9 @@ pub const CType = extern union { if (ty.castTag(.@"struct")) |struct_obj| { try self.initType(struct_obj.data.backing_int_ty, kind, lookup); } else { - var buf: Type.Payload.Bits = .{ - .base = .{ .tag = .int_unsigned }, - .data = @intCast(u16, ty.bitSize(target)), - }; - try self.initType(Type.initPayload(&buf.base), kind, lookup); + const bits = @intCast(u16, ty.bitSize(mod)); + const int_ty = try mod.intType(.unsigned, bits); + try self.initType(int_ty, kind, lookup); } } else if (ty.isTupleOrAnonStruct()) { if (lookup.isMutable()) { @@ -1505,7 +1503,7 @@ pub const CType = extern union { }) |field_i| { const field_ty = ty.structFieldType(field_i); if 
((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i)) or - !field_ty.hasRuntimeBitsIgnoreComptime()) continue; + !field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; _ = try lookup.typeToIndex(field_ty, switch (kind) { .forward, .forward_parameter => .forward, .complete, .parameter => .complete, @@ -1555,7 +1553,7 @@ pub const CType = extern union { self.storage.anon.fields[field_count] = .{ .name = "payload", .type = payload_idx.?, - .alignas = AlignAs.unionPayloadAlign(ty, target), + .alignas = AlignAs.unionPayloadAlign(ty, mod), }; field_count += 1; } @@ -1563,7 +1561,7 @@ pub const CType = extern union { self.storage.anon.fields[field_count] = .{ .name = "tag", .type = tag_idx.?, - .alignas = AlignAs.abiAlign(tag_ty.?, target), + .alignas = AlignAs.abiAlign(tag_ty.?, mod), }; field_count += 1; } @@ -1576,7 +1574,7 @@ pub const CType = extern union { } }; self.value = .{ .cty = initPayload(&self.storage.anon.pl.complete) }; } else self.init(.@"struct"); - } else if (kind == .payload and ty.unionHasAllZeroBitFieldTypes()) { + } else if (kind == .payload and ty.unionHasAllZeroBitFieldTypes(mod)) { self.init(.void); } else { var is_packed = false; @@ -1586,9 +1584,9 @@ pub const CType = extern union { else => unreachable, }) |field_i| { const field_ty = ty.structFieldType(field_i); - if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue; + if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; - const field_align = AlignAs.fieldAlign(ty, field_i, target); + const field_align = AlignAs.fieldAlign(ty, field_i, mod); if (field_align.@"align" < field_align.abi) { is_packed = true; if (!lookup.isMutable()) break; @@ -1643,8 +1641,8 @@ pub const CType = extern union { .Optional => { var buf: Type.Payload.ElemType = undefined; const payload_ty = ty.optionalChild(&buf); - if (payload_ty.hasRuntimeBitsIgnoreComptime()) { - if (ty.optionalReprIsPayload()) { + if (payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (ty.optionalReprIsPayload(mod)) { try self.initType(payload_ty, kind, lookup); } else if (switch (kind) { .forward, .forward_parameter => @as(Index, undefined), @@ -1661,12 +1659,12 @@ pub const CType = extern union { self.storage.anon.fields[0] = .{ .name = "payload", .type = payload_idx, - .alignas = AlignAs.abiAlign(payload_ty, target), + .alignas = AlignAs.abiAlign(payload_ty, mod), }; self.storage.anon.fields[1] = .{ .name = "is_null", .type = Tag.bool.toIndex(), - .alignas = AlignAs.abiAlign(Type.bool, target), + .alignas = AlignAs.abiAlign(Type.bool, mod), }; self.initAnon(kind, fwd_idx, 2); } else self.init(switch (kind) { @@ -1699,12 +1697,12 @@ pub const CType = extern union { self.storage.anon.fields[0] = .{ .name = "payload", .type = payload_idx, - .alignas = AlignAs.abiAlign(payload_ty, target), + .alignas = AlignAs.abiAlign(payload_ty, mod), }; self.storage.anon.fields[1] = .{ .name = "error", .type = error_idx, - .alignas = AlignAs.abiAlign(error_ty, target), + .alignas = AlignAs.abiAlign(error_ty, mod), }; self.initAnon(kind, fwd_idx, 2); } else self.init(switch (kind) { @@ -1733,7 +1731,7 @@ pub const CType = extern union { }; _ = try lookup.typeToIndex(info.return_type, param_kind); for (info.param_types) |param_type| { - if (!param_type.hasRuntimeBitsIgnoreComptime()) continue; + if (!param_type.hasRuntimeBitsIgnoreComptime(mod)) continue; _ = try lookup.typeToIndex(param_type, param_kind); } } @@ -1900,16 +1898,16 @@ pub const CType = extern union { } } - fn createFromType(store: *Store.Promoted, ty: Type, target: Target, kind: Kind) !CType { + fn 
createFromType(store: *Store.Promoted, ty: Type, mod: *const Module, kind: Kind) !CType { var convert: Convert = undefined; - try convert.initType(ty, kind, .{ .imm = .{ .set = &store.set, .target = target } }); - return createFromConvert(store, ty, target, kind, &convert); + try convert.initType(ty, kind, .{ .imm = .{ .set = &store.set, .mod = mod } }); + return createFromConvert(store, ty, mod, kind, &convert); } fn createFromConvert( store: *Store.Promoted, ty: Type, - target: Target, + mod: *Module, kind: Kind, convert: Convert, ) !CType { @@ -1930,7 +1928,7 @@ pub const CType = extern union { .packed_struct, .packed_union, => { - const zig_ty_tag = ty.zigTypeTag(); + const zig_ty_tag = ty.zigTypeTag(mod); const fields_len = switch (zig_ty_tag) { .Struct => ty.structFieldCount(), .Union => ty.unionFields().count(), @@ -1941,7 +1939,7 @@ pub const CType = extern union { for (0..fields_len) |field_i| { const field_ty = ty.structFieldType(field_i); if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i)) or - !field_ty.hasRuntimeBitsIgnoreComptime()) continue; + !field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; c_fields_len += 1; } @@ -1950,7 +1948,7 @@ pub const CType = extern union { for (0..fields_len) |field_i| { const field_ty = ty.structFieldType(field_i); if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i)) or - !field_ty.hasRuntimeBitsIgnoreComptime()) continue; + !field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; defer c_field_i += 1; fields_pl[c_field_i] = .{ @@ -1962,12 +1960,12 @@ pub const CType = extern union { .Union => ty.unionFields().keys()[field_i], else => unreachable, }), - .type = store.set.typeToIndex(field_ty, target, switch (kind) { + .type = store.set.typeToIndex(field_ty, mod, switch (kind) { .forward, .forward_parameter => .forward, .complete, .parameter, .payload => .complete, .global => .global, }).?, - .alignas = AlignAs.fieldAlign(ty, field_i, target), + .alignas = AlignAs.fieldAlign(ty, field_i, mod), }; } @@ -2004,7 +2002,7 @@ pub const CType = extern union { const struct_pl = try arena.create(Payload.Aggregate); struct_pl.* = .{ .base = .{ .tag = t }, .data = .{ .fields = fields_pl, - .fwd_decl = store.set.typeToIndex(ty, target, .forward).?, + .fwd_decl = store.set.typeToIndex(ty, mod, .forward).?, } }; return initPayload(struct_pl); }, @@ -2026,21 +2024,21 @@ pub const CType = extern union { var c_params_len: usize = 0; for (info.param_types) |param_type| { - if (!param_type.hasRuntimeBitsIgnoreComptime()) continue; + if (!param_type.hasRuntimeBitsIgnoreComptime(mod)) continue; c_params_len += 1; } const params_pl = try arena.alloc(Index, c_params_len); var c_param_i: usize = 0; for (info.param_types) |param_type| { - if (!param_type.hasRuntimeBitsIgnoreComptime()) continue; - params_pl[c_param_i] = store.set.typeToIndex(param_type, target, param_kind).?; + if (!param_type.hasRuntimeBitsIgnoreComptime(mod)) continue; + params_pl[c_param_i] = store.set.typeToIndex(param_type, mod, param_kind).?; c_param_i += 1; } const fn_pl = try arena.create(Payload.Function); fn_pl.* = .{ .base = .{ .tag = t }, .data = .{ - .return_type = store.set.typeToIndex(info.return_type, target, param_kind).?, + .return_type = store.set.typeToIndex(info.return_type, mod, param_kind).?, .param_types = params_pl, } }; return initPayload(fn_pl); @@ -2067,12 +2065,12 @@ pub const CType = extern union { } pub fn eql(self: @This(), ty: Type, cty: CType) bool { + const mod = self.lookup.getModule(); switch (self.convert.value) { .cty => |c| return 
c.eql(cty), .tag => |t| { if (t != cty.tag()) return false; - const target = self.lookup.getTarget(); switch (t) { .fwd_anon_struct, .fwd_anon_union, @@ -2084,7 +2082,7 @@ pub const CType = extern union { ]u8 = undefined; const c_fields = cty.cast(Payload.Fields).?.data; - const zig_ty_tag = ty.zigTypeTag(); + const zig_ty_tag = ty.zigTypeTag(mod); var c_field_i: usize = 0; for (0..switch (zig_ty_tag) { .Struct => ty.structFieldCount(), @@ -2093,7 +2091,7 @@ pub const CType = extern union { }) |field_i| { const field_ty = ty.structFieldType(field_i); if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i)) or - !field_ty.hasRuntimeBitsIgnoreComptime()) continue; + !field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; defer c_field_i += 1; const c_field = &c_fields[c_field_i]; @@ -2113,7 +2111,7 @@ pub const CType = extern union { else => unreachable, }, mem.span(c_field.name), - ) or AlignAs.fieldAlign(ty, field_i, target).@"align" != + ) or AlignAs.fieldAlign(ty, field_i, mod).@"align" != c_field.alignas.@"align") return false; } return true; @@ -2146,7 +2144,7 @@ pub const CType = extern union { .function, .varargs_function, => { - if (ty.zigTypeTag() != .Fn) return false; + if (ty.zigTypeTag(mod) != .Fn) return false; const info = ty.fnInfo(); assert(!info.is_generic); @@ -2162,7 +2160,7 @@ pub const CType = extern union { var c_param_i: usize = 0; for (info.param_types) |param_type| { - if (!param_type.hasRuntimeBitsIgnoreComptime()) continue; + if (!param_type.hasRuntimeBitsIgnoreComptime(mod)) continue; if (c_param_i >= data.param_types.len) return false; const param_cty = data.param_types[c_param_i]; @@ -2202,7 +2200,7 @@ pub const CType = extern union { .tag => |t| { autoHash(hasher, t); - const target = self.lookup.getTarget(); + const mod = self.lookup.getModule(); switch (t) { .fwd_anon_struct, .fwd_anon_union, @@ -2211,15 +2209,15 @@ pub const CType = extern union { std.fmt.count("f{}", .{std.math.maxInt(usize)}) ]u8 = undefined; - const zig_ty_tag = ty.zigTypeTag(); - for (0..switch (ty.zigTypeTag()) { + const zig_ty_tag = ty.zigTypeTag(mod); + for (0..switch (ty.zigTypeTag(mod)) { .Struct => ty.structFieldCount(), .Union => ty.unionFields().count(), else => unreachable, }) |field_i| { const field_ty = ty.structFieldType(field_i); if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i)) or - !field_ty.hasRuntimeBitsIgnoreComptime()) continue; + !field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; self.updateHasherRecurse(hasher, field_ty, switch (self.kind) { .forward, .forward_parameter => .forward, @@ -2234,7 +2232,7 @@ pub const CType = extern union { .Union => ty.unionFields().keys()[field_i], else => unreachable, }); - autoHash(hasher, AlignAs.fieldAlign(ty, field_i, target).@"align"); + autoHash(hasher, AlignAs.fieldAlign(ty, field_i, mod).@"align"); } }, @@ -2271,7 +2269,7 @@ pub const CType = extern union { self.updateHasherRecurse(hasher, info.return_type, param_kind); for (info.param_types) |param_type| { - if (!param_type.hasRuntimeBitsIgnoreComptime()) continue; + if (!param_type.hasRuntimeBitsIgnoreComptime(mod)) continue; self.updateHasherRecurse(hasher, param_type, param_kind); } }, diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index beb2309455..c3d3da0d32 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -598,7 +598,7 @@ pub const Object = struct { }; const llvm_slice_ty = self.context.structType(&type_fields, type_fields.len, .False); const slice_ty = Type.initTag(.const_slice_u8_sentinel_0); - const 
slice_alignment = slice_ty.abiAlignment(target); + const slice_alignment = slice_ty.abiAlignment(mod); const error_name_list = mod.error_name_list.items; const llvm_errors = try mod.gpa.alloc(*llvm.Value, error_name_list.len); @@ -880,28 +880,28 @@ pub const Object = struct { pub fn updateFunc( o: *Object, - module: *Module, + mod: *Module, func: *Module.Fn, air: Air, liveness: Liveness, ) !void { const decl_index = func.owner_decl; - const decl = module.declPtr(decl_index); - const target = module.getTarget(); + const decl = mod.declPtr(decl_index); + const target = mod.getTarget(); var dg: DeclGen = .{ .context = o.context, .object = o, - .module = module, + .module = mod, .decl_index = decl_index, .decl = decl, .err_msg = null, - .gpa = module.gpa, + .gpa = mod.gpa, }; const llvm_func = try dg.resolveLlvmFunction(decl_index); - if (module.align_stack_fns.get(func)) |align_info| { + if (mod.align_stack_fns.get(func)) |align_info| { dg.addFnAttrInt(llvm_func, "alignstack", align_info.alignment); dg.addFnAttr(llvm_func, "noinline"); } else { @@ -922,7 +922,7 @@ pub const Object = struct { } // TODO: disable this if safety is off for the function scope - const ssp_buf_size = module.comp.bin_file.options.stack_protector; + const ssp_buf_size = mod.comp.bin_file.options.stack_protector; if (ssp_buf_size != 0) { var buf: [12]u8 = undefined; const arg = std.fmt.bufPrintZ(&buf, "{d}", .{ssp_buf_size}) catch unreachable; @@ -931,7 +931,7 @@ pub const Object = struct { } // TODO: disable this if safety is off for the function scope - if (module.comp.bin_file.options.stack_check) { + if (mod.comp.bin_file.options.stack_check) { dg.addFnAttrString(llvm_func, "probe-stack", "__zig_probe_stack"); } else if (target.os.tag == .uefi) { dg.addFnAttrString(llvm_func, "no-stack-arg-probe", ""); @@ -954,17 +954,17 @@ pub const Object = struct { // This gets the LLVM values from the function and stores them in `dg.args`. 
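The pattern that repeats through this whole patch: type and value queries that used to take a std.Target now take the owning *Module and derive the target on demand, so the same entry point can also reach module-level state (such as the intern pool) when a query needs it. A minimal sketch of the shape, with a stand-in Module rather than the compiler's real struct:

    const std = @import("std");

    // Stand-in for src/Module.zig; only the part the pattern relies on.
    const Module = struct {
        target: std.Target,

        pub fn getTarget(mod: *const Module) std.Target {
            return mod.target;
        }
    };

    // Before: fn ptrAlignBytes(target: std.Target) u64
    // After: the query takes the module; target-only data stays reachable.
    fn ptrAlignBytes(mod: *const Module) u64 {
        const target = mod.getTarget();
        return target.ptrBitWidth() / 8;
    }

Callers that only had a target before now thread the module through instead, which is why almost every hunk below is a mechanical `target` to `mod` substitution.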
@@ -954,17 +954,17 @@ pub const Object = struct {
         // This gets the LLVM values from the function and stores them in `dg.args`.
         const fn_info = decl.ty.fnInfo();
-        const sret = firstParamSRet(fn_info, target);
+        const sret = firstParamSRet(fn_info, mod);
         const ret_ptr = if (sret) llvm_func.getParam(0) else null;
         const gpa = dg.gpa;

-        if (ccAbiPromoteInt(fn_info.cc, target, fn_info.return_type)) |s| switch (s) {
+        if (ccAbiPromoteInt(fn_info.cc, mod, fn_info.return_type)) |s| switch (s) {
             .signed => dg.addAttr(llvm_func, 0, "signext"),
             .unsigned => dg.addAttr(llvm_func, 0, "zeroext"),
         };

-        const err_return_tracing = fn_info.return_type.isError() and
-            module.comp.bin_file.options.error_return_tracing;
+        const err_return_tracing = fn_info.return_type.isError(mod) and
+            mod.comp.bin_file.options.error_return_tracing;

         const err_ret_trace = if (err_return_tracing)
             llvm_func.getParam(@boolToInt(ret_ptr != null))
@@ -989,8 +989,8 @@ pub const Object = struct {
                     const param = llvm_func.getParam(llvm_arg_i);
                     try args.ensureUnusedCapacity(1);

-                    if (isByRef(param_ty)) {
-                        const alignment = param_ty.abiAlignment(target);
+                    if (isByRef(param_ty, mod)) {
+                        const alignment = param_ty.abiAlignment(mod);
                         const param_llvm_ty = param.typeOf();
                         const arg_ptr = buildAllocaInner(dg.context, builder, llvm_func, false, param_llvm_ty, alignment, target);
                         const store_inst = builder.buildStore(param, arg_ptr);
@@ -1007,14 +1007,14 @@ pub const Object = struct {
                     const param_ty = fn_info.param_types[it.zig_index - 1];
                     const param_llvm_ty = try dg.lowerType(param_ty);
                     const param = llvm_func.getParam(llvm_arg_i);
-                    const alignment = param_ty.abiAlignment(target);
+                    const alignment = param_ty.abiAlignment(mod);

                     dg.addByRefParamAttrs(llvm_func, llvm_arg_i, alignment, it.byval_attr, param_llvm_ty);
                     llvm_arg_i += 1;

                     try args.ensureUnusedCapacity(1);

-                    if (isByRef(param_ty)) {
+                    if (isByRef(param_ty, mod)) {
                         args.appendAssumeCapacity(param);
                     } else {
                         const load_inst = builder.buildLoad(param_llvm_ty, param, "");
@@ -1026,14 +1026,14 @@ pub const Object = struct {
                     const param_ty = fn_info.param_types[it.zig_index - 1];
                     const param_llvm_ty = try dg.lowerType(param_ty);
                     const param = llvm_func.getParam(llvm_arg_i);
-                    const alignment = param_ty.abiAlignment(target);
+                    const alignment = param_ty.abiAlignment(mod);

                     dg.addArgAttr(llvm_func, llvm_arg_i, "noundef");
                     llvm_arg_i += 1;

                     try args.ensureUnusedCapacity(1);

-                    if (isByRef(param_ty)) {
+                    if (isByRef(param_ty, mod)) {
                         args.appendAssumeCapacity(param);
                     } else {
                         const load_inst = builder.buildLoad(param_llvm_ty, param, "");
@@ -1048,10 +1048,10 @@ pub const Object = struct {
                     llvm_arg_i += 1;

                     const param_llvm_ty = try dg.lowerType(param_ty);
-                    const abi_size = @intCast(c_uint, param_ty.abiSize(target));
+                    const abi_size = @intCast(c_uint, param_ty.abiSize(mod));
                     const int_llvm_ty = dg.context.intType(abi_size * 8);
                     const alignment = @max(
-                        param_ty.abiAlignment(target),
+                        param_ty.abiAlignment(mod),
                         dg.object.target_data.abiAlignmentOfType(int_llvm_ty),
                     );
                     const arg_ptr = buildAllocaInner(dg.context, builder, llvm_func, false, param_llvm_ty, alignment, target);
@@ -1060,7 +1060,7 @@ pub const Object = struct {

                     try args.ensureUnusedCapacity(1);

-                    if (isByRef(param_ty)) {
+                    if (isByRef(param_ty, mod)) {
                         args.appendAssumeCapacity(arg_ptr);
                     } else {
                         const load_inst = builder.buildLoad(param_llvm_ty, arg_ptr, "");
@@ -1078,7 +1078,7 @@ pub const Object = struct {
                             dg.addArgAttr(llvm_func, llvm_arg_i, "noalias");
                         }
                     }
-                    if (param_ty.zigTypeTag() != .Optional) {
+                    if (param_ty.zigTypeTag(mod) != .Optional) {
                         dg.addArgAttr(llvm_func, llvm_arg_i, "nonnull");
                     }
                     if (!ptr_info.mutable) {
@@ -1087,7 +1087,7 @@ pub const Object = struct {
                     if (ptr_info.@"align" != 0) {
                         dg.addArgAttrInt(llvm_func, llvm_arg_i, "align", ptr_info.@"align");
                     } else {
-                        const elem_align = @max(ptr_info.pointee_type.abiAlignment(target), 1);
+                        const elem_align = @max(ptr_info.pointee_type.abiAlignment(mod), 1);
                         dg.addArgAttrInt(llvm_func, llvm_arg_i, "align", elem_align);
                     }
                     const ptr_param = llvm_func.getParam(llvm_arg_i);
@@ -1105,7 +1105,7 @@ pub const Object = struct {
                     const field_types = it.llvm_types_buffer[0..it.llvm_types_len];
                     const param_ty = fn_info.param_types[it.zig_index - 1];
                     const param_llvm_ty = try dg.lowerType(param_ty);
-                    const param_alignment = param_ty.abiAlignment(target);
+                    const param_alignment = param_ty.abiAlignment(mod);
                     const arg_ptr = buildAllocaInner(dg.context, builder, llvm_func, false, param_llvm_ty, param_alignment, target);
                     const llvm_ty = dg.context.structType(field_types.ptr, @intCast(c_uint, field_types.len), .False);
                     for (field_types, 0..) |_, field_i_usize| {
@@ -1117,7 +1117,7 @@ pub const Object = struct {
                         store_inst.setAlignment(target.ptrBitWidth() / 8);
                     }

-                    const is_by_ref = isByRef(param_ty);
+                    const is_by_ref = isByRef(param_ty, mod);
                     const loaded = if (is_by_ref) arg_ptr else l: {
                         const load_inst = builder.buildLoad(param_llvm_ty, arg_ptr, "");
                         load_inst.setAlignment(param_alignment);
@@ -1139,11 +1139,11 @@ pub const Object = struct {
                     const param = llvm_func.getParam(llvm_arg_i);
                     llvm_arg_i += 1;

-                    const alignment = param_ty.abiAlignment(target);
+                    const alignment = param_ty.abiAlignment(mod);
                     const arg_ptr = buildAllocaInner(dg.context, builder, llvm_func, false, param_llvm_ty, alignment, target);
                     _ = builder.buildStore(param, arg_ptr);

-                    if (isByRef(param_ty)) {
+                    if (isByRef(param_ty, mod)) {
                         try args.append(arg_ptr);
                     } else {
                         const load_inst = builder.buildLoad(param_llvm_ty, arg_ptr, "");
@@ -1157,11 +1157,11 @@ pub const Object = struct {
                     const param = llvm_func.getParam(llvm_arg_i);
                     llvm_arg_i += 1;

-                    const alignment = param_ty.abiAlignment(target);
+                    const alignment = param_ty.abiAlignment(mod);
                     const arg_ptr = buildAllocaInner(dg.context, builder, llvm_func, false, param_llvm_ty, alignment, target);
                     _ = builder.buildStore(param, arg_ptr);

-                    if (isByRef(param_ty)) {
+                    if (isByRef(param_ty, mod)) {
                         try args.append(arg_ptr);
                     } else {
                         const load_inst = builder.buildLoad(param_llvm_ty, arg_ptr, "");
@@ -1180,7 +1180,7 @@ pub const Object = struct {
             const line_number = decl.src_line + 1;
             const is_internal_linkage = decl.val.tag() != .extern_fn and
-                !module.decl_exports.contains(decl_index);
+                !mod.decl_exports.contains(decl_index);
             const noret_bit: c_uint = if (fn_info.return_type.isNoReturn())
                 llvm.DIFlags.NoReturn
             else
@@ -1196,7 +1196,7 @@ pub const Object = struct {
                 true, // is definition
                 line_number + func.lbrace_line, // scope line
                 llvm.DIFlags.StaticMember | noret_bit,
-                module.comp.bin_file.options.optimize_mode != .Debug,
+                mod.comp.bin_file.options.optimize_mode != .Debug,
                 null, // decl_subprogram
             );
             try dg.object.di_map.put(gpa, decl, subprogram.toNode());
@@ -1219,7 +1219,7 @@ pub const Object = struct {
             .func_inst_table = .{},
             .llvm_func = llvm_func,
             .blocks = .{},
-            .single_threaded = module.comp.bin_file.options.single_threaded,
+            .single_threaded = mod.comp.bin_file.options.single_threaded,
             .di_scope = di_scope,
             .di_file = di_file,
             .base_line = dg.decl.src_line,
@@ -1232,14 +1232,14 @@ pub const Object = struct {
         fg.genBody(air.getMainBody()) catch |err| switch (err) {
             error.CodegenFail => {
                 decl.analysis = .codegen_failure;
-                try module.failed_decls.put(module.gpa, decl_index, dg.err_msg.?);
+                try mod.failed_decls.put(mod.gpa, decl_index, dg.err_msg.?);
                 dg.err_msg = null;
                 return;
             },
             else => |e| return e,
         };

-        try o.updateDeclExports(module, decl_index, module.getDeclExports(decl_index));
+        try o.updateDeclExports(mod, decl_index, mod.getDeclExports(decl_index));
     }

     pub fn updateDecl(self: *Object, module: *Module, decl_index: Module.Decl.Index) !void {
@@ -1275,37 +1275,40 @@ pub const Object = struct {

     pub fn updateDeclExports(
         self: *Object,
-        module: *Module,
+        mod: *Module,
         decl_index: Module.Decl.Index,
         exports: []const *Module.Export,
     ) !void {
+        const gpa = mod.gpa;
         // If the module does not already have the function, we ignore this function call
         // because we call `updateDeclExports` at the end of `updateFunc` and `updateDecl`.
         const llvm_global = self.decl_map.get(decl_index) orelse return;
-        const decl = module.declPtr(decl_index);
+        const decl = mod.declPtr(decl_index);
         if (decl.isExtern()) {
-            const is_wasm_fn = module.getTarget().isWasm() and try decl.isFunction();
+            const is_wasm_fn = mod.getTarget().isWasm() and try decl.isFunction(mod);
             const mangle_name = is_wasm_fn and
                 decl.getExternFn().?.lib_name != null and
                 !std.mem.eql(u8, std.mem.sliceTo(decl.getExternFn().?.lib_name.?, 0), "c");
             const decl_name = if (mangle_name) name: {
-                const tmp = try std.fmt.allocPrintZ(module.gpa, "{s}|{s}", .{ decl.name, decl.getExternFn().?.lib_name.? });
+                const tmp = try std.fmt.allocPrintZ(gpa, "{s}|{s}", .{
+                    decl.name, decl.getExternFn().?.lib_name.?,
+                });
                 break :name tmp.ptr;
             } else decl.name;
-            defer if (mangle_name) module.gpa.free(std.mem.sliceTo(decl_name, 0));
+            defer if (mangle_name) gpa.free(std.mem.sliceTo(decl_name, 0));

             llvm_global.setValueName(decl_name);
             if (self.getLlvmGlobal(decl_name)) |other_global| {
                 if (other_global != llvm_global) {
                     log.debug("updateDeclExports isExtern()=true setValueName({s}) conflict", .{decl.name});
-                    try self.extern_collisions.put(module.gpa, decl_index, {});
+                    try self.extern_collisions.put(gpa, decl_index, {});
                 }
             }
             llvm_global.setUnnamedAddr(.False);
             llvm_global.setLinkage(.External);
-            if (module.wantDllExports()) llvm_global.setDLLStorageClass(.Default);
+            if (mod.wantDllExports()) llvm_global.setDLLStorageClass(.Default);
             if (self.di_map.get(decl)) |di_node| {
-                if (try decl.isFunction()) {
+                if (try decl.isFunction(mod)) {
                     const di_func = @ptrCast(*llvm.DISubprogram, di_node);
                     const linkage_name = llvm.MDString.get(self.context, decl.name, std.mem.len(decl.name));
                     di_func.replaceLinkageName(linkage_name);
@@ -1329,9 +1332,9 @@ pub const Object = struct {
             const exp_name = exports[0].options.name;
             llvm_global.setValueName2(exp_name.ptr, exp_name.len);
             llvm_global.setUnnamedAddr(.False);
-            if (module.wantDllExports()) llvm_global.setDLLStorageClass(.DLLExport);
+            if (mod.wantDllExports()) llvm_global.setDLLStorageClass(.DLLExport);
             if (self.di_map.get(decl)) |di_node| {
-                if (try decl.isFunction()) {
+                if (try decl.isFunction(mod)) {
                     const di_func = @ptrCast(*llvm.DISubprogram, di_node);
                     const linkage_name = llvm.MDString.get(self.context, exp_name.ptr, exp_name.len);
                     di_func.replaceLinkageName(linkage_name);
@@ -1353,8 +1356,8 @@ pub const Object = struct {
                 .protected => llvm_global.setVisibility(.Protected),
             }
             if (exports[0].options.section) |section| {
-                const section_z = try module.gpa.dupeZ(u8, section);
-                defer module.gpa.free(section_z);
+                const section_z = try gpa.dupeZ(u8, section);
+                defer gpa.free(section_z);
                 llvm_global.setSection(section_z);
             }
             if (decl.val.castTag(.variable)) |variable| {
@@ -1370,8 +1373,8 @@ pub const Object = struct {
             // Until then we iterate over existing aliases and make them point
             // to the correct decl, or otherwise add a new alias. Old aliases are leaked.
             for (exports[1..]) |exp| {
-                const exp_name_z = try module.gpa.dupeZ(u8, exp.options.name);
-                defer module.gpa.free(exp_name_z);
+                const exp_name_z = try gpa.dupeZ(u8, exp.options.name);
+                defer gpa.free(exp_name_z);

                 if (self.llvm_module.getNamedGlobalAlias(exp_name_z.ptr, exp_name_z.len)) |alias| {
                     alias.setAliasee(llvm_global);
@@ -1385,14 +1388,14 @@ pub const Object = struct {
                 }
             }
         } else {
-            const fqn = try decl.getFullyQualifiedName(module);
-            defer module.gpa.free(fqn);
+            const fqn = try decl.getFullyQualifiedName(mod);
+            defer gpa.free(fqn);
             llvm_global.setValueName2(fqn.ptr, fqn.len);
             llvm_global.setLinkage(.Internal);
-            if (module.wantDllExports()) llvm_global.setDLLStorageClass(.Default);
+            if (mod.wantDllExports()) llvm_global.setDLLStorageClass(.Default);
             llvm_global.setUnnamedAddr(.True);
             if (decl.val.castTag(.variable)) |variable| {
-                const single_threaded = module.comp.bin_file.options.single_threaded;
+                const single_threaded = mod.comp.bin_file.options.single_threaded;
                 if (variable.data.is_threadlocal and !single_threaded) {
                     llvm_global.setThreadLocalMode(.GeneralDynamicTLSModel);
                 } else {
@@ -1479,14 +1482,15 @@ pub const Object = struct {
         const gpa = o.gpa;
         const target = o.target;
        const dib = o.di_builder.?;
-        switch (ty.zigTypeTag()) {
+        const mod = o.module;
+        switch (ty.zigTypeTag(mod)) {
             .Void, .NoReturn => {
                 const di_type = dib.createBasicType("void", 0, DW.ATE.signed);
                 gop.value_ptr.* = AnnotatedDITypePtr.initFull(di_type);
                 return di_type;
             },
             .Int => {
-                const info = ty.intInfo(target);
+                const info = ty.intInfo(mod);
                 assert(info.bits != 0);
                 const name = try ty.nameAlloc(gpa, o.module);
                 defer gpa.free(name);
@@ -1494,7 +1498,7 @@ pub const Object = struct {
                     .signed => DW.ATE.signed,
                     .unsigned => DW.ATE.unsigned,
                 };
-                const di_bits = ty.abiSize(target) * 8; // lldb cannot handle non-byte sized types
+                const di_bits = ty.abiSize(mod) * 8; // lldb cannot handle non-byte sized types
                 const di_type = dib.createBasicType(name, di_bits, dwarf_encoding);
                 gop.value_ptr.* = AnnotatedDITypePtr.initFull(di_type);
                 return di_type;
@@ -1503,7 +1507,7 @@ pub const Object = struct {
                 const owner_decl_index = ty.getOwnerDecl();
                 const owner_decl = o.module.declPtr(owner_decl_index);

-                if (!ty.hasRuntimeBitsIgnoreComptime()) {
+                if (!ty.hasRuntimeBitsIgnoreComptime(mod)) {
                     const enum_di_ty = try o.makeEmptyNamespaceDIType(owner_decl_index);
                     // The recursive call to `lowerDebugType` via `makeEmptyNamespaceDIType`
                     // means we can't use `gop` anymore.
@@ -1522,9 +1526,8 @@ pub const Object = struct {
                 };
                 const field_index_val = Value.initPayload(&buf_field_index.base);

-                var buffer: Type.Payload.Bits = undefined;
-                const int_ty = ty.intTagType(&buffer);
-                const int_info = ty.intInfo(target);
+                const int_ty = ty.intTagType();
+                const int_info = ty.intInfo(mod);
                 assert(int_info.bits != 0);

                 for (field_names, 0..) |field_name, i| {
@@ -1536,7 +1539,7 @@ pub const Object = struct {
                     const field_int_val = field_index_val.enumToInt(ty, &buf_u64);

                     var bigint_space: Value.BigIntSpace = undefined;
-                    const bigint = field_int_val.toBigInt(&bigint_space, target);
+                    const bigint = field_int_val.toBigInt(&bigint_space, mod);

                     if (bigint.limbs.len == 1) {
                         enumerators[i] = dib.createEnumerator(field_name_z, bigint.limbs[0], int_info.signedness == .unsigned);
@@ -1566,8 +1569,8 @@ pub const Object = struct {
                     name,
                     di_file,
                     owner_decl.src_node + 1,
-                    ty.abiSize(target) * 8,
-                    ty.abiAlignment(target) * 8,
+                    ty.abiSize(mod) * 8,
+                    ty.abiAlignment(mod) * 8,
                     enumerators.ptr,
                     @intCast(c_int, enumerators.len),
                     try o.lowerDebugType(int_ty, .full),
@@ -1604,7 +1607,7 @@ pub const Object = struct {
                     !ptr_info.mutable or
                     ptr_info.@"volatile" or
                     ptr_info.size == .Many or ptr_info.size == .C or
-                    !ptr_info.pointee_type.hasRuntimeBitsIgnoreComptime())
+                    !ptr_info.pointee_type.hasRuntimeBitsIgnoreComptime(mod))
                 {
                     var payload: Type.Payload.Pointer = .{
                         .data = .{
@@ -1623,7 +1626,7 @@ pub const Object = struct {
                             },
                         },
                     };
-                    if (!ptr_info.pointee_type.hasRuntimeBitsIgnoreComptime()) {
+                    if (!ptr_info.pointee_type.hasRuntimeBitsIgnoreComptime(mod)) {
                         payload.data.pointee_type = Type.anyopaque;
                     }
                     const bland_ptr_ty = Type.initPayload(&payload.base);
@@ -1657,10 +1660,10 @@ pub const Object = struct {
                     break :blk fwd_decl;
                 };

-                const ptr_size = ptr_ty.abiSize(target);
-                const ptr_align = ptr_ty.abiAlignment(target);
-                const len_size = len_ty.abiSize(target);
-                const len_align = len_ty.abiAlignment(target);
+                const ptr_size = ptr_ty.abiSize(mod);
+                const ptr_align = ptr_ty.abiAlignment(mod);
+                const len_size = len_ty.abiSize(mod);
+                const len_align = len_ty.abiAlignment(mod);

                 var offset: u64 = 0;
                 offset += ptr_size;
@@ -1697,8 +1700,8 @@ pub const Object = struct {
                     name.ptr,
                     di_file,
                     line,
-                    ty.abiSize(target) * 8, // size in bits
-                    ty.abiAlignment(target) * 8, // align in bits
+                    ty.abiSize(mod) * 8, // size in bits
+                    ty.abiAlignment(mod) * 8, // align in bits
                     0, // flags
                     null, // derived from
                     &fields,
@@ -1719,7 +1722,7 @@ pub const Object = struct {
                 const ptr_di_ty = dib.createPointerType(
                     elem_di_ty,
                     target.ptrBitWidth(),
-                    ty.ptrAlignment(target) * 8,
+                    ty.ptrAlignment(mod) * 8,
                     name,
                 );
                 // The recursive call to `lowerDebugType` means we can't use `gop` anymore.
@@ -1750,8 +1753,8 @@ pub const Object = struct {
             },
             .Array => {
                 const array_di_ty = dib.createArrayType(
-                    ty.abiSize(target) * 8,
-                    ty.abiAlignment(target) * 8,
+                    ty.abiSize(mod) * 8,
+                    ty.abiAlignment(mod) * 8,
                     try o.lowerDebugType(ty.childType(), .full),
                     @intCast(c_int, ty.arrayLen()),
                 );
                 // The recursive call to `lowerDebugType` means we can't use `gop` anymore.
                 try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(array_di_ty), .{ .mod = o.module });
                 return array_di_ty;
             },
             .Vector => {
-                const elem_ty = ty.elemType2();
+                const elem_ty = ty.elemType2(mod);
                 // Vector elements cannot be padded since that would make
                 // @bitSizOf(elem) * len > @bitSizOf(vec).
                 // Neither gdb nor lldb seem to be able to display non-byte sized
                 // vectors properly.
-                const elem_di_type = switch (elem_ty.zigTypeTag()) {
+                const elem_di_type = switch (elem_ty.zigTypeTag(mod)) {
                     .Int => blk: {
-                        const info = elem_ty.intInfo(target);
+                        const info = elem_ty.intInfo(mod);
                         assert(info.bits != 0);
                         const name = try ty.nameAlloc(gpa, o.module);
                         defer gpa.free(name);
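Several hunks above and below size DWARF basic types as abiSize(mod) * 8 rather than the declared bit width, because, as the inline comments note, lldb cannot handle non-byte-sized types. A small illustration of the difference, using @sizeOf as a stand-in for the compiler's ABI-size query:

    const std = @import("std");

    // What lowerDebugType reports to createBasicType: the ABI size of the
    // integer, rounded up to whole bytes, re-expressed in bits.
    fn debugBitSize(comptime T: type) u64 {
        return @sizeOf(T) * 8; // e.g. @sizeOf(u7) == 1, so 8 bits
    }

    test "u7 is described as 8 debug bits, not 7" {
        try std.testing.expectEqual(@as(u64, 8), debugBitSize(u7));
        try std.testing.expectEqual(@as(u64, 7), @bitSizeOf(u7));
    }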
@@ -1782,8 +1785,8 @@ pub const Object = struct {
                 };

                 const vector_di_ty = dib.createVectorType(
-                    ty.abiSize(target) * 8,
-                    ty.abiAlignment(target) * 8,
+                    ty.abiSize(mod) * 8,
+                    ty.abiAlignment(mod) * 8,
                     elem_di_type,
                     ty.vectorLen(),
                 );
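The .Optional lowering in the next hunks distinguishes three layouts: a zero-bit child collapses the optional to a single boolean byte, a pointer-like child encodes null in the pointer value itself (optionalReprIsPayload), and anything else becomes a payload plus is_null pair. The same three-way split is observable with ordinary Zig types:

    const std = @import("std");

    test "the three optional layouts this lowering mirrors" {
        // Zero-bit payload: only the null flag remains, one byte total.
        try std.testing.expectEqual(@as(usize, 1), @sizeOf(?void));
        // Pointer payload: null reuses a spare pointer representation,
        // so the optional stays pointer-sized (optionalReprIsPayload).
        try std.testing.expectEqual(@sizeOf(*u8), @sizeOf(?*u8));
        // General payload: payload plus a separate is_null byte, plus padding.
        try std.testing.expect(@sizeOf(?u32) > @sizeOf(u32));
    }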
@@ -1796,13 +1799,13 @@ pub const Object = struct {
                 defer gpa.free(name);
                 var buf: Type.Payload.ElemType = undefined;
                 const child_ty = ty.optionalChild(&buf);
-                if (!child_ty.hasRuntimeBitsIgnoreComptime()) {
+                if (!child_ty.hasRuntimeBitsIgnoreComptime(mod)) {
                     const di_bits = 8; // lldb cannot handle non-byte sized types
                     const di_ty = dib.createBasicType(name, di_bits, DW.ATE.boolean);
                     gop.value_ptr.* = AnnotatedDITypePtr.initFull(di_ty);
                     return di_ty;
                 }
-                if (ty.optionalReprIsPayload()) {
+                if (ty.optionalReprIsPayload(mod)) {
                     const ptr_di_ty = try o.lowerDebugType(child_ty, resolve);
                     // The recursive call to `lowerDebugType` means we can't use `gop` anymore.
                     try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.init(ptr_di_ty, resolve), .{ .mod = o.module });
@@ -1826,10 +1829,10 @@ pub const Object = struct {
                 };

                 const non_null_ty = Type.u8;
-                const payload_size = child_ty.abiSize(target);
-                const payload_align = child_ty.abiAlignment(target);
-                const non_null_size = non_null_ty.abiSize(target);
-                const non_null_align = non_null_ty.abiAlignment(target);
+                const payload_size = child_ty.abiSize(mod);
+                const payload_align = child_ty.abiAlignment(mod);
+                const non_null_size = non_null_ty.abiSize(mod);
+                const non_null_align = non_null_ty.abiAlignment(mod);

                 var offset: u64 = 0;
                 offset += payload_size;
@@ -1866,8 +1869,8 @@ pub const Object = struct {
                     name.ptr,
                     di_file,
                     line,
-                    ty.abiSize(target) * 8, // size in bits
-                    ty.abiAlignment(target) * 8, // align in bits
+                    ty.abiSize(mod) * 8, // size in bits
+                    ty.abiAlignment(mod) * 8, // align in bits
                     0, // flags
                     null, // derived from
                     &fields,
@@ -1883,7 +1886,7 @@ pub const Object = struct {
             },
             .ErrorUnion => {
                 const payload_ty = ty.errorUnionPayload();
-                if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+                if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
                     const err_set_di_ty = try o.lowerDebugType(Type.anyerror, .full);
                     // The recursive call to `lowerDebugType` means we can't use `gop` anymore.
                     try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(err_set_di_ty), .{ .mod = o.module });
@@ -1907,10 +1910,10 @@ pub const Object = struct {
                     break :blk fwd_decl;
                 };

-                const error_size = Type.anyerror.abiSize(target);
-                const error_align = Type.anyerror.abiAlignment(target);
-                const payload_size = payload_ty.abiSize(target);
-                const payload_align = payload_ty.abiAlignment(target);
+                const error_size = Type.anyerror.abiSize(mod);
+                const error_align = Type.anyerror.abiAlignment(mod);
+                const payload_size = payload_ty.abiSize(mod);
+                const payload_align = payload_ty.abiAlignment(mod);

                 var error_index: u32 = undefined;
                 var payload_index: u32 = undefined;
@@ -1957,8 +1960,8 @@ pub const Object = struct {
                     name.ptr,
                     di_file,
                     line,
-                    ty.abiSize(target) * 8, // size in bits
-                    ty.abiAlignment(target) * 8, // align in bits
+                    ty.abiSize(mod) * 8, // size in bits
+                    ty.abiAlignment(mod) * 8, // align in bits
                     0, // flags
                     null, // derived from
                     &fields,
@@ -1988,12 +1991,12 @@ pub const Object = struct {
                 const struct_obj = payload.data;
                 if (struct_obj.layout == .Packed and struct_obj.haveFieldTypes()) {
                     assert(struct_obj.haveLayout());
-                    const info = struct_obj.backing_int_ty.intInfo(target);
+                    const info = struct_obj.backing_int_ty.intInfo(mod);
                     const dwarf_encoding: c_uint = switch (info.signedness) {
                         .signed => DW.ATE.signed,
                         .unsigned => DW.ATE.unsigned,
                     };
-                    const di_bits = ty.abiSize(target) * 8; // lldb cannot handle non-byte sized types
+                    const di_bits = ty.abiSize(mod) * 8; // lldb cannot handle non-byte sized types
                     const di_ty = dib.createBasicType(name, di_bits, dwarf_encoding);
                     gop.value_ptr.* = AnnotatedDITypePtr.initFull(di_ty);
                     return di_ty;
@@ -2026,10 +2029,10 @@ pub const Object = struct {
                     for (tuple.types, 0..) |field_ty, i| {
                         const field_val = tuple.values[i];
-                        if (field_val.tag() != .unreachable_value or !field_ty.hasRuntimeBits()) continue;
+                        if (field_val.tag() != .unreachable_value or !field_ty.hasRuntimeBits(mod)) continue;

-                        const field_size = field_ty.abiSize(target);
-                        const field_align = field_ty.abiAlignment(target);
+                        const field_size = field_ty.abiSize(mod);
+                        const field_align = field_ty.abiAlignment(mod);
                         const field_offset = std.mem.alignForwardGeneric(u64, offset, field_align);
                         offset = field_offset + field_size;
@@ -2057,8 +2060,8 @@ pub const Object = struct {
                         name.ptr,
                         null, // file
                         0, // line
-                        ty.abiSize(target) * 8, // size in bits
-                        ty.abiAlignment(target) * 8, // align in bits
+                        ty.abiSize(mod) * 8, // size in bits
+                        ty.abiAlignment(mod) * 8, // align in bits
                         0, // flags
                         null, // derived from
                         di_fields.items.ptr,
@@ -2093,7 +2096,7 @@ pub const Object = struct {
                     }
                 }

-                if (!ty.hasRuntimeBitsIgnoreComptime()) {
+                if (!ty.hasRuntimeBitsIgnoreComptime(mod)) {
                     const owner_decl_index = ty.getOwnerDecl();
                     const struct_di_ty = try o.makeEmptyNamespaceDIType(owner_decl_index);
                     dib.replaceTemporary(fwd_decl, struct_di_ty);
@@ -2114,11 +2117,11 @@ pub const Object = struct {
                 comptime assert(struct_layout_version == 2);
                 var offset: u64 = 0;

-                var it = ty.castTag(.@"struct").?.data.runtimeFieldIterator();
+                var it = ty.castTag(.@"struct").?.data.runtimeFieldIterator(mod);
                 while (it.next()) |field_and_index| {
                     const field = field_and_index.field;
-                    const field_size = field.ty.abiSize(target);
-                    const field_align = field.alignment(target, layout);
+                    const field_size = field.ty.abiSize(mod);
+                    const field_align = field.alignment(mod, layout);
                     const field_offset = std.mem.alignForwardGeneric(u64, offset, field_align);
                     offset = field_offset + field_size;
@@ -2143,8 +2146,8 @@ pub const Object = struct {
                     name.ptr,
                     null, // file
                     0, // line
-                    ty.abiSize(target) * 8, // size in bits
-                    ty.abiAlignment(target) * 8, // align in bits
+                    ty.abiSize(mod) * 8, // size in bits
+                    ty.abiAlignment(mod) * 8, // align in bits
                     0, // flags
                     null, // derived from
                     di_fields.items.ptr,
@@ -2179,7 +2182,7 @@ pub const Object = struct {
                 };

                 const union_obj = ty.cast(Type.Payload.Union).?.data;
-                if (!union_obj.haveFieldTypes() or !ty.hasRuntimeBitsIgnoreComptime()) {
+                if (!union_obj.haveFieldTypes() or !ty.hasRuntimeBitsIgnoreComptime(mod)) {
                     const union_di_ty = try o.makeEmptyNamespaceDIType(owner_decl_index);
                     dib.replaceTemporary(fwd_decl, union_di_ty);
                     // The recursive call to `lowerDebugType` via `makeEmptyNamespaceDIType`
@@ -2188,7 +2191,7 @@ pub const Object = struct {
                     return union_di_ty;
                 }

-                const layout = ty.unionGetLayout(target);
+                const layout = ty.unionGetLayout(mod);

                 if (layout.payload_size == 0) {
                     const tag_di_ty = try o.lowerDebugType(union_obj.tag_ty, .full);
@@ -2198,8 +2201,8 @@ pub const Object = struct {
                         name.ptr,
                         null, // file
                         0, // line
-                        ty.abiSize(target) * 8, // size in bits
-                        ty.abiAlignment(target) * 8, // align in bits
+                        ty.abiSize(mod) * 8, // size in bits
+                        ty.abiAlignment(mod) * 8, // align in bits
                         0, // flags
                         null, // derived from
                         &di_fields,
@@ -2225,10 +2228,10 @@ pub const Object = struct {
                     const field_name = kv.key_ptr.*;
                     const field = kv.value_ptr.*;

-                    if (!field.ty.hasRuntimeBitsIgnoreComptime()) continue;
+                    if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue;

-                    const field_size = field.ty.abiSize(target);
-                    const field_align = field.normalAlignment(target);
+                    const field_size = field.ty.abiSize(mod);
+                    const field_align = field.normalAlignment(mod);

                     const field_name_copy = try gpa.dupeZ(u8, field_name);
                     defer gpa.free(field_name_copy);
@@ -2258,8 +2261,8 @@ pub const Object = struct {
                     union_name.ptr,
                     null, // file
                     0, // line
-                    ty.abiSize(target) * 8, // size in bits
-                    ty.abiAlignment(target) * 8, // align in bits
+                    ty.abiSize(mod) * 8, // size in bits
+                    ty.abiAlignment(mod) * 8, // align in bits
                     0, // flags
                     di_fields.items.ptr,
                     @intCast(c_int, di_fields.items.len),
@@ -2319,8 +2322,8 @@ pub const Object = struct {
                     name.ptr,
                     null, // file
                     0, // line
-                    ty.abiSize(target) * 8, // size in bits
-                    ty.abiAlignment(target) * 8, // align in bits
+                    ty.abiSize(mod) * 8, // size in bits
+                    ty.abiAlignment(mod) * 8, // align in bits
                     0, // flags
                     null, // derived from
                     &full_di_fields,
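The struct, tuple, and union hunks above all keep a running offset that is aligned forward before each field is placed, which is exactly the std.mem.alignForwardGeneric bookkeeping in the field loops. Here it is in isolation, under the assumption that sizes and alignments are already ABI-resolved:

    const std = @import("std");

    // Mirrors the debug-info field loops: align the cursor up, record the
    // field's offset, then advance past the field.
    fn fieldOffsets(sizes: []const u64, aligns: []const u64, out: []u64) void {
        var offset: u64 = 0;
        for (sizes, 0..) |size, i| {
            offset = std.mem.alignForwardGeneric(u64, offset, aligns[i]);
            out[i] = offset;
            offset += size;
        }
    }

    test "a u8 followed by a u32 leaves three padding bytes" {
        var out: [2]u64 = undefined;
        fieldOffsets(&.{ 1, 4 }, &.{ 1, 4 }, &out);
        try std.testing.expectEqual(@as(u64, 0), out[0]);
        try std.testing.expectEqual(@as(u64, 4), out[1]);
    }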
@@ -2341,8 +2344,8 @@ pub const Object = struct {
         defer param_di_types.deinit();

         // Return type goes first.
-        if (fn_info.return_type.hasRuntimeBitsIgnoreComptime()) {
-            const sret = firstParamSRet(fn_info, target);
+        if (fn_info.return_type.hasRuntimeBitsIgnoreComptime(mod)) {
+            const sret = firstParamSRet(fn_info, mod);
             const di_ret_ty = if (sret) Type.void else fn_info.return_type;
             try param_di_types.append(try o.lowerDebugType(di_ret_ty, .full));
@@ -2358,7 +2361,7 @@ pub const Object = struct {
             try param_di_types.append(try o.lowerDebugType(Type.void, .full));
         }

-        if (fn_info.return_type.isError() and
+        if (fn_info.return_type.isError(mod) and
             o.module.comp.bin_file.options.error_return_tracing)
         {
             var ptr_ty_payload: Type.Payload.ElemType = .{
@@ -2370,9 +2373,9 @@ pub const Object = struct {
         }

         for (fn_info.param_types) |param_ty| {
-            if (!param_ty.hasRuntimeBitsIgnoreComptime()) continue;
+            if (!param_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;

-            if (isByRef(param_ty)) {
+            if (isByRef(param_ty, mod)) {
                 var ptr_ty_payload: Type.Payload.ElemType = .{
                     .base = .{ .tag = .single_mut_pointer },
                     .data = param_ty,
@@ -2450,7 +2453,7 @@ pub const Object = struct {
         const stack_trace_str: []const u8 = "StackTrace";
         // buffer is only used for int_type, `builtin` is a struct.
-        const builtin_ty = mod.declPtr(builtin_decl).val.toType(undefined);
+        const builtin_ty = mod.declPtr(builtin_decl).val.toType();
         const builtin_namespace = builtin_ty.getNamespace().?;
         const stack_trace_decl_index = builtin_namespace.decls
             .getKeyAdapted(stack_trace_str, Module.DeclAdapter{ .mod = mod }).?;
@@ -2458,7 +2461,7 @@ pub const Object = struct {
         // Sema should have ensured that StackTrace was analyzed.
         assert(stack_trace_decl.has_tv);
-        return stack_trace_decl.val.toType(undefined);
+        return stack_trace_decl.val.toType();
     }
 };
@@ -2495,9 +2498,10 @@ pub const DeclGen = struct {
         if (decl.val.castTag(.extern_fn)) |extern_fn| {
             _ = try dg.resolveLlvmFunction(extern_fn.data.owner_decl);
         } else {
-            const target = dg.module.getTarget();
+            const mod = dg.module;
+            const target = mod.getTarget();
             var global = try dg.resolveGlobalDecl(decl_index);
-            global.setAlignment(decl.getAlignment(target));
+            global.setAlignment(decl.getAlignment(mod));
             if (decl.@"linksection") |section| global.setSection(section);
             assert(decl.has_tv);
             const init_val = if (decl.val.castTag(.variable)) |payload| init_val: {
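firstParamSRet, which now takes mod, decides whether the return value travels through a hidden first pointer parameter (LLVM's sret convention); when it does, the debug signature's return type becomes void and every later argument index shifts by one. A reduced sketch of both effects; the size cutoff below is purely illustrative, the real rule differs per calling convention and target:

    // Hypothetical stand-in for the per-ABI logic inside firstParamSRet.
    fn returnsViaSret(ret_abi_size: u64, max_by_value: u64) bool {
        return ret_abi_size > max_by_value; // real rule varies per ABI
    }

    // First user argument: param 0 normally, param 1 when an sret
    // pointer occupies slot 0, matching
    // llvm_func.getParam(@boolToInt(ret_ptr != null)) above.
    fn firstUserArgIndex(sret: bool) u32 {
        return @boolToInt(sret);
    }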
fn resolveLlvmFunction(dg: *DeclGen, decl_index: Module.Decl.Index) !*llvm.Value { - const decl = dg.module.declPtr(decl_index); + const mod = dg.module; + const decl = mod.declPtr(decl_index); const zig_fn_type = decl.ty; const gop = try dg.object.decl_map.getOrPut(dg.gpa, decl_index); if (gop.found_existing) return gop.value_ptr.*; assert(decl.has_tv); const fn_info = zig_fn_type.fnInfo(); - const target = dg.module.getTarget(); - const sret = firstParamSRet(fn_info, target); + const target = mod.getTarget(); + const sret = firstParamSRet(fn_info, mod); const fn_type = try dg.lowerType(zig_fn_type); - const fqn = try decl.getFullyQualifiedName(dg.module); + const fqn = try decl.getFullyQualifiedName(mod); defer dg.gpa.free(fqn); const llvm_addrspace = toLlvmAddressSpace(decl.@"addrspace", target); @@ -2593,7 +2598,7 @@ pub const DeclGen = struct { llvm_fn.setLinkage(.Internal); llvm_fn.setUnnamedAddr(.True); } else { - if (dg.module.getTarget().isWasm()) { + if (target.isWasm()) { dg.addFnAttrString(llvm_fn, "wasm-import-name", std.mem.sliceTo(decl.name, 0)); if (decl.getExternFn().?.lib_name) |lib_name| { const module_name = std.mem.sliceTo(lib_name, 0); @@ -2612,8 +2617,8 @@ pub const DeclGen = struct { llvm_fn.addSretAttr(raw_llvm_ret_ty); } - const err_return_tracing = fn_info.return_type.isError() and - dg.module.comp.bin_file.options.error_return_tracing; + const err_return_tracing = fn_info.return_type.isError(mod) and + mod.comp.bin_file.options.error_return_tracing; if (err_return_tracing) { dg.addArgAttr(llvm_fn, @boolToInt(sret), "nonnull"); @@ -2656,14 +2661,14 @@ pub const DeclGen = struct { .byval => { const param_index = it.zig_index - 1; const param_ty = fn_info.param_types[param_index]; - if (!isByRef(param_ty)) { + if (!isByRef(param_ty, mod)) { dg.addByValParamAttrs(llvm_fn, param_ty, param_index, fn_info, it.llvm_index - 1); } }, .byref => { const param_ty = fn_info.param_types[it.zig_index - 1]; const param_llvm_ty = try dg.lowerType(param_ty); - const alignment = param_ty.abiAlignment(target); + const alignment = param_ty.abiAlignment(mod); dg.addByRefParamAttrs(llvm_fn, it.llvm_index - 1, alignment, it.byval_attr, param_llvm_ty); }, .byref_mut => { @@ -2784,12 +2789,13 @@ pub const DeclGen = struct { fn lowerType(dg: *DeclGen, t: Type) Allocator.Error!*llvm.Type { const llvm_ty = try lowerTypeInner(dg, t); + const mod = dg.module; if (std.debug.runtime_safety and false) check: { - if (t.zigTypeTag() == .Opaque) break :check; - if (!t.hasRuntimeBits()) break :check; + if (t.zigTypeTag(mod) == .Opaque) break :check; + if (!t.hasRuntimeBits(mod)) break :check; if (!llvm_ty.isSized().toBool()) break :check; - const zig_size = t.abiSize(dg.module.getTarget()); + const zig_size = t.abiSize(mod); const llvm_size = dg.object.target_data.abiSizeOfType(llvm_ty); if (llvm_size != zig_size) { log.err("when lowering {}, Zig ABI size = {d} but LLVM ABI size = {d}", .{ @@ -2802,18 +2808,18 @@ pub const DeclGen = struct { fn lowerTypeInner(dg: *DeclGen, t: Type) Allocator.Error!*llvm.Type { const gpa = dg.gpa; - const target = dg.module.getTarget(); - switch (t.zigTypeTag()) { + const mod = dg.module; + const target = mod.getTarget(); + switch (t.zigTypeTag(mod)) { .Void, .NoReturn => return dg.context.voidType(), .Int => { - const info = t.intInfo(target); + const info = t.intInfo(mod); assert(info.bits != 0); return dg.context.intType(info.bits); }, .Enum => { - var buffer: Type.Payload.Bits = undefined; - const int_ty = t.intTagType(&buffer); - const bit_count = 
int_ty.intInfo(target).bits; + const int_ty = t.intTagType(); + const bit_count = int_ty.intInfo(mod).bits; assert(bit_count != 0); return dg.context.intType(bit_count); }, @@ -2863,7 +2869,7 @@ pub const DeclGen = struct { }, .Array => { const elem_ty = t.childType(); - assert(elem_ty.onePossibleValue() == null); + assert(elem_ty.onePossibleValue(mod) == null); const elem_llvm_ty = try dg.lowerType(elem_ty); const total_len = t.arrayLen() + @boolToInt(t.sentinel() != null); return elem_llvm_ty.arrayType(@intCast(c_uint, total_len)); @@ -2875,11 +2881,11 @@ pub const DeclGen = struct { .Optional => { var buf: Type.Payload.ElemType = undefined; const child_ty = t.optionalChild(&buf); - if (!child_ty.hasRuntimeBitsIgnoreComptime()) { + if (!child_ty.hasRuntimeBitsIgnoreComptime(mod)) { return dg.context.intType(8); } const payload_llvm_ty = try dg.lowerType(child_ty); - if (t.optionalReprIsPayload()) { + if (t.optionalReprIsPayload(mod)) { return payload_llvm_ty; } @@ -2887,8 +2893,8 @@ pub const DeclGen = struct { var fields_buf: [3]*llvm.Type = .{ payload_llvm_ty, dg.context.intType(8), undefined, }; - const offset = child_ty.abiSize(target) + 1; - const abi_size = t.abiSize(target); + const offset = child_ty.abiSize(mod) + 1; + const abi_size = t.abiSize(mod); const padding = @intCast(c_uint, abi_size - offset); if (padding == 0) { return dg.context.structType(&fields_buf, 2, .False); @@ -2898,17 +2904,17 @@ pub const DeclGen = struct { }, .ErrorUnion => { const payload_ty = t.errorUnionPayload(); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return try dg.lowerType(Type.anyerror); } const llvm_error_type = try dg.lowerType(Type.anyerror); const llvm_payload_type = try dg.lowerType(payload_ty); - const payload_align = payload_ty.abiAlignment(target); - const error_align = Type.anyerror.abiAlignment(target); + const payload_align = payload_ty.abiAlignment(mod); + const error_align = Type.anyerror.abiAlignment(mod); - const payload_size = payload_ty.abiSize(target); - const error_size = Type.anyerror.abiSize(target); + const payload_size = payload_ty.abiSize(mod); + const error_size = Type.anyerror.abiSize(mod); var fields_buf: [3]*llvm.Type = undefined; if (error_align > payload_align) { @@ -2964,9 +2970,9 @@ pub const DeclGen = struct { for (tuple.types, 0..) 
|field_ty, i| { const field_val = tuple.values[i]; - if (field_val.tag() != .unreachable_value or !field_ty.hasRuntimeBits()) continue; + if (field_val.tag() != .unreachable_value or !field_ty.hasRuntimeBits(mod)) continue; - const field_align = field_ty.abiAlignment(target); + const field_align = field_ty.abiAlignment(mod); big_align = @max(big_align, field_align); const prev_offset = offset; offset = std.mem.alignForwardGeneric(u64, offset, field_align); @@ -2979,7 +2985,7 @@ pub const DeclGen = struct { const field_llvm_ty = try dg.lowerType(field_ty); try llvm_field_types.append(gpa, field_llvm_ty); - offset += field_ty.abiSize(target); + offset += field_ty.abiSize(mod); } { const prev_offset = offset; @@ -3027,11 +3033,11 @@ pub const DeclGen = struct { var big_align: u32 = 1; var any_underaligned_fields = false; - var it = struct_obj.runtimeFieldIterator(); + var it = struct_obj.runtimeFieldIterator(mod); while (it.next()) |field_and_index| { const field = field_and_index.field; - const field_align = field.alignment(target, struct_obj.layout); - const field_ty_align = field.ty.abiAlignment(target); + const field_align = field.alignment(mod, struct_obj.layout); + const field_ty_align = field.ty.abiAlignment(mod); any_underaligned_fields = any_underaligned_fields or field_align < field_ty_align; big_align = @max(big_align, field_align); @@ -3046,7 +3052,7 @@ pub const DeclGen = struct { const field_llvm_ty = try dg.lowerType(field.ty); try llvm_field_types.append(gpa, field_llvm_ty); - offset += field.ty.abiSize(target); + offset += field.ty.abiSize(mod); } { const prev_offset = offset; @@ -3074,11 +3080,11 @@ pub const DeclGen = struct { // reference, we need to copy it here. gop.key_ptr.* = try t.copy(dg.object.type_map_arena.allocator()); - const layout = t.unionGetLayout(target); + const layout = t.unionGetLayout(mod); const union_obj = t.cast(Type.Payload.Union).?.data; if (union_obj.layout == .Packed) { - const bitsize = @intCast(c_uint, t.bitSize(target)); + const bitsize = @intCast(c_uint, t.bitSize(mod)); const int_llvm_ty = dg.context.intType(bitsize); gop.value_ptr.* = int_llvm_ty; return int_llvm_ty; @@ -3155,19 +3161,19 @@ pub const DeclGen = struct { } fn lowerTypeFn(dg: *DeclGen, fn_ty: Type) Allocator.Error!*llvm.Type { - const target = dg.module.getTarget(); + const mod = dg.module; const fn_info = fn_ty.fnInfo(); const llvm_ret_ty = try lowerFnRetTy(dg, fn_info); var llvm_params = std.ArrayList(*llvm.Type).init(dg.gpa); defer llvm_params.deinit(); - if (firstParamSRet(fn_info, target)) { + if (firstParamSRet(fn_info, mod)) { try llvm_params.append(dg.context.pointerType(0)); } - if (fn_info.return_type.isError() and - dg.module.comp.bin_file.options.error_return_tracing) + if (fn_info.return_type.isError(mod) and + mod.comp.bin_file.options.error_return_tracing) { var ptr_ty_payload: Type.Payload.ElemType = .{ .base = .{ .tag = .single_mut_pointer }, @@ -3189,14 +3195,14 @@ pub const DeclGen = struct { }, .abi_sized_int => { const param_ty = fn_info.param_types[it.zig_index - 1]; - const abi_size = @intCast(c_uint, param_ty.abiSize(target)); + const abi_size = @intCast(c_uint, param_ty.abiSize(mod)); try llvm_params.append(dg.context.intType(abi_size * 8)); }, .slice => { const param_ty = fn_info.param_types[it.zig_index - 1]; var buf: Type.SlicePtrFieldTypeBuffer = undefined; var opt_buf: Type.Payload.ElemType = undefined; - const ptr_ty = if (param_ty.zigTypeTag() == .Optional) + const ptr_ty = if (param_ty.zigTypeTag(mod) == .Optional) 
@@ -3027,11 +3033,11 @@
             var big_align: u32 = 1;
             var any_underaligned_fields = false;

-            var it = struct_obj.runtimeFieldIterator();
+            var it = struct_obj.runtimeFieldIterator(mod);
             while (it.next()) |field_and_index| {
                 const field = field_and_index.field;
-                const field_align = field.alignment(target, struct_obj.layout);
-                const field_ty_align = field.ty.abiAlignment(target);
+                const field_align = field.alignment(mod, struct_obj.layout);
+                const field_ty_align = field.ty.abiAlignment(mod);
                 any_underaligned_fields = any_underaligned_fields or field_align < field_ty_align;
                 big_align = @max(big_align, field_align);
@@ -3046,7 +3052,7 @@
                 const field_llvm_ty = try dg.lowerType(field.ty);
                 try llvm_field_types.append(gpa, field_llvm_ty);

-                offset += field.ty.abiSize(target);
+                offset += field.ty.abiSize(mod);
             }
             {
                 const prev_offset = offset;
@@ -3074,11 +3080,11 @@
             // reference, we need to copy it here.
             gop.key_ptr.* = try t.copy(dg.object.type_map_arena.allocator());

-            const layout = t.unionGetLayout(target);
+            const layout = t.unionGetLayout(mod);
             const union_obj = t.cast(Type.Payload.Union).?.data;

             if (union_obj.layout == .Packed) {
-                const bitsize = @intCast(c_uint, t.bitSize(target));
+                const bitsize = @intCast(c_uint, t.bitSize(mod));
                 const int_llvm_ty = dg.context.intType(bitsize);
                 gop.value_ptr.* = int_llvm_ty;
                 return int_llvm_ty;
@@ -3155,19 +3161,19 @@
     }

     fn lowerTypeFn(dg: *DeclGen, fn_ty: Type) Allocator.Error!*llvm.Type {
-        const target = dg.module.getTarget();
+        const mod = dg.module;
         const fn_info = fn_ty.fnInfo();
         const llvm_ret_ty = try lowerFnRetTy(dg, fn_info);

         var llvm_params = std.ArrayList(*llvm.Type).init(dg.gpa);
         defer llvm_params.deinit();

-        if (firstParamSRet(fn_info, target)) {
+        if (firstParamSRet(fn_info, mod)) {
             try llvm_params.append(dg.context.pointerType(0));
         }

-        if (fn_info.return_type.isError() and
-            dg.module.comp.bin_file.options.error_return_tracing)
+        if (fn_info.return_type.isError(mod) and
+            mod.comp.bin_file.options.error_return_tracing)
         {
             var ptr_ty_payload: Type.Payload.ElemType = .{
                 .base = .{ .tag = .single_mut_pointer },
@@ -3189,14 +3195,14 @@
             },
             .abi_sized_int => {
                 const param_ty = fn_info.param_types[it.zig_index - 1];
-                const abi_size = @intCast(c_uint, param_ty.abiSize(target));
+                const abi_size = @intCast(c_uint, param_ty.abiSize(mod));
                 try llvm_params.append(dg.context.intType(abi_size * 8));
             },
             .slice => {
                 const param_ty = fn_info.param_types[it.zig_index - 1];
                 var buf: Type.SlicePtrFieldTypeBuffer = undefined;
                 var opt_buf: Type.Payload.ElemType = undefined;
-                const ptr_ty = if (param_ty.zigTypeTag() == .Optional)
+                const ptr_ty = if (param_ty.zigTypeTag(mod) == .Optional)
                     param_ty.optionalChild(&opt_buf).slicePtrFieldType(&buf)
                 else
                     param_ty.slicePtrFieldType(&buf);
@@ -3215,7 +3221,7 @@
             },
             .float_array => |count| {
                 const param_ty = fn_info.param_types[it.zig_index - 1];
-                const float_ty = try dg.lowerType(aarch64_c_abi.getFloatArrayType(param_ty).?);
+                const float_ty = try dg.lowerType(aarch64_c_abi.getFloatArrayType(param_ty, mod).?);
                 const field_count = @intCast(c_uint, count);
                 const arr_ty = float_ty.arrayType(field_count);
                 try llvm_params.append(arr_ty);
@@ -3239,11 +3245,12 @@
     /// being a zero bit type, but it should still be lowered as an i8 in such case.
     /// There are other similar cases handled here as well.
     fn lowerPtrElemTy(dg: *DeclGen, elem_ty: Type) Allocator.Error!*llvm.Type {
-        const lower_elem_ty = switch (elem_ty.zigTypeTag()) {
+        const mod = dg.module;
+        const lower_elem_ty = switch (elem_ty.zigTypeTag(mod)) {
             .Opaque => true,
             .Fn => !elem_ty.fnInfo().is_generic,
-            .Array => elem_ty.childType().hasRuntimeBitsIgnoreComptime(),
-            else => elem_ty.hasRuntimeBitsIgnoreComptime(),
+            .Array => elem_ty.childType().hasRuntimeBitsIgnoreComptime(mod),
+            else => elem_ty.hasRuntimeBitsIgnoreComptime(mod),
         };
         const llvm_elem_ty = if (lower_elem_ty)
             try dg.lowerType(elem_ty)
@@ -3262,9 +3269,9 @@
             const llvm_type = try dg.lowerType(tv.ty);
             return llvm_type.getUndef();
         }
-        const target = dg.module.getTarget();
-
-        switch (tv.ty.zigTypeTag()) {
+        const mod = dg.module;
+        const target = mod.getTarget();
+        switch (tv.ty.zigTypeTag(mod)) {
             .Bool => {
                 const llvm_type = try dg.lowerType(tv.ty);
                 return if (tv.val.toBool()) llvm_type.constAllOnes() else llvm_type.constNull();
@@ -3276,8 +3283,8 @@
                 .decl_ref => return lowerDeclRefValue(dg, tv, tv.val.castTag(.decl_ref).?.data),
                 else => {
                     var bigint_space: Value.BigIntSpace = undefined;
-                    const bigint = tv.val.toBigInt(&bigint_space, target);
-                    const int_info = tv.ty.intInfo(target);
+                    const bigint = tv.val.toBigInt(&bigint_space, mod);
+                    const int_info = tv.ty.intInfo(mod);
                     assert(int_info.bits != 0);
                     const llvm_type = dg.context.intType(int_info.bits);
@@ -3304,9 +3311,9 @@
                 const int_val = tv.enumToInt(&int_buffer);

                 var bigint_space: Value.BigIntSpace = undefined;
-                const bigint = int_val.toBigInt(&bigint_space, target);
+                const bigint = int_val.toBigInt(&bigint_space, mod);

-                const int_info = tv.ty.intInfo(target);
+                const int_info = tv.ty.intInfo(mod);
                 const llvm_type = dg.context.intType(int_info.bits);

                 const unsigned_val = v: {
@@ -3408,7 +3415,7 @@
                 },
                 .int_u64, .one, .int_big_positive, .lazy_align, .lazy_size => {
                     const llvm_usize = try dg.lowerType(Type.usize);
-                    const llvm_int = llvm_usize.constInt(tv.val.toUnsignedInt(target), .False);
+                    const llvm_int = llvm_usize.constInt(tv.val.toUnsignedInt(mod), .False);
                     return llvm_int.constIntToPtr(try dg.lowerType(tv.ty));
                 },
                 .field_ptr, .opt_payload_ptr, .eu_payload_ptr, .elem_ptr => {
@@ -3439,7 +3446,7 @@
                 const str_lit = tv.val.castTag(.str_lit).?.data;
                 const bytes = dg.module.string_literal_bytes.items[str_lit.index..][0..str_lit.len];
                 if (tv.ty.sentinel()) |sent_val| {
-                    const byte = @intCast(u8, sent_val.toUnsignedInt(target));
+                    const byte = @intCast(u8, sent_val.toUnsignedInt(mod));
                     if (byte == 0 and bytes.len > 0) {
                         return dg.context.constString(
                             bytes.ptr,
@@ -3549,13 +3556,13 @@
                 const payload_ty = tv.ty.optionalChild(&buf);
                 const llvm_i8 = dg.context.intType(8);
-                const is_pl = !tv.val.isNull();
+                const is_pl = !tv.val.isNull(mod);
                 const non_null_bit = if (is_pl) llvm_i8.constInt(1, .False) else llvm_i8.constNull();
-                if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+                if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
                     return non_null_bit;
                 }
                 const llvm_ty = try dg.lowerType(tv.ty);
-                if (tv.ty.optionalReprIsPayload()) {
+                if (tv.ty.optionalReprIsPayload(mod)) {
                     if (tv.val.castTag(.opt_payload)) |payload| {
                         return dg.lowerValue(.{ .ty = payload_ty, .val = payload.data });
                     } else if (is_pl) {
@@ -3564,7 +3571,7 @@
                         return llvm_ty.constNull();
                     }
                 }
-                assert(payload_ty.zigTypeTag() != .Fn);
+                assert(payload_ty.zigTypeTag(mod) != .Fn);

                 const llvm_field_count = llvm_ty.countStructElementTypes();
                 var fields_buf: [3]*llvm.Value = undefined;
@@ -3607,14 +3614,14 @@
                 const payload_type = tv.ty.errorUnionPayload();
                 const is_pl = tv.val.errorUnionIsPayload();

-                if (!payload_type.hasRuntimeBitsIgnoreComptime()) {
+                if (!payload_type.hasRuntimeBitsIgnoreComptime(mod)) {
                     // We use the error type directly as the type.
                     const err_val = if (!is_pl) tv.val else Value.initTag(.zero);
                     return dg.lowerValue(.{ .ty = Type.anyerror, .val = err_val });
                 }

-                const payload_align = payload_type.abiAlignment(target);
-                const error_align = Type.anyerror.abiAlignment(target);
+                const payload_align = payload_type.abiAlignment(mod);
+                const error_align = Type.anyerror.abiAlignment(mod);
                 const llvm_error_value = try dg.lowerValue(.{
                     .ty = Type.anyerror,
                     .val = if (is_pl) Value.initTag(.zero) else tv.val,
@@ -3661,9 +3668,9 @@
                 for (tuple.types, 0..) |field_ty, i| {
                     if (tuple.values[i].tag() != .unreachable_value) continue;
-                    if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue;
+                    if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;

-                    const field_align = field_ty.abiAlignment(target);
+                    const field_align = field_ty.abiAlignment(mod);
                     big_align = @max(big_align, field_align);
                     const prev_offset = offset;
                     offset = std.mem.alignForwardGeneric(u64, offset, field_align);
@@ -3685,7 +3692,7 @@
                     llvm_fields.appendAssumeCapacity(field_llvm_val);

-                    offset += field_ty.abiSize(target);
+                    offset += field_ty.abiSize(mod);
                 }
                 {
                     const prev_offset = offset;
@@ -3715,7 +3722,7 @@
                 if (struct_obj.layout == .Packed) {
                     assert(struct_obj.haveLayout());
-                    const big_bits = struct_obj.backing_int_ty.bitSize(target);
+                    const big_bits = struct_obj.backing_int_ty.bitSize(mod);
                     const int_llvm_ty = dg.context.intType(@intCast(c_uint, big_bits));
                     const fields = struct_obj.fields.values();
                     comptime assert(Type.packed_struct_layout_version == 2);
                     var running_bits: u16 = 0;
                     for (field_vals, 0..) |field_val, i| {
                         const field = fields[i];
-                        if (!field.ty.hasRuntimeBitsIgnoreComptime()) continue;
+                        if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue;

                         const non_int_val = try dg.lowerValue(.{
                             .ty = field.ty,
                             .val = field_val,
                         });
-                        const ty_bit_size = @intCast(u16, field.ty.bitSize(target));
+                        const ty_bit_size = @intCast(u16, field.ty.bitSize(mod));
                         const small_int_ty = dg.context.intType(ty_bit_size);
-                        const small_int_val = if (field.ty.isPtrAtRuntime())
+                        const small_int_val = if (field.ty.isPtrAtRuntime(mod))
                             non_int_val.constPtrToInt(small_int_ty)
                         else
                             non_int_val.constBitCast(small_int_ty);
@@ -3756,10 +3763,10 @@
                 var big_align: u32 = 0;
                 var need_unnamed = false;
-                var it = struct_obj.runtimeFieldIterator();
+                var it = struct_obj.runtimeFieldIterator(mod);
                 while (it.next()) |field_and_index| {
                     const field = field_and_index.field;
-                    const field_align = field.alignment(target, struct_obj.layout);
+                    const field_align = field.alignment(mod, struct_obj.layout);
                     big_align = @max(big_align, field_align);
                     const prev_offset = offset;
                     offset = std.mem.alignForwardGeneric(u64, offset, field_align);
@@ -3781,7 +3788,7 @@
                     llvm_fields.appendAssumeCapacity(field_llvm_val);

-                    offset += field.ty.abiSize(target);
+                    offset += field.ty.abiSize(mod);
                 }
                 {
                     const prev_offset = offset;
@@ -3810,7 +3817,7 @@
                 const llvm_union_ty = try dg.lowerType(tv.ty);
                 const tag_and_val = tv.val.castTag(.@"union").?.data;

-                const layout = tv.ty.unionGetLayout(target);
+                const layout = tv.ty.unionGetLayout(mod);

                 if (layout.payload_size == 0) {
                     return lowerValue(dg, .{
@@ -3824,12 +3831,12 @@
                 const field_ty = union_obj.fields.values()[field_index].ty;
                 if (union_obj.layout == .Packed) {
-                    if (!field_ty.hasRuntimeBits())
+                    if (!field_ty.hasRuntimeBits(mod))
                         return llvm_union_ty.constNull();
                     const non_int_val = try lowerValue(dg, .{ .ty = field_ty, .val = tag_and_val.val });
-                    const ty_bit_size = @intCast(u16, field_ty.bitSize(target));
+                    const ty_bit_size = @intCast(u16, field_ty.bitSize(mod));
                     const small_int_ty = dg.context.intType(ty_bit_size);
-                    const small_int_val = if (field_ty.isPtrAtRuntime())
+                    const small_int_val = if (field_ty.isPtrAtRuntime(mod))
                         non_int_val.constPtrToInt(small_int_ty)
                     else
                         non_int_val.constBitCast(small_int_ty);
@@ -3842,13 +3849,13 @@
                 // must pointer cast to the expected type before accessing the union.
                 var need_unnamed: bool = layout.most_aligned_field != field_index;
                 const payload = p: {
-                    if (!field_ty.hasRuntimeBitsIgnoreComptime()) {
+                    if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) {
                         const padding_len = @intCast(c_uint, layout.payload_size);
                         break :p dg.context.intType(8).arrayType(padding_len).getUndef();
                     }
                     const field = try lowerValue(dg, .{ .ty = field_ty, .val = tag_and_val.val });
                     need_unnamed = need_unnamed or dg.isUnnamedType(field_ty, field);
-                    const field_size = field_ty.abiSize(target);
+                    const field_size = field_ty.abiSize(mod);
                     if (field_size == layout.payload_size) {
                         break :p field;
                     }
@@ -4012,7 +4019,8 @@
     }

     fn lowerParentPtr(dg: *DeclGen, ptr_val: Value, byte_aligned: bool) Error!*llvm.Value {
-        const target = dg.module.getTarget();
+        const mod = dg.module;
+        const target = mod.getTarget();
         switch (ptr_val.tag()) {
             .decl_ref_mut => {
                 const decl = ptr_val.castTag(.decl_ref_mut).?.data.decl_index;
@@ -4045,13 +4053,13 @@
                 const field_index = @intCast(u32, field_ptr.field_index);
                 const llvm_u32 = dg.context.intType(32);
-                switch (parent_ty.zigTypeTag()) {
+                switch (parent_ty.zigTypeTag(mod)) {
                     .Union => {
                         if (parent_ty.containerLayout() == .Packed) {
                             return parent_llvm_ptr;
                         }

-                        const layout = parent_ty.unionGetLayout(target);
+                        const layout = parent_ty.unionGetLayout(mod);
                         if (layout.payload_size == 0) {
                             // In this case a pointer to the union and a pointer to any
                             // (void) payload is the same.
@@ -4077,8 +4085,8 @@
                             const prev_bits = b: {
                                 var b: usize = 0;
                                 for (parent_ty.structFields().values()[0..field_index]) |field| {
-                                    if (field.is_comptime or !field.ty.hasRuntimeBitsIgnoreComptime()) continue;
-                                    b += @intCast(usize, field.ty.bitSize(target));
+                                    if (field.is_comptime or !field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
+                                    b += @intCast(usize, field.ty.bitSize(mod));
                                 }
                                 break :b b;
                             };
@@ -4091,14 +4099,14 @@
                         var ty_buf: Type.Payload.Pointer = undefined;
                         const parent_llvm_ty = try dg.lowerType(parent_ty);
-                        if (llvmFieldIndex(parent_ty, field_index, target, &ty_buf)) |llvm_field_index| {
+                        if (llvmFieldIndex(parent_ty, field_index, mod, &ty_buf)) |llvm_field_index| {
                             const indices: [2]*llvm.Value = .{
                                 llvm_u32.constInt(0, .False),
                                 llvm_u32.constInt(llvm_field_index, .False),
                             };
                             return parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len);
                         } else {
-                            const llvm_index = llvm_u32.constInt(@boolToInt(parent_ty.hasRuntimeBitsIgnoreComptime()), .False);
+                            const llvm_index = llvm_u32.constInt(@boolToInt(parent_ty.hasRuntimeBitsIgnoreComptime(mod)), .False);
                             const indices: [1]*llvm.Value = .{llvm_index};
                             return parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len);
                         }
@@ -4132,8 +4140,8 @@
                 var buf: Type.Payload.ElemType = undefined;
                 const payload_ty = opt_payload_ptr.container_ty.optionalChild(&buf);
-                if (!payload_ty.hasRuntimeBitsIgnoreComptime() or
-                    payload_ty.optionalReprIsPayload())
+                if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod) or
+                    payload_ty.optionalReprIsPayload(mod))
                 {
                     // In this case, we represent pointer to optional the same as pointer
                     // to the payload.
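Context for the optionalReprIsPayload branches in the hunks above: an optional whose payload is a non-allowzero pointer needs no separate null flag, because address zero is reserved to encode null, so a pointer to the optional doubles as a pointer to the payload. A minimal illustration (not part of the patch):

    const std = @import("std");

    test "pointer-like optionals carry no extra null byte" {
        try std.testing.expect(@sizeOf(?*u32) == @sizeOf(*u32));
    }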
@@ -4153,13 +4161,13 @@
                 const parent_llvm_ptr = try dg.lowerParentPtr(eu_payload_ptr.container_ptr, true);

                 const payload_ty = eu_payload_ptr.container_ty.errorUnionPayload();
-                if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+                if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
                     // In this case, we represent pointer to error union the same as pointer
                     // to the payload.
                     return parent_llvm_ptr;
                 }

-                const payload_offset: u8 = if (payload_ty.abiAlignment(target) > Type.anyerror.abiSize(target)) 2 else 1;
+                const payload_offset: u8 = if (payload_ty.abiAlignment(mod) > Type.anyerror.abiSize(mod)) 2 else 1;
                 const llvm_u32 = dg.context.intType(32);
                 const indices: [2]*llvm.Value = .{
                     llvm_u32.constInt(0, .False),
@@ -4177,12 +4185,13 @@
         tv: TypedValue,
         decl_index: Module.Decl.Index,
     ) Error!*llvm.Value {
+        const mod = self.module;
         if (tv.ty.isSlice()) {
             var buf: Type.SlicePtrFieldTypeBuffer = undefined;
             const ptr_ty = tv.ty.slicePtrFieldType(&buf);
             var slice_len: Value.Payload.U64 = .{
                 .base = .{ .tag = .int_u64 },
-                .data = tv.val.sliceLen(self.module),
+                .data = tv.val.sliceLen(mod),
             };
             const fields: [2]*llvm.Value = .{
                 try self.lowerValue(.{
@@ -4202,7 +4211,7 @@
         //   const bar = foo;
         //   ... &bar;
         // `bar` is just an alias and we actually want to lower a reference to `foo`.
-        const decl = self.module.declPtr(decl_index);
+        const decl = mod.declPtr(decl_index);
         if (decl.val.castTag(.function)) |func| {
             if (func.data.owner_decl != decl_index) {
                 return self.lowerDeclRefValue(tv, func.data.owner_decl);
@@ -4213,21 +4222,21 @@
             }
         }

-        const is_fn_body = decl.ty.zigTypeTag() == .Fn;
-        if ((!is_fn_body and !decl.ty.hasRuntimeBits()) or
+        const is_fn_body = decl.ty.zigTypeTag(mod) == .Fn;
+        if ((!is_fn_body and !decl.ty.hasRuntimeBits(mod)) or
             (is_fn_body and decl.ty.fnInfo().is_generic))
         {
             return self.lowerPtrToVoid(tv.ty);
         }

-        self.module.markDeclAlive(decl);
+        mod.markDeclAlive(decl);

         const llvm_decl_val = if (is_fn_body)
             try self.resolveLlvmFunction(decl_index)
         else
             try self.resolveGlobalDecl(decl_index);

-        const target = self.module.getTarget();
+        const target = mod.getTarget();
         const llvm_wanted_addrspace = toLlvmAddressSpace(decl.@"addrspace", target);
         const llvm_actual_addrspace = toLlvmGlobalAddressSpace(decl.@"addrspace", target);
         const llvm_val = if (llvm_wanted_addrspace != llvm_actual_addrspace) blk: {
@@ -4236,7 +4245,7 @@
         } else llvm_decl_val;

         const llvm_type = try self.lowerType(tv.ty);
-        if (tv.ty.zigTypeTag() == .Int) {
+        if (tv.ty.zigTypeTag(mod) == .Int) {
             return llvm_val.constPtrToInt(llvm_type);
         } else {
             return llvm_val.constBitCast(llvm_type);
@@ -4338,21 +4347,20 @@
     /// RMW exchange of floating-point values is bitcasted to same-sized integer
     /// types to work around a LLVM deficiency when targeting ARM/AArch64.
     fn getAtomicAbiType(dg: *DeclGen, ty: Type, is_rmw_xchg: bool) ?*llvm.Type {
-        const target = dg.module.getTarget();
-        var buffer: Type.Payload.Bits = undefined;
-        const int_ty = switch (ty.zigTypeTag()) {
+        const mod = dg.module;
+        const int_ty = switch (ty.zigTypeTag(mod)) {
             .Int => ty,
-            .Enum => ty.intTagType(&buffer),
+            .Enum => ty.intTagType(),
             .Float => {
                 if (!is_rmw_xchg) return null;
-                return dg.context.intType(@intCast(c_uint, ty.abiSize(target) * 8));
+                return dg.context.intType(@intCast(c_uint, ty.abiSize(mod) * 8));
             },
             .Bool => return dg.context.intType(8),
             else => return null,
         };
-        const bit_count = int_ty.intInfo(target).bits;
+        const bit_count = int_ty.intInfo(mod).bits;
         if (!std.math.isPowerOfTwo(bit_count) or (bit_count % 8) != 0) {
-            return dg.context.intType(@intCast(c_uint, int_ty.abiSize(target) * 8));
+            return dg.context.intType(@intCast(c_uint, int_ty.abiSize(mod) * 8));
         } else {
             return null;
         }
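getAtomicAbiType above answers one question: does this atomic operand need to be widened to an ABI-sized integer first? For example, u29 fails both the power-of-two and the multiple-of-8 test, so it is accessed as an i32 (its 4-byte ABI size times 8 bits). A user-level sketch of an operation that takes this path (not part of the patch; it assumes this era's .Xchg/.SeqCst spellings):

    const std = @import("std");

    test "atomic rmw on an odd-sized integer goes through the widened type" {
        var x: u29 = 1;
        // lowered via an i32 access, per getAtomicAbiType
        const old = @atomicRmw(u29, &x, .Xchg, 2, .SeqCst);
        try std.testing.expectEqual(@as(u29, 1), old);
        try std.testing.expectEqual(@as(u29, 2), x);
    }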
@@ -4366,15 +4374,15 @@
         fn_info: Type.Payload.Function.Data,
         llvm_arg_i: u32,
     ) void {
-        const target = dg.module.getTarget();
-        if (param_ty.isPtrAtRuntime()) {
+        const mod = dg.module;
+        if (param_ty.isPtrAtRuntime(mod)) {
             const ptr_info = param_ty.ptrInfo().data;

             if (math.cast(u5, param_index)) |i| {
                 if (@truncate(u1, fn_info.noalias_bits >> i) != 0) {
                     dg.addArgAttr(llvm_fn, llvm_arg_i, "noalias");
                 }
             }
-            if (!param_ty.isPtrLikeOptional() and !ptr_info.@"allowzero") {
+            if (!param_ty.isPtrLikeOptional(mod) and !ptr_info.@"allowzero") {
                 dg.addArgAttr(llvm_fn, llvm_arg_i, "nonnull");
             }
             if (!ptr_info.mutable) {
@@ -4383,13 +4391,10 @@
             if (ptr_info.@"align" != 0) {
                 dg.addArgAttrInt(llvm_fn, llvm_arg_i, "align", ptr_info.@"align");
             } else {
-                const elem_align = @max(
-                    ptr_info.pointee_type.abiAlignment(target),
-                    1,
-                );
+                const elem_align = @max(ptr_info.pointee_type.abiAlignment(mod), 1);
                 dg.addArgAttrInt(llvm_fn, llvm_arg_i, "align", elem_align);
             }
-        } else if (ccAbiPromoteInt(fn_info.cc, target, param_ty)) |s| switch (s) {
+        } else if (ccAbiPromoteInt(fn_info.cc, mod, param_ty)) |s| switch (s) {
             .signed => dg.addArgAttr(llvm_fn, llvm_arg_i, "signext"),
             .unsigned => dg.addArgAttr(llvm_fn, llvm_arg_i, "zeroext"),
         };
@@ -4490,9 +4495,10 @@ pub const FuncGen = struct {
         const gop = try self.func_inst_table.getOrPut(self.dg.gpa, inst);
         if (gop.found_existing) return gop.value_ptr.*;

+        const mod = self.dg.module;
         const llvm_val = try self.resolveValue(.{
             .ty = self.air.typeOf(inst),
-            .val = self.air.value(inst).?,
+            .val = self.air.value(inst, mod).?,
         });
         gop.value_ptr.* = llvm_val;
         return llvm_val;
@@ -4500,11 +4506,12 @@ pub const FuncGen = struct {

     fn resolveValue(self: *FuncGen, tv: TypedValue) !*llvm.Value {
         const llvm_val = try self.dg.lowerValue(tv);
-        if (!isByRef(tv.ty)) return llvm_val;
+        const mod = self.dg.module;
+        if (!isByRef(tv.ty, mod)) return llvm_val;

         // We have an LLVM value but we need to create a global constant and
         // set the value as its initializer, and then return a pointer to the global.
-        const target = self.dg.module.getTarget();
+        const target = mod.getTarget();
         const llvm_wanted_addrspace = toLlvmAddressSpace(.generic, target);
         const llvm_actual_addrspace = toLlvmGlobalAddressSpace(.generic, target);
         const global = self.dg.object.llvm_module.addGlobalInAddressSpace(llvm_val.typeOf(), "", llvm_actual_addrspace);
@@ -4512,7 +4519,7 @@ pub const FuncGen = struct {
         global.setLinkage(.Private);
         global.setGlobalConstant(.True);
         global.setUnnamedAddr(.True);
-        global.setAlignment(tv.ty.abiAlignment(target));
+        global.setAlignment(tv.ty.abiAlignment(mod));
         const addrspace_casted_ptr = if (llvm_actual_addrspace != llvm_wanted_addrspace)
             global.constAddrSpaceCast(self.context.pointerType(llvm_wanted_addrspace))
         else
@@ -4775,7 +4782,8 @@ pub const FuncGen = struct {
         const extra = self.air.extraData(Air.Call, pl_op.payload);
         const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]);
         const callee_ty = self.air.typeOf(pl_op.operand);
-        const zig_fn_ty = switch (callee_ty.zigTypeTag()) {
+        const mod = self.dg.module;
+        const zig_fn_ty = switch (callee_ty.zigTypeTag(mod)) {
             .Fn => callee_ty,
             .Pointer => callee_ty.childType(),
             else => unreachable,
@@ -4783,20 +4791,20 @@ pub const FuncGen = struct {
         const fn_info = zig_fn_ty.fnInfo();
         const return_type = fn_info.return_type;
         const llvm_fn = try self.resolveInst(pl_op.operand);
-        const target = self.dg.module.getTarget();
-        const sret = firstParamSRet(fn_info, target);
+        const target = mod.getTarget();
+        const sret = firstParamSRet(fn_info, mod);

         var llvm_args = std.ArrayList(*llvm.Value).init(self.gpa);
         defer llvm_args.deinit();

         const ret_ptr = if (!sret) null else blk: {
             const llvm_ret_ty = try self.dg.lowerType(return_type);
-            const ret_ptr = self.buildAlloca(llvm_ret_ty, return_type.abiAlignment(target));
+            const ret_ptr = self.buildAlloca(llvm_ret_ty, return_type.abiAlignment(mod));
             try llvm_args.append(ret_ptr);
             break :blk ret_ptr;
         };

-        const err_return_tracing = fn_info.return_type.isError() and
+        const err_return_tracing = fn_info.return_type.isError(mod) and
             self.dg.module.comp.bin_file.options.error_return_tracing;
         if (err_return_tracing) {
             try llvm_args.append(self.err_ret_trace.?);
@@ -4810,8 +4818,8 @@ pub const FuncGen = struct {
                 const param_ty = self.air.typeOf(arg);
                 const llvm_arg = try self.resolveInst(arg);
                 const llvm_param_ty = try self.dg.lowerType(param_ty);
-                if (isByRef(param_ty)) {
-                    const alignment = param_ty.abiAlignment(target);
+                if (isByRef(param_ty, mod)) {
+                    const alignment = param_ty.abiAlignment(mod);
                     const load_inst = self.builder.buildLoad(llvm_param_ty, llvm_arg, "");
                     load_inst.setAlignment(alignment);
                     try llvm_args.append(load_inst);
@@ -4823,10 +4831,10 @@ pub const FuncGen = struct {
                 const arg = args[it.zig_index - 1];
                 const param_ty = self.air.typeOf(arg);
                 const llvm_arg = try self.resolveInst(arg);
-                if (isByRef(param_ty)) {
+                if (isByRef(param_ty, mod)) {
                     try llvm_args.append(llvm_arg);
                 } else {
-                    const alignment = param_ty.abiAlignment(target);
+                    const alignment = param_ty.abiAlignment(mod);
                     const param_llvm_ty = llvm_arg.typeOf();
                     const arg_ptr = self.buildAlloca(param_llvm_ty, alignment);
                     const store_inst = self.builder.buildStore(llvm_arg, arg_ptr);
@@ -4839,10 +4847,10 @@ pub const FuncGen = struct {
                 const param_ty = self.air.typeOf(arg);
                 const llvm_arg = try self.resolveInst(arg);

-                const alignment = param_ty.abiAlignment(target);
+                const alignment = param_ty.abiAlignment(mod);
                 const param_llvm_ty = try self.dg.lowerType(param_ty);
                 const arg_ptr = self.buildAlloca(param_llvm_ty, alignment);
-                if (isByRef(param_ty)) {
+                if (isByRef(param_ty, mod)) {
                     const load_inst = self.builder.buildLoad(param_llvm_ty, llvm_arg, "");
                     load_inst.setAlignment(alignment);
@@ -4859,11 +4867,11 @@ pub const FuncGen = struct {
                 const arg = args[it.zig_index - 1];
                 const param_ty = self.air.typeOf(arg);
                 const llvm_arg = try self.resolveInst(arg);
-                const abi_size = @intCast(c_uint, param_ty.abiSize(target));
+                const abi_size = @intCast(c_uint, param_ty.abiSize(mod));
                 const int_llvm_ty = self.context.intType(abi_size * 8);

-                if (isByRef(param_ty)) {
-                    const alignment = param_ty.abiAlignment(target);
+                if (isByRef(param_ty, mod)) {
+                    const alignment = param_ty.abiAlignment(mod);
                     const load_inst = self.builder.buildLoad(int_llvm_ty, llvm_arg, "");
                     load_inst.setAlignment(alignment);
                     try llvm_args.append(load_inst);
@@ -4871,7 +4879,7 @@ pub const FuncGen = struct {
                     // LLVM does not allow bitcasting structs so we must allocate
                     // a local, store as one type, and then load as another type.
                     const alignment = @max(
-                        param_ty.abiAlignment(target),
+                        param_ty.abiAlignment(mod),
                         self.dg.object.target_data.abiAlignmentOfType(int_llvm_ty),
                     );
                     const int_ptr = self.buildAlloca(int_llvm_ty, alignment);
@@ -4896,11 +4904,11 @@ pub const FuncGen = struct {
                 const param_ty = self.air.typeOf(arg);
                 const llvm_types = it.llvm_types_buffer[0..it.llvm_types_len];
                 const llvm_arg = try self.resolveInst(arg);
-                const is_by_ref = isByRef(param_ty);
+                const is_by_ref = isByRef(param_ty, mod);
                 const arg_ptr = if (is_by_ref) llvm_arg else p: {
                     const p = self.buildAlloca(llvm_arg.typeOf(), null);
                     const store_inst = self.builder.buildStore(llvm_arg, p);
-                    store_inst.setAlignment(param_ty.abiAlignment(target));
+                    store_inst.setAlignment(param_ty.abiAlignment(mod));
                     break :p p;
                 };
@@ -4924,17 +4932,17 @@ pub const FuncGen = struct {
                 const arg = args[it.zig_index - 1];
                 const arg_ty = self.air.typeOf(arg);
                 var llvm_arg = try self.resolveInst(arg);
-                if (!isByRef(arg_ty)) {
+                if (!isByRef(arg_ty, mod)) {
                     const p = self.buildAlloca(llvm_arg.typeOf(), null);
                     const store_inst = self.builder.buildStore(llvm_arg, p);
-                    store_inst.setAlignment(arg_ty.abiAlignment(target));
+                    store_inst.setAlignment(arg_ty.abiAlignment(mod));
                     llvm_arg = store_inst;
                 }

-                const float_ty = try self.dg.lowerType(aarch64_c_abi.getFloatArrayType(arg_ty).?);
+                const float_ty = try self.dg.lowerType(aarch64_c_abi.getFloatArrayType(arg_ty, mod).?);
                 const array_llvm_ty = float_ty.arrayType(count);

-                const alignment = arg_ty.abiAlignment(target);
+                const alignment = arg_ty.abiAlignment(mod);
                 const load_inst = self.builder.buildLoad(array_llvm_ty, llvm_arg, "");
                 load_inst.setAlignment(alignment);
                 try llvm_args.append(load_inst);
@@ -4944,15 +4952,15 @@ pub const FuncGen = struct {
                 const arg = args[it.zig_index - 1];
                 const arg_ty = self.air.typeOf(arg);
                 var llvm_arg = try self.resolveInst(arg);
-                if (!isByRef(arg_ty)) {
+                if (!isByRef(arg_ty, mod)) {
                     const p = self.buildAlloca(llvm_arg.typeOf(), null);
                     const store_inst = self.builder.buildStore(llvm_arg, p);
-                    store_inst.setAlignment(arg_ty.abiAlignment(target));
+                    store_inst.setAlignment(arg_ty.abiAlignment(mod));
                     llvm_arg = store_inst;
                 }

                 const array_llvm_ty = self.context.intType(elem_size).arrayType(arr_len);
-                const alignment = arg_ty.abiAlignment(target);
+                const alignment = arg_ty.abiAlignment(mod);
                 const load_inst = self.builder.buildLoad(array_llvm_ty, llvm_arg, "");
                 load_inst.setAlignment(alignment);
                 try llvm_args.append(load_inst);
@@ -4969,7 +4977,7 @@ pub const FuncGen = struct {
             "",
         );

-        if (callee_ty.zigTypeTag() == .Pointer) {
+        if (callee_ty.zigTypeTag(mod) == .Pointer) {
             // Add argument attributes for function pointer calls.
             it = iterateParamTypes(self.dg, fn_info);
             it.llvm_index += @boolToInt(sret);
@@ -4978,7 +4986,7 @@ pub const FuncGen = struct {
                 .byval => {
                     const param_index = it.zig_index - 1;
                     const param_ty = fn_info.param_types[param_index];
-                    if (!isByRef(param_ty)) {
+                    if (!isByRef(param_ty, mod)) {
                         self.dg.addByValParamAttrs(call, param_ty, param_index, fn_info, it.llvm_index - 1);
                     }
                 },
                 .byref => {
                     const param_index = it.zig_index - 1;
                     const param_ty = fn_info.param_types[param_index];
                     const param_llvm_ty = try self.dg.lowerType(param_ty);
-                    const alignment = param_ty.abiAlignment(target);
+                    const alignment = param_ty.abiAlignment(mod);
                     self.dg.addByRefParamAttrs(call, it.llvm_index - 1, alignment, it.byval_attr, param_llvm_ty);
                 },
                 .byref_mut => {
@@ -5013,7 +5021,7 @@ pub const FuncGen = struct {
                             self.dg.addArgAttr(call, llvm_arg_i, "noalias");
                         }
                     }
-                    if (param_ty.zigTypeTag() != .Optional) {
+                    if (param_ty.zigTypeTag(mod) != .Optional) {
                         self.dg.addArgAttr(call, llvm_arg_i, "nonnull");
                     }
                     if (!ptr_info.mutable) {
@@ -5022,7 +5030,7 @@ pub const FuncGen = struct {
                     if (ptr_info.@"align" != 0) {
                         self.dg.addArgAttrInt(call, llvm_arg_i, "align", ptr_info.@"align");
                     } else {
-                        const elem_align = @max(ptr_info.pointee_type.abiAlignment(target), 1);
+                        const elem_align = @max(ptr_info.pointee_type.abiAlignment(mod), 1);
                         self.dg.addArgAttrInt(call, llvm_arg_i, "align", elem_align);
                     }
                 },
@@ -5033,7 +5041,7 @@ pub const FuncGen = struct {
             return null;
         }

-        if (self.liveness.isUnused(inst) or !return_type.hasRuntimeBitsIgnoreComptime()) {
+        if (self.liveness.isUnused(inst) or !return_type.hasRuntimeBitsIgnoreComptime(mod)) {
             return null;
         }
@@ -5041,12 +5049,12 @@ pub const FuncGen = struct {
         if (ret_ptr) |rp| {
             call.setCallSret(llvm_ret_ty);
-            if (isByRef(return_type)) {
+            if (isByRef(return_type, mod)) {
                 return rp;
             } else {
                 // our by-ref status disagrees with sret so we must load.
                 const loaded = self.builder.buildLoad(llvm_ret_ty, rp, "");
-                loaded.setAlignment(return_type.abiAlignment(target));
+                loaded.setAlignment(return_type.abiAlignment(mod));
                 return loaded;
             }
         }
@@ -5061,7 +5069,7 @@ pub const FuncGen = struct {
             const rp = self.buildAlloca(llvm_ret_ty, alignment);
             const store_inst = self.builder.buildStore(call, rp);
             store_inst.setAlignment(alignment);
-            if (isByRef(return_type)) {
+            if (isByRef(return_type, mod)) {
                 return rp;
             } else {
                 const load_inst = self.builder.buildLoad(llvm_ret_ty, rp, "");
@@ -5070,10 +5078,10 @@ pub const FuncGen = struct {
             }
         }

-        if (isByRef(return_type)) {
+        if (isByRef(return_type, mod)) {
             // our by-ref status disagrees with sret so we must allocate, store,
             // and return the allocation pointer.
-            const alignment = return_type.abiAlignment(target);
+            const alignment = return_type.abiAlignment(mod);
             const rp = self.buildAlloca(llvm_ret_ty, alignment);
             const store_inst = self.builder.buildStore(call, rp);
             store_inst.setAlignment(alignment);
@@ -5084,6 +5092,7 @@ pub const FuncGen = struct {
     }

     fn airRet(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
         const un_op = self.air.instructions.items(.data)[inst].un_op;
         const ret_ty = self.air.typeOf(un_op);
         if (self.ret_ptr) |ret_ptr| {
@@ -5098,8 +5107,8 @@ pub const FuncGen = struct {
             return null;
         }
         const fn_info = self.dg.decl.ty.fnInfo();
-        if (!ret_ty.hasRuntimeBitsIgnoreComptime()) {
-            if (fn_info.return_type.isError()) {
+        if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+            if (fn_info.return_type.isError(mod)) {
                 // Functions with an empty error set are emitted with an error code
                 // return type and return zero so they can be function pointers coerced
                 // to functions that return anyerror.
@@ -5113,10 +5122,9 @@ pub const FuncGen = struct {
         const abi_ret_ty = try lowerFnRetTy(self.dg, fn_info);
         const operand = try self.resolveInst(un_op);
-        const target = self.dg.module.getTarget();
-        const alignment = ret_ty.abiAlignment(target);
+        const alignment = ret_ty.abiAlignment(mod);

-        if (isByRef(ret_ty)) {
+        if (isByRef(ret_ty, mod)) {
             // operand is a pointer however self.ret_ptr is null so that means
             // we need to return a value.
             const load_inst = self.builder.buildLoad(abi_ret_ty, operand, "");
@@ -5145,8 +5153,9 @@ pub const FuncGen = struct {
         const ptr_ty = self.air.typeOf(un_op);
         const ret_ty = ptr_ty.childType();
         const fn_info = self.dg.decl.ty.fnInfo();
-        if (!ret_ty.hasRuntimeBitsIgnoreComptime()) {
-            if (fn_info.return_type.isError()) {
+        const mod = self.dg.module;
+        if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+            if (fn_info.return_type.isError(mod)) {
                 // Functions with an empty error set are emitted with an error code
                 // return type and return zero so they can be function pointers coerced
                 // to functions that return anyerror.
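The comment repeated in the last two hunks is worth unpacking: a function whose error set is empty can still be coerced to a function type returning anyerror, so the backend gives it an error-code return slot and always writes zero ("no error") into it. A minimal sketch of the coercion this keeps sound (not part of the patch):

    const std = @import("std");

    fn cannotFail() error{}!void {}

    test "empty-error-set functions coerce to anyerror-returning ones" {
        const f: *const fn () anyerror!void = &cannotFail;
        try f();
    }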
@@ -5162,10 +5171,9 @@ pub const FuncGen = struct {
             return null;
         }
         const ptr = try self.resolveInst(un_op);
-        const target = self.dg.module.getTarget();
         const abi_ret_ty = try lowerFnRetTy(self.dg, fn_info);
         const loaded = self.builder.buildLoad(abi_ret_ty, ptr, "");
-        loaded.setAlignment(ret_ty.abiAlignment(target));
+        loaded.setAlignment(ret_ty.abiAlignment(mod));
         _ = self.builder.buildRet(loaded);
         return null;
     }
@@ -5184,9 +5192,9 @@ pub const FuncGen = struct {
         const src_list = try self.resolveInst(ty_op.operand);
         const va_list_ty = self.air.getRefType(ty_op.ty);
         const llvm_va_list_ty = try self.dg.lowerType(va_list_ty);
+        const mod = self.dg.module;

-        const target = self.dg.module.getTarget();
-        const result_alignment = va_list_ty.abiAlignment(target);
+        const result_alignment = va_list_ty.abiAlignment(mod);
         const dest_list = self.buildAlloca(llvm_va_list_ty, result_alignment);

         const llvm_fn_name = "llvm.va_copy";
@@ -5202,7 +5210,7 @@ pub const FuncGen = struct {
         const args: [2]*llvm.Value = .{ dest_list, src_list };
         _ = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &args, args.len, .Fast, .Auto, "");

-        if (isByRef(va_list_ty)) {
+        if (isByRef(va_list_ty, mod)) {
             return dest_list;
         } else {
             const loaded = self.builder.buildLoad(llvm_va_list_ty, dest_list, "");
@@ -5227,11 +5235,11 @@ pub const FuncGen = struct {
     }

     fn airCVaStart(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
         const va_list_ty = self.air.typeOfIndex(inst);
         const llvm_va_list_ty = try self.dg.lowerType(va_list_ty);

-        const target = self.dg.module.getTarget();
-        const result_alignment = va_list_ty.abiAlignment(target);
+        const result_alignment = va_list_ty.abiAlignment(mod);
         const list = self.buildAlloca(llvm_va_list_ty, result_alignment);

         const llvm_fn_name = "llvm.va_start";
@@ -5243,7 +5251,7 @@ pub const FuncGen = struct {
         const args: [1]*llvm.Value = .{list};
         _ = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &args, args.len, .Fast, .Auto, "");

-        if (isByRef(va_list_ty)) {
+        if (isByRef(va_list_ty, mod)) {
             return list;
         } else {
             const loaded = self.builder.buildLoad(llvm_va_list_ty, list, "");
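airCVaCopy and airCVaStart above lower straight to the llvm.va_copy/llvm.va_start intrinsics. For reference, a sketch of the user-level builtins they back (not part of the patch; C varargs support is target-dependent):

    const std = @import("std");

    fn sum(count: c_int, ...) callconv(.C) c_int {
        var ap = @cVaStart(); // airCVaStart
        defer @cVaEnd(&ap); // airCVaEnd
        var total: c_int = 0;
        var i: c_int = 0;
        while (i < count) : (i += 1) total += @cVaArg(&ap, c_int); // airCVaArg
        return total;
    }

    test "C varargs round-trip" {
        try std.testing.expectEqual(@as(c_int, 6), sum(3, @as(c_int, 1), @as(c_int, 2), @as(c_int, 3)));
    }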
@@ -5292,23 +5300,23 @@ pub const FuncGen = struct {
         operand_ty: Type,
         op: math.CompareOperator,
     ) Allocator.Error!*llvm.Value {
-        var int_buffer: Type.Payload.Bits = undefined;
         var opt_buffer: Type.Payload.ElemType = undefined;

-        const scalar_ty = operand_ty.scalarType();
-        const int_ty = switch (scalar_ty.zigTypeTag()) {
-            .Enum => scalar_ty.intTagType(&int_buffer),
+        const mod = self.dg.module;
+        const scalar_ty = operand_ty.scalarType(mod);
+        const int_ty = switch (scalar_ty.zigTypeTag(mod)) {
+            .Enum => scalar_ty.intTagType(),
             .Int, .Bool, .Pointer, .ErrorSet => scalar_ty,
             .Optional => blk: {
                 const payload_ty = operand_ty.optionalChild(&opt_buffer);
-                if (!payload_ty.hasRuntimeBitsIgnoreComptime() or
-                    operand_ty.optionalReprIsPayload())
+                if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod) or
+                    operand_ty.optionalReprIsPayload(mod))
                 {
                     break :blk operand_ty;
                 }
                 // We need to emit instructions to check for equality/inequality
                 // of optionals that are not pointers.
-                const is_by_ref = isByRef(scalar_ty);
+                const is_by_ref = isByRef(scalar_ty, mod);
                 const opt_llvm_ty = try self.dg.lowerType(scalar_ty);
                 const lhs_non_null = self.optIsNonNull(opt_llvm_ty, lhs, is_by_ref);
                 const rhs_non_null = self.optIsNonNull(opt_llvm_ty, rhs, is_by_ref);
@@ -5375,7 +5383,7 @@ pub const FuncGen = struct {
             .Float => return self.buildFloatCmp(op, operand_ty, .{ lhs, rhs }),
             else => unreachable,
         };
-        const is_signed = int_ty.isSignedInt();
+        const is_signed = int_ty.isSignedInt(mod);
         const operation: llvm.IntPredicate = switch (op) {
             .eq => .EQ,
             .neq => .NE,
@@ -5393,6 +5401,7 @@ pub const FuncGen = struct {
         const body = self.air.extra[extra.end..][0..extra.data.body_len];
         const inst_ty = self.air.typeOfIndex(inst);
         const parent_bb = self.context.createBasicBlock("Block");
+        const mod = self.dg.module;

         if (inst_ty.isNoReturn()) {
             try self.genBody(body);
@@ -5414,8 +5423,8 @@ pub const FuncGen = struct {
         self.builder.positionBuilderAtEnd(parent_bb);

         // Create a phi node only if the block returns a value.
-        const is_body = inst_ty.zigTypeTag() == .Fn;
-        if (!is_body and !inst_ty.hasRuntimeBitsIgnoreComptime()) return null;
+        const is_body = inst_ty.zigTypeTag(mod) == .Fn;
+        if (!is_body and !inst_ty.hasRuntimeBitsIgnoreComptime(mod)) return null;

         const raw_llvm_ty = try self.dg.lowerType(inst_ty);
@@ -5424,7 +5433,7 @@ pub const FuncGen = struct {
             // a pointer to it. LLVM IR allows the call instruction to use function bodies instead
             // of function pointers, however the phi makes it a runtime value and therefore
             // the LLVM type has to be wrapped in a pointer.
-            if (is_body or isByRef(inst_ty)) {
+            if (is_body or isByRef(inst_ty, mod)) {
                 break :ty self.context.pointerType(0);
             }
             break :ty raw_llvm_ty;
@@ -5445,7 +5454,8 @@ pub const FuncGen = struct {
         // Add the values to the lists only if the break provides a value.
         const operand_ty = self.air.typeOf(branch.operand);
-        if (operand_ty.hasRuntimeBitsIgnoreComptime() or operand_ty.zigTypeTag() == .Fn) {
+        const mod = self.dg.module;
+        if (operand_ty.hasRuntimeBitsIgnoreComptime(mod) or operand_ty.zigTypeTag(mod) == .Fn) {
             const val = try self.resolveInst(branch.operand);

             // For the phi node, we need the basic blocks and the values of the
@@ -5481,6 +5491,7 @@ pub const FuncGen = struct {
     }

     fn airTry(self: *FuncGen, body_tail: []const Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
         const inst = body_tail[0];
         const pl_op = self.air.instructions.items(.data)[inst].pl_op;
         const err_union = try self.resolveInst(pl_op.operand);
@@ -5488,7 +5499,7 @@ pub const FuncGen = struct {
         const body = self.air.extra[extra.end..][0..extra.data.body_len];
         const err_union_ty = self.air.typeOf(pl_op.operand);
         const payload_ty = self.air.typeOfIndex(inst);
-        const can_elide_load = if (isByRef(payload_ty)) self.canElideLoad(body_tail) else false;
+        const can_elide_load = if (isByRef(payload_ty, mod)) self.canElideLoad(body_tail) else false;
         const is_unused = self.liveness.isUnused(inst);
         return lowerTry(self, err_union, body, err_union_ty, false, can_elide_load, is_unused);
     }
@@ -5512,9 +5523,9 @@ pub const FuncGen = struct {
         can_elide_load: bool,
         is_unused: bool,
     ) !?*llvm.Value {
+        const mod = fg.dg.module;
         const payload_ty = err_union_ty.errorUnionPayload();
-        const payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime();
-        const target = fg.dg.module.getTarget();
+        const payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(mod);
         const err_union_llvm_ty = try fg.dg.lowerType(err_union_ty);

         if (!err_union_ty.errorUnionSet().errorSetIsEmpty()) {
@@ -5529,8 +5540,8 @@ pub const FuncGen = struct {
                     err_union;
                 break :err fg.builder.buildICmp(.NE, loaded, zero, "");
             }
-            const err_field_index = errUnionErrorOffset(payload_ty, target);
-            if (operand_is_ptr or isByRef(err_union_ty)) {
+            const err_field_index = errUnionErrorOffset(payload_ty, mod);
+            if (operand_is_ptr or isByRef(err_union_ty, mod)) {
                 const err_field_ptr = fg.builder.buildStructGEP(err_union_llvm_ty, err_union, err_field_index, "");
                 // TODO add alignment to this load
                 const loaded = fg.builder.buildLoad(err_set_ty, err_field_ptr, "");
@@ -5555,30 +5566,31 @@ pub const FuncGen = struct {
         if (!payload_has_bits) {
             return if (operand_is_ptr) err_union else null;
         }
-        const offset = errUnionPayloadOffset(payload_ty, target);
+        const offset = errUnionPayloadOffset(payload_ty, mod);
         if (operand_is_ptr) {
             return fg.builder.buildStructGEP(err_union_llvm_ty, err_union, offset, "");
-        } else if (isByRef(err_union_ty)) {
+        } else if (isByRef(err_union_ty, mod)) {
             const payload_ptr = fg.builder.buildStructGEP(err_union_llvm_ty, err_union, offset, "");
-            if (isByRef(payload_ty)) {
+            if (isByRef(payload_ty, mod)) {
                 if (can_elide_load)
                     return payload_ptr;

-                return fg.loadByRef(payload_ptr, payload_ty, payload_ty.abiAlignment(target), false);
+                return fg.loadByRef(payload_ptr, payload_ty, payload_ty.abiAlignment(mod), false);
             }
             const load_inst = fg.builder.buildLoad(err_union_llvm_ty.structGetTypeAtIndex(offset), payload_ptr, "");
-            load_inst.setAlignment(payload_ty.abiAlignment(target));
+            load_inst.setAlignment(payload_ty.abiAlignment(mod));
             return load_inst;
         }
         return fg.builder.buildExtractValue(err_union, offset, "");
     }

     fn airSwitchBr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
         const pl_op = self.air.instructions.items(.data)[inst].pl_op;
         const cond = try self.resolveInst(pl_op.operand);
         const switch_br = self.air.extraData(Air.SwitchBr, pl_op.payload);
         const else_block = self.context.appendBasicBlock(self.llvm_func, "Else");
-        const target = self.dg.module.getTarget();
+        const target = mod.getTarget();
         const llvm_usize = self.context.intType(target.ptrBitWidth());
         const cond_int = if (cond.typeOf().getTypeKind() == .Pointer)
             self.builder.buildPtrToInt(cond, llvm_usize, "")
@@ -5645,6 +5657,7 @@ pub const FuncGen = struct {
     }

     fn airArrayToSlice(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
         const operand_ty = self.air.typeOf(ty_op.operand);
         const array_ty = operand_ty.childType();
@@ -5652,7 +5665,7 @@ pub const FuncGen = struct {
         const len = llvm_usize.constInt(array_ty.arrayLen(), .False);
         const slice_llvm_ty = try self.dg.lowerType(self.air.typeOfIndex(inst));
         const operand = try self.resolveInst(ty_op.operand);
-        if (!array_ty.hasRuntimeBitsIgnoreComptime()) {
+        if (!array_ty.hasRuntimeBitsIgnoreComptime(mod)) {
             const partial = self.builder.buildInsertValue(slice_llvm_ty.getUndef(), operand, 0, "");
             return self.builder.buildInsertValue(partial, len, 1, "");
         }
@@ -5666,30 +5679,31 @@ pub const FuncGen = struct {
     }

     fn airIntToFloat(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
         const operand = try self.resolveInst(ty_op.operand);
         const operand_ty = self.air.typeOf(ty_op.operand);
-        const operand_scalar_ty = operand_ty.scalarType();
+        const operand_scalar_ty = operand_ty.scalarType(mod);
         const dest_ty = self.air.typeOfIndex(inst);
-        const dest_scalar_ty = dest_ty.scalarType();
+        const dest_scalar_ty = dest_ty.scalarType(mod);
         const dest_llvm_ty = try self.dg.lowerType(dest_ty);
-        const target = self.dg.module.getTarget();
+        const target = mod.getTarget();

         if (intrinsicsAllowed(dest_scalar_ty, target)) {
-            if (operand_scalar_ty.isSignedInt()) {
+            if (operand_scalar_ty.isSignedInt(mod)) {
                 return self.builder.buildSIToFP(operand, dest_llvm_ty, "");
             } else {
                 return self.builder.buildUIToFP(operand, dest_llvm_ty, "");
             }
         }

-        const operand_bits = @intCast(u16, operand_scalar_ty.bitSize(target));
+        const operand_bits = @intCast(u16, operand_scalar_ty.bitSize(mod));
         const rt_int_bits = compilerRtIntBits(operand_bits);
         const rt_int_ty = self.context.intType(rt_int_bits);
         var extended = e: {
-            if (operand_scalar_ty.isSignedInt()) {
+            if (operand_scalar_ty.isSignedInt(mod)) {
                 break :e self.builder.buildSExtOrBitCast(operand, rt_int_ty, "");
             } else {
                 break :e self.builder.buildZExtOrBitCast(operand, rt_int_ty, "");
@@ -5698,7 +5712,7 @@ pub const FuncGen = struct {
         const dest_bits = dest_scalar_ty.floatBits(target);
         const compiler_rt_operand_abbrev = compilerRtIntAbbrev(rt_int_bits);
         const compiler_rt_dest_abbrev = compilerRtFloatAbbrev(dest_bits);
-        const sign_prefix = if (operand_scalar_ty.isSignedInt()) "" else "un";
+        const sign_prefix = if (operand_scalar_ty.isSignedInt(mod)) "" else "un";
         var fn_name_buf: [64]u8 = undefined;
         const fn_name = std.fmt.bufPrintZ(&fn_name_buf, "__float{s}{s}i{s}f", .{
             sign_prefix,
@@ -5724,27 +5738,28 @@ pub const FuncGen = struct {

     fn airFloatToInt(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
         self.builder.setFastMath(want_fast_math);

-        const target = self.dg.module.getTarget();
+        const mod = self.dg.module;
+        const target = mod.getTarget();
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
         const operand = try self.resolveInst(ty_op.operand);
         const operand_ty = self.air.typeOf(ty_op.operand);
-        const operand_scalar_ty = operand_ty.scalarType();
+        const operand_scalar_ty = operand_ty.scalarType(mod);
         const dest_ty = self.air.typeOfIndex(inst);
-        const dest_scalar_ty = dest_ty.scalarType();
+        const dest_scalar_ty = dest_ty.scalarType(mod);
         const dest_llvm_ty = try self.dg.lowerType(dest_ty);

         if (intrinsicsAllowed(operand_scalar_ty, target)) {
             // TODO set fast math flag
-            if (dest_scalar_ty.isSignedInt()) {
+            if (dest_scalar_ty.isSignedInt(mod)) {
                 return self.builder.buildFPToSI(operand, dest_llvm_ty, "");
             } else {
                 return self.builder.buildFPToUI(operand, dest_llvm_ty, "");
             }
         }

-        const rt_int_bits = compilerRtIntBits(@intCast(u16, dest_scalar_ty.bitSize(target)));
+        const rt_int_bits = compilerRtIntBits(@intCast(u16, dest_scalar_ty.bitSize(mod)));
         const ret_ty = self.context.intType(rt_int_bits);
         const libc_ret_ty = if (rt_int_bits == 128 and (target.os.tag == .windows and target.cpu.arch == .x86_64)) b: {
             // On Windows x86-64, "ti" functions must use Vector(2, u64) instead of the standard
@@ -5756,7 +5771,7 @@ pub const FuncGen = struct {
         const compiler_rt_operand_abbrev = compilerRtFloatAbbrev(operand_bits);
         const compiler_rt_dest_abbrev = compilerRtIntAbbrev(rt_int_bits);
-        const sign_prefix = if (dest_scalar_ty.isSignedInt()) "" else "uns";
+        const sign_prefix = if (dest_scalar_ty.isSignedInt(mod)) "" else "uns";

         var fn_name_buf: [64]u8 = undefined;
         const fn_name = std.fmt.bufPrintZ(&fn_name_buf, "__fix{s}{s}f{s}i", .{
@@ -5786,13 +5801,14 @@ pub const FuncGen = struct {
     }

     fn sliceOrArrayLenInBytes(fg: *FuncGen, ptr: *llvm.Value, ty: Type) *llvm.Value {
-        const target = fg.dg.module.getTarget();
+        const mod = fg.dg.module;
+        const target = mod.getTarget();
         const llvm_usize_ty = fg.context.intType(target.ptrBitWidth());
         switch (ty.ptrSize()) {
             .Slice => {
                 const len = fg.builder.buildExtractValue(ptr, 1, "");
                 const elem_ty = ty.childType();
-                const abi_size = elem_ty.abiSize(target);
+                const abi_size = elem_ty.abiSize(mod);
                 if (abi_size == 1) return len;
                 const abi_size_llvm_val = llvm_usize_ty.constInt(abi_size, .False);
                 return fg.builder.buildMul(len, abi_size_llvm_val, "");
@@ -5800,7 +5816,7 @@ pub const FuncGen = struct {
             .One => {
                 const array_ty = ty.childType();
                 const elem_ty = array_ty.childType();
-                const abi_size = elem_ty.abiSize(target);
+                const abi_size = elem_ty.abiSize(mod);
                 return llvm_usize_ty.constInt(array_ty.arrayLen() * abi_size, .False);
             },
             .Many, .C => unreachable,
@@ -5823,6 +5839,7 @@ pub const FuncGen = struct {
     }

     fn airSliceElemVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
         const inst = body_tail[0];
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
         const slice_ty = self.air.typeOf(bin_op.lhs);
@@ -5833,12 +5850,11 @@ pub const FuncGen = struct {
         const base_ptr = self.builder.buildExtractValue(slice, 0, "");
         const indices: [1]*llvm.Value = .{index};
         const ptr = self.builder.buildInBoundsGEP(llvm_elem_ty, base_ptr, &indices, indices.len, "");
-        if (isByRef(elem_ty)) {
+        if (isByRef(elem_ty, mod)) {
             if (self.canElideLoad(body_tail))
                 return ptr;

-            const target = self.dg.module.getTarget();
-            return self.loadByRef(ptr, elem_ty, elem_ty.abiAlignment(target), false);
+            return self.loadByRef(ptr, elem_ty, elem_ty.abiAlignment(mod), false);
         }

         return self.load(ptr, slice_ty);
@@ -5858,6 +5874,7 @@ pub const FuncGen = struct {
     }

     fn airArrayElemVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
         const inst = body_tail[0];
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
@@ -5866,15 +5883,14 @@ pub const FuncGen = struct {
         const rhs = try self.resolveInst(bin_op.rhs);
         const array_llvm_ty = try self.dg.lowerType(array_ty);
         const elem_ty = array_ty.childType();
-        if (isByRef(array_ty)) {
+        if (isByRef(array_ty, mod)) {
             const indices: [2]*llvm.Value = .{ self.context.intType(32).constNull(), rhs };
-            if (isByRef(elem_ty)) {
+            if (isByRef(elem_ty, mod)) {
                 const elem_ptr = self.builder.buildInBoundsGEP(array_llvm_ty, array_llvm_val, &indices, indices.len, "");
                 if (canElideLoad(self, body_tail))
                     return elem_ptr;

-                const target = self.dg.module.getTarget();
-                return self.loadByRef(elem_ptr, elem_ty, elem_ty.abiAlignment(target), false);
+                return self.loadByRef(elem_ptr, elem_ty, elem_ty.abiAlignment(mod), false);
             } else {
                 const lhs_index = Air.refToIndex(bin_op.lhs).?;
                 const elem_llvm_ty = try self.dg.lowerType(elem_ty);
@@ -5901,6 +5917,7 @@ pub const FuncGen = struct {
     }

     fn airPtrElemVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
         const inst = body_tail[0];
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
         const ptr_ty = self.air.typeOf(bin_op.lhs);
@@ -5917,23 +5934,23 @@ pub const FuncGen = struct {
             const indices: [1]*llvm.Value = .{rhs};
             break :ptr self.builder.buildInBoundsGEP(llvm_elem_ty, base_ptr, &indices, indices.len, "");
         };
-        if (isByRef(elem_ty)) {
+        if (isByRef(elem_ty, mod)) {
             if (self.canElideLoad(body_tail))
                 return ptr;

-            const target = self.dg.module.getTarget();
-            return self.loadByRef(ptr, elem_ty, elem_ty.abiAlignment(target), false);
+            return self.loadByRef(ptr, elem_ty, elem_ty.abiAlignment(mod), false);
         }

         return self.load(ptr, ptr_ty);
     }

     fn airPtrElemPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
         const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
         const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
         const ptr_ty = self.air.typeOf(bin_op.lhs);
         const elem_ty = ptr_ty.childType();
-        if (!elem_ty.hasRuntimeBitsIgnoreComptime()) return self.dg.lowerPtrToVoid(ptr_ty);
+        if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) return self.dg.lowerPtrToVoid(ptr_ty);

         const base_ptr = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
@@ -5972,6 +5989,7 @@ pub const FuncGen = struct {
     }

     fn airStructFieldVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
         const inst = body_tail[0];
         const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
         const struct_field = self.air.extraData(Air.StructField, ty_pl.payload).data;
@@ -5979,29 +5997,28 @@ pub const FuncGen = struct {
         const struct_llvm_val = try self.resolveInst(struct_field.struct_operand);
         const field_index = struct_field.field_index;
         const field_ty = struct_ty.structFieldType(field_index);
-        if (!field_ty.hasRuntimeBitsIgnoreComptime()) {
+        if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) {
             return null;
         }

-        const target = self.dg.module.getTarget();
-        if (!isByRef(struct_ty)) {
-            assert(!isByRef(field_ty));
-            switch (struct_ty.zigTypeTag()) {
+        if (!isByRef(struct_ty, mod)) {
+            assert(!isByRef(field_ty, mod));
+            switch (struct_ty.zigTypeTag(mod)) {
                 .Struct => switch (struct_ty.containerLayout()) {
                     .Packed => {
                         const struct_obj = struct_ty.castTag(.@"struct").?.data;
-                        const bit_offset = struct_obj.packedFieldBitOffset(target, field_index);
+                        const bit_offset = struct_obj.packedFieldBitOffset(mod, field_index);
                         const containing_int = struct_llvm_val;
                         const shift_amt = containing_int.typeOf().constInt(bit_offset, .False);
                         const shifted_value = self.builder.buildLShr(containing_int, shift_amt, "");
                         const elem_llvm_ty = try self.dg.lowerType(field_ty);
-                        if (field_ty.zigTypeTag() == .Float or field_ty.zigTypeTag() == .Vector) {
-                            const elem_bits = @intCast(c_uint, field_ty.bitSize(target));
+                        if (field_ty.zigTypeTag(mod) == .Float or field_ty.zigTypeTag(mod) == .Vector) {
+                            const elem_bits = @intCast(c_uint, field_ty.bitSize(mod));
                             const same_size_int = self.context.intType(elem_bits);
                             const truncated_int = self.builder.buildTrunc(shifted_value, same_size_int, "");
                             return self.builder.buildBitCast(truncated_int, elem_llvm_ty, "");
-                        } else if (field_ty.isPtrAtRuntime()) {
-                            const elem_bits = @intCast(c_uint, field_ty.bitSize(target));
+                        } else if (field_ty.isPtrAtRuntime(mod)) {
+                            const elem_bits = @intCast(c_uint, field_ty.bitSize(mod));
                             const same_size_int = self.context.intType(elem_bits);
                             const truncated_int = self.builder.buildTrunc(shifted_value, same_size_int, "");
                             return self.builder.buildIntToPtr(truncated_int, elem_llvm_ty, "");
@@ -6010,7 +6027,7 @@ pub const FuncGen = struct {
                     },
                     else => {
                         var ptr_ty_buf: Type.Payload.Pointer = undefined;
-                        const llvm_field_index = llvmFieldIndex(struct_ty, field_index, target, &ptr_ty_buf).?;
+                        const llvm_field_index = llvmFieldIndex(struct_ty, field_index, mod, &ptr_ty_buf).?;
                         return self.builder.buildExtractValue(struct_llvm_val, llvm_field_index, "");
                     },
                 },
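The .Packed branch above extracts a field by shifting the backing integer right by packedFieldBitOffset and truncating to the field's bit width. A standalone illustration using the same two-argument builtin style as the surrounding code (not part of the patch):

    const std = @import("std");

    test "packed struct fields are bit ranges of the backing integer" {
        const S = packed struct { a: u3, b: u13 };
        const s = S{ .a = 1, .b = 200 };
        // field `b` lives at bit offset 3 of the u16 backing integer
        const backing = @bitCast(u16, s);
        try std.testing.expectEqual(@as(u13, 200), @truncate(u13, backing >> 3));
    }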
@@ -6018,13 +6035,13 @@ pub const FuncGen = struct {
                     assert(struct_ty.containerLayout() == .Packed);
                     const containing_int = struct_llvm_val;
                     const elem_llvm_ty = try self.dg.lowerType(field_ty);
-                    if (field_ty.zigTypeTag() == .Float or field_ty.zigTypeTag() == .Vector) {
-                        const elem_bits = @intCast(c_uint, field_ty.bitSize(target));
+                    if (field_ty.zigTypeTag(mod) == .Float or field_ty.zigTypeTag(mod) == .Vector) {
+                        const elem_bits = @intCast(c_uint, field_ty.bitSize(mod));
                         const same_size_int = self.context.intType(elem_bits);
                         const truncated_int = self.builder.buildTrunc(containing_int, same_size_int, "");
                         return self.builder.buildBitCast(truncated_int, elem_llvm_ty, "");
-                    } else if (field_ty.isPtrAtRuntime()) {
-                        const elem_bits = @intCast(c_uint, field_ty.bitSize(target));
+                    } else if (field_ty.isPtrAtRuntime(mod)) {
+                        const elem_bits = @intCast(c_uint, field_ty.bitSize(mod));
                         const same_size_int = self.context.intType(elem_bits);
                         const truncated_int = self.builder.buildTrunc(containing_int, same_size_int, "");
                         return self.builder.buildIntToPtr(truncated_int, elem_llvm_ty, "");
@@ -6035,30 +6052,30 @@ pub const FuncGen = struct {
             }
         }

-        switch (struct_ty.zigTypeTag()) {
+        switch (struct_ty.zigTypeTag(mod)) {
             .Struct => {
                 assert(struct_ty.containerLayout() != .Packed);
                 var ptr_ty_buf: Type.Payload.Pointer = undefined;
-                const llvm_field_index = llvmFieldIndex(struct_ty, field_index, target, &ptr_ty_buf).?;
+                const llvm_field_index = llvmFieldIndex(struct_ty, field_index, mod, &ptr_ty_buf).?;
                 const struct_llvm_ty = try self.dg.lowerType(struct_ty);
                 const field_ptr = self.builder.buildStructGEP(struct_llvm_ty, struct_llvm_val, llvm_field_index, "");
                 const field_ptr_ty = Type.initPayload(&ptr_ty_buf.base);
-                if (isByRef(field_ty)) {
+                if (isByRef(field_ty, mod)) {
                     if (canElideLoad(self, body_tail))
                         return field_ptr;

-                    return self.loadByRef(field_ptr, field_ty, ptr_ty_buf.data.alignment(target), false);
+                    return self.loadByRef(field_ptr, field_ty, ptr_ty_buf.data.alignment(mod), false);
                 } else {
                     return self.load(field_ptr, field_ptr_ty);
                 }
             },
             .Union => {
                 const union_llvm_ty = try self.dg.lowerType(struct_ty);
-                const layout = struct_ty.unionGetLayout(target);
+                const layout = struct_ty.unionGetLayout(mod);
                 const payload_index = @boolToInt(layout.tag_align >= layout.payload_align);
                 const field_ptr = self.builder.buildStructGEP(union_llvm_ty, struct_llvm_val, payload_index, "");
                 const llvm_field_ty = try self.dg.lowerType(field_ty);
-                if (isByRef(field_ty)) {
+                if (isByRef(field_ty, mod)) {
                     if (canElideLoad(self, body_tail))
                         return field_ptr;
@@ -6072,6 +6089,7 @@ pub const FuncGen = struct {
     }

     fn airFieldParentPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
         const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
         const extra = self.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
@@ -6079,7 +6097,7 @@ pub const FuncGen = struct {
         const target = self.dg.module.getTarget();
         const parent_ty = self.air.getRefType(ty_pl.ty).childType();
-        const field_offset = parent_ty.structFieldOffset(extra.field_index, target);
+        const field_offset = parent_ty.structFieldOffset(extra.field_index, mod);

         const res_ty = try self.dg.lowerType(self.air.getRefType(ty_pl.ty));
         if (field_offset == 0) {
@@ -6119,12 +6137,13 @@ pub const FuncGen = struct {
     }

     fn airDbgInlineBegin(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
         const dib = self.dg.object.di_builder orelse return null;
         const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
         const func = self.air.values[ty_pl.payload].castTag(.function).?.data;
         const decl_index = func.owner_decl;
-        const decl = self.dg.module.declPtr(decl_index);
+        const decl = mod.declPtr(decl_index);
         const di_file = try self.dg.object.getDIFile(self.gpa, decl.src_namespace.file_scope);
         self.di_file = di_file;
         const line_number = decl.src_line + 1;
@@ -6136,22 +6155,41 @@ pub const FuncGen = struct {
             .base_line = self.base_line,
         });

-        const fqn = try decl.getFullyQualifiedName(self.dg.module);
+        const fqn = try decl.getFullyQualifiedName(mod);
         defer self.gpa.free(fqn);

-        const is_internal_linkage = !self.dg.module.decl_exports.contains(decl_index);
+        const is_internal_linkage = !mod.decl_exports.contains(decl_index);
+        var fn_ty_pl: Type.Payload.Function = .{
+            .base = .{ .tag = .function },
+            .data = .{
+                .param_types = &.{},
+                .comptime_params = undefined,
+                .return_type = Type.void,
+                .alignment = 0,
+                .noalias_bits = 0,
+                .cc = .Unspecified,
+                .is_var_args = false,
+                .is_generic = false,
+                .is_noinline = false,
+                .align_is_generic = false,
+                .cc_is_generic = false,
+                .section_is_generic = false,
+                .addrspace_is_generic = false,
+            },
+        };
+        const fn_ty = Type.initPayload(&fn_ty_pl.base);
         const subprogram = dib.createFunction(
             di_file.toScope(),
             decl.name,
             fqn,
             di_file,
             line_number,
-            try self.dg.object.lowerDebugType(Type.initTag(.fn_void_no_args), .full),
+            try self.dg.object.lowerDebugType(fn_ty, .full),
             is_internal_linkage,
             true, // is definition
             line_number + func.lbrace_line, // scope line
             llvm.DIFlags.StaticMember,
-            self.dg.module.comp.bin_file.options.optimize_mode != .Debug,
+            mod.comp.bin_file.options.optimize_mode != .Debug,
             null, // decl_subprogram
         );
@@ -6243,10 +6281,11 @@ pub const FuncGen = struct {
             null;
         const debug_loc = llvm.getDebugLoc(self.prev_dbg_line, self.prev_dbg_column, self.di_scope.?, inlined_at);
         const insert_block = self.builder.getInsertBlock();
-        if (isByRef(operand_ty)) {
+        const mod = self.dg.module;
+        if (isByRef(operand_ty, mod)) {
             _ = dib.insertDeclareAtEnd(operand, di_local_var, debug_loc, insert_block);
         } else if (self.dg.module.comp.bin_file.options.optimize_mode == .Debug) {
-            const alignment = operand_ty.abiAlignment(self.dg.module.getTarget());
+            const alignment = operand_ty.abiAlignment(mod);
             const alloca = self.buildAlloca(operand.typeOf(), alignment);
             const store_inst = self.builder.buildStore(operand, alloca);
             store_inst.setAlignment(alignment);
@@ -6294,7 +6333,8 @@ pub const FuncGen = struct {
         // This stores whether we need to add an elementtype attribute and
         // if so, the element type itself.
         const llvm_param_attrs = try arena.alloc(?*llvm.Type, max_param_count);
-        const target = self.dg.module.getTarget();
+        const mod = self.dg.module;
+        const target = mod.getTarget();

         var llvm_ret_i: usize = 0;
         var llvm_param_i: usize = 0;
@@ -6322,7 +6362,7 @@ pub const FuncGen = struct {
             if (output != .none) {
                 const output_inst = try self.resolveInst(output);
                 const output_ty = self.air.typeOf(output);
-                assert(output_ty.zigTypeTag() == .Pointer);
+                assert(output_ty.zigTypeTag(mod) == .Pointer);
                 const elem_llvm_ty = try self.dg.lowerPtrElemTy(output_ty.childType());

                 if (llvm_ret_indirect[i]) {
@@ -6376,13 +6416,13 @@ pub const FuncGen = struct {
             const arg_llvm_value = try self.resolveInst(input);
             const arg_ty = self.air.typeOf(input);
             var llvm_elem_ty: ?*llvm.Type = null;
-            if (isByRef(arg_ty)) {
+            if (isByRef(arg_ty, mod)) {
                 llvm_elem_ty = try self.dg.lowerPtrElemTy(arg_ty);
                 if (constraintAllowsMemory(constraint)) {
                     llvm_param_values[llvm_param_i] = arg_llvm_value;
                     llvm_param_types[llvm_param_i] = arg_llvm_value.typeOf();
                 } else {
-                    const alignment = arg_ty.abiAlignment(target);
+                    const alignment = arg_ty.abiAlignment(mod);
                     const arg_llvm_ty = try self.dg.lowerType(arg_ty);
                     const load_inst = self.builder.buildLoad(arg_llvm_ty, arg_llvm_value, "");
                     load_inst.setAlignment(alignment);
@@ -6394,7 +6434,7 @@ pub const FuncGen = struct {
                     llvm_param_values[llvm_param_i] = arg_llvm_value;
                     llvm_param_types[llvm_param_i] = arg_llvm_value.typeOf();
                 } else {
-                    const alignment = arg_ty.abiAlignment(target);
+                    const alignment = arg_ty.abiAlignment(mod);
                     const arg_ptr = self.buildAlloca(arg_llvm_value.typeOf(), alignment);
                     const store_inst = self.builder.buildStore(arg_llvm_value, arg_ptr);
                     store_inst.setAlignment(alignment);
@@ -6599,7 +6639,7 @@ pub const FuncGen = struct {
                 const output_ptr_ty = self.air.typeOf(output);

                 const store_inst = self.builder.buildStore(output_value, output_ptr);
-                store_inst.setAlignment(output_ptr_ty.ptrAlignment(target));
+                store_inst.setAlignment(output_ptr_ty.ptrAlignment(mod));
             } else {
                 ret_val = output_value;
             }
@@ -6622,7 +6662,8 @@ pub const FuncGen = struct {
         const optional_llvm_ty = try self.dg.lowerType(optional_ty);
         var buf: Type.Payload.ElemType = undefined;
         const payload_ty = optional_ty.optionalChild(&buf);
-        if (optional_ty.optionalReprIsPayload()) {
+        const mod = self.dg.module;
+        if (optional_ty.optionalReprIsPayload(mod)) {
             const loaded = if (operand_is_ptr)
                 self.builder.buildLoad(optional_llvm_ty, operand, "")
             else
                 operand;
@@ -6638,7 +6679,7 @@ pub const FuncGen = struct {

         comptime assert(optional_layout_version == 3);

-        if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+        if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
             const loaded = if (operand_is_ptr)
                 self.builder.buildLoad(optional_llvm_ty, operand, "")
             else
                 operand;
             return self.builder.buildICmp(pred, loaded, llvm_i8.constNull(), "");
         }

-        const is_by_ref = operand_is_ptr or isByRef(optional_ty);
+        const is_by_ref = operand_is_ptr or isByRef(optional_ty, mod);
         const non_null_bit = self.optIsNonNull(optional_llvm_ty, operand, is_by_ref);
         if (pred == .EQ) {
             return self.builder.buildNot(non_null_bit, "");
@@ -6662,6 +6703,7 @@ pub const FuncGen = struct {
         op: llvm.IntPredicate,
         operand_is_ptr: bool,
     ) !?*llvm.Value {
+        const mod = self.dg.module;
         const un_op = self.air.instructions.items(.data)[inst].un_op;
         const operand = try self.resolveInst(un_op);
         const operand_ty = self.air.typeOf(un_op);
@@ -6679,7 +6721,7 @@ pub const FuncGen = struct {
             }
         }

-        if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+        if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
             const loaded = if (operand_is_ptr)
                 self.builder.buildLoad(try self.dg.lowerType(err_union_ty), operand, "")
             else
                 operand;
             return self.builder.buildICmp(op, loaded, zero, "");
         }

-        const target = self.dg.module.getTarget();
-        const err_field_index = errUnionErrorOffset(payload_ty, target);
+        const err_field_index = errUnionErrorOffset(payload_ty, mod);

-        if (operand_is_ptr or isByRef(err_union_ty)) {
+        if (operand_is_ptr or isByRef(err_union_ty, mod)) {
             const err_union_llvm_ty = try self.dg.lowerType(err_union_ty);
             const err_field_ptr = self.builder.buildStructGEP(err_union_llvm_ty, operand, err_field_index, "");
             const loaded = self.builder.buildLoad(err_set_ty, err_field_ptr, "");
@@ -6702,17 +6743,18 @@ pub const FuncGen = struct {
     }

     fn airOptionalPayloadPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
         const operand = try self.resolveInst(ty_op.operand);
         const optional_ty = self.air.typeOf(ty_op.operand).childType();
         var buf: Type.Payload.ElemType = undefined;
         const payload_ty = optional_ty.optionalChild(&buf);
-        if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+        if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
             // We have a pointer to a zero-bit value and we need to return
             // a pointer to a zero-bit value.
             return operand;
         }
-        if (optional_ty.optionalReprIsPayload()) {
+        if (optional_ty.optionalReprIsPayload(mod)) {
             // The payload and the optional are the same value.
             return operand;
         }
@@ -6723,18 +6765,19 @@ pub const FuncGen = struct {

     fn airOptionalPayloadPtrSet(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
         comptime assert(optional_layout_version == 3);
+        const mod = self.dg.module;
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
         const operand = try self.resolveInst(ty_op.operand);
         const optional_ty = self.air.typeOf(ty_op.operand).childType();
         var buf: Type.Payload.ElemType = undefined;
         const payload_ty = optional_ty.optionalChild(&buf);
         const non_null_bit = self.context.intType(8).constInt(1, .False);
-        if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+        if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
             // We have a pointer to a i8. We need to set it to 1 and then return the same pointer.
             _ = self.builder.buildStore(non_null_bit, operand);
             return operand;
         }
-        if (optional_ty.optionalReprIsPayload()) {
+        if (optional_ty.optionalReprIsPayload(mod)) {
             // The payload and the optional are the same value.
             // Setting to non-null will be done when the payload is set.
             return operand;
@@ -6754,20 +6797,21 @@ pub const FuncGen = struct {
     }

     fn airOptionalPayload(self: *FuncGen, body_tail: []const Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
         const inst = body_tail[0];
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
         const operand = try self.resolveInst(ty_op.operand);
         const optional_ty = self.air.typeOf(ty_op.operand);
         const payload_ty = self.air.typeOfIndex(inst);
-        if (!payload_ty.hasRuntimeBitsIgnoreComptime()) return null;
+        if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return null;

-        if (optional_ty.optionalReprIsPayload()) {
+        if (optional_ty.optionalReprIsPayload(mod)) {
             // Payload value is the same as the optional value.
             return operand;
         }

         const opt_llvm_ty = try self.dg.lowerType(optional_ty);
-        const can_elide_load = if (isByRef(payload_ty)) self.canElideLoad(body_tail) else false;
+        const can_elide_load = if (isByRef(payload_ty, mod)) self.canElideLoad(body_tail) else false;
         return self.optPayloadHandle(opt_llvm_ty, operand, optional_ty, can_elide_load);
     }

@@ -6776,6 +6820,7 @@ pub const FuncGen = struct {
         body_tail: []const Air.Inst.Index,
         operand_is_ptr: bool,
     ) !?*llvm.Value {
+        const mod = self.dg.module;
         const inst = body_tail[0];
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
         const operand = try self.resolveInst(ty_op.operand);
@@ -6783,25 +6828,24 @@ pub const FuncGen = struct {
         const err_union_ty = if (operand_is_ptr) operand_ty.childType() else operand_ty;
         const result_ty = self.air.typeOfIndex(inst);
         const payload_ty = if (operand_is_ptr) result_ty.childType() else result_ty;
-        const target = self.dg.module.getTarget();

-        if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+        if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
             return if (operand_is_ptr) operand else null;
         }

-        const offset = errUnionPayloadOffset(payload_ty, target);
+        const offset = errUnionPayloadOffset(payload_ty, mod);
         const err_union_llvm_ty = try self.dg.lowerType(err_union_ty);
         if (operand_is_ptr) {
             return self.builder.buildStructGEP(err_union_llvm_ty, operand, offset, "");
-        } else if (isByRef(err_union_ty)) {
+        } else if (isByRef(err_union_ty, mod)) {
             const payload_ptr = self.builder.buildStructGEP(err_union_llvm_ty, operand, offset, "");
-            if (isByRef(payload_ty)) {
+            if (isByRef(payload_ty, mod)) {
                 if (self.canElideLoad(body_tail))
                     return payload_ptr;

-                return self.loadByRef(payload_ptr, payload_ty, payload_ty.abiAlignment(target), false);
+                return self.loadByRef(payload_ptr, payload_ty, payload_ty.abiAlignment(mod), false);
             }
             const load_inst = self.builder.buildLoad(err_union_llvm_ty.structGetTypeAtIndex(offset), payload_ptr, "");
-            load_inst.setAlignment(payload_ty.abiAlignment(target));
+            load_inst.setAlignment(payload_ty.abiAlignment(mod));
             return load_inst;
         }
         return self.builder.buildExtractValue(operand, offset, "");
     }

@@ -6812,6 +6856,7 @@ pub const FuncGen = struct {
         inst: Air.Inst.Index,
         operand_is_ptr: bool,
     ) !?*llvm.Value {
+        const mod = self.dg.module;
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
         const operand = try self.resolveInst(ty_op.operand);
         const operand_ty = self.air.typeOf(ty_op.operand);
@@ -6828,15 +6873,14 @@ pub const FuncGen = struct {
         const err_set_llvm_ty = try self.dg.lowerType(Type.anyerror);

         const payload_ty = err_union_ty.errorUnionPayload();
-        if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+        if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
             if (!operand_is_ptr) return operand;
             return self.builder.buildLoad(err_set_llvm_ty, operand, "");
         }

-        const target =
self.dg.module.getTarget(); - const offset = errUnionErrorOffset(payload_ty, target); + const offset = errUnionErrorOffset(payload_ty, mod); - if (operand_is_ptr or isByRef(err_union_ty)) { + if (operand_is_ptr or isByRef(err_union_ty, mod)) { const err_union_llvm_ty = try self.dg.lowerType(err_union_ty); const err_field_ptr = self.builder.buildStructGEP(err_union_llvm_ty, operand, offset, ""); return self.builder.buildLoad(err_set_llvm_ty, err_field_ptr, ""); @@ -6846,30 +6890,30 @@ pub const FuncGen = struct { } fn airErrUnionPayloadPtrSet(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); const err_union_ty = self.air.typeOf(ty_op.operand).childType(); const payload_ty = err_union_ty.errorUnionPayload(); const non_error_val = try self.dg.lowerValue(.{ .ty = Type.anyerror, .val = Value.zero }); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { _ = self.builder.buildStore(non_error_val, operand); return operand; } - const target = self.dg.module.getTarget(); const err_union_llvm_ty = try self.dg.lowerType(err_union_ty); { - const error_offset = errUnionErrorOffset(payload_ty, target); + const error_offset = errUnionErrorOffset(payload_ty, mod); // First set the non-error value. const non_null_ptr = self.builder.buildStructGEP(err_union_llvm_ty, operand, error_offset, ""); const store_inst = self.builder.buildStore(non_error_val, non_null_ptr); - store_inst.setAlignment(Type.anyerror.abiAlignment(target)); + store_inst.setAlignment(Type.anyerror.abiAlignment(mod)); } // Then return the payload pointer (only if it is used). if (self.liveness.isUnused(inst)) return null; - const payload_offset = errUnionPayloadOffset(payload_ty, target); + const payload_offset = errUnionPayloadOffset(payload_ty, mod); return self.builder.buildStructGEP(err_union_llvm_ty, operand, payload_offset, ""); } @@ -6885,15 +6929,14 @@ pub const FuncGen = struct { } fn airSaveErrReturnTraceIndex(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { - const target = self.dg.module.getTarget(); - const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; //const struct_ty = try self.resolveInst(ty_pl.ty); const struct_ty = self.air.getRefType(ty_pl.ty); const field_index = ty_pl.payload; var ptr_ty_buf: Type.Payload.Pointer = undefined; - const llvm_field_index = llvmFieldIndex(struct_ty, field_index, target, &ptr_ty_buf).?; + const mod = self.dg.module; + const llvm_field_index = llvmFieldIndex(struct_ty, field_index, mod, &ptr_ty_buf).?; const struct_llvm_ty = try self.dg.lowerType(struct_ty); const field_ptr = self.builder.buildStructGEP(struct_llvm_ty, self.err_ret_trace.?, llvm_field_index, ""); const field_ptr_ty = Type.initPayload(&ptr_ty_buf.base); @@ -6901,20 +6944,20 @@ pub const FuncGen = struct { } fn airWrapOptional(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const payload_ty = self.air.typeOf(ty_op.operand); const non_null_bit = self.context.intType(8).constInt(1, .False); comptime assert(optional_layout_version == 3); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) return non_null_bit; + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return non_null_bit; const operand = try self.resolveInst(ty_op.operand); const optional_ty = self.air.typeOfIndex(inst); - if (optional_ty.optionalReprIsPayload()) 
{ + if (optional_ty.optionalReprIsPayload(mod)) { return operand; } const llvm_optional_ty = try self.dg.lowerType(optional_ty); - if (isByRef(optional_ty)) { - const target = self.dg.module.getTarget(); - const optional_ptr = self.buildAlloca(llvm_optional_ty, optional_ty.abiAlignment(target)); + if (isByRef(optional_ty, mod)) { + const optional_ptr = self.buildAlloca(llvm_optional_ty, optional_ty.abiAlignment(mod)); const payload_ptr = self.builder.buildStructGEP(llvm_optional_ty, optional_ptr, 0, ""); var ptr_ty_payload: Type.Payload.ElemType = .{ .base = .{ .tag = .single_mut_pointer }, @@ -6931,24 +6974,24 @@ pub const FuncGen = struct { } fn airWrapErrUnionPayload(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const err_un_ty = self.air.typeOfIndex(inst); const operand = try self.resolveInst(ty_op.operand); const payload_ty = self.air.typeOf(ty_op.operand); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return operand; } const ok_err_code = (try self.dg.lowerType(Type.anyerror)).constNull(); const err_un_llvm_ty = try self.dg.lowerType(err_un_ty); - const target = self.dg.module.getTarget(); - const payload_offset = errUnionPayloadOffset(payload_ty, target); - const error_offset = errUnionErrorOffset(payload_ty, target); - if (isByRef(err_un_ty)) { - const result_ptr = self.buildAlloca(err_un_llvm_ty, err_un_ty.abiAlignment(target)); + const payload_offset = errUnionPayloadOffset(payload_ty, mod); + const error_offset = errUnionErrorOffset(payload_ty, mod); + if (isByRef(err_un_ty, mod)) { + const result_ptr = self.buildAlloca(err_un_llvm_ty, err_un_ty.abiAlignment(mod)); const err_ptr = self.builder.buildStructGEP(err_un_llvm_ty, result_ptr, error_offset, ""); const store_inst = self.builder.buildStore(ok_err_code, err_ptr); - store_inst.setAlignment(Type.anyerror.abiAlignment(target)); + store_inst.setAlignment(Type.anyerror.abiAlignment(mod)); const payload_ptr = self.builder.buildStructGEP(err_un_llvm_ty, result_ptr, payload_offset, ""); var ptr_ty_payload: Type.Payload.ElemType = .{ .base = .{ .tag = .single_mut_pointer }, @@ -6964,23 +7007,23 @@ pub const FuncGen = struct { } fn airWrapErrUnionErr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const err_un_ty = self.air.typeOfIndex(inst); const payload_ty = err_un_ty.errorUnionPayload(); const operand = try self.resolveInst(ty_op.operand); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return operand; } const err_un_llvm_ty = try self.dg.lowerType(err_un_ty); - const target = self.dg.module.getTarget(); - const payload_offset = errUnionPayloadOffset(payload_ty, target); - const error_offset = errUnionErrorOffset(payload_ty, target); - if (isByRef(err_un_ty)) { - const result_ptr = self.buildAlloca(err_un_llvm_ty, err_un_ty.abiAlignment(target)); + const payload_offset = errUnionPayloadOffset(payload_ty, mod); + const error_offset = errUnionErrorOffset(payload_ty, mod); + if (isByRef(err_un_ty, mod)) { + const result_ptr = self.buildAlloca(err_un_llvm_ty, err_un_ty.abiAlignment(mod)); const err_ptr = self.builder.buildStructGEP(err_un_llvm_ty, result_ptr, error_offset, ""); const store_inst = self.builder.buildStore(operand, err_ptr); - store_inst.setAlignment(Type.anyerror.abiAlignment(target)); + 
store_inst.setAlignment(Type.anyerror.abiAlignment(mod)); const payload_ptr = self.builder.buildStructGEP(err_un_llvm_ty, result_ptr, payload_offset, ""); var ptr_ty_payload: Type.Payload.ElemType = .{ .base = .{ .tag = .single_mut_pointer }, @@ -7021,6 +7064,7 @@ pub const FuncGen = struct { } fn airVectorStoreElem(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const data = self.air.instructions.items(.data)[inst].vector_store_elem; const extra = self.air.extraData(Air.Bin, data.payload).data; @@ -7032,8 +7076,7 @@ pub const FuncGen = struct { const loaded_vector = blk: { const elem_llvm_ty = try self.dg.lowerType(vector_ptr_ty.childType()); const load_inst = self.builder.buildLoad(elem_llvm_ty, vector_ptr, ""); - const target = self.dg.module.getTarget(); - load_inst.setAlignment(vector_ptr_ty.ptrAlignment(target)); + load_inst.setAlignment(vector_ptr_ty.ptrAlignment(mod)); load_inst.setVolatile(llvm.Bool.fromBool(vector_ptr_ty.isVolatilePtr())); break :blk load_inst; }; @@ -7043,24 +7086,26 @@ pub const FuncGen = struct { } fn airMin(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - const scalar_ty = self.air.typeOfIndex(inst).scalarType(); + const scalar_ty = self.air.typeOfIndex(inst).scalarType(mod); if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.fmin, scalar_ty, 2, .{ lhs, rhs }); - if (scalar_ty.isSignedInt()) return self.builder.buildSMin(lhs, rhs, ""); + if (scalar_ty.isSignedInt(mod)) return self.builder.buildSMin(lhs, rhs, ""); return self.builder.buildUMin(lhs, rhs, ""); } fn airMax(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - const scalar_ty = self.air.typeOfIndex(inst).scalarType(); + const scalar_ty = self.air.typeOfIndex(inst).scalarType(mod); if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.fmax, scalar_ty, 2, .{ lhs, rhs }); - if (scalar_ty.isSignedInt()) return self.builder.buildSMax(lhs, rhs, ""); + if (scalar_ty.isSignedInt(mod)) return self.builder.buildSMax(lhs, rhs, ""); return self.builder.buildUMax(lhs, rhs, ""); } @@ -7081,14 +7126,15 @@ pub const FuncGen = struct { fn airAdd(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value { self.builder.setFastMath(want_fast_math); + const mod = self.dg.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); const inst_ty = self.air.typeOfIndex(inst); - const scalar_ty = inst_ty.scalarType(); + const scalar_ty = inst_ty.scalarType(mod); if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.add, inst_ty, 2, .{ lhs, rhs }); - if (scalar_ty.isSignedInt()) return self.builder.buildNSWAdd(lhs, rhs, ""); + if (scalar_ty.isSignedInt(mod)) return self.builder.buildNSWAdd(lhs, rhs, ""); return self.builder.buildNUWAdd(lhs, rhs, ""); } @@ -7103,14 +7149,15 @@ pub const FuncGen = struct { } fn airAddSat(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); const inst_ty = 
self.air.typeOfIndex(inst); - const scalar_ty = inst_ty.scalarType(); + const scalar_ty = inst_ty.scalarType(mod); if (scalar_ty.isAnyFloat()) return self.todo("saturating float add", .{}); - if (scalar_ty.isSignedInt()) return self.builder.buildSAddSat(lhs, rhs, ""); + if (scalar_ty.isSignedInt(mod)) return self.builder.buildSAddSat(lhs, rhs, ""); return self.builder.buildUAddSat(lhs, rhs, ""); } @@ -7118,14 +7165,15 @@ pub const FuncGen = struct { fn airSub(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value { self.builder.setFastMath(want_fast_math); + const mod = self.dg.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); const inst_ty = self.air.typeOfIndex(inst); - const scalar_ty = inst_ty.scalarType(); + const scalar_ty = inst_ty.scalarType(mod); if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.sub, inst_ty, 2, .{ lhs, rhs }); - if (scalar_ty.isSignedInt()) return self.builder.buildNSWSub(lhs, rhs, ""); + if (scalar_ty.isSignedInt(mod)) return self.builder.buildNSWSub(lhs, rhs, ""); return self.builder.buildNUWSub(lhs, rhs, ""); } @@ -7140,28 +7188,30 @@ pub const FuncGen = struct { } fn airSubSat(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); const inst_ty = self.air.typeOfIndex(inst); - const scalar_ty = inst_ty.scalarType(); + const scalar_ty = inst_ty.scalarType(mod); if (scalar_ty.isAnyFloat()) return self.todo("saturating float sub", .{}); - if (scalar_ty.isSignedInt()) return self.builder.buildSSubSat(lhs, rhs, ""); + if (scalar_ty.isSignedInt(mod)) return self.builder.buildSSubSat(lhs, rhs, ""); return self.builder.buildUSubSat(lhs, rhs, ""); } fn airMul(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value { self.builder.setFastMath(want_fast_math); + const mod = self.dg.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); const inst_ty = self.air.typeOfIndex(inst); - const scalar_ty = inst_ty.scalarType(); + const scalar_ty = inst_ty.scalarType(mod); if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.mul, inst_ty, 2, .{ lhs, rhs }); - if (scalar_ty.isSignedInt()) return self.builder.buildNSWMul(lhs, rhs, ""); + if (scalar_ty.isSignedInt(mod)) return self.builder.buildNSWMul(lhs, rhs, ""); return self.builder.buildNUWMul(lhs, rhs, ""); } @@ -7176,14 +7226,15 @@ pub const FuncGen = struct { } fn airMulSat(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); const inst_ty = self.air.typeOfIndex(inst); - const scalar_ty = inst_ty.scalarType(); + const scalar_ty = inst_ty.scalarType(mod); if (scalar_ty.isAnyFloat()) return self.todo("saturating float mul", .{}); - if (scalar_ty.isSignedInt()) return self.builder.buildSMulFixSat(lhs, rhs, ""); + if (scalar_ty.isSignedInt(mod)) return self.builder.buildSMulFixSat(lhs, rhs, ""); return self.builder.buildUMulFixSat(lhs, rhs, ""); } @@ -7201,38 +7252,39 @@ pub const FuncGen = struct { fn airDivTrunc(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value { 
self.builder.setFastMath(want_fast_math); + const mod = self.dg.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); const inst_ty = self.air.typeOfIndex(inst); - const scalar_ty = inst_ty.scalarType(); + const scalar_ty = inst_ty.scalarType(mod); if (scalar_ty.isRuntimeFloat()) { const result = try self.buildFloatOp(.div, inst_ty, 2, .{ lhs, rhs }); return self.buildFloatOp(.trunc, inst_ty, 1, .{result}); } - if (scalar_ty.isSignedInt()) return self.builder.buildSDiv(lhs, rhs, ""); + if (scalar_ty.isSignedInt(mod)) return self.builder.buildSDiv(lhs, rhs, ""); return self.builder.buildUDiv(lhs, rhs, ""); } fn airDivFloor(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value { self.builder.setFastMath(want_fast_math); + const mod = self.dg.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); const inst_ty = self.air.typeOfIndex(inst); - const scalar_ty = inst_ty.scalarType(); + const scalar_ty = inst_ty.scalarType(mod); if (scalar_ty.isRuntimeFloat()) { const result = try self.buildFloatOp(.div, inst_ty, 2, .{ lhs, rhs }); return self.buildFloatOp(.floor, inst_ty, 1, .{result}); } - if (scalar_ty.isSignedInt()) { - const target = self.dg.module.getTarget(); + if (scalar_ty.isSignedInt(mod)) { const inst_llvm_ty = try self.dg.lowerType(inst_ty); - const scalar_bit_size_minus_one = scalar_ty.bitSize(target) - 1; - const bit_size_minus_one = if (inst_ty.zigTypeTag() == .Vector) const_vector: { + const scalar_bit_size_minus_one = scalar_ty.bitSize(mod) - 1; + const bit_size_minus_one = if (inst_ty.zigTypeTag(mod) == .Vector) const_vector: { const vec_len = inst_ty.vectorLen(); const scalar_llvm_ty = try self.dg.lowerType(scalar_ty); @@ -7258,40 +7310,43 @@ pub const FuncGen = struct { fn airDivExact(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value { self.builder.setFastMath(want_fast_math); + const mod = self.dg.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); const inst_ty = self.air.typeOfIndex(inst); - const scalar_ty = inst_ty.scalarType(); + const scalar_ty = inst_ty.scalarType(mod); if (scalar_ty.isRuntimeFloat()) return self.buildFloatOp(.div, inst_ty, 2, .{ lhs, rhs }); - if (scalar_ty.isSignedInt()) return self.builder.buildExactSDiv(lhs, rhs, ""); + if (scalar_ty.isSignedInt(mod)) return self.builder.buildExactSDiv(lhs, rhs, ""); return self.builder.buildExactUDiv(lhs, rhs, ""); } fn airRem(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value { self.builder.setFastMath(want_fast_math); + const mod = self.dg.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); const inst_ty = self.air.typeOfIndex(inst); - const scalar_ty = inst_ty.scalarType(); + const scalar_ty = inst_ty.scalarType(mod); if (scalar_ty.isRuntimeFloat()) return self.buildFloatOp(.fmod, inst_ty, 2, .{ lhs, rhs }); - if (scalar_ty.isSignedInt()) return self.builder.buildSRem(lhs, rhs, ""); + if (scalar_ty.isSignedInt(mod)) return self.builder.buildSRem(lhs, rhs, ""); return self.builder.buildURem(lhs, rhs, ""); } fn airMod(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value { 
self.builder.setFastMath(want_fast_math); + const mod = self.dg.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); const inst_ty = self.air.typeOfIndex(inst); const inst_llvm_ty = try self.dg.lowerType(inst_ty); - const scalar_ty = inst_ty.scalarType(); + const scalar_ty = inst_ty.scalarType(mod); if (scalar_ty.isRuntimeFloat()) { const a = try self.buildFloatOp(.fmod, inst_ty, 2, .{ lhs, rhs }); @@ -7301,10 +7356,9 @@ pub const FuncGen = struct { const ltz = try self.buildFloatCmp(.lt, inst_ty, .{ lhs, zero }); return self.builder.buildSelect(ltz, c, a, ""); } - if (scalar_ty.isSignedInt()) { - const target = self.dg.module.getTarget(); - const scalar_bit_size_minus_one = scalar_ty.bitSize(target) - 1; - const bit_size_minus_one = if (inst_ty.zigTypeTag() == .Vector) const_vector: { + if (scalar_ty.isSignedInt(mod)) { + const scalar_bit_size_minus_one = scalar_ty.bitSize(mod) - 1; + const bit_size_minus_one = if (inst_ty.zigTypeTag(mod) == .Vector) const_vector: { const vec_len = inst_ty.vectorLen(); const scalar_llvm_ty = try self.dg.lowerType(scalar_ty); @@ -7386,6 +7440,7 @@ pub const FuncGen = struct { signed_intrinsic: []const u8, unsigned_intrinsic: []const u8, ) !?*llvm.Value { + const mod = self.dg.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; @@ -7393,16 +7448,14 @@ pub const FuncGen = struct { const rhs = try self.resolveInst(extra.rhs); const lhs_ty = self.air.typeOf(extra.lhs); - const scalar_ty = lhs_ty.scalarType(); + const scalar_ty = lhs_ty.scalarType(mod); const dest_ty = self.air.typeOfIndex(inst); - const intrinsic_name = if (scalar_ty.isSignedInt()) signed_intrinsic else unsigned_intrinsic; + const intrinsic_name = if (scalar_ty.isSignedInt(mod)) signed_intrinsic else unsigned_intrinsic; const llvm_lhs_ty = try self.dg.lowerType(lhs_ty); const llvm_dest_ty = try self.dg.lowerType(dest_ty); - const tg = self.dg.module.getTarget(); - const llvm_fn = self.getIntrinsic(intrinsic_name, &.{llvm_lhs_ty}); const result_struct = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &[_]*llvm.Value{ lhs, rhs }, 2, .Fast, .Auto, ""); @@ -7410,12 +7463,11 @@ pub const FuncGen = struct { const overflow_bit = self.builder.buildExtractValue(result_struct, 1, ""); var ty_buf: Type.Payload.Pointer = undefined; - const result_index = llvmFieldIndex(dest_ty, 0, tg, &ty_buf).?; - const overflow_index = llvmFieldIndex(dest_ty, 1, tg, &ty_buf).?; + const result_index = llvmFieldIndex(dest_ty, 0, mod, &ty_buf).?; + const overflow_index = llvmFieldIndex(dest_ty, 1, mod, &ty_buf).?; - if (isByRef(dest_ty)) { - const target = self.dg.module.getTarget(); - const result_alignment = dest_ty.abiAlignment(target); + if (isByRef(dest_ty, mod)) { + const result_alignment = dest_ty.abiAlignment(mod); const alloca_inst = self.buildAlloca(llvm_dest_ty, result_alignment); { const field_ptr = self.builder.buildStructGEP(llvm_dest_ty, alloca_inst, result_index, ""); @@ -7486,8 +7538,9 @@ pub const FuncGen = struct { ty: Type, params: [2]*llvm.Value, ) !*llvm.Value { + const mod = self.dg.module; const target = self.dg.module.getTarget(); - const scalar_ty = ty.scalarType(); + const scalar_ty = ty.scalarType(mod); const scalar_llvm_ty = try self.dg.lowerType(scalar_ty); if (intrinsicsAllowed(scalar_ty, target)) { @@ -7531,7 +7584,7 @@ pub const FuncGen = struct { .gte => .SGE, }; - if (ty.zigTypeTag() == 
.Vector) { + if (ty.zigTypeTag(mod) == .Vector) { const vec_len = ty.vectorLen(); const vector_result_ty = llvm_i32.vectorType(vec_len); @@ -7587,8 +7640,9 @@ pub const FuncGen = struct { comptime params_len: usize, params: [params_len]*llvm.Value, ) !*llvm.Value { - const target = self.dg.module.getTarget(); - const scalar_ty = ty.scalarType(); + const mod = self.dg.module; + const target = mod.getTarget(); + const scalar_ty = ty.scalarType(mod); const llvm_ty = try self.dg.lowerType(ty); const scalar_llvm_ty = try self.dg.lowerType(scalar_ty); @@ -7615,7 +7669,7 @@ pub const FuncGen = struct { const one = int_llvm_ty.constInt(1, .False); const shift_amt = int_llvm_ty.constInt(float_bits - 1, .False); const sign_mask = one.constShl(shift_amt); - const result = if (ty.zigTypeTag() == .Vector) blk: { + const result = if (ty.zigTypeTag(mod) == .Vector) blk: { const splat_sign_mask = self.builder.buildVectorSplat(ty.vectorLen(), sign_mask, ""); const cast_ty = int_llvm_ty.vectorType(ty.vectorLen()); const bitcasted_operand = self.builder.buildBitCast(params[0], cast_ty, ""); @@ -7662,7 +7716,7 @@ pub const FuncGen = struct { .libc => |fn_name| b: { const param_types = [3]*llvm.Type{ scalar_llvm_ty, scalar_llvm_ty, scalar_llvm_ty }; const libc_fn = self.getLibcFunction(fn_name, param_types[0..params.len], scalar_llvm_ty); - if (ty.zigTypeTag() == .Vector) { + if (ty.zigTypeTag(mod) == .Vector) { const result = llvm_ty.getUndef(); return self.buildElementwiseCall(libc_fn, ¶ms, result, ty.vectorLen()); } @@ -7686,6 +7740,7 @@ pub const FuncGen = struct { } fn airShlWithOverflow(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; @@ -7694,21 +7749,19 @@ pub const FuncGen = struct { const lhs_ty = self.air.typeOf(extra.lhs); const rhs_ty = self.air.typeOf(extra.rhs); - const lhs_scalar_ty = lhs_ty.scalarType(); - const rhs_scalar_ty = rhs_ty.scalarType(); + const lhs_scalar_ty = lhs_ty.scalarType(mod); + const rhs_scalar_ty = rhs_ty.scalarType(mod); const dest_ty = self.air.typeOfIndex(inst); const llvm_dest_ty = try self.dg.lowerType(dest_ty); - const tg = self.dg.module.getTarget(); - - const casted_rhs = if (rhs_scalar_ty.bitSize(tg) < lhs_scalar_ty.bitSize(tg)) + const casted_rhs = if (rhs_scalar_ty.bitSize(mod) < lhs_scalar_ty.bitSize(mod)) self.builder.buildZExt(rhs, try self.dg.lowerType(lhs_ty), "") else rhs; const result = self.builder.buildShl(lhs, casted_rhs, ""); - const reconstructed = if (lhs_scalar_ty.isSignedInt()) + const reconstructed = if (lhs_scalar_ty.isSignedInt(mod)) self.builder.buildAShr(result, casted_rhs, "") else self.builder.buildLShr(result, casted_rhs, ""); @@ -7716,12 +7769,11 @@ pub const FuncGen = struct { const overflow_bit = self.builder.buildICmp(.NE, lhs, reconstructed, ""); var ty_buf: Type.Payload.Pointer = undefined; - const result_index = llvmFieldIndex(dest_ty, 0, tg, &ty_buf).?; - const overflow_index = llvmFieldIndex(dest_ty, 1, tg, &ty_buf).?; + const result_index = llvmFieldIndex(dest_ty, 0, mod, &ty_buf).?; + const overflow_index = llvmFieldIndex(dest_ty, 1, mod, &ty_buf).?; - if (isByRef(dest_ty)) { - const target = self.dg.module.getTarget(); - const result_alignment = dest_ty.abiAlignment(target); + if (isByRef(dest_ty, mod)) { + const result_alignment = dest_ty.abiAlignment(mod); const alloca_inst = self.buildAlloca(llvm_dest_ty, result_alignment); { const field_ptr = 
self.builder.buildStructGEP(llvm_dest_ty, alloca_inst, result_index, ""); @@ -7763,6 +7815,7 @@ pub const FuncGen = struct { } fn airShlExact(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); @@ -7770,20 +7823,19 @@ pub const FuncGen = struct { const lhs_ty = self.air.typeOf(bin_op.lhs); const rhs_ty = self.air.typeOf(bin_op.rhs); - const lhs_scalar_ty = lhs_ty.scalarType(); - const rhs_scalar_ty = rhs_ty.scalarType(); + const lhs_scalar_ty = lhs_ty.scalarType(mod); + const rhs_scalar_ty = rhs_ty.scalarType(mod); - const tg = self.dg.module.getTarget(); - - const casted_rhs = if (rhs_scalar_ty.bitSize(tg) < lhs_scalar_ty.bitSize(tg)) + const casted_rhs = if (rhs_scalar_ty.bitSize(mod) < lhs_scalar_ty.bitSize(mod)) self.builder.buildZExt(rhs, try self.dg.lowerType(lhs_ty), "") else rhs; - if (lhs_scalar_ty.isSignedInt()) return self.builder.buildNSWShl(lhs, casted_rhs, ""); + if (lhs_scalar_ty.isSignedInt(mod)) return self.builder.buildNSWShl(lhs, casted_rhs, ""); return self.builder.buildNUWShl(lhs, casted_rhs, ""); } fn airShl(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); @@ -7791,12 +7843,10 @@ pub const FuncGen = struct { const lhs_type = self.air.typeOf(bin_op.lhs); const rhs_type = self.air.typeOf(bin_op.rhs); - const lhs_scalar_ty = lhs_type.scalarType(); - const rhs_scalar_ty = rhs_type.scalarType(); - - const tg = self.dg.module.getTarget(); + const lhs_scalar_ty = lhs_type.scalarType(mod); + const rhs_scalar_ty = rhs_type.scalarType(mod); - const casted_rhs = if (rhs_scalar_ty.bitSize(tg) < lhs_scalar_ty.bitSize(tg)) + const casted_rhs = if (rhs_scalar_ty.bitSize(mod) < lhs_scalar_ty.bitSize(mod)) self.builder.buildZExt(rhs, try self.dg.lowerType(lhs_type), "") else rhs; @@ -7804,6 +7854,7 @@ pub const FuncGen = struct { } fn airShlSat(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); @@ -7811,17 +7862,16 @@ pub const FuncGen = struct { const lhs_ty = self.air.typeOf(bin_op.lhs); const rhs_ty = self.air.typeOf(bin_op.rhs); - const lhs_scalar_ty = lhs_ty.scalarType(); - const rhs_scalar_ty = rhs_ty.scalarType(); - const tg = self.dg.module.getTarget(); - const lhs_bits = lhs_scalar_ty.bitSize(tg); + const lhs_scalar_ty = lhs_ty.scalarType(mod); + const rhs_scalar_ty = rhs_ty.scalarType(mod); + const lhs_bits = lhs_scalar_ty.bitSize(mod); - const casted_rhs = if (rhs_scalar_ty.bitSize(tg) < lhs_bits) + const casted_rhs = if (rhs_scalar_ty.bitSize(mod) < lhs_bits) self.builder.buildZExt(rhs, lhs.typeOf(), "") else rhs; - const result = if (lhs_scalar_ty.isSignedInt()) + const result = if (lhs_scalar_ty.isSignedInt(mod)) self.builder.buildSShlSat(lhs, casted_rhs, "") else self.builder.buildUShlSat(lhs, casted_rhs, ""); @@ -7834,7 +7884,7 @@ pub const FuncGen = struct { const lhs_scalar_llvm_ty = try self.dg.lowerType(lhs_scalar_ty); const bits = lhs_scalar_llvm_ty.constInt(lhs_bits, .False); const lhs_max = lhs_scalar_llvm_ty.constAllOnes(); - if (rhs_ty.zigTypeTag() == .Vector) { + if (rhs_ty.zigTypeTag(mod) == .Vector) { const vec_len = rhs_ty.vectorLen(); const bits_vec = self.builder.buildVectorSplat(vec_len, bits, ""); const lhs_max_vec = 
self.builder.buildVectorSplat(vec_len, lhs_max, ""); @@ -7847,6 +7897,7 @@ pub const FuncGen = struct { } fn airShr(self: *FuncGen, inst: Air.Inst.Index, is_exact: bool) !?*llvm.Value { + const mod = self.dg.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); @@ -7854,16 +7905,14 @@ pub const FuncGen = struct { const lhs_ty = self.air.typeOf(bin_op.lhs); const rhs_ty = self.air.typeOf(bin_op.rhs); - const lhs_scalar_ty = lhs_ty.scalarType(); - const rhs_scalar_ty = rhs_ty.scalarType(); - - const tg = self.dg.module.getTarget(); + const lhs_scalar_ty = lhs_ty.scalarType(mod); + const rhs_scalar_ty = rhs_ty.scalarType(mod); - const casted_rhs = if (rhs_scalar_ty.bitSize(tg) < lhs_scalar_ty.bitSize(tg)) + const casted_rhs = if (rhs_scalar_ty.bitSize(mod) < lhs_scalar_ty.bitSize(mod)) self.builder.buildZExt(rhs, try self.dg.lowerType(lhs_ty), "") else rhs; - const is_signed_int = lhs_scalar_ty.isSignedInt(); + const is_signed_int = lhs_scalar_ty.isSignedInt(mod); if (is_exact) { if (is_signed_int) { @@ -7881,14 +7930,14 @@ pub const FuncGen = struct { } fn airIntCast(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { - const target = self.dg.module.getTarget(); + const mod = self.dg.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const dest_ty = self.air.typeOfIndex(inst); - const dest_info = dest_ty.intInfo(target); + const dest_info = dest_ty.intInfo(mod); const dest_llvm_ty = try self.dg.lowerType(dest_ty); const operand = try self.resolveInst(ty_op.operand); const operand_ty = self.air.typeOf(ty_op.operand); - const operand_info = operand_ty.intInfo(target); + const operand_info = operand_ty.intInfo(mod); if (operand_info.bits < dest_info.bits) { switch (operand_info.signedness) { @@ -7910,11 +7959,12 @@ pub const FuncGen = struct { } fn airFptrunc(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); const operand_ty = self.air.typeOf(ty_op.operand); const dest_ty = self.air.typeOfIndex(inst); - const target = self.dg.module.getTarget(); + const target = mod.getTarget(); const dest_bits = dest_ty.floatBits(target); const src_bits = operand_ty.floatBits(target); @@ -7939,11 +7989,12 @@ pub const FuncGen = struct { } fn airFpext(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); const operand_ty = self.air.typeOf(ty_op.operand); const dest_ty = self.air.typeOfIndex(inst); - const target = self.dg.module.getTarget(); + const target = mod.getTarget(); const dest_bits = dest_ty.floatBits(target); const src_bits = operand_ty.floatBits(target); @@ -7985,10 +8036,10 @@ pub const FuncGen = struct { } fn bitCast(self: *FuncGen, operand: *llvm.Value, operand_ty: Type, inst_ty: Type) !*llvm.Value { - const operand_is_ref = isByRef(operand_ty); - const result_is_ref = isByRef(inst_ty); + const mod = self.dg.module; + const operand_is_ref = isByRef(operand_ty, mod); + const result_is_ref = isByRef(inst_ty, mod); const llvm_dest_ty = try self.dg.lowerType(inst_ty); - const target = self.dg.module.getTarget(); if (operand_is_ref and result_is_ref) { // They are both pointers, so just return the same opaque pointer :) @@ -8001,20 +8052,20 @@ pub const FuncGen = struct { return self.builder.buildZExtOrBitCast(operand, 
llvm_dest_ty, ""); } - if (operand_ty.zigTypeTag() == .Int and inst_ty.isPtrAtRuntime()) { + if (operand_ty.zigTypeTag(mod) == .Int and inst_ty.isPtrAtRuntime(mod)) { return self.builder.buildIntToPtr(operand, llvm_dest_ty, ""); } - if (operand_ty.zigTypeTag() == .Vector and inst_ty.zigTypeTag() == .Array) { + if (operand_ty.zigTypeTag(mod) == .Vector and inst_ty.zigTypeTag(mod) == .Array) { const elem_ty = operand_ty.childType(); if (!result_is_ref) { return self.dg.todo("implement bitcast vector to non-ref array", .{}); } const array_ptr = self.buildAlloca(llvm_dest_ty, null); - const bitcast_ok = elem_ty.bitSize(target) == elem_ty.abiSize(target) * 8; + const bitcast_ok = elem_ty.bitSize(mod) == elem_ty.abiSize(mod) * 8; if (bitcast_ok) { const llvm_store = self.builder.buildStore(operand, array_ptr); - llvm_store.setAlignment(inst_ty.abiAlignment(target)); + llvm_store.setAlignment(inst_ty.abiAlignment(mod)); } else { // If the ABI size of the element type is not evenly divisible by size in bits; // a simple bitcast will not work, and we fall back to extractelement. @@ -8033,19 +8084,19 @@ pub const FuncGen = struct { } } return array_ptr; - } else if (operand_ty.zigTypeTag() == .Array and inst_ty.zigTypeTag() == .Vector) { + } else if (operand_ty.zigTypeTag(mod) == .Array and inst_ty.zigTypeTag(mod) == .Vector) { const elem_ty = operand_ty.childType(); const llvm_vector_ty = try self.dg.lowerType(inst_ty); if (!operand_is_ref) { return self.dg.todo("implement bitcast non-ref array to vector", .{}); } - const bitcast_ok = elem_ty.bitSize(target) == elem_ty.abiSize(target) * 8; + const bitcast_ok = elem_ty.bitSize(mod) == elem_ty.abiSize(mod) * 8; if (bitcast_ok) { const vector = self.builder.buildLoad(llvm_vector_ty, operand, ""); // The array is aligned to the element's alignment, while the vector might have a completely // different alignment. This means we need to enforce the alignment of this load. - vector.setAlignment(elem_ty.abiAlignment(target)); + vector.setAlignment(elem_ty.abiAlignment(mod)); return vector; } else { // If the ABI size of the element type is not evenly divisible by size in bits; @@ -8073,12 +8124,12 @@ pub const FuncGen = struct { if (operand_is_ref) { const load_inst = self.builder.buildLoad(llvm_dest_ty, operand, ""); - load_inst.setAlignment(operand_ty.abiAlignment(target)); + load_inst.setAlignment(operand_ty.abiAlignment(mod)); return load_inst; } if (result_is_ref) { - const alignment = @max(operand_ty.abiAlignment(target), inst_ty.abiAlignment(target)); + const alignment = @max(operand_ty.abiAlignment(mod), inst_ty.abiAlignment(mod)); const result_ptr = self.buildAlloca(llvm_dest_ty, alignment); const store_inst = self.builder.buildStore(operand, result_ptr); store_inst.setAlignment(alignment); @@ -8089,7 +8140,7 @@ pub const FuncGen = struct { // Both our operand and our result are values, not pointers, // but LLVM won't let us bitcast struct values. // Therefore, we store operand to alloca, then load for result. 
- const alignment = @max(operand_ty.abiAlignment(target), inst_ty.abiAlignment(target)); + const alignment = @max(operand_ty.abiAlignment(mod), inst_ty.abiAlignment(mod)); const result_ptr = self.buildAlloca(llvm_dest_ty, alignment); const store_inst = self.builder.buildStore(operand, result_ptr); store_inst.setAlignment(alignment); @@ -8118,12 +8169,13 @@ pub const FuncGen = struct { } const src_index = self.air.instructions.items(.data)[inst].arg.src_index; + const mod = self.dg.module; const func = self.dg.decl.getFunction().?; - const lbrace_line = self.dg.module.declPtr(func.owner_decl).src_line + func.lbrace_line + 1; + const lbrace_line = mod.declPtr(func.owner_decl).src_line + func.lbrace_line + 1; const lbrace_col = func.lbrace_column + 1; const di_local_var = dib.createParameterVariable( self.di_scope.?, - func.getParamName(self.dg.module, src_index).ptr, // TODO test 0 bit args + func.getParamName(mod, src_index).ptr, // TODO test 0 bit args self.di_file.?, lbrace_line, try self.dg.object.lowerDebugType(inst_ty, .full), @@ -8134,10 +8186,10 @@ pub const FuncGen = struct { const debug_loc = llvm.getDebugLoc(lbrace_line, lbrace_col, self.di_scope.?, null); const insert_block = self.builder.getInsertBlock(); - if (isByRef(inst_ty)) { + if (isByRef(inst_ty, mod)) { _ = dib.insertDeclareAtEnd(arg_val, di_local_var, debug_loc, insert_block); } else if (self.dg.module.comp.bin_file.options.optimize_mode == .Debug) { - const alignment = inst_ty.abiAlignment(self.dg.module.getTarget()); + const alignment = inst_ty.abiAlignment(mod); const alloca = self.buildAlloca(arg_val.typeOf(), alignment); const store_inst = self.builder.buildStore(arg_val, alloca); store_inst.setAlignment(alignment); @@ -8153,22 +8205,22 @@ pub const FuncGen = struct { fn airAlloc(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { const ptr_ty = self.air.typeOfIndex(inst); const pointee_type = ptr_ty.childType(); - if (!pointee_type.isFnOrHasRuntimeBitsIgnoreComptime()) return self.dg.lowerPtrToVoid(ptr_ty); + const mod = self.dg.module; + if (!pointee_type.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return self.dg.lowerPtrToVoid(ptr_ty); const pointee_llvm_ty = try self.dg.lowerType(pointee_type); - const target = self.dg.module.getTarget(); - const alignment = ptr_ty.ptrAlignment(target); + const alignment = ptr_ty.ptrAlignment(mod); return self.buildAlloca(pointee_llvm_ty, alignment); } fn airRetPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { const ptr_ty = self.air.typeOfIndex(inst); const ret_ty = ptr_ty.childType(); - if (!ret_ty.isFnOrHasRuntimeBitsIgnoreComptime()) return self.dg.lowerPtrToVoid(ptr_ty); + const mod = self.dg.module; + if (!ret_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return self.dg.lowerPtrToVoid(ptr_ty); if (self.ret_ptr) |ret_ptr| return ret_ptr; const ret_llvm_ty = try self.dg.lowerType(ret_ty); - const target = self.dg.module.getTarget(); - return self.buildAlloca(ret_llvm_ty, ptr_ty.ptrAlignment(target)); + return self.buildAlloca(ret_llvm_ty, ptr_ty.ptrAlignment(mod)); } /// Use this instead of builder.buildAlloca, because this function makes sure to @@ -8182,8 +8234,9 @@ pub const FuncGen = struct { const dest_ptr = try self.resolveInst(bin_op.lhs); const ptr_ty = self.air.typeOf(bin_op.lhs); const operand_ty = ptr_ty.childType(); + const mod = self.dg.module; - const val_is_undef = if (self.air.value(bin_op.rhs)) |val| val.isUndefDeep() else false; + const val_is_undef = if (self.air.value(bin_op.rhs, mod)) |val| val.isUndefDeep() else false; if (val_is_undef) { // Even 
if safety is disabled, we still emit a memset to undefined since it conveys // extra information to LLVM. However, safety makes the difference between using @@ -8193,13 +8246,12 @@ pub const FuncGen = struct { u8_llvm_ty.constInt(0xaa, .False) else u8_llvm_ty.getUndef(); - const target = self.dg.module.getTarget(); - const operand_size = operand_ty.abiSize(target); + const operand_size = operand_ty.abiSize(mod); const usize_llvm_ty = try self.dg.lowerType(Type.usize); const len = usize_llvm_ty.constInt(operand_size, .False); - const dest_ptr_align = ptr_ty.ptrAlignment(target); + const dest_ptr_align = ptr_ty.ptrAlignment(mod); _ = self.builder.buildMemSet(dest_ptr, fill_byte, len, dest_ptr_align, ptr_ty.isVolatilePtr()); - if (safety and self.dg.module.comp.bin_file.options.valgrind) { + if (safety and mod.comp.bin_file.options.valgrind) { self.valgrindMarkUndef(dest_ptr, len); } return null; @@ -8230,6 +8282,7 @@ pub const FuncGen = struct { } fn airLoad(fg: *FuncGen, body_tail: []const Air.Inst.Index) !?*llvm.Value { + const mod = fg.dg.module; const inst = body_tail[0]; const ty_op = fg.air.instructions.items(.data)[inst].ty_op; const ptr_ty = fg.air.typeOf(ty_op.operand); @@ -8237,7 +8290,7 @@ pub const FuncGen = struct { const ptr = try fg.resolveInst(ty_op.operand); elide: { - if (!isByRef(ptr_info.pointee_type)) break :elide; + if (!isByRef(ptr_info.pointee_type, mod)) break :elide; if (!canElideLoad(fg, body_tail)) break :elide; return ptr; } @@ -8261,8 +8314,9 @@ pub const FuncGen = struct { fn airRetAddr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { _ = inst; + const mod = self.dg.module; const llvm_usize = try self.dg.lowerType(Type.usize); - const target = self.dg.module.getTarget(); + const target = mod.getTarget(); if (!target_util.supportsReturnAddress(target)) { // https://github.com/ziglang/zig/issues/11946 return llvm_usize.constNull(); @@ -8301,6 +8355,7 @@ pub const FuncGen = struct { } fn airCmpxchg(self: *FuncGen, inst: Air.Inst.Index, is_weak: bool) !?*llvm.Value { + const mod = self.dg.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Cmpxchg, ty_pl.payload).data; const ptr = try self.resolveInst(extra.ptr); @@ -8310,7 +8365,7 @@ pub const FuncGen = struct { const opt_abi_ty = self.dg.getAtomicAbiType(operand_ty, false); if (opt_abi_ty) |abi_ty| { // operand needs widening and truncating - if (operand_ty.isSignedInt()) { + if (operand_ty.isSignedInt(mod)) { expected_value = self.builder.buildSExt(expected_value, abi_ty, ""); new_value = self.builder.buildSExt(new_value, abi_ty, ""); } else { @@ -8336,7 +8391,7 @@ pub const FuncGen = struct { } const success_bit = self.builder.buildExtractValue(result, 1, ""); - if (optional_ty.optionalReprIsPayload()) { + if (optional_ty.optionalReprIsPayload(mod)) { return self.builder.buildSelect(success_bit, payload.typeOf().constNull(), payload, ""); } @@ -8347,13 +8402,14 @@ pub const FuncGen = struct { } fn airAtomicRmw(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const pl_op = self.air.instructions.items(.data)[inst].pl_op; const extra = self.air.extraData(Air.AtomicRmw, pl_op.payload).data; const ptr = try self.resolveInst(pl_op.operand); const ptr_ty = self.air.typeOf(pl_op.operand); const operand_ty = ptr_ty.elemType(); const operand = try self.resolveInst(extra.operand); - const is_signed_int = operand_ty.isSignedInt(); + const is_signed_int = operand_ty.isSignedInt(mod); const is_float = operand_ty.isRuntimeFloat(); 
const op = toLlvmAtomicRmwBinOp(extra.op(), is_signed_int, is_float); const ordering = toLlvmAtomicOrdering(extra.ordering()); @@ -8402,17 +8458,17 @@ pub const FuncGen = struct { } fn airAtomicLoad(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const atomic_load = self.air.instructions.items(.data)[inst].atomic_load; const ptr = try self.resolveInst(atomic_load.ptr); const ptr_ty = self.air.typeOf(atomic_load.ptr); const ptr_info = ptr_ty.ptrInfo().data; const elem_ty = ptr_info.pointee_type; - if (!elem_ty.hasRuntimeBitsIgnoreComptime()) + if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) return null; const ordering = toLlvmAtomicOrdering(atomic_load.order); const opt_abi_llvm_ty = self.dg.getAtomicAbiType(elem_ty, false); - const target = self.dg.module.getTarget(); - const ptr_alignment = ptr_info.alignment(target); + const ptr_alignment = ptr_info.alignment(mod); const ptr_volatile = llvm.Bool.fromBool(ptr_info.@"volatile"); const elem_llvm_ty = try self.dg.lowerType(elem_ty); @@ -8436,17 +8492,18 @@ pub const FuncGen = struct { inst: Air.Inst.Index, ordering: llvm.AtomicOrdering, ) !?*llvm.Value { + const mod = self.dg.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const ptr_ty = self.air.typeOf(bin_op.lhs); const operand_ty = ptr_ty.childType(); - if (!operand_ty.isFnOrHasRuntimeBitsIgnoreComptime()) return null; + if (!operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return null; const ptr = try self.resolveInst(bin_op.lhs); var element = try self.resolveInst(bin_op.rhs); const opt_abi_ty = self.dg.getAtomicAbiType(operand_ty, false); if (opt_abi_ty) |abi_ty| { // operand needs widening - if (operand_ty.isSignedInt()) { + if (operand_ty.isSignedInt(mod)) { element = self.builder.buildSExt(element, abi_ty, ""); } else { element = self.builder.buildZExt(element, abi_ty, ""); @@ -8457,18 +8514,19 @@ pub const FuncGen = struct { } fn airMemset(self: *FuncGen, inst: Air.Inst.Index, safety: bool) !?*llvm.Value { + const mod = self.dg.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const dest_slice = try self.resolveInst(bin_op.lhs); const ptr_ty = self.air.typeOf(bin_op.lhs); const elem_ty = self.air.typeOf(bin_op.rhs); const module = self.dg.module; const target = module.getTarget(); - const dest_ptr_align = ptr_ty.ptrAlignment(target); + const dest_ptr_align = ptr_ty.ptrAlignment(mod); const u8_llvm_ty = self.context.intType(8); const dest_ptr = self.sliceOrArrayPtr(dest_slice, ptr_ty); const is_volatile = ptr_ty.isVolatilePtr(); - if (self.air.value(bin_op.rhs)) |elem_val| { + if (self.air.value(bin_op.rhs, mod)) |elem_val| { if (elem_val.isUndefDeep()) { // Even if safety is disabled, we still emit a memset to undefined since it conveys // extra information to LLVM. However, safety makes the difference between using @@ -8503,7 +8561,7 @@ pub const FuncGen = struct { } const value = try self.resolveInst(bin_op.rhs); - const elem_abi_size = elem_ty.abiSize(target); + const elem_abi_size = elem_ty.abiSize(mod); if (elem_abi_size == 1) { // In this case we can take advantage of LLVM's intrinsic. 
@@ -8551,9 +8609,9 @@ pub const FuncGen = struct { _ = self.builder.buildCondBr(end, body_block, end_block); self.builder.positionBuilderAtEnd(body_block); - const elem_abi_alignment = elem_ty.abiAlignment(target); + const elem_abi_alignment = elem_ty.abiAlignment(mod); const it_ptr_alignment = @min(elem_abi_alignment, dest_ptr_align); - if (isByRef(elem_ty)) { + if (isByRef(elem_ty, mod)) { _ = self.builder.buildMemCpy( it_ptr, it_ptr_alignment, @@ -8589,13 +8647,13 @@ pub const FuncGen = struct { const src_ptr = self.sliceOrArrayPtr(src_slice, src_ptr_ty); const len = self.sliceOrArrayLenInBytes(dest_slice, dest_ptr_ty); const dest_ptr = self.sliceOrArrayPtr(dest_slice, dest_ptr_ty); + const mod = self.dg.module; const is_volatile = src_ptr_ty.isVolatilePtr() or dest_ptr_ty.isVolatilePtr(); - const target = self.dg.module.getTarget(); _ = self.builder.buildMemCpy( dest_ptr, - dest_ptr_ty.ptrAlignment(target), + dest_ptr_ty.ptrAlignment(mod), src_ptr, - src_ptr_ty.ptrAlignment(target), + src_ptr_ty.ptrAlignment(mod), len, is_volatile, ); @@ -8603,10 +8661,10 @@ pub const FuncGen = struct { } fn airSetUnionTag(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const un_ty = self.air.typeOf(bin_op.lhs).childType(); - const target = self.dg.module.getTarget(); - const layout = un_ty.unionGetLayout(target); + const layout = un_ty.unionGetLayout(mod); if (layout.tag_size == 0) return null; const union_ptr = try self.resolveInst(bin_op.lhs); const new_tag = try self.resolveInst(bin_op.rhs); @@ -8624,13 +8682,13 @@ pub const FuncGen = struct { } fn airGetUnionTag(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const un_ty = self.air.typeOf(ty_op.operand); - const target = self.dg.module.getTarget(); - const layout = un_ty.unionGetLayout(target); + const layout = un_ty.unionGetLayout(mod); if (layout.tag_size == 0) return null; const union_handle = try self.resolveInst(ty_op.operand); - if (isByRef(un_ty)) { + if (isByRef(un_ty, mod)) { const llvm_un_ty = try self.dg.lowerType(un_ty); if (layout.payload_size == 0) { return self.builder.buildLoad(llvm_un_ty, union_handle, ""); @@ -8666,6 +8724,7 @@ pub const FuncGen = struct { } fn airClzCtz(self: *FuncGen, inst: Air.Inst.Index, llvm_fn_name: []const u8) !?*llvm.Value { + const mod = self.dg.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand_ty = self.air.typeOf(ty_op.operand); const operand = try self.resolveInst(ty_op.operand); @@ -8679,9 +8738,8 @@ pub const FuncGen = struct { const result_ty = self.air.typeOfIndex(inst); const result_llvm_ty = try self.dg.lowerType(result_ty); - const target = self.dg.module.getTarget(); - const bits = operand_ty.intInfo(target).bits; - const result_bits = result_ty.intInfo(target).bits; + const bits = operand_ty.intInfo(mod).bits; + const result_bits = result_ty.intInfo(mod).bits; if (bits > result_bits) { return self.builder.buildTrunc(wrong_size_result, result_llvm_ty, ""); } else if (bits < result_bits) { @@ -8692,6 +8750,7 @@ pub const FuncGen = struct { } fn airBitOp(self: *FuncGen, inst: Air.Inst.Index, llvm_fn_name: []const u8) !?*llvm.Value { + const mod = self.dg.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand_ty = self.air.typeOf(ty_op.operand); const operand = try self.resolveInst(ty_op.operand); @@ -8704,9 +8763,8 @@ pub const FuncGen = 
struct { const result_ty = self.air.typeOfIndex(inst); const result_llvm_ty = try self.dg.lowerType(result_ty); - const target = self.dg.module.getTarget(); - const bits = operand_ty.intInfo(target).bits; - const result_bits = result_ty.intInfo(target).bits; + const bits = operand_ty.intInfo(mod).bits; + const result_bits = result_ty.intInfo(mod).bits; if (bits > result_bits) { return self.builder.buildTrunc(wrong_size_result, result_llvm_ty, ""); } else if (bits < result_bits) { @@ -8717,10 +8775,10 @@ pub const FuncGen = struct { } fn airByteSwap(self: *FuncGen, inst: Air.Inst.Index, llvm_fn_name: []const u8) !?*llvm.Value { - const target = self.dg.module.getTarget(); + const mod = self.dg.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand_ty = self.air.typeOf(ty_op.operand); - var bits = operand_ty.intInfo(target).bits; + var bits = operand_ty.intInfo(mod).bits; assert(bits % 8 == 0); var operand = try self.resolveInst(ty_op.operand); @@ -8730,7 +8788,7 @@ pub const FuncGen = struct { // If not an even byte-multiple, we need zero-extend + shift-left 1 byte // The truncated result at the end will be the correct bswap const scalar_llvm_ty = self.context.intType(bits + 8); - if (operand_ty.zigTypeTag() == .Vector) { + if (operand_ty.zigTypeTag(mod) == .Vector) { const vec_len = operand_ty.vectorLen(); operand_llvm_ty = scalar_llvm_ty.vectorType(vec_len); @@ -8759,7 +8817,7 @@ pub const FuncGen = struct { const result_ty = self.air.typeOfIndex(inst); const result_llvm_ty = try self.dg.lowerType(result_ty); - const result_bits = result_ty.intInfo(target).bits; + const result_bits = result_ty.intInfo(mod).bits; if (bits > result_bits) { return self.builder.buildTrunc(wrong_size_result, result_llvm_ty, ""); } else if (bits < result_bits) { @@ -8770,6 +8828,7 @@ pub const FuncGen = struct { } fn airErrorSetHasValue(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); const error_set_ty = self.air.getRefType(ty_op.ty); @@ -8781,7 +8840,7 @@ pub const FuncGen = struct { const switch_instr = self.builder.buildSwitch(operand, invalid_block, @intCast(c_uint, names.len)); for (names) |name| { - const err_int = self.dg.module.global_error_set.get(name).?; + const err_int = mod.global_error_set.get(name).?; const this_tag_int_value = int: { var tag_val_payload: Value.Payload.U64 = .{ .base = .{ .tag = .int_u64 }, @@ -8841,8 +8900,7 @@ pub const FuncGen = struct { defer self.gpa.free(fqn); const llvm_fn_name = try std.fmt.allocPrintZ(arena, "__zig_is_named_enum_value_{s}", .{fqn}); - var int_tag_type_buffer: Type.Payload.Bits = undefined; - const int_tag_ty = enum_ty.intTagType(&int_tag_type_buffer); + const int_tag_ty = enum_ty.intTagType(); const param_types = [_]*llvm.Type{try self.dg.lowerType(int_tag_ty)}; const llvm_ret_ty = try self.dg.lowerType(Type.bool); @@ -8923,11 +8981,9 @@ pub const FuncGen = struct { const slice_ty = Type.initTag(.const_slice_u8_sentinel_0); const llvm_ret_ty = try self.dg.lowerType(slice_ty); const usize_llvm_ty = try self.dg.lowerType(Type.usize); - const target = self.dg.module.getTarget(); - const slice_alignment = slice_ty.abiAlignment(target); + const slice_alignment = slice_ty.abiAlignment(mod); - var int_tag_type_buffer: Type.Payload.Bits = undefined; - const int_tag_ty = enum_ty.intTagType(&int_tag_type_buffer); + const int_tag_ty = enum_ty.intTagType(); const param_types = 
[_]*llvm.Type{try self.dg.lowerType(int_tag_ty)}; const fn_type = llvm.functionType(llvm_ret_ty, ¶m_types, param_types.len, .False); @@ -9057,6 +9113,7 @@ pub const FuncGen = struct { } fn airShuffle(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Shuffle, ty_pl.payload).data; const a = try self.resolveInst(extra.a); @@ -9077,11 +9134,11 @@ pub const FuncGen = struct { for (values, 0..) |*val, i| { var buf: Value.ElemValueBuffer = undefined; - const elem = mask.elemValueBuffer(self.dg.module, i, &buf); + const elem = mask.elemValueBuffer(mod, i, &buf); if (elem.isUndef()) { val.* = llvm_i32.getUndef(); } else { - const int = elem.toSignedInt(self.dg.module.getTarget()); + const int = elem.toSignedInt(mod); const unsigned = if (int >= 0) @intCast(u32, int) else @intCast(u32, ~int + a_len); val.* = llvm_i32.constInt(unsigned, .False); } @@ -9157,7 +9214,8 @@ pub const FuncGen = struct { fn airReduce(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value { self.builder.setFastMath(want_fast_math); - const target = self.dg.module.getTarget(); + const mod = self.dg.module; + const target = mod.getTarget(); const reduce = self.air.instructions.items(.data)[inst].reduce; const operand = try self.resolveInst(reduce.operand); @@ -9168,21 +9226,21 @@ pub const FuncGen = struct { .And => return self.builder.buildAndReduce(operand), .Or => return self.builder.buildOrReduce(operand), .Xor => return self.builder.buildXorReduce(operand), - .Min => switch (scalar_ty.zigTypeTag()) { - .Int => return self.builder.buildIntMinReduce(operand, scalar_ty.isSignedInt()), + .Min => switch (scalar_ty.zigTypeTag(mod)) { + .Int => return self.builder.buildIntMinReduce(operand, scalar_ty.isSignedInt(mod)), .Float => if (intrinsicsAllowed(scalar_ty, target)) { return self.builder.buildFPMinReduce(operand); }, else => unreachable, }, - .Max => switch (scalar_ty.zigTypeTag()) { - .Int => return self.builder.buildIntMaxReduce(operand, scalar_ty.isSignedInt()), + .Max => switch (scalar_ty.zigTypeTag(mod)) { + .Int => return self.builder.buildIntMaxReduce(operand, scalar_ty.isSignedInt(mod)), .Float => if (intrinsicsAllowed(scalar_ty, target)) { return self.builder.buildFPMaxReduce(operand); }, else => unreachable, }, - .Add => switch (scalar_ty.zigTypeTag()) { + .Add => switch (scalar_ty.zigTypeTag(mod)) { .Int => return self.builder.buildAddReduce(operand), .Float => if (intrinsicsAllowed(scalar_ty, target)) { const scalar_llvm_ty = try self.dg.lowerType(scalar_ty); @@ -9191,7 +9249,7 @@ pub const FuncGen = struct { }, else => unreachable, }, - .Mul => switch (scalar_ty.zigTypeTag()) { + .Mul => switch (scalar_ty.zigTypeTag(mod)) { .Int => return self.builder.buildMulReduce(operand), .Float => if (intrinsicsAllowed(scalar_ty, target)) { const scalar_llvm_ty = try self.dg.lowerType(scalar_ty); @@ -9247,9 +9305,9 @@ pub const FuncGen = struct { const len = @intCast(usize, result_ty.arrayLen()); const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]); const llvm_result_ty = try self.dg.lowerType(result_ty); - const target = self.dg.module.getTarget(); + const mod = self.dg.module; - switch (result_ty.zigTypeTag()) { + switch (result_ty.zigTypeTag(mod)) { .Vector => { const llvm_u32 = self.context.intType(32); @@ -9265,7 +9323,7 @@ pub const FuncGen = struct { if (result_ty.containerLayout() == .Packed) { const struct_obj = 
result_ty.castTag(.@"struct").?.data; assert(struct_obj.haveLayout()); - const big_bits = struct_obj.backing_int_ty.bitSize(target); + const big_bits = struct_obj.backing_int_ty.bitSize(mod); const int_llvm_ty = self.context.intType(@intCast(c_uint, big_bits)); const fields = struct_obj.fields.values(); comptime assert(Type.packed_struct_layout_version == 2); @@ -9273,12 +9331,12 @@ pub const FuncGen = struct { var running_bits: u16 = 0; for (elements, 0..) |elem, i| { const field = fields[i]; - if (!field.ty.hasRuntimeBitsIgnoreComptime()) continue; + if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue; const non_int_val = try self.resolveInst(elem); - const ty_bit_size = @intCast(u16, field.ty.bitSize(target)); + const ty_bit_size = @intCast(u16, field.ty.bitSize(mod)); const small_int_ty = self.context.intType(ty_bit_size); - const small_int_val = if (field.ty.isPtrAtRuntime()) + const small_int_val = if (field.ty.isPtrAtRuntime(mod)) self.builder.buildPtrToInt(non_int_val, small_int_ty, "") else self.builder.buildBitCast(non_int_val, small_int_ty, ""); @@ -9296,24 +9354,24 @@ pub const FuncGen = struct { var ptr_ty_buf: Type.Payload.Pointer = undefined; - if (isByRef(result_ty)) { + if (isByRef(result_ty, mod)) { const llvm_u32 = self.context.intType(32); // TODO in debug builds init to undef so that the padding will be 0xaa // even if we fully populate the fields. - const alloca_inst = self.buildAlloca(llvm_result_ty, result_ty.abiAlignment(target)); + const alloca_inst = self.buildAlloca(llvm_result_ty, result_ty.abiAlignment(mod)); var indices: [2]*llvm.Value = .{ llvm_u32.constNull(), undefined }; for (elements, 0..) |elem, i| { - if (result_ty.structFieldValueComptime(i) != null) continue; + if (result_ty.structFieldValueComptime(mod, i) != null) continue; const llvm_elem = try self.resolveInst(elem); - const llvm_i = llvmFieldIndex(result_ty, i, target, &ptr_ty_buf).?; + const llvm_i = llvmFieldIndex(result_ty, i, mod, &ptr_ty_buf).?; indices[1] = llvm_u32.constInt(llvm_i, .False); const field_ptr = self.builder.buildInBoundsGEP(llvm_result_ty, alloca_inst, &indices, indices.len, ""); var field_ptr_payload: Type.Payload.Pointer = .{ .data = .{ .pointee_type = self.air.typeOf(elem), - .@"align" = result_ty.structFieldAlign(i, target), + .@"align" = result_ty.structFieldAlign(i, mod), .@"addrspace" = .generic, }, }; @@ -9325,20 +9383,20 @@ pub const FuncGen = struct { } else { var result = llvm_result_ty.getUndef(); for (elements, 0..) 
|elem, i| { - if (result_ty.structFieldValueComptime(i) != null) continue; + if (result_ty.structFieldValueComptime(mod, i) != null) continue; const llvm_elem = try self.resolveInst(elem); - const llvm_i = llvmFieldIndex(result_ty, i, target, &ptr_ty_buf).?; + const llvm_i = llvmFieldIndex(result_ty, i, mod, &ptr_ty_buf).?; result = self.builder.buildInsertValue(result, llvm_elem, llvm_i, ""); } return result; } }, .Array => { - assert(isByRef(result_ty)); + assert(isByRef(result_ty, mod)); const llvm_usize = try self.dg.lowerType(Type.usize); - const alloca_inst = self.buildAlloca(llvm_result_ty, result_ty.abiAlignment(target)); + const alloca_inst = self.buildAlloca(llvm_result_ty, result_ty.abiAlignment(mod)); const array_info = result_ty.arrayInfo(); var elem_ptr_payload: Type.Payload.Pointer = .{ @@ -9379,22 +9437,22 @@ pub const FuncGen = struct { } fn airUnionInit(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.UnionInit, ty_pl.payload).data; const union_ty = self.air.typeOfIndex(inst); const union_llvm_ty = try self.dg.lowerType(union_ty); - const target = self.dg.module.getTarget(); - const layout = union_ty.unionGetLayout(target); + const layout = union_ty.unionGetLayout(mod); const union_obj = union_ty.cast(Type.Payload.Union).?.data; if (union_obj.layout == .Packed) { - const big_bits = union_ty.bitSize(target); + const big_bits = union_ty.bitSize(mod); const int_llvm_ty = self.context.intType(@intCast(c_uint, big_bits)); const field = union_obj.fields.values()[extra.field_index]; const non_int_val = try self.resolveInst(extra.init); - const ty_bit_size = @intCast(u16, field.ty.bitSize(target)); + const ty_bit_size = @intCast(u16, field.ty.bitSize(mod)); const small_int_ty = self.context.intType(ty_bit_size); - const small_int_val = if (field.ty.isPtrAtRuntime()) + const small_int_val = if (field.ty.isPtrAtRuntime(mod)) self.builder.buildPtrToInt(non_int_val, small_int_ty, "") else self.builder.buildBitCast(non_int_val, small_int_ty, ""); @@ -9412,16 +9470,16 @@ pub const FuncGen = struct { const tag_val = Value.initPayload(&tag_val_payload.base); var int_payload: Value.Payload.U64 = undefined; const tag_int_val = tag_val.enumToInt(tag_ty, &int_payload); - break :blk tag_int_val.toUnsignedInt(target); + break :blk tag_int_val.toUnsignedInt(mod); }; if (layout.payload_size == 0) { if (layout.tag_size == 0) { return null; } - assert(!isByRef(union_ty)); + assert(!isByRef(union_ty, mod)); return union_llvm_ty.constInt(tag_int, .False); } - assert(isByRef(union_ty)); + assert(isByRef(union_ty, mod)); // The llvm type of the alloca will be the named LLVM union type, and will not // necessarily match the format that we need, depending on which tag is active. 
// We must construct the correct unnamed struct type here, in order to then set @@ -9431,12 +9489,12 @@ pub const FuncGen = struct { assert(union_obj.haveFieldTypes()); const field = union_obj.fields.values()[extra.field_index]; const field_llvm_ty = try self.dg.lowerType(field.ty); - const field_size = field.ty.abiSize(target); - const field_align = field.normalAlignment(target); + const field_size = field.ty.abiSize(mod); + const field_align = field.normalAlignment(mod); const llvm_union_ty = t: { const payload = p: { - if (!field.ty.hasRuntimeBitsIgnoreComptime()) { + if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) { const padding_len = @intCast(c_uint, layout.payload_size); break :p self.context.intType(8).arrayType(padding_len); } @@ -9511,7 +9569,7 @@ pub const FuncGen = struct { const tag_llvm_ty = try self.dg.lowerType(union_obj.tag_ty); const llvm_tag = tag_llvm_ty.constInt(tag_int, .False); const store_inst = self.builder.buildStore(llvm_tag, field_ptr); - store_inst.setAlignment(union_obj.tag_ty.abiAlignment(target)); + store_inst.setAlignment(union_obj.tag_ty.abiAlignment(mod)); } return result_ptr; @@ -9535,7 +9593,8 @@ pub const FuncGen = struct { // by the target. // To work around this, don't emit llvm.prefetch in this case. // See https://bugs.llvm.org/show_bug.cgi?id=21037 - const target = self.dg.module.getTarget(); + const mod = self.dg.module; + const target = mod.getTarget(); switch (prefetch.cache) { .instruction => switch (target.cpu.arch) { .x86_64, @@ -9658,8 +9717,9 @@ pub const FuncGen = struct { return table; } + const mod = self.dg.module; const slice_ty = Type.initTag(.const_slice_u8_sentinel_0); - const slice_alignment = slice_ty.abiAlignment(self.dg.module.getTarget()); + const slice_alignment = slice_ty.abiAlignment(mod); const llvm_slice_ptr_ty = self.context.pointerType(0); // TODO: Address space const error_name_table_global = self.dg.object.llvm_module.addGlobal(llvm_slice_ptr_ty, "__zig_err_name_table"); @@ -9703,14 +9763,14 @@ pub const FuncGen = struct { ) !*llvm.Value { var buf: Type.Payload.ElemType = undefined; const payload_ty = opt_ty.optionalChild(&buf); + const mod = fg.dg.module; - if (isByRef(opt_ty)) { + if (isByRef(opt_ty, mod)) { // We have a pointer and we need to return a pointer to the first field. 
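By-ref optionals are lowered with the payload first and the non-null flag after it, which is what makes the field-0 GEP below valid. A standalone sketch of the analogous layout in plain Zig (`OptU32` is an illustrative stand-in, not a type from this patch):

    const std = @import("std");

    // Payload first, non-null flag second: a pointer to the optional is,
    // after a GEP to field 0, also a pointer to the payload.
    const OptU32 = extern struct {
        payload: u32,
        non_null: u8,
    };

    test "payload sits at offset zero" {
        try std.testing.expect(@offsetOf(OptU32, "payload") == 0);
        try std.testing.expect(@offsetOf(OptU32, "non_null") == @sizeOf(u32));
    }
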
const payload_ptr = fg.builder.buildStructGEP(opt_llvm_ty, opt_handle, 0, ""); - const target = fg.dg.module.getTarget(); - const payload_alignment = payload_ty.abiAlignment(target); - if (isByRef(payload_ty)) { + const payload_alignment = payload_ty.abiAlignment(mod); + if (isByRef(payload_ty, mod)) { if (can_elide_load) return payload_ptr; @@ -9722,7 +9782,7 @@ pub const FuncGen = struct { return load_inst; } - assert(!isByRef(payload_ty)); + assert(!isByRef(payload_ty, mod)); return fg.builder.buildExtractValue(opt_handle, 0, ""); } @@ -9734,10 +9794,10 @@ pub const FuncGen = struct { ) !?*llvm.Value { const optional_llvm_ty = try self.dg.lowerType(optional_ty); const non_null_field = self.builder.buildZExt(non_null_bit, self.context.intType(8), ""); + const mod = self.dg.module; - if (isByRef(optional_ty)) { - const target = self.dg.module.getTarget(); - const payload_alignment = optional_ty.abiAlignment(target); + if (isByRef(optional_ty, mod)) { + const payload_alignment = optional_ty.abiAlignment(mod); const alloca_inst = self.buildAlloca(optional_llvm_ty, payload_alignment); { @@ -9765,9 +9825,9 @@ pub const FuncGen = struct { struct_ptr_ty: Type, field_index: u32, ) !?*llvm.Value { - const target = self.dg.object.target; const struct_ty = struct_ptr_ty.childType(); - switch (struct_ty.zigTypeTag()) { + const mod = self.dg.module; + switch (struct_ty.zigTypeTag(mod)) { .Struct => switch (struct_ty.containerLayout()) { .Packed => { const result_ty = self.air.typeOfIndex(inst); @@ -9783,7 +9843,7 @@ pub const FuncGen = struct { // We have a pointer to a packed struct field that happens to be byte-aligned. // Offset our operand pointer by the correct number of bytes. - const byte_offset = struct_ty.packedStructFieldByteOffset(field_index, target); + const byte_offset = struct_ty.packedStructFieldByteOffset(field_index, mod); if (byte_offset == 0) return struct_ptr; const byte_llvm_ty = self.context.intType(8); const llvm_usize = try self.dg.lowerType(Type.usize); @@ -9795,7 +9855,7 @@ pub const FuncGen = struct { const struct_llvm_ty = try self.dg.lowerPtrElemTy(struct_ty); var ty_buf: Type.Payload.Pointer = undefined; - if (llvmFieldIndex(struct_ty, field_index, target, &ty_buf)) |llvm_field_index| { + if (llvmFieldIndex(struct_ty, field_index, mod, &ty_buf)) |llvm_field_index| { return self.builder.buildStructGEP(struct_llvm_ty, struct_ptr, llvm_field_index, ""); } else { // If we found no index then this means this is a zero sized field at the @@ -9803,14 +9863,14 @@ pub const FuncGen = struct { // the index to the element at index `1` to get a pointer to the end of // the struct. 
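The byte-aligned packed-field path above can be mirrored in ordinary Zig; a minimal sketch (the struct `P` is hypothetical):

    const std = @import("std");

    test "byte-aligned field inside a packed struct" {
        const P = packed struct { a: u8, b: u8, c: u16 };
        // `b` starts at bit 8, a whole-byte boundary, so a pointer to it
        // can be formed by offsetting the base pointer by exactly one byte.
        try std.testing.expect(@bitOffsetOf(P, "b") == 8);
        try std.testing.expect(@bitOffsetOf(P, "b") % 8 == 0);
    }
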
const llvm_u32 = self.context.intType(32); - const llvm_index = llvm_u32.constInt(@boolToInt(struct_ty.hasRuntimeBitsIgnoreComptime()), .False); + const llvm_index = llvm_u32.constInt(@boolToInt(struct_ty.hasRuntimeBitsIgnoreComptime(mod)), .False); const indices: [1]*llvm.Value = .{llvm_index}; return self.builder.buildInBoundsGEP(struct_llvm_ty, struct_ptr, &indices, indices.len, ""); } }, }, .Union => { - const layout = struct_ty.unionGetLayout(target); + const layout = struct_ty.unionGetLayout(mod); if (layout.payload_size == 0 or struct_ty.containerLayout() == .Packed) return struct_ptr; const payload_index = @boolToInt(layout.tag_align >= layout.payload_align); const union_llvm_ty = try self.dg.lowerType(struct_ty); @@ -9835,12 +9895,12 @@ pub const FuncGen = struct { ptr_alignment: u32, is_volatile: bool, ) !*llvm.Value { + const mod = fg.dg.module; const pointee_llvm_ty = try fg.dg.lowerType(pointee_type); - const target = fg.dg.module.getTarget(); - const result_align = @max(ptr_alignment, pointee_type.abiAlignment(target)); + const result_align = @max(ptr_alignment, pointee_type.abiAlignment(mod)); const result_ptr = fg.buildAlloca(pointee_llvm_ty, result_align); - const llvm_usize = fg.context.intType(Type.usize.intInfo(target).bits); - const size_bytes = pointee_type.abiSize(target); + const llvm_usize = fg.context.intType(Type.usize.intInfo(mod).bits); + const size_bytes = pointee_type.abiSize(mod); _ = fg.builder.buildMemCpy( result_ptr, result_align, @@ -9856,11 +9916,11 @@ pub const FuncGen = struct { /// alloca and copies the value into it, then returns the alloca instruction. /// For isByRef=false types, it creates a load instruction and returns it. fn load(self: *FuncGen, ptr: *llvm.Value, ptr_ty: Type) !?*llvm.Value { + const mod = self.dg.module; const info = ptr_ty.ptrInfo().data; - if (!info.pointee_type.hasRuntimeBitsIgnoreComptime()) return null; + if (!info.pointee_type.hasRuntimeBitsIgnoreComptime(mod)) return null; - const target = self.dg.module.getTarget(); - const ptr_alignment = info.alignment(target); + const ptr_alignment = info.alignment(mod); const ptr_volatile = llvm.Bool.fromBool(ptr_ty.isVolatilePtr()); assert(info.vector_index != .runtime); @@ -9877,7 +9937,7 @@ pub const FuncGen = struct { } if (info.host_size == 0) { - if (isByRef(info.pointee_type)) { + if (isByRef(info.pointee_type, mod)) { return self.loadByRef(ptr, info.pointee_type, ptr_alignment, info.@"volatile"); } const elem_llvm_ty = try self.dg.lowerType(info.pointee_type); @@ -9892,13 +9952,13 @@ pub const FuncGen = struct { containing_int.setAlignment(ptr_alignment); containing_int.setVolatile(ptr_volatile); - const elem_bits = @intCast(c_uint, ptr_ty.elemType().bitSize(target)); + const elem_bits = @intCast(c_uint, ptr_ty.elemType().bitSize(mod)); const shift_amt = containing_int.typeOf().constInt(info.bit_offset, .False); const shifted_value = self.builder.buildLShr(containing_int, shift_amt, ""); const elem_llvm_ty = try self.dg.lowerType(info.pointee_type); - if (isByRef(info.pointee_type)) { - const result_align = info.pointee_type.abiAlignment(target); + if (isByRef(info.pointee_type, mod)) { + const result_align = info.pointee_type.abiAlignment(mod); const result_ptr = self.buildAlloca(elem_llvm_ty, result_align); const same_size_int = self.context.intType(elem_bits); @@ -9908,13 +9968,13 @@ pub const FuncGen = struct { return result_ptr; } - if (info.pointee_type.zigTypeTag() == .Float or info.pointee_type.zigTypeTag() == .Vector) { + if (info.pointee_type.zigTypeTag(mod) == 
.Float or info.pointee_type.zigTypeTag(mod) == .Vector) { const same_size_int = self.context.intType(elem_bits); const truncated_int = self.builder.buildTrunc(shifted_value, same_size_int, ""); return self.builder.buildBitCast(truncated_int, elem_llvm_ty, ""); } - if (info.pointee_type.isPtrAtRuntime()) { + if (info.pointee_type.isPtrAtRuntime(mod)) { const same_size_int = self.context.intType(elem_bits); const truncated_int = self.builder.buildTrunc(shifted_value, same_size_int, ""); return self.builder.buildIntToPtr(truncated_int, elem_llvm_ty, ""); @@ -9932,11 +9992,11 @@ pub const FuncGen = struct { ) !void { const info = ptr_ty.ptrInfo().data; const elem_ty = info.pointee_type; - if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime()) { + const mod = self.dg.module; + if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) { return; } - const target = self.dg.module.getTarget(); - const ptr_alignment = ptr_ty.ptrAlignment(target); + const ptr_alignment = ptr_ty.ptrAlignment(mod); const ptr_volatile = llvm.Bool.fromBool(info.@"volatile"); assert(info.vector_index != .runtime); @@ -9964,13 +10024,13 @@ pub const FuncGen = struct { assert(ordering == .NotAtomic); containing_int.setAlignment(ptr_alignment); containing_int.setVolatile(ptr_volatile); - const elem_bits = @intCast(c_uint, ptr_ty.elemType().bitSize(target)); + const elem_bits = @intCast(c_uint, ptr_ty.elemType().bitSize(mod)); const containing_int_ty = containing_int.typeOf(); const shift_amt = containing_int_ty.constInt(info.bit_offset, .False); // Convert to equally-sized integer type in order to perform the bit // operations on the value to store const value_bits_type = self.context.intType(elem_bits); - const value_bits = if (elem_ty.isPtrAtRuntime()) + const value_bits = if (elem_ty.isPtrAtRuntime(mod)) self.builder.buildPtrToInt(elem, value_bits_type, "") else self.builder.buildBitCast(elem, value_bits_type, ""); @@ -9991,7 +10051,7 @@ pub const FuncGen = struct { store_inst.setVolatile(ptr_volatile); return; } - if (!isByRef(elem_ty)) { + if (!isByRef(elem_ty, mod)) { const store_inst = self.builder.buildStore(elem, ptr); store_inst.setOrdering(ordering); store_inst.setAlignment(ptr_alignment); @@ -9999,13 +10059,13 @@ pub const FuncGen = struct { return; } assert(ordering == .NotAtomic); - const size_bytes = elem_ty.abiSize(target); + const size_bytes = elem_ty.abiSize(mod); _ = self.builder.buildMemCpy( ptr, ptr_alignment, elem, - elem_ty.abiAlignment(target), - self.context.intType(Type.usize.intInfo(target).bits).constInt(size_bytes, .False), + elem_ty.abiAlignment(mod), + self.context.intType(Type.usize.intInfo(mod).bits).constInt(size_bytes, .False), info.@"volatile", ); } @@ -10030,11 +10090,12 @@ pub const FuncGen = struct { a4: *llvm.Value, a5: *llvm.Value, ) *llvm.Value { - const target = fg.dg.module.getTarget(); + const mod = fg.dg.module; + const target = mod.getTarget(); if (!target_util.hasValgrindSupport(target)) return default_value; const usize_llvm_ty = fg.context.intType(target.ptrBitWidth()); - const usize_alignment = @intCast(c_uint, Type.usize.abiSize(target)); + const usize_alignment = @intCast(c_uint, Type.usize.abiSize(mod)); const array_llvm_ty = usize_llvm_ty.arrayType(6); const array_ptr = fg.valgrind_client_request_array orelse a: { @@ -10451,7 +10512,7 @@ fn toLlvmGlobalAddressSpace(wanted_address_space: std.builtin.AddressSpace, targ fn llvmFieldIndex( ty: Type, field_index: usize, - target: std.Target, + mod: *const Module, ptr_pl_buf: *Type.Payload.Pointer, ) ?c_uint { // Detects where we 
inserted extra padding fields so that we can skip @@ -10464,9 +10525,9 @@ fn llvmFieldIndex( const tuple = ty.tupleFields(); var llvm_field_index: c_uint = 0; for (tuple.types, 0..) |field_ty, i| { - if (tuple.values[i].tag() != .unreachable_value or !field_ty.hasRuntimeBits()) continue; + if (tuple.values[i].tag() != .unreachable_value or !field_ty.hasRuntimeBits(mod)) continue; - const field_align = field_ty.abiAlignment(target); + const field_align = field_ty.abiAlignment(mod); big_align = @max(big_align, field_align); const prev_offset = offset; offset = std.mem.alignForwardGeneric(u64, offset, field_align); @@ -10488,7 +10549,7 @@ fn llvmFieldIndex( } llvm_field_index += 1; - offset += field_ty.abiSize(target); + offset += field_ty.abiSize(mod); } return null; } @@ -10496,10 +10557,10 @@ fn llvmFieldIndex( assert(layout != .Packed); var llvm_field_index: c_uint = 0; - var it = ty.castTag(.@"struct").?.data.runtimeFieldIterator(); + var it = ty.castTag(.@"struct").?.data.runtimeFieldIterator(mod); while (it.next()) |field_and_index| { const field = field_and_index.field; - const field_align = field.alignment(target, layout); + const field_align = field.alignment(mod, layout); big_align = @max(big_align, field_align); const prev_offset = offset; offset = std.mem.alignForwardGeneric(u64, offset, field_align); @@ -10521,43 +10582,44 @@ fn llvmFieldIndex( } llvm_field_index += 1; - offset += field.ty.abiSize(target); + offset += field.ty.abiSize(mod); } else { // We did not find an llvm field that corresponds to this zig field. return null; } } -fn firstParamSRet(fn_info: Type.Payload.Function.Data, target: std.Target) bool { - if (!fn_info.return_type.hasRuntimeBitsIgnoreComptime()) return false; +fn firstParamSRet(fn_info: Type.Payload.Function.Data, mod: *const Module) bool { + if (!fn_info.return_type.hasRuntimeBitsIgnoreComptime(mod)) return false; + const target = mod.getTarget(); switch (fn_info.cc) { - .Unspecified, .Inline => return isByRef(fn_info.return_type), + .Unspecified, .Inline => return isByRef(fn_info.return_type, mod), .C => switch (target.cpu.arch) { .mips, .mipsel => return false, .x86_64 => switch (target.os.tag) { - .windows => return x86_64_abi.classifyWindows(fn_info.return_type, target) == .memory, - else => return firstParamSRetSystemV(fn_info.return_type, target), + .windows => return x86_64_abi.classifyWindows(fn_info.return_type, mod) == .memory, + else => return firstParamSRetSystemV(fn_info.return_type, mod), }, - .wasm32 => return wasm_c_abi.classifyType(fn_info.return_type, target)[0] == .indirect, - .aarch64, .aarch64_be => return aarch64_c_abi.classifyType(fn_info.return_type, target) == .memory, - .arm, .armeb => switch (arm_c_abi.classifyType(fn_info.return_type, target, .ret)) { + .wasm32 => return wasm_c_abi.classifyType(fn_info.return_type, mod)[0] == .indirect, + .aarch64, .aarch64_be => return aarch64_c_abi.classifyType(fn_info.return_type, mod) == .memory, + .arm, .armeb => switch (arm_c_abi.classifyType(fn_info.return_type, mod, .ret)) { .memory, .i64_array => return true, .i32_array => |size| return size != 1, .byval => return false, }, - .riscv32, .riscv64 => return riscv_c_abi.classifyType(fn_info.return_type, target) == .memory, + .riscv32, .riscv64 => return riscv_c_abi.classifyType(fn_info.return_type, mod) == .memory, else => return false, // TODO investigate C ABI for other architectures }, - .SysV => return firstParamSRetSystemV(fn_info.return_type, target), - .Win64 => return x86_64_abi.classifyWindows(fn_info.return_type, target) == 
.memory, - .Stdcall => return !isScalar(fn_info.return_type), + .SysV => return firstParamSRetSystemV(fn_info.return_type, mod), + .Win64 => return x86_64_abi.classifyWindows(fn_info.return_type, mod) == .memory, + .Stdcall => return !isScalar(mod, fn_info.return_type), else => return false, } } -fn firstParamSRetSystemV(ty: Type, target: std.Target) bool { - const class = x86_64_abi.classifySystemV(ty, target, .ret); +fn firstParamSRetSystemV(ty: Type, mod: *const Module) bool { + const class = x86_64_abi.classifySystemV(ty, mod, .ret); if (class[0] == .memory) return true; if (class[0] == .x87 and class[2] != .none) return true; return false; @@ -10567,20 +10629,21 @@ fn firstParamSRetSystemV(ty: Type, target: std.Target) bool { /// completely differently in the function prototype to honor the C ABI, and then /// be effectively bitcasted to the actual return type. fn lowerFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm.Type { - if (!fn_info.return_type.hasRuntimeBitsIgnoreComptime()) { + const mod = dg.module; + if (!fn_info.return_type.hasRuntimeBitsIgnoreComptime(mod)) { // If the return type is an error set or an error union, then we make this // anyerror return type instead, so that it can be coerced into a function // pointer type which has anyerror as the return type. - if (fn_info.return_type.isError()) { + if (fn_info.return_type.isError(mod)) { return dg.lowerType(Type.anyerror); } else { return dg.context.voidType(); } } - const target = dg.module.getTarget(); + const target = mod.getTarget(); switch (fn_info.cc) { .Unspecified, .Inline => { - if (isByRef(fn_info.return_type)) { + if (isByRef(fn_info.return_type, mod)) { return dg.context.voidType(); } else { return dg.lowerType(fn_info.return_type); @@ -10594,33 +10657,33 @@ fn lowerFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm.Type { else => return lowerSystemVFnRetTy(dg, fn_info), }, .wasm32 => { - if (isScalar(fn_info.return_type)) { + if (isScalar(mod, fn_info.return_type)) { return dg.lowerType(fn_info.return_type); } - const classes = wasm_c_abi.classifyType(fn_info.return_type, target); + const classes = wasm_c_abi.classifyType(fn_info.return_type, mod); if (classes[0] == .indirect or classes[0] == .none) { return dg.context.voidType(); } assert(classes[0] == .direct and classes[1] == .none); - const scalar_type = wasm_c_abi.scalarType(fn_info.return_type, target); - const abi_size = scalar_type.abiSize(target); + const scalar_type = wasm_c_abi.scalarType(fn_info.return_type, mod); + const abi_size = scalar_type.abiSize(mod); return dg.context.intType(@intCast(c_uint, abi_size * 8)); }, .aarch64, .aarch64_be => { - switch (aarch64_c_abi.classifyType(fn_info.return_type, target)) { + switch (aarch64_c_abi.classifyType(fn_info.return_type, mod)) { .memory => return dg.context.voidType(), .float_array => return dg.lowerType(fn_info.return_type), .byval => return dg.lowerType(fn_info.return_type), .integer => { - const bit_size = fn_info.return_type.bitSize(target); + const bit_size = fn_info.return_type.bitSize(mod); return dg.context.intType(@intCast(c_uint, bit_size)); }, .double_integer => return dg.context.intType(64).arrayType(2), } }, .arm, .armeb => { - switch (arm_c_abi.classifyType(fn_info.return_type, target, .ret)) { + switch (arm_c_abi.classifyType(fn_info.return_type, mod, .ret)) { .memory, .i64_array => return dg.context.voidType(), .i32_array => |len| if (len == 1) { return dg.context.intType(32); @@ -10631,10 +10694,10 @@ fn lowerFnRetTy(dg: *DeclGen, fn_info: 
Type.Payload.Function.Data) !*llvm.Type { } }, .riscv32, .riscv64 => { - switch (riscv_c_abi.classifyType(fn_info.return_type, target)) { + switch (riscv_c_abi.classifyType(fn_info.return_type, mod)) { .memory => return dg.context.voidType(), .integer => { - const bit_size = fn_info.return_type.bitSize(target); + const bit_size = fn_info.return_type.bitSize(mod); return dg.context.intType(@intCast(c_uint, bit_size)); }, .double_integer => { @@ -10654,7 +10717,7 @@ fn lowerFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm.Type { .Win64 => return lowerWin64FnRetTy(dg, fn_info), .SysV => return lowerSystemVFnRetTy(dg, fn_info), .Stdcall => { - if (isScalar(fn_info.return_type)) { + if (isScalar(mod, fn_info.return_type)) { return dg.lowerType(fn_info.return_type); } else { return dg.context.voidType(); @@ -10665,13 +10728,13 @@ fn lowerFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm.Type { } fn lowerWin64FnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm.Type { - const target = dg.module.getTarget(); - switch (x86_64_abi.classifyWindows(fn_info.return_type, target)) { + const mod = dg.module; + switch (x86_64_abi.classifyWindows(fn_info.return_type, mod)) { .integer => { - if (isScalar(fn_info.return_type)) { + if (isScalar(mod, fn_info.return_type)) { return dg.lowerType(fn_info.return_type); } else { - const abi_size = fn_info.return_type.abiSize(target); + const abi_size = fn_info.return_type.abiSize(mod); return dg.context.intType(@intCast(c_uint, abi_size * 8)); } }, @@ -10683,11 +10746,11 @@ fn lowerWin64FnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm.T } fn lowerSystemVFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm.Type { - if (isScalar(fn_info.return_type)) { + const mod = dg.module; + if (isScalar(mod, fn_info.return_type)) { return dg.lowerType(fn_info.return_type); } - const target = dg.module.getTarget(); - const classes = x86_64_abi.classifySystemV(fn_info.return_type, target, .ret); + const classes = x86_64_abi.classifySystemV(fn_info.return_type, mod, .ret); if (classes[0] == .memory) { return dg.context.voidType(); } @@ -10728,7 +10791,7 @@ fn lowerSystemVFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm } } if (classes[0] == .integer and classes[1] == .none) { - const abi_size = fn_info.return_type.abiSize(target); + const abi_size = fn_info.return_type.abiSize(mod); return dg.context.intType(@intCast(c_uint, abi_size * 8)); } return dg.context.structType(&llvm_types_buffer, llvm_types_index, .False); @@ -10739,7 +10802,6 @@ const ParamTypeIterator = struct { fn_info: Type.Payload.Function.Data, zig_index: u32, llvm_index: u32, - target: std.Target, llvm_types_len: u32, llvm_types_buffer: [8]*llvm.Type, byval_attr: bool, @@ -10779,7 +10841,10 @@ const ParamTypeIterator = struct { } fn nextInner(it: *ParamTypeIterator, ty: Type) ?Lowering { - if (!ty.hasRuntimeBitsIgnoreComptime()) { + const mod = it.dg.module; + const target = mod.getTarget(); + + if (!ty.hasRuntimeBitsIgnoreComptime(mod)) { it.zig_index += 1; return .no_bits; } @@ -10788,10 +10853,10 @@ const ParamTypeIterator = struct { it.zig_index += 1; it.llvm_index += 1; var buf: Type.Payload.ElemType = undefined; - if (ty.isSlice() or (ty.zigTypeTag() == .Optional and ty.optionalChild(&buf).isSlice())) { + if (ty.isSlice() or (ty.zigTypeTag(mod) == .Optional and ty.optionalChild(&buf).isSlice())) { it.llvm_index += 1; return .slice; - } else if (isByRef(ty)) { + } else if (isByRef(ty, mod)) { return .byref; } else { 
return .byval; @@ -10801,23 +10866,23 @@ const ParamTypeIterator = struct { @panic("TODO implement async function lowering in the LLVM backend"); }, .C => { - switch (it.target.cpu.arch) { + switch (target.cpu.arch) { .mips, .mipsel => { it.zig_index += 1; it.llvm_index += 1; return .byval; }, - .x86_64 => switch (it.target.os.tag) { + .x86_64 => switch (target.os.tag) { .windows => return it.nextWin64(ty), else => return it.nextSystemV(ty), }, .wasm32 => { it.zig_index += 1; it.llvm_index += 1; - if (isScalar(ty)) { + if (isScalar(mod, ty)) { return .byval; } - const classes = wasm_c_abi.classifyType(ty, it.target); + const classes = wasm_c_abi.classifyType(ty, mod); if (classes[0] == .indirect) { return .byref; } @@ -10826,7 +10891,7 @@ const ParamTypeIterator = struct { .aarch64, .aarch64_be => { it.zig_index += 1; it.llvm_index += 1; - switch (aarch64_c_abi.classifyType(ty, it.target)) { + switch (aarch64_c_abi.classifyType(ty, mod)) { .memory => return .byref_mut, .float_array => |len| return Lowering{ .float_array = len }, .byval => return .byval, @@ -10841,7 +10906,7 @@ const ParamTypeIterator = struct { .arm, .armeb => { it.zig_index += 1; it.llvm_index += 1; - switch (arm_c_abi.classifyType(ty, it.target, .arg)) { + switch (arm_c_abi.classifyType(ty, mod, .arg)) { .memory => { it.byval_attr = true; return .byref; @@ -10857,7 +10922,7 @@ const ParamTypeIterator = struct { if (ty.tag() == .f16) { return .as_u16; } - switch (riscv_c_abi.classifyType(ty, it.target)) { + switch (riscv_c_abi.classifyType(ty, mod)) { .memory => return .byref_mut, .byval => return .byval, .integer => return .abi_sized_int, @@ -10878,7 +10943,7 @@ const ParamTypeIterator = struct { it.zig_index += 1; it.llvm_index += 1; - if (isScalar(ty)) { + if (isScalar(mod, ty)) { return .byval; } else { it.byval_attr = true; @@ -10894,9 +10959,10 @@ const ParamTypeIterator = struct { } fn nextWin64(it: *ParamTypeIterator, ty: Type) ?Lowering { - switch (x86_64_abi.classifyWindows(ty, it.target)) { + const mod = it.dg.module; + switch (x86_64_abi.classifyWindows(ty, mod)) { .integer => { - if (isScalar(ty)) { + if (isScalar(mod, ty)) { it.zig_index += 1; it.llvm_index += 1; return .byval; @@ -10926,14 +10992,15 @@ const ParamTypeIterator = struct { } fn nextSystemV(it: *ParamTypeIterator, ty: Type) ?Lowering { - const classes = x86_64_abi.classifySystemV(ty, it.target, .arg); + const mod = it.dg.module; + const classes = x86_64_abi.classifySystemV(ty, mod, .arg); if (classes[0] == .memory) { it.zig_index += 1; it.llvm_index += 1; it.byval_attr = true; return .byref; } - if (isScalar(ty)) { + if (isScalar(mod, ty)) { it.zig_index += 1; it.llvm_index += 1; return .byval; @@ -10992,7 +11059,6 @@ fn iterateParamTypes(dg: *DeclGen, fn_info: Type.Payload.Function.Data) ParamTyp .fn_info = fn_info, .zig_index = 0, .llvm_index = 0, - .target = dg.module.getTarget(), .llvm_types_buffer = undefined, .llvm_types_len = 0, .byval_attr = false, @@ -11001,16 +11067,17 @@ fn iterateParamTypes(dg: *DeclGen, fn_info: Type.Payload.Function.Data) ParamTyp fn ccAbiPromoteInt( cc: std.builtin.CallingConvention, - target: std.Target, + mod: *const Module, ty: Type, ) ?std.builtin.Signedness { + const target = mod.getTarget(); switch (cc) { .Unspecified, .Inline, .Async => return null, else => {}, } - const int_info = switch (ty.zigTypeTag()) { - .Bool => Type.u1.intInfo(target), - .Int, .Enum, .ErrorSet => ty.intInfo(target), + const int_info = switch (ty.zigTypeTag(mod)) { + .Bool => Type.u1.intInfo(mod), + .Int, .Enum, .ErrorSet => 
ty.intInfo(mod), else => return null, }; if (int_info.bits <= 16) return int_info.signedness; @@ -11039,12 +11106,12 @@ fn ccAbiPromoteInt( /// This is the one source of truth for whether a type is passed around as an LLVM pointer, /// or as an LLVM value. -fn isByRef(ty: Type) bool { +fn isByRef(ty: Type, mod: *const Module) bool { // For tuples and structs, if there are more than this many non-void // fields, then we make it byref, otherwise byval. const max_fields_byval = 0; - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Type, .ComptimeInt, .ComptimeFloat, @@ -11067,7 +11134,7 @@ fn isByRef(ty: Type) bool { .AnyFrame, => return false, - .Array, .Frame => return ty.hasRuntimeBits(), + .Array, .Frame => return ty.hasRuntimeBits(mod), .Struct => { // Packed structs are represented to LLVM as integers. if (ty.containerLayout() == .Packed) return false; @@ -11075,32 +11142,32 @@ fn isByRef(ty: Type) bool { const tuple = ty.tupleFields(); var count: usize = 0; for (tuple.values, 0..) |field_val, i| { - if (field_val.tag() != .unreachable_value or !tuple.types[i].hasRuntimeBits()) continue; + if (field_val.tag() != .unreachable_value or !tuple.types[i].hasRuntimeBits(mod)) continue; count += 1; if (count > max_fields_byval) return true; - if (isByRef(tuple.types[i])) return true; + if (isByRef(tuple.types[i], mod)) return true; } return false; } var count: usize = 0; const fields = ty.structFields(); for (fields.values()) |field| { - if (field.is_comptime or !field.ty.hasRuntimeBits()) continue; + if (field.is_comptime or !field.ty.hasRuntimeBits(mod)) continue; count += 1; if (count > max_fields_byval) return true; - if (isByRef(field.ty)) return true; + if (isByRef(field.ty, mod)) return true; } return false; }, .Union => switch (ty.containerLayout()) { .Packed => return false, - else => return ty.hasRuntimeBits(), + else => return ty.hasRuntimeBits(mod), }, .ErrorUnion => { const payload_ty = ty.errorUnionPayload(); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return false; } return true; @@ -11108,10 +11175,10 @@ fn isByRef(ty: Type) bool { .Optional => { var buf: Type.Payload.ElemType = undefined; const payload_ty = ty.optionalChild(&buf); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return false; } - if (ty.optionalReprIsPayload()) { + if (ty.optionalReprIsPayload(mod)) { return false; } return true; @@ -11119,8 +11186,8 @@ fn isByRef(ty: Type) bool { } } -fn isScalar(ty: Type) bool { - return switch (ty.zigTypeTag()) { +fn isScalar(mod: *const Module, ty: Type) bool { + return switch (ty.zigTypeTag(mod)) { .Void, .Bool, .NoReturn, @@ -11304,12 +11371,12 @@ fn buildAllocaInner( return alloca; } -fn errUnionPayloadOffset(payload_ty: Type, target: std.Target) u1 { - return @boolToInt(Type.anyerror.abiAlignment(target) > payload_ty.abiAlignment(target)); +fn errUnionPayloadOffset(payload_ty: Type, mod: *const Module) u1 { + return @boolToInt(Type.anyerror.abiAlignment(mod) > payload_ty.abiAlignment(mod)); } -fn errUnionErrorOffset(payload_ty: Type, target: std.Target) u1 { - return @boolToInt(Type.anyerror.abiAlignment(target) <= payload_ty.abiAlignment(target)); +fn errUnionErrorOffset(payload_ty: Type, mod: *const Module) u1 { + return @boolToInt(Type.anyerror.abiAlignment(mod) <= payload_ty.abiAlignment(mod)); } /// Returns true for asm constraint (e.g. 
"=*m", "=r") if it accepts a memory location diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 09ace669a9..b8c8466427 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -231,9 +231,10 @@ pub const DeclGen = struct { /// Fetch the result-id for a previously generated instruction or constant. fn resolve(self: *DeclGen, inst: Air.Inst.Ref) !IdRef { - if (self.air.value(inst)) |val| { + const mod = self.module; + if (self.air.value(inst, mod)) |val| { const ty = self.air.typeOf(inst); - if (ty.zigTypeTag() == .Fn) { + if (ty.zigTypeTag(mod) == .Fn) { const fn_decl_index = switch (val.tag()) { .extern_fn => val.castTag(.extern_fn).?.data.owner_decl, .function => val.castTag(.function).?.data.owner_decl, @@ -340,8 +341,9 @@ pub const DeclGen = struct { } fn arithmeticTypeInfo(self: *DeclGen, ty: Type) !ArithmeticTypeInfo { + const mod = self.module; const target = self.getTarget(); - return switch (ty.zigTypeTag()) { + return switch (ty.zigTypeTag(mod)) { .Bool => ArithmeticTypeInfo{ .bits = 1, // Doesn't matter for this class. .is_vector = false, @@ -355,7 +357,7 @@ pub const DeclGen = struct { .class = .float, }, .Int => blk: { - const int_info = ty.intInfo(target); + const int_info = ty.intInfo(mod); // TODO: Maybe it's useful to also return this value. const maybe_backing_bits = self.backingIntBits(int_info.bits); break :blk ArithmeticTypeInfo{ @@ -533,21 +535,22 @@ pub const DeclGen = struct { } fn addInt(self: *@This(), ty: Type, val: Value) !void { - const target = self.dg.getTarget(); - const int_info = ty.intInfo(target); + const mod = self.dg.module; + const int_info = ty.intInfo(mod); const int_bits = switch (int_info.signedness) { - .signed => @bitCast(u64, val.toSignedInt(target)), - .unsigned => val.toUnsignedInt(target), + .signed => @bitCast(u64, val.toSignedInt(mod)), + .unsigned => val.toUnsignedInt(mod), }; // TODO: Swap endianess if the compiler is big endian. - const len = ty.abiSize(target); + const len = ty.abiSize(mod); try self.addBytes(std.mem.asBytes(&int_bits)[0..@intCast(usize, len)]); } fn addFloat(self: *@This(), ty: Type, val: Value) !void { + const mod = self.dg.module; const target = self.dg.getTarget(); - const len = ty.abiSize(target); + const len = ty.abiSize(mod); // TODO: Swap endianess if the compiler is big endian. switch (ty.floatBits(target)) { @@ -607,15 +610,15 @@ pub const DeclGen = struct { } fn lower(self: *@This(), ty: Type, val: Value) !void { - const target = self.dg.getTarget(); const dg = self.dg; + const mod = dg.module; if (val.isUndef()) { - const size = ty.abiSize(target); + const size = ty.abiSize(mod); return try self.addUndef(size); } - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Int => try self.addInt(ty, val), .Float => try self.addFloat(ty, val), .Bool => try self.addConstBool(val.toBool()), @@ -644,7 +647,7 @@ pub const DeclGen = struct { const bytes = dg.module.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; try self.addBytes(bytes); if (ty.sentinel()) |sentinel| { - try self.addByte(@intCast(u8, sentinel.toUnsignedInt(target))); + try self.addByte(@intCast(u8, sentinel.toUnsignedInt(mod))); } }, .bytes => { @@ -690,13 +693,13 @@ pub const DeclGen = struct { const struct_begin = self.size; const field_vals = val.castTag(.aggregate).?.data; for (struct_ty.fields.values(), 0..) 
|field, i| { - if (field.is_comptime or !field.ty.hasRuntimeBits()) continue; + if (field.is_comptime or !field.ty.hasRuntimeBits(mod)) continue; try self.lower(field.ty, field_vals[i]); // Add padding if required. // TODO: Add to type generation as well? const unpadded_field_end = self.size - struct_begin; - const padded_field_end = ty.structFieldOffset(i + 1, target); + const padded_field_end = ty.structFieldOffset(i + 1, mod); const padding = padded_field_end - unpadded_field_end; try self.addUndef(padding); } @@ -705,13 +708,13 @@ pub const DeclGen = struct { .Optional => { var opt_buf: Type.Payload.ElemType = undefined; const payload_ty = ty.optionalChild(&opt_buf); - const has_payload = !val.isNull(); - const abi_size = ty.abiSize(target); + const has_payload = !val.isNull(mod); + const abi_size = ty.abiSize(mod); - if (!payload_ty.hasRuntimeBits()) { + if (!payload_ty.hasRuntimeBits(mod)) { try self.addConstBool(has_payload); return; - } else if (ty.optionalReprIsPayload()) { + } else if (ty.optionalReprIsPayload(mod)) { // Optional representation is a nullable pointer or slice. if (val.castTag(.opt_payload)) |payload| { try self.lower(payload_ty, payload.data); @@ -729,7 +732,7 @@ pub const DeclGen = struct { // Subtract 1 for @sizeOf(bool). // TODO: Make this not hardcoded. - const payload_size = payload_ty.abiSize(target); + const payload_size = payload_ty.abiSize(mod); const padding = abi_size - payload_size - 1; if (val.castTag(.opt_payload)) |payload| { @@ -744,14 +747,13 @@ pub const DeclGen = struct { var int_val_buffer: Value.Payload.U64 = undefined; const int_val = val.enumToInt(ty, &int_val_buffer); - var int_ty_buffer: Type.Payload.Bits = undefined; - const int_ty = ty.intTagType(&int_ty_buffer); + const int_ty = ty.intTagType(); try self.lower(int_ty, int_val); }, .Union => { const tag_and_val = val.castTag(.@"union").?.data; - const layout = ty.unionGetLayout(target); + const layout = ty.unionGetLayout(mod); if (layout.payload_size == 0) { return try self.lower(ty.unionTagTypeSafety().?, tag_and_val.tag); @@ -772,9 +774,9 @@ pub const DeclGen = struct { try self.lower(ty.unionTagTypeSafety().?, tag_and_val.tag); } - const active_field_size = if (active_field_ty.hasRuntimeBitsIgnoreComptime()) blk: { + const active_field_size = if (active_field_ty.hasRuntimeBitsIgnoreComptime(mod)) blk: { try self.lower(active_field_ty, tag_and_val.val); - break :blk active_field_ty.abiSize(target); + break :blk active_field_ty.abiSize(mod); } else 0; const payload_padding_len = layout.payload_size - active_field_size; @@ -808,9 +810,9 @@ pub const DeclGen = struct { return try self.lower(Type.anyerror, error_val); } - const payload_size = payload_ty.abiSize(target); - const error_size = Type.anyerror.abiAlignment(target); - const ty_size = ty.abiSize(target); + const payload_size = payload_ty.abiSize(mod); + const error_size = Type.anyerror.abiAlignment(mod); + const ty_size = ty.abiSize(mod); const padding = ty_size - payload_size - error_size; const payload_val = if (val.castTag(.eu_payload)) |pl| pl.data else Value.initTag(.undef); @@ -886,7 +888,7 @@ pub const DeclGen = struct { // .id_result = result_id, // .storage_class = storage_class, // }); - // } else if (ty.abiSize(target) == 0) { + // } else if (ty.abiSize(mod) == 0) { // // Special case: if the type has no size, then return an undefined pointer. 
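The padding arithmetic used throughout this byte-wise lowering (pad each field out to the next field's offset) is the usual align-forward computation; a small sketch under that assumption, using concrete example numbers:

    const std = @import("std");

    test "padding a field end up to the next aligned offset" {
        // A field ends at byte 5; the next field wants 4-byte alignment,
        // so 3 bytes of undefined padding are emitted before it.
        const next_offset = std.mem.alignForwardGeneric(u64, 5, 4);
        try std.testing.expect(next_offset == 8);
        try std.testing.expect(next_offset - 5 == 3);
    }
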
// return try section.emit(self.spv.gpa, .OpUndef, .{ // .id_result_type = self.typeId(ptr_ty_ref), @@ -968,6 +970,7 @@ pub const DeclGen = struct { /// is then loaded using OpLoad. Such values are loaded into the UniformConstant storage class by default. /// This function should only be called during function code generation. fn constant(self: *DeclGen, ty: Type, val: Value, repr: Repr) !IdRef { + const mod = self.module; const target = self.getTarget(); const result_ty_ref = try self.resolveType(ty, repr); @@ -977,12 +980,12 @@ pub const DeclGen = struct { return self.spv.constUndef(result_ty_ref); } - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Int => { - if (ty.isSignedInt()) { - return try self.spv.constInt(result_ty_ref, val.toSignedInt(target)); + if (ty.isSignedInt(mod)) { + return try self.spv.constInt(result_ty_ref, val.toSignedInt(mod)); } else { - return try self.spv.constInt(result_ty_ref, val.toUnsignedInt(target)); + return try self.spv.constInt(result_ty_ref, val.toUnsignedInt(mod)); } }, .Bool => switch (repr) { @@ -1037,7 +1040,7 @@ pub const DeclGen = struct { // The value cannot be generated directly, so generate it as an indirect constant, // and then perform an OpLoad. const result_id = self.spv.allocId(); - const alignment = ty.abiAlignment(target); + const alignment = ty.abiAlignment(mod); const spv_decl_index = try self.spv.allocDecl(.global); try self.lowerIndirectConstant( @@ -1114,8 +1117,8 @@ pub const DeclGen = struct { /// NOTE: When the active field is set to something other than the most aligned field, the /// resulting struct will be *underaligned*. fn resolveUnionType(self: *DeclGen, ty: Type, maybe_active_field: ?usize) !CacheRef { - const target = self.getTarget(); - const layout = ty.unionGetLayout(target); + const mod = self.module; + const layout = ty.unionGetLayout(mod); const union_ty = ty.cast(Type.Payload.Union).?.data; if (union_ty.layout == .Packed) { @@ -1143,11 +1146,11 @@ pub const DeclGen = struct { const active_field = maybe_active_field orelse layout.most_aligned_field; const active_field_ty = union_ty.fields.values()[active_field].ty; - const active_field_size = if (active_field_ty.hasRuntimeBitsIgnoreComptime()) blk: { + const active_field_size = if (active_field_ty.hasRuntimeBitsIgnoreComptime(mod)) blk: { const active_payload_ty_ref = try self.resolveType(active_field_ty, .indirect); member_types.appendAssumeCapacity(active_payload_ty_ref); member_names.appendAssumeCapacity(try self.spv.resolveString("payload")); - break :blk active_field_ty.abiSize(target); + break :blk active_field_ty.abiSize(mod); } else 0; const payload_padding_len = layout.payload_size - active_field_size; @@ -1177,21 +1180,21 @@ pub const DeclGen = struct { /// Turn a Zig type into a SPIR-V Type, and return a reference to it. 
fn resolveType(self: *DeclGen, ty: Type, repr: Repr) Error!CacheRef { + const mod = self.module; log.debug("resolveType: ty = {}", .{ty.fmt(self.module)}); const target = self.getTarget(); - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Void, .NoReturn => return try self.spv.resolve(.void_type), .Bool => switch (repr) { .direct => return try self.spv.resolve(.bool_type), .indirect => return try self.intType(.unsigned, 1), }, .Int => { - const int_info = ty.intInfo(target); + const int_info = ty.intInfo(mod); return try self.intType(int_info.signedness, int_info.bits); }, .Enum => { - var buffer: Type.Payload.Bits = undefined; - const tag_ty = ty.intTagType(&buffer); + const tag_ty = ty.intTagType(); return self.resolveType(tag_ty, repr); }, .Float => { @@ -1290,7 +1293,7 @@ pub const DeclGen = struct { var member_index: usize = 0; for (tuple.types, 0..) |field_ty, i| { const field_val = tuple.values[i]; - if (field_val.tag() != .unreachable_value or !field_ty.hasRuntimeBits()) continue; + if (field_val.tag() != .unreachable_value or !field_ty.hasRuntimeBits(mod)) continue; member_types[member_index] = try self.resolveType(field_ty, .indirect); member_index += 1; @@ -1315,7 +1318,7 @@ pub const DeclGen = struct { var member_index: usize = 0; for (struct_ty.fields.values(), 0..) |field, i| { - if (field.is_comptime or !field.ty.hasRuntimeBits()) continue; + if (field.is_comptime or !field.ty.hasRuntimeBits(mod)) continue; member_types[member_index] = try self.resolveType(field.ty, .indirect); member_names[member_index] = try self.spv.resolveString(struct_ty.fields.keys()[i]); @@ -1334,7 +1337,7 @@ pub const DeclGen = struct { .Optional => { var buf: Type.Payload.ElemType = undefined; const payload_ty = ty.optionalChild(&buf); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { // Just use a bool. // Note: Always generate the bool with indirect format, to save on some sanity // Perform the conversion to a direct bool when the field is extracted. @@ -1342,7 +1345,7 @@ pub const DeclGen = struct { } const payload_ty_ref = try self.resolveType(payload_ty, .indirect); - if (ty.optionalReprIsPayload()) { + if (ty.optionalReprIsPayload(mod)) { // Optional is actually a pointer or a slice. 
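For the payload-is-pointer representation used above, no separate flag is stored; null is the zero address. This is observable from ordinary Zig:

    const std = @import("std");

    test "optional pointers and slices add no flag" {
        try std.testing.expect(@sizeOf(?*u8) == @sizeOf(*u8));
        try std.testing.expect(@sizeOf(?[]const u8) == @sizeOf([]const u8));
    }
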
return payload_ty_ref; } @@ -1445,14 +1448,14 @@ pub const DeclGen = struct { }; fn errorUnionLayout(self: *DeclGen, payload_ty: Type) ErrorUnionLayout { - const target = self.getTarget(); + const mod = self.module; - const error_align = Type.anyerror.abiAlignment(target); - const payload_align = payload_ty.abiAlignment(target); + const error_align = Type.anyerror.abiAlignment(mod); + const payload_align = payload_ty.abiAlignment(mod); const error_first = error_align > payload_align; return .{ - .payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(), + .payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(mod), .error_first = error_first, }; } @@ -1529,14 +1532,15 @@ pub const DeclGen = struct { } fn genDecl(self: *DeclGen) !void { - const decl = self.module.declPtr(self.decl_index); + const mod = self.module; + const decl = mod.declPtr(self.decl_index); const spv_decl_index = try self.resolveDecl(self.decl_index); const decl_id = self.spv.declPtr(spv_decl_index).result_id; log.debug("genDecl: id = {}, index = {}, name = {s}", .{ decl_id.id, @enumToInt(spv_decl_index), decl.name }); if (decl.val.castTag(.function)) |_| { - assert(decl.ty.zigTypeTag() == .Fn); + assert(decl.ty.zigTypeTag(mod) == .Fn); const prototype_id = try self.resolveTypeId(decl.ty); try self.func.prologue.emit(self.spv.gpa, .OpFunction, .{ .id_result_type = try self.resolveTypeId(decl.ty.fnReturnType()), @@ -1634,7 +1638,8 @@ pub const DeclGen = struct { /// Convert representation from indirect (in memory) to direct (in 'register') /// This converts the argument type from resolveType(ty, .indirect) to resolveType(ty, .direct). fn convertToDirect(self: *DeclGen, ty: Type, operand_id: IdRef) !IdRef { - return switch (ty.zigTypeTag()) { + const mod = self.module; + return switch (ty.zigTypeTag(mod)) { .Bool => blk: { const direct_bool_ty_ref = try self.resolveType(ty, .direct); const indirect_bool_ty_ref = try self.resolveType(ty, .indirect); @@ -1655,7 +1660,8 @@ pub const DeclGen = struct { /// Convert representation from direct (in 'register) to direct (in memory) /// This converts the argument type from resolveType(ty, .direct) to resolveType(ty, .indirect). 
fn convertToIndirect(self: *DeclGen, ty: Type, operand_id: IdRef) !IdRef { - return switch (ty.zigTypeTag()) { + const mod = self.module; + return switch (ty.zigTypeTag(mod)) { .Bool => blk: { const indirect_bool_ty_ref = try self.resolveType(ty, .indirect); break :blk self.boolToInt(indirect_bool_ty_ref, operand_id); @@ -2056,6 +2062,7 @@ pub const DeclGen = struct { } fn airShuffle(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { + const mod = self.module; if (self.liveness.isUnused(inst)) return null; const ty = self.air.typeOfIndex(inst); const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; @@ -2083,7 +2090,7 @@ pub const DeclGen = struct { if (elem.isUndef()) { self.func.body.writeOperand(spec.LiteralInteger, 0xFFFF_FFFF); } else { - const int = elem.toSignedInt(self.getTarget()); + const int = elem.toSignedInt(mod); const unsigned = if (int >= 0) @intCast(u32, int) else @intCast(u32, ~int + a_len); self.func.body.writeOperand(spec.LiteralInteger, unsigned); } @@ -2189,13 +2196,13 @@ pub const DeclGen = struct { lhs_id: IdRef, rhs_id: IdRef, ) !IdRef { + const mod = self.module; var cmp_lhs_id = lhs_id; var cmp_rhs_id = rhs_id; const opcode: Opcode = opcode: { - var int_buffer: Type.Payload.Bits = undefined; - const op_ty = switch (ty.zigTypeTag()) { + const op_ty = switch (ty.zigTypeTag(mod)) { .Int, .Bool, .Float => ty, - .Enum => ty.intTagType(&int_buffer), + .Enum => ty.intTagType(), .ErrorSet => Type.u16, .Pointer => blk: { // Note that while SPIR-V offers OpPtrEqual and OpPtrNotEqual, they are @@ -2303,13 +2310,14 @@ pub const DeclGen = struct { src_ty: Type, src_id: IdRef, ) !IdRef { + const mod = self.module; const dst_ty_ref = try self.resolveType(dst_ty, .direct); const result_id = self.spv.allocId(); // TODO: Some more cases are missing here // See fn bitCast in llvm.zig - if (src_ty.zigTypeTag() == .Int and dst_ty.isPtrAtRuntime()) { + if (src_ty.zigTypeTag(mod) == .Int and dst_ty.isPtrAtRuntime(mod)) { try self.func.body.emit(self.spv.gpa, .OpConvertUToPtr, .{ .id_result_type = self.typeId(dst_ty_ref), .id_result = result_id, @@ -2342,8 +2350,8 @@ pub const DeclGen = struct { const dest_ty = self.air.typeOfIndex(inst); const dest_ty_id = try self.resolveTypeId(dest_ty); - const target = self.getTarget(); - const dest_info = dest_ty.intInfo(target); + const mod = self.module; + const dest_info = dest_ty.intInfo(mod); // TODO: Masking? @@ -2485,8 +2493,9 @@ pub const DeclGen = struct { } fn ptrElemPtr(self: *DeclGen, ptr_ty: Type, ptr_id: IdRef, index_id: IdRef) !IdRef { + const mod = self.module; // Construct new pointer type for the resulting pointer - const elem_ty = ptr_ty.elemType2(); // use elemType() so that we get T for *[N]T. + const elem_ty = ptr_ty.elemType2(mod); // use elemType() so that we get T for *[N]T. 
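The element type chosen by `elemType2` above treats a pointer-to-array like a slice for indexing purposes; `std.meta.Elem` expresses the same rule in a standalone form:

    const std = @import("std");

    test "indexing element type of *[N]T is T" {
        try std.testing.expect(std.meta.Elem(*[4]u8) == u8);
        try std.testing.expect(std.meta.Elem([]const u16) == u16);
    }
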
const elem_ty_ref = try self.resolveType(elem_ty, .direct); const elem_ptr_ty_ref = try self.spv.ptrType(elem_ty_ref, spvStorageClass(ptr_ty.ptrAddressSpace())); if (ptr_ty.isSinglePointer()) { @@ -2502,12 +2511,13 @@ pub const DeclGen = struct { fn airPtrElemPtr(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { if (self.liveness.isUnused(inst)) return null; + const mod = self.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const ptr_ty = self.air.typeOf(bin_op.lhs); const elem_ty = ptr_ty.childType(); // TODO: Make this return a null ptr or something - if (!elem_ty.hasRuntimeBitsIgnoreComptime()) return null; + if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) return null; const ptr_id = try self.resolve(bin_op.lhs); const index_id = try self.resolve(bin_op.rhs); @@ -2536,8 +2546,8 @@ pub const DeclGen = struct { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const un_ty = self.air.typeOf(ty_op.operand); - const target = self.module.getTarget(); - const layout = un_ty.unionGetLayout(target); + const mod = self.module; + const layout = un_ty.unionGetLayout(mod); if (layout.tag_size == 0) return null; const union_handle = try self.resolve(ty_op.operand); @@ -2551,6 +2561,7 @@ pub const DeclGen = struct { fn airStructFieldVal(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { if (self.liveness.isUnused(inst)) return null; + const mod = self.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const struct_field = self.air.extraData(Air.StructField, ty_pl.payload).data; @@ -2559,9 +2570,9 @@ pub const DeclGen = struct { const field_index = struct_field.field_index; const field_ty = struct_ty.structFieldType(field_index); - if (!field_ty.hasRuntimeBitsIgnoreComptime()) return null; + if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) return null; - assert(struct_ty.zigTypeTag() == .Struct); // Cannot do unions yet. + assert(struct_ty.zigTypeTag(mod) == .Struct); // Cannot do unions yet. return try self.extractField(field_ty, object_id, field_index); } @@ -2573,8 +2584,9 @@ pub const DeclGen = struct { object_ptr: IdRef, field_index: u32, ) !?IdRef { + const mod = self.module; const object_ty = object_ptr_ty.childType(); - switch (object_ty.zigTypeTag()) { + switch (object_ty.zigTypeTag(mod)) { .Struct => switch (object_ty.containerLayout()) { .Packed => unreachable, // TODO else => { @@ -2667,6 +2679,7 @@ pub const DeclGen = struct { // the current block by first generating the code of the block, then a label, and then generate the rest of the current // ir.Block in a different SPIR-V block. + const mod = self.module; const label_id = self.spv.allocId(); // 4 chosen as arbitrary initial capacity. @@ -2690,7 +2703,7 @@ pub const DeclGen = struct { try self.beginSpvBlock(label_id); // If this block didn't produce a value, simply return here. - if (!ty.hasRuntimeBitsIgnoreComptime()) + if (!ty.hasRuntimeBitsIgnoreComptime(mod)) return null; // Combine the result from the blocks using the Phi instruction. @@ -2716,7 +2729,8 @@ pub const DeclGen = struct { const block = self.blocks.get(br.block_inst).?; const operand_ty = self.air.typeOf(br.operand); - if (operand_ty.hasRuntimeBits()) { + const mod = self.module; + if (operand_ty.hasRuntimeBits(mod)) { const operand_id = try self.resolve(br.operand); // current_block_label_id should not be undefined here, lest there is a br or br_void in the function's body. 
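The incoming-blocks list built here feeds one OpPhi operand pair per `br`; the source construct being lowered is a labeled block, as in this standalone sketch:

    const std = @import("std");

    test "each break contributes one phi incoming pair" {
        var cond = true;
        const x = blk: {
            // Every `break :blk v` becomes a (source label, value) pair
            // in the OpPhi that materializes the block's result.
            if (cond) break :blk @as(u32, 1);
            break :blk @as(u32, 2);
        };
        try std.testing.expect(x == 1);
    }
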
try block.incoming_blocks.append(self.gpa, .{ .src_label_id = self.current_block_label_id, .break_value_id = operand_id }); @@ -2771,13 +2785,14 @@ pub const DeclGen = struct { } fn airStore(self: *DeclGen, inst: Air.Inst.Index) !void { + const mod = self.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const ptr_ty = self.air.typeOf(bin_op.lhs); const ptr = try self.resolve(bin_op.lhs); const value = try self.resolve(bin_op.rhs); const ptr_ty_ref = try self.resolveType(ptr_ty, .direct); - const val_is_undef = if (self.air.value(bin_op.rhs)) |val| val.isUndefDeep() else false; + const val_is_undef = if (self.air.value(bin_op.rhs, mod)) |val| val.isUndefDeep() else false; if (val_is_undef) { const undef = try self.spv.constUndef(ptr_ty_ref); try self.store(ptr_ty, ptr, undef); @@ -2805,7 +2820,8 @@ pub const DeclGen = struct { fn airRet(self: *DeclGen, inst: Air.Inst.Index) !void { const operand = self.air.instructions.items(.data)[inst].un_op; const operand_ty = self.air.typeOf(operand); - if (operand_ty.hasRuntimeBits()) { + const mod = self.module; + if (operand_ty.hasRuntimeBits(mod)) { const operand_id = try self.resolve(operand); try self.func.body.emit(self.spv.gpa, .OpReturnValue, .{ .value = operand_id }); } else { @@ -2814,11 +2830,12 @@ pub const DeclGen = struct { } fn airRetLoad(self: *DeclGen, inst: Air.Inst.Index) !void { + const mod = self.module; const un_op = self.air.instructions.items(.data)[inst].un_op; const ptr_ty = self.air.typeOf(un_op); const ret_ty = ptr_ty.childType(); - if (!ret_ty.hasRuntimeBitsIgnoreComptime()) { + if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) { try self.func.body.emit(self.spv.gpa, .OpReturn, {}); return; } @@ -2946,6 +2963,7 @@ pub const DeclGen = struct { fn airIsNull(self: *DeclGen, inst: Air.Inst.Index, pred: enum { is_null, is_non_null }) !?IdRef { if (self.liveness.isUnused(inst)) return null; + const mod = self.module; const un_op = self.air.instructions.items(.data)[inst].un_op; const operand_id = try self.resolve(un_op); const optional_ty = self.air.typeOf(un_op); @@ -2955,7 +2973,7 @@ pub const DeclGen = struct { const bool_ty_ref = try self.resolveType(Type.bool, .direct); - if (optional_ty.optionalReprIsPayload()) { + if (optional_ty.optionalReprIsPayload(mod)) { // Pointer payload represents nullability: pointer or slice. 
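airIsNull branches on optionalReprIsPayload: when the payload is pointer-like, the optional is represented by the payload alone and the zero address means null; otherwise the optional carries an explicit bool next to the payload. A simplified model of that classification (deliberately ignoring slices and other corner cases the real rule covers):

const std = @import("std");

const OptionalRepr = enum {
    // The optional is just the payload; null is the all-zero value.
    payload_is_pointer,
    // The optional is (payload, is_set: bool); null checks read the bool.
    tagged,
};

fn optionalRepr(comptime Payload: type) OptionalRepr {
    return switch (@typeInfo(Payload)) {
        // allowzero and C pointers may legitimately hold the zero address,
        // so they fall back to the tagged form.
        .Pointer => |info| if (info.is_allowzero or info.size == .C)
            .tagged
        else
            .payload_is_pointer,
        else => .tagged,
    };
}

test "?*u8 needs no extra flag" {
    try std.testing.expectEqual(OptionalRepr.payload_is_pointer, optionalRepr(*u8));
    try std.testing.expectEqual(OptionalRepr.tagged, optionalRepr(u8));
}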
var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined; @@ -2985,7 +3003,7 @@ pub const DeclGen = struct { return result_id; } - const is_non_null_id = if (optional_ty.hasRuntimeBitsIgnoreComptime()) + const is_non_null_id = if (optional_ty.hasRuntimeBitsIgnoreComptime(mod)) try self.extractField(Type.bool, operand_id, 1) else // Optional representation is bool indicating whether the optional is set @@ -3009,14 +3027,15 @@ pub const DeclGen = struct { fn airUnwrapOptional(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { if (self.liveness.isUnused(inst)) return null; + const mod = self.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand_id = try self.resolve(ty_op.operand); const optional_ty = self.air.typeOf(ty_op.operand); const payload_ty = self.air.typeOfIndex(inst); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) return null; + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return null; - if (optional_ty.optionalReprIsPayload()) { + if (optional_ty.optionalReprIsPayload(mod)) { return operand_id; } @@ -3026,16 +3045,17 @@ pub const DeclGen = struct { fn airWrapOptional(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { if (self.liveness.isUnused(inst)) return null; + const mod = self.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const payload_ty = self.air.typeOf(ty_op.operand); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return try self.constBool(true, .direct); } const operand_id = try self.resolve(ty_op.operand); const optional_ty = self.air.typeOfIndex(inst); - if (optional_ty.optionalReprIsPayload()) { + if (optional_ty.optionalReprIsPayload(mod)) { return operand_id; } @@ -3045,30 +3065,29 @@ pub const DeclGen = struct { } fn airSwitchBr(self: *DeclGen, inst: Air.Inst.Index) !void { - const target = self.getTarget(); + const mod = self.module; const pl_op = self.air.instructions.items(.data)[inst].pl_op; const cond = try self.resolve(pl_op.operand); const cond_ty = self.air.typeOf(pl_op.operand); const switch_br = self.air.extraData(Air.SwitchBr, pl_op.payload); - const cond_words: u32 = switch (cond_ty.zigTypeTag()) { + const cond_words: u32 = switch (cond_ty.zigTypeTag(mod)) { .Int => blk: { - const bits = cond_ty.intInfo(target).bits; + const bits = cond_ty.intInfo(mod).bits; const backing_bits = self.backingIntBits(bits) orelse { return self.todo("implement composite int switch", .{}); }; break :blk if (backing_bits <= 32) @as(u32, 1) else 2; }, .Enum => blk: { - var buffer: Type.Payload.Bits = undefined; - const int_ty = cond_ty.intTagType(&buffer); - const int_info = int_ty.intInfo(target); + const int_ty = cond_ty.intTagType(); + const int_info = int_ty.intInfo(mod); const backing_bits = self.backingIntBits(int_info.bits) orelse { return self.todo("implement composite int switch", .{}); }; break :blk if (backing_bits <= 32) @as(u32, 1) else 2; }, - else => return self.todo("implement switch for type {s}", .{@tagName(cond_ty.zigTypeTag())}), // TODO: Figure out which types apply here, and work around them as we can only do integers. + else => return self.todo("implement switch for type {s}", .{@tagName(cond_ty.zigTypeTag(mod))}), // TODO: Figure out which types apply here, and work around them as we can only do integers. 
}; const num_cases = switch_br.data.cases_len; @@ -3112,15 +3131,15 @@ pub const DeclGen = struct { const label = IdRef{ .id = first_case_label.id + case_i }; for (items) |item| { - const value = self.air.value(item) orelse { + const value = self.air.value(item, mod) orelse { return self.todo("switch on runtime value???", .{}); }; - const int_val = switch (cond_ty.zigTypeTag()) { - .Int => if (cond_ty.isSignedInt()) @bitCast(u64, value.toSignedInt(target)) else value.toUnsignedInt(target), + const int_val = switch (cond_ty.zigTypeTag(mod)) { + .Int => if (cond_ty.isSignedInt(mod)) @bitCast(u64, value.toSignedInt(mod)) else value.toUnsignedInt(mod), .Enum => blk: { var int_buffer: Value.Payload.U64 = undefined; // TODO: figure out of cond_ty is correct (something with enum literals) - break :blk value.enumToInt(cond_ty, &int_buffer).toUnsignedInt(target); // TODO: composite integer constants + break :blk value.enumToInt(cond_ty, &int_buffer).toUnsignedInt(mod); // TODO: composite integer constants }, else => unreachable, }; @@ -3294,11 +3313,12 @@ pub const DeclGen = struct { fn airCall(self: *DeclGen, inst: Air.Inst.Index, modifier: std.builtin.CallModifier) !?IdRef { _ = modifier; + const mod = self.module; const pl_op = self.air.instructions.items(.data)[inst].pl_op; const extra = self.air.extraData(Air.Call, pl_op.payload); const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]); const callee_ty = self.air.typeOf(pl_op.operand); - const zig_fn_ty = switch (callee_ty.zigTypeTag()) { + const zig_fn_ty = switch (callee_ty.zigTypeTag(mod)) { .Fn => callee_ty, .Pointer => return self.fail("cannot call function pointers", .{}), else => unreachable, @@ -3320,7 +3340,7 @@ pub const DeclGen = struct { // temporary params buffer. const arg_id = try self.resolve(arg); const arg_ty = self.air.typeOf(arg); - if (!arg_ty.hasRuntimeBitsIgnoreComptime()) continue; + if (!arg_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; params[n_params] = arg_id; n_params += 1; @@ -3337,7 +3357,7 @@ pub const DeclGen = struct { try self.func.body.emit(self.spv.gpa, .OpUnreachable, {}); } - if (self.liveness.isUnused(inst) or !return_type.hasRuntimeBitsIgnoreComptime()) { + if (self.liveness.isUnused(inst) or !return_type.hasRuntimeBitsIgnoreComptime(mod)) { return null; } diff --git a/src/link/Coff.zig b/src/link/Coff.zig index 62a208406e..6117f1c1de 100644 --- a/src/link/Coff.zig +++ b/src/link/Coff.zig @@ -1123,7 +1123,7 @@ pub fn lowerUnnamedConst(self: *Coff, tv: TypedValue, decl_index: Module.Decl.In }, }; - const required_alignment = tv.ty.abiAlignment(self.base.options.target); + const required_alignment = tv.ty.abiAlignment(mod); const atom = self.getAtomPtr(atom_index); atom.size = @intCast(u32, code.len); atom.getSymbolPtr(self).value = try self.allocateAtom(atom_index, atom.size, required_alignment); @@ -1299,7 +1299,8 @@ pub fn getOrCreateAtomForDecl(self: *Coff, decl_index: Module.Decl.Index) !Atom. 
fn getDeclOutputSection(self: *Coff, decl_index: Module.Decl.Index) u16 { const decl = self.base.options.module.?.declPtr(decl_index); const ty = decl.ty; - const zig_ty = ty.zigTypeTag(); + const mod = self.base.options.module.?; + const zig_ty = ty.zigTypeTag(mod); const val = decl.val; const index: u16 = blk: { if (val.isUndefDeep()) { @@ -1330,7 +1331,7 @@ fn updateDeclCode(self: *Coff, decl_index: Module.Decl.Index, code: []u8, comple defer gpa.free(decl_name); log.debug("updateDeclCode {s}{*}", .{ decl_name, decl }); - const required_alignment = decl.getAlignment(self.base.options.target); + const required_alignment = decl.getAlignment(mod); const decl_metadata = self.decls.get(decl_index).?; const atom_index = decl_metadata.atom; diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig index 1d358a29ab..9c6e54ea98 100644 --- a/src/link/Dwarf.zig +++ b/src/link/Dwarf.zig @@ -169,16 +169,16 @@ pub const DeclState = struct { fn addDbgInfoType( self: *DeclState, - module: *Module, + mod: *Module, atom_index: Atom.Index, ty: Type, ) error{OutOfMemory}!void { const arena = self.abbrev_type_arena.allocator(); const dbg_info_buffer = &self.dbg_info; - const target = module.getTarget(); + const target = mod.getTarget(); const target_endian = target.cpu.arch.endian(); - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .NoReturn => unreachable, .Void => { try dbg_info_buffer.append(@enumToInt(AbbrevKind.pad1)); @@ -189,12 +189,12 @@ pub const DeclState = struct { // DW.AT.encoding, DW.FORM.data1 dbg_info_buffer.appendAssumeCapacity(DW.ATE.boolean); // DW.AT.byte_size, DW.FORM.udata - try leb128.writeULEB128(dbg_info_buffer.writer(), ty.abiSize(target)); + try leb128.writeULEB128(dbg_info_buffer.writer(), ty.abiSize(mod)); // DW.AT.name, DW.FORM.string - try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(module)}); + try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(mod)}); }, .Int => { - const info = ty.intInfo(target); + const info = ty.intInfo(mod); try dbg_info_buffer.ensureUnusedCapacity(12); dbg_info_buffer.appendAssumeCapacity(@enumToInt(AbbrevKind.base_type)); // DW.AT.encoding, DW.FORM.data1 @@ -203,20 +203,20 @@ pub const DeclState = struct { .unsigned => DW.ATE.unsigned, }); // DW.AT.byte_size, DW.FORM.udata - try leb128.writeULEB128(dbg_info_buffer.writer(), ty.abiSize(target)); + try leb128.writeULEB128(dbg_info_buffer.writer(), ty.abiSize(mod)); // DW.AT.name, DW.FORM.string - try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(module)}); + try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(mod)}); }, .Optional => { - if (ty.isPtrLikeOptional()) { + if (ty.isPtrLikeOptional(mod)) { try dbg_info_buffer.ensureUnusedCapacity(12); dbg_info_buffer.appendAssumeCapacity(@enumToInt(AbbrevKind.base_type)); // DW.AT.encoding, DW.FORM.data1 dbg_info_buffer.appendAssumeCapacity(DW.ATE.address); // DW.AT.byte_size, DW.FORM.udata - try leb128.writeULEB128(dbg_info_buffer.writer(), ty.abiSize(target)); + try leb128.writeULEB128(dbg_info_buffer.writer(), ty.abiSize(mod)); // DW.AT.name, DW.FORM.string - try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(module)}); + try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(mod)}); } else { // Non-pointer optionals are structs: struct { .maybe = *, .val = * } var buf = try arena.create(Type.Payload.ElemType); @@ -224,10 +224,10 @@ pub const DeclState = struct { // DW.AT.structure_type try dbg_info_buffer.append(@enumToInt(AbbrevKind.struct_type)); // DW.AT.byte_size, DW.FORM.udata - const abi_size = ty.abiSize(target); + const 
abi_size = ty.abiSize(mod); try leb128.writeULEB128(dbg_info_buffer.writer(), abi_size); // DW.AT.name, DW.FORM.string - try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(module)}); + try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(mod)}); // DW.AT.member try dbg_info_buffer.ensureUnusedCapacity(7); dbg_info_buffer.appendAssumeCapacity(@enumToInt(AbbrevKind.struct_member)); @@ -251,7 +251,7 @@ pub const DeclState = struct { try dbg_info_buffer.resize(index + 4); try self.addTypeRelocGlobal(atom_index, payload_ty, @intCast(u32, index)); // DW.AT.data_member_location, DW.FORM.udata - const offset = abi_size - payload_ty.abiSize(target); + const offset = abi_size - payload_ty.abiSize(mod); try leb128.writeULEB128(dbg_info_buffer.writer(), offset); // DW.AT.structure_type delimit children try dbg_info_buffer.append(0); @@ -266,9 +266,9 @@ pub const DeclState = struct { try dbg_info_buffer.ensureUnusedCapacity(2); dbg_info_buffer.appendAssumeCapacity(@enumToInt(AbbrevKind.struct_type)); // DW.AT.byte_size, DW.FORM.udata - try leb128.writeULEB128(dbg_info_buffer.writer(), ty.abiSize(target)); + try leb128.writeULEB128(dbg_info_buffer.writer(), ty.abiSize(mod)); // DW.AT.name, DW.FORM.string - try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(module)}); + try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(mod)}); // DW.AT.member try dbg_info_buffer.ensureUnusedCapacity(5); dbg_info_buffer.appendAssumeCapacity(@enumToInt(AbbrevKind.struct_member)); @@ -311,7 +311,7 @@ pub const DeclState = struct { // DW.AT.array_type try dbg_info_buffer.append(@enumToInt(AbbrevKind.array_type)); // DW.AT.name, DW.FORM.string - try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(module)}); + try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(mod)}); // DW.AT.type, DW.FORM.ref4 var index = dbg_info_buffer.items.len; try dbg_info_buffer.resize(index + 4); @@ -332,12 +332,12 @@ pub const DeclState = struct { // DW.AT.structure_type try dbg_info_buffer.append(@enumToInt(AbbrevKind.struct_type)); // DW.AT.byte_size, DW.FORM.udata - try leb128.writeULEB128(dbg_info_buffer.writer(), ty.abiSize(target)); + try leb128.writeULEB128(dbg_info_buffer.writer(), ty.abiSize(mod)); switch (ty.tag()) { .tuple, .anon_struct => { // DW.AT.name, DW.FORM.string - try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(module)}); + try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(mod)}); const fields = ty.tupleFields(); for (fields.types, 0..) |field, field_index| { @@ -350,13 +350,13 @@ pub const DeclState = struct { try dbg_info_buffer.resize(index + 4); try self.addTypeRelocGlobal(atom_index, field, @intCast(u32, index)); // DW.AT.data_member_location, DW.FORM.udata - const field_off = ty.structFieldOffset(field_index, target); + const field_off = ty.structFieldOffset(field_index, mod); try leb128.writeULEB128(dbg_info_buffer.writer(), field_off); } }, else => { // DW.AT.name, DW.FORM.string - const struct_name = try ty.nameAllocArena(arena, module); + const struct_name = try ty.nameAllocArena(arena, mod); try dbg_info_buffer.ensureUnusedCapacity(struct_name.len + 1); dbg_info_buffer.appendSliceAssumeCapacity(struct_name); dbg_info_buffer.appendAssumeCapacity(0); @@ -370,7 +370,7 @@ pub const DeclState = struct { const fields = ty.structFields(); for (fields.keys(), 0..) 
|field_name, field_index| { const field = fields.get(field_name).?; - if (!field.ty.hasRuntimeBits()) continue; + if (!field.ty.hasRuntimeBits(mod)) continue; // DW.AT.member try dbg_info_buffer.ensureUnusedCapacity(field_name.len + 2); dbg_info_buffer.appendAssumeCapacity(@enumToInt(AbbrevKind.struct_member)); @@ -382,7 +382,7 @@ pub const DeclState = struct { try dbg_info_buffer.resize(index + 4); try self.addTypeRelocGlobal(atom_index, field.ty, @intCast(u32, index)); // DW.AT.data_member_location, DW.FORM.udata - const field_off = ty.structFieldOffset(field_index, target); + const field_off = ty.structFieldOffset(field_index, mod); try leb128.writeULEB128(dbg_info_buffer.writer(), field_off); } }, @@ -395,9 +395,9 @@ pub const DeclState = struct { // DW.AT.enumeration_type try dbg_info_buffer.append(@enumToInt(AbbrevKind.enum_type)); // DW.AT.byte_size, DW.FORM.udata - try leb128.writeULEB128(dbg_info_buffer.writer(), ty.abiSize(target)); + try leb128.writeULEB128(dbg_info_buffer.writer(), ty.abiSize(mod)); // DW.AT.name, DW.FORM.string - const enum_name = try ty.nameAllocArena(arena, module); + const enum_name = try ty.nameAllocArena(arena, mod); try dbg_info_buffer.ensureUnusedCapacity(enum_name.len + 1); dbg_info_buffer.appendSliceAssumeCapacity(enum_name); dbg_info_buffer.appendAssumeCapacity(0); @@ -424,7 +424,7 @@ pub const DeclState = struct { // See https://github.com/ziglang/zig/issues/645 var int_buffer: Value.Payload.U64 = undefined; const field_int_val = value.enumToInt(ty, &int_buffer); - break :value @bitCast(u64, field_int_val.toSignedInt(target)); + break :value @bitCast(u64, field_int_val.toSignedInt(mod)); } else @intCast(u64, field_i); mem.writeInt(u64, dbg_info_buffer.addManyAsArrayAssumeCapacity(8), value, target_endian); } @@ -433,12 +433,12 @@ pub const DeclState = struct { try dbg_info_buffer.append(0); }, .Union => { - const layout = ty.unionGetLayout(target); + const layout = ty.unionGetLayout(mod); const union_obj = ty.cast(Type.Payload.Union).?.data; const payload_offset = if (layout.tag_align >= layout.payload_align) layout.tag_size else 0; const tag_offset = if (layout.tag_align >= layout.payload_align) 0 else layout.payload_size; const is_tagged = layout.tag_size > 0; - const union_name = try ty.nameAllocArena(arena, module); + const union_name = try ty.nameAllocArena(arena, mod); // TODO this is temporary to match current state of unions in Zig - we don't yet have // safety checks implemented meaning the implicit tag is not yet stored and generated @@ -481,7 +481,7 @@ pub const DeclState = struct { const fields = ty.unionFields(); for (fields.keys()) |field_name| { const field = fields.get(field_name).?; - if (!field.ty.hasRuntimeBits()) continue; + if (!field.ty.hasRuntimeBits(mod)) continue; // DW.AT.member try dbg_info_buffer.append(@enumToInt(AbbrevKind.struct_member)); // DW.AT.name, DW.FORM.string @@ -517,7 +517,7 @@ pub const DeclState = struct { .ErrorSet => { try addDbgInfoErrorSet( self.abbrev_type_arena.allocator(), - module, + mod, ty, target, &self.dbg_info, @@ -526,18 +526,18 @@ pub const DeclState = struct { .ErrorUnion => { const error_ty = ty.errorUnionSet(); const payload_ty = ty.errorUnionPayload(); - const payload_align = if (payload_ty.isNoReturn()) 0 else payload_ty.abiAlignment(target); - const error_align = Type.anyerror.abiAlignment(target); - const abi_size = ty.abiSize(target); - const payload_off = if (error_align >= payload_align) Type.anyerror.abiSize(target) else 0; - const error_off = if (error_align >= payload_align) 0 
else payload_ty.abiSize(target); + const payload_align = if (payload_ty.isNoReturn()) 0 else payload_ty.abiAlignment(mod); + const error_align = Type.anyerror.abiAlignment(mod); + const abi_size = ty.abiSize(mod); + const payload_off = if (error_align >= payload_align) Type.anyerror.abiSize(mod) else 0; + const error_off = if (error_align >= payload_align) 0 else payload_ty.abiSize(mod); // DW.AT.structure_type try dbg_info_buffer.append(@enumToInt(AbbrevKind.struct_type)); // DW.AT.byte_size, DW.FORM.udata try leb128.writeULEB128(dbg_info_buffer.writer(), abi_size); // DW.AT.name, DW.FORM.string - const name = try ty.nameAllocArena(arena, module); + const name = try ty.nameAllocArena(arena, mod); try dbg_info_buffer.writer().print("{s}\x00", .{name}); if (!payload_ty.isNoReturn()) { @@ -685,7 +685,8 @@ pub const DeclState = struct { const atom_index = self.di_atom_decls.get(owner_decl).?; const name_with_null = name.ptr[0 .. name.len + 1]; try dbg_info.append(@enumToInt(AbbrevKind.variable)); - const target = self.mod.getTarget(); + const mod = self.mod; + const target = mod.getTarget(); const endian = target.cpu.arch.endian(); const child_ty = if (is_ptr) ty.childType() else ty; @@ -790,9 +791,9 @@ pub const DeclState = struct { const fixup = dbg_info.items.len; dbg_info.appendSliceAssumeCapacity(&[2]u8{ // DW.AT.location, DW.FORM.exprloc 1, - if (child_ty.isSignedInt()) DW.OP.consts else DW.OP.constu, + if (child_ty.isSignedInt(mod)) DW.OP.consts else DW.OP.constu, }); - if (child_ty.isSignedInt()) { + if (child_ty.isSignedInt(mod)) { try leb128.writeILEB128(dbg_info.writer(), @bitCast(i64, x)); } else { try leb128.writeULEB128(dbg_info.writer(), x); @@ -805,7 +806,7 @@ pub const DeclState = struct { // DW.AT.location, DW.FORM.exprloc // uleb128(exprloc_len) // DW.OP.implicit_value uleb128(len_of_bytes) bytes - const abi_size = @intCast(u32, child_ty.abiSize(target)); + const abi_size = @intCast(u32, child_ty.abiSize(mod)); var implicit_value_len = std.ArrayList(u8).init(self.gpa); defer implicit_value_len.deinit(); try leb128.writeULEB128(implicit_value_len.writer(), abi_size); @@ -979,7 +980,7 @@ pub fn initDeclState(self: *Dwarf, mod: *Module, decl_index: Module.Decl.Index) assert(decl.has_tv); - switch (decl.ty.zigTypeTag()) { + switch (decl.ty.zigTypeTag(mod)) { .Fn => { _ = try self.getOrCreateAtomForDecl(.src_fn, decl_index); @@ -1027,7 +1028,7 @@ pub fn initDeclState(self: *Dwarf, mod: *Module, decl_index: Module.Decl.Index) try dbg_info_buffer.ensureUnusedCapacity(25 + decl_name_with_null.len); const fn_ret_type = decl.ty.fnReturnType(); - const fn_ret_has_bits = fn_ret_type.hasRuntimeBits(); + const fn_ret_has_bits = fn_ret_type.hasRuntimeBits(mod); if (fn_ret_has_bits) { dbg_info_buffer.appendAssumeCapacity(@enumToInt(AbbrevKind.subprogram)); } else { @@ -1059,7 +1060,7 @@ pub fn initDeclState(self: *Dwarf, mod: *Module, decl_index: Module.Decl.Index) pub fn commitDeclState( self: *Dwarf, - module: *Module, + mod: *Module, decl_index: Module.Decl.Index, sym_addr: u64, sym_size: u64, @@ -1071,12 +1072,12 @@ pub fn commitDeclState( const gpa = self.allocator; var dbg_line_buffer = &decl_state.dbg_line; var dbg_info_buffer = &decl_state.dbg_info; - const decl = module.declPtr(decl_index); + const decl = mod.declPtr(decl_index); const target_endian = self.target.cpu.arch.endian(); assert(decl.has_tv); - switch (decl.ty.zigTypeTag()) { + switch (decl.ty.zigTypeTag(mod)) { .Fn => { // Since the Decl is a function, we need to update the .debug_line program. 
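For comptime-known scalars, the variable location above is emitted as a DWARF expression: DW.OP.consts with an SLEB128 operand when the type is signed, DW.OP.constu with a ULEB128 operand otherwise. A minimal sketch of that encoding step using std.leb (opcode values are the DWARF spec's; the helper name is invented):

const std = @import("std");
const leb = std.leb;

const DW_OP_constu: u8 = 0x10;
const DW_OP_consts: u8 = 0x11;

// Writes the one-operand constant form picked by signedness; the caller
// has already written the exprloc length prefix.
fn writeConstOperand(writer: anytype, signed: bool, bits: u64) !void {
    if (signed) {
        try writer.writeByte(DW_OP_consts);
        try leb.writeILEB128(writer, @bitCast(i64, bits));
    } else {
        try writer.writeByte(DW_OP_constu);
        try leb.writeULEB128(writer, bits);
    }
}

test "signedness picks the LEB flavor" {
    var buf = std.ArrayList(u8).init(std.testing.allocator);
    defer buf.deinit();
    try writeConstOperand(buf.writer(), true, @bitCast(u64, @as(i64, -1)));
    try std.testing.expectEqualSlices(u8, &[_]u8{ 0x11, 0x7f }, buf.items);
}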
// Perform the relocations based on vaddr. @@ -1283,7 +1284,7 @@ pub fn commitDeclState( if (deferred) continue; symbol.offset = @intCast(u32, dbg_info_buffer.items.len); - try decl_state.addDbgInfoType(module, di_atom_index, ty); + try decl_state.addDbgInfoType(mod, di_atom_index, ty); } } @@ -1319,7 +1320,7 @@ pub fn commitDeclState( reloc.offset, value, target, - ty.fmt(module), + ty.fmt(mod), }); mem.writeInt( u32, @@ -2663,7 +2664,7 @@ fn genIncludeDirsAndFileNames(self: *Dwarf, arena: Allocator) !struct { fn addDbgInfoErrorSet( arena: Allocator, - module: *Module, + mod: *Module, ty: Type, target: std.Target, dbg_info_buffer: *std.ArrayList(u8), @@ -2673,10 +2674,10 @@ fn addDbgInfoErrorSet( // DW.AT.enumeration_type try dbg_info_buffer.append(@enumToInt(AbbrevKind.enum_type)); // DW.AT.byte_size, DW.FORM.udata - const abi_size = Type.anyerror.abiSize(target); + const abi_size = Type.anyerror.abiSize(mod); try leb128.writeULEB128(dbg_info_buffer.writer(), abi_size); // DW.AT.name, DW.FORM.string - const name = try ty.nameAllocArena(arena, module); + const name = try ty.nameAllocArena(arena, mod); try dbg_info_buffer.writer().print("{s}\x00", .{name}); // DW.AT.enumerator @@ -2691,7 +2692,7 @@ fn addDbgInfoErrorSet( const error_names = ty.errorSetNames(); for (error_names) |error_name| { - const kv = module.getErrorValue(error_name) catch unreachable; + const kv = mod.getErrorValue(error_name) catch unreachable; // DW.AT.enumerator try dbg_info_buffer.ensureUnusedCapacity(error_name.len + 2 + @sizeOf(u64)); dbg_info_buffer.appendAssumeCapacity(@enumToInt(AbbrevKind.enum_variant)); diff --git a/src/link/Elf.zig b/src/link/Elf.zig index 2a28f880ac..7bd36a9b60 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -2449,9 +2449,10 @@ pub fn getOrCreateAtomForDecl(self: *Elf, decl_index: Module.Decl.Index) !Atom.I } fn getDeclShdrIndex(self: *Elf, decl_index: Module.Decl.Index) u16 { - const decl = self.base.options.module.?.declPtr(decl_index); + const mod = self.base.options.module.?; + const decl = mod.declPtr(decl_index); const ty = decl.ty; - const zig_ty = ty.zigTypeTag(); + const zig_ty = ty.zigTypeTag(mod); const val = decl.val; const shdr_index: u16 = blk: { if (val.isUndefDeep()) { @@ -2482,7 +2483,7 @@ fn updateDeclCode(self: *Elf, decl_index: Module.Decl.Index, code: []const u8, s defer self.base.allocator.free(decl_name); log.debug("updateDeclCode {s}{*}", .{ decl_name, decl }); - const required_alignment = decl.getAlignment(self.base.options.target); + const required_alignment = decl.getAlignment(mod); const decl_metadata = self.decls.get(decl_index).?; const atom_index = decl_metadata.atom; @@ -2826,7 +2827,7 @@ pub fn lowerUnnamedConst(self: *Elf, typed_value: TypedValue, decl_index: Module }, }; - const required_alignment = typed_value.ty.abiAlignment(self.base.options.target); + const required_alignment = typed_value.ty.abiAlignment(mod); const shdr_index = self.rodata_section_index.?; const phdr_index = self.sections.items(.phdr_index)[shdr_index]; const local_sym = self.getAtom(atom_index).getSymbolPtr(self); diff --git a/src/link/MachO.zig b/src/link/MachO.zig index a346ec756f..306661c5c5 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -1948,7 +1948,8 @@ pub fn lowerUnnamedConst(self: *MachO, typed_value: TypedValue, decl_index: Modu }, }; - const required_alignment = typed_value.ty.abiAlignment(self.base.options.target); + const mod = self.base.options.module.?; + const required_alignment = typed_value.ty.abiAlignment(mod); const atom = 
self.getAtomPtr(atom_index); atom.size = code.len; // TODO: work out logic for disambiguating functions from function pointers @@ -2152,6 +2153,7 @@ pub fn getOrCreateAtomForLazySymbol(self: *MachO, sym: File.LazySymbol) !Atom.In } fn updateThreadlocalVariable(self: *MachO, module: *Module, decl_index: Module.Decl.Index) !void { + const mod = self.base.options.module.?; // Lowering a TLV on macOS involves two stages: // 1. first we lower the initializer into appopriate section (__thread_data or __thread_bss) // 2. next, we create a corresponding threadlocal variable descriptor in __thread_vars @@ -2202,7 +2204,7 @@ fn updateThreadlocalVariable(self: *MachO, module: *Module, decl_index: Module.D }, }; - const required_alignment = decl.getAlignment(self.base.options.target); + const required_alignment = decl.getAlignment(mod); const decl_name = try decl.getFullyQualifiedName(module); defer gpa.free(decl_name); @@ -2262,7 +2264,8 @@ fn getDeclOutputSection(self: *MachO, decl_index: Module.Decl.Index) u8 { const decl = self.base.options.module.?.declPtr(decl_index); const ty = decl.ty; const val = decl.val; - const zig_ty = ty.zigTypeTag(); + const mod = self.base.options.module.?; + const zig_ty = ty.zigTypeTag(mod); const mode = self.base.options.optimize_mode; const single_threaded = self.base.options.single_threaded; const sect_id: u8 = blk: { @@ -2301,7 +2304,7 @@ fn updateDeclCode(self: *MachO, decl_index: Module.Decl.Index, code: []u8) !u64 const mod = self.base.options.module.?; const decl = mod.declPtr(decl_index); - const required_alignment = decl.getAlignment(self.base.options.target); + const required_alignment = decl.getAlignment(mod); const decl_name = try decl.getFullyQualifiedName(mod); defer gpa.free(decl_name); diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig index 6d74e17dfd..7a389a789d 100644 --- a/src/link/Plan9.zig +++ b/src/link/Plan9.zig @@ -432,8 +432,9 @@ pub fn updateDecl(self: *Plan9, module: *Module, decl_index: Module.Decl.Index) } /// called at the end of update{Decl,Func} fn updateFinish(self: *Plan9, decl_index: Module.Decl.Index) !void { - const decl = self.base.options.module.?.declPtr(decl_index); - const is_fn = (decl.ty.zigTypeTag() == .Fn); + const mod = self.base.options.module.?; + const decl = mod.declPtr(decl_index); + const is_fn = (decl.ty.zigTypeTag(mod) == .Fn); log.debug("update the symbol table and got for decl {*} ({s})", .{ decl, decl.name }); const sym_t: aout.Sym.Type = if (is_fn) .t else .d; @@ -704,7 +705,7 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No log.debug("relocating the address of '{s}' + {d} into '{s}' + {d}", .{ target_decl.name, addend, source_decl.name, offset }); const code = blk: { - const is_fn = source_decl.ty.zigTypeTag() == .Fn; + const is_fn = source_decl.ty.zigTypeTag(mod) == .Fn; if (is_fn) { const table = self.fn_decl_table.get(source_decl.getFileScope()).?.functions; const output = table.get(source_decl_index).?; @@ -1031,7 +1032,7 @@ pub fn getDeclVAddr( ) !u64 { const mod = self.base.options.module.?; const decl = mod.declPtr(decl_index); - if (decl.ty.zigTypeTag() == .Fn) { + if (decl.ty.zigTypeTag(mod) == .Fn) { var start = self.bases.text; var it_file = self.fn_decl_table.iterator(); while (it_file.next()) |fentry| { diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig index befd2d68c9..0154207368 100644 --- a/src/link/Wasm.zig +++ b/src/link/Wasm.zig @@ -1473,7 +1473,7 @@ fn finishUpdateDecl(wasm: *Wasm, decl_index: Module.Decl.Index, code: []const u8 atom.size = 
@intCast(u32, code.len); if (code.len == 0) return; - atom.alignment = decl.ty.abiAlignment(wasm.base.options.target); + atom.alignment = decl.ty.abiAlignment(mod); } /// From a given symbol location, returns its `wasm.GlobalType`. @@ -1523,9 +1523,8 @@ fn getFunctionSignature(wasm: *const Wasm, loc: SymbolLoc) std.wasm.Type { /// Returns the symbol index of the local /// The given `decl` is the parent decl whom owns the constant. pub fn lowerUnnamedConst(wasm: *Wasm, tv: TypedValue, decl_index: Module.Decl.Index) !u32 { - assert(tv.ty.zigTypeTag() != .Fn); // cannot create local symbols for functions - const mod = wasm.base.options.module.?; + assert(tv.ty.zigTypeTag(mod) != .Fn); // cannot create local symbols for functions const decl = mod.declPtr(decl_index); // Create and initialize a new local symbol and atom @@ -1543,7 +1542,7 @@ pub fn lowerUnnamedConst(wasm: *Wasm, tv: TypedValue, decl_index: Module.Decl.In const code = code: { const atom = wasm.getAtomPtr(atom_index); - atom.alignment = tv.ty.abiAlignment(wasm.base.options.target); + atom.alignment = tv.ty.abiAlignment(mod); wasm.symbols.items[atom.sym_index] = .{ .name = try wasm.string_table.put(wasm.base.allocator, name), .flags = @enumToInt(Symbol.Flag.WASM_SYM_BINDING_LOCAL), @@ -1632,7 +1631,7 @@ pub fn getDeclVAddr( const atom_index = wasm.symbol_atom.get(.{ .file = null, .index = reloc_info.parent_atom_index }).?; const atom = wasm.getAtomPtr(atom_index); const is_wasm32 = wasm.base.options.target.cpu.arch == .wasm32; - if (decl.ty.zigTypeTag() == .Fn) { + if (decl.ty.zigTypeTag(mod) == .Fn) { assert(reloc_info.addend == 0); // addend not allowed for function relocations // We found a function pointer, so add it to our table, // as function pointers are not allowed to be stored inside the data section. 
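The Wasm hunks keep the same invariant before and after the migration: the "address" of a function cannot point into linear memory, so getDeclVAddr routes function decls through the indirect function table while only data decls get memory-address relocations. A toy model of that dispatch (the enum tags are abbreviations of the wasm object format's relocation kinds, simplified for illustration):

const DeclKind = enum { function, data };

const WasmReloc = enum {
    table_index, // slot in the indirect function table
    memory_addr_i32, // address in 32-bit linear memory
    memory_addr_i64, // address in 64-bit linear memory
};

fn relocForDecl(kind: DeclKind, is_wasm32: bool) WasmReloc {
    return switch (kind) {
        // Calling through the pointer later becomes call_indirect.
        .function => .table_index,
        .data => if (is_wasm32) .memory_addr_i32 else .memory_addr_i64,
    };
}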
@@ -2933,7 +2932,8 @@ pub fn getErrorTableSymbol(wasm: *Wasm) !u32 { const atom_index = try wasm.createAtom(); const atom = wasm.getAtomPtr(atom_index); const slice_ty = Type.initTag(.const_slice_u8_sentinel_0); - atom.alignment = slice_ty.abiAlignment(wasm.base.options.target); + const mod = wasm.base.options.module.?; + atom.alignment = slice_ty.abiAlignment(mod); const sym_index = atom.sym_index; const sym_name = try wasm.string_table.put(wasm.base.allocator, "__zig_err_name_table"); @@ -3000,7 +3000,7 @@ fn populateErrorNameTable(wasm: *Wasm) !void { .offset = offset, .addend = @intCast(i32, addend), }); - atom.size += @intCast(u32, slice_ty.abiSize(wasm.base.options.target)); + atom.size += @intCast(u32, slice_ty.abiSize(mod)); addend += len; // as we updated the error name table, we now store the actual name within the names atom @@ -3369,7 +3369,7 @@ pub fn flushModule(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Nod if (decl.isExtern()) continue; const atom_index = entry.value_ptr.*; const atom = wasm.getAtomPtr(atom_index); - if (decl.ty.zigTypeTag() == .Fn) { + if (decl.ty.zigTypeTag(mod) == .Fn) { try wasm.parseAtom(atom_index, .function); } else if (decl.getVariable()) |variable| { if (!variable.is_mutable) { diff --git a/src/print_air.zig b/src/print_air.zig index 2e8ab1a642..e8875ff018 100644 --- a/src/print_air.zig +++ b/src/print_air.zig @@ -7,6 +7,7 @@ const Value = @import("value.zig").Value; const Type = @import("type.zig").Type; const Air = @import("Air.zig"); const Liveness = @import("Liveness.zig"); +const InternPool = @import("InternPool.zig"); pub fn write(stream: anytype, module: *Module, air: Air, liveness: ?Liveness) void { const instruction_bytes = air.instructions.len * @@ -965,14 +966,13 @@ const Writer = struct { operand: Air.Inst.Ref, dies: bool, ) @TypeOf(s).Error!void { - var i: usize = @enumToInt(operand); + const i = @enumToInt(operand); - if (i < Air.Inst.Ref.typed_value_map.len) { + if (i < InternPool.static_len) { return s.print("@{}", .{operand}); } - i -= Air.Inst.Ref.typed_value_map.len; - return w.writeInstIndex(s, @intCast(Air.Inst.Index, i), dies); + return w.writeInstIndex(s, i - InternPool.static_len, dies); } fn writeInstIndex( diff --git a/src/print_zir.zig b/src/print_zir.zig index cfa68424d0..a2178bbb49 100644 --- a/src/print_zir.zig +++ b/src/print_zir.zig @@ -3,6 +3,7 @@ const mem = std.mem; const Allocator = std.mem.Allocator; const assert = std.debug.assert; const Ast = std.zig.Ast; +const InternPool = @import("InternPool.zig"); const Zir = @import("Zir.zig"); const Module = @import("Module.zig"); @@ -2468,14 +2469,9 @@ const Writer = struct { } fn writeInstRef(self: *Writer, stream: anytype, ref: Zir.Inst.Ref) !void { - var i: usize = @enumToInt(ref); - - if (i < Zir.Inst.Ref.typed_value_map.len) { - return stream.print("@{}", .{ref}); - } - i -= Zir.Inst.Ref.typed_value_map.len; - - return self.writeInstIndex(stream, @intCast(Zir.Inst.Index, i)); + const i = @enumToInt(ref); + if (i < InternPool.static_len) return stream.print("@{}", .{@intToEnum(InternPool.Index, i)}); + return self.writeInstIndex(stream, i - InternPool.static_len); } fn writeInstIndex(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void { diff --git a/src/target.zig b/src/target.zig index 5e66c8f417..c89f8ce92c 100644 --- a/src/target.zig +++ b/src/target.zig @@ -512,134 +512,6 @@ pub fn needUnwindTables(target: std.Target) bool { return target.os.tag == .windows; } -pub const AtomicPtrAlignmentError = error{ - FloatTooBig, - IntTooBig, - 
BadType, -}; - -pub const AtomicPtrAlignmentDiagnostics = struct { - bits: u16 = undefined, - max_bits: u16 = undefined, -}; - -/// If ABI alignment of `ty` is OK for atomic operations, returns 0. -/// Otherwise returns the alignment required on a pointer for the target -/// to perform atomic operations. -// TODO this function does not take into account CPU features, which can affect -// this value. Audit this! -pub fn atomicPtrAlignment( - target: std.Target, - ty: Type, - diags: *AtomicPtrAlignmentDiagnostics, -) AtomicPtrAlignmentError!u32 { - const max_atomic_bits: u16 = switch (target.cpu.arch) { - .avr, - .msp430, - .spu_2, - => 16, - - .arc, - .arm, - .armeb, - .hexagon, - .m68k, - .le32, - .mips, - .mipsel, - .nvptx, - .powerpc, - .powerpcle, - .r600, - .riscv32, - .sparc, - .sparcel, - .tce, - .tcele, - .thumb, - .thumbeb, - .x86, - .xcore, - .amdil, - .hsail, - .spir, - .kalimba, - .lanai, - .shave, - .wasm32, - .renderscript32, - .csky, - .spirv32, - .dxil, - .loongarch32, - .xtensa, - => 32, - - .amdgcn, - .bpfel, - .bpfeb, - .le64, - .mips64, - .mips64el, - .nvptx64, - .powerpc64, - .powerpc64le, - .riscv64, - .sparc64, - .s390x, - .amdil64, - .hsail64, - .spir64, - .wasm64, - .renderscript64, - .ve, - .spirv64, - .loongarch64, - => 64, - - .aarch64, - .aarch64_be, - .aarch64_32, - => 128, - - .x86_64 => if (std.Target.x86.featureSetHas(target.cpu.features, .cx16)) 128 else 64, - }; - - var buffer: Type.Payload.Bits = undefined; - - const int_ty = switch (ty.zigTypeTag()) { - .Int => ty, - .Enum => ty.intTagType(&buffer), - .Float => { - const bit_count = ty.floatBits(target); - if (bit_count > max_atomic_bits) { - diags.* = .{ - .bits = bit_count, - .max_bits = max_atomic_bits, - }; - return error.FloatTooBig; - } - return 0; - }, - .Bool => return 0, - else => { - if (ty.isPtrAtRuntime()) return 0; - return error.BadType; - }, - }; - - const bit_count = int_ty.intInfo(target).bits; - if (bit_count > max_atomic_bits) { - diags.* = .{ - .bits = bit_count, - .max_bits = max_atomic_bits, - }; - return error.IntTooBig; - } - - return 0; -} - pub fn defaultAddressSpace( target: std.Target, context: enum { diff --git a/src/type.zig b/src/type.zig index e5b41e717b..259079a26c 100644 --- a/src/type.zig +++ b/src/type.zig @@ -9,27 +9,102 @@ const log = std.log.scoped(.Type); const target_util = @import("target.zig"); const TypedValue = @import("TypedValue.zig"); const Sema = @import("Sema.zig"); +const InternPool = @import("InternPool.zig"); const file_struct = @This(); -/// This is the raw data, with no bookkeeping, no memory awareness, no de-duplication. -/// It's important for this type to be small. -/// Types are not de-duplicated, which helps with multi-threading since it obviates the requirement -/// of obtaining a lock on a global type table, as well as making the -/// garbage collection bookkeeping simpler. -/// This union takes advantage of the fact that the first page of memory -/// is unmapped, giving us 4096 possible enum tags that have no payload. -pub const Type = extern union { - /// If the tag value is less than Tag.no_payload_count, then no pointer - /// dereference is needed. - tag_if_small_enough: Tag, - ptr_otherwise: *Payload, - - pub fn zigTypeTag(ty: Type) std.builtin.TypeId { - return ty.zigTypeTagOrPoison() catch unreachable; - } +pub const Type = struct { + /// We are migrating towards using this for every Type object. However, many + /// types are still represented the legacy way. This is indicated by using + /// InternPool.Index.none. 
+ ip_index: InternPool.Index, + + /// This is the raw data, with no bookkeeping, no memory awareness, no de-duplication. + /// This union takes advantage of the fact that the first page of memory + /// is unmapped, giving us 4096 possible enum tags that have no payload. + legacy: extern union { + /// If the tag value is less than Tag.no_payload_count, then no pointer + /// dereference is needed. + tag_if_small_enough: Tag, + ptr_otherwise: *Payload, + }, + + pub fn zigTypeTag(ty: Type, mod: *const Module) std.builtin.TypeId { + return ty.zigTypeTagOrPoison(mod) catch unreachable; + } + + pub fn zigTypeTagOrPoison(ty: Type, mod: *const Module) error{GenericPoison}!std.builtin.TypeId { + if (ty.ip_index != .none) { + switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .int_type => return .Int, + .ptr_type => return .Pointer, + .array_type => return .Array, + .vector_type => return .Vector, + .optional_type => return .Optional, + .error_union_type => return .ErrorUnion, + .struct_type => return .Struct, + .simple_type => |s| switch (s) { + .f16, + .f32, + .f64, + .f80, + .f128, + => return .Float, + + .usize, + .isize, + .c_char, + .c_short, + .c_ushort, + .c_int, + .c_uint, + .c_long, + .c_ulong, + .c_longlong, + .c_ulonglong, + .c_longdouble, + => return .Int, + + .anyopaque => return .Opaque, + .bool => return .Bool, + .void => return .Void, + .type => return .Type, + .anyerror => return .ErrorSet, + .comptime_int => return .ComptimeInt, + .comptime_float => return .ComptimeFloat, + .noreturn => return .NoReturn, + .@"anyframe" => return .AnyFrame, + .null => return .Null, + .undefined => return .Undefined, + .enum_literal => return .EnumLiteral, + + .atomic_order, + .atomic_rmw_op, + .calling_convention, + .address_space, + .float_mode, + .reduce_op, + => return .Enum, + + .call_modifier, + .prefetch_options, + .export_options, + .extern_options, + => return .Struct, + + .type_info => return .Union, + + .generic_poison => unreachable, + .var_args_param => unreachable, + }, - pub fn zigTypeTagOrPoison(ty: Type) error{GenericPoison}!std.builtin.TypeId { + .extern_func, + .int, + .enum_tag, + .simple_value, + => unreachable, // it's a value, not a type + } + } switch (ty.tag()) { .generic_poison => return error.GenericPoison, @@ -56,8 +131,6 @@ pub const Type = extern union { .c_ulong, .c_longlong, .c_ulonglong, - .int_signed, - .int_unsigned, => return .Int, .f16, @@ -85,10 +158,6 @@ pub const Type = extern union { .null => return .Null, .undefined => return .Undefined, - .fn_noreturn_no_args => return .Fn, - .fn_void_no_args => return .Fn, - .fn_naked_noreturn_no_args => return .Fn, - .fn_ccc_void_no_args => return .Fn, .function => return .Fn, .array, @@ -159,26 +228,26 @@ pub const Type = extern union { } } - pub fn baseZigTypeTag(self: Type) std.builtin.TypeId { - return switch (self.zigTypeTag()) { - .ErrorUnion => self.errorUnionPayload().baseZigTypeTag(), + pub fn baseZigTypeTag(self: Type, mod: *const Module) std.builtin.TypeId { + return switch (self.zigTypeTag(mod)) { + .ErrorUnion => self.errorUnionPayload().baseZigTypeTag(mod), .Optional => { var buf: Payload.ElemType = undefined; - return self.optionalChild(&buf).baseZigTypeTag(); + return self.optionalChild(&buf).baseZigTypeTag(mod); }, else => |t| t, }; } - pub fn isSelfComparable(ty: Type, is_equality_cmp: bool) bool { - return switch (ty.zigTypeTag()) { + pub fn isSelfComparable(ty: Type, mod: *const Module, is_equality_cmp: bool) bool { + return switch (ty.zigTypeTag(mod)) { .Int, .Float, .ComptimeFloat, .ComptimeInt, => 
true, - .Vector => ty.elemType2().isSelfComparable(is_equality_cmp), + .Vector => ty.elemType2(mod).isSelfComparable(mod, is_equality_cmp), .Bool, .Type, @@ -205,44 +274,54 @@ pub const Type = extern union { .Optional => { if (!is_equality_cmp) return false; var buf: Payload.ElemType = undefined; - return ty.optionalChild(&buf).isSelfComparable(is_equality_cmp); + return ty.optionalChild(&buf).isSelfComparable(mod, is_equality_cmp); }, }; } pub fn initTag(comptime small_tag: Tag) Type { comptime assert(@enumToInt(small_tag) < Tag.no_payload_count); - return .{ .tag_if_small_enough = small_tag }; + return Type{ + .ip_index = .none, + .legacy = .{ .tag_if_small_enough = small_tag }, + }; } pub fn initPayload(payload: *Payload) Type { assert(@enumToInt(payload.tag) >= Tag.no_payload_count); - return .{ .ptr_otherwise = payload }; + return Type{ + .ip_index = .none, + .legacy = .{ .ptr_otherwise = payload }, + }; } - pub fn tag(self: Type) Tag { - if (@enumToInt(self.tag_if_small_enough) < Tag.no_payload_count) { - return self.tag_if_small_enough; + pub fn tag(ty: Type) Tag { + assert(ty.ip_index == .none); + if (@enumToInt(ty.legacy.tag_if_small_enough) < Tag.no_payload_count) { + return ty.legacy.tag_if_small_enough; } else { - return self.ptr_otherwise.tag; + return ty.legacy.ptr_otherwise.tag; } } /// Prefer `castTag` to this. pub fn cast(self: Type, comptime T: type) ?*T { + if (self.ip_index != .none) { + return null; + } if (@hasField(T, "base_tag")) { return self.castTag(T.base_tag); } - if (@enumToInt(self.tag_if_small_enough) < Tag.no_payload_count) { + if (@enumToInt(self.legacy.tag_if_small_enough) < Tag.no_payload_count) { return null; } inline for (@typeInfo(Tag).Enum.fields) |field| { if (field.value < Tag.no_payload_count) continue; const t = @intToEnum(Tag, field.value); - if (self.ptr_otherwise.tag == t) { + if (self.legacy.ptr_otherwise.tag == t) { if (T == t.Type()) { - return @fieldParentPtr(T, "base", self.ptr_otherwise); + return @fieldParentPtr(T, "base", self.legacy.ptr_otherwise); } return null; } @@ -251,11 +330,14 @@ pub const Type = extern union { } pub fn castTag(self: Type, comptime t: Tag) ?*t.Type() { - if (@enumToInt(self.tag_if_small_enough) < Tag.no_payload_count) + if (self.ip_index != .none) { + return null; + } + if (@enumToInt(self.legacy.tag_if_small_enough) < Tag.no_payload_count) return null; - if (self.ptr_otherwise.tag == t) - return @fieldParentPtr(t.Type(), "base", self.ptr_otherwise); + if (self.legacy.ptr_otherwise.tag == t) + return @fieldParentPtr(t.Type(), "base", self.legacy.ptr_otherwise); return null; } @@ -285,10 +367,10 @@ pub const Type = extern union { } /// If it is a function pointer, returns the function type. Otherwise returns null. - pub fn castPtrToFn(ty: Type) ?Type { - if (ty.zigTypeTag() != .Pointer) return null; + pub fn castPtrToFn(ty: Type, mod: *const Module) ?Type { + if (ty.zigTypeTag(mod) != .Pointer) return null; const elem_ty = ty.childType(); - if (elem_ty.zigTypeTag() != .Fn) return null; + if (elem_ty.zigTypeTag(mod) != .Fn) return null; return elem_ty; } @@ -536,7 +618,10 @@ pub const Type = extern union { pub fn eql(a: Type, b: Type, mod: *Module) bool { // As a shortcut, if the small tags / addresses match, we're done. 
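The restructured Type carries both an InternPool.Index and the legacy tagged-union payload; interned types are deduplicated per key, so whenever either side of a comparison is interned, eql reduces to comparing indices. A toy mirror of that shortcut (ToyType is invented; the real struct is the one introduced above):

const std = @import("std");

const Index = enum(u32) { none = std.math.maxInt(u32), _ };

const ToyType = struct {
    ip_index: Index,
    legacy_tag: u16,

    fn eql(a: ToyType, b: ToyType) bool {
        // Interned types are unique per key, so index equality suffices;
        // this also correctly reports interned != legacy.
        if (a.ip_index != .none or b.ip_index != .none)
            return a.ip_index == b.ip_index;
        // Legacy path: fall back to structural comparison (stubbed here).
        return a.legacy_tag == b.legacy_tag;
    }
};

test "interned comparison is index comparison" {
    const a = ToyType{ .ip_index = @intToEnum(Index, 7), .legacy_tag = 0 };
    const b = ToyType{ .ip_index = @intToEnum(Index, 7), .legacy_tag = 99 };
    try std.testing.expect(a.eql(b));
}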
- if (a.tag_if_small_enough == b.tag_if_small_enough) return true; + if (a.ip_index != .none or b.ip_index != .none) { + return a.ip_index == b.ip_index; + } + if (a.legacy.tag_if_small_enough == b.legacy.tag_if_small_enough) return true; switch (a.tag()) { .generic_poison => unreachable, @@ -589,16 +674,11 @@ pub const Type = extern union { .i64, .u128, .i128, - .int_signed, - .int_unsigned, => { - if (b.zigTypeTag() != .Int) return false; + if (b.zigTypeTag(mod) != .Int) return false; if (b.isNamedInt()) return false; - - // Arbitrary sized integers. The target will not be branched upon, - // because we handled target-dependent cases above. - const info_a = a.intInfo(@as(Target, undefined)); - const info_b = b.intInfo(@as(Target, undefined)); + const info_a = a.intInfo(mod); + const info_b = b.intInfo(mod); return info_a.signedness == info_b.signedness and info_a.bits == info_b.bits; }, @@ -641,13 +721,8 @@ pub const Type = extern union { return opaque_obj_a == opaque_obj_b; }, - .fn_noreturn_no_args, - .fn_void_no_args, - .fn_naked_noreturn_no_args, - .fn_ccc_void_no_args, - .function, - => { - if (b.zigTypeTag() != .Fn) return false; + .function => { + if (b.zigTypeTag(mod) != .Fn) return false; const a_info = a.fnInfo(); const b_info = b.fnInfo(); @@ -699,7 +774,7 @@ pub const Type = extern union { .array_sentinel, .vector, => { - if (a.zigTypeTag() != b.zigTypeTag()) return false; + if (a.zigTypeTag(mod) != b.zigTypeTag(mod)) return false; if (a.arrayLen() != b.arrayLen()) return false; @@ -737,7 +812,7 @@ pub const Type = extern union { .manyptr_const_u8, .manyptr_const_u8_sentinel_0, => { - if (b.zigTypeTag() != .Pointer) return false; + if (b.zigTypeTag(mod) != .Pointer) return false; const info_a = a.ptrInfo().data; const info_b = b.ptrInfo().data; @@ -783,7 +858,7 @@ pub const Type = extern union { .optional_single_const_pointer, .optional_single_mut_pointer, => { - if (b.zigTypeTag() != .Optional) return false; + if (b.zigTypeTag(mod) != .Optional) return false; var buf_a: Payload.ElemType = undefined; var buf_b: Payload.ElemType = undefined; @@ -791,7 +866,7 @@ pub const Type = extern union { }, .anyerror_void_error_union, .error_union => { - if (b.zigTypeTag() != .ErrorUnion) return false; + if (b.zigTypeTag(mod) != .ErrorUnion) return false; const a_set = a.errorUnionSet(); const b_set = b.errorUnionSet(); @@ -805,8 +880,8 @@ pub const Type = extern union { }, .anyframe_T => { - if (b.zigTypeTag() != .AnyFrame) return false; - return a.elemType2().eql(b.elemType2(), mod); + if (b.zigTypeTag(mod) != .AnyFrame) return false; + return a.elemType2(mod).eql(b.elemType2(mod), mod); }, .empty_struct => { @@ -941,6 +1016,9 @@ pub const Type = extern union { } pub fn hashWithHasher(ty: Type, hasher: *std.hash.Wyhash, mod: *Module) void { + if (ty.ip_index != .none) { + return mod.intern_pool.indexToKey(ty.ip_index).hashWithHasher(hasher); + } switch (ty.tag()) { .generic_poison => unreachable, @@ -1007,13 +1085,10 @@ pub const Type = extern union { .i64, .u128, .i128, - .int_signed, - .int_unsigned, => { - // Arbitrary sized integers. The target will not be branched upon, - // because we handled target-dependent cases above. + // Arbitrary sized integers. 
std.hash.autoHash(hasher, std.builtin.TypeId.Int); - const info = ty.intInfo(@as(Target, undefined)); + const info = ty.intInfo(mod); std.hash.autoHash(hasher, info.signedness); std.hash.autoHash(hasher, info.bits); }, @@ -1052,12 +1127,7 @@ pub const Type = extern union { std.hash.autoHash(hasher, opaque_obj); }, - .fn_noreturn_no_args, - .fn_void_no_args, - .fn_naked_noreturn_no_args, - .fn_ccc_void_no_args, - .function, - => { + .function => { std.hash.autoHash(hasher, std.builtin.TypeId.Fn); const fn_info = ty.fnInfo(); @@ -1275,9 +1345,15 @@ pub const Type = extern union { }; pub fn copy(self: Type, allocator: Allocator) error{OutOfMemory}!Type { - if (@enumToInt(self.tag_if_small_enough) < Tag.no_payload_count) { - return Type{ .tag_if_small_enough = self.tag_if_small_enough }; - } else switch (self.ptr_otherwise.tag) { + if (self.ip_index != .none) { + return Type{ .ip_index = self.ip_index, .legacy = undefined }; + } + if (@enumToInt(self.legacy.tag_if_small_enough) < Tag.no_payload_count) { + return Type{ + .ip_index = .none, + .legacy = .{ .tag_if_small_enough = self.legacy.tag_if_small_enough }, + }; + } else switch (self.legacy.ptr_otherwise.tag) { .u1, .u8, .i8, @@ -1317,10 +1393,6 @@ pub const Type = extern union { .noreturn, .null, .undefined, - .fn_noreturn_no_args, - .fn_void_no_args, - .fn_naked_noreturn_no_args, - .fn_ccc_void_no_args, .single_const_pointer_to_comptime_int, .const_slice_u8, .const_slice_u8_sentinel_0, @@ -1370,13 +1442,12 @@ pub const Type = extern union { .base = .{ .tag = payload.base.tag }, .data = try payload.data.copy(allocator), }; - return Type{ .ptr_otherwise = &new_payload.base }; + return Type{ + .ip_index = .none, + .legacy = .{ .ptr_otherwise = &new_payload.base }, + }; }, - .int_signed, - .int_unsigned, - => return self.copyPayloadShallow(allocator, Payload.Bits), - .vector => { const payload = self.castTag(.vector).?.data; return Tag.vector.create(allocator, .{ @@ -1511,7 +1582,10 @@ pub const Type = extern union { const payload = self.cast(T).?; const new_payload = try allocator.create(T); new_payload.* = payload.*; - return Type{ .ptr_otherwise = &new_payload.base }; + return Type{ + .ip_index = .none, + .legacy = .{ .ptr_otherwise = &new_payload.base }, + }; } pub fn format(ty: Type, comptime unused_fmt_string: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void { @@ -1550,7 +1624,7 @@ pub const Type = extern union { } /// This is a debug function. In order to print types in a meaningful way - /// we also need access to the target. + /// we also need access to the module. pub fn dump( start_type: Type, comptime unused_format_string: []const u8, @@ -1559,10 +1633,13 @@ pub const Type = extern union { ) @TypeOf(writer).Error!void { _ = options; comptime assert(unused_format_string.len == 0); + if (start_type.ip_index != .none) { + return writer.print("(intern index: {d})", .{@enumToInt(start_type.ip_index)}); + } if (true) { - // This is disabled to work around a bug where this function - // recursively causes more generic function instantiations - // resulting in an infinite loop in the compiler. + // This is disabled to work around a stage2 bug where this function recursively + // causes more generic function instantiations resulting in an infinite loop + // in the compiler. 
try writer.writeAll("[TODO fix internal compiler bug regarding dump]"); return; } @@ -1656,10 +1733,6 @@ pub const Type = extern union { .anyerror_void_error_union => return writer.writeAll("anyerror!void"), .const_slice_u8 => return writer.writeAll("[]const u8"), .const_slice_u8_sentinel_0 => return writer.writeAll("[:0]const u8"), - .fn_noreturn_no_args => return writer.writeAll("fn() noreturn"), - .fn_void_no_args => return writer.writeAll("fn() void"), - .fn_naked_noreturn_no_args => return writer.writeAll("fn() callconv(.Naked) noreturn"), - .fn_ccc_void_no_args => return writer.writeAll("fn() callconv(.C) void"), .single_const_pointer_to_comptime_int => return writer.writeAll("*const comptime_int"), .manyptr_u8 => return writer.writeAll("[*]u8"), .manyptr_const_u8 => return writer.writeAll("[*]const u8"), @@ -1820,14 +1893,6 @@ pub const Type = extern union { ty = pointee_type; continue; }, - .int_signed => { - const bits = ty.castTag(.int_signed).?.data; - return writer.print("i{d}", .{bits}); - }, - .int_unsigned => { - const bits = ty.castTag(.int_unsigned).?.data; - return writer.print("u{d}", .{bits}); - }, .optional => { const child_type = ty.castTag(.optional).?.data; try writer.writeByte('?'); @@ -1938,6 +2003,26 @@ pub const Type = extern union { /// Prints a name suitable for `@typeName`. pub fn print(ty: Type, writer: anytype, mod: *Module) @TypeOf(writer).Error!void { + if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .int_type => |int_type| { + const sign_char: u8 = switch (int_type.signedness) { + .signed => 'i', + .unsigned => 'u', + }; + return writer.print("{c}{d}", .{ sign_char, int_type.bits }); + }, + .ptr_type => @panic("TODO"), + .array_type => @panic("TODO"), + .vector_type => @panic("TODO"), + .optional_type => @panic("TODO"), + .error_union_type => @panic("TODO"), + .simple_type => |s| return writer.writeAll(@tagName(s)), + .struct_type => @panic("TODO"), + .simple_value => unreachable, + .extern_func => unreachable, + .int => unreachable, + .enum_tag => unreachable, + }; const t = ty.tag(); switch (t) { .inferred_alloc_const => unreachable, @@ -2041,10 +2126,6 @@ pub const Type = extern union { .anyerror_void_error_union => try writer.writeAll("anyerror!void"), .const_slice_u8 => try writer.writeAll("[]const u8"), .const_slice_u8_sentinel_0 => try writer.writeAll("[:0]const u8"), - .fn_noreturn_no_args => try writer.writeAll("fn() noreturn"), - .fn_void_no_args => try writer.writeAll("fn() void"), - .fn_naked_noreturn_no_args => try writer.writeAll("fn() callconv(.Naked) noreturn"), - .fn_ccc_void_no_args => try writer.writeAll("fn() callconv(.C) void"), .single_const_pointer_to_comptime_int => try writer.writeAll("*const comptime_int"), .manyptr_u8 => try writer.writeAll("[*]u8"), .manyptr_const_u8 => try writer.writeAll("[*]const u8"), @@ -2200,7 +2281,7 @@ pub const Type = extern union { if (info.@"align" != 0) { try writer.print("align({d}", .{info.@"align"}); } else { - const alignment = info.pointee_type.abiAlignment(mod.getTarget()); + const alignment = info.pointee_type.abiAlignment(mod); try writer.print("align({d}", .{alignment}); } @@ -2224,14 +2305,6 @@ pub const Type = extern union { try print(info.pointee_type, writer, mod); }, - .int_signed => { - const bits = ty.castTag(.int_signed).?.data; - return writer.print("i{d}", .{bits}); - }, - .int_unsigned => { - const bits = ty.castTag(.int_unsigned).?.data; - return writer.print("u{d}", .{bits}); - }, .optional => { const child_type = ty.castTag(.optional).?.data; 
try writer.writeByte('?'); @@ -2317,10 +2390,6 @@ pub const Type = extern union { .noreturn => return Value.initTag(.noreturn_type), .null => return Value.initTag(.null_type), .undefined => return Value.initTag(.undefined_type), - .fn_noreturn_no_args => return Value.initTag(.fn_noreturn_no_args_type), - .fn_void_no_args => return Value.initTag(.fn_void_no_args_type), - .fn_naked_noreturn_no_args => return Value.initTag(.fn_naked_noreturn_no_args_type), - .fn_ccc_void_no_args => return Value.initTag(.fn_ccc_void_no_args_type), .single_const_pointer_to_comptime_int => return Value.initTag(.single_const_pointer_to_comptime_int_type), .const_slice_u8 => return Value.initTag(.const_slice_u8_type), .const_slice_u8_sentinel_0 => return Value.initTag(.const_slice_u8_sentinel_0_type), @@ -2360,9 +2429,24 @@ pub const Type = extern union { /// may return false positives. pub fn hasRuntimeBitsAdvanced( ty: Type, + mod: *const Module, ignore_comptime_only: bool, strat: AbiAlignmentAdvancedStrat, ) RuntimeBitsError!bool { + if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .int_type => |int_type| return int_type.bits != 0, + .ptr_type => @panic("TODO"), + .array_type => @panic("TODO"), + .vector_type => @panic("TODO"), + .optional_type => @panic("TODO"), + .error_union_type => @panic("TODO"), + .simple_type => @panic("TODO"), + .struct_type => @panic("TODO"), + .simple_value => unreachable, + .extern_func => unreachable, + .int => unreachable, + .enum_tag => unreachable, // it's a value, not a type + }; switch (ty.tag()) { .u1, .u8, @@ -2440,12 +2524,12 @@ pub const Type = extern union { => { if (ignore_comptime_only) { return true; - } else if (ty.childType().zigTypeTag() == .Fn) { + } else if (ty.childType().zigTypeTag(mod) == .Fn) { return !ty.childType().fnInfo().is_generic; } else if (strat == .sema) { return !(try strat.sema.typeRequiresComptime(ty)); } else { - return !comptimeOnly(ty); + return !comptimeOnly(ty, mod); } }, @@ -2465,10 +2549,6 @@ pub const Type = extern union { // Special exceptions have to be made when emitting functions due to // this returning false. 
.function, - .fn_noreturn_no_args, - .fn_void_no_args, - .fn_naked_noreturn_no_args, - .fn_ccc_void_no_args, => return false, .optional => { @@ -2483,7 +2563,7 @@ pub const Type = extern union { } else if (strat == .sema) { return !(try strat.sema.typeRequiresComptime(child_ty)); } else { - return !comptimeOnly(child_ty); + return !comptimeOnly(child_ty, mod); } }, @@ -2502,7 +2582,7 @@ pub const Type = extern union { } for (struct_obj.fields.values()) |field| { if (field.is_comptime) continue; - if (try field.ty.hasRuntimeBitsAdvanced(ignore_comptime_only, strat)) + if (try field.ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat)) return true; } else { return false; @@ -2511,16 +2591,15 @@ pub const Type = extern union { .enum_full => { const enum_full = ty.castTag(.enum_full).?.data; - return enum_full.tag_ty.hasRuntimeBitsAdvanced(ignore_comptime_only, strat); + return enum_full.tag_ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat); }, .enum_simple => { const enum_simple = ty.castTag(.enum_simple).?.data; return enum_simple.fields.count() >= 2; }, .enum_numbered, .enum_nonexhaustive => { - var buffer: Payload.Bits = undefined; - const int_tag_ty = ty.intTagType(&buffer); - return int_tag_ty.hasRuntimeBitsAdvanced(ignore_comptime_only, strat); + const int_tag_ty = ty.intTagType(); + return int_tag_ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat); }, .@"union" => { @@ -2537,7 +2616,7 @@ pub const Type = extern union { .lazy => if (!union_obj.haveFieldTypes()) return error.NeedLazy, } for (union_obj.fields.values()) |value| { - if (try value.ty.hasRuntimeBitsAdvanced(ignore_comptime_only, strat)) + if (try value.ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat)) return true; } else { return false; @@ -2545,7 +2624,7 @@ pub const Type = extern union { }, .union_safety_tagged, .union_tagged => { const union_obj = ty.cast(Payload.Union).?.data; - if (try union_obj.tag_ty.hasRuntimeBitsAdvanced(ignore_comptime_only, strat)) { + if (try union_obj.tag_ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat)) { return true; } @@ -2555,7 +2634,7 @@ pub const Type = extern union { .lazy => if (!union_obj.haveFieldTypes()) return error.NeedLazy, } for (union_obj.fields.values()) |value| { - if (try value.ty.hasRuntimeBitsAdvanced(ignore_comptime_only, strat)) + if (try value.ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat)) return true; } else { return false; @@ -2563,18 +2642,16 @@ pub const Type = extern union { }, .array, .vector => return ty.arrayLen() != 0 and - try ty.elemType().hasRuntimeBitsAdvanced(ignore_comptime_only, strat), + try ty.elemType().hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat), .array_u8 => return ty.arrayLen() != 0, - .array_sentinel => return ty.childType().hasRuntimeBitsAdvanced(ignore_comptime_only, strat), - - .int_signed, .int_unsigned => return ty.cast(Payload.Bits).?.data != 0, + .array_sentinel => return ty.childType().hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat), .tuple, .anon_struct => { const tuple = ty.tupleFields(); for (tuple.types, 0..) 
|field_ty, i| { const val = tuple.values[i]; if (val.tag() != .unreachable_value) continue; // comptime field - if (try field_ty.hasRuntimeBitsAdvanced(ignore_comptime_only, strat)) return true; + if (try field_ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat)) return true; } return false; }, @@ -2588,7 +2665,21 @@ pub const Type = extern union { /// true if and only if the type has a well-defined memory layout /// readFrom/writeToMemory are supported only for types with a well- /// defined memory layout - pub fn hasWellDefinedLayout(ty: Type) bool { + pub fn hasWellDefinedLayout(ty: Type, mod: *const Module) bool { + if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .int_type => return true, + .ptr_type => @panic("TODO"), + .array_type => @panic("TODO"), + .vector_type => @panic("TODO"), + .optional_type => @panic("TODO"), + .error_union_type => @panic("TODO"), + .simple_type => @panic("TODO"), + .struct_type => @panic("TODO"), + .simple_value => unreachable, + .extern_func => unreachable, + .int => unreachable, + .enum_tag => unreachable, // it's a value, not a type + }; return switch (ty.tag()) { .u1, .u8, @@ -2626,8 +2717,6 @@ pub const Type = extern union { .manyptr_const_u8_sentinel_0, .array_u8, .array_u8_sentinel_0, - .int_signed, - .int_unsigned, .pointer, .single_const_pointer, .single_mut_pointer, @@ -2670,10 +2759,6 @@ pub const Type = extern union { .enum_literal, .type_info, // These are function bodies, not function pointers. - .fn_noreturn_no_args, - .fn_void_no_args, - .fn_naked_noreturn_no_args, - .fn_ccc_void_no_args, .function, .const_slice_u8, .const_slice_u8_sentinel_0, @@ -2698,25 +2783,25 @@ pub const Type = extern union { .array, .array_sentinel, - => ty.childType().hasWellDefinedLayout(), + => ty.childType().hasWellDefinedLayout(mod), - .optional => ty.isPtrLikeOptional(), + .optional => ty.isPtrLikeOptional(mod), .@"struct" => ty.castTag(.@"struct").?.data.layout != .Auto, .@"union", .union_safety_tagged => ty.cast(Payload.Union).?.data.layout != .Auto, .union_tagged => false, }; } - pub fn hasRuntimeBits(ty: Type) bool { - return hasRuntimeBitsAdvanced(ty, false, .eager) catch unreachable; + pub fn hasRuntimeBits(ty: Type, mod: *const Module) bool { + return hasRuntimeBitsAdvanced(ty, mod, false, .eager) catch unreachable; } - pub fn hasRuntimeBitsIgnoreComptime(ty: Type) bool { - return hasRuntimeBitsAdvanced(ty, true, .eager) catch unreachable; + pub fn hasRuntimeBitsIgnoreComptime(ty: Type, mod: *const Module) bool { + return hasRuntimeBitsAdvanced(ty, mod, true, .eager) catch unreachable; } - pub fn isFnOrHasRuntimeBits(ty: Type) bool { - switch (ty.zigTypeTag()) { + pub fn isFnOrHasRuntimeBits(ty: Type, mod: *const Module) bool { + switch (ty.zigTypeTag(mod)) { .Fn => { const fn_info = ty.fnInfo(); if (fn_info.is_generic) return false; @@ -2727,18 +2812,18 @@ pub const Type = extern union { .Inline => return false, else => {}, } - if (fn_info.return_type.comptimeOnly()) return false; + if (fn_info.return_type.comptimeOnly(mod)) return false; return true; }, - else => return ty.hasRuntimeBits(), + else => return ty.hasRuntimeBits(mod), } } /// Same as `isFnOrHasRuntimeBits` but comptime-only types may return a false positive. 
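The struct, tuple, and union prongs above all reduce to one rule: an aggregate has runtime bits if and only if at least one non-comptime field does. The rule is observable from user code; a small test, runnable with `zig test` on any recent compiler:

    const std = @import("std");

    test "aggregates of zero-bit fields have no runtime bits" {
        const Empty = struct { a: u0, b: void };
        const NonEmpty = struct { a: u0, b: u8 };
        try std.testing.expect(@sizeOf(Empty) == 0); // every field is zero-bit
        try std.testing.expect(@sizeOf(NonEmpty) == 1); // one field has runtime bits
    }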
- pub fn isFnOrHasRuntimeBitsIgnoreComptime(ty: Type) bool { - return switch (ty.zigTypeTag()) { + pub fn isFnOrHasRuntimeBitsIgnoreComptime(ty: Type, mod: *const Module) bool { + return switch (ty.zigTypeTag(mod)) { .Fn => true, - else => return ty.hasRuntimeBitsIgnoreComptime(), + else => return ty.hasRuntimeBitsIgnoreComptime(mod), }; } @@ -2761,11 +2846,11 @@ pub const Type = extern union { } /// Returns 0 if the pointer is naturally aligned and the element type is 0-bit. - pub fn ptrAlignment(ty: Type, target: Target) u32 { - return ptrAlignmentAdvanced(ty, target, null) catch unreachable; + pub fn ptrAlignment(ty: Type, mod: *const Module) u32 { + return ptrAlignmentAdvanced(ty, mod, null) catch unreachable; } - pub fn ptrAlignmentAdvanced(ty: Type, target: Target, opt_sema: ?*Sema) !u32 { + pub fn ptrAlignmentAdvanced(ty: Type, mod: *const Module, opt_sema: ?*Sema) !u32 { switch (ty.tag()) { .single_const_pointer, .single_mut_pointer, @@ -2780,10 +2865,10 @@ pub const Type = extern union { => { const child_type = ty.cast(Payload.ElemType).?.data; if (opt_sema) |sema| { - const res = try child_type.abiAlignmentAdvanced(target, .{ .sema = sema }); + const res = try child_type.abiAlignmentAdvanced(mod, .{ .sema = sema }); return res.scalar; } - return (child_type.abiAlignmentAdvanced(target, .eager) catch unreachable).scalar; + return (child_type.abiAlignmentAdvanced(mod, .eager) catch unreachable).scalar; }, .manyptr_u8, @@ -2798,13 +2883,13 @@ pub const Type = extern union { if (ptr_info.@"align" != 0) { return ptr_info.@"align"; } else if (opt_sema) |sema| { - const res = try ptr_info.pointee_type.abiAlignmentAdvanced(target, .{ .sema = sema }); + const res = try ptr_info.pointee_type.abiAlignmentAdvanced(mod, .{ .sema = sema }); return res.scalar; } else { - return (ptr_info.pointee_type.abiAlignmentAdvanced(target, .eager) catch unreachable).scalar; + return (ptr_info.pointee_type.abiAlignmentAdvanced(mod, .eager) catch unreachable).scalar; } }, - .optional => return ty.castTag(.optional).?.data.ptrAlignmentAdvanced(target, opt_sema), + .optional => return ty.castTag(.optional).?.data.ptrAlignmentAdvanced(mod, opt_sema), else => unreachable, } @@ -2843,13 +2928,13 @@ pub const Type = extern union { } /// Returns 0 for 0-bit types. - pub fn abiAlignment(ty: Type, target: Target) u32 { - return (ty.abiAlignmentAdvanced(target, .eager) catch unreachable).scalar; + pub fn abiAlignment(ty: Type, mod: *const Module) u32 { + return (ty.abiAlignmentAdvanced(mod, .eager) catch unreachable).scalar; } /// May capture a reference to `ty`. - pub fn lazyAbiAlignment(ty: Type, target: Target, arena: Allocator) !Value { - switch (try ty.abiAlignmentAdvanced(target, .{ .lazy = arena })) { + pub fn lazyAbiAlignment(ty: Type, mod: *const Module, arena: Allocator) !Value { + switch (try ty.abiAlignmentAdvanced(mod, .{ .lazy = arena })) { .val => |val| return val, .scalar => |x| return Value.Tag.int_u64.create(arena, x), } @@ -2874,9 +2959,29 @@ pub const Type = extern union { /// necessary, possibly returning a CompileError. 
pub fn abiAlignmentAdvanced( ty: Type, - target: Target, + mod: *const Module, strat: AbiAlignmentAdvancedStrat, ) Module.CompileError!AbiAlignmentAdvanced { + const target = mod.getTarget(); + + if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .int_type => |int_type| { + if (int_type.bits == 0) return AbiAlignmentAdvanced{ .scalar = 0 }; + return AbiAlignmentAdvanced{ .scalar = intAbiAlignment(int_type.bits, target) }; + }, + .ptr_type => @panic("TODO"), + .array_type => @panic("TODO"), + .vector_type => @panic("TODO"), + .optional_type => @panic("TODO"), + .error_union_type => @panic("TODO"), + .simple_type => @panic("TODO"), + .struct_type => @panic("TODO"), + .simple_value => unreachable, + .extern_func => unreachable, + .int => unreachable, + .enum_tag => unreachable, // it's a value, not a type + }; + const opt_sema = switch (strat) { .sema => |sema| sema, else => null, @@ -2902,12 +3007,6 @@ pub const Type = extern union { .anyopaque, => return AbiAlignmentAdvanced{ .scalar = 1 }, - .fn_noreturn_no_args, // represents machine code; not a pointer - .fn_void_no_args, // represents machine code; not a pointer - .fn_naked_noreturn_no_args, // represents machine code; not a pointer - .fn_ccc_void_no_args, // represents machine code; not a pointer - => return AbiAlignmentAdvanced{ .scalar = target_util.defaultFunctionAlignment(target) }, - // represents machine code; not a pointer .function => { const alignment = ty.castTag(.function).?.data.alignment; @@ -2958,12 +3057,11 @@ pub const Type = extern union { .f80 => switch (target.c_type_bit_size(.longdouble)) { 80 => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.longdouble) }, else => { - var payload: Payload.Bits = .{ - .base = .{ .tag = .int_unsigned }, - .data = 80, + const u80_ty: Type = .{ + .ip_index = .u80_type, + .legacy = undefined, }; - const u80_ty = initPayload(&payload.base); - return AbiAlignmentAdvanced{ .scalar = abiAlignment(u80_ty, target) }; + return AbiAlignmentAdvanced{ .scalar = abiAlignment(u80_ty, mod) }; }, }, .f128 => switch (target.c_type_bit_size(.longdouble)) { @@ -2980,11 +3078,11 @@ pub const Type = extern union { .error_set_merged, => return AbiAlignmentAdvanced{ .scalar = 2 }, - .array, .array_sentinel => return ty.elemType().abiAlignmentAdvanced(target, strat), + .array, .array_sentinel => return ty.elemType().abiAlignmentAdvanced(mod, strat), .vector => { const len = ty.arrayLen(); - const bits = try bitSizeAdvanced(ty.elemType(), target, opt_sema); + const bits = try bitSizeAdvanced(ty.elemType(), mod, opt_sema); const bytes = ((bits * len) + 7) / 8; const alignment = std.math.ceilPowerOfTwoAssert(u64, bytes); return AbiAlignmentAdvanced{ .scalar = @intCast(u32, alignment) }; @@ -2996,34 +3094,28 @@ pub const Type = extern union { .i64, .u64 => return AbiAlignmentAdvanced{ .scalar = intAbiAlignment(64, target) }, .u128, .i128 => return AbiAlignmentAdvanced{ .scalar = intAbiAlignment(128, target) }, - .int_signed, .int_unsigned => { - const bits: u16 = ty.cast(Payload.Bits).?.data; - if (bits == 0) return AbiAlignmentAdvanced{ .scalar = 0 }; - return AbiAlignmentAdvanced{ .scalar = intAbiAlignment(bits, target) }; - }, - .optional => { var buf: Payload.ElemType = undefined; const child_type = ty.optionalChild(&buf); - switch (child_type.zigTypeTag()) { + switch (child_type.zigTypeTag(mod)) { .Pointer => return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }, - .ErrorSet => return abiAlignmentAdvanced(Type.anyerror, target, strat), + 
.ErrorSet => return abiAlignmentAdvanced(Type.anyerror, mod, strat), .NoReturn => return AbiAlignmentAdvanced{ .scalar = 0 }, else => {}, } switch (strat) { .eager, .sema => { - if (!(child_type.hasRuntimeBitsAdvanced(false, strat) catch |err| switch (err) { + if (!(child_type.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { error.NeedLazy => return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(strat.lazy, ty) }, else => |e| return e, })) { return AbiAlignmentAdvanced{ .scalar = 1 }; } - return child_type.abiAlignmentAdvanced(target, strat); + return child_type.abiAlignmentAdvanced(mod, strat); }, - .lazy => |arena| switch (try child_type.abiAlignmentAdvanced(target, strat)) { + .lazy => |arena| switch (try child_type.abiAlignmentAdvanced(mod, strat)) { .scalar => |x| return AbiAlignmentAdvanced{ .scalar = @max(x, 1) }, .val => return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) }, }, @@ -3034,10 +3126,10 @@ pub const Type = extern union { // This code needs to be kept in sync with the equivalent switch prong // in abiSizeAdvanced. const data = ty.castTag(.error_union).?.data; - const code_align = abiAlignment(Type.anyerror, target); + const code_align = abiAlignment(Type.anyerror, mod); switch (strat) { .eager, .sema => { - if (!(data.payload.hasRuntimeBitsAdvanced(false, strat) catch |err| switch (err) { + if (!(data.payload.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { error.NeedLazy => return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(strat.lazy, ty) }, else => |e| return e, })) { @@ -3045,11 +3137,11 @@ pub const Type = extern union { } return AbiAlignmentAdvanced{ .scalar = @max( code_align, - (try data.payload.abiAlignmentAdvanced(target, strat)).scalar, + (try data.payload.abiAlignmentAdvanced(mod, strat)).scalar, ) }; }, .lazy => |arena| { - switch (try data.payload.abiAlignmentAdvanced(target, strat)) { + switch (try data.payload.abiAlignmentAdvanced(mod, strat)) { .scalar => |payload_align| { return AbiAlignmentAdvanced{ .scalar = @max(code_align, payload_align), @@ -3089,20 +3181,20 @@ pub const Type = extern union { .eager => {}, } assert(struct_obj.haveLayout()); - return AbiAlignmentAdvanced{ .scalar = struct_obj.backing_int_ty.abiAlignment(target) }; + return AbiAlignmentAdvanced{ .scalar = struct_obj.backing_int_ty.abiAlignment(mod) }; } const fields = ty.structFields(); var big_align: u32 = 0; for (fields.values()) |field| { - if (!(field.ty.hasRuntimeBitsAdvanced(false, strat) catch |err| switch (err) { + if (!(field.ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { error.NeedLazy => return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(strat.lazy, ty) }, else => |e| return e, })) continue; const field_align = if (field.abi_align != 0) field.abi_align - else switch (try field.ty.abiAlignmentAdvanced(target, strat)) { + else switch (try field.ty.abiAlignmentAdvanced(mod, strat)) { .scalar => |a| a, .val => switch (strat) { .eager => unreachable, // struct layout not resolved @@ -3114,7 +3206,7 @@ pub const Type = extern union { // This logic is duplicated in Module.Struct.Field.alignment. if (struct_obj.layout == .Extern or target.ofmt == .c) { - if (field.ty.isAbiInt() and field.ty.intInfo(target).bits >= 128) { + if (field.ty.isAbiInt(mod) and field.ty.intInfo(mod).bits >= 128) { // The C ABI requires 128 bit integer fields of structs // to be 16-bytes aligned. 
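The branch above encodes a C ABI requirement: integer fields of 128 bits or more are bumped to 16-byte alignment even when their natural alignment is lower. A sketch of just that bump, where `natural_align` is a hypothetical stand-in for the target-supplied value (this helper is not part of the patch):

    const std = @import("std");

    // `natural_align` stands in for the target's usual alignment of the
    // integer; illustrative helper only.
    fn cAbiFieldAlign(bits: u16, natural_align: u32) u32 {
        if (bits >= 128) return @max(natural_align, 16);
        return natural_align;
    }

    test "128-bit integer fields are 16-byte aligned under the C ABI" {
        try std.testing.expectEqual(@as(u32, 16), cAbiFieldAlign(128, 8));
        try std.testing.expectEqual(@as(u32, 4), cAbiFieldAlign(32, 4));
    }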
big_align = @max(big_align, 16); @@ -3130,9 +3222,9 @@ pub const Type = extern union { for (tuple.types, 0..) |field_ty, i| { const val = tuple.values[i]; if (val.tag() != .unreachable_value) continue; // comptime field - if (!(field_ty.hasRuntimeBits())) continue; + if (!(field_ty.hasRuntimeBits(mod))) continue; - switch (try field_ty.abiAlignmentAdvanced(target, strat)) { + switch (try field_ty.abiAlignmentAdvanced(mod, strat)) { .scalar => |field_align| big_align = @max(big_align, field_align), .val => switch (strat) { .eager => unreachable, // field type alignment not resolved @@ -3145,17 +3237,16 @@ pub const Type = extern union { }, .enum_full, .enum_nonexhaustive, .enum_simple, .enum_numbered => { - var buffer: Payload.Bits = undefined; - const int_tag_ty = ty.intTagType(&buffer); - return AbiAlignmentAdvanced{ .scalar = int_tag_ty.abiAlignment(target) }; + const int_tag_ty = ty.intTagType(); + return AbiAlignmentAdvanced{ .scalar = int_tag_ty.abiAlignment(mod) }; }, .@"union" => { const union_obj = ty.castTag(.@"union").?.data; - return abiAlignmentAdvancedUnion(ty, target, strat, union_obj, false); + return abiAlignmentAdvancedUnion(ty, mod, strat, union_obj, false); }, .union_safety_tagged, .union_tagged => { const union_obj = ty.cast(Payload.Union).?.data; - return abiAlignmentAdvancedUnion(ty, target, strat, union_obj, true); + return abiAlignmentAdvancedUnion(ty, mod, strat, union_obj, true); }, .empty_struct, @@ -3181,7 +3272,7 @@ pub const Type = extern union { pub fn abiAlignmentAdvancedUnion( ty: Type, - target: Target, + mod: *const Module, strat: AbiAlignmentAdvancedStrat, union_obj: *Module.Union, have_tag: bool, @@ -3195,6 +3286,7 @@ pub const Type = extern union { // We'll guess "pointer-aligned", if the union has an // underaligned pointer field then some allocations // might require explicit alignment. + const target = mod.getTarget(); return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }; } _ = try sema.resolveTypeFields(ty); @@ -3206,23 +3298,23 @@ pub const Type = extern union { }; if (union_obj.fields.count() == 0) { if (have_tag) { - return abiAlignmentAdvanced(union_obj.tag_ty, target, strat); + return abiAlignmentAdvanced(union_obj.tag_ty, mod, strat); } else { return AbiAlignmentAdvanced{ .scalar = @boolToInt(union_obj.layout == .Extern) }; } } var max_align: u32 = 0; - if (have_tag) max_align = union_obj.tag_ty.abiAlignment(target); + if (have_tag) max_align = union_obj.tag_ty.abiAlignment(mod); for (union_obj.fields.values()) |field| { - if (!(field.ty.hasRuntimeBitsAdvanced(false, strat) catch |err| switch (err) { + if (!(field.ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { error.NeedLazy => return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(strat.lazy, ty) }, else => |e| return e, })) continue; const field_align = if (field.abi_align != 0) field.abi_align - else switch (try field.ty.abiAlignmentAdvanced(target, strat)) { + else switch (try field.ty.abiAlignmentAdvanced(mod, strat)) { .scalar => |a| a, .val => switch (strat) { .eager => unreachable, // struct layout not resolved @@ -3236,8 +3328,8 @@ pub const Type = extern union { } /// May capture a reference to `ty`. 
- pub fn lazyAbiSize(ty: Type, target: Target, arena: Allocator) !Value { - switch (try ty.abiSizeAdvanced(target, .{ .lazy = arena })) { + pub fn lazyAbiSize(ty: Type, mod: *const Module, arena: Allocator) !Value { + switch (try ty.abiSizeAdvanced(mod, .{ .lazy = arena })) { .val => |val| return val, .scalar => |x| return Value.Tag.int_u64.create(arena, x), } @@ -3245,8 +3337,8 @@ pub const Type = extern union { /// Asserts the type has the ABI size already resolved. /// Types that return false for hasRuntimeBits() return 0. - pub fn abiSize(ty: Type, target: Target) u64 { - return (abiSizeAdvanced(ty, target, .eager) catch unreachable).scalar; + pub fn abiSize(ty: Type, mod: *const Module) u64 { + return (abiSizeAdvanced(ty, mod, .eager) catch unreachable).scalar; } const AbiSizeAdvanced = union(enum) { @@ -3262,14 +3354,30 @@ pub const Type = extern union { /// necessary, possibly returning a CompileError. pub fn abiSizeAdvanced( ty: Type, - target: Target, + mod: *const Module, strat: AbiAlignmentAdvancedStrat, ) Module.CompileError!AbiSizeAdvanced { + const target = mod.getTarget(); + + if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .int_type => |int_type| { + if (int_type.bits == 0) return AbiSizeAdvanced{ .scalar = 0 }; + return AbiSizeAdvanced{ .scalar = intAbiSize(int_type.bits, target) }; + }, + .ptr_type => @panic("TODO"), + .array_type => @panic("TODO"), + .vector_type => @panic("TODO"), + .optional_type => @panic("TODO"), + .error_union_type => @panic("TODO"), + .simple_type => @panic("TODO"), + .struct_type => @panic("TODO"), + .simple_value => unreachable, + .extern_func => unreachable, + .int => unreachable, + .enum_tag => unreachable, // it's a value, not a type + }; + switch (ty.tag()) { - .fn_noreturn_no_args => unreachable, // represents machine code; not a pointer - .fn_void_no_args => unreachable, // represents machine code; not a pointer - .fn_naked_noreturn_no_args => unreachable, // represents machine code; not a pointer - .fn_ccc_void_no_args => unreachable, // represents machine code; not a pointer .function => unreachable, // represents machine code; not a pointer .@"opaque" => unreachable, // no size available .noreturn => unreachable, @@ -3308,7 +3416,7 @@ pub const Type = extern union { .eager => {}, } assert(struct_obj.haveLayout()); - return AbiSizeAdvanced{ .scalar = struct_obj.backing_int_ty.abiSize(target) }; + return AbiSizeAdvanced{ .scalar = struct_obj.backing_int_ty.abiSize(mod) }; }, else => { switch (strat) { @@ -3327,22 +3435,21 @@ pub const Type = extern union { if (field_count == 0) { return AbiSizeAdvanced{ .scalar = 0 }; } - return AbiSizeAdvanced{ .scalar = ty.structFieldOffset(field_count, target) }; + return AbiSizeAdvanced{ .scalar = ty.structFieldOffset(field_count, mod) }; }, }, .enum_simple, .enum_full, .enum_nonexhaustive, .enum_numbered => { - var buffer: Payload.Bits = undefined; - const int_tag_ty = ty.intTagType(&buffer); - return AbiSizeAdvanced{ .scalar = int_tag_ty.abiSize(target) }; + const int_tag_ty = ty.intTagType(); + return AbiSizeAdvanced{ .scalar = int_tag_ty.abiSize(mod) }; }, .@"union" => { const union_obj = ty.castTag(.@"union").?.data; - return abiSizeAdvancedUnion(ty, target, strat, union_obj, false); + return abiSizeAdvancedUnion(ty, mod, strat, union_obj, false); }, .union_safety_tagged, .union_tagged => { const union_obj = ty.cast(Payload.Union).?.data; - return abiSizeAdvancedUnion(ty, target, strat, union_obj, true); + return abiSizeAdvancedUnion(ty, mod, strat, union_obj, true); 
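Both union prongs defer to `union_obj.abiSize(mod, have_tag)`; the observable result is the size of the largest field, padded to the union's alignment, plus the tag when one is present. For the untagged extern case this can be checked directly:

    const std = @import("std");

    test "extern union ABI size is its largest field, padded to its alignment" {
        const U = extern union {
            a: u8,
            b: u64,
        };
        try std.testing.expect(@sizeOf(U) == 8);
        try std.testing.expect(@alignOf(U) == @alignOf(u64));
    }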
}, .u1, @@ -3361,7 +3468,7 @@ pub const Type = extern union { .array_u8_sentinel_0 => return AbiSizeAdvanced{ .scalar = ty.castTag(.array_u8_sentinel_0).?.data + 1 }, .array => { const payload = ty.castTag(.array).?.data; - switch (try payload.elem_type.abiSizeAdvanced(target, strat)) { + switch (try payload.elem_type.abiSizeAdvanced(mod, strat)) { .scalar => |elem_size| return AbiSizeAdvanced{ .scalar = payload.len * elem_size }, .val => switch (strat) { .sema => unreachable, @@ -3372,7 +3479,7 @@ pub const Type = extern union { }, .array_sentinel => { const payload = ty.castTag(.array_sentinel).?.data; - switch (try payload.elem_type.abiSizeAdvanced(target, strat)) { + switch (try payload.elem_type.abiSizeAdvanced(mod, strat)) { .scalar => |elem_size| return AbiSizeAdvanced{ .scalar = (payload.len + 1) * elem_size }, .val => switch (strat) { .sema => unreachable, @@ -3391,10 +3498,10 @@ pub const Type = extern union { .val = try Value.Tag.lazy_size.create(arena, ty), }, }; - const elem_bits = try payload.elem_type.bitSizeAdvanced(target, opt_sema); + const elem_bits = try payload.elem_type.bitSizeAdvanced(mod, opt_sema); const total_bits = elem_bits * payload.len; const total_bytes = (total_bits + 7) / 8; - const alignment = switch (try ty.abiAlignmentAdvanced(target, strat)) { + const alignment = switch (try ty.abiAlignmentAdvanced(mod, strat)) { .scalar => |x| x, .val => return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(strat.lazy, ty), @@ -3450,12 +3557,11 @@ pub const Type = extern union { .f80 => switch (target.c_type_bit_size(.longdouble)) { 80 => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.longdouble) }, else => { - var payload: Payload.Bits = .{ - .base = .{ .tag = .int_unsigned }, - .data = 80, + const u80_ty: Type = .{ + .ip_index = .u80_type, + .legacy = undefined, }; - const u80_ty = initPayload(&payload.base); - return AbiSizeAdvanced{ .scalar = abiSize(u80_ty, target) }; + return AbiSizeAdvanced{ .scalar = abiSize(u80_ty, mod) }; }, }, @@ -3473,11 +3579,6 @@ pub const Type = extern union { .i32, .u32 => return AbiSizeAdvanced{ .scalar = intAbiSize(32, target) }, .i64, .u64 => return AbiSizeAdvanced{ .scalar = intAbiSize(64, target) }, .u128, .i128 => return AbiSizeAdvanced{ .scalar = intAbiSize(128, target) }, - .int_signed, .int_unsigned => { - const bits: u16 = ty.cast(Payload.Bits).?.data; - if (bits == 0) return AbiSizeAdvanced{ .scalar = 0 }; - return AbiSizeAdvanced{ .scalar = intAbiSize(bits, target) }; - }, .optional => { var buf: Payload.ElemType = undefined; @@ -3487,16 +3588,16 @@ pub const Type = extern union { return AbiSizeAdvanced{ .scalar = 0 }; } - if (!(child_type.hasRuntimeBitsAdvanced(false, strat) catch |err| switch (err) { + if (!(child_type.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { error.NeedLazy => return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(strat.lazy, ty) }, else => |e| return e, })) return AbiSizeAdvanced{ .scalar = 1 }; - if (ty.optionalReprIsPayload()) { - return abiSizeAdvanced(child_type, target, strat); + if (ty.optionalReprIsPayload(mod)) { + return abiSizeAdvanced(child_type, mod, strat); } - const payload_size = switch (try child_type.abiSizeAdvanced(target, strat)) { + const payload_size = switch (try child_type.abiSizeAdvanced(mod, strat)) { .scalar => |elem_size| elem_size, .val => switch (strat) { .sema => unreachable, @@ -3510,7 +3611,7 @@ pub const Type = extern union { // guaranteed to be >= that of bool's (1 byte) the added size is exactly equal // to the child 
type's ABI alignment. return AbiSizeAdvanced{ - .scalar = child_type.abiAlignment(target) + payload_size, + .scalar = child_type.abiAlignment(mod) + payload_size, }; }, @@ -3518,17 +3619,17 @@ pub const Type = extern union { // This code needs to be kept in sync with the equivalent switch prong // in abiAlignmentAdvanced. const data = ty.castTag(.error_union).?.data; - const code_size = abiSize(Type.anyerror, target); - if (!(data.payload.hasRuntimeBitsAdvanced(false, strat) catch |err| switch (err) { + const code_size = abiSize(Type.anyerror, mod); + if (!(data.payload.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { error.NeedLazy => return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(strat.lazy, ty) }, else => |e| return e, })) { // Same as anyerror. return AbiSizeAdvanced{ .scalar = code_size }; } - const code_align = abiAlignment(Type.anyerror, target); - const payload_align = abiAlignment(data.payload, target); - const payload_size = switch (try data.payload.abiSizeAdvanced(target, strat)) { + const code_align = abiAlignment(Type.anyerror, mod); + const payload_align = abiAlignment(data.payload, mod); + const payload_size = switch (try data.payload.abiSizeAdvanced(mod, strat)) { .scalar => |elem_size| elem_size, .val => switch (strat) { .sema => unreachable, @@ -3556,7 +3657,7 @@ pub const Type = extern union { pub fn abiSizeAdvancedUnion( ty: Type, - target: Target, + mod: *const Module, strat: AbiAlignmentAdvancedStrat, union_obj: *Module.Union, have_tag: bool, @@ -3570,7 +3671,7 @@ pub const Type = extern union { }, .eager => {}, } - return AbiSizeAdvanced{ .scalar = union_obj.abiSize(target, have_tag) }; + return AbiSizeAdvanced{ .scalar = union_obj.abiSize(mod, have_tag) }; } fn intAbiSize(bits: u16, target: Target) u64 { @@ -3585,8 +3686,8 @@ pub const Type = extern union { ); } - pub fn bitSize(ty: Type, target: Target) u64 { - return bitSizeAdvanced(ty, target, null) catch unreachable; + pub fn bitSize(ty: Type, mod: *const Module) u64 { + return bitSizeAdvanced(ty, mod, null) catch unreachable; } /// If you pass `opt_sema`, any recursive type resolutions will happen if @@ -3594,15 +3695,29 @@ pub const Type = extern union { /// the type is fully resolved, and there will be no error, guaranteed. 
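The comment in the optional prong above is worth restating: since the payload's ABI alignment is always at least that of `bool`, the null flag costs exactly one alignment unit of the child type. A test sketch of that arithmetic:

    const std = @import("std");

    test "a non-pointer optional costs one ABI-alignment unit of the payload" {
        try std.testing.expect(@sizeOf(?u32) == @sizeOf(u32) + @alignOf(u32));
        try std.testing.expect(@sizeOf(?u64) == @sizeOf(u64) + @alignOf(u64));
    }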
pub fn bitSizeAdvanced( ty: Type, - target: Target, + mod: *const Module, opt_sema: ?*Sema, ) Module.CompileError!u64 { + const target = mod.getTarget(); + + if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .int_type => |int_type| return int_type.bits, + .ptr_type => @panic("TODO"), + .array_type => @panic("TODO"), + .vector_type => @panic("TODO"), + .optional_type => @panic("TODO"), + .error_union_type => @panic("TODO"), + .simple_type => @panic("TODO"), + .struct_type => @panic("TODO"), + .simple_value => unreachable, + .extern_func => unreachable, + .int => unreachable, + .enum_tag => unreachable, // it's a value, not a type + }; + const strat: AbiAlignmentAdvancedStrat = if (opt_sema) |sema| .{ .sema = sema } else .eager; + switch (ty.tag()) { - .fn_noreturn_no_args => unreachable, // represents machine code; not a pointer - .fn_void_no_args => unreachable, // represents machine code; not a pointer - .fn_naked_noreturn_no_args => unreachable, // represents machine code; not a pointer - .fn_ccc_void_no_args => unreachable, // represents machine code; not a pointer .function => unreachable, // represents machine code; not a pointer .anyopaque => unreachable, .type => unreachable, @@ -3633,68 +3748,67 @@ pub const Type = extern union { .@"struct" => { const struct_obj = ty.castTag(.@"struct").?.data; if (struct_obj.layout != .Packed) { - return (try ty.abiSizeAdvanced(target, strat)).scalar * 8; + return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8; } if (opt_sema) |sema| _ = try sema.resolveTypeLayout(ty); assert(struct_obj.haveLayout()); - return try struct_obj.backing_int_ty.bitSizeAdvanced(target, opt_sema); + return try struct_obj.backing_int_ty.bitSizeAdvanced(mod, opt_sema); }, .tuple, .anon_struct => { if (opt_sema) |sema| _ = try sema.resolveTypeFields(ty); if (ty.containerLayout() != .Packed) { - return (try ty.abiSizeAdvanced(target, strat)).scalar * 8; + return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8; } var total: u64 = 0; for (ty.tupleFields().types) |field_ty| { - total += try bitSizeAdvanced(field_ty, target, opt_sema); + total += try bitSizeAdvanced(field_ty, mod, opt_sema); } return total; }, .enum_simple, .enum_full, .enum_nonexhaustive, .enum_numbered => { - var buffer: Payload.Bits = undefined; - const int_tag_ty = ty.intTagType(&buffer); - return try bitSizeAdvanced(int_tag_ty, target, opt_sema); + const int_tag_ty = ty.intTagType(); + return try bitSizeAdvanced(int_tag_ty, mod, opt_sema); }, .@"union", .union_safety_tagged, .union_tagged => { if (opt_sema) |sema| _ = try sema.resolveTypeFields(ty); if (ty.containerLayout() != .Packed) { - return (try ty.abiSizeAdvanced(target, strat)).scalar * 8; + return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8; } const union_obj = ty.cast(Payload.Union).?.data; assert(union_obj.haveFieldTypes()); var size: u64 = 0; for (union_obj.fields.values()) |field| { - size = @max(size, try bitSizeAdvanced(field.ty, target, opt_sema)); + size = @max(size, try bitSizeAdvanced(field.ty, mod, opt_sema)); } return size; }, .vector => { const payload = ty.castTag(.vector).?.data; - const elem_bit_size = try bitSizeAdvanced(payload.elem_type, target, opt_sema); + const elem_bit_size = try bitSizeAdvanced(payload.elem_type, mod, opt_sema); return elem_bit_size * payload.len; }, .array_u8 => return 8 * ty.castTag(.array_u8).?.data, .array_u8_sentinel_0 => return 8 * (ty.castTag(.array_u8_sentinel_0).?.data + 1), .array => { const payload = ty.castTag(.array).?.data; - const elem_size = 
std.math.max(payload.elem_type.abiAlignment(target), payload.elem_type.abiSize(target)); + const elem_size = std.math.max(payload.elem_type.abiAlignment(mod), payload.elem_type.abiSize(mod)); if (elem_size == 0 or payload.len == 0) return @as(u64, 0); - const elem_bit_size = try bitSizeAdvanced(payload.elem_type, target, opt_sema); + const elem_bit_size = try bitSizeAdvanced(payload.elem_type, mod, opt_sema); return (payload.len - 1) * 8 * elem_size + elem_bit_size; }, .array_sentinel => { const payload = ty.castTag(.array_sentinel).?.data; const elem_size = std.math.max( - payload.elem_type.abiAlignment(target), - payload.elem_type.abiSize(target), + payload.elem_type.abiAlignment(mod), + payload.elem_type.abiSize(mod), ); - const elem_bit_size = try bitSizeAdvanced(payload.elem_type, target, opt_sema); + const elem_bit_size = try bitSizeAdvanced(payload.elem_type, mod, opt_sema); return payload.len * 8 * elem_size + elem_bit_size; }, @@ -3757,12 +3871,10 @@ pub const Type = extern union { .error_set_merged, => return 16, // TODO revisit this when we have the concept of the error tag type - .int_signed, .int_unsigned => return ty.cast(Payload.Bits).?.data, - .optional, .error_union => { // Optionals and error unions are not packed so their bitsize // includes padding bits. - return (try abiSizeAdvanced(ty, target, strat)).scalar * 8; + return (try abiSizeAdvanced(ty, mod, strat)).scalar * 8; }, .atomic_order, @@ -3782,8 +3894,8 @@ pub const Type = extern union { /// Returns true if the type's layout is already resolved and it is safe /// to use `abiSize`, `abiAlignment` and `bitSize` on it. - pub fn layoutIsResolved(ty: Type) bool { - switch (ty.zigTypeTag()) { + pub fn layoutIsResolved(ty: Type, mod: *const Module) bool { + switch (ty.zigTypeTag(mod)) { .Struct => { if (ty.castTag(.@"struct")) |struct_ty| { return struct_ty.data.haveLayout(); @@ -3798,16 +3910,16 @@ pub const Type = extern union { }, .Array => { if (ty.arrayLenIncludingSentinel() == 0) return true; - return ty.childType().layoutIsResolved(); + return ty.childType().layoutIsResolved(mod); }, .Optional => { var buf: Type.Payload.ElemType = undefined; const payload_ty = ty.optionalChild(&buf); - return payload_ty.layoutIsResolved(); + return payload_ty.layoutIsResolved(mod); }, .ErrorUnion => { const payload_ty = ty.errorUnionPayload(); - return payload_ty.layoutIsResolved(); + return payload_ty.layoutIsResolved(mod); }, else => return true, } @@ -3994,13 +4106,13 @@ pub const Type = extern union { }; } - pub fn isAllowzeroPtr(self: Type) bool { + pub fn isAllowzeroPtr(self: Type, mod: *const Module) bool { return switch (self.tag()) { .pointer => { const payload = self.castTag(.pointer).?.data; return payload.@"allowzero"; }, - else => return self.zigTypeTag() == .Optional, + else => return self.zigTypeTag(mod) == .Optional, }; } @@ -4016,7 +4128,7 @@ pub const Type = extern union { }; } - pub fn isPtrAtRuntime(self: Type) bool { + pub fn isPtrAtRuntime(self: Type, mod: *const Module) bool { switch (self.tag()) { .c_const_pointer, .c_mut_pointer, @@ -4040,7 +4152,7 @@ pub const Type = extern union { .optional => { var buf: Payload.ElemType = undefined; const child_type = self.optionalChild(&buf); - if (child_type.zigTypeTag() != .Pointer) return false; + if (child_type.zigTypeTag(mod) != .Pointer) return false; const info = child_type.ptrInfo().data; switch (info.size) { .Slice, .C => return false, @@ -4054,15 +4166,15 @@ pub const Type = extern union { /// For pointer-like optionals, returns true, otherwise returns the 
allowzero property /// of pointers. - pub fn ptrAllowsZero(ty: Type) bool { - if (ty.isPtrLikeOptional()) { + pub fn ptrAllowsZero(ty: Type, mod: *const Module) bool { + if (ty.isPtrLikeOptional(mod)) { return true; } return ty.ptrInfo().data.@"allowzero"; } /// See also `isPtrLikeOptional`. - pub fn optionalReprIsPayload(ty: Type) bool { + pub fn optionalReprIsPayload(ty: Type, mod: *const Module) bool { switch (ty.tag()) { .optional_single_const_pointer, .optional_single_mut_pointer, @@ -4072,7 +4184,7 @@ pub const Type = extern union { .optional => { const child_ty = ty.castTag(.optional).?.data; - switch (child_ty.zigTypeTag()) { + switch (child_ty.zigTypeTag(mod)) { .Pointer => { const info = child_ty.ptrInfo().data; switch (info.size) { @@ -4093,7 +4205,7 @@ pub const Type = extern union { /// Returns true if the type is optional and would be lowered to a single pointer /// address value, using 0 for null. Note that this returns true for C pointers. - pub fn isPtrLikeOptional(self: Type) bool { + pub fn isPtrLikeOptional(self: Type, mod: *const Module) bool { switch (self.tag()) { .optional_single_const_pointer, .optional_single_mut_pointer, @@ -4103,7 +4215,7 @@ pub const Type = extern union { .optional => { const child_ty = self.castTag(.optional).?.data; - if (child_ty.zigTypeTag() != .Pointer) return false; + if (child_ty.zigTypeTag(mod) != .Pointer) return false; const info = child_ty.ptrInfo().data; switch (info.size) { .Slice, .C => return false, @@ -4166,7 +4278,7 @@ pub const Type = extern union { /// For [N]T, returns T. /// For []T, returns T. /// For anyframe->T, returns T. - pub fn elemType2(ty: Type) Type { + pub fn elemType2(ty: Type, mod: *const Module) Type { return switch (ty.tag()) { .vector => ty.castTag(.vector).?.data.elem_type, .array => ty.castTag(.array).?.data.elem_type, @@ -4181,7 +4293,7 @@ pub const Type = extern union { .single_const_pointer, .single_mut_pointer, - => ty.castPointer().?.data.shallowElemType(), + => ty.castPointer().?.data.shallowElemType(mod), .array_u8, .array_u8_sentinel_0, @@ -4197,7 +4309,7 @@ pub const Type = extern union { const info = ty.castTag(.pointer).?.data; const child_ty = info.pointee_type; if (info.size == .One) { - return child_ty.shallowElemType(); + return child_ty.shallowElemType(mod); } else { return child_ty; } @@ -4213,16 +4325,16 @@ pub const Type = extern union { }; } - fn shallowElemType(child_ty: Type) Type { - return switch (child_ty.zigTypeTag()) { + fn shallowElemType(child_ty: Type, mod: *const Module) Type { + return switch (child_ty.zigTypeTag(mod)) { .Array, .Vector => child_ty.childType(), else => child_ty, }; } /// For vectors, returns the element type. Otherwise returns self. 
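The predicates above all turn on whether address 0 is free to stand for `null`: ordinary single-item and many-item pointers qualify, so their optionals stay pointer-sized, while `allowzero` pointers do not, since 0 is a valid address for them. A test sketch of the distinction, assuming the current target lowers these the usual way:

    const std = @import("std");

    test "pointer-like optionals reuse address 0 as null" {
        try std.testing.expect(@sizeOf(?*u8) == @sizeOf(*u8));
        // An allowzero pointer may legitimately be 0, so its optional needs
        // an out-of-band flag and grows past pointer size.
        try std.testing.expect(@sizeOf(?*allowzero u8) > @sizeOf(*allowzero u8));
    }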
- pub fn scalarType(ty: Type) Type { - return switch (ty.zigTypeTag()) { + pub fn scalarType(ty: Type, mod: *const Module) Type { + return switch (ty.zigTypeTag(mod)) { .Vector => ty.childType(), else => ty, }; @@ -4360,19 +4472,19 @@ pub const Type = extern union { return union_obj.fields.getIndex(name); } - pub fn unionHasAllZeroBitFieldTypes(ty: Type) bool { - return ty.cast(Payload.Union).?.data.hasAllZeroBitFieldTypes(); + pub fn unionHasAllZeroBitFieldTypes(ty: Type, mod: *const Module) bool { + return ty.cast(Payload.Union).?.data.hasAllZeroBitFieldTypes(mod); } - pub fn unionGetLayout(ty: Type, target: Target) Module.Union.Layout { + pub fn unionGetLayout(ty: Type, mod: *const Module) Module.Union.Layout { switch (ty.tag()) { .@"union" => { const union_obj = ty.castTag(.@"union").?.data; - return union_obj.getLayout(target, false); + return union_obj.getLayout(mod, false); }, .union_safety_tagged, .union_tagged => { const union_obj = ty.cast(Payload.Union).?.data; - return union_obj.getLayout(target, true); + return union_obj.getLayout(mod, true); }, else => unreachable, } @@ -4441,8 +4553,8 @@ pub const Type = extern union { }; } - pub fn isError(ty: Type) bool { - return switch (ty.zigTypeTag()) { + pub fn isError(ty: Type, mod: *const Module) bool { + return switch (ty.zigTypeTag(mod)) { .ErrorUnion, .ErrorSet => true, else => false, }; @@ -4543,14 +4655,21 @@ pub const Type = extern union { } /// Returns true if and only if the type is a fixed-width integer. - pub fn isInt(self: Type) bool { - return self.isSignedInt() or self.isUnsignedInt(); + pub fn isInt(self: Type, mod: *const Module) bool { + return self.isSignedInt(mod) or self.isUnsignedInt(mod); } /// Returns true if and only if the type is a fixed-width, signed integer. - pub fn isSignedInt(self: Type) bool { - return switch (self.tag()) { - .int_signed, + pub fn isSignedInt(ty: Type, mod: *const Module) bool { + if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .int_type => |int_type| return int_type.signedness == .signed, + .simple_type => |s| return switch (s) { + .c_char, .isize, .c_short, .c_int, .c_long, .c_longlong => true, + else => false, + }, + else => return false, + }; + return switch (ty.tag()) { .i8, .isize, .c_char, @@ -4569,9 +4688,16 @@ pub const Type = extern union { } /// Returns true if and only if the type is a fixed-width, unsigned integer. - pub fn isUnsignedInt(self: Type) bool { - return switch (self.tag()) { - .int_unsigned, + pub fn isUnsignedInt(ty: Type, mod: *const Module) bool { + if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .int_type => |int_type| return int_type.signedness == .unsigned, + .simple_type => |s| return switch (s) { + .usize, .c_ushort, .c_uint, .c_ulong, .c_ulonglong => true, + else => false, + }, + else => return false, + }; + return switch (ty.tag()) { .usize, .c_ushort, .c_uint, @@ -4592,8 +4718,8 @@ pub const Type = extern union { /// Returns true for integers, enums, error sets, and packed structs. /// If this function returns true, then intInfo() can be called on the type. - pub fn isAbiInt(ty: Type) bool { - return switch (ty.zigTypeTag()) { + pub fn isAbiInt(ty: Type, mod: *const Module) bool { + return switch (ty.zigTypeTag(mod)) { .Int, .Enum, .ErrorSet => true, .Struct => ty.containerLayout() == .Packed, else => false, @@ -4601,17 +4727,26 @@ pub const Type = extern union { } /// Asserts the type is an integer, enum, error set, or vector of one of them. 
- pub fn intInfo(self: Type, target: Target) std.builtin.Type.Int { - var ty = self; + pub fn intInfo(starting_ty: Type, mod: *const Module) InternPool.Key.IntType { + const target = mod.getTarget(); + var ty = starting_ty; + + if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .int_type => |int_type| return int_type, + .ptr_type => @panic("TODO"), + .array_type => @panic("TODO"), + .vector_type => @panic("TODO"), + .optional_type => @panic("TODO"), + .error_union_type => @panic("TODO"), + .simple_type => @panic("TODO"), + .struct_type => unreachable, + .simple_value => unreachable, + .extern_func => unreachable, + .int => unreachable, + .enum_tag => unreachable, // it's a value, not a type + }; + while (true) switch (ty.tag()) { - .int_unsigned => return .{ - .signedness = .unsigned, - .bits = ty.castTag(.int_unsigned).?.data, - }, - .int_signed => return .{ - .signedness = .signed, - .bits = ty.castTag(.int_signed).?.data, - }, .u1 => return .{ .signedness = .unsigned, .bits = 1 }, .u8 => return .{ .signedness = .unsigned, .bits = 8 }, .i8 => return .{ .signedness = .signed, .bits = 8 }, @@ -4729,32 +4864,14 @@ pub const Type = extern union { /// Asserts the type is a function. pub fn fnParamLen(self: Type) usize { - return switch (self.tag()) { - .fn_noreturn_no_args => 0, - .fn_void_no_args => 0, - .fn_naked_noreturn_no_args => 0, - .fn_ccc_void_no_args => 0, - .function => self.castTag(.function).?.data.param_types.len, - - else => unreachable, - }; + return self.castTag(.function).?.data.param_types.len; } /// Asserts the type is a function. The length of the slice must be at least the length /// given by `fnParamLen`. pub fn fnParamTypes(self: Type, types: []Type) void { - switch (self.tag()) { - .fn_noreturn_no_args => return, - .fn_void_no_args => return, - .fn_naked_noreturn_no_args => return, - .fn_ccc_void_no_args => return, - .function => { - const payload = self.castTag(.function).?.data; - @memcpy(types[0..payload.param_types.len], payload.param_types); - }, - - else => unreachable, - } + const payload = self.castTag(.function).?.data; + @memcpy(types[0..payload.param_types.len], payload.param_types); } /// Asserts the type is a function. @@ -4769,33 +4886,15 @@ pub const Type = extern union { } } - /// Asserts the type is a function. - pub fn fnReturnType(self: Type) Type { - return switch (self.tag()) { - .fn_noreturn_no_args => Type.initTag(.noreturn), - .fn_naked_noreturn_no_args => Type.initTag(.noreturn), - - .fn_void_no_args, - .fn_ccc_void_no_args, - => Type.initTag(.void), - - .function => self.castTag(.function).?.data.return_type, - - else => unreachable, - }; + /// Asserts the type is a function or a function pointer. + pub fn fnReturnType(ty: Type) Type { + const fn_ty = if (ty.castPointer()) |p| p.data else ty; + return fn_ty.castTag(.function).?.data.return_type; } /// Asserts the type is a function. pub fn fnCallingConvention(self: Type) std.builtin.CallingConvention { - return switch (self.tag()) { - .fn_noreturn_no_args => .Unspecified, - .fn_void_no_args => .Unspecified, - .fn_naked_noreturn_no_args => .Naked, - .fn_ccc_void_no_args => .C, - .function => self.castTag(.function).?.data.cc, - - else => unreachable, - }; + return self.castTag(.function).?.data.cc; } /// Asserts the type is a function. 
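With this change `intInfo` answers integer types straight from the interned `int_type` key, and first reduces everything else (enums, error sets, packed structs) to an equivalent integer. The same signedness-plus-bits view is exposed at the language level; a quick test, assuming the 0.11-era capitalized `@typeInfo` field names used elsewhere in this file:

    const std = @import("std");

    test "integer info, as surfaced by @typeInfo" {
        const info = @typeInfo(u7).Int;
        try std.testing.expect(info.signedness == .unsigned and info.bits == 7);
        // Enums report through their integer tag type, mirroring the prongs above.
        const E = enum(u3) { a, b };
        try std.testing.expect(@typeInfo(@typeInfo(E).Enum.tag_type).Int.bits == 3);
    }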
@@ -4809,15 +4908,15 @@ pub const Type = extern union {
         };
     }
 
-    pub fn isValidParamType(self: Type) bool {
-        return switch (self.zigTypeTagOrPoison() catch return true) {
+    pub fn isValidParamType(self: Type, mod: *const Module) bool {
+        return switch (self.zigTypeTagOrPoison(mod) catch return true) {
             .Undefined, .Null, .Opaque, .NoReturn => false,
             else => true,
         };
     }
 
-    pub fn isValidReturnType(self: Type) bool {
-        return switch (self.zigTypeTagOrPoison() catch return true) {
+    pub fn isValidReturnType(self: Type, mod: *const Module) bool {
+        return switch (self.zigTypeTagOrPoison(mod) catch return true) {
             .Undefined, .Null, .Opaque => false,
             else => true,
         };
     }
@@ -4825,87 +4924,43 @@ pub const Type = extern union {
 
     /// Asserts the type is a function.
     pub fn fnIsVarArgs(self: Type) bool {
-        return switch (self.tag()) {
-            .fn_noreturn_no_args => false,
-            .fn_void_no_args => false,
-            .fn_naked_noreturn_no_args => false,
-            .fn_ccc_void_no_args => false,
-            .function => self.castTag(.function).?.data.is_var_args,
-
-            else => unreachable,
-        };
+        return self.castTag(.function).?.data.is_var_args;
     }
 
     pub fn fnInfo(ty: Type) Payload.Function.Data {
-        return switch (ty.tag()) {
-            .fn_noreturn_no_args => .{
-                .param_types = &.{},
-                .comptime_params = undefined,
-                .return_type = initTag(.noreturn),
-                .cc = .Unspecified,
-                .alignment = 0,
-                .is_var_args = false,
-                .is_generic = false,
-                .is_noinline = false,
-                .align_is_generic = false,
-                .cc_is_generic = false,
-                .section_is_generic = false,
-                .addrspace_is_generic = false,
-                .noalias_bits = 0,
-            },
-            .fn_void_no_args => .{
-                .param_types = &.{},
-                .comptime_params = undefined,
-                .return_type = initTag(.void),
-                .cc = .Unspecified,
-                .alignment = 0,
-                .is_var_args = false,
-                .is_generic = false,
-                .is_noinline = false,
-                .align_is_generic = false,
-                .cc_is_generic = false,
-                .section_is_generic = false,
-                .addrspace_is_generic = false,
-                .noalias_bits = 0,
-            },
-            .fn_naked_noreturn_no_args => .{
-                .param_types = &.{},
-                .comptime_params = undefined,
-                .return_type = initTag(.noreturn),
-                .cc = .Naked,
-                .alignment = 0,
-                .is_var_args = false,
-                .is_generic = false,
-                .is_noinline = false,
-                .align_is_generic = false,
-                .cc_is_generic = false,
-                .section_is_generic = false,
-                .addrspace_is_generic = false,
-                .noalias_bits = 0,
-            },
-            .fn_ccc_void_no_args => .{
-                .param_types = &.{},
-                .comptime_params = undefined,
-                .return_type = initTag(.void),
-                .cc = .C,
-                .alignment = 0,
-                .is_var_args = false,
-                .is_generic = false,
-                .is_noinline = false,
-                .align_is_generic = false,
-                .cc_is_generic = false,
-                .section_is_generic = false,
-                .addrspace_is_generic = false,
-                .noalias_bits = 0,
-            },
-            .function => ty.castTag(.function).?.data,
-
-            else => unreachable,
-        };
+        return ty.castTag(.function).?.data;
     }
 
-    pub fn isNumeric(self: Type) bool {
-        return switch (self.tag()) {
+    pub fn isNumeric(ty: Type, mod: *const Module) bool {
+        if (ty.ip_index != .none) return switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+            .int_type => true,
+            .simple_type => |s| return switch (s) {
+                .f16,
+                .f32,
+                .f64,
+                .f80,
+                .f128,
+                .c_longdouble,
+                .comptime_int,
+                .comptime_float,
+                .usize,
+                .isize,
+                .c_char,
+                .c_short,
+                .c_ushort,
+                .c_int,
+                .c_uint,
+                .c_long,
+                .c_ulong,
+                .c_longlong,
+                .c_ulonglong,
+                => true,
+
+                else => false,
+            },
+            else => false,
+        };
        return switch (ty.tag()) {
             .f16,
             .f32,
             .f64,
@@ -4937,8 +4992,6 @@ pub const Type = extern union {
             .c_ulong,
             .c_longlong,
             .c_ulonglong,
-            .int_unsigned,
-            .int_signed,
             => true,
 
             else => false,
@@ -4947,8 +5000,30 @@ pub const Type = extern union {
 
     /// During semantic analysis, instead call `Sema.typeHasOnePossibleValue` which
     /// resolves field types rather than asserting they are already resolved.
-    pub fn onePossibleValue(starting_type: Type) ?Value {
+    pub fn onePossibleValue(starting_type: Type, mod: *const Module) ?Value {
         var ty = starting_type;
+
+        if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+            .int_type => |int_type| {
+                if (int_type.bits == 0) {
+                    return Value.zero;
+                } else {
+                    return null;
+                }
+            },
+            .ptr_type => @panic("TODO"),
+            .array_type => @panic("TODO"),
+            .vector_type => @panic("TODO"),
+            .optional_type => @panic("TODO"),
+            .error_union_type => @panic("TODO"),
+            .simple_type => @panic("TODO"),
+            .struct_type => @panic("TODO"),
+            .simple_value => unreachable,
+            .extern_func => unreachable,
+            .int => unreachable,
+            .enum_tag => unreachable, // it's a value, not a type
+        };
+
         while (true) switch (ty.tag()) {
             .f16,
             .f32,
@@ -4988,10 +5063,6 @@ pub const Type = extern union {
             .error_set_single,
             .error_set,
             .error_set_merged,
-            .fn_noreturn_no_args,
-            .fn_void_no_args,
-            .fn_naked_noreturn_no_args,
-            .fn_ccc_void_no_args,
             .function,
             .single_const_pointer_to_comptime_int,
             .array_sentinel,
@@ -5047,7 +5118,7 @@ pub const Type = extern union {
                 assert(s.haveFieldTypes());
                 for (s.fields.values()) |field| {
                     if (field.is_comptime) continue;
-                    if (field.ty.onePossibleValue() != null) continue;
+                    if (field.ty.onePossibleValue(mod) != null) continue;
                     return null;
                 }
                 return Value.initTag(.empty_struct_value);
@@ -5058,7 +5129,7 @@ pub const Type = extern union {
                 for (tuple.values, 0..) |val, i| {
                     const is_comptime = val.tag() != .unreachable_value;
                     if (is_comptime) continue;
-                    if (tuple.types[i].onePossibleValue() != null) continue;
+                    if (tuple.types[i].onePossibleValue(mod) != null) continue;
                     return null;
                 }
                 return Value.initTag(.empty_struct_value);
@@ -5067,7 +5138,7 @@ pub const Type = extern union {
             .enum_numbered => {
                 const enum_numbered = ty.castTag(.enum_numbered).?.data;
                 // An explicit tag type is always provided for enum_numbered.
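The new `int_type` prong above is the whole story for integers: a 0-bit integer folds to `Value.zero`, its only possible value, while any wider integer yields `null`. The same fact, observed from user code:

    const std = @import("std");

    test "zero-bit integers have exactly one possible value" {
        try std.testing.expect(@sizeOf(u0) == 0);
        const only: u0 = 0; // the single value; no runtime bits are needed
        try std.testing.expectEqual(@as(u0, 0), only);
    }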
- if (enum_numbered.tag_ty.hasRuntimeBits()) { + if (enum_numbered.tag_ty.hasRuntimeBits(mod)) { return null; } assert(enum_numbered.fields.count() == 1); @@ -5075,7 +5146,7 @@ pub const Type = extern union { }, .enum_full => { const enum_full = ty.castTag(.enum_full).?.data; - if (enum_full.tag_ty.hasRuntimeBits()) { + if (enum_full.tag_ty.hasRuntimeBits(mod)) { return null; } switch (enum_full.fields.count()) { @@ -5098,7 +5169,7 @@ pub const Type = extern union { }, .enum_nonexhaustive => { const tag_ty = ty.castTag(.enum_nonexhaustive).?.data.tag_ty; - if (!tag_ty.hasRuntimeBits()) { + if (!tag_ty.hasRuntimeBits(mod)) { return Value.zero; } else { return null; @@ -5106,10 +5177,10 @@ pub const Type = extern union { }, .@"union", .union_safety_tagged, .union_tagged => { const union_obj = ty.cast(Payload.Union).?.data; - const tag_val = union_obj.tag_ty.onePossibleValue() orelse return null; + const tag_val = union_obj.tag_ty.onePossibleValue(mod) orelse return null; if (union_obj.fields.count() == 0) return Value.initTag(.unreachable_value); const only_field = union_obj.fields.values()[0]; - const val_val = only_field.ty.onePossibleValue() orelse return null; + const val_val = only_field.ty.onePossibleValue(mod) orelse return null; _ = tag_val; _ = val_val; return Value.initTag(.empty_struct_value); @@ -5121,17 +5192,10 @@ pub const Type = extern union { .null => return Value.initTag(.null_value), .undefined => return Value.initTag(.undef), - .int_unsigned, .int_signed => { - if (ty.cast(Payload.Bits).?.data == 0) { - return Value.zero; - } else { - return null; - } - }, .vector, .array, .array_u8 => { if (ty.arrayLen() == 0) return Value.initTag(.empty_array); - if (ty.elemType().onePossibleValue() != null) + if (ty.elemType().onePossibleValue(mod) != null) return Value.initTag(.the_only_possible_value); return null; }, @@ -5146,7 +5210,22 @@ pub const Type = extern union { /// resolves field types rather than asserting they are already resolved. /// TODO merge these implementations together with the "advanced" pattern seen /// elsewhere in this file. - pub fn comptimeOnly(ty: Type) bool { + pub fn comptimeOnly(ty: Type, mod: *const Module) bool { + if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .int_type => return false, + .ptr_type => @panic("TODO"), + .array_type => @panic("TODO"), + .vector_type => @panic("TODO"), + .optional_type => @panic("TODO"), + .error_union_type => @panic("TODO"), + .simple_type => @panic("TODO"), + .struct_type => @panic("TODO"), + .simple_value => unreachable, + .extern_func => unreachable, + .int => unreachable, + .enum_tag => unreachable, // it's a value, not a type + }; + return switch (ty.tag()) { .u1, .u8, @@ -5211,8 +5290,6 @@ pub const Type = extern union { .generic_poison, .array_u8, .array_u8_sentinel_0, - .int_signed, - .int_unsigned, .enum_simple, => false, @@ -5223,10 +5300,6 @@ pub const Type = extern union { .enum_literal, .type_info, // These are function bodies, not function pointers. 
- .fn_noreturn_no_args, - .fn_void_no_args, - .fn_naked_noreturn_no_args, - .fn_ccc_void_no_args, .function, => true, @@ -5236,7 +5309,7 @@ pub const Type = extern union { .array, .array_sentinel, .vector, - => return ty.childType().comptimeOnly(), + => return ty.childType().comptimeOnly(mod), .pointer, .single_const_pointer, @@ -5249,10 +5322,10 @@ pub const Type = extern union { .mut_slice, => { const child_ty = ty.childType(); - if (child_ty.zigTypeTag() == .Fn) { + if (child_ty.zigTypeTag(mod) == .Fn) { return false; } else { - return child_ty.comptimeOnly(); + return child_ty.comptimeOnly(mod); } }, @@ -5261,14 +5334,14 @@ pub const Type = extern union { .optional_single_const_pointer, => { var buf: Type.Payload.ElemType = undefined; - return ty.optionalChild(&buf).comptimeOnly(); + return ty.optionalChild(&buf).comptimeOnly(mod); }, .tuple, .anon_struct => { const tuple = ty.tupleFields(); for (tuple.types, 0..) |field_ty, i| { const have_comptime_val = tuple.values[i].tag() != .unreachable_value; - if (!have_comptime_val and field_ty.comptimeOnly()) return true; + if (!have_comptime_val and field_ty.comptimeOnly(mod)) return true; } return false; }, @@ -5301,48 +5374,48 @@ pub const Type = extern union { } }, - .error_union => return ty.errorUnionPayload().comptimeOnly(), + .error_union => return ty.errorUnionPayload().comptimeOnly(mod), .anyframe_T => { const child_ty = ty.castTag(.anyframe_T).?.data; - return child_ty.comptimeOnly(); + return child_ty.comptimeOnly(mod); }, .enum_numbered => { const tag_ty = ty.castTag(.enum_numbered).?.data.tag_ty; - return tag_ty.comptimeOnly(); + return tag_ty.comptimeOnly(mod); }, .enum_full, .enum_nonexhaustive => { const tag_ty = ty.cast(Type.Payload.EnumFull).?.data.tag_ty; - return tag_ty.comptimeOnly(); + return tag_ty.comptimeOnly(mod); }, }; } - pub fn isArrayOrVector(ty: Type) bool { - return switch (ty.zigTypeTag()) { + pub fn isArrayOrVector(ty: Type, mod: *const Module) bool { + return switch (ty.zigTypeTag(mod)) { .Array, .Vector => true, else => false, }; } - pub fn isIndexable(ty: Type) bool { - return switch (ty.zigTypeTag()) { + pub fn isIndexable(ty: Type, mod: *const Module) bool { + return switch (ty.zigTypeTag(mod)) { .Array, .Vector => true, .Pointer => switch (ty.ptrSize()) { .Slice, .Many, .C => true, - .One => ty.elemType().zigTypeTag() == .Array, + .One => ty.elemType().zigTypeTag(mod) == .Array, }, .Struct => ty.isTuple(), else => false, }; } - pub fn indexableHasLen(ty: Type) bool { - return switch (ty.zigTypeTag()) { + pub fn indexableHasLen(ty: Type, mod: *const Module) bool { + return switch (ty.zigTypeTag(mod)) { .Array, .Vector => true, .Pointer => switch (ty.ptrSize()) { .Many, .C => false, .Slice => true, - .One => ty.elemType().zigTypeTag() == .Array, + .One => ty.elemType().zigTypeTag(mod) == .Array, }, .Struct => ty.isTuple(), else => false, @@ -5366,19 +5439,19 @@ pub const Type = extern union { } // Works for vectors and vectors of integers. - pub fn minInt(ty: Type, arena: Allocator, target: Target) !Value { - const scalar = try minIntScalar(ty.scalarType(), arena, target); - if (ty.zigTypeTag() == .Vector and scalar.tag() != .the_only_possible_value) { + pub fn minInt(ty: Type, arena: Allocator, mod: *const Module) !Value { + const scalar = try minIntScalar(ty.scalarType(mod), arena, mod); + if (ty.zigTypeTag(mod) == .Vector and scalar.tag() != .the_only_possible_value) { return Value.Tag.repeated.create(arena, scalar); } else { return scalar; } } - /// Asserts that self.zigTypeTag() == .Int. 
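The bounds computed by `minInt` above and the scalar helpers that follow come straight from `intInfo`: for a signed `iN` the minimum is -2^(N-1), for an unsigned `uN` the maximum is 2^N - 1, and 0-bit integers short-circuit to their only value. The same arithmetic is available from `std.math`:

    const std = @import("std");

    test "integer bounds from signedness and bit count" {
        try std.testing.expect(std.math.minInt(i8) == -128); // -(2^(8-1))
        try std.testing.expect(std.math.maxInt(u5) == 31); // 2^5 - 1
        // 0-bit integers short-circuit to their only possible value.
        try std.testing.expect(std.math.maxInt(u0) == 0);
    }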
- pub fn minIntScalar(ty: Type, arena: Allocator, target: Target) !Value { - assert(ty.zigTypeTag() == .Int); - const info = ty.intInfo(target); + /// Asserts that self.zigTypeTag(mod) == .Int. + pub fn minIntScalar(ty: Type, arena: Allocator, mod: *const Module) !Value { + assert(ty.zigTypeTag(mod) == .Int); + const info = ty.intInfo(mod); if (info.bits == 0) { return Value.initTag(.the_only_possible_value); @@ -5405,9 +5478,9 @@ pub const Type = extern union { } // Works for vectors and vectors of integers. - pub fn maxInt(ty: Type, arena: Allocator, target: Target) !Value { - const scalar = try maxIntScalar(ty.scalarType(), arena, target); - if (ty.zigTypeTag() == .Vector and scalar.tag() != .the_only_possible_value) { + pub fn maxInt(ty: Type, arena: Allocator, mod: *const Module) !Value { + const scalar = try maxIntScalar(ty.scalarType(mod), arena, mod); + if (ty.zigTypeTag(mod) == .Vector and scalar.tag() != .the_only_possible_value) { return Value.Tag.repeated.create(arena, scalar); } else { return scalar; @@ -5415,9 +5488,9 @@ pub const Type = extern union { } /// Asserts that self.zigTypeTag() == .Int. - pub fn maxIntScalar(self: Type, arena: Allocator, target: Target) !Value { - assert(self.zigTypeTag() == .Int); - const info = self.intInfo(target); + pub fn maxIntScalar(self: Type, arena: Allocator, mod: *const Module) !Value { + assert(self.zigTypeTag(mod) == .Int); + const info = self.intInfo(mod); if (info.bits == 0) { return Value.initTag(.the_only_possible_value); @@ -5452,21 +5525,25 @@ pub const Type = extern union { } /// Asserts the type is an enum or a union. - pub fn intTagType(ty: Type, buffer: *Payload.Bits) Type { + pub fn intTagType(ty: Type) Type { switch (ty.tag()) { .enum_full, .enum_nonexhaustive => return ty.cast(Payload.EnumFull).?.data.tag_ty, .enum_numbered => return ty.castTag(.enum_numbered).?.data.tag_ty, .enum_simple => { - const enum_simple = ty.castTag(.enum_simple).?.data; - const field_count = enum_simple.fields.count(); - const bits: u16 = if (field_count == 0) 0 else std.math.log2_int_ceil(usize, field_count); - buffer.* = .{ - .base = .{ .tag = .int_unsigned }, - .data = bits, - }; - return Type.initPayload(&buffer.base); + @panic("TODO move enum_simple to use the intern pool"); + //const enum_simple = ty.castTag(.enum_simple).?.data; + //const field_count = enum_simple.fields.count(); + //const bits: u16 = if (field_count == 0) 0 else std.math.log2_int_ceil(usize, field_count); + //buffer.* = .{ + // .base = .{ .tag = .int_unsigned }, + // .data = bits, + //}; + //return Type.initPayload(&buffer.base); + }, + .union_tagged => { + @panic("TODO move union_tagged to use the intern pool"); + //return ty.castTag(.union_tagged).?.data.tag_ty.intTagType(buffer), }, - .union_tagged => return ty.castTag(.union_tagged).?.data.tag_ty.intTagType(buffer), else => unreachable, } } @@ -5566,7 +5643,7 @@ pub const Type = extern union { }; const end_val = Value.initPayload(&end_payload.base); if (int_val.compareAll(.gte, end_val, int_ty, m)) return null; - return @intCast(usize, int_val.toUnsignedInt(m.getTarget())); + return @intCast(usize, int_val.toUnsignedInt(m)); } }; switch (ty.tag()) { @@ -5598,11 +5675,7 @@ pub const Type = extern union { const enum_simple = ty.castTag(.enum_simple).?.data; const fields_len = enum_simple.fields.count(); const bits = std.math.log2_int_ceil(usize, fields_len); - var buffer: Payload.Bits = .{ - .base = .{ .tag = .int_unsigned }, - .data = bits, - }; - const tag_ty = Type.initPayload(&buffer.base); + const tag_ty = 
mod.intType(.unsigned, bits) catch @panic("TODO: handle OOM here"); return S.fieldWithRange(tag_ty, enum_tag, fields_len, mod); }, .atomic_order, @@ -5675,19 +5748,19 @@ pub const Type = extern union { } } - pub fn structFieldAlign(ty: Type, index: usize, target: Target) u32 { + pub fn structFieldAlign(ty: Type, index: usize, mod: *const Module) u32 { switch (ty.tag()) { .@"struct" => { const struct_obj = ty.castTag(.@"struct").?.data; assert(struct_obj.layout != .Packed); - return struct_obj.fields.values()[index].alignment(target, struct_obj.layout); + return struct_obj.fields.values()[index].alignment(mod, struct_obj.layout); }, .@"union", .union_safety_tagged, .union_tagged => { const union_obj = ty.cast(Payload.Union).?.data; - return union_obj.fields.values()[index].normalAlignment(target); + return union_obj.fields.values()[index].normalAlignment(mod); }, - .tuple => return ty.castTag(.tuple).?.data.types[index].abiAlignment(target), - .anon_struct => return ty.castTag(.anon_struct).?.data.types[index].abiAlignment(target), + .tuple => return ty.castTag(.tuple).?.data.types[index].abiAlignment(mod), + .anon_struct => return ty.castTag(.anon_struct).?.data.types[index].abiAlignment(mod), else => unreachable, } } @@ -5710,7 +5783,7 @@ pub const Type = extern union { } } - pub fn structFieldValueComptime(ty: Type, index: usize) ?Value { + pub fn structFieldValueComptime(ty: Type, mod: *const Module, index: usize) ?Value { switch (ty.tag()) { .@"struct" => { const struct_obj = ty.castTag(.@"struct").?.data; @@ -5718,14 +5791,14 @@ pub const Type = extern union { if (field.is_comptime) { return field.default_val; } else { - return field.ty.onePossibleValue(); + return field.ty.onePossibleValue(mod); } }, .tuple => { const tuple = ty.castTag(.tuple).?.data; const val = tuple.values[index]; if (val.tag() == .unreachable_value) { - return tuple.types[index].onePossibleValue(); + return tuple.types[index].onePossibleValue(mod); } else { return val; } @@ -5734,7 +5807,7 @@ pub const Type = extern union { const anon_struct = ty.castTag(.anon_struct).?.data; const val = anon_struct.values[index]; if (val.tag() == .unreachable_value) { - return anon_struct.types[index].onePossibleValue(); + return anon_struct.types[index].onePossibleValue(mod); } else { return val; } @@ -5765,7 +5838,7 @@ pub const Type = extern union { } } - pub fn packedStructFieldByteOffset(ty: Type, field_index: usize, target: Target) u32 { + pub fn packedStructFieldByteOffset(ty: Type, field_index: usize, mod: *const Module) u32 { const struct_obj = ty.castTag(.@"struct").?.data; assert(struct_obj.layout == .Packed); comptime assert(Type.packed_struct_layout_version == 2); @@ -5774,9 +5847,9 @@ pub const Type = extern union { var elem_size_bits: u16 = undefined; var running_bits: u16 = 0; for (struct_obj.fields.values(), 0..) 
|f, i| { - if (!f.ty.hasRuntimeBits()) continue; + if (!f.ty.hasRuntimeBits(mod)) continue; - const field_bits = @intCast(u16, f.ty.bitSize(target)); + const field_bits = @intCast(u16, f.ty.bitSize(mod)); if (i == field_index) { bit_offset = running_bits; elem_size_bits = field_bits; @@ -5797,9 +5870,10 @@ pub const Type = extern union { offset: u64 = 0, big_align: u32 = 0, struct_obj: *Module.Struct, - target: Target, + module: *const Module, pub fn next(it: *StructOffsetIterator) ?FieldOffset { + const mod = it.module; var i = it.field; if (it.struct_obj.fields.count() <= i) return null; @@ -5811,35 +5885,35 @@ pub const Type = extern union { const field = it.struct_obj.fields.values()[i]; it.field += 1; - if (field.is_comptime or !field.ty.hasRuntimeBits()) { + if (field.is_comptime or !field.ty.hasRuntimeBits(mod)) { return FieldOffset{ .field = i, .offset = it.offset }; } - const field_align = field.alignment(it.target, it.struct_obj.layout); + const field_align = field.alignment(mod, it.struct_obj.layout); it.big_align = @max(it.big_align, field_align); const field_offset = std.mem.alignForwardGeneric(u64, it.offset, field_align); - it.offset = field_offset + field.ty.abiSize(it.target); + it.offset = field_offset + field.ty.abiSize(mod); return FieldOffset{ .field = i, .offset = field_offset }; } }; /// Get an iterator that iterates over all the struct field, returning the field and /// offset of that field. Asserts that the type is a non-packed struct. - pub fn iterateStructOffsets(ty: Type, target: Target) StructOffsetIterator { + pub fn iterateStructOffsets(ty: Type, mod: *const Module) StructOffsetIterator { const struct_obj = ty.castTag(.@"struct").?.data; assert(struct_obj.haveLayout()); assert(struct_obj.layout != .Packed); - return .{ .struct_obj = struct_obj, .target = target }; + return .{ .struct_obj = struct_obj, .module = mod }; } /// Supports structs and unions. - pub fn structFieldOffset(ty: Type, index: usize, target: Target) u64 { + pub fn structFieldOffset(ty: Type, index: usize, mod: *const Module) u64 { switch (ty.tag()) { .@"struct" => { const struct_obj = ty.castTag(.@"struct").?.data; assert(struct_obj.haveLayout()); assert(struct_obj.layout != .Packed); - var it = ty.iterateStructOffsets(target); + var it = ty.iterateStructOffsets(mod); while (it.next()) |field_offset| { if (index == field_offset.field) return field_offset.offset; @@ -5856,17 +5930,17 @@ pub const Type = extern union { for (tuple.types, 0..) 
|field_ty, i| { const field_val = tuple.values[i]; - if (field_val.tag() != .unreachable_value or !field_ty.hasRuntimeBits()) { + if (field_val.tag() != .unreachable_value or !field_ty.hasRuntimeBits(mod)) { // comptime field if (i == index) return offset; continue; } - const field_align = field_ty.abiAlignment(target); + const field_align = field_ty.abiAlignment(mod); big_align = @max(big_align, field_align); offset = std.mem.alignForwardGeneric(u64, offset, field_align); if (i == index) return offset; - offset += field_ty.abiSize(target); + offset += field_ty.abiSize(mod); } offset = std.mem.alignForwardGeneric(u64, offset, @max(big_align, 1)); return offset; @@ -5875,7 +5949,7 @@ pub const Type = extern union { .@"union" => return 0, .union_safety_tagged, .union_tagged => { const union_obj = ty.cast(Payload.Union).?.data; - const layout = union_obj.getLayout(target, true); + const layout = union_obj.getLayout(mod, true); if (layout.tag_align >= layout.payload_align) { // {Tag, Payload} return std.mem.alignForwardGeneric(u64, layout.tag_size, layout.payload_align); @@ -6050,10 +6124,6 @@ pub const Type = extern union { manyptr_u8, manyptr_const_u8, manyptr_const_u8_sentinel_0, - fn_noreturn_no_args, - fn_void_no_args, - fn_naked_noreturn_no_args, - fn_ccc_void_no_args, single_const_pointer_to_comptime_int, const_slice_u8, const_slice_u8_sentinel_0, @@ -6087,8 +6157,6 @@ pub const Type = extern union { c_mut_pointer, const_slice, mut_slice, - int_signed, - int_unsigned, function, optional, optional_single_mut_pointer, @@ -6157,10 +6225,6 @@ pub const Type = extern union { .enum_literal, .null, .undefined, - .fn_noreturn_no_args, - .fn_void_no_args, - .fn_naked_noreturn_no_args, - .fn_ccc_void_no_args, .single_const_pointer_to_comptime_int, .anyerror_void_error_union, .const_slice_u8, @@ -6204,10 +6268,6 @@ pub const Type = extern union { .anyframe_T, => Payload.ElemType, - .int_signed, - .int_unsigned, - => Payload.Bits, - .error_set => Payload.ErrorSet, .error_set_inferred => Payload.ErrorSetInferred, .error_set_merged => Payload.ErrorSetMerged, @@ -6232,7 +6292,10 @@ pub const Type = extern union { pub fn init(comptime t: Tag) file_struct.Type { comptime std.debug.assert(@enumToInt(t) < Tag.no_payload_count); - return .{ .tag_if_small_enough = t }; + return file_struct.Type{ + .ip_index = .none, + .legacy = .{ .tag_if_small_enough = t }, + }; } pub fn create(comptime t: Tag, ally: Allocator, data: Data(t)) error{OutOfMemory}!file_struct.Type { @@ -6241,7 +6304,10 @@ pub const Type = extern union { .base = .{ .tag = t }, .data = data, }; - return file_struct.Type{ .ptr_otherwise = &p.base }; + return file_struct.Type{ + .ip_index = .none, + .legacy = .{ .ptr_otherwise = &p.base }, + }; } pub fn Data(comptime t: Tag) type { @@ -6422,10 +6488,9 @@ pub const Type = extern union { runtime = std.math.maxInt(u32) - 1, _, }; - - pub fn alignment(data: Data, target: Target) u32 { + pub fn alignment(data: Data, mod: *const Module) u32 { if (data.@"align" != 0) return data.@"align"; - return abiAlignment(data.pointee_type, target); + return abiAlignment(data.pointee_type, mod); } }; }; @@ -6537,12 +6602,11 @@ pub const Type = extern union { pub const @"anyerror" = initTag(.anyerror); pub const @"anyopaque" = initTag(.anyopaque); pub const @"null" = initTag(.null); + pub const @"noreturn" = initTag(.noreturn); pub const err_int = Type.u16; pub fn ptr(arena: Allocator, mod: *Module, data: Payload.Pointer.Data) !Type { - const target = mod.getTarget(); - var d = data; if (d.size == .C) { @@ -6554,8 
+6618,8 @@ pub const Type = extern union { // pointee type needs to be resolved more, that needs to be done before calling // this ptr() function. if (d.@"align" != 0) canonicalize: { - if (!d.pointee_type.layoutIsResolved()) break :canonicalize; - if (d.@"align" == d.pointee_type.abiAlignment(target)) { + if (!d.pointee_type.layoutIsResolved(mod)) break :canonicalize; + if (d.@"align" == d.pointee_type.abiAlignment(mod)) { d.@"align" = 0; } } @@ -6565,7 +6629,7 @@ pub const Type = extern union { // needs to be resolved before calling this ptr() function. if (d.host_size != 0) { assert(d.bit_offset < d.host_size * 8); - if (d.host_size * 8 == d.pointee_type.bitSize(target)) { + if (d.host_size * 8 == d.pointee_type.bitSize(mod)) { assert(d.bit_offset == 0); d.host_size = 0; } @@ -6676,7 +6740,7 @@ pub const Type = extern union { payload: Type, mod: *Module, ) Allocator.Error!Type { - assert(error_set.zigTypeTag() == .ErrorSet); + assert(error_set.zigTypeTag(mod) == .ErrorSet); if (error_set.eql(Type.anyerror, mod) and payload.eql(Type.void, mod)) { @@ -6696,83 +6760,6 @@ pub const Type = extern union { return @intCast(u16, base + @boolToInt(upper < max)); } - pub fn smallestUnsignedInt(arena: Allocator, max: u64) !Type { - const bits = smallestUnsignedBits(max); - return intWithBits(arena, false, bits); - } - - pub fn intWithBits(arena: Allocator, sign: bool, bits: u16) !Type { - return if (sign) switch (bits) { - 8 => initTag(.i8), - 16 => initTag(.i16), - 32 => initTag(.i32), - 64 => initTag(.i64), - else => return Tag.int_signed.create(arena, bits), - } else switch (bits) { - 1 => initTag(.u1), - 8 => initTag(.u8), - 16 => initTag(.u16), - 32 => initTag(.u32), - 64 => initTag(.u64), - else => return Tag.int_unsigned.create(arena, bits), - }; - } - - /// Given a value representing an integer, returns the number of bits necessary to represent - /// this value in an integer. If `sign` is true, returns the number of bits necessary in a - /// twos-complement integer; otherwise in an unsigned integer. - /// Asserts that `val` is not undef. If `val` is negative, asserts that `sign` is true. - pub fn intBitsForValue(target: Target, val: Value, sign: bool) u16 { - assert(!val.isUndef()); - switch (val.tag()) { - .int_big_positive => { - const limbs = val.castTag(.int_big_positive).?.data; - const big: std.math.big.int.Const = .{ .limbs = limbs, .positive = true }; - return @intCast(u16, big.bitCountAbs() + @boolToInt(sign)); - }, - .int_big_negative => { - const limbs = val.castTag(.int_big_negative).?.data; - // Zero is still a possibility, in which case unsigned is fine - for (limbs) |limb| { - if (limb != 0) break; - } else return 0; // val == 0 - assert(sign); - const big: std.math.big.int.Const = .{ .limbs = limbs, .positive = false }; - return @intCast(u16, big.bitCountTwosComp()); - }, - .int_i64 => { - const x = val.castTag(.int_i64).?.data; - if (x >= 0) return smallestUnsignedBits(@intCast(u64, x)); - assert(sign); - return smallestUnsignedBits(@intCast(u64, -x - 1)) + 1; - }, - else => { - const x = val.toUnsignedInt(target); - return smallestUnsignedBits(x) + @boolToInt(sign); - }, - } - } - - /// Returns the smallest possible integer type containing both `min` and `max`. Asserts that neither - /// value is undef. 
- /// TODO: if #3806 is implemented, this becomes trivial - pub fn intFittingRange(target: Target, arena: Allocator, min: Value, max: Value) !Type { - assert(!min.isUndef()); - assert(!max.isUndef()); - - if (std.debug.runtime_safety) { - assert(Value.order(min, max, target).compare(.lte)); - } - - const sign = min.orderAgainstZero() == .lt; - - const min_val_bits = intBitsForValue(target, min, sign); - const max_val_bits = intBitsForValue(target, max, sign); - const bits = @max(min_val_bits, max_val_bits); - - return intWithBits(arena, sign, bits); - } - /// This is only used for comptime asserts. Bump this number when you make a change /// to packed struct layout to find out all the places in the codebase you need to edit! pub const packed_struct_layout_version = 2; diff --git a/src/value.zig b/src/value.zig index af2d7b1ca2..8c824b0720 100644 --- a/src/value.zig +++ b/src/value.zig @@ -11,17 +11,24 @@ const Module = @import("Module.zig"); const Air = @import("Air.zig"); const TypedValue = @import("TypedValue.zig"); const Sema = @import("Sema.zig"); - -/// This is the raw data, with no bookkeeping, no memory awareness, -/// no de-duplication, and no type system awareness. -/// It's important for this type to be small. -/// This union takes advantage of the fact that the first page of memory -/// is unmapped, giving us 4096 possible enum tags that have no payload. -pub const Value = extern union { - /// If the tag value is less than Tag.no_payload_count, then no pointer - /// dereference is needed. - tag_if_small_enough: Tag, - ptr_otherwise: *Payload, +const InternPool = @import("InternPool.zig"); + +pub const Value = struct { + /// We are migrating towards using this for every Value object. However, many + /// values are still represented the legacy way. This is indicated by using + /// InternPool.Index.none. + ip_index: InternPool.Index, + + /// This is the raw data, with no bookkeeping, no memory awareness, + /// no de-duplication, and no type system awareness. + /// This union takes advantage of the fact that the first page of memory + /// is unmapped, giving us 4096 possible enum tags that have no payload. + legacy: extern union { + /// If the tag value is less than Tag.no_payload_count, then no pointer + /// dereference is needed. + tag_if_small_enough: Tag, + ptr_otherwise: *Payload, + }, // Keep in sync with tools/stage2_pretty_printers_common.py pub const Tag = enum(usize) { @@ -81,10 +88,6 @@ pub const Value = extern union { manyptr_u8_type, manyptr_const_u8_type, manyptr_const_u8_sentinel_0_type, - fn_noreturn_no_args_type, - fn_void_no_args_type, - fn_naked_noreturn_no_args_type, - fn_ccc_void_no_args_type, single_const_pointer_to_comptime_int_type, const_slice_u8_type, const_slice_u8_sentinel_0_type, @@ -108,7 +111,6 @@ pub const Value = extern union { // After this, the tag requires a payload. 
ty, - int_type, int_u64, int_i64, int_big_positive, @@ -232,10 +234,6 @@ pub const Value = extern union { .noreturn_type, .null_type, .undefined_type, - .fn_noreturn_no_args_type, - .fn_void_no_args_type, - .fn_naked_noreturn_no_args_type, - .fn_ccc_void_no_args_type, .single_const_pointer_to_comptime_int_type, .anyframe_type, .const_slice_u8_type, @@ -304,7 +302,6 @@ pub const Value = extern union { .lazy_size, => Payload.Ty, - .int_type => Payload.IntType, .int_u64 => Payload.U64, .int_i64 => Payload.I64, .function => Payload.Function, @@ -332,7 +329,10 @@ pub const Value = extern union { .base = .{ .tag = t }, .data = data, }; - return Value{ .ptr_otherwise = &ptr.base }; + return Value{ + .ip_index = .none, + .legacy = .{ .ptr_otherwise = &ptr.base }, + }; } pub fn Data(comptime t: Tag) type { @@ -342,37 +342,47 @@ pub const Value = extern union { pub fn initTag(small_tag: Tag) Value { assert(@enumToInt(small_tag) < Tag.no_payload_count); - return .{ .tag_if_small_enough = small_tag }; + return Value{ + .ip_index = .none, + .legacy = .{ .tag_if_small_enough = small_tag }, + }; } pub fn initPayload(payload: *Payload) Value { assert(@enumToInt(payload.tag) >= Tag.no_payload_count); - return .{ .ptr_otherwise = payload }; + return Value{ + .ip_index = .none, + .legacy = .{ .ptr_otherwise = payload }, + }; } pub fn tag(self: Value) Tag { - if (@enumToInt(self.tag_if_small_enough) < Tag.no_payload_count) { - return self.tag_if_small_enough; + assert(self.ip_index == .none); + if (@enumToInt(self.legacy.tag_if_small_enough) < Tag.no_payload_count) { + return self.legacy.tag_if_small_enough; } else { - return self.ptr_otherwise.tag; + return self.legacy.ptr_otherwise.tag; } } /// Prefer `castTag` to this. pub fn cast(self: Value, comptime T: type) ?*T { + if (self.ip_index != .none) { + return null; + } if (@hasField(T, "base_tag")) { return self.castTag(T.base_tag); } - if (@enumToInt(self.tag_if_small_enough) < Tag.no_payload_count) { + if (@enumToInt(self.legacy.tag_if_small_enough) < Tag.no_payload_count) { return null; } inline for (@typeInfo(Tag).Enum.fields) |field| { if (field.value < Tag.no_payload_count) continue; const t = @intToEnum(Tag, field.value); - if (self.ptr_otherwise.tag == t) { + if (self.legacy.ptr_otherwise.tag == t) { if (T == t.Type()) { - return @fieldParentPtr(T, "base", self.ptr_otherwise); + return @fieldParentPtr(T, "base", self.legacy.ptr_otherwise); } return null; } @@ -381,11 +391,15 @@ pub const Value = extern union { } pub fn castTag(self: Value, comptime t: Tag) ?*t.Type() { - if (@enumToInt(self.tag_if_small_enough) < Tag.no_payload_count) + if (self.ip_index != .none) { + return null; + } + + if (@enumToInt(self.legacy.tag_if_small_enough) < Tag.no_payload_count) return null; - if (self.ptr_otherwise.tag == t) - return @fieldParentPtr(t.Type(), "base", self.ptr_otherwise); + if (self.legacy.ptr_otherwise.tag == t) + return @fieldParentPtr(t.Type(), "base", self.legacy.ptr_otherwise); return null; } @@ -393,9 +407,15 @@ pub const Value = extern union { /// It's intentional that this function is not passed a corresponding Type, so that /// a Value can be copied from a Sema to a Decl prior to resolving struct/union field types. 
pub fn copy(self: Value, arena: Allocator) error{OutOfMemory}!Value { - if (@enumToInt(self.tag_if_small_enough) < Tag.no_payload_count) { - return Value{ .tag_if_small_enough = self.tag_if_small_enough }; - } else switch (self.ptr_otherwise.tag) { + if (self.ip_index != .none) { + return Value{ .ip_index = self.ip_index, .legacy = undefined }; + } + if (@enumToInt(self.legacy.tag_if_small_enough) < Tag.no_payload_count) { + return Value{ + .ip_index = .none, + .legacy = .{ .tag_if_small_enough = self.legacy.tag_if_small_enough }, + }; + } else switch (self.legacy.ptr_otherwise.tag) { .u1_type, .u8_type, .i8_type, @@ -435,10 +455,6 @@ pub const Value = extern union { .noreturn_type, .null_type, .undefined_type, - .fn_noreturn_no_args_type, - .fn_void_no_args_type, - .fn_naked_noreturn_no_args_type, - .fn_ccc_void_no_args_type, .single_const_pointer_to_comptime_int_type, .anyframe_type, .const_slice_u8_type, @@ -481,19 +497,24 @@ pub const Value = extern union { .base = payload.base, .data = try payload.data.copy(arena), }; - return Value{ .ptr_otherwise = &new_payload.base }; + return Value{ + .ip_index = .none, + .legacy = .{ .ptr_otherwise = &new_payload.base }, + }; }, - .int_type => return self.copyPayloadShallow(arena, Payload.IntType), .int_u64 => return self.copyPayloadShallow(arena, Payload.U64), .int_i64 => return self.copyPayloadShallow(arena, Payload.I64), .int_big_positive, .int_big_negative => { const old_payload = self.cast(Payload.BigInt).?; const new_payload = try arena.create(Payload.BigInt); new_payload.* = .{ - .base = .{ .tag = self.ptr_otherwise.tag }, + .base = .{ .tag = self.legacy.ptr_otherwise.tag }, .data = try arena.dupe(std.math.big.Limb, old_payload.data), }; - return Value{ .ptr_otherwise = &new_payload.base }; + return Value{ + .ip_index = .none, + .legacy = .{ .ptr_otherwise = &new_payload.base }, + }; }, .function => return self.copyPayloadShallow(arena, Payload.Function), .extern_fn => return self.copyPayloadShallow(arena, Payload.ExternFn), @@ -512,7 +533,10 @@ pub const Value = extern union { .container_ty = try payload.data.container_ty.copy(arena), }, }; - return Value{ .ptr_otherwise = &new_payload.base }; + return Value{ + .ip_index = .none, + .legacy = .{ .ptr_otherwise = &new_payload.base }, + }; }, .comptime_field_ptr => { const payload = self.cast(Payload.ComptimeFieldPtr).?; @@ -524,7 +548,10 @@ pub const Value = extern union { .field_ty = try payload.data.field_ty.copy(arena), }, }; - return Value{ .ptr_otherwise = &new_payload.base }; + return Value{ + .ip_index = .none, + .legacy = .{ .ptr_otherwise = &new_payload.base }, + }; }, .elem_ptr => { const payload = self.castTag(.elem_ptr).?; @@ -537,7 +564,10 @@ pub const Value = extern union { .index = payload.data.index, }, }; - return Value{ .ptr_otherwise = &new_payload.base }; + return Value{ + .ip_index = .none, + .legacy = .{ .ptr_otherwise = &new_payload.base }, + }; }, .field_ptr => { const payload = self.castTag(.field_ptr).?; @@ -550,7 +580,10 @@ pub const Value = extern union { .field_index = payload.data.field_index, }, }; - return Value{ .ptr_otherwise = &new_payload.base }; + return Value{ + .ip_index = .none, + .legacy = .{ .ptr_otherwise = &new_payload.base }, + }; }, .bytes => { const bytes = self.castTag(.bytes).?.data; @@ -559,7 +592,10 @@ pub const Value = extern union { .base = .{ .tag = .bytes }, .data = try arena.dupe(u8, bytes), }; - return Value{ .ptr_otherwise = &new_payload.base }; + return Value{ + .ip_index = .none, + .legacy = .{ .ptr_otherwise = &new_payload.base }, 
+ }; }, .str_lit => return self.copyPayloadShallow(arena, Payload.StrLit), .repeated, @@ -574,7 +610,10 @@ pub const Value = extern union { .base = payload.base, .data = try payload.data.copy(arena), }; - return Value{ .ptr_otherwise = &new_payload.base }; + return Value{ + .ip_index = .none, + .legacy = .{ .ptr_otherwise = &new_payload.base }, + }; }, .slice => { const payload = self.castTag(.slice).?; @@ -586,7 +625,10 @@ pub const Value = extern union { .len = try payload.data.len.copy(arena), }, }; - return Value{ .ptr_otherwise = &new_payload.base }; + return Value{ + .ip_index = .none, + .legacy = .{ .ptr_otherwise = &new_payload.base }, + }; }, .float_16 => return self.copyPayloadShallow(arena, Payload.Float_16), .float_32 => return self.copyPayloadShallow(arena, Payload.Float_32), @@ -600,7 +642,10 @@ pub const Value = extern union { .base = payload.base, .data = try arena.dupe(u8, payload.data), }; - return Value{ .ptr_otherwise = &new_payload.base }; + return Value{ + .ip_index = .none, + .legacy = .{ .ptr_otherwise = &new_payload.base }, + }; }, .enum_field_index => return self.copyPayloadShallow(arena, Payload.U32), .@"error" => return self.copyPayloadShallow(arena, Payload.Error), @@ -615,7 +660,10 @@ pub const Value = extern union { for (new_payload.data, 0..) |*elem, i| { elem.* = try payload.data[i].copy(arena); } - return Value{ .ptr_otherwise = &new_payload.base }; + return Value{ + .ip_index = .none, + .legacy = .{ .ptr_otherwise = &new_payload.base }, + }; }, .@"union" => { @@ -628,7 +676,10 @@ pub const Value = extern union { .val = try tag_and_val.val.copy(arena), }, }; - return Value{ .ptr_otherwise = &new_payload.base }; + return Value{ + .ip_index = .none, + .legacy = .{ .ptr_otherwise = &new_payload.base }, + }; }, .inferred_alloc => unreachable, @@ -640,7 +691,10 @@ pub const Value = extern union { const payload = self.cast(T).?; const new_payload = try arena.create(T); new_payload.* = payload.*; - return Value{ .ptr_otherwise = &new_payload.base }; + return Value{ + .ip_index = .none, + .legacy = .{ .ptr_otherwise = &new_payload.base }, + }; } pub fn format(val: Value, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void { @@ -660,6 +714,10 @@ pub const Value = extern union { out_stream: anytype, ) !void { comptime assert(fmt.len == 0); + if (start_val.ip_index != .none) { + try out_stream.print("(interned {d})", .{@enumToInt(start_val.ip_index)}); + return; + } var val = start_val; while (true) switch (val.tag()) { .u1_type => return out_stream.writeAll("u1"), @@ -701,10 +759,6 @@ pub const Value = extern union { .noreturn_type => return out_stream.writeAll("noreturn"), .null_type => return out_stream.writeAll("@Type(.Null)"), .undefined_type => return out_stream.writeAll("@Type(.Undefined)"), - .fn_noreturn_no_args_type => return out_stream.writeAll("fn() noreturn"), - .fn_void_no_args_type => return out_stream.writeAll("fn() void"), - .fn_naked_noreturn_no_args_type => return out_stream.writeAll("fn() callconv(.Naked) noreturn"), - .fn_ccc_void_no_args_type => return out_stream.writeAll("fn() callconv(.C) void"), .single_const_pointer_to_comptime_int_type => return out_stream.writeAll("*const comptime_int"), .anyframe_type => return out_stream.writeAll("anyframe"), .const_slice_u8_type => return out_stream.writeAll("[]const u8"), @@ -755,13 +809,6 @@ pub const Value = extern union { try val.castTag(.lazy_size).?.data.dump("", options, out_stream); return try out_stream.writeAll(")"); }, - .int_type => { - const int_type = 
val.castTag(.int_type).?.data; - return out_stream.print("{s}{d}", .{ - if (int_type.signed) "s" else "u", - int_type.bits, - }); - }, .int_u64 => return std.fmt.formatIntValue(val.castTag(.int_u64).?.data, "", options, out_stream), .int_i64 => return std.fmt.formatIntValue(val.castTag(.int_i64).?.data, "", options, out_stream), .int_big_positive => return out_stream.print("{}", .{val.castTag(.int_big_positive).?.asBigInt()}), @@ -848,7 +895,6 @@ pub const Value = extern union { /// Asserts that the value is representable as an array of bytes. /// Copies the value into a freshly allocated slice of memory, which is owned by the caller. pub fn toAllocatedBytes(val: Value, ty: Type, allocator: Allocator, mod: *Module) ![]u8 { - const target = mod.getTarget(); switch (val.tag()) { .bytes => { const bytes = val.castTag(.bytes).?.data; @@ -863,7 +909,7 @@ pub const Value = extern union { }, .enum_literal => return allocator.dupe(u8, val.castTag(.enum_literal).?.data), .repeated => { - const byte = @intCast(u8, val.castTag(.repeated).?.data.toUnsignedInt(target)); + const byte = @intCast(u8, val.castTag(.repeated).?.data.toUnsignedInt(mod)); const result = try allocator.alloc(u8, @intCast(usize, ty.arrayLen())); @memset(result, byte); return result; @@ -877,7 +923,7 @@ pub const Value = extern union { .the_only_possible_value => return &[_]u8{}, .slice => { const slice = val.castTag(.slice).?.data; - return arrayToAllocatedBytes(slice.ptr, slice.len.toUnsignedInt(target), allocator, mod); + return arrayToAllocatedBytes(slice.ptr, slice.len.toUnsignedInt(mod), allocator, mod); }, else => return arrayToAllocatedBytes(val, ty.arrayLen(), allocator, mod), } @@ -888,15 +934,19 @@ pub const Value = extern union { var elem_value_buf: ElemValueBuffer = undefined; for (result, 0..) |*elem, i| { const elem_val = val.elemValueBuffer(mod, i, &elem_value_buf); - elem.* = @intCast(u8, elem_val.toUnsignedInt(mod.getTarget())); + elem.* = @intCast(u8, elem_val.toUnsignedInt(mod)); } return result; } - pub const ToTypeBuffer = Type.Payload.Bits; - /// Asserts that the value is representable as a type. 
- pub fn toType(self: Value, buffer: *ToTypeBuffer) Type { + pub fn toType(self: Value) Type { + if (self.ip_index != .none) { + return .{ + .ip_index = self.ip_index, + .legacy = undefined, + }; + } return switch (self.tag()) { .ty => self.castTag(.ty).?.data, .u1_type => Type.initTag(.u1), @@ -938,10 +988,6 @@ pub const Value = extern union { .noreturn_type => Type.initTag(.noreturn), .null_type => Type.initTag(.null), .undefined_type => Type.initTag(.undefined), - .fn_noreturn_no_args_type => Type.initTag(.fn_noreturn_no_args), - .fn_void_no_args_type => Type.initTag(.fn_void_no_args), - .fn_naked_noreturn_no_args_type => Type.initTag(.fn_naked_noreturn_no_args), - .fn_ccc_void_no_args_type => Type.initTag(.fn_ccc_void_no_args), .single_const_pointer_to_comptime_int_type => Type.initTag(.single_const_pointer_to_comptime_int), .anyframe_type => Type.initTag(.@"anyframe"), .const_slice_u8_type => Type.initTag(.const_slice_u8), @@ -964,17 +1010,6 @@ pub const Value = extern union { .extern_options_type => Type.initTag(.extern_options), .type_info_type => Type.initTag(.type_info), - .int_type => { - const payload = self.castTag(.int_type).?.data; - buffer.* = .{ - .base = .{ - .tag = if (payload.signed) .int_signed else .int_unsigned, - }, - .data = payload.bits, - }; - return Type.initPayload(&buffer.base); - }, - else => unreachable, }; } @@ -1050,7 +1085,7 @@ pub const Value = extern union { } pub fn tagName(val: Value, ty: Type, mod: *Module) []const u8 { - if (ty.zigTypeTag() == .Union) return val.unionTag().tagName(ty.unionTagTypeHypothetical(), mod); + if (ty.zigTypeTag(mod) == .Union) return val.unionTag().tagName(ty.unionTagTypeHypothetical(), mod); const field_index = switch (val.tag()) { .enum_field_index => val.castTag(.enum_field_index).?.data, @@ -1068,10 +1103,9 @@ pub const Value = extern union { }; if (values.entries.len == 0) { // auto-numbered enum - break :field_index @intCast(u32, val.toUnsignedInt(mod.getTarget())); + break :field_index @intCast(u32, val.toUnsignedInt(mod)); } - var buffer: Type.Payload.Bits = undefined; - const int_tag_ty = ty.intTagType(&buffer); + const int_tag_ty = ty.intTagType(); break :field_index @intCast(u32, values.getIndexContext(val, .{ .ty = int_tag_ty, .mod = mod }).?); }, }; @@ -1086,15 +1120,15 @@ pub const Value = extern union { } /// Asserts the value is an integer. - pub fn toBigInt(val: Value, space: *BigIntSpace, target: Target) BigIntConst { - return val.toBigIntAdvanced(space, target, null) catch unreachable; + pub fn toBigInt(val: Value, space: *BigIntSpace, mod: *const Module) BigIntConst { + return val.toBigIntAdvanced(space, mod, null) catch unreachable; } /// Asserts the value is an integer. 
pub fn toBigIntAdvanced( val: Value, space: *BigIntSpace, - target: Target, + mod: *const Module, opt_sema: ?*Sema, ) Module.CompileError!BigIntConst { switch (val.tag()) { @@ -1114,7 +1148,7 @@ pub const Value = extern union { }, .runtime_value => { const sub_val = val.castTag(.runtime_value).?.data; - return sub_val.toBigIntAdvanced(space, target, opt_sema); + return sub_val.toBigIntAdvanced(space, mod, opt_sema); }, .int_u64 => return BigIntMutable.init(&space.limbs, val.castTag(.int_u64).?.data).toConst(), .int_i64 => return BigIntMutable.init(&space.limbs, val.castTag(.int_i64).?.data).toConst(), @@ -1128,7 +1162,7 @@ pub const Value = extern union { if (opt_sema) |sema| { try sema.resolveTypeLayout(ty); } - const x = ty.abiAlignment(target); + const x = ty.abiAlignment(mod); return BigIntMutable.init(&space.limbs, x).toConst(); }, .lazy_size => { @@ -1136,14 +1170,14 @@ pub const Value = extern union { if (opt_sema) |sema| { try sema.resolveTypeLayout(ty); } - const x = ty.abiSize(target); + const x = ty.abiSize(mod); return BigIntMutable.init(&space.limbs, x).toConst(); }, .elem_ptr => { const elem_ptr = val.castTag(.elem_ptr).?.data; - const array_addr = (try elem_ptr.array_ptr.getUnsignedIntAdvanced(target, opt_sema)).?; - const elem_size = elem_ptr.elem_ty.abiSize(target); + const array_addr = (try elem_ptr.array_ptr.getUnsignedIntAdvanced(mod, opt_sema)).?; + const elem_size = elem_ptr.elem_ty.abiSize(mod); const new_addr = array_addr + elem_size * elem_ptr.index; return BigIntMutable.init(&space.limbs, new_addr).toConst(); }, @@ -1154,13 +1188,13 @@ pub const Value = extern union { /// If the value fits in a u64, return it, otherwise null. /// Asserts not undefined. - pub fn getUnsignedInt(val: Value, target: Target) ?u64 { - return getUnsignedIntAdvanced(val, target, null) catch unreachable; + pub fn getUnsignedInt(val: Value, mod: *const Module) ?u64 { + return getUnsignedIntAdvanced(val, mod, null) catch unreachable; } /// If the value fits in a u64, return it, otherwise null. /// Asserts not undefined. 
- pub fn getUnsignedIntAdvanced(val: Value, target: Target, opt_sema: ?*Sema) !?u64 { + pub fn getUnsignedIntAdvanced(val: Value, mod: *const Module, opt_sema: ?*Sema) !?u64 { switch (val.tag()) { .zero, .bool_false, @@ -1181,17 +1215,17 @@ pub const Value = extern union { .lazy_align => { const ty = val.castTag(.lazy_align).?.data; if (opt_sema) |sema| { - return (try ty.abiAlignmentAdvanced(target, .{ .sema = sema })).scalar; + return (try ty.abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar; } else { - return ty.abiAlignment(target); + return ty.abiAlignment(mod); } }, .lazy_size => { const ty = val.castTag(.lazy_size).?.data; if (opt_sema) |sema| { - return (try ty.abiSizeAdvanced(target, .{ .sema = sema })).scalar; + return (try ty.abiSizeAdvanced(mod, .{ .sema = sema })).scalar; } else { - return ty.abiSize(target); + return ty.abiSize(mod); } }, @@ -1200,12 +1234,12 @@ pub const Value = extern union { } /// Asserts the value is an integer and it fits in a u64 - pub fn toUnsignedInt(val: Value, target: Target) u64 { - return getUnsignedInt(val, target).?; + pub fn toUnsignedInt(val: Value, mod: *const Module) u64 { + return getUnsignedInt(val, mod).?; } /// Asserts the value is an integer and it fits in a i64 - pub fn toSignedInt(val: Value, target: Target) i64 { + pub fn toSignedInt(val: Value, mod: *const Module) i64 { switch (val.tag()) { .zero, .bool_false, @@ -1223,11 +1257,11 @@ pub const Value = extern union { .lazy_align => { const ty = val.castTag(.lazy_align).?.data; - return @intCast(i64, ty.abiAlignment(target)); + return @intCast(i64, ty.abiAlignment(mod)); }, .lazy_size => { const ty = val.castTag(.lazy_size).?.data; - return @intCast(i64, ty.abiSize(target)); + return @intCast(i64, ty.abiSize(mod)); }, .undef => unreachable, @@ -1276,17 +1310,17 @@ pub const Value = extern union { const target = mod.getTarget(); const endian = target.cpu.arch.endian(); if (val.isUndef()) { - const size = @intCast(usize, ty.abiSize(target)); + const size = @intCast(usize, ty.abiSize(mod)); @memset(buffer[0..size], 0xaa); return; } - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Void => {}, .Bool => { buffer[0] = @boolToInt(val.toBool()); }, .Int, .Enum => { - const int_info = ty.intInfo(target); + const int_info = ty.intInfo(mod); const bits = int_info.bits; const byte_count = (bits + 7) / 8; @@ -1307,7 +1341,7 @@ pub const Value = extern union { }; } else { var bigint_buffer: BigIntSpace = undefined; - const bigint = int_val.toBigInt(&bigint_buffer, target); + const bigint = int_val.toBigInt(&bigint_buffer, mod); bigint.writeTwosComplement(buffer[0..byte_count], endian); } }, @@ -1322,7 +1356,7 @@ pub const Value = extern union { .Array => { const len = ty.arrayLen(); const elem_ty = ty.childType(); - const elem_size = @intCast(usize, elem_ty.abiSize(target)); + const elem_size = @intCast(usize, elem_ty.abiSize(mod)); var elem_i: usize = 0; var elem_value_buf: ElemValueBuffer = undefined; var buf_off: usize = 0; @@ -1335,7 +1369,7 @@ pub const Value = extern union { .Vector => { // We use byte_count instead of abi_size here, so that any padding bytes // follow the data bytes, on both big- and little-endian systems. 
- const byte_count = (@intCast(usize, ty.bitSize(target)) + 7) / 8; + const byte_count = (@intCast(usize, ty.bitSize(mod)) + 7) / 8; return writeToPackedMemory(val, ty, mod, buffer[0..byte_count], 0); }, .Struct => switch (ty.containerLayout()) { @@ -1344,12 +1378,12 @@ pub const Value = extern union { const fields = ty.structFields().values(); const field_vals = val.castTag(.aggregate).?.data; for (fields, 0..) |field, i| { - const off = @intCast(usize, ty.structFieldOffset(i, target)); + const off = @intCast(usize, ty.structFieldOffset(i, mod)); try writeToMemory(field_vals[i], field.ty, mod, buffer[off..]); } }, .Packed => { - const byte_count = (@intCast(usize, ty.bitSize(target)) + 7) / 8; + const byte_count = (@intCast(usize, ty.bitSize(mod)) + 7) / 8; return writeToPackedMemory(val, ty, mod, buffer[0..byte_count], 0); }, }, @@ -1363,7 +1397,7 @@ pub const Value = extern union { .Auto => return error.IllDefinedMemoryLayout, .Extern => return error.Unimplemented, .Packed => { - const byte_count = (@intCast(usize, ty.bitSize(target)) + 7) / 8; + const byte_count = (@intCast(usize, ty.bitSize(mod)) + 7) / 8; return writeToPackedMemory(val, ty, mod, buffer[0..byte_count], 0); }, }, @@ -1373,10 +1407,10 @@ pub const Value = extern union { return val.writeToMemory(Type.usize, mod, buffer); }, .Optional => { - if (!ty.isPtrLikeOptional()) return error.IllDefinedMemoryLayout; + if (!ty.isPtrLikeOptional(mod)) return error.IllDefinedMemoryLayout; var buf: Type.Payload.ElemType = undefined; const child = ty.optionalChild(&buf); - const opt_val = val.optionalValue(); + const opt_val = val.optionalValue(mod); if (opt_val) |some| { return some.writeToMemory(child, mod, buffer); } else { @@ -1395,11 +1429,11 @@ pub const Value = extern union { const target = mod.getTarget(); const endian = target.cpu.arch.endian(); if (val.isUndef()) { - const bit_size = @intCast(usize, ty.bitSize(target)); + const bit_size = @intCast(usize, ty.bitSize(mod)); std.mem.writeVarPackedInt(buffer, bit_offset, bit_size, @as(u1, 0), endian); return; } - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Void => {}, .Bool => { const byte_index = switch (endian) { @@ -1413,8 +1447,8 @@ pub const Value = extern union { } }, .Int, .Enum => { - const bits = ty.intInfo(target).bits; - const abi_size = @intCast(usize, ty.abiSize(target)); + const bits = ty.intInfo(mod).bits; + const abi_size = @intCast(usize, ty.abiSize(mod)); var enum_buffer: Payload.U64 = undefined; const int_val = val.enumToInt(ty, &enum_buffer); @@ -1431,7 +1465,7 @@ pub const Value = extern union { std.mem.writeVarPackedInt(buffer, bit_offset, bits, int, endian); } else { var bigint_buffer: BigIntSpace = undefined; - const bigint = int_val.toBigInt(&bigint_buffer, target); + const bigint = int_val.toBigInt(&bigint_buffer, mod); bigint.writePackedTwosComplement(buffer, bit_offset, bits, endian); } }, @@ -1445,7 +1479,7 @@ pub const Value = extern union { }, .Vector => { const elem_ty = ty.childType(); - const elem_bit_size = @intCast(u16, elem_ty.bitSize(target)); + const elem_bit_size = @intCast(u16, elem_ty.bitSize(mod)); const len = @intCast(usize, ty.arrayLen()); var bits: u16 = 0; @@ -1467,7 +1501,7 @@ pub const Value = extern union { const fields = ty.structFields().values(); const field_vals = val.castTag(.aggregate).?.data; for (fields, 0..) 
|field, i| { - const field_bits = @intCast(u16, field.ty.bitSize(target)); + const field_bits = @intCast(u16, field.ty.bitSize(mod)); try field_vals[i].writeToPackedMemory(field.ty, mod, buffer, bit_offset + bits); bits += field_bits; } @@ -1479,7 +1513,7 @@ pub const Value = extern union { .Packed => { const field_index = ty.unionTagFieldIndex(val.unionTag(), mod); const field_type = ty.unionFields().values()[field_index.?].ty; - const field_val = val.fieldValue(field_type, field_index.?); + const field_val = val.fieldValue(field_type, mod, field_index.?); return field_val.writeToPackedMemory(field_type, mod, buffer, bit_offset); }, @@ -1490,10 +1524,10 @@ pub const Value = extern union { return val.writeToPackedMemory(Type.usize, mod, buffer, bit_offset); }, .Optional => { - assert(ty.isPtrLikeOptional()); + assert(ty.isPtrLikeOptional(mod)); var buf: Type.Payload.ElemType = undefined; const child = ty.optionalChild(&buf); - const opt_val = val.optionalValue(); + const opt_val = val.optionalValue(mod); if (opt_val) |some| { return some.writeToPackedMemory(child, mod, buffer, bit_offset); } else { @@ -1516,7 +1550,7 @@ pub const Value = extern union { ) Allocator.Error!Value { const target = mod.getTarget(); const endian = target.cpu.arch.endian(); - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Void => return Value.void, .Bool => { if (buffer[0] == 0) { @@ -1526,7 +1560,7 @@ pub const Value = extern union { } }, .Int, .Enum => { - const int_info = ty.intInfo(target); + const int_info = ty.intInfo(mod); const bits = int_info.bits; const byte_count = (bits + 7) / 8; if (bits == 0 or buffer.len == 0) return Value.zero; @@ -1560,7 +1594,7 @@ pub const Value = extern union { }, .Array => { const elem_ty = ty.childType(); - const elem_size = elem_ty.abiSize(target); + const elem_size = elem_ty.abiSize(mod); const elems = try arena.alloc(Value, @intCast(usize, ty.arrayLen())); var offset: usize = 0; for (elems) |*elem| { @@ -1572,7 +1606,7 @@ pub const Value = extern union { .Vector => { // We use byte_count instead of abi_size here, so that any padding bytes // follow the data bytes, on both big- and little-endian systems. - const byte_count = (@intCast(usize, ty.bitSize(target)) + 7) / 8; + const byte_count = (@intCast(usize, ty.bitSize(mod)) + 7) / 8; return readFromPackedMemory(ty, mod, buffer[0..byte_count], 0, arena); }, .Struct => switch (ty.containerLayout()) { @@ -1581,14 +1615,14 @@ pub const Value = extern union { const fields = ty.structFields().values(); const field_vals = try arena.alloc(Value, fields.len); for (fields, 0..) 
|field, i| { - const off = @intCast(usize, ty.structFieldOffset(i, target)); - const sz = @intCast(usize, ty.structFieldType(i).abiSize(target)); + const off = @intCast(usize, ty.structFieldOffset(i, mod)); + const sz = @intCast(usize, ty.structFieldType(i).abiSize(mod)); field_vals[i] = try readFromMemory(field.ty, mod, buffer[off..(off + sz)], arena); } return Tag.aggregate.create(arena, field_vals); }, .Packed => { - const byte_count = (@intCast(usize, ty.bitSize(target)) + 7) / 8; + const byte_count = (@intCast(usize, ty.bitSize(mod)) + 7) / 8; return readFromPackedMemory(ty, mod, buffer[0..byte_count], 0, arena); }, }, @@ -1609,7 +1643,7 @@ pub const Value = extern union { return readFromMemory(Type.usize, mod, buffer, arena); }, .Optional => { - assert(ty.isPtrLikeOptional()); + assert(ty.isPtrLikeOptional(mod)); var buf: Type.Payload.ElemType = undefined; const child = ty.optionalChild(&buf); return readFromMemory(child, mod, buffer, arena); @@ -1631,7 +1665,7 @@ pub const Value = extern union { ) Allocator.Error!Value { const target = mod.getTarget(); const endian = target.cpu.arch.endian(); - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Void => return Value.void, .Bool => { const byte = switch (endian) { @@ -1646,8 +1680,8 @@ pub const Value = extern union { }, .Int, .Enum => { if (buffer.len == 0) return Value.zero; - const int_info = ty.intInfo(target); - const abi_size = @intCast(usize, ty.abiSize(target)); + const int_info = ty.intInfo(mod); + const abi_size = @intCast(usize, ty.abiSize(mod)); const bits = int_info.bits; if (bits == 0) return Value.zero; @@ -1677,7 +1711,7 @@ pub const Value = extern union { const elems = try arena.alloc(Value, @intCast(usize, ty.arrayLen())); var bits: u16 = 0; - const elem_bit_size = @intCast(u16, elem_ty.bitSize(target)); + const elem_bit_size = @intCast(u16, elem_ty.bitSize(mod)); for (elems, 0..) |_, i| { // On big-endian systems, LLVM reverses the element order of vectors by default const tgt_elem_i = if (endian == .Big) elems.len - i - 1 else i; @@ -1694,7 +1728,7 @@ pub const Value = extern union { const fields = ty.structFields().values(); const field_vals = try arena.alloc(Value, fields.len); for (fields, 0..) 
|field, i| { - const field_bits = @intCast(u16, field.ty.bitSize(target)); + const field_bits = @intCast(u16, field.ty.bitSize(mod)); field_vals[i] = try readFromPackedMemory(field.ty, mod, buffer, bit_offset + bits, arena); bits += field_bits; } @@ -1706,7 +1740,7 @@ pub const Value = extern union { return readFromPackedMemory(Type.usize, mod, buffer, bit_offset, arena); }, .Optional => { - assert(ty.isPtrLikeOptional()); + assert(ty.isPtrLikeOptional(mod)); var buf: Type.Payload.ElemType = undefined; const child = ty.optionalChild(&buf); return readFromPackedMemory(child, mod, buffer, bit_offset, arena); @@ -1764,8 +1798,8 @@ pub const Value = extern union { } } - pub fn clz(val: Value, ty: Type, target: Target) u64 { - const ty_bits = ty.intInfo(target).bits; + pub fn clz(val: Value, ty: Type, mod: *const Module) u64 { + const ty_bits = ty.intInfo(mod).bits; switch (val.tag()) { .zero, .bool_false => return ty_bits, .one, .bool_true => return ty_bits - 1, @@ -1792,7 +1826,7 @@ pub const Value = extern union { .lazy_align, .lazy_size => { var bigint_buf: BigIntSpace = undefined; - const bigint = val.toBigIntAdvanced(&bigint_buf, target, null) catch unreachable; + const bigint = val.toBigIntAdvanced(&bigint_buf, mod, null) catch unreachable; return bigint.clz(ty_bits); }, @@ -1800,8 +1834,8 @@ pub const Value = extern union { } } - pub fn ctz(val: Value, ty: Type, target: Target) u64 { - const ty_bits = ty.intInfo(target).bits; + pub fn ctz(val: Value, ty: Type, mod: *const Module) u64 { + const ty_bits = ty.intInfo(mod).bits; switch (val.tag()) { .zero, .bool_false => return ty_bits, .one, .bool_true => return 0, @@ -1828,7 +1862,7 @@ pub const Value = extern union { .lazy_align, .lazy_size => { var bigint_buf: BigIntSpace = undefined; - const bigint = val.toBigIntAdvanced(&bigint_buf, target, null) catch unreachable; + const bigint = val.toBigIntAdvanced(&bigint_buf, mod, null) catch unreachable; return bigint.ctz(); }, @@ -1836,7 +1870,7 @@ pub const Value = extern union { } } - pub fn popCount(val: Value, ty: Type, target: Target) u64 { + pub fn popCount(val: Value, ty: Type, mod: *const Module) u64 { assert(!val.isUndef()); switch (val.tag()) { .zero, .bool_false => return 0, @@ -1845,22 +1879,22 @@ pub const Value = extern union { .int_u64 => return @popCount(val.castTag(.int_u64).?.data), else => { - const info = ty.intInfo(target); + const info = ty.intInfo(mod); var buffer: Value.BigIntSpace = undefined; - const int = val.toBigInt(&buffer, target); + const int = val.toBigInt(&buffer, mod); return @intCast(u64, int.popCount(info.bits)); }, } } - pub fn bitReverse(val: Value, ty: Type, target: Target, arena: Allocator) !Value { + pub fn bitReverse(val: Value, ty: Type, mod: *const Module, arena: Allocator) !Value { assert(!val.isUndef()); - const info = ty.intInfo(target); + const info = ty.intInfo(mod); var buffer: Value.BigIntSpace = undefined; - const operand_bigint = val.toBigInt(&buffer, target); + const operand_bigint = val.toBigInt(&buffer, mod); const limbs = try arena.alloc( std.math.big.Limb, @@ -1872,16 +1906,16 @@ pub const Value = extern union { return fromBigInt(arena, result_bigint.toConst()); } - pub fn byteSwap(val: Value, ty: Type, target: Target, arena: Allocator) !Value { + pub fn byteSwap(val: Value, ty: Type, mod: *const Module, arena: Allocator) !Value { assert(!val.isUndef()); - const info = ty.intInfo(target); + const info = ty.intInfo(mod); // Bit count must be evenly divisible by 8 assert(info.bits % 8 == 0); var buffer: Value.BigIntSpace = undefined; - 
const operand_bigint = val.toBigInt(&buffer, target); + const operand_bigint = val.toBigInt(&buffer, mod); const limbs = try arena.alloc( std.math.big.Limb, @@ -1895,7 +1929,8 @@ pub const Value = extern union { /// Asserts the value is an integer and not undefined. /// Returns the number of bits the value requires to represent stored in twos complement form. - pub fn intBitCountTwosComp(self: Value, target: Target) usize { + pub fn intBitCountTwosComp(self: Value, mod: *const Module) usize { + const target = mod.getTarget(); switch (self.tag()) { .zero, .bool_false, @@ -1926,7 +1961,7 @@ pub const Value = extern union { else => { var buffer: BigIntSpace = undefined; - return self.toBigInt(&buffer, target).bitCountTwosComp(); + return self.toBigInt(&buffer, mod).bitCountTwosComp(); }, } } @@ -1962,12 +1997,13 @@ pub const Value = extern union { }; } - pub fn orderAgainstZero(lhs: Value) std.math.Order { - return orderAgainstZeroAdvanced(lhs, null) catch unreachable; + pub fn orderAgainstZero(lhs: Value, mod: *const Module) std.math.Order { + return orderAgainstZeroAdvanced(lhs, mod, null) catch unreachable; } pub fn orderAgainstZeroAdvanced( lhs: Value, + mod: *const Module, opt_sema: ?*Sema, ) Module.CompileError!std.math.Order { return switch (lhs.tag()) { @@ -1991,7 +2027,7 @@ pub const Value = extern union { // This is needed to correctly handle hashing the value. // Checks in Sema should prevent direct comparisons from reaching here. const val = lhs.castTag(.runtime_value).?.data; - return val.orderAgainstZeroAdvanced(opt_sema); + return val.orderAgainstZeroAdvanced(mod, opt_sema); }, .int_u64 => std.math.order(lhs.castTag(.int_u64).?.data, 0), .int_i64 => std.math.order(lhs.castTag(.int_i64).?.data, 0), @@ -2001,7 +2037,7 @@ pub const Value = extern union { .lazy_align => { const ty = lhs.castTag(.lazy_align).?.data; const strat: Type.AbiAlignmentAdvancedStrat = if (opt_sema) |sema| .{ .sema = sema } else .eager; - if (ty.hasRuntimeBitsAdvanced(false, strat) catch |err| switch (err) { + if (ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { error.NeedLazy => unreachable, else => |e| return e, }) { @@ -2013,7 +2049,7 @@ pub const Value = extern union { .lazy_size => { const ty = lhs.castTag(.lazy_size).?.data; const strat: Type.AbiAlignmentAdvancedStrat = if (opt_sema) |sema| .{ .sema = sema } else .eager; - if (ty.hasRuntimeBitsAdvanced(false, strat) catch |err| switch (err) { + if (ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { error.NeedLazy => unreachable, else => |e| return e, }) { @@ -2031,7 +2067,7 @@ pub const Value = extern union { .elem_ptr => { const elem_ptr = lhs.castTag(.elem_ptr).?.data; - switch (try elem_ptr.array_ptr.orderAgainstZeroAdvanced(opt_sema)) { + switch (try elem_ptr.array_ptr.orderAgainstZeroAdvanced(mod, opt_sema)) { .lt => unreachable, .gt => return .gt, .eq => { @@ -2049,17 +2085,17 @@ pub const Value = extern union { } /// Asserts the value is comparable. - pub fn order(lhs: Value, rhs: Value, target: Target) std.math.Order { - return orderAdvanced(lhs, rhs, target, null) catch unreachable; + pub fn order(lhs: Value, rhs: Value, mod: *const Module) std.math.Order { + return orderAdvanced(lhs, rhs, mod, null) catch unreachable; } /// Asserts the value is comparable. /// If opt_sema is null then this function asserts things are resolved and cannot fail. 
- pub fn orderAdvanced(lhs: Value, rhs: Value, target: Target, opt_sema: ?*Sema) !std.math.Order { + pub fn orderAdvanced(lhs: Value, rhs: Value, mod: *const Module, opt_sema: ?*Sema) !std.math.Order { const lhs_tag = lhs.tag(); const rhs_tag = rhs.tag(); - const lhs_against_zero = try lhs.orderAgainstZeroAdvanced(opt_sema); - const rhs_against_zero = try rhs.orderAgainstZeroAdvanced(opt_sema); + const lhs_against_zero = try lhs.orderAgainstZeroAdvanced(mod, opt_sema); + const rhs_against_zero = try rhs.orderAgainstZeroAdvanced(mod, opt_sema); switch (lhs_against_zero) { .lt => if (rhs_against_zero != .lt) return .lt, .eq => return rhs_against_zero.invert(), @@ -2093,22 +2129,22 @@ pub const Value = extern union { var lhs_bigint_space: BigIntSpace = undefined; var rhs_bigint_space: BigIntSpace = undefined; - const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_bigint_space, target, opt_sema); - const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_bigint_space, target, opt_sema); + const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_bigint_space, mod, opt_sema); + const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_bigint_space, mod, opt_sema); return lhs_bigint.order(rhs_bigint); } /// Asserts the value is comparable. Does not take a type parameter because it supports /// comparisons between heterogeneous types. - pub fn compareHetero(lhs: Value, op: std.math.CompareOperator, rhs: Value, target: Target) bool { - return compareHeteroAdvanced(lhs, op, rhs, target, null) catch unreachable; + pub fn compareHetero(lhs: Value, op: std.math.CompareOperator, rhs: Value, mod: *const Module) bool { + return compareHeteroAdvanced(lhs, op, rhs, mod, null) catch unreachable; } pub fn compareHeteroAdvanced( lhs: Value, op: std.math.CompareOperator, rhs: Value, - target: Target, + mod: *const Module, opt_sema: ?*Sema, ) !bool { if (lhs.pointerDecl()) |lhs_decl| { @@ -2132,20 +2168,20 @@ pub const Value = extern union { else => {}, } } - return (try orderAdvanced(lhs, rhs, target, opt_sema)).compare(op); + return (try orderAdvanced(lhs, rhs, mod, opt_sema)).compare(op); } /// Asserts the values are comparable. Both operands have type `ty`. /// For vectors, returns true if comparison is true for ALL elements. 
pub fn compareAll(lhs: Value, op: std.math.CompareOperator, rhs: Value, ty: Type, mod: *Module) bool { - if (ty.zigTypeTag() == .Vector) { + if (ty.zigTypeTag(mod) == .Vector) { var i: usize = 0; while (i < ty.vectorLen()) : (i += 1) { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - if (!compareScalar(lhs_elem, op, rhs_elem, ty.scalarType(), mod)) { + if (!compareScalar(lhs_elem, op, rhs_elem, ty.scalarType(mod), mod)) { return false; } } @@ -2165,7 +2201,7 @@ pub const Value = extern union { return switch (op) { .eq => lhs.eql(rhs, ty, mod), .neq => !lhs.eql(rhs, ty, mod), - else => compareHetero(lhs, op, rhs, mod.getTarget()), + else => compareHetero(lhs, op, rhs, mod), }; } @@ -2231,7 +2267,7 @@ pub const Value = extern union { .float_128 => if (std.math.isNan(lhs.castTag(.float_128).?.data)) return op == .neq, else => {}, } - return (try orderAgainstZeroAdvanced(lhs, opt_sema)).compare(op); + return (try orderAgainstZeroAdvanced(lhs, mod, opt_sema)).compare(op); } pub fn eql(a: Value, b: Value, ty: Type, mod: *Module) bool { @@ -2346,7 +2382,7 @@ pub const Value = extern union { return true; } - if (ty.zigTypeTag() == .Struct) { + if (ty.zigTypeTag(mod) == .Struct) { const fields = ty.structFields().values(); assert(fields.len == a_field_vals.len); for (fields, 0..) |field, i| { @@ -2406,12 +2442,10 @@ pub const Value = extern union { return false; } - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Type => { - var buf_a: ToTypeBuffer = undefined; - var buf_b: ToTypeBuffer = undefined; - const a_type = a.toType(&buf_a); - const b_type = b.toType(&buf_b); + const a_type = a.toType(); + const b_type = b.toType(); return a_type.eql(b_type, mod); }, .Enum => { @@ -2419,8 +2453,7 @@ pub const Value = extern union { var buf_b: Payload.U64 = undefined; const a_val = a.enumToInt(ty, &buf_a); const b_val = b.enumToInt(ty, &buf_b); - var buf_ty: Type.Payload.Bits = undefined; - const int_ty = ty.intTagType(&buf_ty); + const int_ty = ty.intTagType(); return eqlAdvanced(a_val, int_ty, b_val, int_ty, mod, opt_sema); }, .Array, .Vector => { @@ -2466,11 +2499,11 @@ pub const Value = extern union { // .the_one_possible_value, // .aggregate, // Note that we already checked above for matching tags, e.g. both .aggregate. - return ty.onePossibleValue() != null; + return ty.onePossibleValue(mod) != null; }, .Union => { // Here we have to check for value equality, as-if `a` has been coerced to `ty`. - if (ty.onePossibleValue() != null) { + if (ty.onePossibleValue(mod) != null) { return true; } if (a_ty.castTag(.anon_struct)) |payload| { @@ -2533,13 +2566,13 @@ pub const Value = extern union { else => {}, } if (a_tag == .null_value or a_tag == .@"error") return false; - return (try orderAdvanced(a, b, target, opt_sema)).compare(.eq); + return (try orderAdvanced(a, b, mod, opt_sema)).compare(.eq); } /// This function is used by hash maps and so treats floating-point NaNs as equal /// to each other, and not equal to other floating-point values. pub fn hash(val: Value, ty: Type, hasher: *std.hash.Wyhash, mod: *Module) void { - const zig_ty_tag = ty.zigTypeTag(); + const zig_ty_tag = ty.zigTypeTag(mod); std.hash.autoHash(hasher, zig_ty_tag); if (val.isUndef()) return; // The value is runtime-known and shouldn't affect the hash. 
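The change running through every hunk above is the same mechanical one: queries that used to take a `target: Target` now take `mod: *const Module`, because a type or value that lives in the InternPool can only be decoded through `mod.intern_pool`. Below is a minimal sketch of the accessor shape this produces; `indexToKey`, `ip_index`, and the `.int_type` key all appear in the patch itself, while the specific tag-to-TypeId mappings shown are illustrative assumptions, not the patch's code:

    // Sketch only, not part of the patch: why `mod` must replace `target`
    // in these signatures once types may be interned.
    pub fn zigTypeTag(ty: Type, mod: *const Module) std.builtin.TypeId {
        if (ty.ip_index != .none) {
            // Interned types answer queries through their InternPool key...
            return switch (mod.intern_pool.indexToKey(ty.ip_index)) {
                .int_type => .Int, // assumed example mapping
                else => @panic("TODO"),
            };
        }
        // ...while legacy types still answer from their inline tag.
        return switch (ty.tag()) {
            .u1, .u8, .i8 => .Int, // assumed example mapping
            else => @panic("TODO"),
        };
    }

A plain `Target` cannot support the first branch, which is why the parameter swap has to be threaded through every call site in these diffs at once.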
@@ -2555,8 +2588,7 @@ pub const Value = extern union { => {}, .Type => { - var buf: ToTypeBuffer = undefined; - return val.toType(&buf).hashWithHasher(hasher, mod); + return val.toType().hashWithHasher(hasher, mod); }, .Float => { // For hash/eql purposes, we treat floats as their IEEE integer representation. @@ -2588,7 +2620,7 @@ pub const Value = extern union { hash(slice.len, Type.usize, hasher, mod); }, - else => return hashPtr(val, hasher, mod.getTarget()), + else => return hashPtr(val, hasher, mod), }, .Array, .Vector => { const len = ty.arrayLen(); @@ -2648,7 +2680,7 @@ pub const Value = extern union { .Enum => { var enum_space: Payload.U64 = undefined; const int_val = val.enumToInt(ty, &enum_space); - hashInt(int_val, hasher, mod.getTarget()); + hashInt(int_val, hasher, mod); }, .Union => { const union_obj = val.cast(Payload.Union).?.data; @@ -2691,7 +2723,7 @@ pub const Value = extern union { // The value is runtime-known and shouldn't affect the hash. if (val.tag() == .runtime_value) return; - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Opaque => unreachable, // Cannot hash opaque types .Void, .NoReturn, @@ -2700,8 +2732,7 @@ pub const Value = extern union { .Struct, // It sure would be nice to do something clever with structs. => |zig_type_tag| std.hash.autoHash(hasher, zig_type_tag), .Type => { - var buf: ToTypeBuffer = undefined; - val.toType(&buf).hashWithHasher(hasher, mod); + val.toType().hashWithHasher(hasher, mod); }, .Float, .ComptimeFloat => std.hash.autoHash(hasher, @bitCast(u128, val.toFloat(f128))), .Bool, .Int, .ComptimeInt, .Pointer, .Fn => switch (val.tag()) { @@ -2711,7 +2742,7 @@ pub const Value = extern union { const ptr_ty = ty.slicePtrFieldType(&ptr_buf); slice.ptr.hashUncoerced(ptr_ty, hasher, mod); }, - else => val.hashPtr(hasher, mod.getTarget()), + else => val.hashPtr(hasher, mod), }, .Array, .Vector => { const len = ty.arrayLen(); @@ -2821,16 +2852,16 @@ pub const Value = extern union { }; } - fn hashInt(int_val: Value, hasher: *std.hash.Wyhash, target: Target) void { + fn hashInt(int_val: Value, hasher: *std.hash.Wyhash, mod: *const Module) void { var buffer: BigIntSpace = undefined; - const big = int_val.toBigInt(&buffer, target); + const big = int_val.toBigInt(&buffer, mod); std.hash.autoHash(hasher, big.positive); for (big.limbs) |limb| { std.hash.autoHash(hasher, limb); } } - fn hashPtr(ptr_val: Value, hasher: *std.hash.Wyhash, target: Target) void { + fn hashPtr(ptr_val: Value, hasher: *std.hash.Wyhash, mod: *const Module) void { switch (ptr_val.tag()) { .decl_ref, .decl_ref_mut, @@ -2847,25 +2878,25 @@ pub const Value = extern union { .elem_ptr => { const elem_ptr = ptr_val.castTag(.elem_ptr).?.data; - hashPtr(elem_ptr.array_ptr, hasher, target); + hashPtr(elem_ptr.array_ptr, hasher, mod); std.hash.autoHash(hasher, Value.Tag.elem_ptr); std.hash.autoHash(hasher, elem_ptr.index); }, .field_ptr => { const field_ptr = ptr_val.castTag(.field_ptr).?.data; std.hash.autoHash(hasher, Value.Tag.field_ptr); - hashPtr(field_ptr.container_ptr, hasher, target); + hashPtr(field_ptr.container_ptr, hasher, mod); std.hash.autoHash(hasher, field_ptr.field_index); }, .eu_payload_ptr => { const err_union_ptr = ptr_val.castTag(.eu_payload_ptr).?.data; std.hash.autoHash(hasher, Value.Tag.eu_payload_ptr); - hashPtr(err_union_ptr.container_ptr, hasher, target); + hashPtr(err_union_ptr.container_ptr, hasher, mod); }, .opt_payload_ptr => { const opt_ptr = ptr_val.castTag(.opt_payload_ptr).?.data; std.hash.autoHash(hasher, Value.Tag.opt_payload_ptr); - 
hashPtr(opt_ptr.container_ptr, hasher, target); + hashPtr(opt_ptr.container_ptr, hasher, mod); }, .zero, @@ -2880,7 +2911,7 @@ pub const Value = extern union { .the_only_possible_value, .lazy_align, .lazy_size, - => return hashInt(ptr_val, hasher, target), + => return hashInt(ptr_val, hasher, mod), else => unreachable, } @@ -2897,11 +2928,11 @@ pub const Value = extern union { pub fn sliceLen(val: Value, mod: *Module) u64 { return switch (val.tag()) { - .slice => val.castTag(.slice).?.data.len.toUnsignedInt(mod.getTarget()), + .slice => val.castTag(.slice).?.data.len.toUnsignedInt(mod), .decl_ref => { const decl_index = val.castTag(.decl_ref).?.data; const decl = mod.declPtr(decl_index); - if (decl.ty.zigTypeTag() == .Array) { + if (decl.ty.zigTypeTag(mod) == .Array) { return decl.ty.arrayLen(); } else { return 1; @@ -2910,7 +2941,7 @@ pub const Value = extern union { .decl_ref_mut => { const decl_index = val.castTag(.decl_ref_mut).?.data.decl_index; const decl = mod.declPtr(decl_index); - if (decl.ty.zigTypeTag() == .Array) { + if (decl.ty.zigTypeTag(mod) == .Array) { return decl.ty.arrayLen(); } else { return 1; @@ -2918,7 +2949,7 @@ pub const Value = extern union { }, .comptime_field_ptr => { const payload = val.castTag(.comptime_field_ptr).?.data; - if (payload.field_ty.zigTypeTag() == .Array) { + if (payload.field_ty.zigTypeTag(mod) == .Array) { return payload.field_ty.arrayLen(); } else { return 1; @@ -3003,7 +3034,7 @@ pub const Value = extern union { if (data.container_ptr.pointerDecl()) |decl_index| { const container_decl = mod.declPtr(decl_index); const field_type = data.container_ty.structFieldType(data.field_index); - const field_val = container_decl.val.fieldValue(field_type, data.field_index); + const field_val = container_decl.val.fieldValue(field_type, mod, data.field_index); return field_val.elemValueAdvanced(mod, index, arena, buffer); } else unreachable; }, @@ -3032,10 +3063,7 @@ pub const Value = extern union { } /// Returns true if a Value is backed by a variable - pub fn isVariable( - val: Value, - mod: *Module, - ) bool { + pub fn isVariable(val: Value, mod: *Module) bool { return switch (val.tag()) { .slice => val.castTag(.slice).?.data.ptr.isVariable(mod), .comptime_field_ptr => val.castTag(.comptime_field_ptr).?.data.field_val.isVariable(mod), @@ -3119,7 +3147,7 @@ pub const Value = extern union { }; } - pub fn fieldValue(val: Value, ty: Type, index: usize) Value { + pub fn fieldValue(val: Value, ty: Type, mod: *const Module, index: usize) Value { switch (val.tag()) { .aggregate => { const field_values = val.castTag(.aggregate).?.data; @@ -3131,14 +3159,14 @@ pub const Value = extern union { return payload.val; }, - .the_only_possible_value => return ty.onePossibleValue().?, + .the_only_possible_value => return ty.onePossibleValue(mod).?, .empty_struct_value => { if (ty.isSimpleTupleOrAnonStruct()) { const tuple = ty.tupleFields(); return tuple.values[index]; } - if (ty.structFieldValueComptime(index)) |some| { + if (ty.structFieldValueComptime(mod, index)) |some| { return some; } unreachable; @@ -3165,7 +3193,7 @@ pub const Value = extern union { index: usize, mod: *Module, ) Allocator.Error!Value { - const elem_ty = ty.elemType2(); + const elem_ty = ty.elemType2(mod); const ptr_val = switch (val.tag()) { .slice => val.castTag(.slice).?.data.ptr, else => val, @@ -3207,7 +3235,7 @@ pub const Value = extern union { switch (self.tag()) { .slice => { const payload = self.castTag(.slice).?; - const len = payload.data.len.toUnsignedInt(mod.getTarget()); + const len = 
payload.data.len.toUnsignedInt(mod); var elem_value_buf: ElemValueBuffer = undefined; var i: usize = 0; @@ -3233,7 +3261,7 @@ pub const Value = extern union { /// Asserts the value is not undefined and not unreachable. /// Integer value 0 is considered null because of C pointers. - pub fn isNull(self: Value) bool { + pub fn isNull(self: Value, mod: *const Module) bool { return switch (self.tag()) { .null_value => true, .opt_payload => false, @@ -3254,7 +3282,7 @@ pub const Value = extern union { .int_i64, .int_big_positive, .int_big_negative, - => self.orderAgainstZero().compare(.eq), + => self.orderAgainstZero(mod).compare(.eq), .undef => unreachable, .unreachable_value => unreachable, @@ -3300,8 +3328,8 @@ pub const Value = extern union { } /// Value of the optional, null if optional has no payload. - pub fn optionalValue(val: Value) ?Value { - if (val.isNull()) return null; + pub fn optionalValue(val: Value, mod: *const Module) ?Value { + if (val.isNull(mod)) return null; // Valid for optional representation to be the direct value // and not use opt_payload. @@ -3333,20 +3361,20 @@ pub const Value = extern union { } pub fn intToFloatAdvanced(val: Value, arena: Allocator, int_ty: Type, float_ty: Type, mod: *Module, opt_sema: ?*Sema) !Value { - const target = mod.getTarget(); - if (int_ty.zigTypeTag() == .Vector) { + if (int_ty.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, int_ty.vectorLen()); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try intToFloatScalar(elem_val, arena, float_ty.scalarType(), target, opt_sema); + scalar.* = try intToFloatScalar(elem_val, arena, float_ty.scalarType(mod), mod, opt_sema); } return Value.Tag.aggregate.create(arena, result_data); } - return intToFloatScalar(val, arena, float_ty, target, opt_sema); + return intToFloatScalar(val, arena, float_ty, mod, opt_sema); } - pub fn intToFloatScalar(val: Value, arena: Allocator, float_ty: Type, target: Target, opt_sema: ?*Sema) !Value { + pub fn intToFloatScalar(val: Value, arena: Allocator, float_ty: Type, mod: *Module, opt_sema: ?*Sema) !Value { + const target = mod.getTarget(); switch (val.tag()) { .undef, .zero, .one => return val, .the_only_possible_value => return Value.initTag(.zero), // for i0, u0 @@ -3369,17 +3397,17 @@ pub const Value = extern union { .lazy_align => { const ty = val.castTag(.lazy_align).?.data; if (opt_sema) |sema| { - return intToFloatInner((try ty.abiAlignmentAdvanced(target, .{ .sema = sema })).scalar, arena, float_ty, target); + return intToFloatInner((try ty.abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar, arena, float_ty, target); } else { - return intToFloatInner(ty.abiAlignment(target), arena, float_ty, target); + return intToFloatInner(ty.abiAlignment(mod), arena, float_ty, target); } }, .lazy_size => { const ty = val.castTag(.lazy_size).?.data; if (opt_sema) |sema| { - return intToFloatInner((try ty.abiSizeAdvanced(target, .{ .sema = sema })).scalar, arena, float_ty, target); + return intToFloatInner((try ty.abiSizeAdvanced(mod, .{ .sema = sema })).scalar, arena, float_ty, target); } else { - return intToFloatInner(ty.abiSize(target), arena, float_ty, target); + return intToFloatInner(ty.abiSize(mod), arena, float_ty, target); } }, else => unreachable, @@ -3446,19 +3474,18 @@ pub const Value = extern union { arena: Allocator, mod: *Module, ) !Value { - const target = mod.getTarget(); - if (ty.zigTypeTag() == .Vector) { + if (ty.zigTypeTag(mod) == 
.Vector) { const result_data = try arena.alloc(Value, ty.vectorLen()); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try intAddSatScalar(lhs_elem, rhs_elem, ty.scalarType(), arena, target); + scalar.* = try intAddSatScalar(lhs_elem, rhs_elem, ty.scalarType(mod), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); } - return intAddSatScalar(lhs, rhs, ty, arena, target); + return intAddSatScalar(lhs, rhs, ty, arena, mod); } /// Supports integers only; asserts neither operand is undefined. @@ -3467,17 +3494,17 @@ pub const Value = extern union { rhs: Value, ty: Type, arena: Allocator, - target: Target, + mod: *Module, ) !Value { assert(!lhs.isUndef()); assert(!rhs.isUndef()); - const info = ty.intInfo(target); + const info = ty.intInfo(mod); var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, target); - const rhs_bigint = rhs.toBigInt(&rhs_space, target); + const lhs_bigint = lhs.toBigInt(&lhs_space, mod); + const rhs_bigint = rhs.toBigInt(&rhs_space, mod); const limbs = try arena.alloc( std.math.big.Limb, std.math.big.int.calcTwosCompLimbCount(info.bits), @@ -3495,19 +3522,18 @@ pub const Value = extern union { arena: Allocator, mod: *Module, ) !Value { - const target = mod.getTarget(); - if (ty.zigTypeTag() == .Vector) { + if (ty.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, ty.vectorLen()); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try intSubSatScalar(lhs_elem, rhs_elem, ty.scalarType(), arena, target); + scalar.* = try intSubSatScalar(lhs_elem, rhs_elem, ty.scalarType(mod), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); } - return intSubSatScalar(lhs, rhs, ty, arena, target); + return intSubSatScalar(lhs, rhs, ty, arena, mod); } /// Supports integers only; asserts neither operand is undefined. @@ -3516,17 +3542,17 @@ pub const Value = extern union { rhs: Value, ty: Type, arena: Allocator, - target: Target, + mod: *Module, ) !Value { assert(!lhs.isUndef()); assert(!rhs.isUndef()); - const info = ty.intInfo(target); + const info = ty.intInfo(mod); var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, target); - const rhs_bigint = rhs.toBigInt(&rhs_space, target); + const lhs_bigint = lhs.toBigInt(&lhs_space, mod); + const rhs_bigint = rhs.toBigInt(&rhs_space, mod); const limbs = try arena.alloc( std.math.big.Limb, std.math.big.int.calcTwosCompLimbCount(info.bits), @@ -3543,8 +3569,7 @@ pub const Value = extern union { arena: Allocator, mod: *Module, ) !OverflowArithmeticResult { - const target = mod.getTarget(); - if (ty.zigTypeTag() == .Vector) { + if (ty.zigTypeTag(mod) == .Vector) { const overflowed_data = try arena.alloc(Value, ty.vectorLen()); const result_data = try arena.alloc(Value, ty.vectorLen()); for (result_data, 0..) 
|*scalar, i| { @@ -3552,7 +3577,7 @@ pub const Value = extern union { var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - const of_math_result = try intMulWithOverflowScalar(lhs_elem, rhs_elem, ty.scalarType(), arena, target); + const of_math_result = try intMulWithOverflowScalar(lhs_elem, rhs_elem, ty.scalarType(mod), arena, mod); overflowed_data[i] = of_math_result.overflow_bit; scalar.* = of_math_result.wrapped_result; } @@ -3561,7 +3586,7 @@ pub const Value = extern union { .wrapped_result = try Value.Tag.aggregate.create(arena, result_data), }; } - return intMulWithOverflowScalar(lhs, rhs, ty, arena, target); + return intMulWithOverflowScalar(lhs, rhs, ty, arena, mod); } pub fn intMulWithOverflowScalar( @@ -3569,14 +3594,14 @@ pub const Value = extern union { rhs: Value, ty: Type, arena: Allocator, - target: Target, + mod: *Module, ) !OverflowArithmeticResult { - const info = ty.intInfo(target); + const info = ty.intInfo(mod); var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, target); - const rhs_bigint = rhs.toBigInt(&rhs_space, target); + const lhs_bigint = lhs.toBigInt(&lhs_space, mod); + const rhs_bigint = rhs.toBigInt(&rhs_space, mod); const limbs = try arena.alloc( std.math.big.Limb, lhs_bigint.limbs.len + rhs_bigint.limbs.len, @@ -3607,14 +3632,14 @@ pub const Value = extern union { arena: Allocator, mod: *Module, ) !Value { - if (ty.zigTypeTag() == .Vector) { + if (ty.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, ty.vectorLen()); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try numberMulWrapScalar(lhs_elem, rhs_elem, ty.scalarType(), arena, mod); + scalar.* = try numberMulWrapScalar(lhs_elem, rhs_elem, ty.scalarType(mod), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); } @@ -3631,7 +3656,7 @@ pub const Value = extern union { ) !Value { if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef); - if (ty.zigTypeTag() == .ComptimeInt) { + if (ty.zigTypeTag(mod) == .ComptimeInt) { return intMul(lhs, rhs, ty, arena, mod); } @@ -3651,19 +3676,18 @@ pub const Value = extern union { arena: Allocator, mod: *Module, ) !Value { - const target = mod.getTarget(); - if (ty.zigTypeTag() == .Vector) { + if (ty.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, ty.vectorLen()); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try intMulSatScalar(lhs_elem, rhs_elem, ty.scalarType(), arena, target); + scalar.* = try intMulSatScalar(lhs_elem, rhs_elem, ty.scalarType(mod), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); } - return intMulSatScalar(lhs, rhs, ty, arena, target); + return intMulSatScalar(lhs, rhs, ty, arena, mod); } /// Supports (vectors of) integers only; asserts neither operand is undefined. 
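Nearly every arithmetic routine in this file shares the shape being edited in these hunks: a vector wrapper that dispatches element-wise to a scalar worker. The patch only swaps `target` for `mod` inside that shape. Roughly, with `myOp`/`myOpScalar` as stand-in names (they do not exist in the tree) and assuming the file's existing `Allocator`, `Module`, `Value`, and `Type` imports:

// Stand-in names; the body mirrors intAddSat/intMulSat/etc. after this patch.
pub fn myOp(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value {
    if (ty.zigTypeTag(mod) == .Vector) {
        const result_data = try arena.alloc(Value, ty.vectorLen());
        for (result_data, 0..) |*scalar, i| {
            var lhs_buf: Value.ElemValueBuffer = undefined;
            var rhs_buf: Value.ElemValueBuffer = undefined;
            const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
            const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
            scalar.* = try myOpScalar(lhs_elem, rhs_elem, ty.scalarType(mod), arena, mod);
        }
        return Value.Tag.aggregate.create(arena, result_data);
    }
    return myOpScalar(lhs, rhs, ty, arena, mod);
}

The payoff lands in the scalar workers: they call `ty.intInfo(mod)` and `val.toBigInt(&space, mod)` directly, instead of every caller pre-computing `mod.getTarget()` and passing it down.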
@@ -3672,17 +3696,17 @@ pub const Value = extern union { rhs: Value, ty: Type, arena: Allocator, - target: Target, + mod: *Module, ) !Value { assert(!lhs.isUndef()); assert(!rhs.isUndef()); - const info = ty.intInfo(target); + const info = ty.intInfo(mod); var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, target); - const rhs_bigint = rhs.toBigInt(&rhs_space, target); + const lhs_bigint = lhs.toBigInt(&lhs_space, mod); + const rhs_bigint = rhs.toBigInt(&rhs_space, mod); const limbs = try arena.alloc( std.math.big.Limb, std.math.max( @@ -3702,24 +3726,24 @@ pub const Value = extern union { } /// Supports both floats and ints; handles undefined. - pub fn numberMax(lhs: Value, rhs: Value, target: Target) Value { + pub fn numberMax(lhs: Value, rhs: Value, mod: *Module) Value { if (lhs.isUndef() or rhs.isUndef()) return undef; if (lhs.isNan()) return rhs; if (rhs.isNan()) return lhs; - return switch (order(lhs, rhs, target)) { + return switch (order(lhs, rhs, mod)) { .lt => rhs, .gt, .eq => lhs, }; } /// Supports both floats and ints; handles undefined. - pub fn numberMin(lhs: Value, rhs: Value, target: Target) Value { + pub fn numberMin(lhs: Value, rhs: Value, mod: *Module) Value { if (lhs.isUndef() or rhs.isUndef()) return undef; if (lhs.isNan()) return rhs; if (rhs.isNan()) return lhs; - return switch (order(lhs, rhs, target)) { + return switch (order(lhs, rhs, mod)) { .lt => lhs, .gt, .eq => rhs, }; @@ -3727,24 +3751,23 @@ pub const Value = extern union { /// operands must be (vectors of) integers; handles undefined scalars. pub fn bitwiseNot(val: Value, ty: Type, arena: Allocator, mod: *Module) !Value { - const target = mod.getTarget(); - if (ty.zigTypeTag() == .Vector) { + if (ty.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, ty.vectorLen()); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try bitwiseNotScalar(elem_val, ty.scalarType(), arena, target); + scalar.* = try bitwiseNotScalar(elem_val, ty.scalarType(mod), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); } - return bitwiseNotScalar(val, ty, arena, target); + return bitwiseNotScalar(val, ty, arena, mod); } /// operands must be integers; handles undefined. - pub fn bitwiseNotScalar(val: Value, ty: Type, arena: Allocator, target: Target) !Value { + pub fn bitwiseNotScalar(val: Value, ty: Type, arena: Allocator, mod: *Module) !Value { if (val.isUndef()) return Value.initTag(.undef); - const info = ty.intInfo(target); + const info = ty.intInfo(mod); if (info.bits == 0) { return val; @@ -3753,7 +3776,7 @@ pub const Value = extern union { // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. var val_space: Value.BigIntSpace = undefined; - const val_bigint = val.toBigInt(&val_space, target); + const val_bigint = val.toBigInt(&val_space, mod); const limbs = try arena.alloc( std.math.big.Limb, std.math.big.int.calcTwosCompLimbCount(info.bits), @@ -3766,31 +3789,30 @@ pub const Value = extern union { /// operands must be (vectors of) integers; handles undefined scalars. pub fn bitwiseAnd(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { - const target = mod.getTarget(); - if (ty.zigTypeTag() == .Vector) { + if (ty.zigTypeTag(mod) == .Vector) { const result_data = try allocator.alloc(Value, ty.vectorLen()); for (result_data, 0..) 
|*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try bitwiseAndScalar(lhs_elem, rhs_elem, allocator, target); + scalar.* = try bitwiseAndScalar(lhs_elem, rhs_elem, allocator, mod); } return Value.Tag.aggregate.create(allocator, result_data); } - return bitwiseAndScalar(lhs, rhs, allocator, target); + return bitwiseAndScalar(lhs, rhs, allocator, mod); } /// operands must be integers; handles undefined. - pub fn bitwiseAndScalar(lhs: Value, rhs: Value, arena: Allocator, target: Target) !Value { + pub fn bitwiseAndScalar(lhs: Value, rhs: Value, arena: Allocator, mod: *Module) !Value { if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef); // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, target); - const rhs_bigint = rhs.toBigInt(&rhs_space, target); + const lhs_bigint = lhs.toBigInt(&lhs_space, mod); + const rhs_bigint = rhs.toBigInt(&rhs_space, mod); const limbs = try arena.alloc( std.math.big.Limb, // + 1 for negatives @@ -3803,14 +3825,14 @@ pub const Value = extern union { /// operands must be (vectors of) integers; handles undefined scalars. pub fn bitwiseNand(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value { - if (ty.zigTypeTag() == .Vector) { + if (ty.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, ty.vectorLen()); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try bitwiseNandScalar(lhs_elem, rhs_elem, ty.scalarType(), arena, mod); + scalar.* = try bitwiseNandScalar(lhs_elem, rhs_elem, ty.scalarType(mod), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); } @@ -3823,41 +3845,40 @@ pub const Value = extern union { const anded = try bitwiseAnd(lhs, rhs, ty, arena, mod); - const all_ones = if (ty.isSignedInt()) + const all_ones = if (ty.isSignedInt(mod)) try Value.Tag.int_i64.create(arena, -1) else - try ty.maxInt(arena, mod.getTarget()); + try ty.maxInt(arena, mod); return bitwiseXor(anded, all_ones, ty, arena, mod); } /// operands must be (vectors of) integers; handles undefined scalars. pub fn bitwiseOr(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { - const target = mod.getTarget(); - if (ty.zigTypeTag() == .Vector) { + if (ty.zigTypeTag(mod) == .Vector) { const result_data = try allocator.alloc(Value, ty.vectorLen()); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try bitwiseOrScalar(lhs_elem, rhs_elem, allocator, target); + scalar.* = try bitwiseOrScalar(lhs_elem, rhs_elem, allocator, mod); } return Value.Tag.aggregate.create(allocator, result_data); } - return bitwiseOrScalar(lhs, rhs, allocator, target); + return bitwiseOrScalar(lhs, rhs, allocator, mod); } /// operands must be integers; handles undefined. 
- pub fn bitwiseOrScalar(lhs: Value, rhs: Value, arena: Allocator, target: Target) !Value { + pub fn bitwiseOrScalar(lhs: Value, rhs: Value, arena: Allocator, mod: *Module) !Value { if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef); // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, target); - const rhs_bigint = rhs.toBigInt(&rhs_space, target); + const lhs_bigint = lhs.toBigInt(&lhs_space, mod); + const rhs_bigint = rhs.toBigInt(&rhs_space, mod); const limbs = try arena.alloc( std.math.big.Limb, std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len), @@ -3869,31 +3890,30 @@ pub const Value = extern union { /// operands must be (vectors of) integers; handles undefined scalars. pub fn bitwiseXor(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { - const target = mod.getTarget(); - if (ty.zigTypeTag() == .Vector) { + if (ty.zigTypeTag(mod) == .Vector) { const result_data = try allocator.alloc(Value, ty.vectorLen()); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try bitwiseXorScalar(lhs_elem, rhs_elem, allocator, target); + scalar.* = try bitwiseXorScalar(lhs_elem, rhs_elem, allocator, mod); } return Value.Tag.aggregate.create(allocator, result_data); } - return bitwiseXorScalar(lhs, rhs, allocator, target); + return bitwiseXorScalar(lhs, rhs, allocator, mod); } /// operands must be integers; handles undefined. - pub fn bitwiseXorScalar(lhs: Value, rhs: Value, arena: Allocator, target: Target) !Value { + pub fn bitwiseXorScalar(lhs: Value, rhs: Value, arena: Allocator, mod: *Module) !Value { if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef); // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, target); - const rhs_bigint = rhs.toBigInt(&rhs_space, target); + const lhs_bigint = lhs.toBigInt(&lhs_space, mod); + const rhs_bigint = rhs.toBigInt(&rhs_space, mod); const limbs = try arena.alloc( std.math.big.Limb, // + 1 for negatives @@ -3905,28 +3925,27 @@ pub const Value = extern union { } pub fn intDiv(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { - const target = mod.getTarget(); - if (ty.zigTypeTag() == .Vector) { + if (ty.zigTypeTag(mod) == .Vector) { const result_data = try allocator.alloc(Value, ty.vectorLen()); for (result_data, 0..) 
|*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try intDivScalar(lhs_elem, rhs_elem, allocator, target); + scalar.* = try intDivScalar(lhs_elem, rhs_elem, allocator, mod); } return Value.Tag.aggregate.create(allocator, result_data); } - return intDivScalar(lhs, rhs, allocator, target); + return intDivScalar(lhs, rhs, allocator, mod); } - pub fn intDivScalar(lhs: Value, rhs: Value, allocator: Allocator, target: Target) !Value { + pub fn intDivScalar(lhs: Value, rhs: Value, allocator: Allocator, mod: *Module) !Value { // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, target); - const rhs_bigint = rhs.toBigInt(&rhs_space, target); + const lhs_bigint = lhs.toBigInt(&lhs_space, mod); + const rhs_bigint = rhs.toBigInt(&rhs_space, mod); const limbs_q = try allocator.alloc( std.math.big.Limb, lhs_bigint.limbs.len, @@ -3946,28 +3965,27 @@ pub const Value = extern union { } pub fn intDivFloor(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { - const target = mod.getTarget(); - if (ty.zigTypeTag() == .Vector) { + if (ty.zigTypeTag(mod) == .Vector) { const result_data = try allocator.alloc(Value, ty.vectorLen()); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try intDivFloorScalar(lhs_elem, rhs_elem, allocator, target); + scalar.* = try intDivFloorScalar(lhs_elem, rhs_elem, allocator, mod); } return Value.Tag.aggregate.create(allocator, result_data); } - return intDivFloorScalar(lhs, rhs, allocator, target); + return intDivFloorScalar(lhs, rhs, allocator, mod); } - pub fn intDivFloorScalar(lhs: Value, rhs: Value, allocator: Allocator, target: Target) !Value { + pub fn intDivFloorScalar(lhs: Value, rhs: Value, allocator: Allocator, mod: *Module) !Value { // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, target); - const rhs_bigint = rhs.toBigInt(&rhs_space, target); + const lhs_bigint = lhs.toBigInt(&lhs_space, mod); + const rhs_bigint = rhs.toBigInt(&rhs_space, mod); const limbs_q = try allocator.alloc( std.math.big.Limb, lhs_bigint.limbs.len, @@ -3987,28 +4005,27 @@ pub const Value = extern union { } pub fn intMod(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { - const target = mod.getTarget(); - if (ty.zigTypeTag() == .Vector) { + if (ty.zigTypeTag(mod) == .Vector) { const result_data = try allocator.alloc(Value, ty.vectorLen()); for (result_data, 0..) 
|*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try intModScalar(lhs_elem, rhs_elem, allocator, target); + scalar.* = try intModScalar(lhs_elem, rhs_elem, allocator, mod); } return Value.Tag.aggregate.create(allocator, result_data); } - return intModScalar(lhs, rhs, allocator, target); + return intModScalar(lhs, rhs, allocator, mod); } - pub fn intModScalar(lhs: Value, rhs: Value, allocator: Allocator, target: Target) !Value { + pub fn intModScalar(lhs: Value, rhs: Value, allocator: Allocator, mod: *Module) !Value { // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, target); - const rhs_bigint = rhs.toBigInt(&rhs_space, target); + const lhs_bigint = lhs.toBigInt(&lhs_space, mod); + const rhs_bigint = rhs.toBigInt(&rhs_space, mod); const limbs_q = try allocator.alloc( std.math.big.Limb, lhs_bigint.limbs.len, @@ -4064,14 +4081,14 @@ pub const Value = extern union { pub fn floatRem(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); - if (float_type.zigTypeTag() == .Vector) { + if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try floatRemScalar(lhs_elem, rhs_elem, float_type.scalarType(), arena, target); + scalar.* = try floatRemScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -4111,14 +4128,14 @@ pub const Value = extern union { pub fn floatMod(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); - if (float_type.zigTypeTag() == .Vector) { + if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try floatModScalar(lhs_elem, rhs_elem, float_type.scalarType(), arena, target); + scalar.* = try floatModScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -4157,28 +4174,27 @@ pub const Value = extern union { } pub fn intMul(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { - const target = mod.getTarget(); - if (ty.zigTypeTag() == .Vector) { + if (ty.zigTypeTag(mod) == .Vector) { const result_data = try allocator.alloc(Value, ty.vectorLen()); for (result_data, 0..) 
|*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try intMulScalar(lhs_elem, rhs_elem, allocator, target); + scalar.* = try intMulScalar(lhs_elem, rhs_elem, allocator, mod); } return Value.Tag.aggregate.create(allocator, result_data); } - return intMulScalar(lhs, rhs, allocator, target); + return intMulScalar(lhs, rhs, allocator, mod); } - pub fn intMulScalar(lhs: Value, rhs: Value, allocator: Allocator, target: Target) !Value { + pub fn intMulScalar(lhs: Value, rhs: Value, allocator: Allocator, mod: *Module) !Value { // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, target); - const rhs_bigint = rhs.toBigInt(&rhs_space, target); + const lhs_bigint = lhs.toBigInt(&lhs_space, mod); + const rhs_bigint = rhs.toBigInt(&rhs_space, mod); const limbs = try allocator.alloc( std.math.big.Limb, lhs_bigint.limbs.len + rhs_bigint.limbs.len, @@ -4194,17 +4210,16 @@ pub const Value = extern union { } pub fn intTrunc(val: Value, ty: Type, allocator: Allocator, signedness: std.builtin.Signedness, bits: u16, mod: *Module) !Value { - const target = mod.getTarget(); - if (ty.zigTypeTag() == .Vector) { + if (ty.zigTypeTag(mod) == .Vector) { const result_data = try allocator.alloc(Value, ty.vectorLen()); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try intTruncScalar(elem_val, allocator, signedness, bits, target); + scalar.* = try intTruncScalar(elem_val, allocator, signedness, bits, mod); } return Value.Tag.aggregate.create(allocator, result_data); } - return intTruncScalar(val, allocator, signedness, bits, target); + return intTruncScalar(val, allocator, signedness, bits, mod); } /// This variant may vectorize on `bits`. Asserts that `bits` is a (vector of) `u16`. @@ -4216,26 +4231,25 @@ pub const Value = extern union { bits: Value, mod: *Module, ) !Value { - const target = mod.getTarget(); - if (ty.zigTypeTag() == .Vector) { + if (ty.zigTypeTag(mod) == .Vector) { const result_data = try allocator.alloc(Value, ty.vectorLen()); for (result_data, 0..) 
|*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); var bits_buf: Value.ElemValueBuffer = undefined; const bits_elem = bits.elemValueBuffer(mod, i, &bits_buf); - scalar.* = try intTruncScalar(elem_val, allocator, signedness, @intCast(u16, bits_elem.toUnsignedInt(target)), target); + scalar.* = try intTruncScalar(elem_val, allocator, signedness, @intCast(u16, bits_elem.toUnsignedInt(mod)), mod); } return Value.Tag.aggregate.create(allocator, result_data); } - return intTruncScalar(val, allocator, signedness, @intCast(u16, bits.toUnsignedInt(target)), target); + return intTruncScalar(val, allocator, signedness, @intCast(u16, bits.toUnsignedInt(mod)), mod); } - pub fn intTruncScalar(val: Value, allocator: Allocator, signedness: std.builtin.Signedness, bits: u16, target: Target) !Value { + pub fn intTruncScalar(val: Value, allocator: Allocator, signedness: std.builtin.Signedness, bits: u16, mod: *Module) !Value { if (bits == 0) return Value.zero; var val_space: Value.BigIntSpace = undefined; - const val_bigint = val.toBigInt(&val_space, target); + const val_bigint = val.toBigInt(&val_space, mod); const limbs = try allocator.alloc( std.math.big.Limb, @@ -4248,27 +4262,26 @@ pub const Value = extern union { } pub fn shl(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { - const target = mod.getTarget(); - if (ty.zigTypeTag() == .Vector) { + if (ty.zigTypeTag(mod) == .Vector) { const result_data = try allocator.alloc(Value, ty.vectorLen()); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try shlScalar(lhs_elem, rhs_elem, allocator, target); + scalar.* = try shlScalar(lhs_elem, rhs_elem, allocator, mod); } return Value.Tag.aggregate.create(allocator, result_data); } - return shlScalar(lhs, rhs, allocator, target); + return shlScalar(lhs, rhs, allocator, mod); } - pub fn shlScalar(lhs: Value, rhs: Value, allocator: Allocator, target: Target) !Value { + pub fn shlScalar(lhs: Value, rhs: Value, allocator: Allocator, mod: *Module) !Value { // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. var lhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, target); - const shift = @intCast(usize, rhs.toUnsignedInt(target)); + const lhs_bigint = lhs.toBigInt(&lhs_space, mod); + const shift = @intCast(usize, rhs.toUnsignedInt(mod)); const limbs = try allocator.alloc( std.math.big.Limb, lhs_bigint.limbs.len + (shift / (@sizeOf(std.math.big.Limb) * 8)) + 1, @@ -4289,8 +4302,7 @@ pub const Value = extern union { allocator: Allocator, mod: *Module, ) !OverflowArithmeticResult { - const target = mod.getTarget(); - if (ty.zigTypeTag() == .Vector) { + if (ty.zigTypeTag(mod) == .Vector) { const overflowed_data = try allocator.alloc(Value, ty.vectorLen()); const result_data = try allocator.alloc(Value, ty.vectorLen()); for (result_data, 0..) 
|*scalar, i| { @@ -4298,7 +4310,7 @@ pub const Value = extern union { var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - const of_math_result = try shlWithOverflowScalar(lhs_elem, rhs_elem, ty.scalarType(), allocator, target); + const of_math_result = try shlWithOverflowScalar(lhs_elem, rhs_elem, ty.scalarType(mod), allocator, mod); overflowed_data[i] = of_math_result.overflow_bit; scalar.* = of_math_result.wrapped_result; } @@ -4307,7 +4319,7 @@ pub const Value = extern union { .wrapped_result = try Value.Tag.aggregate.create(allocator, result_data), }; } - return shlWithOverflowScalar(lhs, rhs, ty, allocator, target); + return shlWithOverflowScalar(lhs, rhs, ty, allocator, mod); } pub fn shlWithOverflowScalar( @@ -4315,12 +4327,12 @@ pub const Value = extern union { rhs: Value, ty: Type, allocator: Allocator, - target: Target, + mod: *Module, ) !OverflowArithmeticResult { - const info = ty.intInfo(target); + const info = ty.intInfo(mod); var lhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, target); - const shift = @intCast(usize, rhs.toUnsignedInt(target)); + const lhs_bigint = lhs.toBigInt(&lhs_space, mod); + const shift = @intCast(usize, rhs.toUnsignedInt(mod)); const limbs = try allocator.alloc( std.math.big.Limb, lhs_bigint.limbs.len + (shift / (@sizeOf(std.math.big.Limb) * 8)) + 1, @@ -4348,19 +4360,18 @@ pub const Value = extern union { arena: Allocator, mod: *Module, ) !Value { - const target = mod.getTarget(); - if (ty.zigTypeTag() == .Vector) { + if (ty.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, ty.vectorLen()); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try shlSatScalar(lhs_elem, rhs_elem, ty.scalarType(), arena, target); + scalar.* = try shlSatScalar(lhs_elem, rhs_elem, ty.scalarType(mod), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); } - return shlSatScalar(lhs, rhs, ty, arena, target); + return shlSatScalar(lhs, rhs, ty, arena, mod); } pub fn shlSatScalar( @@ -4368,15 +4379,15 @@ pub const Value = extern union { rhs: Value, ty: Type, arena: Allocator, - target: Target, + mod: *Module, ) !Value { // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. - const info = ty.intInfo(target); + const info = ty.intInfo(mod); var lhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, target); - const shift = @intCast(usize, rhs.toUnsignedInt(target)); + const lhs_bigint = lhs.toBigInt(&lhs_space, mod); + const shift = @intCast(usize, rhs.toUnsignedInt(mod)); const limbs = try arena.alloc( std.math.big.Limb, std.math.big.int.calcTwosCompLimbCount(info.bits) + 1, @@ -4397,14 +4408,14 @@ pub const Value = extern union { arena: Allocator, mod: *Module, ) !Value { - if (ty.zigTypeTag() == .Vector) { + if (ty.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, ty.vectorLen()); for (result_data, 0..) 
|*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try shlTruncScalar(lhs_elem, rhs_elem, ty.scalarType(), arena, mod); + scalar.* = try shlTruncScalar(lhs_elem, rhs_elem, ty.scalarType(mod), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); } @@ -4419,33 +4430,32 @@ pub const Value = extern union { mod: *Module, ) !Value { const shifted = try lhs.shl(rhs, ty, arena, mod); - const int_info = ty.intInfo(mod.getTarget()); + const int_info = ty.intInfo(mod); const truncated = try shifted.intTrunc(ty, arena, int_info.signedness, int_info.bits, mod); return truncated; } pub fn shr(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { - const target = mod.getTarget(); - if (ty.zigTypeTag() == .Vector) { + if (ty.zigTypeTag(mod) == .Vector) { const result_data = try allocator.alloc(Value, ty.vectorLen()); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try shrScalar(lhs_elem, rhs_elem, allocator, target); + scalar.* = try shrScalar(lhs_elem, rhs_elem, allocator, mod); } return Value.Tag.aggregate.create(allocator, result_data); } - return shrScalar(lhs, rhs, allocator, target); + return shrScalar(lhs, rhs, allocator, mod); } - pub fn shrScalar(lhs: Value, rhs: Value, allocator: Allocator, target: Target) !Value { + pub fn shrScalar(lhs: Value, rhs: Value, allocator: Allocator, mod: *Module) !Value { // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. var lhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, target); - const shift = @intCast(usize, rhs.toUnsignedInt(target)); + const lhs_bigint = lhs.toBigInt(&lhs_space, mod); + const shift = @intCast(usize, rhs.toUnsignedInt(mod)); const result_limbs = lhs_bigint.limbs.len -| (shift / (@sizeOf(std.math.big.Limb) * 8)); if (result_limbs == 0) { @@ -4478,12 +4488,12 @@ pub const Value = extern union { mod: *Module, ) !Value { const target = mod.getTarget(); - if (float_type.zigTypeTag() == .Vector) { + if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try floatNegScalar(elem_val, float_type.scalarType(), arena, target); + scalar.* = try floatNegScalar(elem_val, float_type.scalarType(mod), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -4514,14 +4524,14 @@ pub const Value = extern union { mod: *Module, ) !Value { const target = mod.getTarget(); - if (float_type.zigTypeTag() == .Vector) { + if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data, 0..) 
|*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try floatDivScalar(lhs_elem, rhs_elem, float_type.scalarType(), arena, target); + scalar.* = try floatDivScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -4573,14 +4583,14 @@ pub const Value = extern union { mod: *Module, ) !Value { const target = mod.getTarget(); - if (float_type.zigTypeTag() == .Vector) { + if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try floatDivFloorScalar(lhs_elem, rhs_elem, float_type.scalarType(), arena, target); + scalar.* = try floatDivFloorScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -4632,14 +4642,14 @@ pub const Value = extern union { mod: *Module, ) !Value { const target = mod.getTarget(); - if (float_type.zigTypeTag() == .Vector) { + if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try floatDivTruncScalar(lhs_elem, rhs_elem, float_type.scalarType(), arena, target); + scalar.* = try floatDivTruncScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -4691,14 +4701,14 @@ pub const Value = extern union { mod: *Module, ) !Value { const target = mod.getTarget(); - if (float_type.zigTypeTag() == .Vector) { + if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try floatMulScalar(lhs_elem, rhs_elem, float_type.scalarType(), arena, target); + scalar.* = try floatMulScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -4744,12 +4754,12 @@ pub const Value = extern union { pub fn sqrt(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); - if (float_type.zigTypeTag() == .Vector) { + if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data, 0..) 
|*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try sqrtScalar(elem_val, float_type.scalarType(), arena, target); + scalar.* = try sqrtScalar(elem_val, float_type.scalarType(mod), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -4784,12 +4794,12 @@ pub const Value = extern union { pub fn sin(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); - if (float_type.zigTypeTag() == .Vector) { + if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try sinScalar(elem_val, float_type.scalarType(), arena, target); + scalar.* = try sinScalar(elem_val, float_type.scalarType(mod), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -4824,12 +4834,12 @@ pub const Value = extern union { pub fn cos(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); - if (float_type.zigTypeTag() == .Vector) { + if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try cosScalar(elem_val, float_type.scalarType(), arena, target); + scalar.* = try cosScalar(elem_val, float_type.scalarType(mod), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -4864,12 +4874,12 @@ pub const Value = extern union { pub fn tan(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); - if (float_type.zigTypeTag() == .Vector) { + if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try tanScalar(elem_val, float_type.scalarType(), arena, target); + scalar.* = try tanScalar(elem_val, float_type.scalarType(mod), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -4904,12 +4914,12 @@ pub const Value = extern union { pub fn exp(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); - if (float_type.zigTypeTag() == .Vector) { + if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try expScalar(elem_val, float_type.scalarType(), arena, target); + scalar.* = try expScalar(elem_val, float_type.scalarType(mod), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -4944,12 +4954,12 @@ pub const Value = extern union { pub fn exp2(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); - if (float_type.zigTypeTag() == .Vector) { + if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data, 0..) 
|*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try exp2Scalar(elem_val, float_type.scalarType(), arena, target); + scalar.* = try exp2Scalar(elem_val, float_type.scalarType(mod), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -4984,12 +4994,12 @@ pub const Value = extern union { pub fn log(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); - if (float_type.zigTypeTag() == .Vector) { + if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try logScalar(elem_val, float_type.scalarType(), arena, target); + scalar.* = try logScalar(elem_val, float_type.scalarType(mod), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -5024,12 +5034,12 @@ pub const Value = extern union { pub fn log2(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); - if (float_type.zigTypeTag() == .Vector) { + if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try log2Scalar(elem_val, float_type.scalarType(), arena, target); + scalar.* = try log2Scalar(elem_val, float_type.scalarType(mod), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -5064,12 +5074,12 @@ pub const Value = extern union { pub fn log10(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); - if (float_type.zigTypeTag() == .Vector) { + if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try log10Scalar(elem_val, float_type.scalarType(), arena, target); + scalar.* = try log10Scalar(elem_val, float_type.scalarType(mod), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -5104,12 +5114,12 @@ pub const Value = extern union { pub fn fabs(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); - if (float_type.zigTypeTag() == .Vector) { + if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try fabsScalar(elem_val, float_type.scalarType(), arena, target); + scalar.* = try fabsScalar(elem_val, float_type.scalarType(mod), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -5144,12 +5154,12 @@ pub const Value = extern union { pub fn floor(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); - if (float_type.zigTypeTag() == .Vector) { + if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data, 0..) 
|*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try floorScalar(elem_val, float_type.scalarType(), arena, target); + scalar.* = try floorScalar(elem_val, float_type.scalarType(mod), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -5184,12 +5194,12 @@ pub const Value = extern union { pub fn ceil(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); - if (float_type.zigTypeTag() == .Vector) { + if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try ceilScalar(elem_val, float_type.scalarType(), arena, target); + scalar.* = try ceilScalar(elem_val, float_type.scalarType(mod), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -5224,12 +5234,12 @@ pub const Value = extern union { pub fn round(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); - if (float_type.zigTypeTag() == .Vector) { + if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try roundScalar(elem_val, float_type.scalarType(), arena, target); + scalar.* = try roundScalar(elem_val, float_type.scalarType(mod), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -5264,12 +5274,12 @@ pub const Value = extern union { pub fn trunc(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); - if (float_type.zigTypeTag() == .Vector) { + if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try truncScalar(elem_val, float_type.scalarType(), arena, target); + scalar.* = try truncScalar(elem_val, float_type.scalarType(mod), arena, target); } return Value.Tag.aggregate.create(arena, result_data); } @@ -5311,7 +5321,7 @@ pub const Value = extern union { mod: *Module, ) !Value { const target = mod.getTarget(); - if (float_type.zigTypeTag() == .Vector) { + if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen()); for (result_data, 0..) |*scalar, i| { var mulend1_buf: Value.ElemValueBuffer = undefined; @@ -5321,7 +5331,7 @@ pub const Value = extern union { var addend_buf: Value.ElemValueBuffer = undefined; const addend_elem = addend.elemValueBuffer(mod, i, &addend_buf); scalar.* = try mulAddScalar( - float_type.scalarType(), + float_type.scalarType(mod), mulend1_elem, mulend2_elem, addend_elem, @@ -5380,8 +5390,7 @@ pub const Value = extern union { /// If the value is represented in-memory as a series of bytes that all /// have the same value, return that byte value, otherwise null. 
pub fn hasRepeatedByteRepr(val: Value, ty: Type, mod: *Module, value_buffer: *Payload.U64) !?Value { - const target = mod.getTarget(); - const abi_size = std.math.cast(usize, ty.abiSize(target)) orelse return null; + const abi_size = std.math.cast(usize, ty.abiSize(mod)) orelse return null; assert(abi_size >= 1); const byte_buffer = try mod.gpa.alloc(u8, abi_size); defer mod.gpa.free(byte_buffer); @@ -5549,16 +5558,6 @@ pub const Value = extern union { data: Type, }; - pub const IntType = struct { - pub const base_tag = Tag.int_type; - - base: Payload = Payload{ .tag = base_tag }, - data: struct { - bits: u16, - signed: bool, - }, - }; - pub const Float_16 = struct { pub const base_tag = Tag.float_16; @@ -5659,7 +5658,10 @@ pub const Value = extern union { pub const zero = initTag(.zero); pub const one = initTag(.one); - pub const negative_one: Value = .{ .ptr_otherwise = &negative_one_payload.base }; + pub const negative_one: Value = .{ + .ip_index = .none, + .legacy = .{ .ptr_otherwise = &negative_one_payload.base }, + }; pub const undef = initTag(.undef); pub const @"void" = initTag(.void_value); pub const @"null" = initTag(.null_value); -- cgit v1.2.3 From 00f82f1c46126f1fc6655c6142ef16e8e5afbf4e Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 2 May 2023 14:37:33 -0700 Subject: stage2: add `interned` AIR tag This required additionally passing the `InternPool` into some AIR methods. Also, implement `Type.isNoReturn` for interned types. --- src/Air.zig | 58 +++++---- src/InternPool.zig | 6 + src/Liveness.zig | 18 ++- src/Liveness/Verify.zig | 10 +- src/Module.zig | 3 +- src/Sema.zig | 92 ++----------- src/arch/aarch64/CodeGen.zig | 172 +++++++++++++------------ src/arch/arm/CodeGen.zig | 166 +++++++++++++----------- src/arch/riscv64/CodeGen.zig | 80 +++++++----- src/arch/sparc64/CodeGen.zig | 136 +++++++++++--------- src/arch/wasm/CodeGen.zig | 240 ++++++++++++++++++---------------- src/arch/x86_64/CodeGen.zig | 286 +++++++++++++++++++++-------------------- src/codegen/c.zig | 268 ++++++++++++++++++++------------------ src/codegen/llvm.zig | 299 ++++++++++++++++++++++--------------------- src/codegen/spirv.zig | 119 +++++++++-------- src/print_air.zig | 18 ++- src/type.zig | 40 ++++-- 17 files changed, 1050 insertions(+), 961 deletions(-) (limited to 'src/arch') diff --git a/src/Air.zig b/src/Air.zig index b60e8eda9d..be3ae119e4 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -11,6 +11,7 @@ const Air = @This(); const Value = @import("value.zig").Value; const Type = @import("type.zig").Type; const InternPool = @import("InternPool.zig"); +const Module = @import("Module.zig"); instructions: std.MultiArrayList(Inst).Slice, /// The meaning of this data is determined by `Inst.Tag` value. @@ -401,6 +402,9 @@ pub const Inst = struct { constant, /// A comptime-known type. Uses the `ty` field. const_ty, + /// A comptime-known value via an index into the InternPool. + /// Uses the `interned` field. + interned, /// Notes the beginning of a source code statement and marks the line and column. /// Result type is always void. /// Uses the `dbg_stmt` field. 
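As context for the hunks that follow: the new `interned` tag carries an `InternPool.Index` directly in its instruction data (see the `Data` union hunk below), so a comptime-known value can be referenced without a `constant` entry backed by `air.values`. This is also why `typeOf` and `typeOfIndex` gain an `InternPool` parameter in this commit: resolving the result type of an `interned` instruction requires pool access. A minimal sketch of the lookup pattern the commit uses, assuming only the accessors visible in these hunks (`indexToKey`, `typeOf`, `toType`); the helper name `typeOfInterned` is illustrative, not part of the patch:

fn typeOfInterned(air: Air, ip: InternPool, inst: Air.Inst.Index) Type {
    // The instruction's payload is an InternPool.Index, so the type is
    // recovered from the pool rather than from a legacy Value payload.
    const datas = air.instructions.items(.data);
    return ip.indexToKey(datas[inst].interned).typeOf().toType();
}
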
@@ -928,6 +932,7 @@ pub const Inst = struct { pub const Data = union { no_op: void, un_op: Ref, + interned: InternPool.Index, bin_op: struct { lhs: Ref, @@ -1147,18 +1152,15 @@ pub fn getMainBody(air: Air) []const Air.Inst.Index { return air.extra[extra.end..][0..extra.data.body_len]; } -pub fn typeOf(air: Air, inst: Air.Inst.Ref) Type { +pub fn typeOf(air: Air, inst: Air.Inst.Ref, ip: InternPool) Type { const ref_int = @enumToInt(inst); if (ref_int < InternPool.static_keys.len) { - return .{ - .ip_index = InternPool.static_keys[ref_int].typeOf(), - .legacy = undefined, - }; + return InternPool.static_keys[ref_int].typeOf().toType(); } - return air.typeOfIndex(ref_int - ref_start_index); + return air.typeOfIndex(ref_int - ref_start_index, ip); } -pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type { +pub fn typeOfIndex(air: Air, inst: Air.Inst.Index, ip: InternPool) Type { const datas = air.instructions.items(.data); switch (air.instructions.items(.tag)[inst]) { .add, @@ -1200,7 +1202,7 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type { .div_exact_optimized, .rem_optimized, .mod_optimized, - => return air.typeOf(datas[inst].bin_op.lhs), + => return air.typeOf(datas[inst].bin_op.lhs, ip), .sqrt, .sin, @@ -1218,7 +1220,7 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type { .trunc_float, .neg, .neg_optimized, - => return air.typeOf(datas[inst].un_op), + => return air.typeOf(datas[inst].un_op, ip), .cmp_lt, .cmp_lte, @@ -1280,6 +1282,8 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type { .try_ptr, => return air.getRefType(datas[inst].ty_pl.ty), + .interned => return ip.indexToKey(datas[inst].interned).typeOf().toType(), + .not, .bitcast, .load, @@ -1371,33 +1375,33 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type { .tag_name, .error_name => return Type.initTag(.const_slice_u8_sentinel_0), .call, .call_always_tail, .call_never_tail, .call_never_inline => { - const callee_ty = air.typeOf(datas[inst].pl_op.operand); + const callee_ty = air.typeOf(datas[inst].pl_op.operand, ip); return callee_ty.fnReturnType(); }, .slice_elem_val, .ptr_elem_val, .array_elem_val => { - const ptr_ty = air.typeOf(datas[inst].bin_op.lhs); + const ptr_ty = air.typeOf(datas[inst].bin_op.lhs, ip); return ptr_ty.elemType(); }, .atomic_load => { - const ptr_ty = air.typeOf(datas[inst].atomic_load.ptr); + const ptr_ty = air.typeOf(datas[inst].atomic_load.ptr, ip); return ptr_ty.elemType(); }, .atomic_rmw => { - const ptr_ty = air.typeOf(datas[inst].pl_op.operand); + const ptr_ty = air.typeOf(datas[inst].pl_op.operand, ip); return ptr_ty.elemType(); }, - .reduce, .reduce_optimized => return air.typeOf(datas[inst].reduce.operand).childType(), + .reduce, .reduce_optimized => return air.typeOf(datas[inst].reduce.operand, ip).childType(), - .mul_add => return air.typeOf(datas[inst].pl_op.operand), + .mul_add => return air.typeOf(datas[inst].pl_op.operand, ip), .select => { const extra = air.extraData(Air.Bin, datas[inst].pl_op.payload).data; - return air.typeOf(extra.lhs); + return air.typeOf(extra.lhs, ip); }, .@"try" => { - const err_union_ty = air.typeOf(datas[inst].pl_op.operand); + const err_union_ty = air.typeOf(datas[inst].pl_op.operand, ip); return err_union_ty.errorUnionPayload(); }, @@ -1465,7 +1469,7 @@ pub fn refToIndex(inst: Air.Inst.Ref) ?Air.Inst.Index { } /// Returns `null` if runtime-known. 
-pub fn value(air: Air, inst: Air.Inst.Ref, mod: *const @import("Module.zig")) ?Value { +pub fn value(air: Air, inst: Air.Inst.Ref, mod: *const Module) ?Value { const ref_int = @enumToInt(inst); if (ref_int < ref_start_index) { const ip_index = @intToEnum(InternPool.Index, ref_int); @@ -1476,7 +1480,7 @@ pub fn value(air: Air, inst: Air.Inst.Ref, mod: *const @import("Module.zig")) ?V switch (air.instructions.items(.tag)[inst_index]) { .constant => return air.values[air_datas[inst_index].ty_pl.payload], .const_ty => unreachable, - else => return air.typeOfIndex(inst_index).onePossibleValue(mod), + else => return air.typeOfIndex(inst_index, mod.intern_pool).onePossibleValue(mod), } } @@ -1489,10 +1493,11 @@ pub fn nullTerminatedString(air: Air, index: usize) [:0]const u8 { return bytes[0..end :0]; } -/// Returns whether the given instruction must always be lowered, for instance because it can cause -/// side effects. If an instruction does not need to be lowered, and Liveness determines its result -/// is unused, backends should avoid lowering it. -pub fn mustLower(air: Air, inst: Air.Inst.Index) bool { +/// Returns whether the given instruction must always be lowered, for instance +/// because it can cause side effects. If an instruction does not need to be +/// lowered, and Liveness determines its result is unused, backends should +/// avoid lowering it. +pub fn mustLower(air: Air, inst: Air.Inst.Index, ip: InternPool) bool { const data = air.instructions.items(.data)[inst]; return switch (air.instructions.items(.tag)[inst]) { .arg, @@ -1631,6 +1636,7 @@ pub fn mustLower(air: Air, inst: Air.Inst.Index) bool { .cmp_vector_optimized, .constant, .const_ty, + .interned, .is_null, .is_non_null, .is_null_ptr, @@ -1699,8 +1705,8 @@ pub fn mustLower(air: Air, inst: Air.Inst.Index) bool { => false, .assembly => @truncate(u1, air.extraData(Air.Asm, data.ty_pl.payload).data.flags >> 31) != 0, - .load => air.typeOf(data.ty_op.operand).isVolatilePtr(), - .slice_elem_val, .ptr_elem_val => air.typeOf(data.bin_op.lhs).isVolatilePtr(), - .atomic_load => air.typeOf(data.atomic_load.ptr).isVolatilePtr(), + .load => air.typeOf(data.ty_op.operand, ip).isVolatilePtr(), + .slice_elem_val, .ptr_elem_val => air.typeOf(data.bin_op.lhs, ip).isVolatilePtr(), + .atomic_load => air.typeOf(data.atomic_load.ptr, ip).isVolatilePtr(), }; } diff --git a/src/InternPool.zig b/src/InternPool.zig index cc3f0c1e4b..eadaf0da5e 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -241,6 +241,11 @@ pub const Item = struct { /// When adding a tag to this enum, consider adding a corresponding entry to /// `primitives` in AstGen.zig. pub const Index = enum(u32) { + pub const first_type: Index = .u1_type; + pub const last_type: Index = .empty_struct_type; + pub const first_value: Index = .undef; + pub const last_value: Index = .empty_struct; + u1_type, u8_type, i8_type, @@ -329,6 +334,7 @@ pub const Index = enum(u32) { bool_false, /// `.{}` (untyped) empty_struct, + /// Used for generic parameters where the type and value /// is not known until generic function instantiation. 
generic_poison, diff --git a/src/Liveness.zig b/src/Liveness.zig index 45d0705008..01fbee9e36 100644 --- a/src/Liveness.zig +++ b/src/Liveness.zig @@ -131,7 +131,7 @@ fn LivenessPassData(comptime pass: LivenessPass) type { }; } -pub fn analyze(gpa: Allocator, air: Air) Allocator.Error!Liveness { +pub fn analyze(gpa: Allocator, air: Air, intern_pool: *const InternPool) Allocator.Error!Liveness { const tracy = trace(@src()); defer tracy.end(); @@ -144,6 +144,7 @@ pub fn analyze(gpa: Allocator, air: Air) Allocator.Error!Liveness { ), .extra = .{}, .special = .{}, + .intern_pool = intern_pool, }; errdefer gpa.free(a.tomb_bits); errdefer a.special.deinit(gpa); @@ -322,6 +323,7 @@ pub fn categorizeOperand( .ret_ptr, .constant, .const_ty, + .interned, .trap, .breakpoint, .dbg_stmt, @@ -820,6 +822,7 @@ pub const BigTomb = struct { const Analysis = struct { gpa: Allocator, air: Air, + intern_pool: *const InternPool, tomb_bits: []usize, special: std.AutoHashMapUnmanaged(Air.Inst.Index, u32), extra: std.ArrayListUnmanaged(u32), @@ -971,6 +974,7 @@ fn analyzeInst( .constant, .const_ty, + .interned, => unreachable, .trap, @@ -1255,6 +1259,7 @@ fn analyzeOperands( ) Allocator.Error!void { const gpa = a.gpa; const inst_tags = a.air.instructions.items(.tag); + const ip = a.intern_pool; switch (pass) { .loop_analysis => { @@ -1265,7 +1270,7 @@ fn analyzeOperands( // Don't compute any liveness for constants switch (inst_tags[operand]) { - .constant, .const_ty => continue, + .constant, .const_ty, .interned => continue, else => {}, } @@ -1290,7 +1295,7 @@ fn analyzeOperands( // If our result is unused and the instruction doesn't need to be lowered, backends will // skip the lowering of this instruction, so we don't want to record uses of operands. // That way, we can mark as many instructions as possible unused. - if (!immediate_death or a.air.mustLower(inst)) { + if (!immediate_death or a.air.mustLower(inst, ip.*)) { // Note that it's important we iterate over the operands backwards, so that if a dying // operand is used multiple times we mark its last use as its death. var i = operands.len; @@ -1301,7 +1306,7 @@ fn analyzeOperands( // Don't compute any liveness for constants switch (inst_tags[operand]) { - .constant, .const_ty => continue, + .constant, .const_ty, .interned => continue, else => {}, } @@ -1821,6 +1826,7 @@ fn AnalyzeBigOperands(comptime pass: LivenessPass) type { /// Must be called with operands in reverse order. fn feed(big: *Self, op_ref: Air.Inst.Ref) !void { + const ip = big.a.intern_pool; // Note that after this, `operands_remaining` becomes the index of the current operand big.operands_remaining -= 1; @@ -1834,14 +1840,14 @@ fn AnalyzeBigOperands(comptime pass: LivenessPass) type { // Don't compute any liveness for constants const inst_tags = big.a.air.instructions.items(.tag); switch (inst_tags[operand]) { - .constant, .const_ty => return, + .constant, .const_ty, .interned => return, else => {}, } // If our result is unused and the instruction doesn't need to be lowered, backends will // skip the lowering of this instruction, so we don't want to record uses of operands. // That way, we can mark as many instructions as possible unused. 
- if (big.will_die_immediately and !big.a.air.mustLower(big.inst)) return; + if (big.will_die_immediately and !big.a.air.mustLower(big.inst, ip.*)) return; const extra_byte = (big.operands_remaining - (bpi - 1)) / 31; const extra_bit = @intCast(u5, big.operands_remaining - (bpi - 1) - extra_byte * 31); diff --git a/src/Liveness/Verify.zig b/src/Liveness/Verify.zig index a55ebe52a6..e05f1814ce 100644 --- a/src/Liveness/Verify.zig +++ b/src/Liveness/Verify.zig @@ -5,6 +5,7 @@ air: Air, liveness: Liveness, live: LiveMap = .{}, blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, LiveMap) = .{}, +intern_pool: *const InternPool, pub const Error = error{ LivenessInvalid, OutOfMemory }; @@ -27,10 +28,11 @@ pub fn verify(self: *Verify) Error!void { const LiveMap = std.AutoHashMapUnmanaged(Air.Inst.Index, void); fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void { + const ip = self.intern_pool; const tag = self.air.instructions.items(.tag); const data = self.air.instructions.items(.data); for (body) |inst| { - if (self.liveness.isUnused(inst) and !self.air.mustLower(inst)) { + if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip.*)) { // This instruction will not be lowered and should be ignored. continue; } @@ -42,6 +44,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void { .ret_ptr, .constant, .const_ty, + .interned, .breakpoint, .dbg_stmt, .dbg_inline_begin, @@ -554,7 +557,7 @@ fn verifyDeath(self: *Verify, inst: Air.Inst.Index, operand: Air.Inst.Index) Err fn verifyOperand(self: *Verify, inst: Air.Inst.Index, op_ref: Air.Inst.Ref, dies: bool) Error!void { const operand = Air.refToIndex(op_ref) orelse return; switch (self.air.instructions.items(.tag)[operand]) { - .constant, .const_ty => {}, + .constant, .const_ty, .interned => {}, else => { if (dies) { if (!self.live.remove(operand)) return invalid("%{}: dead operand %{} reused and killed again", .{ inst, operand }); @@ -576,7 +579,7 @@ fn verifyInst( } const tag = self.air.instructions.items(.tag); switch (tag[inst]) { - .constant, .const_ty => unreachable, + .constant, .const_ty, .interned => unreachable, else => { if (self.liveness.isUnused(inst)) { assert(!self.live.contains(inst)); @@ -604,4 +607,5 @@ const log = std.log.scoped(.liveness_verify); const Air = @import("../Air.zig"); const Liveness = @import("../Liveness.zig"); +const InternPool = @import("../InternPool.zig"); const Verify = @This(); diff --git a/src/Module.zig b/src/Module.zig index 5756955d3c..4187ac206b 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -4397,7 +4397,7 @@ pub fn ensureFuncBodyAnalyzed(mod: *Module, func: *Fn) SemaError!void { if (no_bin_file and !dump_air and !dump_llvm_ir) return; log.debug("analyze liveness of {s}", .{decl.name}); - var liveness = try Liveness.analyze(gpa, air); + var liveness = try Liveness.analyze(gpa, air, &mod.intern_pool); defer liveness.deinit(gpa); if (dump_air) { @@ -4414,6 +4414,7 @@ pub fn ensureFuncBodyAnalyzed(mod: *Module, func: *Fn) SemaError!void { .gpa = gpa, .air = air, .liveness = liveness, + .intern_pool = &mod.intern_pool, }; defer verify.deinit(); diff --git a/src/Sema.zig b/src/Sema.zig index 9b76fee68e..540474c84a 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -33007,7 +33007,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { /// Returns the type of the AIR instruction. 
fn typeOf(sema: *Sema, inst: Air.Inst.Ref) Type { - return sema.getTmpAir().typeOf(inst); + return sema.getTmpAir().typeOf(inst, sema.mod.intern_pool); } pub fn getTmpAir(sema: Sema) Air { @@ -33019,88 +33019,14 @@ pub fn getTmpAir(sema: Sema) Air { } pub fn addType(sema: *Sema, ty: Type) !Air.Inst.Ref { - switch (ty.ip_index) { - .u1_type => return .u1_type, - .u8_type => return .u8_type, - .i8_type => return .i8_type, - .u16_type => return .u16_type, - .i16_type => return .i16_type, - .u29_type => return .u29_type, - .u32_type => return .u32_type, - .i32_type => return .i32_type, - .u64_type => return .u64_type, - .i64_type => return .i64_type, - .u80_type => return .u80_type, - .u128_type => return .u128_type, - .i128_type => return .i128_type, - .usize_type => return .usize_type, - .isize_type => return .isize_type, - .c_char_type => return .c_char_type, - .c_short_type => return .c_short_type, - .c_ushort_type => return .c_ushort_type, - .c_int_type => return .c_int_type, - .c_uint_type => return .c_uint_type, - .c_long_type => return .c_long_type, - .c_ulong_type => return .c_ulong_type, - .c_longlong_type => return .c_longlong_type, - .c_ulonglong_type => return .c_ulonglong_type, - .c_longdouble_type => return .c_longdouble_type, - .f16_type => return .f16_type, - .f32_type => return .f32_type, - .f64_type => return .f64_type, - .f80_type => return .f80_type, - .f128_type => return .f128_type, - .anyopaque_type => return .anyopaque_type, - .bool_type => return .bool_type, - .void_type => return .void_type, - .type_type => return .type_type, - .anyerror_type => return .anyerror_type, - .comptime_int_type => return .comptime_int_type, - .comptime_float_type => return .comptime_float_type, - .noreturn_type => return .noreturn_type, - .anyframe_type => return .anyframe_type, - .null_type => return .null_type, - .undefined_type => return .undefined_type, - .enum_literal_type => return .enum_literal_type, - .atomic_order_type => return .atomic_order_type, - .atomic_rmw_op_type => return .atomic_rmw_op_type, - .calling_convention_type => return .calling_convention_type, - .address_space_type => return .address_space_type, - .float_mode_type => return .float_mode_type, - .reduce_op_type => return .reduce_op_type, - .call_modifier_type => return .call_modifier_type, - .prefetch_options_type => return .prefetch_options_type, - .export_options_type => return .export_options_type, - .extern_options_type => return .extern_options_type, - .type_info_type => return .type_info_type, - .manyptr_u8_type => return .manyptr_u8_type, - .manyptr_const_u8_type => return .manyptr_const_u8_type, - .single_const_pointer_to_comptime_int_type => return .single_const_pointer_to_comptime_int_type, - .const_slice_u8_type => return .const_slice_u8_type, - .anyerror_void_error_union_type => return .anyerror_void_error_union_type, - .generic_poison_type => return .generic_poison_type, - .var_args_param_type => return .var_args_param_type, - .empty_struct_type => return .empty_struct_type, - - // values - .undef => unreachable, - .zero => unreachable, - .zero_usize => unreachable, - .one => unreachable, - .one_usize => unreachable, - .calling_convention_c => unreachable, - .calling_convention_inline => unreachable, - .void_value => unreachable, - .unreachable_value => unreachable, - .null_value => unreachable, - .bool_true => unreachable, - .bool_false => unreachable, - .empty_struct => unreachable, - .generic_poison => unreachable, - - _ => {}, - - .none => unreachable, + if (ty.ip_index != .none) { + if 
(@enumToInt(ty.ip_index) < Air.ref_start_index) + return @intToEnum(Air.Inst.Ref, @enumToInt(ty.ip_index)); + try sema.air_instructions.append(sema.gpa, .{ + .tag = .interned, + .data = .{ .interned = ty.ip_index }, + }); + return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1)); } switch (ty.tag()) { .u1 => return .u1_type, diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index 4370977272..2846633275 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -521,7 +521,7 @@ fn gen(self: *Self) !void { const inst = self.air.getMainBody()[arg_index]; assert(self.air.instructions.items(.tag)[inst] == .arg); - const ty = self.air.typeOfIndex(inst); + const ty = self.typeOfIndex(inst); const abi_size = @intCast(u32, ty.abiSize(mod)); const abi_align = ty.abiAlignment(mod); @@ -653,13 +653,14 @@ fn gen(self: *Self) !void { } fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { + const mod = self.bin_file.options.module.?; + const ip = &mod.intern_pool; const air_tags = self.air.instructions.items(.tag); for (body) |inst| { // TODO: remove now-redundant isUnused calls from AIR handler functions - if (self.liveness.isUnused(inst) and !self.air.mustLower(inst)) { + if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip.*)) continue; - } const old_air_bookkeeping = self.air_bookkeeping; try self.ensureProcessDeathCapacity(Liveness.bpi); @@ -845,6 +846,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .constant => unreachable, // excluded from function bodies .const_ty => unreachable, // excluded from function bodies + .interned => unreachable, // excluded from function bodies .unreach => self.finishAirBookkeeping(), .optional_payload => try self.airOptionalPayload(inst), @@ -1028,7 +1030,7 @@ fn allocMem( /// Use a pointer instruction as the basis for allocating stack memory. fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 { const mod = self.bin_file.options.module.?; - const elem_ty = self.air.typeOfIndex(inst).elemType(); + const elem_ty = self.typeOfIndex(inst).elemType(); if (!elem_ty.hasRuntimeBits(mod)) { // return the stack offset 0. 
Stack offset 0 will be where all @@ -1067,7 +1069,7 @@ fn allocRegOrMem(self: *Self, elem_ty: Type, reg_ok: bool, maybe_inst: ?Air.Inst } pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void { - const stack_mcv = try self.allocRegOrMem(self.air.typeOfIndex(inst), false, inst); + const stack_mcv = try self.allocRegOrMem(self.typeOfIndex(inst), false, inst); log.debug("spilling {d} to stack mcv {any}", .{ inst, stack_mcv }); const reg_mcv = self.getResolvedInstValue(inst); @@ -1079,14 +1081,14 @@ pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; try branch.inst_table.put(self.gpa, inst, stack_mcv); - try self.genSetStack(self.air.typeOfIndex(inst), stack_mcv.stack_offset, reg_mcv); + try self.genSetStack(self.typeOfIndex(inst), stack_mcv.stack_offset, reg_mcv); } /// Save the current instruction stored in the compare flags if /// occupied fn spillCompareFlagsIfOccupied(self: *Self) !void { if (self.compare_flags_inst) |inst_to_save| { - const ty = self.air.typeOfIndex(inst_to_save); + const ty = self.typeOfIndex(inst_to_save); const mcv = self.getResolvedInstValue(inst_to_save); const new_mcv = switch (mcv) { .compare_flags => try self.allocRegOrMem(ty, true, inst_to_save), @@ -1094,7 +1096,7 @@ fn spillCompareFlagsIfOccupied(self: *Self) !void { else => unreachable, // mcv doesn't occupy the compare flags }; - try self.setRegOrMem(self.air.typeOfIndex(inst_to_save), new_mcv, mcv); + try self.setRegOrMem(self.typeOfIndex(inst_to_save), new_mcv, mcv); log.debug("spilling {d} to mcv {any}", .{ inst_to_save, new_mcv }); const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; @@ -1126,9 +1128,9 @@ fn copyToTmpRegister(self: *Self, ty: Type, mcv: MCValue) !Register { /// This can have a side effect of spilling instructions to the stack to free up a register. 
fn copyToNewRegister(self: *Self, reg_owner: Air.Inst.Index, mcv: MCValue) !MCValue { const raw_reg = try self.register_manager.allocReg(reg_owner, gp); - const ty = self.air.typeOfIndex(reg_owner); + const ty = self.typeOfIndex(reg_owner); const reg = self.registerAlias(raw_reg, ty); - try self.genSetReg(self.air.typeOfIndex(reg_owner), reg, mcv); + try self.genSetReg(self.typeOfIndex(reg_owner), reg, mcv); return MCValue{ .register = reg }; } @@ -1181,10 +1183,10 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const operand = ty_op.operand; const operand_mcv = try self.resolveInst(operand); - const operand_ty = self.air.typeOf(operand); + const operand_ty = self.typeOf(operand); const operand_info = operand_ty.intInfo(mod); - const dest_ty = self.air.typeOfIndex(inst); + const dest_ty = self.typeOfIndex(inst); const dest_info = dest_ty.intInfo(mod); const result: MCValue = result: { @@ -1201,14 +1203,14 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void { if (dest_info.bits > operand_info.bits) { const dest_mcv = try self.allocRegOrMem(dest_ty, true, inst); - try self.setRegOrMem(self.air.typeOfIndex(inst), dest_mcv, truncated); + try self.setRegOrMem(self.typeOfIndex(inst), dest_mcv, truncated); break :result dest_mcv; } else { if (self.reuseOperand(inst, operand, 0, truncated)) { break :result truncated; } else { const dest_mcv = try self.allocRegOrMem(dest_ty, true, inst); - try self.setRegOrMem(self.air.typeOfIndex(inst), dest_mcv, truncated); + try self.setRegOrMem(self.typeOfIndex(inst), dest_mcv, truncated); break :result dest_mcv; } } @@ -1303,8 +1305,8 @@ fn trunc( fn airTrunc(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); - const operand_ty = self.air.typeOf(ty_op.operand); - const dest_ty = self.air.typeOfIndex(inst); + const operand_ty = self.typeOf(ty_op.operand); + const dest_ty = self.typeOfIndex(inst); const result: MCValue = if (self.liveness.isUnused(inst)) .dead else blk: { break :blk try self.trunc(inst, operand, operand_ty, dest_ty); @@ -1325,7 +1327,7 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand = try self.resolveInst(ty_op.operand); - const operand_ty = self.air.typeOf(ty_op.operand); + const operand_ty = self.typeOf(ty_op.operand); switch (operand) { .dead => unreachable, .unreach => unreachable, @@ -1492,8 +1494,8 @@ fn minMax( fn airMinMax(self: *Self, inst: Air.Inst.Index) !void { const tag = self.air.instructions.items(.tag)[inst]; const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const lhs_ty = self.air.typeOf(bin_op.lhs); - const rhs_ty = self.air.typeOf(bin_op.rhs); + const lhs_ty = self.typeOf(bin_op.lhs); + const rhs_ty = self.typeOf(bin_op.rhs); const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const lhs_bind: ReadArg.Bind = .{ .inst = bin_op.lhs }; @@ -1512,9 +1514,9 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const ptr = try self.resolveInst(bin_op.lhs); - const ptr_ty = self.air.typeOf(bin_op.lhs); + const ptr_ty = self.typeOf(bin_op.lhs); const len = try self.resolveInst(bin_op.rhs); - const len_ty = self.air.typeOf(bin_op.rhs); + const 
len_ty = self.typeOf(bin_op.rhs); const ptr_bits = self.target.ptrBitWidth(); const ptr_bytes = @divExact(ptr_bits, 8); @@ -2436,8 +2438,8 @@ fn ptrArithmetic( fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const lhs_ty = self.air.typeOf(bin_op.lhs); - const rhs_ty = self.air.typeOf(bin_op.rhs); + const lhs_ty = self.typeOf(bin_op.lhs); + const rhs_ty = self.typeOf(bin_op.rhs); const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const lhs_bind: ReadArg.Bind = .{ .inst = bin_op.lhs }; @@ -2487,8 +2489,8 @@ fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void { fn airPtrArithmetic(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; - const lhs_ty = self.air.typeOf(bin_op.lhs); - const rhs_ty = self.air.typeOf(bin_op.rhs); + const lhs_ty = self.typeOf(bin_op.lhs); + const rhs_ty = self.typeOf(bin_op.rhs); const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const lhs_bind: ReadArg.Bind = .{ .inst = bin_op.lhs }; @@ -2525,10 +2527,10 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void { const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const lhs_bind: ReadArg.Bind = .{ .inst = extra.lhs }; const rhs_bind: ReadArg.Bind = .{ .inst = extra.rhs }; - const lhs_ty = self.air.typeOf(extra.lhs); - const rhs_ty = self.air.typeOf(extra.rhs); + const lhs_ty = self.typeOf(extra.lhs); + const rhs_ty = self.typeOf(extra.rhs); - const tuple_ty = self.air.typeOfIndex(inst); + const tuple_ty = self.typeOfIndex(inst); const tuple_size = @intCast(u32, tuple_ty.abiSize(mod)); const tuple_align = tuple_ty.abiAlignment(mod); const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, mod)); @@ -2653,10 +2655,10 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const result: MCValue = result: { const lhs_bind: ReadArg.Bind = .{ .inst = extra.lhs }; const rhs_bind: ReadArg.Bind = .{ .inst = extra.rhs }; - const lhs_ty = self.air.typeOf(extra.lhs); - const rhs_ty = self.air.typeOf(extra.rhs); + const lhs_ty = self.typeOf(extra.lhs); + const rhs_ty = self.typeOf(extra.rhs); - const tuple_ty = self.air.typeOfIndex(inst); + const tuple_ty = self.typeOfIndex(inst); const tuple_size = @intCast(u32, tuple_ty.abiSize(mod)); const tuple_align = tuple_ty.abiAlignment(mod); const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, mod)); @@ -2877,10 +2879,10 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const result: MCValue = result: { const lhs_bind: ReadArg.Bind = .{ .inst = extra.lhs }; const rhs_bind: ReadArg.Bind = .{ .inst = extra.rhs }; - const lhs_ty = self.air.typeOf(extra.lhs); - const rhs_ty = self.air.typeOf(extra.rhs); + const lhs_ty = self.typeOf(extra.lhs); + const rhs_ty = self.typeOf(extra.rhs); - const tuple_ty = self.air.typeOfIndex(inst); + const tuple_ty = self.typeOfIndex(inst); const tuple_size = @intCast(u32, tuple_ty.abiSize(mod)); const tuple_align = tuple_ty.abiAlignment(mod); const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, mod)); @@ -3013,7 +3015,7 @@ fn airShlSat(self: *Self, inst: Air.Inst.Index) !void { fn airOptionalPayload(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if 
(self.liveness.isUnused(inst)) .dead else result: { - const optional_ty = self.air.typeOf(ty_op.operand); + const optional_ty = self.typeOf(ty_op.operand); const mcv = try self.resolveInst(ty_op.operand); break :result try self.optionalPayload(inst, mcv, optional_ty); }; @@ -3132,7 +3134,7 @@ fn airUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const error_union_bind: ReadArg.Bind = .{ .inst = ty_op.operand }; - const error_union_ty = self.air.typeOf(ty_op.operand); + const error_union_ty = self.typeOf(ty_op.operand); break :result try self.errUnionErr(error_union_bind, error_union_ty, inst); }; @@ -3212,7 +3214,7 @@ fn airUnwrapErrPayload(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const error_union_bind: ReadArg.Bind = .{ .inst = ty_op.operand }; - const error_union_ty = self.air.typeOf(ty_op.operand); + const error_union_ty = self.typeOf(ty_op.operand); break :result try self.errUnionPayload(error_union_bind, error_union_ty, inst); }; @@ -3266,12 +3268,12 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void { } const result: MCValue = result: { - const payload_ty = self.air.typeOf(ty_op.operand); + const payload_ty = self.typeOf(ty_op.operand); if (!payload_ty.hasRuntimeBits(mod)) { break :result MCValue{ .immediate = 1 }; } - const optional_ty = self.air.typeOfIndex(inst); + const optional_ty = self.typeOfIndex(inst); const operand = try self.resolveInst(ty_op.operand); const operand_lock: ?RegisterLock = switch (operand) { .register => |reg| self.register_manager.lockRegAssumeUnused(reg), @@ -3433,7 +3435,7 @@ fn airPtrSlicePtrPtr(self: *Self, inst: Air.Inst.Index) !void { fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const slice_ty = self.air.typeOf(bin_op.lhs); + const slice_ty = self.typeOf(bin_op.lhs); const result: MCValue = if (!slice_ty.isVolatilePtr() and self.liveness.isUnused(inst)) .dead else result: { var buf: Type.SlicePtrFieldTypeBuffer = undefined; const ptr_ty = slice_ty.slicePtrFieldType(&buf); @@ -3482,8 +3484,8 @@ fn airSliceElemPtr(self: *Self, inst: Air.Inst.Index) !void { const base_bind: ReadArg.Bind = .{ .mcv = base_mcv }; const index_bind: ReadArg.Bind = .{ .inst = extra.rhs }; - const slice_ty = self.air.typeOf(extra.lhs); - const index_ty = self.air.typeOf(extra.rhs); + const slice_ty = self.typeOf(extra.lhs); + const index_ty = self.typeOf(extra.rhs); const addr = try self.ptrArithmetic(.ptr_add, base_bind, index_bind, slice_ty, index_ty, null); break :result addr; @@ -3499,7 +3501,7 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void { fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const ptr_ty = self.air.typeOf(bin_op.lhs); + const ptr_ty = self.typeOf(bin_op.lhs); const result: MCValue = if (!ptr_ty.isVolatilePtr() and self.liveness.isUnused(inst)) .dead else result: { const base_bind: ReadArg.Bind = .{ .inst = bin_op.lhs }; const index_bind: ReadArg.Bind = .{ .inst = bin_op.rhs }; @@ -3516,8 +3518,8 @@ fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void { const ptr_bind: ReadArg.Bind = .{ .inst = extra.lhs }; const index_bind: ReadArg.Bind = .{ .inst = extra.rhs }; - const ptr_ty = 
self.air.typeOf(extra.lhs); - const index_ty = self.air.typeOf(extra.rhs); + const ptr_ty = self.typeOf(extra.lhs); + const index_ty = self.typeOf(extra.rhs); const addr = try self.ptrArithmetic(.ptr_add, ptr_bind, index_bind, ptr_ty, index_ty, null); break :result addr; @@ -3862,16 +3864,16 @@ fn genInlineMemsetCode( } fn airLoad(self: *Self, inst: Air.Inst.Index) !void { - const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const elem_ty = self.air.typeOfIndex(inst); const mod = self.bin_file.options.module.?; + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const elem_ty = self.typeOfIndex(inst); const elem_size = elem_ty.abiSize(mod); const result: MCValue = result: { if (!elem_ty.hasRuntimeBits(mod)) break :result MCValue.none; const ptr = try self.resolveInst(ty_op.operand); - const is_volatile = self.air.typeOf(ty_op.operand).isVolatilePtr(); + const is_volatile = self.typeOf(ty_op.operand).isVolatilePtr(); if (self.liveness.isUnused(inst) and !is_volatile) break :result MCValue.dead; @@ -3886,7 +3888,7 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void { break :blk try self.allocRegOrMem(elem_ty, true, inst); } }; - try self.load(dst_mcv, ptr, self.air.typeOf(ty_op.operand)); + try self.load(dst_mcv, ptr, self.typeOf(ty_op.operand)); break :result dst_mcv; }; return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); @@ -4068,8 +4070,8 @@ fn airStore(self: *Self, inst: Air.Inst.Index, safety: bool) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const ptr = try self.resolveInst(bin_op.lhs); const value = try self.resolveInst(bin_op.rhs); - const ptr_ty = self.air.typeOf(bin_op.lhs); - const value_ty = self.air.typeOf(bin_op.rhs); + const ptr_ty = self.typeOf(bin_op.lhs); + const value_ty = self.typeOf(bin_op.rhs); try self.store(ptr, value, ptr_ty, value_ty); @@ -4093,7 +4095,7 @@ fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, inde return if (self.liveness.isUnused(inst)) .dead else result: { const mod = self.bin_file.options.module.?; const mcv = try self.resolveInst(operand); - const ptr_ty = self.air.typeOf(operand); + const ptr_ty = self.typeOf(operand); const struct_ty = ptr_ty.childType(); const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod)); switch (mcv) { @@ -4118,7 +4120,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const mod = self.bin_file.options.module.?; const mcv = try self.resolveInst(operand); - const struct_ty = self.air.typeOf(operand); + const struct_ty = self.typeOf(operand); const struct_field_ty = struct_ty.structFieldType(index); const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod)); @@ -4194,7 +4196,7 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void { while (self.args[arg_index] == .none) arg_index += 1; self.arg_index = arg_index + 1; - const ty = self.air.typeOfIndex(inst); + const ty = self.typeOfIndex(inst); const tag = self.air.instructions.items(.tag)[inst]; const src_index = self.air.instructions.items(.data)[inst].arg.src_index; const name = self.mod_fn.getParamName(self.bin_file.options.module.?, src_index); @@ -4247,7 +4249,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const callee = pl_op.operand; const extra = self.air.extraData(Air.Call, pl_op.payload); const args = @ptrCast([]const Air.Inst.Ref, 
self.air.extra[extra.end..][0..extra.data.args_len]); - const ty = self.air.typeOf(callee); + const ty = self.typeOf(callee); const mod = self.bin_file.options.module.?; const fn_ty = switch (ty.zigTypeTag(mod)) { @@ -4294,7 +4296,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier for (info.args, 0..) |mc_arg, arg_i| { const arg = args[arg_i]; - const arg_ty = self.air.typeOf(arg); + const arg_ty = self.typeOf(arg); const arg_mcv = try self.resolveInst(args[arg_i]); switch (mc_arg) { @@ -4470,7 +4472,7 @@ fn airRet(self: *Self, inst: Air.Inst.Index) !void { fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const ptr = try self.resolveInst(un_op); - const ptr_ty = self.air.typeOf(un_op); + const ptr_ty = self.typeOf(un_op); const ret_ty = self.fn_type.fnReturnType(); switch (self.ret_mcv) { @@ -4512,7 +4514,7 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void { fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const lhs_ty = self.air.typeOf(bin_op.lhs); + const lhs_ty = self.typeOf(bin_op.lhs); const result: MCValue = if (self.liveness.isUnused(inst)) .dead else blk: { break :blk try self.cmp(.{ .inst = bin_op.lhs }, .{ .inst = bin_op.rhs }, lhs_ty, op); @@ -4652,7 +4654,7 @@ fn airDbgVar(self: *Self, inst: Air.Inst.Index) !void { const pl_op = self.air.instructions.items(.data)[inst].pl_op; const operand = pl_op.operand; const tag = self.air.instructions.items(.tag)[inst]; - const ty = self.air.typeOf(operand); + const ty = self.typeOf(operand); const mcv = try self.resolveInst(operand); const name = self.air.nullTerminatedString(pl_op.payload); @@ -4804,7 +4806,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void { log.debug("consolidating else_entry {d} {}=>{}", .{ else_key, else_value, canon_mcv }); // TODO make sure the destination stack offset / register does not already have something // going on there. - try self.setRegOrMem(self.air.typeOfIndex(else_key), canon_mcv, else_value); + try self.setRegOrMem(self.typeOfIndex(else_key), canon_mcv, else_value); // TODO track the new register / stack allocation } try parent_branch.inst_table.ensureUnusedCapacity(self.gpa, saved_then_branch.inst_table.count()); @@ -4831,7 +4833,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void { log.debug("consolidating then_entry {d} {}=>{}", .{ then_key, parent_mcv, then_value }); // TODO make sure the destination stack offset / register does not already have something // going on there. 
- try self.setRegOrMem(self.air.typeOfIndex(then_key), parent_mcv, then_value); + try self.setRegOrMem(self.typeOfIndex(then_key), parent_mcv, then_value); // TODO track the new register / stack allocation } @@ -4936,7 +4938,7 @@ fn airIsNull(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand = try self.resolveInst(un_op); - const operand_ty = self.air.typeOf(un_op); + const operand_ty = self.typeOf(un_op); break :result try self.isNull(.{ .mcv = operand }, operand_ty); }; @@ -4947,7 +4949,7 @@ fn airIsNullPtr(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand_ptr = try self.resolveInst(un_op); - const ptr_ty = self.air.typeOf(un_op); + const ptr_ty = self.typeOf(un_op); const elem_ty = ptr_ty.elemType(); const operand = try self.allocRegOrMem(elem_ty, true, null); @@ -4962,7 +4964,7 @@ fn airIsNonNull(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand = try self.resolveInst(un_op); - const operand_ty = self.air.typeOf(un_op); + const operand_ty = self.typeOf(un_op); break :result try self.isNonNull(.{ .mcv = operand }, operand_ty); }; @@ -4973,7 +4975,7 @@ fn airIsNonNullPtr(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand_ptr = try self.resolveInst(un_op); - const ptr_ty = self.air.typeOf(un_op); + const ptr_ty = self.typeOf(un_op); const elem_ty = ptr_ty.elemType(); const operand = try self.allocRegOrMem(elem_ty, true, null); @@ -4988,7 +4990,7 @@ fn airIsErr(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const error_union_bind: ReadArg.Bind = .{ .inst = un_op }; - const error_union_ty = self.air.typeOf(un_op); + const error_union_ty = self.typeOf(un_op); break :result try self.isErr(error_union_bind, error_union_ty); }; @@ -4999,7 +5001,7 @@ fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand_ptr = try self.resolveInst(un_op); - const ptr_ty = self.air.typeOf(un_op); + const ptr_ty = self.typeOf(un_op); const elem_ty = ptr_ty.elemType(); const operand = try self.allocRegOrMem(elem_ty, true, null); @@ -5014,7 +5016,7 @@ fn airIsNonErr(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const error_union_bind: ReadArg.Bind = .{ .inst = un_op }; - const error_union_ty = self.air.typeOf(un_op); + const error_union_ty = self.typeOf(un_op); break :result try self.isNonErr(error_union_bind, error_union_ty); }; @@ -5025,7 +5027,7 @@ fn airIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand_ptr = try self.resolveInst(un_op); - const ptr_ty 
= self.air.typeOf(un_op); + const ptr_ty = self.typeOf(un_op); const elem_ty = ptr_ty.elemType(); const operand = try self.allocRegOrMem(elem_ty, true, null); @@ -5093,7 +5095,7 @@ fn airBlock(self: *Self, inst: Air.Inst.Index) !void { fn airSwitch(self: *Self, inst: Air.Inst.Index) !void { const pl_op = self.air.instructions.items(.data)[inst].pl_op; - const condition_ty = self.air.typeOf(pl_op.operand); + const condition_ty = self.typeOf(pl_op.operand); const switch_br = self.air.extraData(Air.SwitchBr, pl_op.payload); const liveness = try self.liveness.getSwitchBr( self.gpa, @@ -5241,7 +5243,7 @@ fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void { const mod = self.bin_file.options.module.?; const block_data = self.blocks.getPtr(block).?; - if (self.air.typeOf(operand).hasRuntimeBits(mod)) { + if (self.typeOf(operand).hasRuntimeBits(mod)) { const operand_mcv = try self.resolveInst(operand); const block_mcv = block_data.mcv; if (block_mcv == .none) { @@ -5249,14 +5251,14 @@ fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void { .none, .dead, .unreach => unreachable, .register, .stack_offset, .memory => operand_mcv, .immediate, .stack_argument_offset, .compare_flags => blk: { - const new_mcv = try self.allocRegOrMem(self.air.typeOfIndex(block), true, block); - try self.setRegOrMem(self.air.typeOfIndex(block), new_mcv, operand_mcv); + const new_mcv = try self.allocRegOrMem(self.typeOfIndex(block), true, block); + try self.setRegOrMem(self.typeOfIndex(block), new_mcv, operand_mcv); break :blk new_mcv; }, else => return self.fail("TODO implement block_data.mcv = operand_mcv for {}", .{operand_mcv}), }; } else { - try self.setRegOrMem(self.air.typeOfIndex(block), block_mcv, operand_mcv); + try self.setRegOrMem(self.typeOfIndex(block), block_mcv, operand_mcv); } } return self.brVoid(block); @@ -5322,7 +5324,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void { const arg_mcv = try self.resolveInst(input); try self.register_manager.getReg(reg, null); - try self.genSetReg(self.air.typeOf(input), reg, arg_mcv); + try self.genSetReg(self.typeOf(input), reg, arg_mcv); } { @@ -5945,7 +5947,7 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void { }; defer if (operand_lock) |lock| self.register_manager.unlockReg(lock); - const dest_ty = self.air.typeOfIndex(inst); + const dest_ty = self.typeOfIndex(inst); const dest = try self.allocRegOrMem(dest_ty, true, inst); try self.setRegOrMem(dest_ty, dest, operand); break :result dest; @@ -5956,7 +5958,7 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void { fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { - const ptr_ty = self.air.typeOf(ty_op.operand); + const ptr_ty = self.typeOf(ty_op.operand); const ptr = try self.resolveInst(ty_op.operand); const array_ty = ptr_ty.childType(); const array_len = @intCast(u32, array_ty.arrayLen()); @@ -6076,7 +6078,7 @@ fn airReduce(self: *Self, inst: Air.Inst.Index) !void { } fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { - const vector_ty = self.air.typeOfIndex(inst); + const vector_ty = self.typeOfIndex(inst); const len = vector_ty.vectorLen(); const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]); @@ -6125,7 +6127,7 @@ fn airTry(self: *Self, inst: Air.Inst.Index) !void { const body = 
self.air.extra[extra.end..][0..extra.data.body_len]; const result: MCValue = result: { const error_union_bind: ReadArg.Bind = .{ .inst = pl_op.operand }; - const error_union_ty = self.air.typeOf(pl_op.operand); + const error_union_ty = self.typeOf(pl_op.operand); const error_union_size = @intCast(u32, error_union_ty.abiSize(mod)); const error_union_align = error_union_ty.abiAlignment(mod); @@ -6159,7 +6161,7 @@ fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue { const mod = self.bin_file.options.module.?; // If the type has no codegen bits, no need to store it. - const inst_ty = self.air.typeOf(inst); + const inst_ty = self.typeOf(inst); if (!inst_ty.hasRuntimeBitsIgnoreComptime(mod) and !inst_ty.isError(mod)) return MCValue{ .none = {} }; @@ -6428,3 +6430,13 @@ fn registerAlias(self: *Self, reg: Register, ty: Type) Register { }, } } + +fn typeOf(self: *Self, inst: Air.Inst.Ref) Type { + const mod = self.bin_file.options.module.?; + return self.air.typeOf(inst, mod.intern_pool); +} + +fn typeOfIndex(self: *Self, inst: Air.Inst.Index) Type { + const mod = self.bin_file.options.module.?; + return self.air.typeOfIndex(inst, mod.intern_pool); +} diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index 4c7151cd47..eb8cfa9707 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -477,6 +477,7 @@ pub fn addExtraAssumeCapacity(self: *Self, extra: anytype) u32 { } fn gen(self: *Self) !void { + const mod = self.bin_file.options.module.?; const cc = self.fn_type.fnCallingConvention(); if (cc != .Naked) { // push {fp, lr} @@ -518,9 +519,8 @@ fn gen(self: *Self) !void { const inst = self.air.getMainBody()[arg_index]; assert(self.air.instructions.items(.tag)[inst] == .arg); - const ty = self.air.typeOfIndex(inst); + const ty = self.typeOfIndex(inst); - const mod = self.bin_file.options.module.?; const abi_size = @intCast(u32, ty.abiSize(mod)); const abi_align = ty.abiAlignment(mod); const stack_offset = try self.allocMem(abi_size, abi_align, inst); @@ -637,13 +637,14 @@ fn gen(self: *Self) !void { } fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { + const mod = self.bin_file.options.module.?; + const ip = &mod.intern_pool; const air_tags = self.air.instructions.items(.tag); for (body) |inst| { // TODO: remove now-redundant isUnused calls from AIR handler functions - if (self.liveness.isUnused(inst) and !self.air.mustLower(inst)) { + if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip.*)) continue; - } const old_air_bookkeeping = self.air_bookkeeping; try self.ensureProcessDeathCapacity(Liveness.bpi); @@ -829,6 +830,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .constant => unreachable, // excluded from function bodies .const_ty => unreachable, // excluded from function bodies + .interned => unreachable, // excluded from function bodies .unreach => self.finishAirBookkeeping(), .optional_payload => try self.airOptionalPayload(inst), @@ -1008,7 +1010,7 @@ fn allocMem( /// Use a pointer instruction as the basis for allocating stack memory. 
fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 { const mod = self.bin_file.options.module.?; - const elem_ty = self.air.typeOfIndex(inst).elemType(); + const elem_ty = self.typeOfIndex(inst).elemType(); if (!elem_ty.hasRuntimeBits(mod)) { // As this stack item will never be dereferenced at runtime, @@ -1050,7 +1052,7 @@ fn allocRegOrMem(self: *Self, elem_ty: Type, reg_ok: bool, maybe_inst: ?Air.Inst } pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void { - const stack_mcv = try self.allocRegOrMem(self.air.typeOfIndex(inst), false, inst); + const stack_mcv = try self.allocRegOrMem(self.typeOfIndex(inst), false, inst); log.debug("spilling {} (%{d}) to stack mcv {any}", .{ reg, inst, stack_mcv }); const reg_mcv = self.getResolvedInstValue(inst); @@ -1064,14 +1066,14 @@ pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; try branch.inst_table.put(self.gpa, inst, stack_mcv); - try self.genSetStack(self.air.typeOfIndex(inst), stack_mcv.stack_offset, reg_mcv); + try self.genSetStack(self.typeOfIndex(inst), stack_mcv.stack_offset, reg_mcv); } /// Save the current instruction stored in the compare flags if /// occupied fn spillCompareFlagsIfOccupied(self: *Self) !void { if (self.cpsr_flags_inst) |inst_to_save| { - const ty = self.air.typeOfIndex(inst_to_save); + const ty = self.typeOfIndex(inst_to_save); const mcv = self.getResolvedInstValue(inst_to_save); const new_mcv = switch (mcv) { .cpsr_flags => try self.allocRegOrMem(ty, true, inst_to_save), @@ -1081,7 +1083,7 @@ fn spillCompareFlagsIfOccupied(self: *Self) !void { else => unreachable, // mcv doesn't occupy the compare flags }; - try self.setRegOrMem(self.air.typeOfIndex(inst_to_save), new_mcv, mcv); + try self.setRegOrMem(self.typeOfIndex(inst_to_save), new_mcv, mcv); log.debug("spilling {d} to mcv {any}", .{ inst_to_save, new_mcv }); const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; @@ -1151,15 +1153,15 @@ fn airFpext(self: *Self, inst: Air.Inst.Index) !void { } fn airIntCast(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; if (self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none }); const operand = try self.resolveInst(ty_op.operand); - const operand_ty = self.air.typeOf(ty_op.operand); - const dest_ty = self.air.typeOfIndex(inst); + const operand_ty = self.typeOf(ty_op.operand); + const dest_ty = self.typeOfIndex(inst); - const mod = self.bin_file.options.module.?; const operand_abi_size = operand_ty.abiSize(mod); const dest_abi_size = dest_ty.abiSize(mod); const info_a = operand_ty.intInfo(mod); @@ -1262,8 +1264,8 @@ fn trunc( fn airTrunc(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand_bind: ReadArg.Bind = .{ .inst = ty_op.operand }; - const operand_ty = self.air.typeOf(ty_op.operand); - const dest_ty = self.air.typeOfIndex(inst); + const operand_ty = self.typeOf(ty_op.operand); + const dest_ty = self.typeOfIndex(inst); const result: MCValue = if (self.liveness.isUnused(inst)) .dead else blk: { break :blk try self.trunc(inst, operand_bind, operand_ty, dest_ty); @@ -1284,7 +1286,7 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const 
operand_bind: ReadArg.Bind = .{ .inst = ty_op.operand };
- const operand_ty = self.air.typeOf(ty_op.operand);
+ const operand_ty = self.typeOf(ty_op.operand);
 
 switch (try operand_bind.resolveToMcv(self)) {
 .dead => unreachable,
 .unreach => unreachable,
@@ -1467,8 +1469,8 @@ fn minMax(
 fn airMinMax(self: *Self, inst: Air.Inst.Index) !void {
 const tag = self.air.instructions.items(.tag)[inst];
 const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const lhs_ty = self.air.typeOf(bin_op.lhs);
- const rhs_ty = self.air.typeOf(bin_op.rhs);
+ const lhs_ty = self.typeOf(bin_op.lhs);
+ const rhs_ty = self.typeOf(bin_op.rhs);
 
 const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
 const lhs_bind: ReadArg.Bind = .{ .inst = bin_op.lhs };
@@ -1487,9 +1489,9 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void {
 const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
 const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
 const ptr = try self.resolveInst(bin_op.lhs);
- const ptr_ty = self.air.typeOf(bin_op.lhs);
+ const ptr_ty = self.typeOf(bin_op.lhs);
 const len = try self.resolveInst(bin_op.rhs);
- const len_ty = self.air.typeOf(bin_op.rhs);
+ const len_ty = self.typeOf(bin_op.rhs);
 
 const stack_offset = try self.allocMem(8, 4, inst);
 try self.genSetStack(ptr_ty, stack_offset, ptr);
@@ -1501,8 +1503,8 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
 const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const lhs_ty = self.air.typeOf(bin_op.lhs);
- const rhs_ty = self.air.typeOf(bin_op.rhs);
+ const lhs_ty = self.typeOf(bin_op.lhs);
+ const rhs_ty = self.typeOf(bin_op.rhs);
 
 const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
 const lhs_bind: ReadArg.Bind = .{ .inst = bin_op.lhs };
@@ -1552,8 +1554,8 @@ fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
 fn airPtrArithmetic(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
 const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
 const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
- const lhs_ty = self.air.typeOf(bin_op.lhs);
- const rhs_ty = self.air.typeOf(bin_op.rhs);
+ const lhs_ty = self.typeOf(bin_op.lhs);
+ const rhs_ty = self.typeOf(bin_op.rhs);
 
 const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
 const lhs_bind: ReadArg.Bind = .{ .inst = bin_op.lhs };
@@ -1590,10 +1592,10 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void {
 const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
 const lhs_bind: ReadArg.Bind = .{ .inst = extra.lhs };
 const rhs_bind: ReadArg.Bind = .{ .inst = extra.rhs };
- const lhs_ty = self.air.typeOf(extra.lhs);
- const rhs_ty = self.air.typeOf(extra.rhs);
+ const lhs_ty = self.typeOf(extra.lhs);
+ const rhs_ty = self.typeOf(extra.rhs);
 
- const tuple_ty = self.air.typeOfIndex(inst);
+ const tuple_ty = self.typeOfIndex(inst);
 const tuple_size = @intCast(u32, tuple_ty.abiSize(mod));
 const tuple_align = tuple_ty.abiAlignment(mod);
 const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, mod));
@@ -1703,10 +1705,10 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
 const result: MCValue = result: {
 const lhs_bind: ReadArg.Bind = .{ .inst = extra.lhs };
 const rhs_bind: ReadArg.Bind = .{ .inst = extra.rhs };
- const lhs_ty = self.air.typeOf(extra.lhs);
- const rhs_ty = self.air.typeOf(extra.rhs);
+ const lhs_ty = self.typeOf(extra.lhs);
+ const rhs_ty = self.typeOf(extra.rhs);
 
- const tuple_ty = self.air.typeOfIndex(inst);
+ const tuple_ty = self.typeOfIndex(inst);
 const tuple_size = @intCast(u32, tuple_ty.abiSize(mod));
 const tuple_align = tuple_ty.abiAlignment(mod);
 const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, mod));
@@ -1865,10 +1867,10 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
 if (self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ extra.lhs, extra.rhs, .none });
 const mod = self.bin_file.options.module.?;
 const result: MCValue = result: {
- const lhs_ty = self.air.typeOf(extra.lhs);
- const rhs_ty = self.air.typeOf(extra.rhs);
+ const lhs_ty = self.typeOf(extra.lhs);
+ const rhs_ty = self.typeOf(extra.rhs);
 
- const tuple_ty = self.air.typeOfIndex(inst);
+ const tuple_ty = self.typeOfIndex(inst);
 const tuple_size = @intCast(u32, tuple_ty.abiSize(mod));
 const tuple_align = tuple_ty.abiAlignment(mod);
 const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, mod));
@@ -2019,10 +2021,10 @@ fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
 const ty_op = self.air.instructions.items(.data)[inst].ty_op;
 const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
- const optional_ty = self.air.typeOfIndex(inst);
- const mod = self.bin_file.options.module.?;
+ const optional_ty = self.typeOfIndex(inst);
 const abi_size = @intCast(u32, optional_ty.abiSize(mod));
 
 // Optional with a zero-bit payload type is just a boolean true
@@ -2105,7 +2107,7 @@ fn airUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !void {
 const ty_op = self.air.instructions.items(.data)[inst].ty_op;
 const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
 const error_union_bind: ReadArg.Bind = .{ .inst = ty_op.operand };
- const error_union_ty = self.air.typeOf(ty_op.operand);
+ const error_union_ty = self.typeOf(ty_op.operand);
 
 break :result try self.errUnionErr(error_union_bind, error_union_ty, inst);
 };
@@ -2182,7 +2184,7 @@ fn airUnwrapErrPayload(self: *Self, inst: Air.Inst.Index) !void {
 const ty_op = self.air.instructions.items(.data)[inst].ty_op;
 const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
 const error_union_bind: ReadArg.Bind = .{ .inst = ty_op.operand };
- const error_union_ty = self.air.typeOf(ty_op.operand);
+ const error_union_ty = self.typeOf(ty_op.operand);
 
 break :result try self.errUnionPayload(error_union_bind, error_union_ty, inst);
 };
@@ -2430,7 +2432,7 @@ fn ptrElemVal(
 
 fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
 const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const slice_ty = self.air.typeOf(bin_op.lhs);
+ const slice_ty = self.typeOf(bin_op.lhs);
 const result: MCValue = if (!slice_ty.isVolatilePtr() and self.liveness.isUnused(inst)) .dead else result: {
 var buf: Type.SlicePtrFieldTypeBuffer = undefined;
 const ptr_ty = slice_ty.slicePtrFieldType(&buf);
@@ -2456,8 +2458,8 @@ fn airSliceElemPtr(self: *Self, inst: Air.Inst.Index) !void {
 const base_bind: ReadArg.Bind = .{ .mcv = base_mcv };
 const index_bind: ReadArg.Bind = .{ .inst = extra.rhs };
 
- const slice_ty = self.air.typeOf(extra.lhs);
- const index_ty = self.air.typeOf(extra.rhs);
+ const slice_ty = self.typeOf(extra.lhs);
+ const index_ty = self.typeOf(extra.rhs);
 
 const addr = try self.ptrArithmetic(.ptr_add, base_bind, index_bind, slice_ty, index_ty, null);
 break :result addr;
@@ -2523,7 +2525,7 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
 const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
 const array_bind: ReadArg.Bind = .{ .inst = bin_op.lhs };
 const index_bind: ReadArg.Bind = .{ .inst = bin_op.rhs };
- const array_ty = self.air.typeOf(bin_op.lhs);
+ const array_ty = self.typeOf(bin_op.lhs);
 
 break :result try self.arrayElemVal(array_bind, index_bind, array_ty, inst);
 };
@@ -2532,7 +2534,7 @@ fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void {
 const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const ptr_ty = self.air.typeOf(bin_op.lhs);
+ const ptr_ty = self.typeOf(bin_op.lhs);
 const result: MCValue = if (!ptr_ty.isVolatilePtr() and self.liveness.isUnused(inst)) .dead else result: {
 const base_bind: ReadArg.Bind = .{ .inst = bin_op.lhs };
 const index_bind: ReadArg.Bind = .{ .inst = bin_op.rhs };
@@ -2549,8 +2551,8 @@ fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void {
 const ptr_bind: ReadArg.Bind = .{ .inst = extra.lhs };
 const index_bind: ReadArg.Bind = .{ .inst = extra.rhs };
 
- const ptr_ty = self.air.typeOf(extra.lhs);
- const index_ty = self.air.typeOf(extra.rhs);
+ const ptr_ty = self.typeOf(extra.lhs);
+ const index_ty = self.typeOf(extra.rhs);
 
 const addr = try self.ptrArithmetic(.ptr_add, ptr_bind, index_bind, ptr_ty, index_ty, null);
 break :result addr;
@@ -2736,13 +2738,13 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo
 
 fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
 const mod = self.bin_file.options.module.?;
 const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- const elem_ty = self.air.typeOfIndex(inst);
+ const elem_ty = self.typeOfIndex(inst);
 
 const result: MCValue = result: {
 if (!elem_ty.hasRuntimeBits(mod))
 break :result MCValue.none;
 
 const ptr = try self.resolveInst(ty_op.operand);
- const is_volatile = self.air.typeOf(ty_op.operand).isVolatilePtr();
+ const is_volatile = self.typeOf(ty_op.operand).isVolatilePtr();
 if (self.liveness.isUnused(inst) and !is_volatile)
 break :result MCValue.dead;
@@ -2755,7 +2757,7 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
 break :blk try self.allocRegOrMem(elem_ty, true, inst);
 }
 };
- try self.load(dest_mcv, ptr, self.air.typeOf(ty_op.operand));
+ try self.load(dest_mcv, ptr, self.typeOf(ty_op.operand));
 break :result dest_mcv;
 };
@@ -2860,8 +2862,8 @@ fn airStore(self: *Self, inst: Air.Inst.Index, safety: bool) !void {
 const bin_op = self.air.instructions.items(.data)[inst].bin_op;
 const ptr = try self.resolveInst(bin_op.lhs);
 const value = try self.resolveInst(bin_op.rhs);
- const ptr_ty = self.air.typeOf(bin_op.lhs);
- const value_ty = self.air.typeOf(bin_op.rhs);
+ const ptr_ty = self.typeOf(bin_op.lhs);
+ const value_ty = self.typeOf(bin_op.rhs);
 
 try self.store(ptr, value, ptr_ty, value_ty);
 
@@ -2885,7 +2887,7 @@ fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, inde
 return if (self.liveness.isUnused(inst)) .dead else result: {
 const mod = self.bin_file.options.module.?;
 const mcv = try self.resolveInst(operand);
- const ptr_ty = self.air.typeOf(operand);
+ const ptr_ty = self.typeOf(operand);
 const struct_ty = ptr_ty.childType();
 const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod));
 switch (mcv) {
@@ -2910,7 +2912,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
 const mod = self.bin_file.options.module.?;
 const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
 const mcv = try self.resolveInst(operand);
- const struct_ty = self.air.typeOf(operand);
+ const struct_ty = self.typeOf(operand);
 const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod));
 const struct_field_ty = struct_ty.structFieldType(index);
 
@@ -4169,7 +4171,7 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void {
 while (self.args[arg_index] == .none) arg_index += 1;
 self.arg_index = arg_index + 1;
 
- const ty = self.air.typeOfIndex(inst);
+ const ty = self.typeOfIndex(inst);
 const tag = self.air.instructions.items(.tag)[inst];
 const src_index = self.air.instructions.items(.data)[inst].arg.src_index;
 const name = self.mod_fn.getParamName(self.bin_file.options.module.?, src_index);
@@ -4222,7 +4224,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
 const callee = pl_op.operand;
 const extra = self.air.extraData(Air.Call, pl_op.payload);
 const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]);
- const ty = self.air.typeOf(callee);
+ const ty = self.typeOf(callee);
 
 const mod = self.bin_file.options.module.?;
 const fn_ty = switch (ty.zigTypeTag(mod)) {
@@ -4276,7 +4278,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
 
 for (info.args, 0..) |mc_arg, arg_i| {
 const arg = args[arg_i];
- const arg_ty = self.air.typeOf(arg);
+ const arg_ty = self.typeOf(arg);
 const arg_mcv = try self.resolveInst(args[arg_i]);
 
 switch (mc_arg) {
@@ -4418,7 +4420,7 @@ fn airRet(self: *Self, inst: Air.Inst.Index) !void {
 fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void {
 const un_op = self.air.instructions.items(.data)[inst].un_op;
 const ptr = try self.resolveInst(un_op);
- const ptr_ty = self.air.typeOf(un_op);
+ const ptr_ty = self.typeOf(un_op);
 const ret_ty = self.fn_type.fnReturnType();
 
 switch (self.ret_mcv) {
@@ -4461,7 +4463,7 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
 const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const lhs_ty = self.air.typeOf(bin_op.lhs);
+ const lhs_ty = self.typeOf(bin_op.lhs);
 
 const result: MCValue = if (self.liveness.isUnused(inst)) .dead else blk: {
 break :blk try self.cmp(.{ .inst = bin_op.lhs }, .{ .inst = bin_op.rhs }, lhs_ty, op);
@@ -4600,7 +4602,7 @@ fn airDbgVar(self: *Self, inst: Air.Inst.Index) !void {
 const pl_op = self.air.instructions.items(.data)[inst].pl_op;
 const operand = pl_op.operand;
 const tag = self.air.instructions.items(.tag)[inst];
- const ty = self.air.typeOf(operand);
+ const ty = self.typeOf(operand);
 const mcv = try self.resolveInst(operand);
 const name = self.air.nullTerminatedString(pl_op.payload);
 
@@ -4755,7 +4757,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
 log.debug("consolidating else_entry {d} {}=>{}", .{ else_key, else_value, canon_mcv });
 // TODO make sure the destination stack offset / register does not already have something
 // going on there.
- try self.setRegOrMem(self.air.typeOfIndex(else_key), canon_mcv, else_value);
+ try self.setRegOrMem(self.typeOfIndex(else_key), canon_mcv, else_value);
 // TODO track the new register / stack allocation
 }
 try parent_branch.inst_table.ensureUnusedCapacity(self.gpa, saved_then_branch.inst_table.count());
@@ -4782,7 +4784,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
 log.debug("consolidating then_entry {d} {}=>{}", .{ then_key, parent_mcv, then_value });
 // TODO make sure the destination stack offset / register does not already have something
 // going on there.
- try self.setRegOrMem(self.air.typeOfIndex(then_key), parent_mcv, then_value);
+ try self.setRegOrMem(self.typeOfIndex(then_key), parent_mcv, then_value);
 // TODO track the new register / stack allocation
 }
 
@@ -4827,7 +4829,7 @@ fn airIsNull(self: *Self, inst: Air.Inst.Index) !void {
 const un_op = self.air.instructions.items(.data)[inst].un_op;
 const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
 const operand_bind: ReadArg.Bind = .{ .inst = un_op };
- const operand_ty = self.air.typeOf(un_op);
+ const operand_ty = self.typeOf(un_op);
 
 break :result try self.isNull(operand_bind, operand_ty);
 };
@@ -4838,7 +4840,7 @@ fn airIsNullPtr(self: *Self, inst: Air.Inst.Index) !void {
 const un_op = self.air.instructions.items(.data)[inst].un_op;
 const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
 const operand_ptr = try self.resolveInst(un_op);
- const ptr_ty = self.air.typeOf(un_op);
+ const ptr_ty = self.typeOf(un_op);
 const elem_ty = ptr_ty.elemType();
 
 const operand = try self.allocRegOrMem(elem_ty, true, null);
@@ -4853,7 +4855,7 @@ fn airIsNonNull(self: *Self, inst: Air.Inst.Index) !void {
 const un_op = self.air.instructions.items(.data)[inst].un_op;
 const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
 const operand_bind: ReadArg.Bind = .{ .inst = un_op };
- const operand_ty = self.air.typeOf(un_op);
+ const operand_ty = self.typeOf(un_op);
 
 break :result try self.isNonNull(operand_bind, operand_ty);
 };
@@ -4864,7 +4866,7 @@ fn airIsNonNullPtr(self: *Self, inst: Air.Inst.Index) !void {
 const un_op = self.air.instructions.items(.data)[inst].un_op;
 const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
 const operand_ptr = try self.resolveInst(un_op);
- const ptr_ty = self.air.typeOf(un_op);
+ const ptr_ty = self.typeOf(un_op);
 const elem_ty = ptr_ty.elemType();
 
 const operand = try self.allocRegOrMem(elem_ty, true, null);
@@ -4913,7 +4915,7 @@ fn airIsErr(self: *Self, inst: Air.Inst.Index) !void {
 const un_op = self.air.instructions.items(.data)[inst].un_op;
 const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
 const error_union_bind: ReadArg.Bind = .{ .inst = un_op };
- const error_union_ty = self.air.typeOf(un_op);
+ const error_union_ty = self.typeOf(un_op);
 
 break :result try self.isErr(error_union_bind, error_union_ty);
 };
@@ -4924,7 +4926,7 @@ fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void {
 const un_op = self.air.instructions.items(.data)[inst].un_op;
 const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
 const operand_ptr = try self.resolveInst(un_op);
- const ptr_ty = self.air.typeOf(un_op);
+ const ptr_ty = self.typeOf(un_op);
 const elem_ty = ptr_ty.elemType();
 
 const operand = try self.allocRegOrMem(elem_ty, true, null);
@@ -4939,7 +4941,7 @@ fn airIsNonErr(self: *Self, inst: Air.Inst.Index) !void {
 const un_op = self.air.instructions.items(.data)[inst].un_op;
 const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
 const error_union_bind: ReadArg.Bind = .{ .inst = un_op };
- const error_union_ty = self.air.typeOf(un_op);
+ const error_union_ty = self.typeOf(un_op);
 
 break :result try self.isNonErr(error_union_bind, error_union_ty);
 };
@@ -4950,7 +4952,7 @@ fn airIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !void {
 const un_op = self.air.instructions.items(.data)[inst].un_op;
 const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
 const operand_ptr = try self.resolveInst(un_op);
- const ptr_ty = self.air.typeOf(un_op);
+ const ptr_ty = self.typeOf(un_op);
 const elem_ty = ptr_ty.elemType();
 
 const operand = try self.allocRegOrMem(elem_ty, true, null);
@@ -5018,7 +5020,7 @@ fn airBlock(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airSwitch(self: *Self, inst: Air.Inst.Index) !void {
 const pl_op = self.air.instructions.items(.data)[inst].pl_op;
- const condition_ty = self.air.typeOf(pl_op.operand);
+ const condition_ty = self.typeOf(pl_op.operand);
 const switch_br = self.air.extraData(Air.SwitchBr, pl_op.payload);
 const liveness = try self.liveness.getSwitchBr(
 self.gpa,
@@ -5164,7 +5166,7 @@ fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void {
 const mod = self.bin_file.options.module.?;
 const block_data = self.blocks.getPtr(block).?;
 
- if (self.air.typeOf(operand).hasRuntimeBits(mod)) {
+ if (self.typeOf(operand).hasRuntimeBits(mod)) {
 const operand_mcv = try self.resolveInst(operand);
 const block_mcv = block_data.mcv;
 if (block_mcv == .none) {
@@ -5172,14 +5174,14 @@ fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void {
 .none, .dead, .unreach => unreachable,
 .register, .stack_offset, .memory => operand_mcv,
 .immediate, .stack_argument_offset, .cpsr_flags => blk: {
- const new_mcv = try self.allocRegOrMem(self.air.typeOfIndex(block), true, block);
- try self.setRegOrMem(self.air.typeOfIndex(block), new_mcv, operand_mcv);
+ const new_mcv = try self.allocRegOrMem(self.typeOfIndex(block), true, block);
+ try self.setRegOrMem(self.typeOfIndex(block), new_mcv, operand_mcv);
 break :blk new_mcv;
 },
 else => return self.fail("TODO implement block_data.mcv = operand_mcv for {}", .{operand_mcv}),
 };
 } else {
- try self.setRegOrMem(self.air.typeOfIndex(block), block_mcv, operand_mcv);
+ try self.setRegOrMem(self.typeOfIndex(block), block_mcv, operand_mcv);
 }
 }
 return self.brVoid(block);
@@ -5243,7 +5245,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
 const arg_mcv = try self.resolveInst(input);
 try self.register_manager.getReg(reg, null);
- try self.genSetReg(self.air.typeOf(input), reg, arg_mcv);
+ try self.genSetReg(self.typeOf(input), reg, arg_mcv);
 }
 
 {
@@ -5896,7 +5898,7 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void {
 };
 defer if (operand_lock) |lock| self.register_manager.unlockReg(lock);
 
- const dest_ty = self.air.typeOfIndex(inst);
+ const dest_ty = self.typeOfIndex(inst);
 const dest = try self.allocRegOrMem(dest_ty, true, inst);
 try self.setRegOrMem(dest_ty, dest, operand);
 break :result dest;
@@ -5907,7 +5909,7 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void {
 fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void {
 const ty_op = self.air.instructions.items(.data)[inst].ty_op;
 const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
- const ptr_ty = self.air.typeOf(ty_op.operand);
+ const ptr_ty = self.typeOf(ty_op.operand);
 const ptr = try self.resolveInst(ty_op.operand);
 const array_ty = ptr_ty.childType();
 const array_len = @intCast(u32, array_ty.arrayLen());
@@ -6023,7 +6025,7 @@ fn airReduce(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
- const vector_ty = self.air.typeOfIndex(inst);
+ const vector_ty = self.typeOfIndex(inst);
 const len = vector_ty.vectorLen();
 const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
 const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]);
@@ -6072,7 +6074,7 @@ fn airTry(self: *Self, inst: Air.Inst.Index) !void {
 const body = self.air.extra[extra.end..][0..extra.data.body_len];
 const result: MCValue = result: {
 const error_union_bind: ReadArg.Bind = .{ .inst = pl_op.operand };
- const error_union_ty = self.air.typeOf(pl_op.operand);
+ const error_union_ty = self.typeOf(pl_op.operand);
 const mod = self.bin_file.options.module.?;
 const error_union_size = @intCast(u32, error_union_ty.abiSize(mod));
 const error_union_align = error_union_ty.abiAlignment(mod);
@@ -6107,7 +6109,7 @@ fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue {
 const mod = self.bin_file.options.module.?;
 
 // If the type has no codegen bits, no need to store it.
- const inst_ty = self.air.typeOf(inst);
+ const inst_ty = self.typeOf(inst);
 if (!inst_ty.hasRuntimeBitsIgnoreComptime(mod) and !inst_ty.isError(mod))
 return MCValue{ .none = {} };
 
@@ -6333,3 +6335,13 @@ fn parseRegName(name: []const u8) ?Register {
 }
 return std.meta.stringToEnum(Register, name);
 }
+
+fn typeOf(self: *Self, inst: Air.Inst.Ref) Type {
+ const mod = self.bin_file.options.module.?;
+ return self.air.typeOf(inst, mod.intern_pool);
+}
+
+fn typeOfIndex(self: *Self, inst: Air.Inst.Index) Type {
+ const mod = self.bin_file.options.module.?;
+ return self.air.typeOfIndex(inst, mod.intern_pool);
+}
diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig
index 75d5a87bf2..4ab798fe9c 100644
--- a/src/arch/riscv64/CodeGen.zig
+++ b/src/arch/riscv64/CodeGen.zig
@@ -470,13 +470,14 @@ fn gen(self: *Self) !void {
 }
 
 fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
+ const mod = self.bin_file.options.module.?;
+ const ip = &mod.intern_pool;
 const air_tags = self.air.instructions.items(.tag);
 
 for (body) |inst| {
 // TODO: remove now-redundant isUnused calls from AIR handler functions
- if (self.liveness.isUnused(inst) and !self.air.mustLower(inst)) {
+ if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip.*))
 continue;
- }
 
 const old_air_bookkeeping = self.air_bookkeeping;
 try self.ensureProcessDeathCapacity(Liveness.bpi);
@@ -658,6 +659,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
 
 .constant => unreachable, // excluded from function bodies
 .const_ty => unreachable, // excluded from function bodies
+ .interned => unreachable, // excluded from function bodies
 .unreach => self.finishAirBookkeeping(),
 
 .optional_payload => try self.airOptionalPayload(inst),
@@ -804,8 +806,8 @@ fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: u32) !u
 
 /// Use a pointer instruction as the basis for allocating stack memory.
 fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
- const elem_ty = self.air.typeOfIndex(inst).elemType();
 const mod = self.bin_file.options.module.?;
+ const elem_ty = self.typeOfIndex(inst).elemType();
 const abi_size = math.cast(u32, elem_ty.abiSize(mod)) orelse {
 return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)});
 };
@@ -815,8 +817,8 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
 }
 
 fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue {
- const elem_ty = self.air.typeOfIndex(inst);
 const mod = self.bin_file.options.module.?;
+ const elem_ty = self.typeOfIndex(inst);
 const abi_size = math.cast(u32, elem_ty.abiSize(mod)) orelse {
 return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)});
 };
@@ -845,7 +847,7 @@ pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void
 assert(reg == reg_mcv.register);
 const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
 try branch.inst_table.put(self.gpa, inst, stack_mcv);
- try self.genSetStack(self.air.typeOfIndex(inst), stack_mcv.stack_offset, reg_mcv);
+ try self.genSetStack(self.typeOfIndex(inst), stack_mcv.stack_offset, reg_mcv);
 }
 
 /// Copies a value to a register without tracking the register. The register is not considered
@@ -862,7 +864,7 @@ fn copyToTmpRegister(self: *Self, ty: Type, mcv: MCValue) !Register {
 /// This can have a side effect of spilling instructions to the stack to free up a register.
 fn copyToNewRegister(self: *Self, reg_owner: Air.Inst.Index, mcv: MCValue) !MCValue {
 const reg = try self.register_manager.allocReg(reg_owner, gp);
- try self.genSetReg(self.air.typeOfIndex(reg_owner), reg, mcv);
+ try self.genSetReg(self.typeOfIndex(reg_owner), reg, mcv);
 return MCValue{ .register = reg };
 }
 
@@ -894,10 +896,10 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void {
 return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none });
 
 const mod = self.bin_file.options.module.?;
- const operand_ty = self.air.typeOf(ty_op.operand);
+ const operand_ty = self.typeOf(ty_op.operand);
 const operand = try self.resolveInst(ty_op.operand);
 const info_a = operand_ty.intInfo(mod);
- const info_b = self.air.typeOfIndex(inst).intInfo(mod);
+ const info_b = self.typeOfIndex(inst).intInfo(mod);
 
 if (info_a.signedness != info_b.signedness)
 return self.fail("TODO gen intcast sign safety in semantic analysis", .{});
@@ -1126,8 +1128,8 @@ fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
 const bin_op = self.air.instructions.items(.data)[inst].bin_op;
 const lhs = try self.resolveInst(bin_op.lhs);
 const rhs = try self.resolveInst(bin_op.rhs);
- const lhs_ty = self.air.typeOf(bin_op.lhs);
- const rhs_ty = self.air.typeOf(bin_op.rhs);
+ const lhs_ty = self.typeOf(bin_op.lhs);
+ const rhs_ty = self.typeOf(bin_op.rhs);
 
 const result: MCValue = if (self.liveness.isUnused(inst)) .dead else try self.binOp(tag, inst, lhs, rhs, lhs_ty, rhs_ty);
 return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
@@ -1138,8 +1140,8 @@ fn airPtrArithmetic(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void
 const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
 const lhs = try self.resolveInst(bin_op.lhs);
 const rhs = try self.resolveInst(bin_op.rhs);
- const lhs_ty = self.air.typeOf(bin_op.lhs);
- const rhs_ty = self.air.typeOf(bin_op.rhs);
+ const lhs_ty = self.typeOf(bin_op.lhs);
+ const rhs_ty = self.typeOf(bin_op.rhs);
 
 const result: MCValue = if (self.liveness.isUnused(inst))
 .dead
 else
 try self.binOp(tag, inst, lhs, rhs, lhs_ty, rhs_ty);
 return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
@@ -1333,7 +1335,7 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
 const ty_op = self.air.instructions.items(.data)[inst].ty_op;
 const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
 const mod = self.bin_file.options.module.?;
- const optional_ty = self.air.typeOfIndex(inst);
+ const optional_ty = self.typeOfIndex(inst);
 
 // Optional with a zero-bit payload type is just a boolean true
 if (optional_ty.abiSize(mod) == 1)
@@ -1525,15 +1527,15 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo
 }
 
 fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
 const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- const elem_ty = self.air.typeOfIndex(inst);
+ const elem_ty = self.typeOfIndex(inst);
 
 const result: MCValue = result: {
- const mod = self.bin_file.options.module.?;
 if (!elem_ty.hasRuntimeBits(mod))
 break :result MCValue.none;
 
 const ptr = try self.resolveInst(ty_op.operand);
- const is_volatile = self.air.typeOf(ty_op.operand).isVolatilePtr();
+ const is_volatile = self.typeOf(ty_op.operand).isVolatilePtr();
 if (self.liveness.isUnused(inst) and !is_volatile)
 break :result MCValue.dead;
@@ -1545,7 +1547,7 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
 break :blk try self.allocRegOrMem(inst, true);
 }
 };
- try self.load(dst_mcv, ptr, self.air.typeOf(ty_op.operand));
+ try self.load(dst_mcv, ptr, self.typeOf(ty_op.operand));
 break :result dst_mcv;
 };
 return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
@@ -1586,8 +1588,8 @@ fn airStore(self: *Self, inst: Air.Inst.Index, safety: bool) !void {
 const bin_op = self.air.instructions.items(.data)[inst].bin_op;
 const ptr = try self.resolveInst(bin_op.lhs);
 const value = try self.resolveInst(bin_op.rhs);
- const ptr_ty = self.air.typeOf(bin_op.lhs);
- const value_ty = self.air.typeOf(bin_op.rhs);
+ const ptr_ty = self.typeOf(bin_op.lhs);
+ const value_ty = self.typeOf(bin_op.rhs);
 
 try self.store(ptr, value, ptr_ty, value_ty);
 
@@ -1647,7 +1649,7 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void {
 const arg_index = self.arg_index;
 self.arg_index += 1;
 
- const ty = self.air.typeOfIndex(inst);
+ const ty = self.typeOfIndex(inst);
 _ = ty;
 
 const result = self.args[arg_index];
@@ -1704,7 +1706,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
 const mod = self.bin_file.options.module.?;
 if (modifier == .always_tail) return self.fail("TODO implement tail calls for riscv64", .{});
 const pl_op = self.air.instructions.items(.data)[inst].pl_op;
- const fn_ty = self.air.typeOf(pl_op.operand);
+ const fn_ty = self.typeOf(pl_op.operand);
 const callee = pl_op.operand;
 const extra = self.air.extraData(Air.Call, pl_op.payload);
 const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]);
@@ -1717,7 +1719,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
 
 if (self.bin_file.cast(link.File.Elf)) |elf_file| {
 for (info.args, 0..) |mc_arg, arg_i| {
 const arg = args[arg_i];
- const arg_ty = self.air.typeOf(arg);
+ const arg_ty = self.typeOf(arg);
 const arg_mcv = try self.resolveInst(args[arg_i]);
 
 switch (mc_arg) {
@@ -1829,9 +1831,9 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
 const bin_op = self.air.instructions.items(.data)[inst].bin_op;
 if (self.liveness.isUnused(inst))
 return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none });
- const ty = self.air.typeOf(bin_op.lhs);
+ const ty = self.typeOf(bin_op.lhs);
 const mod = self.bin_file.options.module.?;
- assert(ty.eql(self.air.typeOf(bin_op.rhs), mod));
+ assert(ty.eql(self.typeOf(bin_op.rhs), mod));
 if (ty.zigTypeTag(mod) == .ErrorSet)
 return self.fail("TODO implement cmp for errors", .{});
@@ -1950,7 +1952,7 @@ fn airIsNullPtr(self: *Self, inst: Air.Inst.Index) !void {
 break :blk try self.allocRegOrMem(inst, true);
 }
 };
- try self.load(operand, operand_ptr, self.air.typeOf(un_op));
+ try self.load(operand, operand_ptr, self.typeOf(un_op));
 break :result try self.isNull(operand);
 };
 return self.finishAir(inst, result, .{ un_op, .none, .none });
@@ -1977,7 +1979,7 @@ fn airIsNonNullPtr(self: *Self, inst: Air.Inst.Index) !void {
 break :blk try self.allocRegOrMem(inst, true);
 }
 };
- try self.load(operand, operand_ptr, self.air.typeOf(un_op));
+ try self.load(operand, operand_ptr, self.typeOf(un_op));
 break :result try self.isNonNull(operand);
 };
 return self.finishAir(inst, result, .{ un_op, .none, .none });
@@ -2004,7 +2006,7 @@ fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void {
 break :blk try self.allocRegOrMem(inst, true);
 }
 };
- try self.load(operand, operand_ptr, self.air.typeOf(un_op));
+ try self.load(operand, operand_ptr, self.typeOf(un_op));
 break :result try self.isErr(operand);
 };
 return self.finishAir(inst, result, .{ un_op, .none, .none });
@@ -2031,7 +2033,7 @@ fn airIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !void {
 break :blk try self.allocRegOrMem(inst, true);
 }
 };
- try self.load(operand, operand_ptr, self.air.typeOf(un_op));
+ try self.load(operand, operand_ptr, self.typeOf(un_op));
 break :result try self.isNonErr(operand);
 };
 return self.finishAir(inst, result, .{ un_op, .none, .none });
@@ -2112,13 +2114,13 @@ fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void {
 const block_data = self.blocks.getPtr(block).?;
 const mod = self.bin_file.options.module.?;
 
- if (self.air.typeOf(operand).hasRuntimeBits(mod)) {
+ if (self.typeOf(operand).hasRuntimeBits(mod)) {
 const operand_mcv = try self.resolveInst(operand);
 const block_mcv = block_data.mcv;
 if (block_mcv == .none) {
 block_data.mcv = operand_mcv;
 } else {
- try self.setRegOrMem(self.air.typeOfIndex(block), block_mcv, operand_mcv);
+ try self.setRegOrMem(self.typeOfIndex(block), block_mcv, operand_mcv);
 }
 }
 return self.brVoid(block);
@@ -2181,7 +2183,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
 const arg_mcv = try self.resolveInst(input);
 try self.register_manager.getReg(reg, null);
- try self.genSetReg(self.air.typeOf(input), reg, arg_mcv);
+ try self.genSetReg(self.typeOf(input), reg, arg_mcv);
 }
 
 {
@@ -2377,7 +2379,7 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void {
 defer if (operand_lock) |lock| self.register_manager.unlockReg(lock);
 
 const dest = try self.allocRegOrMem(inst, true);
- try self.setRegOrMem(self.air.typeOfIndex(inst), dest, operand);
+ try self.setRegOrMem(self.typeOfIndex(inst), dest, operand);
 break :result dest;
 };
 return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
@@ -2494,7 +2496,7 @@ fn airReduce(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
- const vector_ty = self.air.typeOfIndex(inst);
+ const vector_ty = self.typeOfIndex(inst);
 const len = vector_ty.vectorLen();
 const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
 const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]);
@@ -2541,7 +2543,7 @@ fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue {
 const mod = self.bin_file.options.module.?;
 
 // If the type has no codegen bits, no need to store it.
- const inst_ty = self.air.typeOf(inst);
+ const inst_ty = self.typeOf(inst);
 if (!inst_ty.hasRuntimeBits(mod))
 return MCValue{ .none = {} };
 
@@ -2733,3 +2735,13 @@ fn parseRegName(name: []const u8) ?Register {
 }
 return std.meta.stringToEnum(Register, name);
 }
+
+fn typeOf(self: *Self, inst: Air.Inst.Ref) Type {
+ const mod = self.bin_file.options.module.?;
+ return self.air.typeOf(inst, mod.intern_pool);
+}
+
+fn typeOfIndex(self: *Self, inst: Air.Inst.Index) Type {
+ const mod = self.bin_file.options.module.?;
+ return self.air.typeOfIndex(inst, mod.intern_pool);
+}
diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig
index 63b604857e..e79a216315 100644
--- a/src/arch/sparc64/CodeGen.zig
+++ b/src/arch/sparc64/CodeGen.zig
@@ -490,13 +490,14 @@ fn gen(self: *Self) !void {
 }
 
 fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
+ const mod = self.bin_file.options.module.?;
+ const ip = &mod.intern_pool;
 const air_tags = self.air.instructions.items(.tag);
 
 for (body) |inst| {
 // TODO: remove now-redundant isUnused calls from AIR handler functions
- if (self.liveness.isUnused(inst) and !self.air.mustLower(inst)) {
+ if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip.*))
 continue;
- }
 
 const old_air_bookkeeping = self.air_bookkeeping;
 try self.ensureProcessDeathCapacity(Liveness.bpi);
@@ -678,6 +679,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
 
 .constant => unreachable, // excluded from function bodies
 .const_ty => unreachable, // excluded from function bodies
+ .interned => unreachable, // excluded from function bodies
 .unreach => self.finishAirBookkeeping(),
 
 .optional_payload => try self.airOptionalPayload(inst),
@@ -762,8 +764,8 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
 const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
 const lhs = try self.resolveInst(extra.lhs);
 const rhs = try self.resolveInst(extra.rhs);
- const lhs_ty = self.air.typeOf(extra.lhs);
- const rhs_ty = self.air.typeOf(extra.rhs);
+ const lhs_ty = self.typeOf(extra.lhs);
+ const rhs_ty = self.typeOf(extra.rhs);
 
 switch (lhs_ty.zigTypeTag(mod)) {
 .Vector => return self.fail("TODO implement add_with_overflow/sub_with_overflow for vectors", .{}),
@@ -836,7 +838,7 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
- const vector_ty = self.air.typeOfIndex(inst);
+ const vector_ty = self.typeOfIndex(inst);
 const len = vector_ty.vectorLen();
 const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
 const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]);
@@ -871,7 +873,7 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void {
 const ty_op = self.air.instructions.items(.data)[inst].ty_op;
 const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
- const ptr_ty = self.air.typeOf(ty_op.operand);
+ const ptr_ty = self.typeOf(ty_op.operand);
 const ptr = try self.resolveInst(ty_op.operand);
 const array_ty = ptr_ty.childType();
 const array_len = @intCast(u32, array_ty.arrayLen());
@@ -935,7 +937,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
 const arg_mcv = try self.resolveInst(input);
 try self.register_manager.getReg(reg, null);
- try self.genSetReg(self.air.typeOf(input), reg, arg_mcv);
+ try self.genSetReg(self.typeOf(input), reg, arg_mcv);
 }
 
 {
@@ -1008,16 +1010,16 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airArg(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
 const arg_index = self.arg_index;
 self.arg_index += 1;
 
- const ty = self.air.typeOfIndex(inst);
+ const ty = self.typeOfIndex(inst);
 const arg = self.args[arg_index];
 
 const mcv = blk: {
 switch (arg) {
 .stack_offset => |off| {
- const mod = self.bin_file.options.module.?;
 const abi_size = math.cast(u32, ty.abiSize(mod)) orelse {
 return self.fail("type '{}' too big to fit into stack frame", .{ty.fmt(mod)});
 };
@@ -1063,8 +1065,8 @@ fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
 const bin_op = self.air.instructions.items(.data)[inst].bin_op;
 const lhs = try self.resolveInst(bin_op.lhs);
 const rhs = try self.resolveInst(bin_op.rhs);
- const lhs_ty = self.air.typeOf(bin_op.lhs);
- const rhs_ty = self.air.typeOf(bin_op.rhs);
+ const lhs_ty = self.typeOf(bin_op.lhs);
+ const rhs_ty = self.typeOf(bin_op.rhs);
 
 const result: MCValue = if (self.liveness.isUnused(inst))
 .dead
 else
@@ -1088,8 +1090,8 @@ fn airPtrArithmetic(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void
 const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
 const lhs = try self.resolveInst(bin_op.lhs);
 const rhs = try self.resolveInst(bin_op.rhs);
- const lhs_ty = self.air.typeOf(bin_op.lhs);
- const rhs_ty = self.air.typeOf(bin_op.rhs);
+ const lhs_ty = self.typeOf(bin_op.lhs);
+ const rhs_ty = self.typeOf(bin_op.rhs);
 
 const result: MCValue = if (self.liveness.isUnused(inst))
 .dead
 else
@@ -1115,7 +1117,7 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void {
 defer if (operand_lock) |lock| self.register_manager.unlockReg(lock);
 
 const dest = try self.allocRegOrMem(inst, true);
- try self.setRegOrMem(self.air.typeOfIndex(inst), dest, operand);
+ try self.setRegOrMem(self.typeOfIndex(inst), dest, operand);
 break :result dest;
 };
 return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
@@ -1218,7 +1220,7 @@ fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void {
 // TODO: Fold byteswap+store into a single ST*A and load+byteswap into a single LD*A.
 const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
 const operand = try self.resolveInst(ty_op.operand);
- const operand_ty = self.air.typeOf(ty_op.operand);
+ const operand_ty = self.typeOf(ty_op.operand);
 switch (operand_ty.zigTypeTag(mod)) {
 .Vector => return self.fail("TODO byteswap for vectors", .{}),
 .Int => {
@@ -1294,7 +1296,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
 const callee = pl_op.operand;
 const extra = self.air.extraData(Air.Call, pl_op.payload);
 const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end .. extra.end + extra.data.args_len]);
- const ty = self.air.typeOf(callee);
+ const ty = self.typeOf(callee);
 
 const mod = self.bin_file.options.module.?;
 const fn_ty = switch (ty.zigTypeTag(mod)) {
 .Fn => ty,
@@ -1318,7 +1320,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
 
 for (info.args, 0..) |mc_arg, arg_i| {
 const arg = args[arg_i];
- const arg_ty = self.air.typeOf(arg);
+ const arg_ty = self.typeOf(arg);
 const arg_mcv = try self.resolveInst(arg);
 
 switch (mc_arg) {
@@ -1428,7 +1430,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
 const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
 const lhs = try self.resolveInst(bin_op.lhs);
 const rhs = try self.resolveInst(bin_op.rhs);
- const lhs_ty = self.air.typeOf(bin_op.lhs);
+ const lhs_ty = self.typeOf(bin_op.lhs);
 
 const int_ty = switch (lhs_ty.zigTypeTag(mod)) {
 .Vector => unreachable, // Handled by cmp_vector.
@@ -1605,7 +1607,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
 log.debug("consolidating else_entry {d} {}=>{}", .{ else_key, else_value, canon_mcv });
 // TODO make sure the destination stack offset / register does not already have something
 // going on there.
- try self.setRegOrMem(self.air.typeOfIndex(else_key), canon_mcv, else_value);
+ try self.setRegOrMem(self.typeOfIndex(else_key), canon_mcv, else_value);
 // TODO track the new register / stack allocation
 }
 try parent_branch.inst_table.ensureUnusedCapacity(self.gpa, saved_then_branch.inst_table.count());
@@ -1632,7 +1634,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
 log.debug("consolidating then_entry {d} {}=>{}", .{ then_key, parent_mcv, then_value });
 // TODO make sure the destination stack offset / register does not already have something
 // going on there.
- try self.setRegOrMem(self.air.typeOfIndex(then_key), parent_mcv, then_value);
+ try self.setRegOrMem(self.typeOfIndex(then_key), parent_mcv, then_value);
 // TODO track the new register / stack allocation
 }
 
@@ -1755,10 +1757,10 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void {
 return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none });
 
 const mod = self.bin_file.options.module.?;
- const operand_ty = self.air.typeOf(ty_op.operand);
+ const operand_ty = self.typeOf(ty_op.operand);
 const operand = try self.resolveInst(ty_op.operand);
 const info_a = operand_ty.intInfo(mod);
- const info_b = self.air.typeOfIndex(inst).intInfo(mod);
+ const info_b = self.typeOfIndex(inst).intInfo(mod);
 
 if (info_a.signedness != info_b.signedness)
 return self.fail("TODO gen intcast sign safety in semantic analysis", .{});
@@ -1780,7 +1782,7 @@ fn airIsErr(self: *Self, inst: Air.Inst.Index) !void {
 const un_op = self.air.instructions.items(.data)[inst].un_op;
 const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
 const operand = try self.resolveInst(un_op);
- const ty = self.air.typeOf(un_op);
+ const ty = self.typeOf(un_op);
 break :result try self.isErr(ty, operand);
 };
 return self.finishAir(inst, result, .{ un_op, .none, .none });
@@ -1790,7 +1792,7 @@ fn airIsNonErr(self: *Self, inst: Air.Inst.Index) !void {
 const un_op = self.air.instructions.items(.data)[inst].un_op;
 const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
 const operand = try self.resolveInst(un_op);
- const ty = self.air.typeOf(un_op);
+ const ty = self.typeOf(un_op);
 break :result try self.isNonErr(ty, operand);
 };
 return self.finishAir(inst, result, .{ un_op, .none, .none });
@@ -1815,16 +1817,16 @@ fn airIsNonNull(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
- const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- const elem_ty = self.air.typeOfIndex(inst);
 const mod = self.bin_file.options.module.?;
+ const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+ const elem_ty = self.typeOfIndex(inst);
 const elem_size = elem_ty.abiSize(mod);
 
 const result: MCValue = result: {
 if (!elem_ty.hasRuntimeBits(mod))
 break :result MCValue.none;
 
 const ptr = try self.resolveInst(ty_op.operand);
- const is_volatile = self.air.typeOf(ty_op.operand).isVolatilePtr();
+ const is_volatile = self.typeOf(ty_op.operand).isVolatilePtr();
 if (self.liveness.isUnused(inst) and !is_volatile)
 break :result MCValue.dead;
@@ -1839,7 +1841,7 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
 break :blk try self.allocRegOrMem(inst, true);
 }
 };
- try self.load(dst_mcv, ptr, self.air.typeOf(ty_op.operand));
+ try self.load(dst_mcv, ptr, self.typeOf(ty_op.operand));
 break :result dst_mcv;
 };
 return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
@@ -1882,8 +1884,8 @@ fn airMinMax(self: *Self, inst: Air.Inst.Index) !void {
 const bin_op = self.air.instructions.items(.data)[inst].bin_op;
 const lhs = try self.resolveInst(bin_op.lhs);
 const rhs = try self.resolveInst(bin_op.rhs);
- const lhs_ty = self.air.typeOf(bin_op.lhs);
- const rhs_ty = self.air.typeOf(bin_op.rhs);
+ const lhs_ty = self.typeOf(bin_op.lhs);
+ const rhs_ty = self.typeOf(bin_op.rhs);
 
 const result: MCValue = if (self.liveness.isUnused(inst))
 .dead
@@ -1897,8 +1899,8 @@ fn airMod(self: *Self, inst: Air.Inst.Index) !void {
 const bin_op = self.air.instructions.items(.data)[inst].bin_op;
 const lhs = try self.resolveInst(bin_op.lhs);
 const rhs = try self.resolveInst(bin_op.rhs);
- const lhs_ty = self.air.typeOf(bin_op.lhs);
- const rhs_ty = self.air.typeOf(bin_op.rhs);
+ const lhs_ty = self.typeOf(bin_op.lhs);
+ const rhs_ty = self.typeOf(bin_op.rhs);
 
 assert(lhs_ty.eql(rhs_ty, self.bin_file.options.module.?));
 
 if (self.liveness.isUnused(inst))
@@ -2045,8 +2047,8 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
 const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
 const lhs = try self.resolveInst(extra.lhs);
 const rhs = try self.resolveInst(extra.rhs);
- const lhs_ty = self.air.typeOf(extra.lhs);
- const rhs_ty = self.air.typeOf(extra.rhs);
+ const lhs_ty = self.typeOf(extra.lhs);
+ const rhs_ty = self.typeOf(extra.rhs);
 
 switch (lhs_ty.zigTypeTag(mod)) {
 .Vector => return self.fail("TODO implement mul_with_overflow for vectors", .{}),
@@ -2108,7 +2110,7 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void {
 const mod = self.bin_file.options.module.?;
 const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
 const operand = try self.resolveInst(ty_op.operand);
- const operand_ty = self.air.typeOf(ty_op.operand);
+ const operand_ty = self.typeOf(ty_op.operand);
 switch (operand) {
 .dead => unreachable,
 .unreach => unreachable,
@@ -2285,8 +2287,8 @@ fn airRem(self: *Self, inst: Air.Inst.Index) !void {
 const bin_op = self.air.instructions.items(.data)[inst].bin_op;
 const lhs = try self.resolveInst(bin_op.lhs);
 const rhs = try self.resolveInst(bin_op.rhs);
- const lhs_ty = self.air.typeOf(bin_op.lhs);
- const rhs_ty = self.air.typeOf(bin_op.rhs);
+ const lhs_ty = self.typeOf(bin_op.lhs);
+ const rhs_ty = self.typeOf(bin_op.rhs);
 
 // TODO add safety check
 
@@ -2341,8 +2343,8 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
 const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
 const lhs = try self.resolveInst(extra.lhs);
 const rhs = try self.resolveInst(extra.rhs);
- const lhs_ty = self.air.typeOf(extra.lhs);
- const rhs_ty = self.air.typeOf(extra.rhs);
+ const lhs_ty = self.typeOf(extra.lhs);
+ const rhs_ty = self.typeOf(extra.rhs);
 
 switch (lhs_ty.zigTypeTag(mod)) {
 .Vector => return self.fail("TODO implement mul_with_overflow for vectors", .{}),
@@ -2429,9 +2431,9 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void {
 const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
 const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
 const ptr = try self.resolveInst(bin_op.lhs);
- const ptr_ty = self.air.typeOf(bin_op.lhs);
+ const ptr_ty = self.typeOf(bin_op.lhs);
 const len = try self.resolveInst(bin_op.rhs);
- const len_ty = self.air.typeOf(bin_op.rhs);
+ const len_ty = self.typeOf(bin_op.rhs);
 
 const ptr_bits = self.target.ptrBitWidth();
 const ptr_bytes = @divExact(ptr_bits, 8);
@@ -2453,7 +2455,7 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
 const slice_mcv = try self.resolveInst(bin_op.lhs);
 const index_mcv = try self.resolveInst(bin_op.rhs);
 
- const slice_ty = self.air.typeOf(bin_op.lhs);
+ const slice_ty = self.typeOf(bin_op.lhs);
 const elem_ty = slice_ty.childType();
 const mod = self.bin_file.options.module.?;
 const elem_size = elem_ty.abiSize(mod);
@@ -2544,8 +2546,8 @@ fn airStore(self: *Self, inst: Air.Inst.Index, safety: bool) !void {
 const bin_op = self.air.instructions.items(.data)[inst].bin_op;
 const ptr = try self.resolveInst(bin_op.lhs);
 const value = try self.resolveInst(bin_op.rhs);
- const ptr_ty = self.air.typeOf(bin_op.lhs);
- const value_ty = self.air.typeOf(bin_op.rhs);
+ const ptr_ty = self.typeOf(bin_op.lhs);
+ const value_ty = self.typeOf(bin_op.rhs);
 
 try self.store(ptr, value, ptr_ty, value_ty);
 
@@ -2573,7 +2575,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
 const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
 const mod = self.bin_file.options.module.?;
 const mcv = try self.resolveInst(operand);
- const struct_ty = self.air.typeOf(operand);
+ const struct_ty = self.typeOf(operand);
 const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod));
 
 switch (mcv) {
@@ -2659,8 +2661,8 @@ fn airTagName(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airTrunc(self: *Self, inst: Air.Inst.Index) !void {
 const ty_op = self.air.instructions.items(.data)[inst].ty_op;
 const operand = try self.resolveInst(ty_op.operand);
- const operand_ty = self.air.typeOf(ty_op.operand);
- const dest_ty = self.air.typeOfIndex(inst);
+ const operand_ty = self.typeOf(ty_op.operand);
+ const dest_ty = self.typeOfIndex(inst);
 
 const result: MCValue = if (self.liveness.isUnused(inst)) .dead else blk: {
 break :blk try self.trunc(inst, operand, operand_ty, dest_ty);
@@ -2674,7 +2676,7 @@ fn airTry(self: *Self, inst: Air.Inst.Index) !void {
 const extra = self.air.extraData(Air.Try, pl_op.payload);
 const body = self.air.extra[extra.end..][0..extra.data.body_len];
 const result: MCValue = result: {
- const error_union_ty = self.air.typeOf(pl_op.operand);
+ const error_union_ty = self.typeOf(pl_op.operand);
 const error_union = try self.resolveInst(pl_op.operand);
 const is_err_result = try self.isErr(error_union_ty, error_union);
 const reloc = try self.condBr(is_err_result);
@@ -2706,7 +2708,7 @@ fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !void {
 const ty_op = self.air.instructions.items(.data)[inst].ty_op;
 const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
- const error_union_ty = self.air.typeOf(ty_op.operand);
+ const error_union_ty = self.typeOf(ty_op.operand);
 const payload_ty = error_union_ty.errorUnionPayload();
 const mcv = try self.resolveInst(ty_op.operand);
 const mod = self.bin_file.options.module.?;
@@ -2720,7 +2722,7 @@ fn airUnwrapErrPayload(self: *Self, inst: Air.Inst.Index) !void {
 const ty_op = self.air.instructions.items(.data)[inst].ty_op;
 const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
- const error_union_ty = self.air.typeOf(ty_op.operand);
+ const error_union_ty = self.typeOf(ty_op.operand);
 const payload_ty = error_union_ty.errorUnionPayload();
 const mod = self.bin_file.options.module.?;
 if (!payload_ty.hasRuntimeBits(mod)) break :result MCValue.none;
@@ -2753,12 +2755,12 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
+ const mod = self.bin_file.options.module.?;
 const ty_op = self.air.instructions.items(.data)[inst].ty_op;
 const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
- const optional_ty = self.air.typeOfIndex(inst);
+ const optional_ty = self.typeOfIndex(inst);
 
 // Optional with a zero-bit payload type is just a boolean true
- const mod = self.bin_file.options.module.?;
 if (optional_ty.abiSize(mod) == 1)
 break :result MCValue{ .immediate = 1 };
 
@@ -2794,9 +2796,9 @@ fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: u32) !u
 
 /// Use a pointer instruction as the basis for allocating stack memory.
 fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
- const elem_ty = self.air.typeOfIndex(inst).elemType();
 const mod = self.bin_file.options.module.?;
+ const elem_ty = self.typeOfIndex(inst).elemType();
 
 if (!elem_ty.hasRuntimeBits(mod)) {
 // As this stack item will never be dereferenced at runtime,
 // return the stack offset 0. Stack offset 0 will be where all
@@ -2814,8 +2816,8 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
 }
 
 fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue {
- const elem_ty = self.air.typeOfIndex(inst);
 const mod = self.bin_file.options.module.?;
+ const elem_ty = self.typeOfIndex(inst);
 const abi_size = math.cast(u32, elem_ty.abiSize(mod)) orelse {
 return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)});
 };
@@ -3406,7 +3408,7 @@ fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void {
 const block_data = self.blocks.getPtr(block).?;
 const mod = self.bin_file.options.module.?;
 
- if (self.air.typeOf(operand).hasRuntimeBits(mod)) {
+ if (self.typeOf(operand).hasRuntimeBits(mod)) {
 const operand_mcv = try self.resolveInst(operand);
 const block_mcv = block_data.mcv;
 if (block_mcv == .none) {
@@ -3415,13 +3417,13 @@ fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void {
 .register, .stack_offset, .memory => operand_mcv,
 .immediate => blk: {
 const new_mcv = try self.allocRegOrMem(block, true);
- try self.setRegOrMem(self.air.typeOfIndex(block), new_mcv, operand_mcv);
+ try self.setRegOrMem(self.typeOfIndex(block), new_mcv, operand_mcv);
 break :blk new_mcv;
 },
 else => return self.fail("TODO implement block_data.mcv = operand_mcv for {}", .{operand_mcv}),
 };
 } else {
- try self.setRegOrMem(self.air.typeOfIndex(block), block_mcv, operand_mcv);
+ try self.setRegOrMem(self.typeOfIndex(block), block_mcv, operand_mcv);
 }
 }
 return self.brVoid(block);
@@ -4549,7 +4551,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView)
 fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue {
 const mod = self.bin_file.options.module.?;
- const ty = self.air.typeOf(ref);
+ const ty = self.typeOf(ref);
 
 // If the type has no codegen bits, no need to store it.
 if (!ty.hasRuntimeBitsIgnoreComptime(mod))
 return .none;
 
@@ -4654,7 +4656,7 @@ fn spillConditionFlagsIfOccupied(self: *Self) !void {
 else => unreachable, // mcv doesn't occupy the compare flags
 };
 
- try self.setRegOrMem(self.air.typeOfIndex(inst_to_save), new_mcv, mcv);
+ try self.setRegOrMem(self.typeOfIndex(inst_to_save), new_mcv, mcv);
 log.debug("spilling {d} to mcv {any}", .{ inst_to_save, new_mcv });
 
 const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
@@ -4678,7 +4680,7 @@ pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void
 assert(reg == reg_mcv.register);
 const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
 try branch.inst_table.put(self.gpa, inst, stack_mcv);
- try self.genSetStack(self.air.typeOfIndex(inst), stack_mcv.stack_offset, reg_mcv);
+ try self.genSetStack(self.typeOfIndex(inst), stack_mcv.stack_offset, reg_mcv);
 }
 
 fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type) InnerError!void {
@@ -4726,7 +4728,7 @@ fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, inde
 return if (self.liveness.isUnused(inst)) .dead else result: {
 const mod = self.bin_file.options.module.?;
 const mcv = try self.resolveInst(operand);
- const ptr_ty = self.air.typeOf(operand);
+ const ptr_ty = self.typeOf(operand);
 const struct_ty = ptr_ty.childType();
 const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod));
 switch (mcv) {
@@ -4885,3 +4887,13 @@ fn wantSafety(self: *Self) bool {
 .ReleaseSmall => false,
 };
 }
+
+fn typeOf(self: *Self, inst: Air.Inst.Ref) Type {
+ const mod = self.bin_file.options.module.?;
+ return self.air.typeOf(inst, mod.intern_pool);
+}
+
+fn typeOfIndex(self: *Self, inst: Air.Inst.Index) Type {
+ const mod = self.bin_file.options.module.?;
+ return self.air.typeOfIndex(inst, mod.intern_pool);
+}
diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig
index b592ffcb2a..cd61eaf1fb 100644
--- a/src/arch/wasm/CodeGen.zig
+++ b/src/arch/wasm/CodeGen.zig
@@ -790,7 +790,7 @@ fn resolveInst(func: *CodeGen, ref: Air.Inst.Ref) InnerError!WValue {
 const mod = func.bin_file.base.options.module.?;
 const val = func.air.value(ref, mod).?;
- const ty = func.air.typeOf(ref);
+ const ty = func.typeOf(ref);
 if (!ty.hasRuntimeBitsIgnoreComptime(mod) and !ty.isInt(mod) and !ty.isError(mod)) {
 gop.value_ptr.* = WValue{ .none = {} };
 return gop.value_ptr.*;
@@ -1260,7 +1260,7 @@ fn genFunc(func: *CodeGen) InnerError!void {
 // we emit an unreachable instruction to tell the stack validator that part will never be reached.
 if (func_type.returns.len != 0 and func.air.instructions.len > 0) {
 const inst = @intCast(u32, func.air.instructions.len - 1);
- const last_inst_ty = func.air.typeOfIndex(inst);
+ const last_inst_ty = func.typeOfIndex(inst);
 if (!last_inst_ty.hasRuntimeBitsIgnoreComptime(mod) or last_inst_ty.isNoReturn()) {
 try func.addTag(.@"unreachable");
 }
@@ -1541,7 +1541,7 @@ fn allocStack(func: *CodeGen, ty: Type) !WValue {
 /// if it is set, to ensure the stack alignment will be set correctly.
 fn allocStackPtr(func: *CodeGen, inst: Air.Inst.Index) !WValue {
 const mod = func.bin_file.base.options.module.?;
- const ptr_ty = func.air.typeOfIndex(inst);
+ const ptr_ty = func.typeOfIndex(inst);
 const pointee_ty = ptr_ty.childType();
 
 if (func.initial_stack_value == .none) {
@@ -1834,6 +1834,7 @@ fn genInst(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 return switch (air_tags[inst]) {
 .constant => unreachable,
 .const_ty => unreachable,
+ .interned => unreachable,
 
 .add => func.airBinOp(inst, .add),
 .add_sat => func.airSatBinOp(inst, .add),
@@ -2073,8 +2074,11 @@ fn genInst(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 }
 
 fn genBody(func: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
+ const ip = &mod.intern_pool;
+
 for (body) |inst| {
- if (func.liveness.isUnused(inst) and !func.air.mustLower(inst)) {
+ if (func.liveness.isUnused(inst) and !func.air.mustLower(inst, ip.*)) {
 continue;
 }
 const old_bookkeeping_value = func.air_bookkeeping;
@@ -2134,8 +2138,8 @@ fn airRet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 }
 
 fn airRetPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
- const child_type = func.air.typeOfIndex(inst).childType();
 const mod = func.bin_file.base.options.module.?;
+ const child_type = func.typeOfIndex(inst).childType();
 
 var result = result: {
 if (!child_type.isFnOrHasRuntimeBitsIgnoreComptime(mod)) {
@@ -2157,7 +2161,7 @@ fn airRetLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 const mod = func.bin_file.base.options.module.?;
 const un_op = func.air.instructions.items(.data)[inst].un_op;
 const operand = try func.resolveInst(un_op);
- const ret_ty = func.air.typeOf(un_op).childType();
+ const ret_ty = func.typeOf(un_op).childType();
 const fn_info = func.decl.ty.fnInfo();
 
 if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) {
@@ -2179,7 +2183,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
 const pl_op = func.air.instructions.items(.data)[inst].pl_op;
 const extra = func.air.extraData(Air.Call, pl_op.payload);
 const args = @ptrCast([]const Air.Inst.Ref, func.air.extra[extra.end..][0..extra.data.args_len]);
- const ty = func.air.typeOf(pl_op.operand);
+ const ty = func.typeOf(pl_op.operand);
 
 const mod = func.bin_file.base.options.module.?;
 const fn_ty = switch (ty.zigTypeTag(mod)) {
@@ -2228,7 +2232,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
 
 for (args) |arg| {
 const arg_val = try func.resolveInst(arg);
- const arg_ty = func.air.typeOf(arg);
+ const arg_ty = func.typeOf(arg);
 if (!arg_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
 
 try func.lowerArg(fn_ty.fnInfo().cc, arg_ty, arg_val);
@@ -2296,7 +2300,7 @@ fn airStore(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void
 
 const lhs = try func.resolveInst(bin_op.lhs);
 const rhs = try func.resolveInst(bin_op.rhs);
- const ptr_ty = func.air.typeOf(bin_op.lhs);
+ const ptr_ty = func.typeOf(bin_op.lhs);
 const ptr_info = ptr_ty.ptrInfo().data;
 const ty = ptr_ty.childType();
 
@@ -2449,7 +2453,7 @@ fn airLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 const ty_op = func.air.instructions.items(.data)[inst].ty_op;
 const operand = try func.resolveInst(ty_op.operand);
 const ty = func.air.getRefType(ty_op.ty);
- const ptr_ty = func.air.typeOf(ty_op.operand);
+ const ptr_ty = func.typeOf(ty_op.operand);
 const ptr_info = ptr_ty.ptrInfo().data;
 
 if (!ty.hasRuntimeBitsIgnoreComptime(mod)) return func.finishAir(inst, .none, &.{ty_op.operand});
@@ -2522,11 +2526,11 @@ fn load(func: *CodeGen, operand: WValue, ty: Type, offset: u32) InnerError!WValu
 }
 
 fn airArg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+ const mod = func.bin_file.base.options.module.?;
 const arg_index = func.arg_index;
 const arg = func.args[arg_index];
 const cc = func.decl.ty.fnInfo().cc;
- const arg_ty = func.air.typeOfIndex(inst);
- const mod = func.bin_file.base.options.module.?;
+ const arg_ty = func.typeOfIndex(inst);
 
 if (cc == .C) {
 const arg_classes = abi.classifyType(arg_ty, mod);
 for (arg_classes) |class| {
@@ -2572,8 +2576,8 @@ fn airBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
 const bin_op = func.air.instructions.items(.data)[inst].bin_op;
 const lhs = try func.resolveInst(bin_op.lhs);
 const rhs = try func.resolveInst(bin_op.rhs);
- const lhs_ty = func.air.typeOf(bin_op.lhs);
- const rhs_ty = func.air.typeOf(bin_op.rhs);
+ const lhs_ty = func.typeOf(bin_op.lhs);
+ const rhs_ty = func.typeOf(bin_op.rhs);
 
 // For certain operations, such as shifting, the types are different.
 // When converting this to a WebAssembly type, they *must* match to perform
@@ -2770,7 +2774,7 @@ const FloatOp = enum {
 fn airUnaryFloatOp(func: *CodeGen, inst: Air.Inst.Index, op: FloatOp) InnerError!void {
 const un_op = func.air.instructions.items(.data)[inst].un_op;
 const operand = try func.resolveInst(un_op);
- const ty = func.air.typeOf(un_op);
+ const ty = func.typeOf(un_op);
 
 const result = try (try func.floatOp(op, ty, &.{operand})).toLocal(func, ty);
 func.finishAir(inst, result, &.{un_op});
@@ -2847,8 +2851,8 @@ fn airWrapBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
 
 const lhs = try func.resolveInst(bin_op.lhs);
 const rhs = try func.resolveInst(bin_op.rhs);
- const lhs_ty = func.air.typeOf(bin_op.lhs);
- const rhs_ty = func.air.typeOf(bin_op.rhs);
+ const lhs_ty = func.typeOf(bin_op.lhs);
+ const rhs_ty = func.typeOf(bin_op.rhs);
 
 if (lhs_ty.zigTypeTag(mod) == .Vector or rhs_ty.zigTypeTag(mod) == .Vector) {
 return func.fail("TODO: Implement wrapping arithmetic for vectors", .{});
@@ -3387,7 +3391,7 @@ fn airCmp(func: *CodeGen, inst: Air.Inst.Index, op: std.math.CompareOperator) In
 const lhs = try func.resolveInst(bin_op.lhs);
 const rhs = try func.resolveInst(bin_op.rhs);
- const operand_ty = func.air.typeOf(bin_op.lhs);
+ const operand_ty = func.typeOf(bin_op.lhs);
 
 const result = try (try func.cmp(lhs, rhs, operand_ty, op)).toLocal(func, Type.u32); // comparison result is always 32 bits
 func.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
 }
@@ -3488,7 +3492,7 @@ fn airBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 const block = func.blocks.get(br.block_inst).?;
 
 // if operand has codegen bits we should break with a value
- if (func.air.typeOf(br.operand).hasRuntimeBitsIgnoreComptime(mod)) {
+ if (func.typeOf(br.operand).hasRuntimeBitsIgnoreComptime(mod)) {
 const operand = try func.resolveInst(br.operand);
 try func.lowerToStack(operand);
 
@@ -3509,7 +3513,7 @@ fn airNot(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 const ty_op = func.air.instructions.items(.data)[inst].ty_op;
 
 const operand = try func.resolveInst(ty_op.operand);
- const operand_ty = func.air.typeOf(ty_op.operand);
+ const operand_ty = func.typeOf(ty_op.operand);
 const mod = func.bin_file.base.options.module.?;
 
 const result = result: {
@@ -3575,8 +3579,8 @@ fn airBitcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 const ty_op = func.air.instructions.items(.data)[inst].ty_op;
 const result = result: {
 const operand = try func.resolveInst(ty_op.operand);
- const wanted_ty = func.air.typeOfIndex(inst);
- const given_ty = func.air.typeOf(ty_op.operand);
+ const wanted_ty = func.typeOfIndex(inst);
+ const given_ty = func.typeOf(ty_op.operand);
 if (given_ty.isAnyFloat() or wanted_ty.isAnyFloat()) {
 const bitcast_result = try func.bitcast(wanted_ty, given_ty, operand);
 break :result try bitcast_result.toLocal(func, wanted_ty);
@@ -3609,7 +3613,7 @@ fn airStructFieldPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 const extra = func.air.extraData(Air.StructField, ty_pl.payload);
 
 const struct_ptr = try func.resolveInst(extra.data.struct_operand);
- const struct_ty = func.air.typeOf(extra.data.struct_operand).childType();
+ const struct_ty = func.typeOf(extra.data.struct_operand).childType();
 const result = try func.structFieldPtr(inst, extra.data.struct_operand, struct_ptr, struct_ty, extra.data.field_index);
 func.finishAir(inst, result, &.{extra.data.struct_operand});
 }
@@ -3617,7 +3621,7 @@ fn airStructFieldPtrIndex(func: *CodeGen, inst: Air.Inst.Index, index: u32) InnerError!void {
 const ty_op = func.air.instructions.items(.data)[inst].ty_op;
 const struct_ptr = try func.resolveInst(ty_op.operand);
- const struct_ty = func.air.typeOf(ty_op.operand).childType();
+ const struct_ty = func.typeOf(ty_op.operand).childType();
 
 const result = try func.structFieldPtr(inst, ty_op.operand, struct_ptr, struct_ty, index);
 func.finishAir(inst, result, &.{ty_op.operand});
@@ -3632,7 +3636,7 @@ fn structFieldPtr(
 index: u32,
 ) InnerError!WValue {
 const mod = func.bin_file.base.options.module.?;
- const result_ty = func.air.typeOfIndex(inst);
+ const result_ty = func.typeOfIndex(inst);
 const offset = switch (struct_ty.containerLayout()) {
 .Packed => switch (struct_ty.zigTypeTag(mod)) {
 .Struct => offset: {
@@ -3663,7 +3667,7 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
 const struct_field = func.air.extraData(Air.StructField, ty_pl.payload).data;
 
- const struct_ty = func.air.typeOf(struct_field.struct_operand);
+ const struct_ty = func.typeOf(struct_field.struct_operand);
 const operand = try func.resolveInst(struct_field.struct_operand);
 const field_index = struct_field.field_index;
 const field_ty = struct_ty.structFieldType(field_index);
@@ -3762,7 +3766,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 const blocktype = wasm.block_empty;
 const pl_op = func.air.instructions.items(.data)[inst].pl_op;
 const target = try func.resolveInst(pl_op.operand);
- const target_ty = func.air.typeOf(pl_op.operand);
+ const target_ty = func.typeOf(pl_op.operand);
 const switch_br = func.air.extraData(Air.SwitchBr, pl_op.payload);
 const liveness = try func.liveness.getSwitchBr(func.gpa, inst, switch_br.data.cases_len + 1);
 defer func.gpa.free(liveness.deaths);
@@ -3940,7 +3944,7 @@ fn airIsErr(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerErro
 const mod = func.bin_file.base.options.module.?;
 const un_op = func.air.instructions.items(.data)[inst].un_op;
 const operand = try func.resolveInst(un_op);
- const err_union_ty = func.air.typeOf(un_op);
+ const err_union_ty = func.typeOf(un_op);
 const pl_ty = err_union_ty.errorUnionPayload();
 
 const result = result: {
@@ -3976,7 +3980,7 @@ fn airUnwrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: boo
 const ty_op =
func.air.instructions.items(.data)[inst].ty_op; const operand = try func.resolveInst(ty_op.operand); - const op_ty = func.air.typeOf(ty_op.operand); + const op_ty = func.typeOf(ty_op.operand); const err_ty = if (op_is_ptr) op_ty.childType() else op_ty; const payload_ty = err_ty.errorUnionPayload(); @@ -4004,7 +4008,7 @@ fn airUnwrapErrUnionError(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: bool) const ty_op = func.air.instructions.items(.data)[inst].ty_op; const operand = try func.resolveInst(ty_op.operand); - const op_ty = func.air.typeOf(ty_op.operand); + const op_ty = func.typeOf(ty_op.operand); const err_ty = if (op_is_ptr) op_ty.childType() else op_ty; const payload_ty = err_ty.errorUnionPayload(); @@ -4028,9 +4032,9 @@ fn airWrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void const ty_op = func.air.instructions.items(.data)[inst].ty_op; const operand = try func.resolveInst(ty_op.operand); - const err_ty = func.air.typeOfIndex(inst); + const err_ty = func.typeOfIndex(inst); - const pl_ty = func.air.typeOf(ty_op.operand); + const pl_ty = func.typeOf(ty_op.operand); const result = result: { if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) { break :result func.reuseOperand(ty_op.operand, operand); @@ -4082,7 +4086,7 @@ fn airIntcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const ty = func.air.getRefType(ty_op.ty); const operand = try func.resolveInst(ty_op.operand); - const operand_ty = func.air.typeOf(ty_op.operand); + const operand_ty = func.typeOf(ty_op.operand); const mod = func.bin_file.base.options.module.?; if (ty.zigTypeTag(mod) == .Vector or operand_ty.zigTypeTag(mod) == .Vector) { return func.fail("todo Wasm intcast for vectors", .{}); @@ -4155,7 +4159,7 @@ fn airIsNull(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode, op_kind: const un_op = func.air.instructions.items(.data)[inst].un_op; const operand = try func.resolveInst(un_op); - const op_ty = func.air.typeOf(un_op); + const op_ty = func.typeOf(un_op); const optional_ty = if (op_kind == .ptr) op_ty.childType() else op_ty; const is_null = try func.isNull(operand, optional_ty, opcode); const result = try is_null.toLocal(func, optional_ty); @@ -4196,8 +4200,8 @@ fn isNull(func: *CodeGen, operand: WValue, optional_ty: Type, opcode: wasm.Opcod fn airOptionalPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; - const opt_ty = func.air.typeOf(ty_op.operand); - const payload_ty = func.air.typeOfIndex(inst); + const opt_ty = func.typeOf(ty_op.operand); + const payload_ty = func.typeOfIndex(inst); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return func.finishAir(inst, .none, &.{ty_op.operand}); } @@ -4219,7 +4223,7 @@ fn airOptionalPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { fn airOptionalPayloadPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const ty_op = func.air.instructions.items(.data)[inst].ty_op; const operand = try func.resolveInst(ty_op.operand); - const opt_ty = func.air.typeOf(ty_op.operand).childType(); + const opt_ty = func.typeOf(ty_op.operand).childType(); const mod = func.bin_file.base.options.module.?; const result = result: { @@ -4238,7 +4242,7 @@ fn airOptionalPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!voi const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; const operand = try func.resolveInst(ty_op.operand); - const opt_ty = 
func.air.typeOf(ty_op.operand).childType(); + const opt_ty = func.typeOf(ty_op.operand).childType(); var buf: Type.Payload.ElemType = undefined; const payload_ty = opt_ty.optionalChild(&buf); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { @@ -4263,7 +4267,7 @@ fn airOptionalPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!voi fn airWrapOptional(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const ty_op = func.air.instructions.items(.data)[inst].ty_op; - const payload_ty = func.air.typeOf(ty_op.operand); + const payload_ty = func.typeOf(ty_op.operand); const mod = func.bin_file.base.options.module.?; const result = result: { @@ -4276,7 +4280,7 @@ fn airWrapOptional(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } const operand = try func.resolveInst(ty_op.operand); - const op_ty = func.air.typeOfIndex(inst); + const op_ty = func.typeOfIndex(inst); if (op_ty.optionalReprIsPayload(mod)) { break :result func.reuseOperand(ty_op.operand, operand); } @@ -4304,7 +4308,7 @@ fn airSlice(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const lhs = try func.resolveInst(bin_op.lhs); const rhs = try func.resolveInst(bin_op.rhs); - const slice_ty = func.air.typeOfIndex(inst); + const slice_ty = func.typeOfIndex(inst); const slice = try func.allocStack(slice_ty); try func.store(slice, lhs, Type.usize, 0); @@ -4323,7 +4327,7 @@ fn airSliceLen(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { fn airSliceElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const bin_op = func.air.instructions.items(.data)[inst].bin_op; - const slice_ty = func.air.typeOf(bin_op.lhs); + const slice_ty = func.typeOf(bin_op.lhs); const slice = try func.resolveInst(bin_op.lhs); const index = try func.resolveInst(bin_op.rhs); const elem_ty = slice_ty.childType(); @@ -4395,7 +4399,7 @@ fn airTrunc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const operand = try func.resolveInst(ty_op.operand); const wanted_ty = func.air.getRefType(ty_op.ty); - const op_ty = func.air.typeOf(ty_op.operand); + const op_ty = func.typeOf(ty_op.operand); const result = try func.trunc(operand, wanted_ty, op_ty); func.finishAir(inst, try result.toLocal(func, wanted_ty), &.{ty_op.operand}); @@ -4432,7 +4436,7 @@ fn airArrayToSlice(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const ty_op = func.air.instructions.items(.data)[inst].ty_op; const operand = try func.resolveInst(ty_op.operand); - const array_ty = func.air.typeOf(ty_op.operand).childType(); + const array_ty = func.typeOf(ty_op.operand).childType(); const slice_ty = func.air.getRefType(ty_op.ty); // create a slice on the stack @@ -4453,7 +4457,7 @@ fn airArrayToSlice(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { fn airPtrToInt(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const un_op = func.air.instructions.items(.data)[inst].un_op; const operand = try func.resolveInst(un_op); - const ptr_ty = func.air.typeOf(un_op); + const ptr_ty = func.typeOf(un_op); const result = if (ptr_ty.isSlice()) try func.slicePtr(operand) else switch (operand) { @@ -4467,7 +4471,7 @@ fn airPtrToInt(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { fn airPtrElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const bin_op = func.air.instructions.items(.data)[inst].bin_op; - const ptr_ty = func.air.typeOf(bin_op.lhs); + const ptr_ty = func.typeOf(bin_op.lhs); const ptr = try func.resolveInst(bin_op.lhs); const index = try func.resolveInst(bin_op.rhs); const elem_ty = ptr_ty.childType(); @@ -4505,7 +4509,7 
@@ fn airPtrElemPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const ty_pl = func.air.instructions.items(.data)[inst].ty_pl; const bin_op = func.air.extraData(Air.Bin, ty_pl.payload).data; - const ptr_ty = func.air.typeOf(bin_op.lhs); + const ptr_ty = func.typeOf(bin_op.lhs); const elem_ty = func.air.getRefType(ty_pl.ty).childType(); const mod = func.bin_file.base.options.module.?; const elem_size = elem_ty.abiSize(mod); @@ -4538,7 +4542,7 @@ fn airPtrBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { const ptr = try func.resolveInst(bin_op.lhs); const offset = try func.resolveInst(bin_op.rhs); - const ptr_ty = func.air.typeOf(bin_op.lhs); + const ptr_ty = func.typeOf(bin_op.lhs); const pointee_ty = switch (ptr_ty.ptrSize()) { .One => ptr_ty.childType().childType(), // ptr to array, so get array element type else => ptr_ty.childType(), @@ -4568,7 +4572,7 @@ fn airMemset(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void const bin_op = func.air.instructions.items(.data)[inst].bin_op; const ptr = try func.resolveInst(bin_op.lhs); - const ptr_ty = func.air.typeOf(bin_op.lhs); + const ptr_ty = func.typeOf(bin_op.lhs); const value = try func.resolveInst(bin_op.rhs); const len = switch (ptr_ty.ptrSize()) { .Slice => try func.sliceLen(ptr), @@ -4683,7 +4687,7 @@ fn memset(func: *CodeGen, elem_ty: Type, ptr: WValue, len: WValue, value: WValue fn airArrayElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const bin_op = func.air.instructions.items(.data)[inst].bin_op; - const array_ty = func.air.typeOf(bin_op.lhs); + const array_ty = func.typeOf(bin_op.lhs); const array = try func.resolveInst(bin_op.lhs); const index = try func.resolveInst(bin_op.rhs); const elem_ty = array_ty.childType(); @@ -4750,12 +4754,12 @@ fn airArrayElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airFloatToInt(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; const operand = try func.resolveInst(ty_op.operand); - const dest_ty = func.air.typeOfIndex(inst); - const op_ty = func.air.typeOf(ty_op.operand); - const mod = func.bin_file.base.options.module.?; + const dest_ty = func.typeOfIndex(inst); + const op_ty = func.typeOf(ty_op.operand); if (op_ty.abiSize(mod) > 8) { return func.fail("TODO: floatToInt for integers/floats with bitsize larger than 64 bits", .{}); @@ -4775,12 +4779,12 @@ fn airFloatToInt(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airIntToFloat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; const operand = try func.resolveInst(ty_op.operand); - const dest_ty = func.air.typeOfIndex(inst); - const op_ty = func.air.typeOf(ty_op.operand); - const mod = func.bin_file.base.options.module.?; + const dest_ty = func.typeOfIndex(inst); + const op_ty = func.typeOf(ty_op.operand); if (op_ty.abiSize(mod) > 8) { return func.fail("TODO: intToFloat for integers/floats with bitsize larger than 64 bits", .{}); @@ -4804,7 +4808,7 @@ fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; const operand = try func.resolveInst(ty_op.operand); - const ty = func.air.typeOfIndex(inst); + const ty = func.typeOfIndex(inst); const elem_ty = ty.childType(); if 
(determineSimdStoreStrategy(ty, mod) == .direct) blk: { @@ -4881,7 +4885,7 @@ fn airSelect(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { fn airShuffle(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const mod = func.bin_file.base.options.module.?; - const inst_ty = func.air.typeOfIndex(inst); + const inst_ty = func.typeOfIndex(inst); const ty_pl = func.air.instructions.items(.data)[inst].ty_pl; const extra = func.air.extraData(Air.Shuffle, ty_pl.payload).data; @@ -4894,7 +4898,7 @@ fn airShuffle(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const elem_size = child_ty.abiSize(mod); // TODO: One of them could be by ref; handle in loop - if (isByRef(func.air.typeOf(extra.a), mod) or isByRef(inst_ty, mod)) { + if (isByRef(func.typeOf(extra.a), mod) or isByRef(inst_ty, mod)) { const result = try func.allocStack(inst_ty); for (0..mask_len) |index| { @@ -4951,11 +4955,11 @@ fn airReduce(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_pl = func.air.instructions.items(.data)[inst].ty_pl; - const result_ty = func.air.typeOfIndex(inst); + const result_ty = func.typeOfIndex(inst); const len = @intCast(usize, result_ty.arrayLen()); const elements = @ptrCast([]const Air.Inst.Ref, func.air.extra[ty_pl.payload..][0..len]); - const mod = func.bin_file.base.options.module.?; const result: WValue = result_value: { switch (result_ty.zigTypeTag(mod)) { @@ -5085,7 +5089,7 @@ fn airUnionInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const extra = func.air.extraData(Air.UnionInit, ty_pl.payload).data; const result = result: { - const union_ty = func.air.typeOfIndex(inst); + const union_ty = func.typeOfIndex(inst); const layout = union_ty.unionGetLayout(mod); const union_obj = union_ty.cast(Type.Payload.Union).?.data; const field = union_obj.fields.values()[extra.field_index]; @@ -5164,7 +5168,7 @@ fn airPrefetch(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { fn airWasmMemorySize(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const pl_op = func.air.instructions.items(.data)[inst].pl_op; - const result = try func.allocLocal(func.air.typeOfIndex(inst)); + const result = try func.allocLocal(func.typeOfIndex(inst)); try func.addLabel(.memory_size, pl_op.payload); try func.addLabel(.local_set, result.local.value); func.finishAir(inst, result, &.{pl_op.operand}); @@ -5174,7 +5178,7 @@ fn airWasmMemoryGrow(func: *CodeGen, inst: Air.Inst.Index) !void { const pl_op = func.air.instructions.items(.data)[inst].pl_op; const operand = try func.resolveInst(pl_op.operand); - const result = try func.allocLocal(func.air.typeOfIndex(inst)); + const result = try func.allocLocal(func.typeOfIndex(inst)); try func.emitWValue(operand); try func.addLabel(.memory_grow, pl_op.payload); try func.addLabel(.local_set, result.local.value); @@ -5263,8 +5267,8 @@ fn cmpBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op: std fn airSetUnionTag(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const mod = func.bin_file.base.options.module.?; const bin_op = func.air.instructions.items(.data)[inst].bin_op; - const un_ty = func.air.typeOf(bin_op.lhs).childType(); - const tag_ty = func.air.typeOf(bin_op.rhs); + const un_ty = func.typeOf(bin_op.lhs).childType(); + const tag_ty = func.typeOf(bin_op.rhs); const layout = un_ty.unionGetLayout(mod); if (layout.tag_size == 0) return func.finishAir(inst, .none, &.{ bin_op.lhs, 
bin_op.rhs }); @@ -5288,8 +5292,8 @@ fn airGetUnionTag(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; - const un_ty = func.air.typeOf(ty_op.operand); - const tag_ty = func.air.typeOfIndex(inst); + const un_ty = func.typeOf(ty_op.operand); + const tag_ty = func.typeOfIndex(inst); const layout = un_ty.unionGetLayout(mod); if (layout.tag_size == 0) return func.finishAir(inst, .none, &.{ty_op.operand}); @@ -5307,9 +5311,9 @@ fn airGetUnionTag(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { fn airFpext(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const ty_op = func.air.instructions.items(.data)[inst].ty_op; - const dest_ty = func.air.typeOfIndex(inst); + const dest_ty = func.typeOfIndex(inst); const operand = try func.resolveInst(ty_op.operand); - const extended = try func.fpext(operand, func.air.typeOf(ty_op.operand), dest_ty); + const extended = try func.fpext(operand, func.typeOf(ty_op.operand), dest_ty); const result = try extended.toLocal(func, dest_ty); func.finishAir(inst, result, &.{ty_op.operand}); } @@ -5352,9 +5356,9 @@ fn fpext(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerError! fn airFptrunc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const ty_op = func.air.instructions.items(.data)[inst].ty_op; - const dest_ty = func.air.typeOfIndex(inst); + const dest_ty = func.typeOfIndex(inst); const operand = try func.resolveInst(ty_op.operand); - const truncated = try func.fptrunc(operand, func.air.typeOf(ty_op.operand), dest_ty); + const truncated = try func.fptrunc(operand, func.typeOf(ty_op.operand), dest_ty); const result = try truncated.toLocal(func, dest_ty); func.finishAir(inst, result, &.{ty_op.operand}); } @@ -5393,7 +5397,7 @@ fn airErrUnionPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!voi const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; - const err_set_ty = func.air.typeOf(ty_op.operand).childType(); + const err_set_ty = func.typeOf(ty_op.operand).childType(); const payload_ty = err_set_ty.errorUnionPayload(); const operand = try func.resolveInst(ty_op.operand); @@ -5448,10 +5452,10 @@ fn airMemcpy(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const mod = func.bin_file.base.options.module.?; const bin_op = func.air.instructions.items(.data)[inst].bin_op; const dst = try func.resolveInst(bin_op.lhs); - const dst_ty = func.air.typeOf(bin_op.lhs); + const dst_ty = func.typeOf(bin_op.lhs); const ptr_elem_ty = dst_ty.childType(); const src = try func.resolveInst(bin_op.rhs); - const src_ty = func.air.typeOf(bin_op.rhs); + const src_ty = func.typeOf(bin_op.rhs); const len = switch (dst_ty.ptrSize()) { .Slice => blk: { const slice_len = try func.sliceLen(dst); @@ -5485,12 +5489,12 @@ fn airRetAddr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airPopcount(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; const operand = try func.resolveInst(ty_op.operand); - const op_ty = func.air.typeOf(ty_op.operand); - const result_ty = func.air.typeOfIndex(inst); - const mod = func.bin_file.base.options.module.?; + const op_ty = func.typeOf(ty_op.operand); + const result_ty = func.typeOfIndex(inst); if (op_ty.zigTypeTag(mod) == .Vector) { return func.fail("TODO: Implement @popCount for vectors", .{}); @@ -5585,7 
+5589,7 @@ fn airAddSubWithOverflow(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerErro const lhs_op = try func.resolveInst(extra.lhs); const rhs_op = try func.resolveInst(extra.rhs); - const lhs_ty = func.air.typeOf(extra.lhs); + const lhs_ty = func.typeOf(extra.lhs); const mod = func.bin_file.base.options.module.?; if (lhs_ty.zigTypeTag(mod) == .Vector) { @@ -5599,7 +5603,7 @@ fn airAddSubWithOverflow(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerErro }; if (wasm_bits == 128) { - const result = try func.addSubWithOverflowBigInt(lhs_op, rhs_op, lhs_ty, func.air.typeOfIndex(inst), op); + const result = try func.addSubWithOverflowBigInt(lhs_op, rhs_op, lhs_ty, func.typeOfIndex(inst), op); return func.finishAir(inst, result, &.{ extra.lhs, extra.rhs }); } @@ -5649,7 +5653,7 @@ fn airAddSubWithOverflow(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerErro var overflow_local = try overflow_bit.toLocal(func, Type.u32); defer overflow_local.free(func); - const result_ptr = try func.allocStack(func.air.typeOfIndex(inst)); + const result_ptr = try func.allocStack(func.typeOfIndex(inst)); try func.store(result_ptr, result, lhs_ty, 0); const offset = @intCast(u32, lhs_ty.abiSize(mod)); try func.store(result_ptr, overflow_local, Type.initTag(.u1), offset); @@ -5729,8 +5733,8 @@ fn airShlWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const lhs = try func.resolveInst(extra.lhs); const rhs = try func.resolveInst(extra.rhs); - const lhs_ty = func.air.typeOf(extra.lhs); - const rhs_ty = func.air.typeOf(extra.rhs); + const lhs_ty = func.typeOf(extra.lhs); + const rhs_ty = func.typeOf(extra.rhs); if (lhs_ty.zigTypeTag(mod) == .Vector) { return func.fail("TODO: Implement overflow arithmetic for vectors", .{}); @@ -5771,7 +5775,7 @@ fn airShlWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { var overflow_local = try overflow_bit.toLocal(func, Type.initTag(.u1)); defer overflow_local.free(func); - const result_ptr = try func.allocStack(func.air.typeOfIndex(inst)); + const result_ptr = try func.allocStack(func.typeOfIndex(inst)); try func.store(result_ptr, result, lhs_ty, 0); const offset = @intCast(u32, lhs_ty.abiSize(mod)); try func.store(result_ptr, overflow_local, Type.initTag(.u1), offset); @@ -5785,7 +5789,7 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const lhs = try func.resolveInst(extra.lhs); const rhs = try func.resolveInst(extra.rhs); - const lhs_ty = func.air.typeOf(extra.lhs); + const lhs_ty = func.typeOf(extra.lhs); const mod = func.bin_file.base.options.module.?; if (lhs_ty.zigTypeTag(mod) == .Vector) { @@ -5946,7 +5950,7 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { var bin_op_local = try bin_op.toLocal(func, lhs_ty); defer bin_op_local.free(func); - const result_ptr = try func.allocStack(func.air.typeOfIndex(inst)); + const result_ptr = try func.allocStack(func.typeOfIndex(inst)); try func.store(result_ptr, bin_op_local, lhs_ty, 0); const offset = @intCast(u32, lhs_ty.abiSize(mod)); try func.store(result_ptr, overflow_bit, Type.initTag(.u1), offset); @@ -5955,10 +5959,10 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airMaxMin(func: *CodeGen, inst: Air.Inst.Index, op: enum { max, min }) InnerError!void { + const mod = func.bin_file.base.options.module.?; const bin_op = func.air.instructions.items(.data)[inst].bin_op; - const ty = func.air.typeOfIndex(inst); - const mod = func.bin_file.base.options.module.?; + const ty = 
func.typeOfIndex(inst); if (ty.zigTypeTag(mod) == .Vector) { return func.fail("TODO: `@maximum` and `@minimum` for vectors", .{}); } @@ -5986,11 +5990,11 @@ fn airMaxMin(func: *CodeGen, inst: Air.Inst.Index, op: enum { max, min }) InnerE } fn airMulAdd(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const pl_op = func.air.instructions.items(.data)[inst].pl_op; const bin_op = func.air.extraData(Air.Bin, pl_op.payload).data; - const ty = func.air.typeOfIndex(inst); - const mod = func.bin_file.base.options.module.?; + const ty = func.typeOfIndex(inst); if (ty.zigTypeTag(mod) == .Vector) { return func.fail("TODO: `@mulAdd` for vectors", .{}); } @@ -6020,11 +6024,11 @@ fn airMulAdd(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airClz(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; - const ty = func.air.typeOf(ty_op.operand); - const result_ty = func.air.typeOfIndex(inst); - const mod = func.bin_file.base.options.module.?; + const ty = func.typeOf(ty_op.operand); + const result_ty = func.typeOfIndex(inst); if (ty.zigTypeTag(mod) == .Vector) { return func.fail("TODO: `@clz` for vectors", .{}); } @@ -6073,12 +6077,12 @@ fn airClz(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airCtz(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; - const ty = func.air.typeOf(ty_op.operand); - const result_ty = func.air.typeOfIndex(inst); + const ty = func.typeOf(ty_op.operand); + const result_ty = func.typeOfIndex(inst); - const mod = func.bin_file.base.options.module.?; if (ty.zigTypeTag(mod) == .Vector) { return func.fail("TODO: `@ctz` for vectors", .{}); } @@ -6141,7 +6145,7 @@ fn airDbgVar(func: *CodeGen, inst: Air.Inst.Index, is_ptr: bool) !void { if (func.debug_output != .dwarf) return func.finishAir(inst, .none, &.{}); const pl_op = func.air.instructions.items(.data)[inst].pl_op; - const ty = func.air.typeOf(pl_op.operand); + const ty = func.typeOf(pl_op.operand); const operand = try func.resolveInst(pl_op.operand); log.debug("airDbgVar: %{d}: {}, {}", .{ inst, ty.fmtDebug(), operand }); @@ -6179,7 +6183,7 @@ fn airTry(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const err_union = try func.resolveInst(pl_op.operand); const extra = func.air.extraData(Air.Try, pl_op.payload); const body = func.air.extra[extra.end..][0..extra.data.body_len]; - const err_union_ty = func.air.typeOf(pl_op.operand); + const err_union_ty = func.typeOf(pl_op.operand); const result = try lowerTry(func, inst, err_union, body, err_union_ty, false); func.finishAir(inst, result, &.{pl_op.operand}); } @@ -6189,7 +6193,7 @@ fn airTryPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const extra = func.air.extraData(Air.TryPtr, ty_pl.payload); const err_union_ptr = try func.resolveInst(extra.data.ptr); const body = func.air.extra[extra.end..][0..extra.data.body_len]; - const err_union_ty = func.air.typeOf(extra.data.ptr).childType(); + const err_union_ty = func.typeOf(extra.data.ptr).childType(); const result = try lowerTry(func, inst, err_union_ptr, body, err_union_ty, true); func.finishAir(inst, result, &.{extra.data.ptr}); } @@ -6251,11 +6255,11 @@ fn lowerTry( } fn airByteSwap(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_op 
= func.air.instructions.items(.data)[inst].ty_op; - const ty = func.air.typeOfIndex(inst); + const ty = func.typeOfIndex(inst); const operand = try func.resolveInst(ty_op.operand); - const mod = func.bin_file.base.options.module.?; if (ty.zigTypeTag(mod) == .Vector) { return func.fail("TODO: @byteSwap for vectors", .{}); @@ -6325,7 +6329,7 @@ fn airDiv(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const mod = func.bin_file.base.options.module.?; const bin_op = func.air.instructions.items(.data)[inst].bin_op; - const ty = func.air.typeOfIndex(inst); + const ty = func.typeOfIndex(inst); const lhs = try func.resolveInst(bin_op.lhs); const rhs = try func.resolveInst(bin_op.rhs); @@ -6340,7 +6344,7 @@ fn airDivTrunc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const mod = func.bin_file.base.options.module.?; const bin_op = func.air.instructions.items(.data)[inst].bin_op; - const ty = func.air.typeOfIndex(inst); + const ty = func.typeOfIndex(inst); const lhs = try func.resolveInst(bin_op.lhs); const rhs = try func.resolveInst(bin_op.rhs); @@ -6361,7 +6365,7 @@ fn airDivFloor(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const bin_op = func.air.instructions.items(.data)[inst].bin_op; const mod = func.bin_file.base.options.module.?; - const ty = func.air.typeOfIndex(inst); + const ty = func.typeOfIndex(inst); const lhs = try func.resolveInst(bin_op.lhs); const rhs = try func.resolveInst(bin_op.rhs); @@ -6512,7 +6516,7 @@ fn airSatBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { const bin_op = func.air.instructions.items(.data)[inst].bin_op; const mod = func.bin_file.base.options.module.?; - const ty = func.air.typeOfIndex(inst); + const ty = func.typeOfIndex(inst); const lhs = try func.resolveInst(bin_op.lhs); const rhs = try func.resolveInst(bin_op.rhs); @@ -6626,7 +6630,7 @@ fn airShlSat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const bin_op = func.air.instructions.items(.data)[inst].bin_op; const mod = func.bin_file.base.options.module.?; - const ty = func.air.typeOfIndex(inst); + const ty = func.typeOfIndex(inst); const int_info = ty.intInfo(mod); const is_signed = int_info.signedness == .signed; if (int_info.bits > 64) { @@ -6785,11 +6789,11 @@ fn callIntrinsic( fn airTagName(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const un_op = func.air.instructions.items(.data)[inst].un_op; const operand = try func.resolveInst(un_op); - const enum_ty = func.air.typeOf(un_op); + const enum_ty = func.typeOf(un_op); const func_sym_index = try func.getTagNameFunction(enum_ty); - const result_ptr = try func.allocStack(func.air.typeOfIndex(inst)); + const result_ptr = try func.allocStack(func.typeOfIndex(inst)); try func.lowerToStack(result_ptr); try func.emitWValue(operand); try func.addLabel(.call, func_sym_index); @@ -7061,9 +7065,9 @@ fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const ty_pl = func.air.instructions.items(.data)[inst].ty_pl; const extra = func.air.extraData(Air.Cmpxchg, ty_pl.payload).data; - const ptr_ty = func.air.typeOf(extra.ptr); + const ptr_ty = func.typeOf(extra.ptr); const ty = ptr_ty.childType(); - const result_ty = func.air.typeOfIndex(inst); + const result_ty = func.typeOfIndex(inst); const ptr_operand = try func.resolveInst(extra.ptr); const expected_val = try func.resolveInst(extra.expected_value); @@ -7133,7 +7137,7 @@ fn airAtomicLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const mod = func.bin_file.base.options.module.?; const atomic_load = 
func.air.instructions.items(.data)[inst].atomic_load; const ptr = try func.resolveInst(atomic_load.ptr); - const ty = func.air.typeOfIndex(inst); + const ty = func.typeOfIndex(inst); if (func.useAtomicFeature()) { const tag: wasm.AtomicsOpcode = switch (ty.abiSize(mod)) { @@ -7163,7 +7167,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const ptr = try func.resolveInst(pl_op.operand); const operand = try func.resolveInst(extra.operand); - const ty = func.air.typeOfIndex(inst); + const ty = func.typeOfIndex(inst); const op: std.builtin.AtomicRmwOp = extra.op(); if (func.useAtomicFeature()) { @@ -7348,7 +7352,7 @@ fn airAtomicStore(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const ptr = try func.resolveInst(bin_op.lhs); const operand = try func.resolveInst(bin_op.rhs); - const ptr_ty = func.air.typeOf(bin_op.lhs); + const ptr_ty = func.typeOf(bin_op.lhs); const ty = ptr_ty.childType(); if (func.useAtomicFeature()) { @@ -7380,3 +7384,13 @@ fn airFrameAddress(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const result = try WValue.toLocal(.stack, func, Type.usize); return func.finishAir(inst, result, &.{}); } + +fn typeOf(func: *CodeGen, inst: Air.Inst.Ref) Type { + const mod = func.bin_file.base.options.module.?; + return func.air.typeOf(inst, mod.intern_pool); +} + +fn typeOfIndex(func: *CodeGen, inst: Air.Inst.Index) Type { + const mod = func.bin_file.base.options.module.?; + return func.air.typeOfIndex(inst, mod.intern_pool); +} diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 826bca2266..865ebe02f7 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -447,7 +447,7 @@ const InstTracking = struct { else => unreachable, } tracking_log.debug("spill %{d} from {} to {}", .{ inst, self.short, self.long }); - try function.genCopy(function.air.typeOfIndex(inst), self.long, self.short); + try function.genCopy(function.typeOfIndex(inst), self.long, self.short); } fn reuseFrame(self: *InstTracking) void { @@ -537,7 +537,7 @@ const InstTracking = struct { inst: Air.Inst.Index, target: InstTracking, ) !void { - const ty = function.air.typeOfIndex(inst); + const ty = function.typeOfIndex(inst); if ((self.long == .none or self.long == .reserved_frame) and target.long == .load_frame) try function.genCopy(ty, target.long, self.short); try function.genCopy(ty, target.short, self.short); @@ -1725,6 +1725,8 @@ fn gen(self: *Self) InnerError!void { } fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { + const mod = self.bin_file.options.module.?; + const ip = &mod.intern_pool; const air_tags = self.air.instructions.items(.tag); for (body) |inst| { @@ -1733,7 +1735,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { try self.mir_to_air_map.put(self.gpa, mir_inst, inst); } - if (self.liveness.isUnused(inst) and !self.air.mustLower(inst)) continue; + if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip.*)) continue; wip_mir_log.debug("{}", .{self.fmtAir(inst)}); verbose_tracking_log.debug("{}", .{self.fmtTracking()}); @@ -1919,6 +1921,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .constant => unreachable, // excluded from function bodies .const_ty => unreachable, // excluded from function bodies + .interned => unreachable, // excluded from function bodies .unreach => if (self.wantSafety()) try self.airTrap() else self.finishAirBookkeeping(), .optional_payload => try self.airOptionalPayload(inst), @@ -2255,7 +2258,7 @@ fn 
allocFrameIndex(self: *Self, alloc: FrameAlloc) !FrameIndex { /// Use a pointer instruction as the basis for allocating stack memory. fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !FrameIndex { const mod = self.bin_file.options.module.?; - const ptr_ty = self.air.typeOfIndex(inst); + const ptr_ty = self.typeOfIndex(inst); const val_ty = ptr_ty.childType(); return self.allocFrameIndex(FrameAlloc.init(.{ .size = math.cast(u32, val_ty.abiSize(mod)) orelse { @@ -2266,7 +2269,7 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !FrameIndex { } fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue { - return self.allocRegOrMemAdvanced(self.air.typeOfIndex(inst), inst, reg_ok); + return self.allocRegOrMemAdvanced(self.typeOfIndex(inst), inst, reg_ok); } fn allocTempRegOrMem(self: *Self, elem_ty: Type, reg_ok: bool) !MCValue { @@ -2485,7 +2488,7 @@ fn airRetPtr(self: *Self, inst: Air.Inst.Index) !void { .load_frame => .{ .register_offset = .{ .reg = (try self.copyToRegisterWithInstTracking( inst, - self.air.typeOfIndex(inst), + self.typeOfIndex(inst), self.ret_mcv.long, )).register, .off = self.ret_mcv.short.indirect.off, @@ -2496,9 +2499,9 @@ fn airRetPtr(self: *Self, inst: Air.Inst.Index) !void { fn airFptrunc(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const dst_ty = self.air.typeOfIndex(inst); + const dst_ty = self.typeOfIndex(inst); const dst_bits = dst_ty.floatBits(self.target.*); - const src_ty = self.air.typeOf(ty_op.operand); + const src_ty = self.typeOf(ty_op.operand); const src_bits = src_ty.floatBits(self.target.*); const src_mcv = try self.resolveInst(ty_op.operand); @@ -2562,9 +2565,9 @@ fn airFptrunc(self: *Self, inst: Air.Inst.Index) !void { fn airFpext(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const dst_ty = self.air.typeOfIndex(inst); + const dst_ty = self.typeOfIndex(inst); const dst_bits = dst_ty.floatBits(self.target.*); - const src_ty = self.air.typeOf(ty_op.operand); + const src_ty = self.typeOf(ty_op.operand); const src_bits = src_ty.floatBits(self.target.*); const src_mcv = try self.resolveInst(ty_op.operand); @@ -2625,10 +2628,10 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = result: { - const src_ty = self.air.typeOf(ty_op.operand); + const src_ty = self.typeOf(ty_op.operand); const src_int_info = src_ty.intInfo(mod); - const dst_ty = self.air.typeOfIndex(inst); + const dst_ty = self.typeOfIndex(inst); const dst_int_info = dst_ty.intInfo(mod); const abi_size = @intCast(u32, dst_ty.abiSize(mod)); @@ -2707,9 +2710,9 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const dst_ty = self.air.typeOfIndex(inst); + const dst_ty = self.typeOfIndex(inst); const dst_abi_size = @intCast(u32, dst_ty.abiSize(mod)); - const src_ty = self.air.typeOf(ty_op.operand); + const src_ty = self.typeOf(ty_op.operand); const src_abi_size = @intCast(u32, src_ty.abiSize(mod)); const result = result: { @@ -2818,7 +2821,7 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void { fn airBoolToInt(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; - const ty = self.air.typeOfIndex(inst); + const ty = self.typeOfIndex(inst); const operand = try 
self.resolveInst(un_op); const dst_mcv = if (self.reuseOperand(inst, un_op, 0, operand)) @@ -2834,11 +2837,11 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; - const slice_ty = self.air.typeOfIndex(inst); + const slice_ty = self.typeOfIndex(inst); const ptr = try self.resolveInst(bin_op.lhs); - const ptr_ty = self.air.typeOf(bin_op.lhs); + const ptr_ty = self.typeOf(bin_op.lhs); const len = try self.resolveInst(bin_op.rhs); - const len_ty = self.air.typeOf(bin_op.rhs); + const len_ty = self.typeOf(bin_op.rhs); const frame_index = try self.allocFrameIndex(FrameAlloc.initType(slice_ty, mod)); try self.genSetMem(.{ .frame = frame_index }, 0, ptr_ty, ptr); @@ -2877,7 +2880,7 @@ fn activeIntBits(self: *Self, dst_air: Air.Inst.Ref) u16 { const air_tag = self.air.instructions.items(.tag); const air_data = self.air.instructions.items(.data); - const dst_ty = self.air.typeOf(dst_air); + const dst_ty = self.typeOf(dst_air); const dst_info = dst_ty.intInfo(mod); if (Air.refToIndex(dst_air)) |inst| { switch (air_tag[inst]) { @@ -2889,7 +2892,7 @@ fn activeIntBits(self: *Self, dst_air: Air.Inst.Ref) u16 { @boolToInt(src_int.positive and dst_info.signedness == .signed); }, .intcast => { - const src_ty = self.air.typeOf(air_data[inst].ty_op.operand); + const src_ty = self.typeOf(air_data[inst].ty_op.operand); const src_info = src_ty.intInfo(mod); return @min(switch (src_info.signedness) { .signed => switch (dst_info.signedness) { @@ -2913,7 +2916,7 @@ fn airMulDivBinOp(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const result = result: { const tag = self.air.instructions.items(.tag)[inst]; - const dst_ty = self.air.typeOfIndex(inst); + const dst_ty = self.typeOfIndex(inst); switch (dst_ty.zigTypeTag(mod)) { .Float, .Vector => break :result try self.genBinOp(inst, tag, bin_op.lhs, bin_op.rhs), else => {}, @@ -2942,7 +2945,7 @@ fn airMulDivBinOp(self: *Self, inst: Air.Inst.Index) !void { fn airAddSat(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const ty = self.air.typeOf(bin_op.lhs); + const ty = self.typeOf(bin_op.lhs); const lhs_mcv = try self.resolveInst(bin_op.lhs); const dst_mcv = if (lhs_mcv.isRegister() and self.reuseOperand(inst, bin_op.lhs, 0, lhs_mcv)) @@ -3021,7 +3024,7 @@ fn airAddSat(self: *Self, inst: Air.Inst.Index) !void { fn airSubSat(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const ty = self.air.typeOf(bin_op.lhs); + const ty = self.typeOf(bin_op.lhs); const lhs_mcv = try self.resolveInst(bin_op.lhs); const dst_mcv = if (lhs_mcv.isRegister() and self.reuseOperand(inst, bin_op.lhs, 0, lhs_mcv)) @@ -3093,7 +3096,7 @@ fn airSubSat(self: *Self, inst: Air.Inst.Index) !void { fn airMulSat(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const ty = self.air.typeOf(bin_op.lhs); + const ty = self.typeOf(bin_op.lhs); try self.spillRegisters(&.{ .rax, .rdx }); const reg_locks = self.register_manager.lockRegs(2, .{ .rax, .rdx }); @@ -3151,7 +3154,7 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const 
result: MCValue = result: { const tag = self.air.instructions.items(.tag)[inst]; - const ty = self.air.typeOf(bin_op.lhs); + const ty = self.typeOf(bin_op.lhs); switch (ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO implement add/sub with overflow for Vector type", .{}), .Int => { @@ -3168,7 +3171,7 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void { .signed => .o, }; - const tuple_ty = self.air.typeOfIndex(inst); + const tuple_ty = self.typeOfIndex(inst); if (int_info.bits >= 8 and math.isPowerOfTwo(int_info.bits)) { switch (partial_mcv) { .register => |reg| { @@ -3211,8 +3214,8 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const result: MCValue = result: { - const lhs_ty = self.air.typeOf(bin_op.lhs); - const rhs_ty = self.air.typeOf(bin_op.rhs); + const lhs_ty = self.typeOf(bin_op.lhs); + const rhs_ty = self.typeOf(bin_op.rhs); switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO implement shl with overflow for Vector type", .{}), .Int => { @@ -3241,7 +3244,7 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { try self.genBinOpMir(.{ ._, .cmp }, lhs_ty, tmp_mcv, lhs); const cc = Condition.ne; - const tuple_ty = self.air.typeOfIndex(inst); + const tuple_ty = self.typeOfIndex(inst); if (int_info.bits >= 8 and math.isPowerOfTwo(int_info.bits)) { switch (partial_mcv) { .register => |reg| { @@ -3349,7 +3352,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; - const dst_ty = self.air.typeOf(bin_op.lhs); + const dst_ty = self.typeOf(bin_op.lhs); const result: MCValue = switch (dst_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO implement mul_with_overflow for Vector type", .{}), .Int => result: { @@ -3370,7 +3373,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - const tuple_ty = self.air.typeOfIndex(inst); + const tuple_ty = self.typeOfIndex(inst); const extra_bits = if (dst_info.bits <= 64) self.regExtraBits(dst_ty) else @@ -3525,8 +3528,8 @@ fn airShlShrBinOp(self: *Self, inst: Air.Inst.Index) !void { try self.register_manager.getReg(.rcx, null); const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - const lhs_ty = self.air.typeOf(bin_op.lhs); - const rhs_ty = self.air.typeOf(bin_op.rhs); + const lhs_ty = self.typeOf(bin_op.lhs); + const rhs_ty = self.typeOf(bin_op.rhs); const result = try self.genShiftBinOp(tag, inst, lhs, rhs, lhs_ty, rhs_ty); @@ -3543,7 +3546,7 @@ fn airShlSat(self: *Self, inst: Air.Inst.Index) !void { fn airOptionalPayload(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = result: { - const pl_ty = self.air.typeOfIndex(inst); + const pl_ty = self.typeOfIndex(inst); const opt_mcv = try self.resolveInst(ty_op.operand); if (self.reuseOperand(inst, ty_op.operand, 0, opt_mcv)) { @@ -3568,7 +3571,7 @@ fn airOptionalPayload(self: *Self, inst: Air.Inst.Index) !void { fn airOptionalPayloadPtr(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const dst_ty = self.air.typeOfIndex(inst); + const dst_ty = 
self.typeOfIndex(inst); const opt_mcv = try self.resolveInst(ty_op.operand); const dst_mcv = if (self.reuseOperand(inst, ty_op.operand, 0, opt_mcv)) @@ -3582,8 +3585,8 @@ fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result = result: { - const dst_ty = self.air.typeOfIndex(inst); - const src_ty = self.air.typeOf(ty_op.operand); + const dst_ty = self.typeOfIndex(inst); + const src_ty = self.typeOf(ty_op.operand); const opt_ty = src_ty.childType(); const src_mcv = try self.resolveInst(ty_op.operand); @@ -3615,7 +3618,7 @@ fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void { fn airUnwrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const err_union_ty = self.air.typeOf(ty_op.operand); + const err_union_ty = self.typeOf(ty_op.operand); const err_ty = err_union_ty.errorUnionSet(); const payload_ty = err_union_ty.errorUnionPayload(); const operand = try self.resolveInst(ty_op.operand); @@ -3662,7 +3665,7 @@ fn airUnwrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void { fn airUnwrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const err_union_ty = self.air.typeOf(ty_op.operand); + const err_union_ty = self.typeOf(ty_op.operand); const operand = try self.resolveInst(ty_op.operand); const result = try self.genUnwrapErrorUnionPayloadMir(inst, err_union_ty, operand); return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); @@ -3720,7 +3723,7 @@ fn airUnwrapErrUnionErrPtr(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const src_ty = self.air.typeOf(ty_op.operand); + const src_ty = self.typeOf(ty_op.operand); const src_mcv = try self.resolveInst(ty_op.operand); const src_reg = switch (src_mcv) { .register => |reg| reg, @@ -3756,7 +3759,7 @@ fn airUnwrapErrUnionPayloadPtr(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const src_ty = self.air.typeOf(ty_op.operand); + const src_ty = self.typeOf(ty_op.operand); const src_mcv = try self.resolveInst(ty_op.operand); const src_reg = switch (src_mcv) { .register => |reg| reg, @@ -3765,7 +3768,7 @@ fn airUnwrapErrUnionPayloadPtr(self: *Self, inst: Air.Inst.Index) !void { const src_lock = self.register_manager.lockRegAssumeUnused(src_reg); defer self.register_manager.unlockReg(src_lock); - const dst_ty = self.air.typeOfIndex(inst); + const dst_ty = self.typeOfIndex(inst); const dst_reg = if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) src_reg else @@ -3791,7 +3794,7 @@ fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = result: { - const src_ty = self.air.typeOf(ty_op.operand); + const src_ty = self.typeOf(ty_op.operand); const src_mcv = try self.resolveInst(ty_op.operand); const src_reg = switch (src_mcv) { .register => |reg| reg, @@ -3816,7 +3819,7 @@ fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void { if (self.liveness.isUnused(inst)) break :result .unreach; - const dst_ty = self.air.typeOfIndex(inst); + const dst_ty = 
self.typeOfIndex(inst); const dst_reg = if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) src_reg else @@ -3856,10 +3859,10 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = result: { - const pl_ty = self.air.typeOf(ty_op.operand); + const pl_ty = self.typeOf(ty_op.operand); if (!pl_ty.hasRuntimeBits(mod)) break :result .{ .immediate = 1 }; - const opt_ty = self.air.typeOfIndex(inst); + const opt_ty = self.typeOfIndex(inst); const pl_mcv = try self.resolveInst(ty_op.operand); const same_repr = opt_ty.optionalReprIsPayload(mod); if (same_repr and self.reuseOperand(inst, ty_op.operand, 0, pl_mcv)) break :result pl_mcv; @@ -3952,7 +3955,7 @@ fn airSlicePtr(self: *Self, inst: Air.Inst.Index) !void { if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) break :result src_mcv; const dst_mcv = try self.allocRegOrMem(inst, true); - const dst_ty = self.air.typeOfIndex(inst); + const dst_ty = self.typeOfIndex(inst); try self.genCopy(dst_ty, dst_mcv, src_mcv); break :result dst_mcv; }; @@ -3980,7 +3983,7 @@ fn airPtrSliceLenPtr(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const src_ty = self.air.typeOf(ty_op.operand); + const src_ty = self.typeOf(ty_op.operand); const src_mcv = try self.resolveInst(ty_op.operand); const src_reg = switch (src_mcv) { .register => |reg| reg, @@ -3989,7 +3992,7 @@ fn airPtrSliceLenPtr(self: *Self, inst: Air.Inst.Index) !void { const src_lock = self.register_manager.lockRegAssumeUnused(src_reg); defer self.register_manager.unlockReg(src_lock); - const dst_ty = self.air.typeOfIndex(inst); + const dst_ty = self.typeOfIndex(inst); const dst_reg = if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) src_reg else @@ -4014,7 +4017,7 @@ fn airPtrSliceLenPtr(self: *Self, inst: Air.Inst.Index) !void { fn airPtrSlicePtrPtr(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const dst_ty = self.air.typeOfIndex(inst); + const dst_ty = self.typeOfIndex(inst); const opt_mcv = try self.resolveInst(ty_op.operand); const dst_mcv = if (self.reuseOperand(inst, ty_op.operand, 0, opt_mcv)) @@ -4046,7 +4049,7 @@ fn elemOffset(self: *Self, index_ty: Type, index: MCValue, elem_size: u64) !Regi fn genSliceElemPtr(self: *Self, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref) !MCValue { const mod = self.bin_file.options.module.?; - const slice_ty = self.air.typeOf(lhs); + const slice_ty = self.typeOf(lhs); const slice_mcv = try self.resolveInst(lhs); const slice_mcv_lock: ?RegisterLock = switch (slice_mcv) { .register => |reg| self.register_manager.lockRegAssumeUnused(reg), @@ -4059,7 +4062,7 @@ fn genSliceElemPtr(self: *Self, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref) !MCValue { var buf: Type.SlicePtrFieldTypeBuffer = undefined; const slice_ptr_field_type = slice_ty.slicePtrFieldType(&buf); - const index_ty = self.air.typeOf(rhs); + const index_ty = self.typeOf(rhs); const index_mcv = try self.resolveInst(rhs); const index_mcv_lock: ?RegisterLock = switch (index_mcv) { .register => |reg| self.register_manager.lockRegAssumeUnused(reg), @@ -4083,7 +4086,7 @@ fn genSliceElemPtr(self: *Self, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref) !MCValue { fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const slice_ty = 
self.air.typeOf(bin_op.lhs); + const slice_ty = self.typeOf(bin_op.lhs); var buf: Type.SlicePtrFieldTypeBuffer = undefined; const slice_ptr_field_type = slice_ty.slicePtrFieldType(&buf); @@ -4105,7 +4108,7 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const array_ty = self.air.typeOf(bin_op.lhs); + const array_ty = self.typeOf(bin_op.lhs); const array = try self.resolveInst(bin_op.lhs); const array_lock: ?RegisterLock = switch (array) { .register => |reg| self.register_manager.lockRegAssumeUnused(reg), @@ -4116,7 +4119,7 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void { const elem_ty = array_ty.childType(); const elem_abi_size = elem_ty.abiSize(mod); - const index_ty = self.air.typeOf(bin_op.rhs); + const index_ty = self.typeOf(bin_op.rhs); const index = try self.resolveInst(bin_op.rhs); const index_lock: ?RegisterLock = switch (index) { .register => |reg| self.register_manager.lockRegAssumeUnused(reg), @@ -4170,14 +4173,14 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void { fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const ptr_ty = self.air.typeOf(bin_op.lhs); + const ptr_ty = self.typeOf(bin_op.lhs); // this is identical to the `airPtrElemPtr` codegen expect here an // additional `mov` is needed at the end to get the actual value const elem_ty = ptr_ty.elemType2(mod); const elem_abi_size = @intCast(u32, elem_ty.abiSize(mod)); - const index_ty = self.air.typeOf(bin_op.rhs); + const index_ty = self.typeOf(bin_op.rhs); const index_mcv = try self.resolveInst(bin_op.rhs); const index_lock = switch (index_mcv) { .register => |reg| self.register_manager.lockRegAssumeUnused(reg), @@ -4218,7 +4221,7 @@ fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; - const ptr_ty = self.air.typeOf(extra.lhs); + const ptr_ty = self.typeOf(extra.lhs); const ptr = try self.resolveInst(extra.lhs); const ptr_lock: ?RegisterLock = switch (ptr) { .register => |reg| self.register_manager.lockRegAssumeUnused(reg), @@ -4228,7 +4231,7 @@ fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void { const elem_ty = ptr_ty.elemType2(mod); const elem_abi_size = elem_ty.abiSize(mod); - const index_ty = self.air.typeOf(extra.rhs); + const index_ty = self.typeOf(extra.rhs); const index = try self.resolveInst(extra.rhs); const index_lock: ?RegisterLock = switch (index) { .register => |reg| self.register_manager.lockRegAssumeUnused(reg), @@ -4249,9 +4252,9 @@ fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void { fn airSetUnionTag(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const ptr_union_ty = self.air.typeOf(bin_op.lhs); + const ptr_union_ty = self.typeOf(bin_op.lhs); const union_ty = ptr_union_ty.childType(); - const tag_ty = self.air.typeOf(bin_op.rhs); + const tag_ty = self.typeOf(bin_op.rhs); const layout = union_ty.unionGetLayout(mod); if (layout.tag_size == 0) { @@ -4296,8 +4299,8 @@ fn airGetUnionTag(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const tag_ty = self.air.typeOfIndex(inst); - const 
union_ty = self.air.typeOf(ty_op.operand); + const tag_ty = self.typeOfIndex(inst); + const union_ty = self.typeOf(ty_op.operand); const layout = union_ty.unionGetLayout(mod); if (layout.tag_size == 0) { @@ -4350,8 +4353,8 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result = result: { - const dst_ty = self.air.typeOfIndex(inst); - const src_ty = self.air.typeOf(ty_op.operand); + const dst_ty = self.typeOfIndex(inst); + const src_ty = self.typeOf(ty_op.operand); const src_mcv = try self.resolveInst(ty_op.operand); const mat_src_mcv = switch (src_mcv) { @@ -4479,8 +4482,8 @@ fn airCtz(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result = result: { - const dst_ty = self.air.typeOfIndex(inst); - const src_ty = self.air.typeOf(ty_op.operand); + const dst_ty = self.typeOfIndex(inst); + const src_ty = self.typeOf(ty_op.operand); const src_bits = src_ty.bitSize(mod); const src_mcv = try self.resolveInst(ty_op.operand); @@ -4575,7 +4578,7 @@ fn airPopcount(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = result: { - const src_ty = self.air.typeOf(ty_op.operand); + const src_ty = self.typeOf(ty_op.operand); const src_abi_size = @intCast(u32, src_ty.abiSize(mod)); const src_mcv = try self.resolveInst(ty_op.operand); @@ -4745,7 +4748,7 @@ fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const src_ty = self.air.typeOf(ty_op.operand); + const src_ty = self.typeOf(ty_op.operand); const src_mcv = try self.resolveInst(ty_op.operand); const dst_mcv = try self.byteSwap(inst, src_ty, src_mcv, true); @@ -4766,7 +4769,7 @@ fn airBitReverse(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const src_ty = self.air.typeOf(ty_op.operand); + const src_ty = self.typeOf(ty_op.operand); const src_abi_size = @intCast(u32, src_ty.abiSize(mod)); const src_mcv = try self.resolveInst(ty_op.operand); @@ -4876,12 +4879,12 @@ fn airFloatSign(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const tag = self.air.instructions.items(.tag)[inst]; const un_op = self.air.instructions.items(.data)[inst].un_op; - const ty = self.air.typeOf(un_op); + const ty = self.typeOf(un_op); const abi_size: u32 = switch (ty.abiSize(mod)) { 1...16 => 16, 17...32 => 32, else => return self.fail("TODO implement airFloatSign for {}", .{ - ty.fmt(self.bin_file.options.module.?), + ty.fmt(mod), }), }; const scalar_bits = ty.scalarType(mod).floatBits(self.target.*); @@ -5005,7 +5008,7 @@ fn airFloatSign(self: *Self, inst: Air.Inst.Index) !void { fn airRound(self: *Self, inst: Air.Inst.Index, mode: u4) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; - const ty = self.air.typeOf(un_op); + const ty = self.typeOf(un_op); const src_mcv = try self.resolveInst(un_op); const dst_mcv = if (src_mcv.isRegister() and self.reuseOperand(inst, un_op, 0, src_mcv)) @@ -5093,7 +5096,7 @@ fn genRound(self: *Self, ty: Type, dst_reg: Register, src_mcv: MCValue, mode: u4 fn airSqrt(self: *Self, inst: Air.Inst.Index) !void { const mod = 
self.bin_file.options.module.?; const un_op = self.air.instructions.items(.data)[inst].un_op; - const ty = self.air.typeOf(un_op); + const ty = self.typeOf(un_op); const abi_size = @intCast(u32, ty.abiSize(mod)); const src_mcv = try self.resolveInst(un_op); @@ -5399,7 +5402,7 @@ fn load(self: *Self, dst_mcv: MCValue, ptr_ty: Type, ptr_mcv: MCValue) InnerErro fn airLoad(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const elem_ty = self.air.typeOfIndex(inst); + const elem_ty = self.typeOfIndex(inst); const result: MCValue = result: { if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result .none; @@ -5407,7 +5410,7 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void { const reg_locks = self.register_manager.lockRegsAssumeUnused(3, .{ .rdi, .rsi, .rcx }); defer for (reg_locks) |lock| self.register_manager.unlockReg(lock); - const ptr_ty = self.air.typeOf(ty_op.operand); + const ptr_ty = self.typeOf(ty_op.operand); const elem_size = elem_ty.abiSize(mod); const elem_rc = regClassForType(elem_ty, mod); @@ -5548,7 +5551,7 @@ fn airStore(self: *Self, inst: Air.Inst.Index, safety: bool) !void { } const bin_op = self.air.instructions.items(.data)[inst].bin_op; const ptr_mcv = try self.resolveInst(bin_op.lhs); - const ptr_ty = self.air.typeOf(bin_op.lhs); + const ptr_ty = self.typeOf(bin_op.lhs); const src_mcv = try self.resolveInst(bin_op.rhs); if (ptr_ty.ptrInfo().data.host_size > 0) { try self.packedStore(ptr_ty, ptr_mcv, src_mcv); @@ -5573,8 +5576,8 @@ fn airStructFieldPtrIndex(self: *Self, inst: Air.Inst.Index, index: u8) !void { fn fieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32) !MCValue { const mod = self.bin_file.options.module.?; - const ptr_field_ty = self.air.typeOfIndex(inst); - const ptr_container_ty = self.air.typeOf(operand); + const ptr_field_ty = self.typeOfIndex(inst); + const ptr_container_ty = self.typeOf(operand); const container_ty = ptr_container_ty.childType(); const field_offset = @intCast(i32, switch (container_ty.containerLayout()) { .Auto, .Extern => container_ty.structFieldOffset(index, mod), @@ -5602,7 +5605,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { const operand = extra.struct_operand; const index = extra.field_index; - const container_ty = self.air.typeOf(operand); + const container_ty = self.typeOf(operand); const container_rc = regClassForType(container_ty, mod); const field_ty = container_ty.structFieldType(index); if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result .none; @@ -5756,7 +5759,7 @@ fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.FieldParentPtr, ty_pl.payload).data; - const inst_ty = self.air.typeOfIndex(inst); + const inst_ty = self.typeOfIndex(inst); const parent_ty = inst_ty.childType(); const field_offset = @intCast(i32, parent_ty.structFieldOffset(extra.field_index, mod)); @@ -5772,7 +5775,7 @@ fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void { fn genUnOp(self: *Self, maybe_inst: ?Air.Inst.Index, tag: Air.Inst.Tag, src_air: Air.Inst.Ref) !MCValue { const mod = self.bin_file.options.module.?; - const src_ty = self.air.typeOf(src_air); + const src_ty = self.typeOf(src_air); const src_mcv = try self.resolveInst(src_air); if (src_ty.zigTypeTag(mod) == .Vector) { return self.fail("TODO implement genUnOp for {}", 
.{src_ty.fmt(self.bin_file.options.module.?)}); @@ -6358,8 +6361,8 @@ fn genBinOp( rhs_air: Air.Inst.Ref, ) !MCValue { const mod = self.bin_file.options.module.?; - const lhs_ty = self.air.typeOf(lhs_air); - const rhs_ty = self.air.typeOf(rhs_air); + const lhs_ty = self.typeOf(lhs_air); + const rhs_ty = self.typeOf(rhs_air); const abi_size = @intCast(u32, lhs_ty.abiSize(mod)); const maybe_mask_reg = switch (air_tag) { @@ -7918,6 +7921,7 @@ fn genIntMulComplexOpMir(self: *Self, dst_ty: Type, dst_mcv: MCValue, src_mcv: M } fn airArg(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; // skip zero-bit arguments as they don't have a corresponding arg instruction var arg_index = self.arg_index; while (self.args[arg_index] == .none) arg_index += 1; @@ -7931,9 +7935,9 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void { else => return self.fail("TODO implement arg for {}", .{dst_mcv}), } - const ty = self.air.typeOfIndex(inst); + const ty = self.typeOfIndex(inst); const src_index = self.air.instructions.items(.data)[inst].arg.src_index; - const name = self.owner.mod_fn.getParamName(self.bin_file.options.module.?, src_index); + const name = self.owner.mod_fn.getParamName(mod, src_index); try self.genArgDbgInfo(ty, name, dst_mcv); break :result dst_mcv; @@ -8050,7 +8054,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const callee = pl_op.operand; const extra = self.air.extraData(Air.Call, pl_op.payload); const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]); - const ty = self.air.typeOf(callee); + const ty = self.typeOf(callee); const fn_ty = switch (ty.zigTypeTag(mod)) { .Fn => ty, @@ -8085,7 +8089,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier else => unreachable, } for (args, info.args) |arg, mc_arg| { - const arg_ty = self.air.typeOf(arg); + const arg_ty = self.typeOf(arg); const arg_mcv = try self.resolveInst(arg); switch (mc_arg) { .none => {}, @@ -8112,7 +8116,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier defer if (ret_lock) |lock| self.register_manager.unlockReg(lock); for (args, info.args) |arg, mc_arg| { - const arg_ty = self.air.typeOf(arg); + const arg_ty = self.typeOf(arg); const arg_mcv = try self.resolveInst(arg); switch (mc_arg) { .none, .load_frame => {}, @@ -8241,7 +8245,7 @@ fn airRet(self: *Self, inst: Air.Inst.Index) !void { fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const ptr = try self.resolveInst(un_op); - const ptr_ty = self.air.typeOf(un_op); + const ptr_ty = self.typeOf(un_op); switch (self.ret_mcv.short) { .none => {}, .register => try self.load(self.ret_mcv.short, ptr_ty, ptr), @@ -8258,7 +8262,7 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void { fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { const mod = self.bin_file.options.module.?; const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const ty = self.air.typeOf(bin_op.lhs); + const ty = self.typeOf(bin_op.lhs); try self.spillEflagsIfOccupied(); self.eflags_inst = inst; @@ -8476,7 +8480,7 @@ fn airCmpLtErrorsLen(self: *Self, inst: Air.Inst.Index) !void { try self.spillEflagsIfOccupied(); self.eflags_inst = inst; - const op_ty = self.air.typeOf(un_op); + const op_ty = self.typeOf(un_op); const op_abi_size = @intCast(u32, op_ty.abiSize(mod)); const op_mcv = try self.resolveInst(un_op); const 
dst_reg = switch (op_mcv) { @@ -8496,7 +8500,7 @@ fn airTry(self: *Self, inst: Air.Inst.Index) !void { const pl_op = self.air.instructions.items(.data)[inst].pl_op; const extra = self.air.extraData(Air.Try, pl_op.payload); const body = self.air.extra[extra.end..][0..extra.data.body_len]; - const err_union_ty = self.air.typeOf(pl_op.operand); + const err_union_ty = self.typeOf(pl_op.operand); const result = try self.genTry(inst, pl_op.operand, body, err_union_ty, false); return self.finishAir(inst, result, .{ .none, .none, .none }); } @@ -8505,7 +8509,7 @@ fn airTryPtr(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.TryPtr, ty_pl.payload); const body = self.air.extra[extra.end..][0..extra.data.body_len]; - const err_union_ty = self.air.typeOf(extra.data.ptr).childType(); + const err_union_ty = self.typeOf(extra.data.ptr).childType(); const result = try self.genTry(inst, extra.data.ptr, body, err_union_ty, true); return self.finishAir(inst, result, .{ .none, .none, .none }); } @@ -8584,7 +8588,7 @@ fn airDbgBlock(self: *Self, inst: Air.Inst.Index) !void { fn airDbgVar(self: *Self, inst: Air.Inst.Index) !void { const pl_op = self.air.instructions.items(.data)[inst].pl_op; const operand = pl_op.operand; - const ty = self.air.typeOf(operand); + const ty = self.typeOf(operand); const mcv = try self.resolveInst(operand); const name = self.air.nullTerminatedString(pl_op.payload); @@ -8626,7 +8630,7 @@ fn genCondBrMir(self: *Self, ty: Type, mcv: MCValue) !u32 { fn airCondBr(self: *Self, inst: Air.Inst.Index) !void { const pl_op = self.air.instructions.items(.data)[inst].pl_op; const cond = try self.resolveInst(pl_op.operand); - const cond_ty = self.air.typeOf(pl_op.operand); + const cond_ty = self.typeOf(pl_op.operand); const extra = self.air.extraData(Air.CondBr, pl_op.payload); const then_body = self.air.extra[extra.end..][0..extra.data.then_body_len]; const else_body = self.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]; @@ -8871,7 +8875,7 @@ fn isNonErr(self: *Self, inst: Air.Inst.Index, ty: Type, operand: MCValue) !MCVa fn airIsNull(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); - const ty = self.air.typeOf(un_op); + const ty = self.typeOf(un_op); const result = try self.isNull(inst, ty, operand); return self.finishAir(inst, result, .{ un_op, .none, .none }); } @@ -8879,7 +8883,7 @@ fn airIsNull(self: *Self, inst: Air.Inst.Index) !void { fn airIsNullPtr(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); - const ty = self.air.typeOf(un_op); + const ty = self.typeOf(un_op); const result = try self.isNullPtr(inst, ty, operand); return self.finishAir(inst, result, .{ un_op, .none, .none }); } @@ -8887,7 +8891,7 @@ fn airIsNullPtr(self: *Self, inst: Air.Inst.Index) !void { fn airIsNonNull(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); - const ty = self.air.typeOf(un_op); + const ty = self.typeOf(un_op); const result = switch (try self.isNull(inst, ty, operand)) { .eflags => |cc| .{ .eflags = cc.negate() }, else => unreachable, @@ -8898,7 +8902,7 @@ fn airIsNonNull(self: *Self, inst: Air.Inst.Index) !void { fn airIsNonNullPtr(self: *Self, inst: Air.Inst.Index) !void { const un_op = 
self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); - const ty = self.air.typeOf(un_op); + const ty = self.typeOf(un_op); const result = switch (try self.isNullPtr(inst, ty, operand)) { .eflags => |cc| .{ .eflags = cc.negate() }, else => unreachable, @@ -8909,7 +8913,7 @@ fn airIsNonNullPtr(self: *Self, inst: Air.Inst.Index) !void { fn airIsErr(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); - const ty = self.air.typeOf(un_op); + const ty = self.typeOf(un_op); const result = try self.isErr(inst, ty, operand); return self.finishAir(inst, result, .{ un_op, .none, .none }); } @@ -8932,7 +8936,7 @@ fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void { break :blk try self.allocRegOrMem(inst, true); } }; - const ptr_ty = self.air.typeOf(un_op); + const ptr_ty = self.typeOf(un_op); try self.load(operand, ptr_ty, operand_ptr); const result = try self.isErr(inst, ptr_ty.childType(), operand); @@ -8943,7 +8947,7 @@ fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void { fn airIsNonErr(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); - const ty = self.air.typeOf(un_op); + const ty = self.typeOf(un_op); const result = try self.isNonErr(inst, ty, operand); return self.finishAir(inst, result, .{ un_op, .none, .none }); } @@ -8966,7 +8970,7 @@ fn airIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !void { break :blk try self.allocRegOrMem(inst, true); } }; - const ptr_ty = self.air.typeOf(un_op); + const ptr_ty = self.typeOf(un_op); try self.load(operand, ptr_ty, operand_ptr); const result = try self.isNonErr(inst, ptr_ty.childType(), operand); @@ -9032,7 +9036,7 @@ fn airBlock(self: *Self, inst: Air.Inst.Index) !void { fn airSwitchBr(self: *Self, inst: Air.Inst.Index) !void { const pl_op = self.air.instructions.items(.data)[inst].pl_op; const condition = try self.resolveInst(pl_op.operand); - const condition_ty = self.air.typeOf(pl_op.operand); + const condition_ty = self.typeOf(pl_op.operand); const switch_br = self.air.extraData(Air.SwitchBr, pl_op.payload); var extra_index: usize = switch_br.end; var case_i: u32 = 0; @@ -9119,7 +9123,7 @@ fn airBr(self: *Self, inst: Air.Inst.Index) !void { const br = self.air.instructions.items(.data)[inst].br; const src_mcv = try self.resolveInst(br.operand); - const block_ty = self.air.typeOfIndex(br.block_inst); + const block_ty = self.typeOfIndex(br.block_inst); const block_unused = !block_ty.hasRuntimeBitsIgnoreComptime(mod) or self.liveness.isUnused(br.block_inst); const block_tracking = self.inst_tracking.getPtr(br.block_inst).?; @@ -9244,7 +9248,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void { const arg_mcv = try self.resolveInst(input); try self.register_manager.getReg(reg, null); - try self.genSetReg(reg, self.air.typeOf(input), arg_mcv); + try self.genSetReg(reg, self.typeOf(input), arg_mcv); } { @@ -10169,7 +10173,7 @@ fn airPtrToInt(self: *Self, inst: Air.Inst.Index) !void { if (self.reuseOperand(inst, un_op, 0, src_mcv)) break :result src_mcv; const dst_mcv = try self.allocRegOrMem(inst, true); - const dst_ty = self.air.typeOfIndex(inst); + const dst_ty = self.typeOfIndex(inst); try self.genCopy(dst_ty, dst_mcv, src_mcv); break :result dst_mcv; }; @@ -10179,8 +10183,8 @@ fn airPtrToInt(self: *Self, inst: Air.Inst.Index) !void { fn airBitCast(self: *Self, inst: Air.Inst.Index) !void { const mod = 
self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const dst_ty = self.air.typeOfIndex(inst); - const src_ty = self.air.typeOf(ty_op.operand); + const dst_ty = self.typeOfIndex(inst); + const src_ty = self.typeOf(ty_op.operand); const result = result: { const dst_rc = regClassForType(dst_ty, mod); @@ -10241,8 +10245,8 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const slice_ty = self.air.typeOfIndex(inst); - const ptr_ty = self.air.typeOf(ty_op.operand); + const slice_ty = self.typeOfIndex(inst); + const ptr_ty = self.typeOf(ty_op.operand); const ptr = try self.resolveInst(ty_op.operand); const array_ty = ptr_ty.childType(); const array_len = array_ty.arrayLen(); @@ -10264,11 +10268,11 @@ fn airIntToFloat(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const src_ty = self.air.typeOf(ty_op.operand); + const src_ty = self.typeOf(ty_op.operand); const src_bits = @intCast(u32, src_ty.bitSize(mod)); const src_signedness = if (src_ty.isAbiInt(mod)) src_ty.intInfo(mod).signedness else .unsigned; - const dst_ty = self.air.typeOfIndex(inst); + const dst_ty = self.typeOfIndex(inst); const src_size = math.divCeil(u32, @max(switch (src_signedness) { .signed => src_bits, @@ -10318,8 +10322,8 @@ fn airFloatToInt(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const src_ty = self.air.typeOf(ty_op.operand); - const dst_ty = self.air.typeOfIndex(inst); + const src_ty = self.typeOf(ty_op.operand); + const dst_ty = self.typeOfIndex(inst); const dst_bits = @intCast(u32, dst_ty.bitSize(mod)); const dst_signedness = if (dst_ty.isAbiInt(mod)) dst_ty.intInfo(mod).signedness else .unsigned; @@ -10371,8 +10375,8 @@ fn airCmpxchg(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Cmpxchg, ty_pl.payload).data; - const ptr_ty = self.air.typeOf(extra.ptr); - const val_ty = self.air.typeOf(extra.expected_value); + const ptr_ty = self.typeOf(extra.ptr); + const val_ty = self.typeOf(extra.expected_value); const val_abi_size = @intCast(u32, val_ty.abiSize(mod)); try self.spillRegisters(&.{ .rax, .rdx, .rbx, .rcx }); @@ -10712,10 +10716,10 @@ fn airAtomicRmw(self: *Self, inst: Air.Inst.Index) !void { const unused = self.liveness.isUnused(inst); - const ptr_ty = self.air.typeOf(pl_op.operand); + const ptr_ty = self.typeOf(pl_op.operand); const ptr_mcv = try self.resolveInst(pl_op.operand); - const val_ty = self.air.typeOf(extra.operand); + const val_ty = self.typeOf(extra.operand); const val_mcv = try self.resolveInst(extra.operand); const result = @@ -10726,7 +10730,7 @@ fn airAtomicRmw(self: *Self, inst: Air.Inst.Index) !void { fn airAtomicLoad(self: *Self, inst: Air.Inst.Index) !void { const atomic_load = self.air.instructions.items(.data)[inst].atomic_load; - const ptr_ty = self.air.typeOf(atomic_load.ptr); + const ptr_ty = self.typeOf(atomic_load.ptr); const ptr_mcv = try self.resolveInst(atomic_load.ptr); const ptr_lock = switch (ptr_mcv) { .register => |reg| self.register_manager.lockRegAssumeUnused(reg), @@ -10747,10 +10751,10 @@ fn airAtomicLoad(self: *Self, inst: Air.Inst.Index) !void { fn airAtomicStore(self: *Self, inst: Air.Inst.Index, order: 
std.builtin.AtomicOrder) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const ptr_ty = self.air.typeOf(bin_op.lhs); + const ptr_ty = self.typeOf(bin_op.lhs); const ptr_mcv = try self.resolveInst(bin_op.lhs); - const val_ty = self.air.typeOf(bin_op.rhs); + const val_ty = self.typeOf(bin_op.rhs); const val_mcv = try self.resolveInst(bin_op.rhs); const result = try self.atomicOp(ptr_mcv, val_mcv, ptr_ty, val_ty, true, null, order); @@ -10768,7 +10772,7 @@ fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const dst_ptr = try self.resolveInst(bin_op.lhs); - const dst_ptr_ty = self.air.typeOf(bin_op.lhs); + const dst_ptr_ty = self.typeOf(bin_op.lhs); const dst_ptr_lock: ?RegisterLock = switch (dst_ptr) { .register => |reg| self.register_manager.lockRegAssumeUnused(reg), else => null, @@ -10776,7 +10780,7 @@ fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void { defer if (dst_ptr_lock) |lock| self.register_manager.unlockReg(lock); const src_val = try self.resolveInst(bin_op.rhs); - const elem_ty = self.air.typeOf(bin_op.rhs); + const elem_ty = self.typeOf(bin_op.rhs); const src_val_lock: ?RegisterLock = switch (src_val) { .register => |reg| self.register_manager.lockRegAssumeUnused(reg), else => null, @@ -10888,7 +10892,7 @@ fn airMemcpy(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const dst_ptr = try self.resolveInst(bin_op.lhs); - const dst_ptr_ty = self.air.typeOf(bin_op.lhs); + const dst_ptr_ty = self.typeOf(bin_op.lhs); const dst_ptr_lock: ?RegisterLock = switch (dst_ptr) { .register => |reg| self.register_manager.lockRegAssumeUnused(reg), else => null, @@ -10922,8 +10926,8 @@ fn airMemcpy(self: *Self, inst: Air.Inst.Index) !void { fn airTagName(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const un_op = self.air.instructions.items(.data)[inst].un_op; - const inst_ty = self.air.typeOfIndex(inst); - const enum_ty = self.air.typeOf(un_op); + const inst_ty = self.typeOfIndex(inst); + const enum_ty = self.typeOf(un_op); // We need a properly aligned and sized call frame to be able to call this function. 
{ @@ -10964,7 +10968,7 @@ fn airErrorName(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const un_op = self.air.instructions.items(.data)[inst].un_op; - const err_ty = self.air.typeOf(un_op); + const err_ty = self.typeOf(un_op); const err_mcv = try self.resolveInst(un_op); const err_reg = try self.copyToTmpRegister(err_ty, err_mcv); const err_lock = self.register_manager.lockRegAssumeUnused(err_reg); @@ -11046,7 +11050,7 @@ fn airErrorName(self: *Self, inst: Air.Inst.Index) !void { fn airSplat(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const vector_ty = self.air.typeOfIndex(inst); + const vector_ty = self.typeOfIndex(inst); const dst_rc = regClassForType(vector_ty, mod); const scalar_ty = vector_ty.scalarType(mod); @@ -11266,7 +11270,7 @@ fn airReduce(self: *Self, inst: Air.Inst.Index) !void { fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; - const result_ty = self.air.typeOfIndex(inst); + const result_ty = self.typeOfIndex(inst); const len = @intCast(usize, result_ty.arrayLen()); const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]); @@ -11411,10 +11415,10 @@ fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.UnionInit, ty_pl.payload).data; const result: MCValue = result: { - const union_ty = self.air.typeOfIndex(inst); + const union_ty = self.typeOfIndex(inst); const layout = union_ty.unionGetLayout(mod); - const src_ty = self.air.typeOf(extra.init); + const src_ty = self.typeOf(extra.init); const src_mcv = try self.resolveInst(extra.init); if (layout.tag_size == 0) { if (self.reuseOperand(inst, extra.init, 0, src_mcv)) break :result src_mcv; @@ -11461,7 +11465,7 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const pl_op = self.air.instructions.items(.data)[inst].pl_op; const extra = self.air.extraData(Air.Bin, pl_op.payload).data; - const ty = self.air.typeOfIndex(inst); + const ty = self.typeOfIndex(inst); if (!self.hasFeature(.fma)) return self.fail("TODO implement airMulAdd for {}", .{ ty.fmt(self.bin_file.options.module.?), @@ -11609,7 +11613,7 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void { fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue { const mod = self.bin_file.options.module.?; - const ty = self.air.typeOf(ref); + const ty = self.typeOf(ref); // If the type has no codegen bits, no need to store it. if (!ty.hasRuntimeBitsIgnoreComptime(mod)) return .none; @@ -11713,7 +11717,7 @@ fn resolveCallingConventionValues( defer self.gpa.free(param_types); fn_ty.fnParamTypes(param_types); // TODO: promote var arg types - for (param_types[param_len..], var_args) |*param_ty, arg| param_ty.* = self.air.typeOf(arg); + for (param_types[param_len..], var_args) |*param_ty, arg| param_ty.* = self.typeOf(arg); var result: CallMCValues = .{ .args = try self.gpa.alloc(MCValue, param_types.len), // These undefined values must be populated before returning from this function. 
@@ -12023,3 +12027,13 @@ fn hasAnyFeatures(self: *Self, features: anytype) bool { fn hasAllFeatures(self: *Self, features: anytype) bool { return Target.x86.featureSetHasAll(self.target.cpu.features, features); } + +fn typeOf(self: *Self, inst: Air.Inst.Ref) Type { + const mod = self.bin_file.options.module.?; + return self.air.typeOf(inst, mod.intern_pool); +} + +fn typeOfIndex(self: *Self, inst: Air.Inst.Index) Type { + const mod = self.bin_file.options.module.?; + return self.air.typeOfIndex(inst, mod.intern_pool); +} diff --git a/src/codegen/c.zig b/src/codegen/c.zig index da040a6fbb..a87f37b1c9 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -288,7 +288,7 @@ pub const Function = struct { const mod = f.object.dg.module; const val = f.air.value(ref, mod).?; - const ty = f.air.typeOf(ref); + const ty = f.typeOf(ref); const result: CValue = if (lowersToArray(ty, mod)) result: { const writer = f.object.code_header.writer(); @@ -355,7 +355,7 @@ pub const Function = struct { switch (c_value) { .constant => |inst| { const mod = f.object.dg.module; - const ty = f.air.typeOf(inst); + const ty = f.typeOf(inst); const val = f.air.value(inst, mod).?; return f.object.dg.renderValue(w, ty, val, location); }, @@ -368,7 +368,7 @@ pub const Function = struct { switch (c_value) { .constant => |inst| { const mod = f.object.dg.module; - const ty = f.air.typeOf(inst); + const ty = f.typeOf(inst); const val = f.air.value(inst, mod).?; try w.writeAll("(*"); try f.object.dg.renderValue(w, ty, val, .Other); @@ -382,7 +382,7 @@ pub const Function = struct { switch (c_value) { .constant => |inst| { const mod = f.object.dg.module; - const ty = f.air.typeOf(inst); + const ty = f.typeOf(inst); const val = f.air.value(inst, mod).?; try f.object.dg.renderValue(w, ty, val, .Other); try w.writeByte('.'); @@ -396,7 +396,7 @@ pub const Function = struct { switch (c_value) { .constant => |inst| { const mod = f.object.dg.module; - const ty = f.air.typeOf(inst); + const ty = f.typeOf(inst); const val = f.air.value(inst, mod).?; try w.writeByte('('); try f.object.dg.renderValue(w, ty, val, .Other); @@ -486,6 +486,16 @@ pub const Function = struct { f.object.dg.ctypes.deinit(gpa); f.object.dg.fwd_decl.deinit(); } + + fn typeOf(f: *Function, inst: Air.Inst.Ref) Type { + const mod = f.object.dg.module; + return f.air.typeOf(inst, mod.intern_pool); + } + + fn typeOfIndex(f: *Function, inst: Air.Inst.Index) Type { + const mod = f.object.dg.module; + return f.air.typeOfIndex(inst, mod.intern_pool); + } }; /// This data is available when outputting .c code for a `Module`. 
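The two pairs of typeOf/typeOfIndex wrappers added in the hunks above, one on the backend's `Self` (the `Target.x86` feature checks in the surrounding context suggest this is the x86_64 `CodeGen.zig`) and one on the C backend's `Function`, are what every mechanical `self.air.typeOf(...)` to `self.typeOf(...)` rewrite in this series funnels into: `Air.typeOf` and `Air.typeOfIndex` now take the module's `InternPool` as an extra argument, and the wrappers supply it so the call sites do not have to. A sketch of the call-site effect, written with the c.zig names and intended as illustration rather than a compilable excerpt:

    // Before this series, Air could answer type queries from its own data:
    //     const ty = f.air.typeOf(ref);
    // Type information now lives in the InternPool, so the raw call becomes:
    //     const ty = f.air.typeOf(ref, f.object.dg.module.intern_pool);
    // The private Function.typeOf wrapper added above hides that plumbing,
    // keeping call sites as short as before:
    //     const ty = f.typeOf(ref);

Keeping the wrappers private to each file also means that if the intern pool later moves or is reached differently from the `Module`, only these helpers need updating, not the many call sites rewritten here.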
@@ -2802,17 +2812,19 @@ fn genBodyResolveState(f: *Function, inst: Air.Inst.Index, leading_deaths: []con fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, OutOfMemory }!void { const mod = f.object.dg.module; + const ip = &mod.intern_pool; const air_tags = f.air.instructions.items(.tag); for (body) |inst| { - if (f.liveness.isUnused(inst) and !f.air.mustLower(inst)) { + if (f.liveness.isUnused(inst) and !f.air.mustLower(inst, ip.*)) continue; - } const result_value = switch (air_tags[inst]) { // zig fmt: off .constant => unreachable, // excluded from function bodies .const_ty => unreachable, // excluded from function bodies + .interned => unreachable, // excluded from function bodies + .arg => try airArg(f, inst), .trap => try airTrap(f.object.writer()), @@ -2837,7 +2849,7 @@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, .div_trunc, .div_exact => try airBinOp(f, inst, "/", "div_trunc", .none), .rem => blk: { const bin_op = f.air.instructions.items(.data)[inst].bin_op; - const lhs_scalar_ty = f.air.typeOf(bin_op.lhs).scalarType(mod); + const lhs_scalar_ty = f.typeOf(bin_op.lhs).scalarType(mod); // For binary operations @TypeOf(lhs)==@TypeOf(rhs), // so we only check one. break :blk if (lhs_scalar_ty.isInt(mod)) @@ -3088,7 +3100,7 @@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, fn airSliceField(f: *Function, inst: Air.Inst.Index, is_ptr: bool, field_name: []const u8) !CValue { const ty_op = f.air.instructions.items(.data)[inst].ty_op; - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const operand = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); @@ -3107,7 +3119,7 @@ fn airSliceField(f: *Function, inst: Air.Inst.Index, is_ptr: bool, field_name: [ fn airPtrElemVal(f: *Function, inst: Air.Inst.Index) !CValue { const mod = f.object.dg.module; - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const bin_op = f.air.instructions.items(.data)[inst].bin_op; if (!inst_ty.hasRuntimeBitsIgnoreComptime(mod)) { try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); @@ -3136,8 +3148,8 @@ fn airPtrElemPtr(f: *Function, inst: Air.Inst.Index) !CValue { const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; const bin_op = f.air.extraData(Air.Bin, ty_pl.payload).data; - const inst_ty = f.air.typeOfIndex(inst); - const ptr_ty = f.air.typeOf(bin_op.lhs); + const inst_ty = f.typeOfIndex(inst); + const ptr_ty = f.typeOf(bin_op.lhs); const elem_ty = ptr_ty.childType(); const elem_has_bits = elem_ty.hasRuntimeBitsIgnoreComptime(mod); @@ -3169,7 +3181,7 @@ fn airPtrElemPtr(f: *Function, inst: Air.Inst.Index) !CValue { fn airSliceElemVal(f: *Function, inst: Air.Inst.Index) !CValue { const mod = f.object.dg.module; - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const bin_op = f.air.instructions.items(.data)[inst].bin_op; if (!inst_ty.hasRuntimeBitsIgnoreComptime(mod)) { try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); @@ -3198,8 +3210,8 @@ fn airSliceElemPtr(f: *Function, inst: Air.Inst.Index) !CValue { const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; const bin_op = f.air.extraData(Air.Bin, ty_pl.payload).data; - const inst_ty = f.air.typeOfIndex(inst); - const slice_ty = f.air.typeOf(bin_op.lhs); + const inst_ty = f.typeOfIndex(inst); + const slice_ty = f.typeOf(bin_op.lhs); const elem_ty = slice_ty.elemType2(mod); const elem_has_bits = elem_ty.hasRuntimeBitsIgnoreComptime(mod); @@ -3226,7 
+3238,7 @@ fn airSliceElemPtr(f: *Function, inst: Air.Inst.Index) !CValue { fn airArrayElemVal(f: *Function, inst: Air.Inst.Index) !CValue { const mod = f.object.dg.module; const bin_op = f.air.instructions.items(.data)[inst].bin_op; - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); if (!inst_ty.hasRuntimeBitsIgnoreComptime(mod)) { try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); return .none; @@ -3251,7 +3263,7 @@ fn airArrayElemVal(f: *Function, inst: Air.Inst.Index) !CValue { fn airAlloc(f: *Function, inst: Air.Inst.Index) !CValue { const mod = f.object.dg.module; - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const elem_type = inst_ty.elemType(); if (!elem_type.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return .{ .undef = inst_ty }; @@ -3267,7 +3279,7 @@ fn airAlloc(f: *Function, inst: Air.Inst.Index) !CValue { fn airRetPtr(f: *Function, inst: Air.Inst.Index) !CValue { const mod = f.object.dg.module; - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const elem_ty = inst_ty.elemType(); if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return .{ .undef = inst_ty }; @@ -3282,7 +3294,7 @@ fn airRetPtr(f: *Function, inst: Air.Inst.Index) !CValue { } fn airArg(f: *Function, inst: Air.Inst.Index) !CValue { - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const inst_cty = try f.typeToIndex(inst_ty, .parameter); const i = f.next_arg_index; @@ -3309,7 +3321,7 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue { const mod = f.object.dg.module; const ty_op = f.air.instructions.items(.data)[inst].ty_op; - const ptr_ty = f.air.typeOf(ty_op.operand); + const ptr_ty = f.typeOf(ty_op.operand); const ptr_scalar_ty = ptr_ty.scalarType(mod); const ptr_info = ptr_scalar_ty.ptrInfo().data; const src_ty = ptr_info.pointee_type; @@ -3399,7 +3411,7 @@ fn airRet(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValue { const un_op = f.air.instructions.items(.data)[inst].un_op; const writer = f.object.writer(); const op_inst = Air.refToIndex(un_op); - const op_ty = f.air.typeOf(un_op); + const op_ty = f.typeOf(un_op); const ret_ty = if (is_ptr) op_ty.childType() else op_ty; var lowered_ret_buf: LowerFnRetTyBuffer = undefined; const lowered_ret_ty = lowerFnRetTy(ret_ty, &lowered_ret_buf, mod); @@ -3453,9 +3465,9 @@ fn airIntCast(f: *Function, inst: Air.Inst.Index) !CValue { const operand = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const inst_scalar_ty = inst_ty.scalarType(mod); - const operand_ty = f.air.typeOf(ty_op.operand); + const operand_ty = f.typeOf(ty_op.operand); const scalar_ty = operand_ty.scalarType(mod); const writer = f.object.writer(); @@ -3478,13 +3490,13 @@ fn airTrunc(f: *Function, inst: Air.Inst.Index) !CValue { const operand = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const inst_scalar_ty = inst_ty.scalarType(mod); const dest_int_info = inst_scalar_ty.intInfo(mod); const dest_bits = dest_int_info.bits; const dest_c_bits = toCIntBits(dest_int_info.bits) orelse return f.fail("TODO: C backend: implement integer types larger than 128 bits", .{}); - const operand_ty = f.air.typeOf(ty_op.operand); + const operand_ty = f.typeOf(ty_op.operand); const scalar_ty = operand_ty.scalarType(mod); const scalar_int_info = scalar_ty.intInfo(mod); @@ 
-3572,7 +3584,7 @@ fn airBoolToInt(f: *Function, inst: Air.Inst.Index) !CValue { const operand = try f.resolveInst(un_op); try reap(f, inst, &.{un_op}); const writer = f.object.writer(); - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const local = try f.allocLocal(inst, inst_ty); const a = try Assignment.start(f, writer, inst_ty); try f.writeCValue(writer, local, .Other); @@ -3587,12 +3599,12 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { // *a = b; const bin_op = f.air.instructions.items(.data)[inst].bin_op; - const ptr_ty = f.air.typeOf(bin_op.lhs); + const ptr_ty = f.typeOf(bin_op.lhs); const ptr_scalar_ty = ptr_ty.scalarType(mod); const ptr_info = ptr_scalar_ty.ptrInfo().data; const ptr_val = try f.resolveInst(bin_op.lhs); - const src_ty = f.air.typeOf(bin_op.rhs); + const src_ty = f.typeOf(bin_op.rhs); const val_is_undef = if (f.air.value(bin_op.rhs, mod)) |v| v.isUndefDeep() else false; @@ -3737,8 +3749,8 @@ fn airOverflow(f: *Function, inst: Air.Inst.Index, operation: []const u8, info: const rhs = try f.resolveInst(bin_op.rhs); try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); - const inst_ty = f.air.typeOfIndex(inst); - const operand_ty = f.air.typeOf(bin_op.lhs); + const inst_ty = f.typeOfIndex(inst); + const operand_ty = f.typeOf(bin_op.lhs); const scalar_ty = operand_ty.scalarType(mod); const w = f.object.writer(); @@ -3769,14 +3781,14 @@ fn airOverflow(f: *Function, inst: Air.Inst.Index, operation: []const u8, info: fn airNot(f: *Function, inst: Air.Inst.Index) !CValue { const mod = f.object.dg.module; const ty_op = f.air.instructions.items(.data)[inst].ty_op; - const operand_ty = f.air.typeOf(ty_op.operand); + const operand_ty = f.typeOf(ty_op.operand); const scalar_ty = operand_ty.scalarType(mod); if (scalar_ty.tag() != .bool) return try airUnBuiltinCall(f, inst, "not", .bits); const op = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); @@ -3802,7 +3814,7 @@ fn airBinOp( ) !CValue { const mod = f.object.dg.module; const bin_op = f.air.instructions.items(.data)[inst].bin_op; - const operand_ty = f.air.typeOf(bin_op.lhs); + const operand_ty = f.typeOf(bin_op.lhs); const scalar_ty = operand_ty.scalarType(mod); if ((scalar_ty.isInt(mod) and scalar_ty.bitSize(mod) > 64) or scalar_ty.isRuntimeFloat()) return try airBinBuiltinCall(f, inst, operation, info); @@ -3811,7 +3823,7 @@ fn airBinOp( const rhs = try f.resolveInst(bin_op.rhs); try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); @@ -3839,7 +3851,7 @@ fn airCmpOp( operator: std.math.CompareOperator, ) !CValue { const mod = f.object.dg.module; - const lhs_ty = f.air.typeOf(data.lhs); + const lhs_ty = f.typeOf(data.lhs); const scalar_ty = lhs_ty.scalarType(mod); const scalar_bits = scalar_ty.bitSize(mod); @@ -3855,12 +3867,12 @@ fn airCmpOp( if (scalar_ty.isRuntimeFloat()) return airCmpBuiltinCall(f, inst, data, operator, .operator, .none); - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const lhs = try f.resolveInst(data.lhs); const rhs = try f.resolveInst(data.rhs); try reap(f, inst, &.{ data.lhs, data.rhs }); - const rhs_ty = f.air.typeOf(data.rhs); + const rhs_ty = f.typeOf(data.rhs); const 
need_cast = lhs_ty.isSinglePointer() or rhs_ty.isSinglePointer(); const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); @@ -3891,7 +3903,7 @@ fn airEquality( const mod = f.object.dg.module; const bin_op = f.air.instructions.items(.data)[inst].bin_op; - const operand_ty = f.air.typeOf(bin_op.lhs); + const operand_ty = f.typeOf(bin_op.lhs); const operand_bits = operand_ty.bitSize(mod); if (operand_ty.isInt(mod) and operand_bits > 64) return airCmpBuiltinCall( @@ -3910,7 +3922,7 @@ fn airEquality( try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); const writer = f.object.writer(); - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const local = try f.allocLocal(inst, inst_ty); try f.writeCValue(writer, local, .Other); try writer.writeAll(" = "); @@ -3954,7 +3966,7 @@ fn airEquality( fn airCmpLtErrorsLen(f: *Function, inst: Air.Inst.Index) !CValue { const un_op = f.air.instructions.items(.data)[inst].un_op; - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const operand = try f.resolveInst(un_op); try reap(f, inst, &.{un_op}); @@ -3976,7 +3988,7 @@ fn airPtrAddSub(f: *Function, inst: Air.Inst.Index, operator: u8) !CValue { const rhs = try f.resolveInst(bin_op.rhs); try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const inst_scalar_ty = inst_ty.scalarType(mod); const elem_ty = inst_scalar_ty.elemType2(mod); @@ -4019,7 +4031,7 @@ fn airMinMax(f: *Function, inst: Air.Inst.Index, operator: u8, operation: []cons const mod = f.object.dg.module; const bin_op = f.air.instructions.items(.data)[inst].bin_op; - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const inst_scalar_ty = inst_ty.scalarType(mod); if (inst_scalar_ty.isInt(mod) and inst_scalar_ty.bitSize(mod) > 64) @@ -4065,7 +4077,7 @@ fn airSlice(f: *Function, inst: Air.Inst.Index) !CValue { const len = try f.resolveInst(bin_op.rhs); try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); var buf: Type.SlicePtrFieldTypeBuffer = undefined; const ptr_ty = inst_ty.slicePtrFieldType(&buf); @@ -4110,7 +4122,7 @@ fn airCall( const resolved_args = try gpa.alloc(CValue, args.len); defer gpa.free(resolved_args); for (resolved_args, args) |*resolved_arg, arg| { - const arg_ty = f.air.typeOf(arg); + const arg_ty = f.typeOf(arg); const arg_cty = try f.typeToIndex(arg_ty, .parameter); if (f.indexToCType(arg_cty).tag() == .void) { resolved_arg.* = .none; @@ -4141,7 +4153,7 @@ fn airCall( for (args) |arg| try bt.feed(arg); } - const callee_ty = f.air.typeOf(pl_op.operand); + const callee_ty = f.typeOf(pl_op.operand); const fn_ty = switch (callee_ty.zigTypeTag(mod)) { .Fn => callee_ty, .Pointer => callee_ty.childType(), @@ -4279,7 +4291,7 @@ fn airBlock(f: *Function, inst: Air.Inst.Index) !CValue { f.next_block_index += 1; const writer = f.object.writer(); - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const result = if (inst_ty.tag() != .void and !f.liveness.isUnused(inst)) try f.allocLocal(inst, inst_ty) else @@ -4302,7 +4314,7 @@ fn airBlock(f: *Function, inst: Air.Inst.Index) !CValue { try f.object.indent_writer.insertNewline(); // noreturn blocks have no `br` instructions reaching them, so we don't want a label - if (!f.air.typeOfIndex(inst).isNoReturn()) { + if (!f.typeOfIndex(inst).isNoReturn()) { // label must be followed by an expression, include 
an empty one. try writer.print("zig_block_{d}:;\n", .{block_id}); } @@ -4314,7 +4326,7 @@ fn airTry(f: *Function, inst: Air.Inst.Index) !CValue { const pl_op = f.air.instructions.items(.data)[inst].pl_op; const extra = f.air.extraData(Air.Try, pl_op.payload); const body = f.air.extra[extra.end..][0..extra.data.body_len]; - const err_union_ty = f.air.typeOf(pl_op.operand); + const err_union_ty = f.typeOf(pl_op.operand); return lowerTry(f, inst, pl_op.operand, body, err_union_ty, false); } @@ -4322,7 +4334,7 @@ fn airTryPtr(f: *Function, inst: Air.Inst.Index) !CValue { const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; const extra = f.air.extraData(Air.TryPtr, ty_pl.payload); const body = f.air.extra[extra.end..][0..extra.data.body_len]; - const err_union_ty = f.air.typeOf(extra.data.ptr).childType(); + const err_union_ty = f.typeOf(extra.data.ptr).childType(); return lowerTry(f, inst, extra.data.ptr, body, err_union_ty, true); } @@ -4336,7 +4348,7 @@ fn lowerTry( ) !CValue { const mod = f.object.dg.module; const err_union = try f.resolveInst(operand); - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const liveness_condbr = f.liveness.getCondBr(inst); const writer = f.object.writer(); const payload_ty = err_union_ty.errorUnionPayload(); @@ -4404,7 +4416,7 @@ fn airBr(f: *Function, inst: Air.Inst.Index) !CValue { // If result is .none then the value of the block is unused. if (result != .none) { - const operand_ty = f.air.typeOf(branch.operand); + const operand_ty = f.typeOf(branch.operand); const operand = try f.resolveInst(branch.operand); try reap(f, inst, &.{branch.operand}); @@ -4421,10 +4433,10 @@ fn airBr(f: *Function, inst: Air.Inst.Index) !CValue { fn airBitcast(f: *Function, inst: Air.Inst.Index) !CValue { const ty_op = f.air.instructions.items(.data)[inst].ty_op; - const dest_ty = f.air.typeOfIndex(inst); + const dest_ty = f.typeOfIndex(inst); const operand = try f.resolveInst(ty_op.operand); - const operand_ty = f.air.typeOf(ty_op.operand); + const operand_ty = f.typeOf(ty_op.operand); const bitcasted = try bitcast(f, dest_ty, operand, operand_ty); try reap(f, inst, &.{ty_op.operand}); @@ -4684,7 +4696,7 @@ fn airSwitchBr(f: *Function, inst: Air.Inst.Index) !CValue { const pl_op = f.air.instructions.items(.data)[inst].pl_op; const condition = try f.resolveInst(pl_op.operand); try reap(f, inst, &.{pl_op.operand}); - const condition_ty = f.air.typeOf(pl_op.operand); + const condition_ty = f.typeOf(pl_op.operand); const switch_br = f.air.extraData(Air.SwitchBr, pl_op.payload); const writer = f.object.writer(); @@ -4784,7 +4796,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue { const result = result: { const writer = f.object.writer(); - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const local = if (inst_ty.hasRuntimeBitsIgnoreComptime(mod)) local: { const local = try f.allocLocal(inst, inst_ty); if (f.wantSafety()) { @@ -4814,7 +4826,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue { const is_reg = constraint[1] == '{'; if (is_reg) { - const output_ty = if (output == .none) inst_ty else f.air.typeOf(output).childType(); + const output_ty = if (output == .none) inst_ty else f.typeOf(output).childType(); try writer.writeAll("register "); const alignment = 0; const local_value = try f.allocLocalValue(output_ty, alignment); @@ -4847,7 +4859,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue { const is_reg = constraint[0] == '{'; const input_val = try f.resolveInst(input); if 
(asmInputNeedsLocal(constraint, input_val)) { - const input_ty = f.air.typeOf(input); + const input_ty = f.typeOf(input); if (is_reg) try writer.writeAll("register "); const alignment = 0; const local_value = try f.allocLocalValue(input_ty, alignment); @@ -5048,7 +5060,7 @@ fn airIsNull( try f.writeCValue(writer, operand, .Other); } - const operand_ty = f.air.typeOf(un_op); + const operand_ty = f.typeOf(un_op); const optional_ty = if (is_ptr) operand_ty.childType() else operand_ty; var payload_buf: Type.Payload.ElemType = undefined; const payload_ty = optional_ty.optionalChild(&payload_buf); @@ -5083,7 +5095,7 @@ fn airOptionalPayload(f: *Function, inst: Air.Inst.Index) !CValue { const operand = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); - const opt_ty = f.air.typeOf(ty_op.operand); + const opt_ty = f.typeOf(ty_op.operand); var buf: Type.Payload.ElemType = undefined; const payload_ty = opt_ty.optionalChild(&buf); @@ -5092,7 +5104,7 @@ fn airOptionalPayload(f: *Function, inst: Air.Inst.Index) !CValue { return .none; } - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); @@ -5119,9 +5131,9 @@ fn airOptionalPayloadPtr(f: *Function, inst: Air.Inst.Index) !CValue { const writer = f.object.writer(); const operand = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); - const ptr_ty = f.air.typeOf(ty_op.operand); + const ptr_ty = f.typeOf(ty_op.operand); const opt_ty = ptr_ty.childType(); - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); if (!inst_ty.childType().hasRuntimeBitsIgnoreComptime(mod)) { return .{ .undef = inst_ty }; @@ -5149,11 +5161,11 @@ fn airOptionalPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue { const writer = f.object.writer(); const operand = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); - const operand_ty = f.air.typeOf(ty_op.operand); + const operand_ty = f.typeOf(ty_op.operand); const opt_ty = operand_ty.elemType(); - const inst_ty = f.air.typeOfIndex(inst); + const inst_ty = f.typeOfIndex(inst); if (opt_ty.optionalReprIsPayload(mod)) { if (f.liveness.isUnused(inst)) { @@ -5249,7 +5261,7 @@ fn airStructFieldPtr(f: *Function, inst: Air.Inst.Index) !CValue { const container_ptr_val = try f.resolveInst(extra.struct_operand); try reap(f, inst, &.{extra.struct_operand}); - const container_ptr_ty = f.air.typeOf(extra.struct_operand); + const container_ptr_ty = f.typeOf(extra.struct_operand); return fieldPtr(f, inst, container_ptr_ty, container_ptr_val, extra.field_index); } @@ -5258,7 +5270,7 @@ fn airStructFieldPtrIndex(f: *Function, inst: Air.Inst.Index, index: u8) !CValue const container_ptr_val = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); - const container_ptr_ty = f.air.typeOf(ty_op.operand); + const container_ptr_ty = f.typeOf(ty_op.operand); return fieldPtr(f, inst, container_ptr_ty, container_ptr_val, index); } @@ -5267,10 +5279,10 @@ fn airFieldParentPtr(f: *Function, inst: Air.Inst.Index) !CValue { const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; const extra = f.air.extraData(Air.FieldParentPtr, ty_pl.payload).data; - const container_ptr_ty = f.air.typeOfIndex(inst); + const container_ptr_ty = f.typeOfIndex(inst); const container_ty = container_ptr_ty.childType(); - const field_ptr_ty = f.air.typeOf(extra.field_ptr); + const field_ptr_ty = f.typeOf(extra.field_ptr); const field_ptr_val = try 
f.resolveInst(extra.field_ptr);
     try reap(f, inst, &.{extra.field_ptr});
@@ -5334,7 +5346,7 @@ fn fieldPtr(
 ) !CValue {
     const mod = f.object.dg.module;
     const container_ty = container_ptr_ty.elemType();
-    const field_ptr_ty = f.air.typeOfIndex(inst);
+    const field_ptr_ty = f.typeOfIndex(inst);
 
     // Ensure complete type definition is visible before accessing fields.
     _ = try f.typeToIndex(container_ty, .complete);
@@ -5385,7 +5397,7 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
     const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
     const extra = f.air.extraData(Air.StructField, ty_pl.payload).data;
 
-    const inst_ty = f.air.typeOfIndex(inst);
+    const inst_ty = f.typeOfIndex(inst);
     if (!inst_ty.hasRuntimeBitsIgnoreComptime(mod)) {
         try reap(f, inst, &.{extra.struct_operand});
         return .none;
@@ -5393,7 +5405,7 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
 
     const struct_byval = try f.resolveInst(extra.struct_operand);
     try reap(f, inst, &.{extra.struct_operand});
-    const struct_ty = f.air.typeOf(extra.struct_operand);
+    const struct_ty = f.typeOf(extra.struct_operand);
     const writer = f.object.writer();
 
     // Ensure complete type definition is visible before accessing fields.
@@ -5514,9 +5526,9 @@ fn airUnwrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue {
     const mod = f.object.dg.module;
     const ty_op = f.air.instructions.items(.data)[inst].ty_op;
 
-    const inst_ty = f.air.typeOfIndex(inst);
+    const inst_ty = f.typeOfIndex(inst);
     const operand = try f.resolveInst(ty_op.operand);
-    const operand_ty = f.air.typeOf(ty_op.operand);
+    const operand_ty = f.typeOf(ty_op.operand);
     try reap(f, inst, &.{ty_op.operand});
 
     const operand_is_ptr = operand_ty.zigTypeTag(mod) == .Pointer;
@@ -5553,10 +5565,10 @@ fn airUnwrapErrUnionPay(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValu
     const mod = f.object.dg.module;
     const ty_op = f.air.instructions.items(.data)[inst].ty_op;
 
-    const inst_ty = f.air.typeOfIndex(inst);
+    const inst_ty = f.typeOfIndex(inst);
     const operand = try f.resolveInst(ty_op.operand);
     try reap(f, inst, &.{ty_op.operand});
-    const operand_ty = f.air.typeOf(ty_op.operand);
+    const operand_ty = f.typeOf(ty_op.operand);
     const error_union_ty = if (is_ptr) operand_ty.childType() else operand_ty;
     const writer = f.object.writer();
@@ -5589,9 +5601,9 @@ fn airWrapOptional(f: *Function, inst: Air.Inst.Index) !CValue {
     const mod = f.object.dg.module;
     const ty_op = f.air.instructions.items(.data)[inst].ty_op;
 
-    const inst_ty = f.air.typeOfIndex(inst);
+    const inst_ty = f.typeOfIndex(inst);
     const repr_is_payload = inst_ty.optionalReprIsPayload(mod);
-    const payload_ty = f.air.typeOf(ty_op.operand);
+    const payload_ty = f.typeOf(ty_op.operand);
     const payload = try f.resolveInst(ty_op.operand);
     try reap(f, inst, &.{ty_op.operand});
@@ -5621,7 +5633,7 @@ fn airWrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue {
     const mod = f.object.dg.module;
     const ty_op = f.air.instructions.items(.data)[inst].ty_op;
 
-    const inst_ty = f.air.typeOfIndex(inst);
+    const inst_ty = f.typeOfIndex(inst);
     const payload_ty = inst_ty.errorUnionPayload();
     const repr_is_err = !payload_ty.hasRuntimeBitsIgnoreComptime(mod);
     const err_ty = inst_ty.errorUnionSet();
@@ -5661,7 +5673,7 @@ fn airErrUnionPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue {
     const writer = f.object.writer();
     const ty_op = f.air.instructions.items(.data)[inst].ty_op;
     const operand = try f.resolveInst(ty_op.operand);
-    const error_union_ty = f.air.typeOf(ty_op.operand).childType();
+    const error_union_ty = f.typeOf(ty_op.operand).childType();
 
     const error_ty = error_union_ty.errorUnionSet();
     const payload_ty = error_union_ty.errorUnionPayload();
@@ -5684,7 +5696,7 @@ fn airErrUnionPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue {
 
     // Then return the payload pointer (only if it is used)
     if (f.liveness.isUnused(inst)) return .none;
 
-    const local = try f.allocLocal(inst, f.air.typeOfIndex(inst));
+    const local = try f.allocLocal(inst, f.typeOfIndex(inst));
     try f.writeCValue(writer, local, .Other);
     try writer.writeAll(" = &(");
     try f.writeCValueDeref(writer, operand);
@@ -5711,7 +5723,7 @@ fn airWrapErrUnionPay(f: *Function, inst: Air.Inst.Index) !CValue {
     const mod = f.object.dg.module;
     const ty_op = f.air.instructions.items(.data)[inst].ty_op;
 
-    const inst_ty = f.air.typeOfIndex(inst);
+    const inst_ty = f.typeOfIndex(inst);
     const payload_ty = inst_ty.errorUnionPayload();
     const payload = try f.resolveInst(ty_op.operand);
     const repr_is_err = !payload_ty.hasRuntimeBitsIgnoreComptime(mod);
@@ -5747,7 +5759,7 @@ fn airIsErr(f: *Function, inst: Air.Inst.Index, is_ptr: bool, operator: []const
     const writer = f.object.writer();
     const operand = try f.resolveInst(un_op);
     try reap(f, inst, &.{un_op});
-    const operand_ty = f.air.typeOf(un_op);
+    const operand_ty = f.typeOf(un_op);
     const local = try f.allocLocal(inst, Type.bool);
     const err_union_ty = if (is_ptr) operand_ty.childType() else operand_ty;
     const payload_ty = err_union_ty.errorUnionPayload();
@@ -5780,10 +5792,10 @@ fn airArrayToSlice(f: *Function, inst: Air.Inst.Index) !CValue {
     const operand = try f.resolveInst(ty_op.operand);
     try reap(f, inst, &.{ty_op.operand});
 
-    const inst_ty = f.air.typeOfIndex(inst);
+    const inst_ty = f.typeOfIndex(inst);
     const writer = f.object.writer();
     const local = try f.allocLocal(inst, inst_ty);
-    const array_ty = f.air.typeOf(ty_op.operand).childType();
+    const array_ty = f.typeOf(ty_op.operand).childType();
 
     try f.writeCValueMember(writer, local, .{ .identifier = "ptr" });
     try writer.writeAll(" = ");
@@ -5812,10 +5824,10 @@ fn airFloatCast(f: *Function, inst: Air.Inst.Index) !CValue {
     const mod = f.object.dg.module;
     const ty_op = f.air.instructions.items(.data)[inst].ty_op;
 
-    const inst_ty = f.air.typeOfIndex(inst);
+    const inst_ty = f.typeOfIndex(inst);
     const operand = try f.resolveInst(ty_op.operand);
     try reap(f, inst, &.{ty_op.operand});
-    const operand_ty = f.air.typeOf(ty_op.operand);
+    const operand_ty = f.typeOf(ty_op.operand);
     const target = f.object.dg.module.getTarget();
     const operation = if (inst_ty.isRuntimeFloat() and operand_ty.isRuntimeFloat())
         if (inst_ty.floatBits(target) < operand_ty.floatBits(target)) "trunc" else "extend"
@@ -5855,9 +5867,9 @@ fn airPtrToInt(f: *Function, inst: Air.Inst.Index) !CValue {
     const un_op = f.air.instructions.items(.data)[inst].un_op;
 
     const operand = try f.resolveInst(un_op);
-    const operand_ty = f.air.typeOf(un_op);
+    const operand_ty = f.typeOf(un_op);
     try reap(f, inst, &.{un_op});
-    const inst_ty = f.air.typeOfIndex(inst);
+    const inst_ty = f.typeOfIndex(inst);
     const writer = f.object.writer();
     const local = try f.allocLocal(inst, inst_ty);
     try f.writeCValue(writer, local, .Other);
@@ -5885,9 +5897,9 @@ fn airUnBuiltinCall(
     const operand = try f.resolveInst(ty_op.operand);
     try reap(f, inst, &.{ty_op.operand});
 
-    const inst_ty = f.air.typeOfIndex(inst);
+    const inst_ty = f.typeOfIndex(inst);
     const inst_scalar_ty = inst_ty.scalarType(mod);
-    const operand_ty = f.air.typeOf(ty_op.operand);
+    const operand_ty = f.typeOf(ty_op.operand);
     const scalar_ty = operand_ty.scalarType(mod);
 
     const inst_scalar_cty = try f.typeToCType(inst_scalar_ty, .complete);
@@ -5927,7 +5939,7 @@ fn airBinBuiltinCall(
     const mod = f.object.dg.module;
     const bin_op = f.air.instructions.items(.data)[inst].bin_op;
 
-    const operand_ty = f.air.typeOf(bin_op.lhs);
+    const operand_ty = f.typeOf(bin_op.lhs);
     const operand_cty = try f.typeToCType(operand_ty, .complete);
     const is_big = operand_cty.tag() == .array;
@@ -5935,7 +5947,7 @@ fn airBinBuiltinCall(
     const rhs = try f.resolveInst(bin_op.rhs);
     if (!is_big) try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
 
-    const inst_ty = f.air.typeOfIndex(inst);
+    const inst_ty = f.typeOfIndex(inst);
     const inst_scalar_ty = inst_ty.scalarType(mod);
     const scalar_ty = operand_ty.scalarType(mod);
@@ -5984,9 +5996,9 @@ fn airCmpBuiltinCall(
     const rhs = try f.resolveInst(data.rhs);
     try reap(f, inst, &.{ data.lhs, data.rhs });
 
-    const inst_ty = f.air.typeOfIndex(inst);
+    const inst_ty = f.typeOfIndex(inst);
     const inst_scalar_ty = inst_ty.scalarType(mod);
-    const operand_ty = f.air.typeOf(data.lhs);
+    const operand_ty = f.typeOf(data.lhs);
     const scalar_ty = operand_ty.scalarType(mod);
 
     const inst_scalar_cty = try f.typeToCType(inst_scalar_ty, .complete);
@@ -6032,11 +6044,11 @@ fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue
     const mod = f.object.dg.module;
     const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
     const extra = f.air.extraData(Air.Cmpxchg, ty_pl.payload).data;
-    const inst_ty = f.air.typeOfIndex(inst);
+    const inst_ty = f.typeOfIndex(inst);
     const ptr = try f.resolveInst(extra.ptr);
     const expected_value = try f.resolveInst(extra.expected_value);
     const new_value = try f.resolveInst(extra.new_value);
-    const ptr_ty = f.air.typeOf(extra.ptr);
+    const ptr_ty = f.typeOf(extra.ptr);
     const ty = ptr_ty.childType();
 
     const writer = f.object.writer();
@@ -6137,8 +6149,8 @@ fn airAtomicRmw(f: *Function, inst: Air.Inst.Index) !CValue {
     const mod = f.object.dg.module;
     const pl_op = f.air.instructions.items(.data)[inst].pl_op;
     const extra = f.air.extraData(Air.AtomicRmw, pl_op.payload).data;
-    const inst_ty = f.air.typeOfIndex(inst);
-    const ptr_ty = f.air.typeOf(pl_op.operand);
+    const inst_ty = f.typeOfIndex(inst);
+    const ptr_ty = f.typeOf(pl_op.operand);
     const ty = ptr_ty.childType();
     const ptr = try f.resolveInst(pl_op.operand);
     const operand = try f.resolveInst(extra.operand);
@@ -6193,7 +6205,7 @@ fn airAtomicLoad(f: *Function, inst: Air.Inst.Index) !CValue {
     const atomic_load = f.air.instructions.items(.data)[inst].atomic_load;
     const ptr = try f.resolveInst(atomic_load.ptr);
     try reap(f, inst, &.{atomic_load.ptr});
-    const ptr_ty = f.air.typeOf(atomic_load.ptr);
+    const ptr_ty = f.typeOf(atomic_load.ptr);
     const ty = ptr_ty.childType();
 
     const repr_ty = if (ty.isRuntimeFloat())
@@ -6201,7 +6213,7 @@ fn airAtomicLoad(f: *Function, inst: Air.Inst.Index) !CValue {
     else
         ty;
 
-    const inst_ty = f.air.typeOfIndex(inst);
+    const inst_ty = f.typeOfIndex(inst);
 
     const writer = f.object.writer();
     const local = try f.allocLocal(inst, inst_ty);
@@ -6227,7 +6239,7 @@ fn airAtomicStore(f: *Function, inst: Air.Inst.Index, order: [*:0]const u8) !CValue {
     const mod = f.object.dg.module;
     const bin_op = f.air.instructions.items(.data)[inst].bin_op;
-    const ptr_ty = f.air.typeOf(bin_op.lhs);
+    const ptr_ty = f.typeOf(bin_op.lhs);
     const ty = ptr_ty.childType();
     const ptr = try f.resolveInst(bin_op.lhs);
     const element = try f.resolveInst(bin_op.rhs);
@@ -6270,10 +6282,10 @@ fn writeSliceOrPtr(f: *Function, writer: anytype, ptr: CValue, ptr_ty: Type) !vo
 
 fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
     const mod = f.object.dg.module;
     const bin_op = f.air.instructions.items(.data)[inst].bin_op;
-    const dest_ty = f.air.typeOf(bin_op.lhs);
+    const dest_ty = f.typeOf(bin_op.lhs);
     const dest_slice = try f.resolveInst(bin_op.lhs);
     const value = try f.resolveInst(bin_op.rhs);
-    const elem_ty = f.air.typeOf(bin_op.rhs);
+    const elem_ty = f.typeOf(bin_op.rhs);
     const elem_abi_size = elem_ty.abiSize(mod);
     const val_is_undef = if (f.air.value(bin_op.rhs, mod)) |val| val.isUndefDeep() else false;
     const writer = f.object.writer();
@@ -6393,8 +6405,8 @@ fn airMemcpy(f: *Function, inst: Air.Inst.Index) !CValue {
     const bin_op = f.air.instructions.items(.data)[inst].bin_op;
     const dest_ptr = try f.resolveInst(bin_op.lhs);
     const src_ptr = try f.resolveInst(bin_op.rhs);
-    const dest_ty = f.air.typeOf(bin_op.lhs);
-    const src_ty = f.air.typeOf(bin_op.rhs);
+    const dest_ty = f.typeOf(bin_op.lhs);
+    const src_ty = f.typeOf(bin_op.rhs);
     const writer = f.object.writer();
 
     try writer.writeAll("memcpy(");
@@ -6434,7 +6446,7 @@ fn airSetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue {
     const new_tag = try f.resolveInst(bin_op.rhs);
     try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
 
-    const union_ty = f.air.typeOf(bin_op.lhs).childType();
+    const union_ty = f.typeOf(bin_op.lhs).childType();
     const layout = union_ty.unionGetLayout(mod);
     if (layout.tag_size == 0) return .none;
     const tag_ty = union_ty.unionTagTypeSafety().?;
@@ -6455,11 +6467,11 @@ fn airGetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue {
     const operand = try f.resolveInst(ty_op.operand);
     try reap(f, inst, &.{ty_op.operand});
 
-    const union_ty = f.air.typeOf(ty_op.operand);
+    const union_ty = f.typeOf(ty_op.operand);
     const layout = union_ty.unionGetLayout(mod);
     if (layout.tag_size == 0) return .none;
 
-    const inst_ty = f.air.typeOfIndex(inst);
+    const inst_ty = f.typeOfIndex(inst);
     const writer = f.object.writer();
     const local = try f.allocLocal(inst, inst_ty);
     const a = try Assignment.start(f, writer, inst_ty);
@@ -6473,8 +6485,8 @@ fn airTagName(f: *Function, inst: Air.Inst.Index) !CValue {
     const un_op = f.air.instructions.items(.data)[inst].un_op;
 
-    const inst_ty = f.air.typeOfIndex(inst);
-    const enum_ty = f.air.typeOf(un_op);
+    const inst_ty = f.typeOfIndex(inst);
+    const enum_ty = f.typeOf(un_op);
     const operand = try f.resolveInst(un_op);
     try reap(f, inst, &.{un_op});
@@ -6494,7 +6506,7 @@ fn airErrorName(f: *Function, inst: Air.Inst.Index) !CValue {
     const un_op = f.air.instructions.items(.data)[inst].un_op;
 
     const writer = f.object.writer();
-    const inst_ty = f.air.typeOfIndex(inst);
+    const inst_ty = f.typeOfIndex(inst);
     const operand = try f.resolveInst(un_op);
     try reap(f, inst, &.{un_op});
     const local = try f.allocLocal(inst, inst_ty);
@@ -6513,7 +6525,7 @@ fn airSplat(f: *Function, inst: Air.Inst.Index) !CValue {
     const operand = try f.resolveInst(ty_op.operand);
     try reap(f, inst, &.{ty_op.operand});
 
-    const inst_ty = f.air.typeOfIndex(inst);
+    const inst_ty = f.typeOfIndex(inst);
     const inst_scalar_ty = inst_ty.scalarType(mod);
 
     const writer = f.object.writer();
@@ -6539,7 +6551,7 @@ fn airSelect(f: *Function, inst: Air.Inst.Index) !CValue {
     const rhs = try f.resolveInst(extra.rhs);
     try reap(f, inst, &.{ pl_op.operand, extra.lhs, extra.rhs });
 
-    const inst_ty = f.air.typeOfIndex(inst);
+    const inst_ty = f.typeOfIndex(inst);
     const writer = f.object.writer();
     const local = try f.allocLocal(inst, inst_ty);
@@ -6570,7 +6582,7 @@ fn airShuffle(f: *Function, inst: Air.Inst.Index) !CValue {
     const lhs = try f.resolveInst(extra.a);
     const rhs = try f.resolveInst(extra.b);
 
-    const inst_ty = f.air.typeOfIndex(inst);
+    const inst_ty = f.typeOfIndex(inst);
     const writer = f.object.writer();
     const local = try f.allocLocal(inst, inst_ty);
@@ -6607,10 +6619,10 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue {
     const reduce = f.air.instructions.items(.data)[inst].reduce;
     const target = mod.getTarget();
 
-    const scalar_ty = f.air.typeOfIndex(inst);
+    const scalar_ty = f.typeOfIndex(inst);
     const operand = try f.resolveInst(reduce.operand);
     try reap(f, inst, &.{reduce.operand});
-    const operand_ty = f.air.typeOf(reduce.operand);
+    const operand_ty = f.typeOf(reduce.operand);
     const writer = f.object.writer();
 
     const use_operator = scalar_ty.bitSize(mod) <= 64;
@@ -6762,7 +6774,7 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue {
 fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
     const mod = f.object.dg.module;
     const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
-    const inst_ty = f.air.typeOfIndex(inst);
+    const inst_ty = f.typeOfIndex(inst);
     const len = @intCast(usize, inst_ty.arrayLen());
     const elements = @ptrCast([]const Air.Inst.Ref, f.air.extra[ty_pl.payload..][0..len]);
     const gpa = f.object.dg.gpa;
@@ -6892,10 +6904,10 @@ fn airUnionInit(f: *Function, inst: Air.Inst.Index) !CValue {
     const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
     const extra = f.air.extraData(Air.UnionInit, ty_pl.payload).data;
 
-    const union_ty = f.air.typeOfIndex(inst);
+    const union_ty = f.typeOfIndex(inst);
     const union_obj = union_ty.cast(Type.Payload.Union).?.data;
     const field_name = union_obj.fields.keys()[extra.field_index];
-    const payload_ty = f.air.typeOf(extra.init);
+    const payload_ty = f.typeOf(extra.init);
     const payload = try f.resolveInst(extra.init);
     try reap(f, inst, &.{extra.init});
@@ -6965,7 +6977,7 @@ fn airWasmMemorySize(f: *Function, inst: Air.Inst.Index) !CValue {
     const pl_op = f.air.instructions.items(.data)[inst].pl_op;
 
     const writer = f.object.writer();
-    const inst_ty = f.air.typeOfIndex(inst);
+    const inst_ty = f.typeOfIndex(inst);
     const local = try f.allocLocal(inst, inst_ty);
 
     try f.writeCValue(writer, local, .Other);
@@ -6979,7 +6991,7 @@ fn airWasmMemoryGrow(f: *Function, inst: Air.Inst.Index) !CValue {
     const pl_op = f.air.instructions.items(.data)[inst].pl_op;
 
     const writer = f.object.writer();
-    const inst_ty = f.air.typeOfIndex(inst);
+    const inst_ty = f.typeOfIndex(inst);
     const operand = try f.resolveInst(pl_op.operand);
     try reap(f, inst, &.{pl_op.operand});
     const local = try f.allocLocal(inst, inst_ty);
@@ -6999,7 +7011,7 @@ fn airFloatNeg(f: *Function, inst: Air.Inst.Index) !CValue {
     const operand = try f.resolveInst(un_op);
     try reap(f, inst, &.{un_op});
 
-    const operand_ty = f.air.typeOf(un_op);
+    const operand_ty = f.typeOf(un_op);
     const scalar_ty = operand_ty.scalarType(mod);
 
     const writer = f.object.writer();
@@ -7025,7 +7037,7 @@ fn airUnFloatOp(f: *Function, inst: Air.Inst.Index, operation: []const u8) !CVal
     const operand = try f.resolveInst(un_op);
     try reap(f, inst, &.{un_op});
 
-    const inst_ty = f.air.typeOfIndex(inst);
+    const inst_ty = f.typeOfIndex(inst);
     const inst_scalar_ty = inst_ty.scalarType(mod);
 
     const writer = f.object.writer();
@@ -7054,7 +7066,7 @@ fn airBinFloatOp(f: *Function, inst: Air.Inst.Index, operation: []const u8) !CVa
     const rhs = try f.resolveInst(bin_op.rhs);
     try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
 
-    const inst_ty = f.air.typeOfIndex(inst);
+    const inst_ty = f.typeOfIndex(inst);
     const inst_scalar_ty = inst_ty.scalarType(mod);
 
     const writer = f.object.writer();
@@ -7088,7 +7100,7 @@ fn airMulAdd(f: *Function, inst: Air.Inst.Index) !CValue {
     const addend = try f.resolveInst(pl_op.operand);
     try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs, pl_op.operand });
 
-    const inst_ty = f.air.typeOfIndex(inst);
+    const inst_ty = f.typeOfIndex(inst);
     const inst_scalar_ty = inst_ty.scalarType(mod);
 
     const writer = f.object.writer();
@@ -7114,7 +7126,7 @@ fn airMulAdd(f: *Function, inst: Air.Inst.Index) !CValue {
 }
 
 fn airCVaStart(f: *Function, inst: Air.Inst.Index) !CValue {
-    const inst_ty = f.air.typeOfIndex(inst);
+    const inst_ty = f.typeOfIndex(inst);
     const fn_cty = try f.typeToCType(f.object.dg.decl.?.ty, .complete);
     const param_len = fn_cty.castTag(.varargs_function).?.data.param_types.len;
@@ -7133,7 +7145,7 @@ fn airCVaStart(f: *Function, inst: Air.Inst.Index) !CValue {
 
 fn airCVaArg(f: *Function, inst: Air.Inst.Index) !CValue {
     const ty_op = f.air.instructions.items(.data)[inst].ty_op;
-    const inst_ty = f.air.typeOfIndex(inst);
+    const inst_ty = f.typeOfIndex(inst);
     const va_list = try f.resolveInst(ty_op.operand);
     try reap(f, inst, &.{ty_op.operand});
@@ -7164,7 +7176,7 @@ fn airCVaEnd(f: *Function, inst: Air.Inst.Index) !CValue {
 
 fn airCVaCopy(f: *Function, inst: Air.Inst.Index) !CValue {
     const ty_op = f.air.instructions.items(.data)[inst].ty_op;
-    const inst_ty = f.air.typeOfIndex(inst);
+    const inst_ty = f.typeOfIndex(inst);
     const va_list = try f.resolveInst(ty_op.operand);
     try reap(f, inst, &.{ty_op.operand});
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index c3d3da0d32..2e42e8e3fc 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -4497,7 +4497,7 @@ pub const FuncGen = struct {
         const mod = self.dg.module;
 
         const llvm_val = try self.resolveValue(.{
-            .ty = self.air.typeOf(inst),
+            .ty = self.typeOf(inst),
             .val = self.air.value(inst, mod).?,
         });
         gop.value_ptr.* = llvm_val;
@@ -4528,11 +4528,12 @@ pub const FuncGen = struct {
     }
 
     fn genBody(self: *FuncGen, body: []const Air.Inst.Index) Error!void {
+        const mod = self.dg.module;
+        const ip = &mod.intern_pool;
         const air_tags = self.air.instructions.items(.tag);
         for (body, 0..) |inst, i| {
-            if (self.liveness.isUnused(inst) and !self.air.mustLower(inst)) {
+            if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip.*))
                 continue;
-            }
 
             const opt_value: ?*llvm.Value = switch (air_tags[inst]) {
                 // zig fmt: off
@@ -4751,6 +4752,8 @@ pub const FuncGen = struct {
             .constant => unreachable,
             .const_ty => unreachable,
+            .interned => unreachable,
+
             .unreach => self.airUnreach(inst),
             .dbg_stmt => self.airDbgStmt(inst),
             .dbg_inline_begin => try self.airDbgInlineBegin(inst),
@@ -4781,7 +4784,7 @@ pub const FuncGen = struct {
         const pl_op = self.air.instructions.items(.data)[inst].pl_op;
         const extra = self.air.extraData(Air.Call, pl_op.payload);
         const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]);
-        const callee_ty = self.air.typeOf(pl_op.operand);
+        const callee_ty = self.typeOf(pl_op.operand);
         const mod = self.dg.module;
         const zig_fn_ty = switch (callee_ty.zigTypeTag(mod)) {
             .Fn => callee_ty,
@@ -4815,7 +4818,7 @@ pub const FuncGen = struct {
             .no_bits => continue,
             .byval => {
                 const arg = args[it.zig_index - 1];
-                const param_ty = self.air.typeOf(arg);
+                const param_ty = self.typeOf(arg);
                 const llvm_arg = try self.resolveInst(arg);
                 const llvm_param_ty = try self.dg.lowerType(param_ty);
                 if (isByRef(param_ty, mod)) {
@@ -4829,7 +4832,7 @@ pub const FuncGen = struct {
             },
             .byref => {
                 const arg = args[it.zig_index - 1];
-                const param_ty = self.air.typeOf(arg);
+                const param_ty = self.typeOf(arg);
                 const llvm_arg = try self.resolveInst(arg);
                 if (isByRef(param_ty, mod)) {
                     try llvm_args.append(llvm_arg);
@@ -4844,7 +4847,7 @@ pub const FuncGen = struct {
             },
             .byref_mut => {
                 const arg = args[it.zig_index - 1];
-                const param_ty = self.air.typeOf(arg);
+                const param_ty = self.typeOf(arg);
                 const llvm_arg = try self.resolveInst(arg);
 
                 const alignment = param_ty.abiAlignment(mod);
@@ -4865,7 +4868,7 @@ pub const FuncGen = struct {
             },
             .abi_sized_int => {
                 const arg = args[it.zig_index - 1];
-                const param_ty = self.air.typeOf(arg);
+                const param_ty = self.typeOf(arg);
                 const llvm_arg = try self.resolveInst(arg);
                 const abi_size = @intCast(c_uint, param_ty.abiSize(mod));
                 const int_llvm_ty = self.context.intType(abi_size * 8);
@@ -4901,7 +4904,7 @@ pub const FuncGen = struct {
             },
             .multiple_llvm_types => {
                 const arg = args[it.zig_index - 1];
-                const param_ty = self.air.typeOf(arg);
+                const param_ty = self.typeOf(arg);
                 const llvm_types = it.llvm_types_buffer[0..it.llvm_types_len];
                 const llvm_arg = try self.resolveInst(arg);
                 const is_by_ref = isByRef(param_ty, mod);
@@ -4930,7 +4933,7 @@ pub const FuncGen = struct {
             },
             .float_array => |count| {
                 const arg = args[it.zig_index - 1];
-                const arg_ty = self.air.typeOf(arg);
+                const arg_ty = self.typeOf(arg);
                 var llvm_arg = try self.resolveInst(arg);
                 if (!isByRef(arg_ty, mod)) {
                     const p = self.buildAlloca(llvm_arg.typeOf(), null);
@@ -4950,7 +4953,7 @@ pub const FuncGen = struct {
             .i32_array, .i64_array => |arr_len| {
                 const elem_size: u8 = if (lowering == .i32_array) 32 else 64;
                 const arg = args[it.zig_index - 1];
-                const arg_ty = self.air.typeOf(arg);
+                const arg_ty = self.typeOf(arg);
                 var llvm_arg = try self.resolveInst(arg);
                 if (!isByRef(arg_ty, mod)) {
                     const p = self.buildAlloca(llvm_arg.typeOf(), null);
@@ -5094,7 +5097,7 @@ pub const FuncGen = struct {
     fn airRet(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
         const mod = self.dg.module;
         const un_op = self.air.instructions.items(.data)[inst].un_op;
-        const ret_ty = self.air.typeOf(un_op);
+        const ret_ty = self.typeOf(un_op);
         if (self.ret_ptr) |ret_ptr| {
             const operand = try self.resolveInst(un_op);
             var ptr_ty_payload: Type.Payload.ElemType = .{
@@ -5150,7 +5153,7 @@ pub const FuncGen = struct {
 
     fn airRetLoad(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
         const un_op = self.air.instructions.items(.data)[inst].un_op;
-        const ptr_ty = self.air.typeOf(un_op);
+        const ptr_ty = self.typeOf(un_op);
         const ret_ty = ptr_ty.childType();
         const fn_info = self.dg.decl.ty.fnInfo();
         const mod = self.dg.module;
@@ -5236,7 +5239,7 @@ pub const FuncGen = struct {
     fn airCVaStart(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
         const mod = self.dg.module;
-        const va_list_ty = self.air.typeOfIndex(inst);
+        const va_list_ty = self.typeOfIndex(inst);
         const llvm_va_list_ty = try self.dg.lowerType(va_list_ty);
 
         const result_alignment = va_list_ty.abiAlignment(mod);
@@ -5266,7 +5269,7 @@ pub const FuncGen = struct {
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
         const lhs = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
-        const operand_ty = self.air.typeOf(bin_op.lhs);
+        const operand_ty = self.typeOf(bin_op.lhs);
 
         return self.cmp(lhs, rhs, operand_ty, op);
     }
@@ -5279,7 +5282,7 @@ pub const FuncGen = struct {
         const lhs = try self.resolveInst(extra.lhs);
         const rhs = try self.resolveInst(extra.rhs);
 
-        const vec_ty = self.air.typeOf(extra.lhs);
+        const vec_ty = self.typeOf(extra.lhs);
         const cmp_op = extra.compareOperator();
 
         return self.cmp(lhs, rhs, vec_ty, cmp_op);
@@ -5396,12 +5399,12 @@ pub const FuncGen = struct {
     }
 
     fn airBlock(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
         const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
         const extra = self.air.extraData(Air.Block, ty_pl.payload);
         const body = self.air.extra[extra.end..][0..extra.data.body_len];
-        const inst_ty = self.air.typeOfIndex(inst);
+        const inst_ty = self.typeOfIndex(inst);
         const parent_bb = self.context.createBasicBlock("Block");
-        const mod = self.dg.module;
 
         if (inst_ty.isNoReturn()) {
             try self.genBody(body);
@@ -5453,7 +5456,7 @@ pub const FuncGen = struct {
         const block = self.blocks.get(branch.block_inst).?;
 
         // Add the values to the lists only if the break provides a value.
-        const operand_ty = self.air.typeOf(branch.operand);
+        const operand_ty = self.typeOf(branch.operand);
         const mod = self.dg.module;
         if (operand_ty.hasRuntimeBitsIgnoreComptime(mod) or operand_ty.zigTypeTag(mod) == .Fn) {
             const val = try self.resolveInst(branch.operand);
@@ -5497,8 +5500,8 @@ pub const FuncGen = struct {
         const err_union = try self.resolveInst(pl_op.operand);
         const extra = self.air.extraData(Air.Try, pl_op.payload);
         const body = self.air.extra[extra.end..][0..extra.data.body_len];
-        const err_union_ty = self.air.typeOf(pl_op.operand);
-        const payload_ty = self.air.typeOfIndex(inst);
+        const err_union_ty = self.typeOf(pl_op.operand);
+        const payload_ty = self.typeOfIndex(inst);
         const can_elide_load = if (isByRef(payload_ty, mod)) self.canElideLoad(body_tail) else false;
         const is_unused = self.liveness.isUnused(inst);
         return lowerTry(self, err_union, body, err_union_ty, false, can_elide_load, is_unused);
@@ -5509,7 +5512,7 @@ pub const FuncGen = struct {
         const extra = self.air.extraData(Air.TryPtr, ty_pl.payload);
         const err_union_ptr = try self.resolveInst(extra.data.ptr);
         const body = self.air.extra[extra.end..][0..extra.data.body_len];
-        const err_union_ty = self.air.typeOf(extra.data.ptr).childType();
+        const err_union_ty = self.typeOf(extra.data.ptr).childType();
         const is_unused = self.liveness.isUnused(inst);
         return lowerTry(self, err_union_ptr, body, err_union_ty, true, true, is_unused);
     }
@@ -5650,7 +5653,7 @@ pub const FuncGen = struct {
         // would have been emitted already. Also the main loop in genBody can
         // be while(true) instead of for(body), which will eliminate 1 branch on
         // a hot path.
-        if (body.len == 0 or !self.air.typeOfIndex(body[body.len - 1]).isNoReturn()) {
+        if (body.len == 0 or !self.typeOfIndex(body[body.len - 1]).isNoReturn()) {
             _ = self.builder.buildBr(loop_block);
         }
         return null;
@@ -5659,11 +5662,11 @@ pub const FuncGen = struct {
     fn airArrayToSlice(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
         const mod = self.dg.module;
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-        const operand_ty = self.air.typeOf(ty_op.operand);
+        const operand_ty = self.typeOf(ty_op.operand);
         const array_ty = operand_ty.childType();
         const llvm_usize = try self.dg.lowerType(Type.usize);
         const len = llvm_usize.constInt(array_ty.arrayLen(), .False);
-        const slice_llvm_ty = try self.dg.lowerType(self.air.typeOfIndex(inst));
+        const slice_llvm_ty = try self.dg.lowerType(self.typeOfIndex(inst));
         const operand = try self.resolveInst(ty_op.operand);
         if (!array_ty.hasRuntimeBitsIgnoreComptime(mod)) {
             const partial = self.builder.buildInsertValue(slice_llvm_ty.getUndef(), operand, 0, "");
@@ -5683,10 +5686,10 @@ pub const FuncGen = struct {
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
 
         const operand = try self.resolveInst(ty_op.operand);
-        const operand_ty = self.air.typeOf(ty_op.operand);
+        const operand_ty = self.typeOf(ty_op.operand);
         const operand_scalar_ty = operand_ty.scalarType(mod);
 
-        const dest_ty = self.air.typeOfIndex(inst);
+        const dest_ty = self.typeOfIndex(inst);
         const dest_scalar_ty = dest_ty.scalarType(mod);
         const dest_llvm_ty = try self.dg.lowerType(dest_ty);
         const target = mod.getTarget();
@@ -5743,10 +5746,10 @@ pub const FuncGen = struct {
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
 
         const operand = try self.resolveInst(ty_op.operand);
-        const operand_ty = self.air.typeOf(ty_op.operand);
+        const operand_ty = self.typeOf(ty_op.operand);
         const operand_scalar_ty = operand_ty.scalarType(mod);
 
-        const dest_ty = self.air.typeOfIndex(inst);
+        const dest_ty = self.typeOfIndex(inst);
         const dest_scalar_ty = dest_ty.scalarType(mod);
         const dest_llvm_ty = try self.dg.lowerType(dest_ty);
@@ -5832,7 +5835,7 @@ pub const FuncGen = struct {
     fn airPtrSliceFieldPtr(self: *FuncGen, inst: Air.Inst.Index, index: c_uint) !?*llvm.Value {
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
         const slice_ptr = try self.resolveInst(ty_op.operand);
-        const slice_ptr_ty = self.air.typeOf(ty_op.operand);
+        const slice_ptr_ty = self.typeOf(ty_op.operand);
         const slice_llvm_ty = try self.dg.lowerPtrElemTy(slice_ptr_ty.childType());
 
         return self.builder.buildStructGEP(slice_llvm_ty, slice_ptr, index, "");
@@ -5842,7 +5845,7 @@ pub const FuncGen = struct {
         const mod = self.dg.module;
         const inst = body_tail[0];
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
-        const slice_ty = self.air.typeOf(bin_op.lhs);
+        const slice_ty = self.typeOf(bin_op.lhs);
         const slice = try self.resolveInst(bin_op.lhs);
         const index = try self.resolveInst(bin_op.rhs);
         const elem_ty = slice_ty.childType();
@@ -5863,7 +5866,7 @@ pub const FuncGen = struct {
     fn airSliceElemPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
         const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
         const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
-        const slice_ty = self.air.typeOf(bin_op.lhs);
+        const slice_ty = self.typeOf(bin_op.lhs);
 
         const slice = try self.resolveInst(bin_op.lhs);
         const index = try self.resolveInst(bin_op.rhs);
@@ -5878,7 +5881,7 @@ pub const FuncGen = struct {
         const inst = body_tail[0];
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
 
-        const array_ty = self.air.typeOf(bin_op.lhs);
+        const array_ty = self.typeOf(bin_op.lhs);
         const array_llvm_val = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
         const array_llvm_ty = try self.dg.lowerType(array_ty);
@@ -5920,7 +5923,7 @@ pub const FuncGen = struct {
         const mod = self.dg.module;
         const inst = body_tail[0];
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
-        const ptr_ty = self.air.typeOf(bin_op.lhs);
+        const ptr_ty = self.typeOf(bin_op.lhs);
         const elem_ty = ptr_ty.childType();
         const llvm_elem_ty = try self.dg.lowerPtrElemTy(elem_ty);
         const base_ptr = try self.resolveInst(bin_op.lhs);
@@ -5948,7 +5951,7 @@ pub const FuncGen = struct {
         const mod = self.dg.module;
         const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
         const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
-        const ptr_ty = self.air.typeOf(bin_op.lhs);
+        const ptr_ty = self.typeOf(bin_op.lhs);
         const elem_ty = ptr_ty.childType();
         if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) return self.dg.lowerPtrToVoid(ptr_ty);
 
@@ -5973,7 +5976,7 @@ pub const FuncGen = struct {
         const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
         const struct_field = self.air.extraData(Air.StructField, ty_pl.payload).data;
         const struct_ptr = try self.resolveInst(struct_field.struct_operand);
-        const struct_ptr_ty = self.air.typeOf(struct_field.struct_operand);
+        const struct_ptr_ty = self.typeOf(struct_field.struct_operand);
         return self.fieldPtr(inst, struct_ptr, struct_ptr_ty, struct_field.field_index);
     }
@@ -5984,7 +5987,7 @@ pub const FuncGen = struct {
     ) !?*llvm.Value {
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
         const struct_ptr = try self.resolveInst(ty_op.operand);
-        const struct_ptr_ty = self.air.typeOf(ty_op.operand);
+        const struct_ptr_ty = self.typeOf(ty_op.operand);
         return self.fieldPtr(inst, struct_ptr, struct_ptr_ty, field_index);
     }
@@ -5993,7 +5996,7 @@ pub const FuncGen = struct {
         const inst = body_tail[0];
         const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
         const struct_field = self.air.extraData(Air.StructField, ty_pl.payload).data;
-        const struct_ty = self.air.typeOf(struct_field.struct_operand);
+        const struct_ty = self.typeOf(struct_field.struct_operand);
         const struct_llvm_val = try self.resolveInst(struct_field.struct_operand);
         const field_index = struct_field.field_index;
         const field_ty = struct_ty.structFieldType(field_index);
@@ -6234,7 +6237,7 @@ pub const FuncGen = struct {
         const pl_op = self.air.instructions.items(.data)[inst].pl_op;
         const operand = try self.resolveInst(pl_op.operand);
         const name = self.air.nullTerminatedString(pl_op.payload);
-        const ptr_ty = self.air.typeOf(pl_op.operand);
+        const ptr_ty = self.typeOf(pl_op.operand);
 
         const di_local_var = dib.createAutoVariable(
             self.di_scope.?,
@@ -6259,7 +6262,7 @@ pub const FuncGen = struct {
         const dib = self.dg.object.di_builder orelse return null;
         const pl_op = self.air.instructions.items(.data)[inst].pl_op;
         const operand = try self.resolveInst(pl_op.operand);
-        const operand_ty = self.air.typeOf(pl_op.operand);
+        const operand_ty = self.typeOf(pl_op.operand);
         const name = self.air.nullTerminatedString(pl_op.payload);
 
         if (needDbgVarWorkaround(self.dg)) {
@@ -6361,7 +6364,7 @@ pub const FuncGen = struct {
             llvm_ret_indirect[i] = (output != .none) and constraintAllowsMemory(constraint);
             if (output != .none) {
                 const output_inst = try self.resolveInst(output);
-                const output_ty = self.air.typeOf(output);
+                const output_ty = self.typeOf(output);
                 assert(output_ty.zigTypeTag(mod) == .Pointer);
                 const elem_llvm_ty = try self.dg.lowerPtrElemTy(output_ty.childType());
 
@@ -6379,7 +6382,7 @@ pub const FuncGen = struct {
                     llvm_ret_i += 1;
                 }
             } else {
-                const ret_ty = self.air.typeOfIndex(inst);
+                const ret_ty = self.typeOfIndex(inst);
                 llvm_ret_types[llvm_ret_i] = try self.dg.lowerType(ret_ty);
                 llvm_ret_i += 1;
             }
@@ -6414,7 +6417,7 @@ pub const FuncGen = struct {
             extra_i += (constraint.len + name.len + (2 + 3)) / 4;
 
             const arg_llvm_value = try self.resolveInst(input);
-            const arg_ty = self.air.typeOf(input);
+            const arg_ty = self.typeOf(input);
             var llvm_elem_ty: ?*llvm.Type = null;
             if (isByRef(arg_ty, mod)) {
                 llvm_elem_ty = try self.dg.lowerPtrElemTy(arg_ty);
@@ -6636,7 +6639,7 @@ pub const FuncGen = struct {
 
             if (output != .none) {
                 const output_ptr = try self.resolveInst(output);
-                const output_ptr_ty = self.air.typeOf(output);
+                const output_ptr_ty = self.typeOf(output);
 
                 const store_inst = self.builder.buildStore(output_value, output_ptr);
                 store_inst.setAlignment(output_ptr_ty.ptrAlignment(mod));
@@ -6657,7 +6660,7 @@ pub const FuncGen = struct {
     ) !?*llvm.Value {
         const un_op = self.air.instructions.items(.data)[inst].un_op;
         const operand = try self.resolveInst(un_op);
-        const operand_ty = self.air.typeOf(un_op);
+        const operand_ty = self.typeOf(un_op);
         const optional_ty = if (operand_is_ptr) operand_ty.childType() else operand_ty;
         const optional_llvm_ty = try self.dg.lowerType(optional_ty);
         var buf: Type.Payload.ElemType = undefined;
@@ -6706,7 +6709,7 @@ pub const FuncGen = struct {
         const mod = self.dg.module;
         const un_op = self.air.instructions.items(.data)[inst].un_op;
         const operand = try self.resolveInst(un_op);
-        const operand_ty = self.air.typeOf(un_op);
+        const operand_ty = self.typeOf(un_op);
         const err_union_ty = if (operand_is_ptr) operand_ty.childType() else operand_ty;
         const payload_ty = err_union_ty.errorUnionPayload();
         const err_set_ty = try self.dg.lowerType(Type.anyerror);
@@ -6746,7 +6749,7 @@ pub const FuncGen = struct {
         const mod = self.dg.module;
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
         const operand = try self.resolveInst(ty_op.operand);
-        const optional_ty = self.air.typeOf(ty_op.operand).childType();
+        const optional_ty = self.typeOf(ty_op.operand).childType();
         var buf: Type.Payload.ElemType = undefined;
         const payload_ty = optional_ty.optionalChild(&buf);
         if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
@@ -6768,7 +6771,7 @@ pub const FuncGen = struct {
         const mod = self.dg.module;
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
         const operand = try self.resolveInst(ty_op.operand);
-        const optional_ty = self.air.typeOf(ty_op.operand).childType();
+        const optional_ty = self.typeOf(ty_op.operand).childType();
         var buf: Type.Payload.ElemType = undefined;
         const payload_ty = optional_ty.optionalChild(&buf);
         const non_null_bit = self.context.intType(8).constInt(1, .False);
@@ -6801,8 +6804,8 @@ pub const FuncGen = struct {
         const inst = body_tail[0];
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
         const operand = try self.resolveInst(ty_op.operand);
-        const optional_ty = self.air.typeOf(ty_op.operand);
-        const payload_ty = self.air.typeOfIndex(inst);
+        const optional_ty = self.typeOf(ty_op.operand);
+        const payload_ty = self.typeOfIndex(inst);
         if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return null;
 
         if (optional_ty.optionalReprIsPayload(mod)) {
@@ -6824,9 +6827,9 @@ pub const FuncGen = struct {
         const inst = body_tail[0];
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
         const operand = try self.resolveInst(ty_op.operand);
-        const operand_ty = self.air.typeOf(ty_op.operand);
+        const operand_ty = self.typeOf(ty_op.operand);
         const err_union_ty = if (operand_is_ptr) operand_ty.childType() else operand_ty;
-        const result_ty = self.air.typeOfIndex(inst);
+        const result_ty = self.typeOfIndex(inst);
         const payload_ty = if (operand_is_ptr) result_ty.childType() else result_ty;
 
         if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
@@ -6859,7 +6862,7 @@ pub const FuncGen = struct {
         const mod = self.dg.module;
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
         const operand = try self.resolveInst(ty_op.operand);
-        const operand_ty = self.air.typeOf(ty_op.operand);
+        const operand_ty = self.typeOf(ty_op.operand);
         const err_union_ty = if (operand_is_ptr) operand_ty.childType() else operand_ty;
         if (err_union_ty.errorUnionSet().errorSetIsEmpty()) {
             const err_llvm_ty = try self.dg.lowerType(Type.anyerror);
@@ -6893,7 +6896,7 @@ pub const FuncGen = struct {
         const mod = self.dg.module;
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
         const operand = try self.resolveInst(ty_op.operand);
-        const err_union_ty = self.air.typeOf(ty_op.operand).childType();
+        const err_union_ty = self.typeOf(ty_op.operand).childType();
 
         const payload_ty = err_union_ty.errorUnionPayload();
         const non_error_val = try self.dg.lowerValue(.{ .ty = Type.anyerror, .val = Value.zero });
@@ -6946,12 +6949,12 @@ pub const FuncGen = struct {
     fn airWrapOptional(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
         const mod = self.dg.module;
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-        const payload_ty = self.air.typeOf(ty_op.operand);
+        const payload_ty = self.typeOf(ty_op.operand);
         const non_null_bit = self.context.intType(8).constInt(1, .False);
         comptime assert(optional_layout_version == 3);
         if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return non_null_bit;
         const operand = try self.resolveInst(ty_op.operand);
-        const optional_ty = self.air.typeOfIndex(inst);
+        const optional_ty = self.typeOfIndex(inst);
         if (optional_ty.optionalReprIsPayload(mod)) {
             return operand;
         }
@@ -6976,9 +6979,9 @@ pub const FuncGen = struct {
     fn airWrapErrUnionPayload(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
         const mod = self.dg.module;
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-        const err_un_ty = self.air.typeOfIndex(inst);
+        const err_un_ty = self.typeOfIndex(inst);
         const operand = try self.resolveInst(ty_op.operand);
-        const payload_ty = self.air.typeOf(ty_op.operand);
+        const payload_ty = self.typeOf(ty_op.operand);
         if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
             return operand;
         }
@@ -7009,7 +7012,7 @@ pub const FuncGen = struct {
     fn airWrapErrUnionErr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
         const mod = self.dg.module;
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-        const err_un_ty = self.air.typeOfIndex(inst);
+        const err_un_ty = self.typeOfIndex(inst);
         const payload_ty = err_un_ty.errorUnionPayload();
         const operand = try self.resolveInst(ty_op.operand);
         if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
@@ -7069,7 +7072,7 @@ pub const FuncGen = struct {
         const extra = self.air.extraData(Air.Bin, data.payload).data;
 
         const vector_ptr = try self.resolveInst(data.vector_ptr);
-        const vector_ptr_ty = self.air.typeOf(data.vector_ptr);
+        const vector_ptr_ty = self.typeOf(data.vector_ptr);
         const index = try self.resolveInst(extra.lhs);
         const operand = try self.resolveInst(extra.rhs);
 
@@ -7090,7 +7093,7 @@ pub const FuncGen = struct {
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
         const lhs = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
-        const scalar_ty = self.air.typeOfIndex(inst).scalarType(mod);
+        const scalar_ty = self.typeOfIndex(inst).scalarType(mod);
 
         if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.fmin, scalar_ty, 2, .{ lhs, rhs });
         if (scalar_ty.isSignedInt(mod)) return self.builder.buildSMin(lhs, rhs, "");
@@ -7102,7 +7105,7 @@ pub const FuncGen = struct {
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
         const lhs = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
-        const scalar_ty = self.air.typeOfIndex(inst).scalarType(mod);
+        const scalar_ty = self.typeOfIndex(inst).scalarType(mod);
 
         if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.fmax, scalar_ty, 2, .{ lhs, rhs });
         if (scalar_ty.isSignedInt(mod)) return self.builder.buildSMax(lhs, rhs, "");
@@ -7114,7 +7117,7 @@ pub const FuncGen = struct {
         const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
         const ptr = try self.resolveInst(bin_op.lhs);
         const len = try self.resolveInst(bin_op.rhs);
-        const inst_ty = self.air.typeOfIndex(inst);
+        const inst_ty = self.typeOfIndex(inst);
         const llvm_slice_ty = try self.dg.lowerType(inst_ty);
 
         // In case of slicing a global, the result type looks something like `{ i8*, i64 }`
@@ -7130,7 +7133,7 @@ pub const FuncGen = struct {
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
         const lhs = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
-        const inst_ty = self.air.typeOfIndex(inst);
+        const inst_ty = self.typeOfIndex(inst);
         const scalar_ty = inst_ty.scalarType(mod);
 
         if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.add, inst_ty, 2, .{ lhs, rhs });
@@ -7153,7 +7156,7 @@ pub const FuncGen = struct {
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
         const lhs = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
-        const inst_ty = self.air.typeOfIndex(inst);
+        const inst_ty = self.typeOfIndex(inst);
         const scalar_ty = inst_ty.scalarType(mod);
 
         if (scalar_ty.isAnyFloat()) return self.todo("saturating float add", .{});
@@ -7169,7 +7172,7 @@ pub const FuncGen = struct {
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
         const lhs = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
-        const inst_ty = self.air.typeOfIndex(inst);
+        const inst_ty = self.typeOfIndex(inst);
         const scalar_ty = inst_ty.scalarType(mod);
 
         if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.sub, inst_ty, 2, .{ lhs, rhs });
@@ -7192,7 +7195,7 @@ pub const FuncGen = struct {
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
         const lhs = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
-        const inst_ty = self.air.typeOfIndex(inst);
+        const inst_ty = self.typeOfIndex(inst);
         const scalar_ty = inst_ty.scalarType(mod);
 
         if (scalar_ty.isAnyFloat()) return self.todo("saturating float sub", .{});
@@ -7207,7 +7210,7 @@ pub const FuncGen = struct {
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
         const lhs = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
-        const inst_ty = self.air.typeOfIndex(inst);
+        const inst_ty = self.typeOfIndex(inst);
         const scalar_ty = inst_ty.scalarType(mod);
 
         if (scalar_ty.isAnyFloat()) return self.buildFloatOp(.mul, inst_ty, 2, .{ lhs, rhs });
@@ -7230,7 +7233,7 @@ pub const FuncGen = struct {
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
         const lhs = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
-        const inst_ty = self.air.typeOfIndex(inst);
+        const inst_ty = self.typeOfIndex(inst);
         const scalar_ty = inst_ty.scalarType(mod);
 
         if (scalar_ty.isAnyFloat()) return self.todo("saturating float mul", .{});
@@ -7244,7 +7247,7 @@ pub const FuncGen = struct {
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
         const lhs = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
-        const inst_ty = self.air.typeOfIndex(inst);
+        const inst_ty = self.typeOfIndex(inst);
 
         return self.buildFloatOp(.div, inst_ty, 2, .{ lhs, rhs });
     }
@@ -7256,7 +7259,7 @@ pub const FuncGen = struct {
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
         const lhs = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
-        const inst_ty = self.air.typeOfIndex(inst);
+        const inst_ty = self.typeOfIndex(inst);
         const scalar_ty = inst_ty.scalarType(mod);
 
         if (scalar_ty.isRuntimeFloat()) {
@@ -7274,7 +7277,7 @@ pub const FuncGen = struct {
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
         const lhs = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
-        const inst_ty = self.air.typeOfIndex(inst);
+        const inst_ty = self.typeOfIndex(inst);
         const scalar_ty = inst_ty.scalarType(mod);
 
         if (scalar_ty.isRuntimeFloat()) {
@@ -7314,7 +7317,7 @@ pub const FuncGen = struct {
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
         const lhs = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
-        const inst_ty = self.air.typeOfIndex(inst);
+        const inst_ty = self.typeOfIndex(inst);
         const scalar_ty = inst_ty.scalarType(mod);
 
         if (scalar_ty.isRuntimeFloat()) return self.buildFloatOp(.div, inst_ty, 2, .{ lhs, rhs });
@@ -7329,7 +7332,7 @@ pub const FuncGen = struct {
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
         const lhs = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
-        const inst_ty = self.air.typeOfIndex(inst);
+        const inst_ty = self.typeOfIndex(inst);
         const scalar_ty = inst_ty.scalarType(mod);
 
         if (scalar_ty.isRuntimeFloat()) return self.buildFloatOp(.fmod, inst_ty, 2, .{ lhs, rhs });
@@ -7344,7 +7347,7 @@ pub const FuncGen = struct {
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
         const lhs = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
-        const inst_ty = self.air.typeOfIndex(inst);
+        const inst_ty = self.typeOfIndex(inst);
         const inst_llvm_ty = try self.dg.lowerType(inst_ty);
         const scalar_ty = inst_ty.scalarType(mod);
 
@@ -7386,7 +7389,7 @@ pub const FuncGen = struct {
         const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
         const ptr = try self.resolveInst(bin_op.lhs);
         const offset = try self.resolveInst(bin_op.rhs);
-        const ptr_ty = self.air.typeOf(bin_op.lhs);
+        const ptr_ty = self.typeOf(bin_op.lhs);
         const llvm_elem_ty = try self.dg.lowerPtrElemTy(ptr_ty.childType());
         switch (ptr_ty.ptrSize()) {
             .One => {
@@ -7412,7 +7415,7 @@ pub const FuncGen = struct {
         const ptr = try self.resolveInst(bin_op.lhs);
         const offset = try self.resolveInst(bin_op.rhs);
         const negative_offset = self.builder.buildNeg(offset, "");
-        const ptr_ty = self.air.typeOf(bin_op.lhs);
+        const ptr_ty = self.typeOf(bin_op.lhs);
         const llvm_elem_ty = try self.dg.lowerPtrElemTy(ptr_ty.childType());
         switch (ptr_ty.ptrSize()) {
             .One => {
@@ -7447,9 +7450,9 @@ pub const FuncGen = struct {
         const lhs = try self.resolveInst(extra.lhs);
         const rhs = try self.resolveInst(extra.rhs);
 
-        const lhs_ty = self.air.typeOf(extra.lhs);
+        const lhs_ty = self.typeOf(extra.lhs);
         const scalar_ty = lhs_ty.scalarType(mod);
-        const dest_ty = self.air.typeOfIndex(inst);
+        const dest_ty = self.typeOfIndex(inst);
 
         const intrinsic_name = if (scalar_ty.isSignedInt(mod)) signed_intrinsic else unsigned_intrinsic;
@@ -7735,7 +7738,7 @@ pub const FuncGen = struct {
         const mulend2 = try self.resolveInst(extra.rhs);
         const addend = try self.resolveInst(pl_op.operand);
 
-        const ty = self.air.typeOfIndex(inst);
+        const ty = self.typeOfIndex(inst);
         return self.buildFloatOp(.fma, ty, 3, .{ mulend1, mulend2, addend });
     }
@@ -7747,12 +7750,12 @@ pub const FuncGen = struct {
         const lhs = try self.resolveInst(extra.lhs);
         const rhs = try self.resolveInst(extra.rhs);
 
-        const lhs_ty = self.air.typeOf(extra.lhs);
-        const rhs_ty = self.air.typeOf(extra.rhs);
+        const lhs_ty = self.typeOf(extra.lhs);
+        const rhs_ty = self.typeOf(extra.rhs);
         const lhs_scalar_ty = lhs_ty.scalarType(mod);
         const rhs_scalar_ty = rhs_ty.scalarType(mod);
 
-        const dest_ty = self.air.typeOfIndex(inst);
+        const dest_ty = self.typeOfIndex(inst);
         const llvm_dest_ty = try self.dg.lowerType(dest_ty);
 
         const casted_rhs = if (rhs_scalar_ty.bitSize(mod) < lhs_scalar_ty.bitSize(mod))
@@ -7821,8 +7824,8 @@ pub const FuncGen = struct {
         const lhs = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
 
-        const lhs_ty = self.air.typeOf(bin_op.lhs);
-        const rhs_ty = self.air.typeOf(bin_op.rhs);
+        const lhs_ty = self.typeOf(bin_op.lhs);
+        const rhs_ty = self.typeOf(bin_op.rhs);
         const lhs_scalar_ty = lhs_ty.scalarType(mod);
         const rhs_scalar_ty = rhs_ty.scalarType(mod);
 
@@ -7841,8 +7844,8 @@ pub const FuncGen = struct {
         const lhs = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
 
-        const lhs_type = self.air.typeOf(bin_op.lhs);
-        const rhs_type = self.air.typeOf(bin_op.rhs);
+        const lhs_type = self.typeOf(bin_op.lhs);
+        const rhs_type = self.typeOf(bin_op.rhs);
         const lhs_scalar_ty = lhs_type.scalarType(mod);
         const rhs_scalar_ty = rhs_type.scalarType(mod);
 
@@ -7860,8 +7863,8 @@ pub const FuncGen = struct {
         const lhs = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
 
-        const lhs_ty = self.air.typeOf(bin_op.lhs);
-        const rhs_ty = self.air.typeOf(bin_op.rhs);
+        const lhs_ty = self.typeOf(bin_op.lhs);
+        const rhs_ty = self.typeOf(bin_op.rhs);
         const lhs_scalar_ty = lhs_ty.scalarType(mod);
         const rhs_scalar_ty = rhs_ty.scalarType(mod);
         const lhs_bits = lhs_scalar_ty.bitSize(mod);
@@ -7903,8 +7906,8 @@ pub const FuncGen = struct {
         const lhs = try self.resolveInst(bin_op.lhs);
         const rhs = try self.resolveInst(bin_op.rhs);
 
-        const lhs_ty = self.air.typeOf(bin_op.lhs);
-        const rhs_ty = self.air.typeOf(bin_op.rhs);
+        const lhs_ty = self.typeOf(bin_op.lhs);
+        const rhs_ty = self.typeOf(bin_op.rhs);
         const lhs_scalar_ty = lhs_ty.scalarType(mod);
         const rhs_scalar_ty = rhs_ty.scalarType(mod);
 
@@ -7932,11 +7935,11 @@ pub const FuncGen = struct {
     fn airIntCast(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
         const mod = self.dg.module;
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-        const dest_ty = self.air.typeOfIndex(inst);
+        const dest_ty = self.typeOfIndex(inst);
         const dest_info = dest_ty.intInfo(mod);
         const dest_llvm_ty = try self.dg.lowerType(dest_ty);
         const operand = try self.resolveInst(ty_op.operand);
-        const operand_ty = self.air.typeOf(ty_op.operand);
+        const operand_ty = self.typeOf(ty_op.operand);
         const operand_info = operand_ty.intInfo(mod);
 
         if (operand_info.bits < dest_info.bits) {
@@ -7954,7 +7957,7 @@ pub const FuncGen = struct {
     fn airTrunc(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
         const operand = try self.resolveInst(ty_op.operand);
-        const dest_llvm_ty = try self.dg.lowerType(self.air.typeOfIndex(inst));
+        const dest_llvm_ty = try self.dg.lowerType(self.typeOfIndex(inst));
         return self.builder.buildTrunc(operand, dest_llvm_ty, "");
     }
@@ -7962,8 +7965,8 @@ pub const FuncGen = struct {
         const mod = self.dg.module;
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
         const operand = try self.resolveInst(ty_op.operand);
-        const operand_ty = self.air.typeOf(ty_op.operand);
-        const dest_ty = self.air.typeOfIndex(inst);
+        const operand_ty = self.typeOf(ty_op.operand);
+        const dest_ty = self.typeOfIndex(inst);
         const target = mod.getTarget();
         const dest_bits = dest_ty.floatBits(target);
         const src_bits = operand_ty.floatBits(target);
@@ -7992,8 +7995,8 @@ pub const FuncGen = struct {
         const mod = self.dg.module;
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
         const operand = try self.resolveInst(ty_op.operand);
-        const operand_ty = self.air.typeOf(ty_op.operand);
-        const dest_ty = self.air.typeOfIndex(inst);
+        const operand_ty = self.typeOf(ty_op.operand);
+        const dest_ty = self.typeOfIndex(inst);
         const target = mod.getTarget();
         const dest_bits = dest_ty.floatBits(target);
         const src_bits = operand_ty.floatBits(target);
@@ -8021,16 +8024,16 @@ pub const FuncGen = struct {
     fn airPtrToInt(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
         const un_op = self.air.instructions.items(.data)[inst].un_op;
         const operand = try self.resolveInst(un_op);
-        const ptr_ty = self.air.typeOf(un_op);
+        const ptr_ty = self.typeOf(un_op);
         const operand_ptr = self.sliceOrArrayPtr(operand, ptr_ty);
-        const dest_llvm_ty = try self.dg.lowerType(self.air.typeOfIndex(inst));
+        const dest_llvm_ty = try self.dg.lowerType(self.typeOfIndex(inst));
         return self.builder.buildPtrToInt(operand_ptr, dest_llvm_ty, "");
     }
 
     fn airBitCast(self: *FuncGen, inst: Air.Inst.Index) !*llvm.Value {
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-        const operand_ty = self.air.typeOf(ty_op.operand);
-        const inst_ty = self.air.typeOfIndex(inst);
+        const operand_ty = self.typeOf(ty_op.operand);
+        const inst_ty = self.typeOfIndex(inst);
         const operand = try self.resolveInst(ty_op.operand);
         return self.bitCast(operand, operand_ty, inst_ty);
     }
@@ -8159,17 +8162,17 @@ pub const FuncGen = struct {
     }
 
     fn airArg(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
         const arg_val = self.args[self.arg_index];
         self.arg_index += 1;
 
-        const inst_ty = self.air.typeOfIndex(inst);
+        const inst_ty = self.typeOfIndex(inst);
         if (self.dg.object.di_builder) |dib| {
             if (needDbgVarWorkaround(self.dg)) {
                 return arg_val;
             }
 
             const src_index = self.air.instructions.items(.data)[inst].arg.src_index;
-            const mod = self.dg.module;
             const func = self.dg.decl.getFunction().?;
             const lbrace_line = mod.declPtr(func.owner_decl).src_line + func.lbrace_line + 1;
             const lbrace_col = func.lbrace_column + 1;
@@ -8203,9 +8206,9 @@ pub const FuncGen = struct {
     }
 
     fn airAlloc(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
-        const ptr_ty = self.air.typeOfIndex(inst);
-        const pointee_type = ptr_ty.childType();
         const mod = self.dg.module;
+        const ptr_ty = self.typeOfIndex(inst);
+        const pointee_type = ptr_ty.childType();
         if (!pointee_type.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return self.dg.lowerPtrToVoid(ptr_ty);
 
         const pointee_llvm_ty = try self.dg.lowerType(pointee_type);
@@ -8214,9 +8217,9 @@ pub const FuncGen = struct {
     }
 
     fn airRetPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
-        const ptr_ty = self.air.typeOfIndex(inst);
-        const ret_ty = ptr_ty.childType();
         const mod = self.dg.module;
+        const ptr_ty = self.typeOfIndex(inst);
+        const ret_ty = ptr_ty.childType();
         if (!ret_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return self.dg.lowerPtrToVoid(ptr_ty);
         if (self.ret_ptr) |ret_ptr| return ret_ptr;
         const ret_llvm_ty = try self.dg.lowerType(ret_ty);
@@ -8232,7 +8235,7 @@ pub const FuncGen = struct {
     fn airStore(self: *FuncGen, inst: Air.Inst.Index, safety: bool) !?*llvm.Value {
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
         const dest_ptr = try self.resolveInst(bin_op.lhs);
-        const ptr_ty = self.air.typeOf(bin_op.lhs);
+        const ptr_ty = self.typeOf(bin_op.lhs);
         const operand_ty = ptr_ty.childType();
 
         const mod = self.dg.module;
@@ -8285,7 +8288,7 @@ pub const FuncGen = struct {
         const mod = fg.dg.module;
         const inst = body_tail[0];
         const ty_op = fg.air.instructions.items(.data)[inst].ty_op;
-        const ptr_ty = fg.air.typeOf(ty_op.operand);
+        const ptr_ty = fg.typeOf(ty_op.operand);
         const ptr_info = ptr_ty.ptrInfo().data;
         const ptr = try fg.resolveInst(ty_op.operand);
 
@@ -8361,7 +8364,7 @@ pub const FuncGen = struct {
         const ptr = try self.resolveInst(extra.ptr);
         var expected_value = try self.resolveInst(extra.expected_value);
         var new_value = try self.resolveInst(extra.new_value);
-        const operand_ty = self.air.typeOf(extra.ptr).elemType();
+        const operand_ty = self.typeOf(extra.ptr).elemType();
         const opt_abi_ty = self.dg.getAtomicAbiType(operand_ty, false);
         if (opt_abi_ty) |abi_ty| {
             // operand needs widening and truncating
@@ -8383,7 +8386,7 @@ pub const FuncGen = struct {
         );
         result.setWeak(llvm.Bool.fromBool(is_weak));
 
-        const optional_ty = self.air.typeOfIndex(inst);
+        const optional_ty = self.typeOfIndex(inst);
 
         var payload = self.builder.buildExtractValue(result, 0, "");
         if (opt_abi_ty != null) {
@@ -8406,7 +8409,7 @@ pub const FuncGen = struct {
         const pl_op = self.air.instructions.items(.data)[inst].pl_op;
         const extra = self.air.extraData(Air.AtomicRmw, pl_op.payload).data;
         const ptr = try self.resolveInst(pl_op.operand);
-        const ptr_ty = self.air.typeOf(pl_op.operand);
+        const ptr_ty = self.typeOf(pl_op.operand);
         const operand_ty = ptr_ty.elemType();
         const operand = try self.resolveInst(extra.operand);
         const is_signed_int = operand_ty.isSignedInt(mod);
@@ -8461,7 +8464,7 @@ pub const FuncGen = struct {
         const mod = self.dg.module;
         const atomic_load = self.air.instructions.items(.data)[inst].atomic_load;
         const ptr = try self.resolveInst(atomic_load.ptr);
-        const ptr_ty = self.air.typeOf(atomic_load.ptr);
+        const ptr_ty = self.typeOf(atomic_load.ptr);
         const ptr_info = ptr_ty.ptrInfo().data;
         const elem_ty = ptr_info.pointee_type;
         if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod))
@@ -8494,7 +8497,7 @@ pub const FuncGen = struct {
     ) !?*llvm.Value {
         const mod = self.dg.module;
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
-        const ptr_ty = self.air.typeOf(bin_op.lhs);
+        const ptr_ty = self.typeOf(bin_op.lhs);
         const operand_ty = ptr_ty.childType();
         if (!operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return null;
         const ptr = try self.resolveInst(bin_op.lhs);
@@ -8517,8 +8520,8 @@ pub const FuncGen = struct {
         const mod = self.dg.module;
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
         const dest_slice = try self.resolveInst(bin_op.lhs);
-        const ptr_ty = self.air.typeOf(bin_op.lhs);
-        const elem_ty = self.air.typeOf(bin_op.rhs);
+        const ptr_ty = self.typeOf(bin_op.lhs);
+        const elem_ty = self.typeOf(bin_op.rhs);
         const module = self.dg.module;
         const target = module.getTarget();
         const dest_ptr_align = ptr_ty.ptrAlignment(mod);
@@ -8641,9 +8644,9 @@ pub const FuncGen = struct {
     fn airMemcpy(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
         const dest_slice = try self.resolveInst(bin_op.lhs);
-        const dest_ptr_ty = self.air.typeOf(bin_op.lhs);
+        const dest_ptr_ty = self.typeOf(bin_op.lhs);
         const src_slice = try self.resolveInst(bin_op.rhs);
-        const src_ptr_ty = self.air.typeOf(bin_op.rhs);
+        const src_ptr_ty = self.typeOf(bin_op.rhs);
         const src_ptr = self.sliceOrArrayPtr(src_slice, src_ptr_ty);
         const len = self.sliceOrArrayLenInBytes(dest_slice, dest_ptr_ty);
         const dest_ptr = self.sliceOrArrayPtr(dest_slice, dest_ptr_ty);
@@ -8663,7 +8666,7 @@ pub const FuncGen = struct {
     fn airSetUnionTag(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
         const mod = self.dg.module;
         const bin_op = self.air.instructions.items(.data)[inst].bin_op;
-        const un_ty = self.air.typeOf(bin_op.lhs).childType();
+        const un_ty = self.typeOf(bin_op.lhs).childType();
         const layout = un_ty.unionGetLayout(mod);
         if (layout.tag_size == 0) return null;
         const union_ptr = try self.resolveInst(bin_op.lhs);
@@ -8684,7 +8687,7 @@ pub const FuncGen = struct {
     fn airGetUnionTag(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
         const mod = self.dg.module;
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-        const un_ty = self.air.typeOf(ty_op.operand);
+        const un_ty = self.typeOf(ty_op.operand);
         const layout = un_ty.unionGetLayout(mod);
         if (layout.tag_size == 0) return null;
         const union_handle = try self.resolveInst(ty_op.operand);
@@ -8708,7 +8711,7 @@ pub const FuncGen = struct {
     fn airUnaryOp(self: *FuncGen, inst: Air.Inst.Index, comptime op: FloatOp) !?*llvm.Value {
         const un_op = self.air.instructions.items(.data)[inst].un_op;
         const operand = try self.resolveInst(un_op);
-        const operand_ty = self.air.typeOf(un_op);
+        const operand_ty = self.typeOf(un_op);
 
         return self.buildFloatOp(op, operand_ty, 1, .{operand});
     }
@@ -8718,7 +8721,7 @@ pub const FuncGen = struct {
         const un_op = self.air.instructions.items(.data)[inst].un_op;
         const operand = try self.resolveInst(un_op);
 
-        const operand_ty = self.air.typeOf(un_op);
+        const operand_ty = self.typeOf(un_op);
 
         return self.buildFloatOp(.neg, operand_ty, 1, .{operand});
     }
@@ -8726,7 +8729,7 @@ pub const FuncGen = struct {
     fn airClzCtz(self: *FuncGen, inst: Air.Inst.Index, llvm_fn_name: []const u8) !?*llvm.Value {
         const mod = self.dg.module;
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-        const operand_ty = self.air.typeOf(ty_op.operand);
+        const operand_ty = self.typeOf(ty_op.operand);
         const operand = try self.resolveInst(ty_op.operand);
 
         const llvm_i1 = self.context.intType(1);
@@ -8735,7 +8738,7 @@ pub const FuncGen = struct {
         const params = [_]*llvm.Value{ operand, llvm_i1.constNull() };
         const wrong_size_result = self.builder.buildCall(fn_val.globalGetValueType(), fn_val, &params, params.len, .C, .Auto, "");
 
-        const result_ty = self.air.typeOfIndex(inst);
+        const result_ty = self.typeOfIndex(inst);
         const result_llvm_ty = try self.dg.lowerType(result_ty);
 
         const bits = operand_ty.intInfo(mod).bits;
@@ -8752,7 +8755,7 @@ pub const FuncGen = struct {
     fn airBitOp(self: *FuncGen, inst: Air.Inst.Index, llvm_fn_name: []const u8) !?*llvm.Value {
         const mod = self.dg.module;
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-        const operand_ty = self.air.typeOf(ty_op.operand);
+        const operand_ty = self.typeOf(ty_op.operand);
         const operand = try self.resolveInst(ty_op.operand);
 
         const params = [_]*llvm.Value{operand};
@@ -8760,7 +8763,7 @@ pub const FuncGen = struct {
         const fn_val = self.getIntrinsic(llvm_fn_name, &.{operand_llvm_ty});
         const wrong_size_result = self.builder.buildCall(fn_val.globalGetValueType(), fn_val, &params, params.len, .C, .Auto, "");
 
-        const result_ty = self.air.typeOfIndex(inst);
+        const result_ty = self.typeOfIndex(inst);
         const result_llvm_ty = try self.dg.lowerType(result_ty);
 
         const bits = operand_ty.intInfo(mod).bits;
@@ -8777,7 +8780,7 @@ pub const FuncGen = struct {
     fn airByteSwap(self: *FuncGen, inst: Air.Inst.Index, llvm_fn_name: []const u8) !?*llvm.Value {
         const mod = self.dg.module;
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-        const operand_ty = self.air.typeOf(ty_op.operand);
+        const operand_ty = self.typeOf(ty_op.operand);
         var bits = operand_ty.intInfo(mod).bits;
         assert(bits % 8 == 0);
 
@@ -8815,7 +8818,7 @@ pub const FuncGen = struct {
         const wrong_size_result = self.builder.buildCall(fn_val.globalGetValueType(), fn_val, &params, params.len, .C, .Auto, "");
 
-        const result_ty = self.air.typeOfIndex(inst);
+        const result_ty = self.typeOfIndex(inst);
         const result_llvm_ty = try self.dg.lowerType(result_ty);
         const result_bits = result_ty.intInfo(mod).bits;
         if (bits > result_bits) {
@@ -8876,7 +8879,7 @@ pub const FuncGen = struct {
     fn airIsNamedEnumValue(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
         const un_op = self.air.instructions.items(.data)[inst].un_op;
         const operand = try self.resolveInst(un_op);
-        const enum_ty = self.air.typeOf(un_op);
+        const enum_ty = self.typeOf(un_op);
 
         const llvm_fn = try self.getIsNamedEnumValueFunction(enum_ty);
         const params = [_]*llvm.Value{operand};
@@ -8954,7 +8957,7 @@ pub const FuncGen = struct {
     fn airTagName(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
         const un_op = self.air.instructions.items(.data)[inst].un_op;
         const operand = try self.resolveInst(un_op);
-        const enum_ty = self.air.typeOf(un_op);
+        const enum_ty = self.typeOf(un_op);
 
         const llvm_fn = try self.getEnumTagNameFunction(enum_ty);
         const params = [_]*llvm.Value{operand};
@@ -9083,7 +9086,7 @@ pub const FuncGen = struct {
     fn airErrorName(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
         const un_op = self.air.instructions.items(.data)[inst].un_op;
         const operand = try self.resolveInst(un_op);
-        const slice_ty = self.air.typeOfIndex(inst);
+        const slice_ty = self.typeOfIndex(inst);
         const slice_llvm_ty = try self.dg.lowerType(slice_ty);
 
         const error_name_table_ptr = try self.getErrorNameTable();
@@ -9097,7 +9100,7 @@ pub const FuncGen = struct {
     fn airSplat(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
         const scalar = try self.resolveInst(ty_op.operand);
-        const vector_ty = self.air.typeOfIndex(inst);
+        const vector_ty = self.typeOfIndex(inst);
         const len = vector_ty.vectorLen();
         return self.builder.buildVectorSplat(len, scalar, "");
     }
@@ -9120,7 +9123,7 @@ pub const FuncGen = struct {
         const b = try self.resolveInst(extra.b);
         const mask = self.air.values[extra.mask];
         const mask_len = extra.mask_len;
-        const a_len = self.air.typeOf(extra.a).vectorLen();
+        const a_len = self.typeOf(extra.a).vectorLen();
 
         // LLVM uses integers larger than the length of the first array to
         // index into the second array. This was deemed unnecessarily fragile
@@ -9219,8 +9222,8 @@ pub const FuncGen = struct {
         const reduce = self.air.instructions.items(.data)[inst].reduce;
         const operand = try self.resolveInst(reduce.operand);
-        const operand_ty = self.air.typeOf(reduce.operand);
-        const scalar_ty = self.air.typeOfIndex(inst);
+        const operand_ty = self.typeOf(reduce.operand);
+        const scalar_ty = self.typeOfIndex(inst);
 
         switch (reduce.operation) {
             .And => return self.builder.buildAndReduce(operand),
@@ -9300,12 +9303,12 @@ pub const FuncGen = struct {
     }
 
     fn airAggregateInit(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+        const mod = self.dg.module;
         const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
-        const result_ty = self.air.typeOfIndex(inst);
+        const result_ty = self.typeOfIndex(inst);
         const len = @intCast(usize, result_ty.arrayLen());
         const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]);
         const llvm_result_ty = try self.dg.lowerType(result_ty);
-        const mod = self.dg.module;
 
         switch (result_ty.zigTypeTag(mod)) {
             .Vector => {
@@ -9370,7 +9373,7 @@ pub const FuncGen = struct {
                     const field_ptr = self.builder.buildInBoundsGEP(llvm_result_ty, alloca_inst, &indices, indices.len, "");
                     var field_ptr_payload: Type.Payload.Pointer = .{
                         .data = .{
-                            .pointee_type = self.air.typeOf(elem),
+                            .pointee_type = self.typeOf(elem),
                             .@"align" = result_ty.structFieldAlign(i, mod),
                             .@"addrspace" = .generic,
                         },
@@ -9440,7 +9443,7 @@ pub const FuncGen = struct {
         const mod = self.dg.module;
         const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
         const extra = self.air.extraData(Air.UnionInit, ty_pl.payload).data;
-        const union_ty = self.air.typeOfIndex(inst);
+        const union_ty = self.typeOfIndex(inst);
         const union_llvm_ty = try self.dg.lowerType(union_ty);
         const layout = union_ty.unionGetLayout(mod);
         const union_obj = union_ty.cast(Type.Payload.Union).?.data;
@@ -9643,7 +9646,7 @@ pub const FuncGen = struct {
     fn airAddrSpaceCast(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-        const inst_ty = self.air.typeOfIndex(inst);
+        const inst_ty = self.typeOfIndex(inst);
         const operand = try self.resolveInst(ty_op.operand);
 
         const llvm_dest_ty = try self.dg.lowerType(inst_ty);
@@ -9830,7 +9833,7 @@ pub const FuncGen = struct {
         switch (struct_ty.zigTypeTag(mod)) {
             .Struct => switch (struct_ty.containerLayout()) {
                 .Packed => {
-                    const result_ty = self.air.typeOfIndex(inst);
+                    const result_ty = self.typeOfIndex(inst);
                     const result_ty_info = result_ty.ptrInfo().data;
 
                     if (result_ty_info.host_size != 0) {
@@ -10172,6 +10175,16 @@ pub const FuncGen = struct {
         );
         return call;
     }
+
+    fn typeOf(fg: *FuncGen, inst: Air.Inst.Ref) Type {
+        const mod = fg.dg.module;
+        return fg.air.typeOf(inst, mod.intern_pool);
+    }
+
+    fn typeOfIndex(fg: *FuncGen, inst: Air.Inst.Index) Type {
+        const mod = fg.dg.module;
+        return fg.air.typeOfIndex(inst, mod.intern_pool);
+    }
 };
 
 fn initializeLLVMTarget(arch: std.Target.Cpu.Arch) void {
@@ -10833,7 +10846,7 @@ const ParamTypeIterator = struct {
             if (it.zig_index >= args.len) {
                 return null;
             } else {
-                return nextInner(it, fg.air.typeOf(args[it.zig_index]));
+                return nextInner(it, fg.typeOf(args[it.zig_index]));
             }
         } else {
             return nextInner(it, it.fn_info.param_types[it.zig_index]);
diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig
index b8c8466427..41abbde1a0 100644
--- a/src/codegen/spirv.zig
+++ b/src/codegen/spirv.zig
@@ -233,7 +233,7 @@ pub const DeclGen = struct {
     fn resolve(self: *DeclGen,
inst: Air.Inst.Ref) !IdRef { const mod = self.module; if (self.air.value(inst, mod)) |val| { - const ty = self.air.typeOf(inst); + const ty = self.typeOf(inst); if (ty.zigTypeTag(mod) == .Fn) { const fn_decl_index = switch (val.tag()) { .extern_fn => val.castTag(.extern_fn).?.data.owner_decl, @@ -1720,10 +1720,11 @@ pub const DeclGen = struct { } fn genInst(self: *DeclGen, inst: Air.Inst.Index) !void { + const mod = self.module; + const ip = &mod.intern_pool; // TODO: remove now-redundant isUnused calls from AIR handler functions - if (self.liveness.isUnused(inst) and !self.air.mustLower(inst)) { + if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip.*)) return; - } const air_tags = self.air.instructions.items(.tag); const maybe_result_id: ?IdRef = switch (air_tags[inst]) { @@ -1847,7 +1848,7 @@ pub const DeclGen = struct { const lhs_id = try self.resolve(bin_op.lhs); const rhs_id = try self.resolve(bin_op.rhs); const result_id = self.spv.allocId(); - const result_type_id = try self.resolveTypeId(self.air.typeOfIndex(inst)); + const result_type_id = try self.resolveTypeId(self.typeOfIndex(inst)); try self.func.body.emit(self.spv.gpa, opcode, .{ .id_result_type = result_type_id, .id_result = result_id, @@ -1862,7 +1863,7 @@ pub const DeclGen = struct { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs_id = try self.resolve(bin_op.lhs); const rhs_id = try self.resolve(bin_op.rhs); - const result_type_id = try self.resolveTypeId(self.air.typeOfIndex(inst)); + const result_type_id = try self.resolveTypeId(self.typeOfIndex(inst)); // the shift and the base must be the same type in SPIR-V, but in Zig the shift is a smaller int. const shift_id = self.spv.allocId(); @@ -1907,15 +1908,15 @@ pub const DeclGen = struct { if (self.liveness.isUnused(inst)) return null; // LHS and RHS are guaranteed to have the same type, and AIR guarantees // the result to be the same as the LHS and RHS, which matches SPIR-V. - const ty = self.air.typeOfIndex(inst); + const ty = self.typeOfIndex(inst); const bin_op = self.air.instructions.items(.data)[inst].bin_op; var lhs_id = try self.resolve(bin_op.lhs); var rhs_id = try self.resolve(bin_op.rhs); const result_ty_ref = try self.resolveType(ty, .direct); - assert(self.air.typeOf(bin_op.lhs).eql(ty, self.module)); - assert(self.air.typeOf(bin_op.rhs).eql(ty, self.module)); + assert(self.typeOf(bin_op.lhs).eql(ty, self.module)); + assert(self.typeOf(bin_op.rhs).eql(ty, self.module)); // Binary operations are generally applicable to both scalar and vector operations // in SPIR-V, but int and float versions of operations require different opcodes. 
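// An illustrative, reduced sketch (invented names, not from these commits) of
// the pattern the hunks above keep applying: Air.typeOf now takes the
// InternPool as an extra argument, so each backend grows a thin typeOf helper
// that supplies it in exactly one place, leaving every call site the same
// shape as before.
const SketchInternPool = struct {};
const SketchType = u32;
const SketchAir = struct {
    types: []const SketchType,
    fn typeOf(air: SketchAir, ref: usize, ip: SketchInternPool) SketchType {
        _ = ip; // the real version resolves interned refs through the pool
        return air.types[ref];
    }
};
const SketchFuncGen = struct {
    air: SketchAir,
    ip: SketchInternPool = .{},
    // One helper per backend instead of threading `ip` through every call site.
    fn typeOf(fg: *SketchFuncGen, ref: usize) SketchType {
        return fg.air.typeOf(ref, fg.ip);
    }
};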
@@ -1971,8 +1972,8 @@ pub const DeclGen = struct { const lhs = try self.resolve(extra.lhs); const rhs = try self.resolve(extra.rhs); - const operand_ty = self.air.typeOf(extra.lhs); - const result_ty = self.air.typeOfIndex(inst); + const operand_ty = self.typeOf(extra.lhs); + const result_ty = self.typeOfIndex(inst); const info = try self.arithmeticTypeInfo(operand_ty); switch (info.class) { @@ -2064,14 +2065,14 @@ pub const DeclGen = struct { fn airShuffle(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { const mod = self.module; if (self.liveness.isUnused(inst)) return null; - const ty = self.air.typeOfIndex(inst); + const ty = self.typeOfIndex(inst); const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Shuffle, ty_pl.payload).data; const a = try self.resolve(extra.a); const b = try self.resolve(extra.b); const mask = self.air.values[extra.mask]; const mask_len = extra.mask_len; - const a_len = self.air.typeOf(extra.a).vectorLen(); + const a_len = self.typeOf(extra.a).vectorLen(); const result_id = self.spv.allocId(); const result_type_id = try self.resolveTypeId(ty); @@ -2162,8 +2163,8 @@ pub const DeclGen = struct { const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const ptr_id = try self.resolve(bin_op.lhs); const offset_id = try self.resolve(bin_op.rhs); - const ptr_ty = self.air.typeOf(bin_op.lhs); - const result_ty = self.air.typeOfIndex(inst); + const ptr_ty = self.typeOf(bin_op.lhs); + const result_ty = self.typeOfIndex(inst); return try self.ptrAdd(result_ty, ptr_ty, ptr_id, offset_id); } @@ -2173,11 +2174,11 @@ pub const DeclGen = struct { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const ptr_id = try self.resolve(bin_op.lhs); - const ptr_ty = self.air.typeOf(bin_op.lhs); + const ptr_ty = self.typeOf(bin_op.lhs); const offset_id = try self.resolve(bin_op.rhs); - const offset_ty = self.air.typeOf(bin_op.rhs); + const offset_ty = self.typeOf(bin_op.rhs); const offset_ty_ref = try self.resolveType(offset_ty, .direct); - const result_ty = self.air.typeOfIndex(inst); + const result_ty = self.typeOfIndex(inst); const negative_offset_id = self.spv.allocId(); try self.func.body.emit(self.spv.gpa, .OpSNegate, .{ @@ -2298,8 +2299,8 @@ pub const DeclGen = struct { const lhs_id = try self.resolve(bin_op.lhs); const rhs_id = try self.resolve(bin_op.rhs); const bool_ty_id = try self.resolveTypeId(Type.bool); - const ty = self.air.typeOf(bin_op.lhs); - assert(ty.eql(self.air.typeOf(bin_op.rhs), self.module)); + const ty = self.typeOf(bin_op.lhs); + assert(ty.eql(self.typeOf(bin_op.rhs), self.module)); return try self.cmp(op, bool_ty_id, ty, lhs_id, rhs_id); } @@ -2337,8 +2338,8 @@ pub const DeclGen = struct { if (self.liveness.isUnused(inst)) return null; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand_id = try self.resolve(ty_op.operand); - const operand_ty = self.air.typeOf(ty_op.operand); - const result_ty = self.air.typeOfIndex(inst); + const operand_ty = self.typeOf(ty_op.operand); + const result_ty = self.typeOfIndex(inst); return try self.bitCast(result_ty, operand_ty, operand_id); } @@ -2347,7 +2348,7 @@ pub const DeclGen = struct { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand_id = try self.resolve(ty_op.operand); - const dest_ty = self.air.typeOfIndex(inst); + const dest_ty = self.typeOfIndex(inst); const dest_ty_id = try self.resolveTypeId(dest_ty); const mod = self.module; @@ -2391,10 
+2392,10 @@ pub const DeclGen = struct { if (self.liveness.isUnused(inst)) return null; const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const operand_ty = self.air.typeOf(ty_op.operand); + const operand_ty = self.typeOf(ty_op.operand); const operand_id = try self.resolve(ty_op.operand); const operand_info = try self.arithmeticTypeInfo(operand_ty); - const dest_ty = self.air.typeOfIndex(inst); + const dest_ty = self.typeOfIndex(inst); const dest_ty_id = try self.resolveTypeId(dest_ty); const result_id = self.spv.allocId(); @@ -2418,7 +2419,7 @@ pub const DeclGen = struct { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand_id = try self.resolve(ty_op.operand); - const dest_ty = self.air.typeOfIndex(inst); + const dest_ty = self.typeOfIndex(inst); const dest_info = try self.arithmeticTypeInfo(dest_ty); const dest_ty_id = try self.resolveTypeId(dest_ty); @@ -2455,20 +2456,20 @@ pub const DeclGen = struct { fn airSliceField(self: *DeclGen, inst: Air.Inst.Index, field: u32) !?IdRef { if (self.liveness.isUnused(inst)) return null; const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const field_ty = self.air.typeOfIndex(inst); + const field_ty = self.typeOfIndex(inst); const operand_id = try self.resolve(ty_op.operand); return try self.extractField(field_ty, operand_id, field); } fn airSliceElemPtr(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const slice_ty = self.air.typeOf(bin_op.lhs); + const slice_ty = self.typeOf(bin_op.lhs); if (!slice_ty.isVolatilePtr() and self.liveness.isUnused(inst)) return null; const slice_id = try self.resolve(bin_op.lhs); const index_id = try self.resolve(bin_op.rhs); - const ptr_ty = self.air.typeOfIndex(inst); + const ptr_ty = self.typeOfIndex(inst); const ptr_ty_ref = try self.resolveType(ptr_ty, .direct); const slice_ptr = try self.extractField(ptr_ty, slice_id, 0); @@ -2477,7 +2478,7 @@ pub const DeclGen = struct { fn airSliceElemVal(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const slice_ty = self.air.typeOf(bin_op.lhs); + const slice_ty = self.typeOf(bin_op.lhs); if (!slice_ty.isVolatilePtr() and self.liveness.isUnused(inst)) return null; const slice_id = try self.resolve(bin_op.lhs); @@ -2514,7 +2515,7 @@ pub const DeclGen = struct { const mod = self.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; - const ptr_ty = self.air.typeOf(bin_op.lhs); + const ptr_ty = self.typeOf(bin_op.lhs); const elem_ty = ptr_ty.childType(); // TODO: Make this return a null ptr or something if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) return null; @@ -2526,7 +2527,7 @@ pub const DeclGen = struct { fn airPtrElemVal(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const ptr_ty = self.air.typeOf(bin_op.lhs); + const ptr_ty = self.typeOf(bin_op.lhs); const ptr_id = try self.resolve(bin_op.lhs); const index_id = try self.resolve(bin_op.rhs); @@ -2544,7 +2545,7 @@ pub const DeclGen = struct { fn airGetUnionTag(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const un_ty = self.air.typeOf(ty_op.operand); + const un_ty = self.typeOf(ty_op.operand); const mod = self.module; const layout = un_ty.unionGetLayout(mod); @@ -2565,7 +2566,7 @@ pub const DeclGen = struct { const ty_pl = 
self.air.instructions.items(.data)[inst].ty_pl; const struct_field = self.air.extraData(Air.StructField, ty_pl.payload).data; - const struct_ty = self.air.typeOf(struct_field.struct_operand); + const struct_ty = self.typeOf(struct_field.struct_operand); const object_id = try self.resolve(struct_field.struct_operand); const field_index = struct_field.field_index; const field_ty = struct_ty.structFieldType(field_index); @@ -2604,8 +2605,8 @@ pub const DeclGen = struct { if (self.liveness.isUnused(inst)) return null; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const struct_ptr = try self.resolve(ty_op.operand); - const struct_ptr_ty = self.air.typeOf(ty_op.operand); - const result_ptr_ty = self.air.typeOfIndex(inst); + const struct_ptr_ty = self.typeOf(ty_op.operand); + const result_ptr_ty = self.typeOfIndex(inst); return try self.structFieldPtr(result_ptr_ty, struct_ptr_ty, struct_ptr, field_index); } @@ -2661,7 +2662,7 @@ pub const DeclGen = struct { fn airAlloc(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { if (self.liveness.isUnused(inst)) return null; - const ptr_ty = self.air.typeOfIndex(inst); + const ptr_ty = self.typeOfIndex(inst); assert(ptr_ty.ptrAddressSpace() == .generic); const child_ty = ptr_ty.childType(); const child_ty_ref = try self.resolveType(child_ty, .indirect); @@ -2694,7 +2695,7 @@ pub const DeclGen = struct { incoming_blocks.deinit(self.gpa); } - const ty = self.air.typeOfIndex(inst); + const ty = self.typeOfIndex(inst); const inst_datas = self.air.instructions.items(.data); const extra = self.air.extraData(Air.Block, inst_datas[inst].ty_pl.payload); const body = self.air.extra[extra.end..][0..extra.data.body_len]; @@ -2727,7 +2728,7 @@ pub const DeclGen = struct { fn airBr(self: *DeclGen, inst: Air.Inst.Index) !void { const br = self.air.instructions.items(.data)[inst].br; const block = self.blocks.get(br.block_inst).?; - const operand_ty = self.air.typeOf(br.operand); + const operand_ty = self.typeOf(br.operand); const mod = self.module; if (operand_ty.hasRuntimeBits(mod)) { @@ -2777,7 +2778,7 @@ pub const DeclGen = struct { fn airLoad(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const ptr_ty = self.air.typeOf(ty_op.operand); + const ptr_ty = self.typeOf(ty_op.operand); const operand = try self.resolve(ty_op.operand); if (!ptr_ty.isVolatilePtr() and self.liveness.isUnused(inst)) return null; @@ -2787,7 +2788,7 @@ pub const DeclGen = struct { fn airStore(self: *DeclGen, inst: Air.Inst.Index) !void { const mod = self.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const ptr_ty = self.air.typeOf(bin_op.lhs); + const ptr_ty = self.typeOf(bin_op.lhs); const ptr = try self.resolve(bin_op.lhs); const value = try self.resolve(bin_op.rhs); const ptr_ty_ref = try self.resolveType(ptr_ty, .direct); @@ -2819,7 +2820,7 @@ pub const DeclGen = struct { fn airRet(self: *DeclGen, inst: Air.Inst.Index) !void { const operand = self.air.instructions.items(.data)[inst].un_op; - const operand_ty = self.air.typeOf(operand); + const operand_ty = self.typeOf(operand); const mod = self.module; if (operand_ty.hasRuntimeBits(mod)) { const operand_id = try self.resolve(operand); @@ -2832,7 +2833,7 @@ pub const DeclGen = struct { fn airRetLoad(self: *DeclGen, inst: Air.Inst.Index) !void { const mod = self.module; const un_op = self.air.instructions.items(.data)[inst].un_op; - const ptr_ty = self.air.typeOf(un_op); + const ptr_ty = self.typeOf(un_op); const ret_ty = ptr_ty.childType(); 
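// A reduced sketch (hypothetical helper, invented names) of the guard that
// airLoad and the slice handlers above all share: an instruction whose result
// is unused is skipped, unless lowering it has a side effect, such as a load
// through a volatile pointer, which must still be emitted.
const SketchAccess = struct { is_volatile: bool, is_unused: bool };
fn sketchShouldLower(a: SketchAccess) bool {
    // Unused results are dead code, but volatile accesses must still happen.
    return a.is_volatile or !a.is_unused;
}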
if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) { @@ -2853,8 +2854,8 @@ pub const DeclGen = struct { const extra = self.air.extraData(Air.Try, pl_op.payload); const body = self.air.extra[extra.end..][0..extra.data.body_len]; - const err_union_ty = self.air.typeOf(pl_op.operand); - const payload_ty = self.air.typeOfIndex(inst); + const err_union_ty = self.typeOf(pl_op.operand); + const payload_ty = self.typeOfIndex(inst); const err_ty_ref = try self.resolveType(Type.anyerror, .direct); const bool_ty_ref = try self.resolveType(Type.bool, .direct); @@ -2911,7 +2912,7 @@ pub const DeclGen = struct { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand_id = try self.resolve(ty_op.operand); - const err_union_ty = self.air.typeOf(ty_op.operand); + const err_union_ty = self.typeOf(ty_op.operand); const err_ty_ref = try self.resolveType(Type.anyerror, .direct); if (err_union_ty.errorUnionSet().errorSetIsEmpty()) { @@ -2934,7 +2935,7 @@ pub const DeclGen = struct { if (self.liveness.isUnused(inst)) return null; const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const err_union_ty = self.air.typeOfIndex(inst); + const err_union_ty = self.typeOfIndex(inst); const payload_ty = err_union_ty.errorUnionPayload(); const operand_id = try self.resolve(ty_op.operand); const eu_layout = self.errorUnionLayout(payload_ty); @@ -2966,7 +2967,7 @@ pub const DeclGen = struct { const mod = self.module; const un_op = self.air.instructions.items(.data)[inst].un_op; const operand_id = try self.resolve(un_op); - const optional_ty = self.air.typeOf(un_op); + const optional_ty = self.typeOf(un_op); var buf: Type.Payload.ElemType = undefined; const payload_ty = optional_ty.optionalChild(&buf); @@ -3030,8 +3031,8 @@ pub const DeclGen = struct { const mod = self.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand_id = try self.resolve(ty_op.operand); - const optional_ty = self.air.typeOf(ty_op.operand); - const payload_ty = self.air.typeOfIndex(inst); + const optional_ty = self.typeOf(ty_op.operand); + const payload_ty = self.typeOfIndex(inst); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return null; @@ -3047,14 +3048,14 @@ pub const DeclGen = struct { const mod = self.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const payload_ty = self.air.typeOf(ty_op.operand); + const payload_ty = self.typeOf(ty_op.operand); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return try self.constBool(true, .direct); } const operand_id = try self.resolve(ty_op.operand); - const optional_ty = self.air.typeOfIndex(inst); + const optional_ty = self.typeOfIndex(inst); if (optional_ty.optionalReprIsPayload(mod)) { return operand_id; } @@ -3068,7 +3069,7 @@ pub const DeclGen = struct { const mod = self.module; const pl_op = self.air.instructions.items(.data)[inst].pl_op; const cond = try self.resolve(pl_op.operand); - const cond_ty = self.air.typeOf(pl_op.operand); + const cond_ty = self.typeOf(pl_op.operand); const switch_br = self.air.extraData(Air.SwitchBr, pl_op.payload); const cond_words: u32 = switch (cond_ty.zigTypeTag(mod)) { @@ -3317,7 +3318,7 @@ pub const DeclGen = struct { const pl_op = self.air.instructions.items(.data)[inst].pl_op; const extra = self.air.extraData(Air.Call, pl_op.payload); const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]); - const callee_ty = self.air.typeOf(pl_op.operand); + const callee_ty = self.typeOf(pl_op.operand); const zig_fn_ty = switch 
(callee_ty.zigTypeTag(mod)) { .Fn => callee_ty, .Pointer => return self.fail("cannot call function pointers", .{}), @@ -3339,7 +3340,7 @@ pub const DeclGen = struct { // before starting to emit OpFunctionCall instructions. Hence the // temporary params buffer. const arg_id = try self.resolve(arg); - const arg_ty = self.air.typeOf(arg); + const arg_ty = self.typeOf(arg); if (!arg_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; params[n_params] = arg_id; @@ -3363,4 +3364,14 @@ pub const DeclGen = struct { return result_id; } + + fn typeOf(self: *DeclGen, inst: Air.Inst.Ref) Type { + const mod = self.module; + return self.air.typeOf(inst, mod.intern_pool); + } + + fn typeOfIndex(self: *DeclGen, inst: Air.Inst.Index) Type { + const mod = self.module; + return self.air.typeOfIndex(inst, mod.intern_pool); + } }; diff --git a/src/print_air.zig b/src/print_air.zig index e8875ff018..39a244e11f 100644 --- a/src/print_air.zig +++ b/src/print_air.zig @@ -306,6 +306,7 @@ const Writer = struct { .struct_field_ptr => try w.writeStructField(s, inst), .struct_field_val => try w.writeStructField(s, inst), .constant => try w.writeConstant(s, inst), + .interned => try w.writeInterned(s, inst), .assembly => try w.writeAssembly(s, inst), .dbg_stmt => try w.writeDbgStmt(s, inst), @@ -515,7 +516,7 @@ const Writer = struct { const pl_op = w.air.instructions.items(.data)[inst].pl_op; const extra = w.air.extraData(Air.Bin, pl_op.payload).data; - const elem_ty = w.air.typeOfIndex(inst).childType(); + const elem_ty = w.typeOfIndex(inst).childType(); try w.writeType(s, elem_ty); try s.writeAll(", "); try w.writeOperand(s, inst, 0, pl_op.operand); @@ -614,6 +615,14 @@ const Writer = struct { try s.print(", {}", .{val.fmtValue(ty, w.module)}); } + fn writeInterned(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { + const mod = w.module; + const ip_index = w.air.instructions.items(.data)[inst].interned; + const ty = ip_index.toType(); + try w.writeType(s, ty); + try s.print(", {}", .{ip_index.toValue().fmtValue(ty, mod)}); + } + fn writeAssembly(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { const ty_pl = w.air.instructions.items(.data)[inst].ty_pl; const extra = w.air.extraData(Air.Asm, ty_pl.payload); @@ -622,7 +631,7 @@ const Writer = struct { var extra_i: usize = extra.end; var op_index: usize = 0; - const ret_ty = w.air.typeOfIndex(inst); + const ret_ty = w.typeOfIndex(inst); try w.writeType(s, ret_ty); if (is_volatile) { @@ -985,4 +994,9 @@ const Writer = struct { try s.print("%{d}", .{inst}); if (dies) try s.writeByte('!'); } + + fn typeOfIndex(w: *Writer, inst: Air.Inst.Index) Type { + const mod = w.module; + return w.air.typeOfIndex(inst, mod.intern_pool); + } }; diff --git a/src/type.zig b/src/type.zig index 259079a26c..94fd4c2eaf 100644 --- a/src/type.zig +++ b/src/type.zig @@ -2827,21 +2827,35 @@ pub const Type = struct { }; } - /// TODO add enums with no fields here pub fn isNoReturn(ty: Type) bool { - switch (ty.tag()) { - .noreturn => return true, - .error_set => { - const err_set_obj = ty.castTag(.error_set).?.data; - const names = err_set_obj.names.keys(); - return names.len == 0; - }, - .error_set_merged => { - const name_map = ty.castTag(.error_set_merged).?.data; - const names = name_map.keys(); - return names.len == 0; - }, + switch (@enumToInt(ty.ip_index)) { + @enumToInt(InternPool.Index.first_type)...@enumToInt(InternPool.Index.noreturn_type) - 1 => return false, + + @enumToInt(InternPool.Index.noreturn_type) => return true, + + 
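// A self-contained sketch (invented enum, not the real InternPool.Index) of
// the technique the isNoReturn rewrite here uses: because the interned type
// indices are laid out contiguously, the switch converts the enum to its
// integer tag with @enumToInt (this era's name for the builtin) and matches
// whole ranges of indices at once instead of listing every type.
const SketchIndex = enum(u32) { first_type, bool_type, noreturn_type, last_type, none };
fn sketchIsNoReturn(i: SketchIndex) bool {
    return switch (@enumToInt(i)) {
        @enumToInt(SketchIndex.noreturn_type) => true,
        // everything interned before or after noreturn_type is not noreturn
        @enumToInt(SketchIndex.first_type)...@enumToInt(SketchIndex.noreturn_type) - 1 => false,
        @enumToInt(SketchIndex.noreturn_type) + 1...@enumToInt(SketchIndex.last_type) => false,
        else => false, // .none falls back to the legacy tag-based path
    };
}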
@enumToInt(InternPool.Index.noreturn_type) + 1...@enumToInt(InternPool.Index.last_type) => return false, + + @enumToInt(InternPool.Index.first_value)...@enumToInt(InternPool.Index.last_value) => unreachable, + @enumToInt(InternPool.Index.generic_poison) => unreachable, + + // TODO add empty error sets here + // TODO add enums with no fields here else => return false, + + @enumToInt(InternPool.Index.none) => switch (ty.tag()) { + .noreturn => return true, + .error_set => { + const err_set_obj = ty.castTag(.error_set).?.data; + const names = err_set_obj.names.keys(); + return names.len == 0; + }, + .error_set_merged => { + const name_map = ty.castTag(.error_set_merged).?.data; + const names = name_map.keys(); + return names.len == 0; + }, + else => return false, + }, } } -- cgit v1.2.3 From 4cd8a40b3b34d4e68853088dd637a9da9b6a8891 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 3 May 2023 16:07:36 -0700 Subject: stage2: move float types to InternPool --- src/Sema.zig | 142 ++++++++++---- src/arch/wasm/CodeGen.zig | 2 +- src/codegen/c/type.zig | 14 +- src/codegen/llvm.zig | 10 +- src/type.zig | 477 +++++++++++++++++++++++++++------------------- src/value.zig | 14 +- 6 files changed, 403 insertions(+), 256 deletions(-) (limited to 'src/arch') diff --git a/src/Sema.zig b/src/Sema.zig index 14037d030e..f93ceb19e6 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -5138,7 +5138,7 @@ fn zirInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins defer tracy.end(); const int = sema.code.instructions.items(.data)[inst].int; - return sema.addIntUnsigned(Type.initTag(.comptime_int), int); + return sema.addIntUnsigned(Type.comptime_int, int); } fn zirIntBig(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -5154,7 +5154,7 @@ fn zirIntBig(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. 
@memcpy(mem.sliceAsBytes(limbs), limb_bytes); return sema.addConstant( - Type.initTag(.comptime_int), + Type.comptime_int, try Value.Tag.int_big_positive.create(arena, limbs), ); } @@ -5164,7 +5164,7 @@ fn zirFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I const arena = sema.arena; const number = sema.code.instructions.items(.data)[inst].float; return sema.addConstant( - Type.initTag(.comptime_float), + Type.comptime_float, try Value.Tag.float_64.create(arena, number), ); } @@ -5176,7 +5176,7 @@ fn zirFloat128(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const extra = sema.code.extraData(Zir.Inst.Float128, inst_data.payload_index).data; const number = extra.get(); return sema.addConstant( - Type.initTag(.comptime_float), + Type.comptime_float, try Value.Tag.float_128.create(arena, number), ); } @@ -15152,8 +15152,8 @@ fn zirAsm( const uncasted_arg = try sema.resolveInst(input.data.operand); const uncasted_arg_ty = sema.typeOf(uncasted_arg); switch (uncasted_arg_ty.zigTypeTag(mod)) { - .ComptimeInt => arg.* = try sema.coerce(block, Type.initTag(.usize), uncasted_arg, src), - .ComptimeFloat => arg.* = try sema.coerce(block, Type.initTag(.f64), uncasted_arg, src), + .ComptimeInt => arg.* = try sema.coerce(block, Type.usize, uncasted_arg, src), + .ComptimeFloat => arg.* = try sema.coerce(block, Type.f64, uncasted_arg, src), else => { arg.* = uncasted_arg; try sema.queueFullTypeResolution(uncasted_arg_ty); @@ -31369,14 +31369,59 @@ fn resolveUnionLayout(sema: *Sema, ty: Type) CompileError!void { pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { const mod = sema.mod; - if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .int_type => return false, + if (ty.ip_index != .none) return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .int_type => false, .ptr_type => @panic("TODO"), .array_type => @panic("TODO"), .vector_type => @panic("TODO"), .optional_type => @panic("TODO"), .error_union_type => @panic("TODO"), - .simple_type => @panic("TODO"), + .simple_type => |t| switch (t) { + .f16, + .f32, + .f64, + .f80, + .f128, + .usize, + .isize, + .c_char, + .c_short, + .c_ushort, + .c_int, + .c_uint, + .c_long, + .c_ulong, + .c_longlong, + .c_ulonglong, + .c_longdouble, + .anyopaque, + .bool, + .void, + .anyerror, + .@"anyframe", + .noreturn, + .generic_poison, + .atomic_order, + .atomic_rmw_op, + .calling_convention, + .address_space, + .float_mode, + .reduce_op, + .call_modifier, + .prefetch_options, + .export_options, + .extern_options, + => false, + + .type, + .comptime_int, + .comptime_float, + .null, + .undefined, + .enum_literal, + .type_info, + => true, + }, .struct_type => @panic("TODO"), .union_type => @panic("TODO"), .simple_value => unreachable, @@ -31409,12 +31454,6 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .c_ulong, .c_longlong, .c_ulonglong, - .c_longdouble, - .f16, - .f32, - .f64, - .f80, - .f128, .anyopaque, .bool, .void, @@ -31455,7 +31494,6 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .single_const_pointer_to_comptime_int, .type, .comptime_int, - .comptime_float, .enum_literal, .type_info, .function, @@ -32926,14 +32964,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { }; switch (ty.tag()) { - .f16, - .f32, - .f64, - .f80, - .f128, - .c_longdouble, .comptime_int, - .comptime_float, .u1, .u8, .i8, @@ -33193,19 +33224,12 @@ pub fn addType(sema: *Sema, ty: Type) !Air.Inst.Ref { 
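// A reduced sketch (invented enum) of the classification the simple_type
// switches above encode. Runtime-representable scalars (floats, C integer
// types, bool, void, ...) report false; types whose values exist only during
// compilation report true, which makes any aggregate containing them
// comptime-only as well.
const SketchSimple = enum { f16, usize, c_int, bool, void, type, comptime_int, comptime_float, enum_literal };
fn sketchRequiresComptime(t: SketchSimple) bool {
    return switch (t) {
        // these have a machine representation, so values can live at runtime
        .f16, .usize, .c_int, .bool, .void => false,
        // these exist only at compile time
        .type, .comptime_int, .comptime_float, .enum_literal => true,
    };
}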
.c_ulong => return .c_ulong_type, .c_longlong => return .c_longlong_type, .c_ulonglong => return .c_ulonglong_type, - .c_longdouble => return .c_longdouble_type, - .f16 => return .f16_type, - .f32 => return .f32_type, - .f64 => return .f64_type, - .f80 => return .f80_type, - .f128 => return .f128_type, .anyopaque => return .anyopaque_type, .bool => return .bool_type, .void => return .void_type, .type => return .type_type, .anyerror => return .anyerror_type, .comptime_int => return .comptime_int_type, - .comptime_float => return .comptime_float_type, .noreturn => return .noreturn_type, .@"anyframe" => return .anyframe_type, .null => return .null_type, @@ -33595,7 +33619,52 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .vector_type => @panic("TODO"), .optional_type => @panic("TODO"), .error_union_type => @panic("TODO"), - .simple_type => @panic("TODO"), + .simple_type => |t| return switch (t) { + .f16, + .f32, + .f64, + .f80, + .f128, + .usize, + .isize, + .c_char, + .c_short, + .c_ushort, + .c_int, + .c_uint, + .c_long, + .c_ulong, + .c_longlong, + .c_ulonglong, + .c_longdouble, + .anyopaque, + .bool, + .void, + .anyerror, + .@"anyframe", + .noreturn, + .generic_poison, + .atomic_order, + .atomic_rmw_op, + .calling_convention, + .address_space, + .float_mode, + .reduce_op, + .call_modifier, + .prefetch_options, + .export_options, + .extern_options, + => false, + + .type, + .comptime_int, + .comptime_float, + .null, + .undefined, + .enum_literal, + .type_info, + => true, + }, .struct_type => @panic("TODO"), .union_type => @panic("TODO"), .simple_value => unreachable, @@ -33628,20 +33697,12 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .c_ulong, .c_longlong, .c_ulonglong, - .c_longdouble, - .f16, - .f32, - .f64, - .f80, - .f128, .anyopaque, .bool, .void, .anyerror, .noreturn, .@"anyframe", - .null, - .undefined, .atomic_order, .atomic_rmw_op, .calling_convention, @@ -33674,8 +33735,9 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .single_const_pointer_to_comptime_int, .type, .comptime_int, - .comptime_float, .enum_literal, + .null, + .undefined, .type_info, .function, => true, diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index cd61eaf1fb..bbba43d265 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -3594,7 +3594,7 @@ fn bitcast(func: *CodeGen, wanted_ty: Type, given_ty: Type, operand: WValue) Inn const mod = func.bin_file.base.options.module.?; // if we bitcast a float to or from an integer we must use the 'reinterpret' instruction if (!(wanted_ty.isAnyFloat() or given_ty.isAnyFloat())) return operand; - if (wanted_ty.tag() == .f16 or given_ty.tag() == .f16) return operand; + if (wanted_ty.ip_index == .f16_type or given_ty.ip_index == .f16_type) return operand; if (wanted_ty.bitSize(mod) > 64) return operand; assert((wanted_ty.isInt(mod) and given_ty.isAnyFloat()) or (wanted_ty.isAnyFloat() and given_ty.isInt(mod))); diff --git a/src/codegen/c/type.zig b/src/codegen/c/type.zig index 5064b84b1d..b964d16bd9 100644 --- a/src/codegen/c/type.zig +++ b/src/codegen/c/type.zig @@ -1412,13 +1412,13 @@ pub const CType = extern union { .Bool => self.init(.bool), - .Float => self.init(switch (ty.tag()) { - .f16 => .zig_f16, - .f32 => .zig_f32, - .f64 => .zig_f64, - .f80 => .zig_f80, - .f128 => .zig_f128, - .c_longdouble => .zig_c_longdouble, + .Float => self.init(switch (ty.ip_index) { + .f16_type => .zig_f16, + .f32_type => .zig_f32, + .f64_type => .zig_f64, + .f80_type => 
.zig_f80, + .f128_type => .zig_f128, + .c_longdouble_type => .zig_c_longdouble, else => unreachable, }), diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 5bc06e4bfc..ce78b06f2e 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -10932,7 +10932,7 @@ const ParamTypeIterator = struct { .riscv32, .riscv64 => { it.zig_index += 1; it.llvm_index += 1; - if (ty.tag() == .f16) { + if (ty.ip_index == .f16_type) { return .as_u16; } switch (riscv_c_abi.classifyType(ty, mod)) { @@ -11264,10 +11264,10 @@ fn backendSupportsF128(target: std.Target) bool { /// LLVM does not support all relevant intrinsics for all targets, so we /// may need to manually generate a libc call fn intrinsicsAllowed(scalar_ty: Type, target: std.Target) bool { - return switch (scalar_ty.tag()) { - .f16 => backendSupportsF16(target), - .f80 => (target.c_type_bit_size(.longdouble) == 80) and backendSupportsF80(target), - .f128 => (target.c_type_bit_size(.longdouble) == 128) and backendSupportsF128(target), + return switch (scalar_ty.ip_index) { + .f16_type => backendSupportsF16(target), + .f80_type => (target.c_type_bit_size(.longdouble) == 80) and backendSupportsF80(target), + .f128_type => (target.c_type_bit_size(.longdouble) == 128) and backendSupportsF128(target), else => true, }; } diff --git a/src/type.zig b/src/type.zig index 934bfd35ca..205a732710 100644 --- a/src/type.zig +++ b/src/type.zig @@ -50,6 +50,7 @@ pub const Type = struct { .f64, .f80, .f128, + .c_longdouble, => return .Float, .usize, @@ -63,7 +64,6 @@ pub const Type = struct { .c_ulong, .c_longlong, .c_ulonglong, - .c_longdouble, => return .Int, .anyopaque => return .Opaque, @@ -134,14 +134,6 @@ pub const Type = struct { .c_ulonglong, => return .Int, - .f16, - .f32, - .f64, - .f80, - .f128, - .c_longdouble, - => return .Float, - .error_set, .error_set_single, .anyerror, @@ -154,7 +146,6 @@ pub const Type = struct { .void => return .Void, .type => return .Type, .comptime_int => return .ComptimeInt, - .comptime_float => return .ComptimeFloat, .noreturn => return .NoReturn, .null => return .Null, .undefined => return .Undefined, @@ -618,10 +609,13 @@ pub const Type = struct { } pub fn eql(a: Type, b: Type, mod: *Module) bool { - // As a shortcut, if the small tags / addresses match, we're done. if (a.ip_index != .none or b.ip_index != .none) { + // The InternPool data structure hashes based on Key to make interned objects + // unique. An Index can be treated simply as u32 value for the + // purpose of Type/Value hashing and equality. return a.ip_index == b.ip_index; } + // As a shortcut, if the small tags / addresses match, we're done. if (a.legacy.tag_if_small_enough == b.legacy.tag_if_small_enough) return true; switch (a.tag()) { @@ -640,18 +634,10 @@ pub const Type = struct { .c_longlong, .c_ulonglong, - .f16, - .f32, - .f64, - .f80, - .f128, - .c_longdouble, - .bool, .void, .type, .comptime_int, - .comptime_float, .noreturn, .null, .undefined, @@ -1018,7 +1004,11 @@ pub const Type = struct { pub fn hashWithHasher(ty: Type, hasher: *std.hash.Wyhash, mod: *Module) void { if (ty.ip_index != .none) { - return mod.intern_pool.indexToKey(ty.ip_index).hashWithHasher(hasher); + // The InternPool data structure hashes based on Key to make interned objects + // unique. An Index can be treated simply as u32 value for the + // purpose of Type/Value hashing and equality. 
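// A freestanding model (invented names, string keys standing in for the real
// Key type) of the property the comment above relies on: the pool hands out
// at most one index per distinct key, so "same index" is equivalent to "same
// type", and eql/hashWithHasher can compare and hash the 32-bit index alone.
const std_sketch = @import("std");
const SketchPool = struct {
    map: std_sketch.StringHashMap(u32),
    next: u32 = 0,
    fn intern(p: *SketchPool, key: []const u8) !u32 {
        const gop = try p.map.getOrPut(key);
        if (!gop.found_existing) {
            gop.value_ptr.* = p.next; // first occurrence: assign a fresh index
            p.next += 1;
        }
        return gop.value_ptr.*; // duplicates always receive the original index
    }
};
test "interned keys reduce deep equality to index equality" {
    var pool = SketchPool{ .map = std_sketch.StringHashMap(u32).init(std_sketch.testing.allocator) };
    defer pool.map.deinit();
    try std_sketch.testing.expect((try pool.intern("f16")) == (try pool.intern("f16")));
}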
+ std.hash.autoHash(hasher, ty.ip_index); + return; } switch (ty.tag()) { .generic_poison => unreachable, @@ -1039,22 +1029,10 @@ pub const Type = struct { std.hash.autoHash(hasher, ty_tag); }, - .f16, - .f32, - .f64, - .f80, - .f128, - .c_longdouble, - => |ty_tag| { - std.hash.autoHash(hasher, std.builtin.TypeId.Float); - std.hash.autoHash(hasher, ty_tag); - }, - .bool => std.hash.autoHash(hasher, std.builtin.TypeId.Bool), .void => std.hash.autoHash(hasher, std.builtin.TypeId.Void), .type => std.hash.autoHash(hasher, std.builtin.TypeId.Type), .comptime_int => std.hash.autoHash(hasher, std.builtin.TypeId.ComptimeInt), - .comptime_float => std.hash.autoHash(hasher, std.builtin.TypeId.ComptimeFloat), .noreturn => std.hash.autoHash(hasher, std.builtin.TypeId.NoReturn), .null => std.hash.autoHash(hasher, std.builtin.TypeId.Null), .undefined => std.hash.autoHash(hasher, std.builtin.TypeId.Undefined), @@ -1378,19 +1356,12 @@ pub const Type = struct { .c_ulong, .c_longlong, .c_ulonglong, - .c_longdouble, .anyopaque, - .f16, - .f32, - .f64, - .f80, - .f128, .bool, .void, .type, .anyerror, .comptime_int, - .comptime_float, .noreturn, .null, .undefined, @@ -1671,20 +1642,13 @@ pub const Type = struct { .c_ulong, .c_longlong, .c_ulonglong, - .c_longdouble, .anyopaque, - .f16, - .f32, - .f64, - .f80, - .f128, .bool, .void, .type, .anyerror, .@"anyframe", .comptime_int, - .comptime_float, .noreturn, => return writer.writeAll(@tagName(t)), @@ -2067,20 +2031,13 @@ pub const Type = struct { .c_ulong, .c_longlong, .c_ulonglong, - .c_longdouble, .anyopaque, - .f16, - .f32, - .f64, - .f80, - .f128, .bool, .void, .type, .anyerror, .@"anyframe", .comptime_int, - .comptime_float, .noreturn, => try writer.writeAll(@tagName(t)), @@ -2353,6 +2310,7 @@ pub const Type = struct { } pub fn toValue(self: Type, allocator: Allocator) Allocator.Error!Value { + if (self.ip_index != .none) return self.ip_index.toValue(); switch (self.tag()) { .u1 => return Value.initTag(.u1_type), .u8 => return Value.initTag(.u8_type), @@ -2375,20 +2333,13 @@ pub const Type = struct { .c_ulong => return Value.initTag(.c_ulong_type), .c_longlong => return Value.initTag(.c_longlong_type), .c_ulonglong => return Value.initTag(.c_ulonglong_type), - .c_longdouble => return Value.initTag(.c_longdouble_type), .anyopaque => return Value.initTag(.anyopaque_type), - .f16 => return Value.initTag(.f16_type), - .f32 => return Value.initTag(.f32_type), - .f64 => return Value.initTag(.f64_type), - .f80 => return Value.initTag(.f80_type), - .f128 => return Value.initTag(.f128_type), .bool => return Value.initTag(.bool_type), .void => return Value.initTag(.void_type), .type => return Value.initTag(.type_type), .anyerror => return Value.initTag(.anyerror_type), .@"anyframe" => return Value.initTag(.anyframe_type), .comptime_int => return Value.initTag(.comptime_int_type), - .comptime_float => return Value.initTag(.comptime_float_type), .noreturn => return Value.initTag(.noreturn_type), .null => return Value.initTag(.null_type), .undefined => return Value.initTag(.undefined_type), @@ -2522,12 +2473,6 @@ pub const Type = struct { .c_ulong, .c_longlong, .c_ulonglong, - .c_longdouble, - .f16, - .f32, - .f64, - .f80, - .f128, .bool, .anyerror, .const_slice_u8, @@ -2588,7 +2533,6 @@ pub const Type = struct { .void, .type, .comptime_int, - .comptime_float, .noreturn, .null, .undefined, @@ -2801,12 +2745,6 @@ pub const Type = struct { .c_ulong, .c_longlong, .c_ulonglong, - .c_longdouble, - .f16, - .f32, - .f64, - .f80, - .f128, .bool, .void, .manyptr_u8, @@ -2852,7 
+2790,6 @@ pub const Type = struct { .generic_poison, .type, .comptime_int, - .comptime_float, .enum_literal, .type_info, // These are function bodies, not function pointers. @@ -3085,7 +3022,74 @@ pub const Type = struct { .vector_type => @panic("TODO"), .optional_type => @panic("TODO"), .error_union_type => @panic("TODO"), - .simple_type => @panic("TODO"), + .simple_type => |t| switch (t) { + .bool, + .atomic_order, + .atomic_rmw_op, + .calling_convention, + .address_space, + .float_mode, + .reduce_op, + .call_modifier, + .prefetch_options, + .anyopaque, + => return AbiAlignmentAdvanced{ .scalar = 1 }, + + .usize, + .isize, + .export_options, + .extern_options, + .@"anyframe", + => return AbiAlignmentAdvanced{ .scalar = @divExact(target.cpu.arch.ptrBitWidth(), 8) }, + + .c_char => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.char) }, + .c_short => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.short) }, + .c_ushort => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.ushort) }, + .c_int => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.int) }, + .c_uint => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.uint) }, + .c_long => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.long) }, + .c_ulong => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.ulong) }, + .c_longlong => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.longlong) }, + .c_ulonglong => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.ulonglong) }, + .c_longdouble => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.longdouble) }, + + .f16 => return AbiAlignmentAdvanced{ .scalar = 2 }, + .f32 => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.float) }, + .f64 => switch (target.c_type_bit_size(.double)) { + 64 => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.double) }, + else => return AbiAlignmentAdvanced{ .scalar = 8 }, + }, + .f80 => switch (target.c_type_bit_size(.longdouble)) { + 80 => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.longdouble) }, + else => { + const u80_ty: Type = .{ + .ip_index = .u80_type, + .legacy = undefined, + }; + return AbiAlignmentAdvanced{ .scalar = abiAlignment(u80_ty, mod) }; + }, + }, + .f128 => switch (target.c_type_bit_size(.longdouble)) { + 128 => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.longdouble) }, + else => return AbiAlignmentAdvanced{ .scalar = 16 }, + }, + + // TODO revisit this when we have the concept of the error tag type + .anyerror => return AbiAlignmentAdvanced{ .scalar = 2 }, + + .void, + .type, + .comptime_int, + .comptime_float, + .null, + .undefined, + .enum_literal, + .type_info, + => return AbiAlignmentAdvanced{ .scalar = 0 }, + + .noreturn => unreachable, + .generic_poison => unreachable, + }, .struct_type => @panic("TODO"), .union_type => @panic("TODO"), .simple_value => unreachable, @@ -3158,28 +3162,6 @@ pub const Type = struct { .c_ulong => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.ulong) }, .c_longlong => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.longlong) }, .c_ulonglong => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.ulonglong) }, - .c_longdouble => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.longdouble) }, - - .f16 => return AbiAlignmentAdvanced{ .scalar = 2 }, - .f32 => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.float) }, - .f64 => 
switch (target.c_type_bit_size(.double)) { - 64 => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.double) }, - else => return AbiAlignmentAdvanced{ .scalar = 8 }, - }, - .f80 => switch (target.c_type_bit_size(.longdouble)) { - 80 => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.longdouble) }, - else => { - const u80_ty: Type = .{ - .ip_index = .u80_type, - .legacy = undefined, - }; - return AbiAlignmentAdvanced{ .scalar = abiAlignment(u80_ty, mod) }; - }, - }, - .f128 => switch (target.c_type_bit_size(.longdouble)) { - 128 => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.longdouble) }, - else => return AbiAlignmentAdvanced{ .scalar = 16 }, - }, // TODO revisit this when we have the concept of the error tag type .anyerror_void_error_union, @@ -3366,7 +3348,6 @@ pub const Type = struct { .empty_struct_literal, .type, .comptime_int, - .comptime_float, .null, .undefined, .enum_literal, @@ -3481,7 +3462,69 @@ pub const Type = struct { .vector_type => @panic("TODO"), .optional_type => @panic("TODO"), .error_union_type => @panic("TODO"), - .simple_type => @panic("TODO"), + .simple_type => |t| switch (t) { + .bool, + .atomic_order, + .atomic_rmw_op, + .calling_convention, + .address_space, + .float_mode, + .reduce_op, + .call_modifier, + => return AbiSizeAdvanced{ .scalar = 1 }, + + .f16 => return AbiSizeAdvanced{ .scalar = 2 }, + .f32 => return AbiSizeAdvanced{ .scalar = 4 }, + .f64 => return AbiSizeAdvanced{ .scalar = 8 }, + .f128 => return AbiSizeAdvanced{ .scalar = 16 }, + .f80 => switch (target.c_type_bit_size(.longdouble)) { + 80 => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.longdouble) }, + else => { + const u80_ty: Type = .{ + .ip_index = .u80_type, + .legacy = undefined, + }; + return AbiSizeAdvanced{ .scalar = abiSize(u80_ty, mod) }; + }, + }, + + .usize, + .isize, + .@"anyframe", + => return AbiSizeAdvanced{ .scalar = @divExact(target.cpu.arch.ptrBitWidth(), 8) }, + + .c_char => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.char) }, + .c_short => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.short) }, + .c_ushort => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.ushort) }, + .c_int => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.int) }, + .c_uint => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.uint) }, + .c_long => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.long) }, + .c_ulong => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.ulong) }, + .c_longlong => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.longlong) }, + .c_ulonglong => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.ulonglong) }, + .c_longdouble => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.longdouble) }, + + .anyopaque, + .void, + .type, + .comptime_int, + .comptime_float, + .null, + .undefined, + .enum_literal, + => return AbiSizeAdvanced{ .scalar = 0 }, + + // TODO revisit this when we have the concept of the error tag type + .anyerror => return AbiSizeAdvanced{ .scalar = 2 }, + + .prefetch_options => unreachable, // missing call to resolveTypeFields + .export_options => unreachable, // missing call to resolveTypeFields + .extern_options => unreachable, // missing call to resolveTypeFields + + .type_info => unreachable, + .noreturn => unreachable, + .generic_poison => unreachable, + }, .struct_type => @panic("TODO"), .union_type => @panic("TODO"), .simple_value => unreachable, @@ -3506,7 +3549,6 @@ pub const Type = struct { 
.anyopaque, .type, .comptime_int, - .comptime_float, .null, .undefined, .enum_literal, @@ -3661,22 +3703,6 @@ pub const Type = struct { .c_ulong => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.ulong) }, .c_longlong => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.longlong) }, .c_ulonglong => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.ulonglong) }, - .c_longdouble => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.longdouble) }, - - .f16 => return AbiSizeAdvanced{ .scalar = 2 }, - .f32 => return AbiSizeAdvanced{ .scalar = 4 }, - .f64 => return AbiSizeAdvanced{ .scalar = 8 }, - .f128 => return AbiSizeAdvanced{ .scalar = 16 }, - .f80 => switch (target.c_type_bit_size(.longdouble)) { - 80 => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.longdouble) }, - else => { - const u80_ty: Type = .{ - .ip_index = .u80_type, - .legacy = undefined, - }; - return AbiSizeAdvanced{ .scalar = abiSize(u80_ty, mod) }; - }, - }, // TODO revisit this when we have the concept of the error tag type .anyerror_void_error_union, @@ -3820,7 +3846,57 @@ pub const Type = struct { .vector_type => @panic("TODO"), .optional_type => @panic("TODO"), .error_union_type => @panic("TODO"), - .simple_type => @panic("TODO"), + .simple_type => |t| switch (t) { + .f16 => return 16, + .f32 => return 32, + .f64 => return 64, + .f80 => return 80, + .f128 => return 128, + + .usize, + .isize, + .@"anyframe", + => return target.cpu.arch.ptrBitWidth(), + + .c_char => return target.c_type_bit_size(.char), + .c_short => return target.c_type_bit_size(.short), + .c_ushort => return target.c_type_bit_size(.ushort), + .c_int => return target.c_type_bit_size(.int), + .c_uint => return target.c_type_bit_size(.uint), + .c_long => return target.c_type_bit_size(.long), + .c_ulong => return target.c_type_bit_size(.ulong), + .c_longlong => return target.c_type_bit_size(.longlong), + .c_ulonglong => return target.c_type_bit_size(.ulonglong), + .c_longdouble => return target.c_type_bit_size(.longdouble), + + .bool => return 1, + .void => return 0, + + // TODO revisit this when we have the concept of the error tag type + .anyerror => return 16, + + .anyopaque => unreachable, + .type => unreachable, + .comptime_int => unreachable, + .comptime_float => unreachable, + .noreturn => unreachable, + .null => unreachable, + .undefined => unreachable, + .enum_literal => unreachable, + .generic_poison => unreachable, + + .atomic_order => unreachable, // missing call to resolveTypeFields + .atomic_rmw_op => unreachable, // missing call to resolveTypeFields + .calling_convention => unreachable, // missing call to resolveTypeFields + .address_space => unreachable, // missing call to resolveTypeFields + .float_mode => unreachable, // missing call to resolveTypeFields + .reduce_op => unreachable, // missing call to resolveTypeFields + .call_modifier => unreachable, // missing call to resolveTypeFields + .prefetch_options => unreachable, // missing call to resolveTypeFields + .export_options => unreachable, // missing call to resolveTypeFields + .extern_options => unreachable, // missing call to resolveTypeFields + .type_info => unreachable, // missing call to resolveTypeFields + }, .struct_type => @panic("TODO"), .union_type => @panic("TODO"), .simple_value => unreachable, @@ -3836,7 +3912,6 @@ pub const Type = struct { .anyopaque => unreachable, .type => unreachable, .comptime_int => unreachable, - .comptime_float => unreachable, .noreturn => unreachable, .null => unreachable, .undefined => unreachable, 
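// A reduced sketch (invented helper; the hard-coded parameters stand in for
// target.c_type_bit_size(.longdouble) and the u80/longdouble layout queries)
// of the f80 fallback the migrated abiSize/abiAlignment arms above implement:
// f80 is always 80 bits wide, but on targets whose C long double is not
// 80 bits, its storage size and alignment borrow the 80-bit integer layout.
fn sketchF80AbiSize(longdouble_bits: u16, u80_abi_size: u64, longdouble_abi_size: u64) u64 {
    return switch (longdouble_bits) {
        80 => longdouble_abi_size, // the C ABI already lays out an 80-bit float
        else => u80_abi_size, // otherwise reuse the u80 integer layout
    };
}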
@@ -3852,12 +3927,11 @@ pub const Type = struct { .void => return 0, .bool, .u1 => return 1, .u8, .i8 => return 8, - .i16, .u16, .f16 => return 16, + .i16, .u16 => return 16, .u29 => return 29, - .i32, .u32, .f32 => return 32, - .i64, .u64, .f64 => return 64, - .f80 => return 80, - .u128, .i128, .f128 => return 128, + .i32, .u32 => return 32, + .i64, .u64 => return 64, + .u128, .i128 => return 128, .@"struct" => { const struct_obj = ty.castTag(.@"struct").?.data; @@ -3975,7 +4049,6 @@ pub const Type = struct { .c_ulong => return target.c_type_bit_size(.ulong), .c_longlong => return target.c_type_bit_size(.longlong), .c_ulonglong => return target.c_type_bit_size(.ulonglong), - .c_longdouble => return target.c_type_bit_size(.longdouble), .error_set, .error_set_single, @@ -4950,14 +5023,14 @@ pub const Type = struct { } /// Returns `false` for `comptime_float`. - pub fn isRuntimeFloat(self: Type) bool { - return switch (self.tag()) { - .f16, - .f32, - .f64, - .f80, - .f128, - .c_longdouble, + pub fn isRuntimeFloat(ty: Type) bool { + return switch (ty.ip_index) { + .f16_type, + .f32_type, + .f64_type, + .f80_type, + .f128_type, + .c_longdouble_type, => true, else => false, @@ -4965,15 +5038,15 @@ pub const Type = struct { } /// Returns `true` for `comptime_float`. - pub fn isAnyFloat(self: Type) bool { - return switch (self.tag()) { - .f16, - .f32, - .f64, - .f80, - .f128, - .c_longdouble, - .comptime_float, + pub fn isAnyFloat(ty: Type) bool { + return switch (ty.ip_index) { + .f16_type, + .f32_type, + .f64_type, + .f80_type, + .f128_type, + .c_longdouble_type, + .comptime_float_type, => true, else => false, @@ -4982,14 +5055,14 @@ pub const Type = struct { /// Asserts the type is a fixed-size float or comptime_float. /// Returns 128 for comptime_float types. - pub fn floatBits(self: Type, target: Target) u16 { - return switch (self.tag()) { - .f16 => 16, - .f32 => 32, - .f64 => 64, - .f80 => 80, - .f128, .comptime_float => 128, - .c_longdouble => target.c_type_bit_size(.longdouble), + pub fn floatBits(ty: Type, target: Target) u16 { + return switch (ty.ip_index) { + .f16_type => 16, + .f32_type => 32, + .f64_type => 64, + .f80_type => 80, + .f128_type, .comptime_float_type => 128, + .c_longdouble_type => target.c_type_bit_size(.longdouble), else => unreachable, }; @@ -5094,14 +5167,7 @@ pub const Type = struct { else => false, }; return switch (ty.tag()) { - .f16, - .f32, - .f64, - .f80, - .f128, - .c_longdouble, .comptime_int, - .comptime_float, .u1, .u8, .i8, @@ -5205,14 +5271,7 @@ pub const Type = struct { }; while (true) switch (ty.tag()) { - .f16, - .f32, - .f64, - .f80, - .f128, - .c_longdouble, .comptime_int, - .comptime_float, .u1, .u8, .i8, @@ -5391,14 +5450,59 @@ pub const Type = struct { /// TODO merge these implementations together with the "advanced" pattern seen /// elsewhere in this file. 
pub fn comptimeOnly(ty: Type, mod: *const Module) bool { - if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .int_type => return false, + if (ty.ip_index != .none) return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .int_type => false, .ptr_type => @panic("TODO"), .array_type => @panic("TODO"), .vector_type => @panic("TODO"), .optional_type => @panic("TODO"), .error_union_type => @panic("TODO"), - .simple_type => @panic("TODO"), + .simple_type => |t| switch (t) { + .f16, + .f32, + .f64, + .f80, + .f128, + .usize, + .isize, + .c_char, + .c_short, + .c_ushort, + .c_int, + .c_uint, + .c_long, + .c_ulong, + .c_longlong, + .c_ulonglong, + .c_longdouble, + .anyopaque, + .bool, + .void, + .anyerror, + .@"anyframe", + .noreturn, + .generic_poison, + .atomic_order, + .atomic_rmw_op, + .calling_convention, + .address_space, + .float_mode, + .reduce_op, + .call_modifier, + .prefetch_options, + .export_options, + .extern_options, + => false, + + .type, + .comptime_int, + .comptime_float, + .null, + .undefined, + .enum_literal, + .type_info, + => true, + }, .struct_type => @panic("TODO"), .union_type => @panic("TODO"), .simple_value => unreachable, @@ -5431,20 +5535,12 @@ pub const Type = struct { .c_ulong, .c_longlong, .c_ulonglong, - .c_longdouble, - .f16, - .f32, - .f64, - .f80, - .f128, .anyopaque, .bool, .void, .anyerror, .noreturn, .@"anyframe", - .null, - .undefined, .atomic_order, .atomic_rmw_op, .calling_convention, @@ -5477,11 +5573,12 @@ pub const Type = struct { .single_const_pointer_to_comptime_int, .type, .comptime_int, - .comptime_float, .enum_literal, .type_info, // These are function bodies, not function pointers. .function, + .null, + .undefined, => true, .inferred_alloc_mut => unreachable, @@ -6286,19 +6383,12 @@ pub const Type = struct { c_ulong, c_longlong, c_ulonglong, - c_longdouble, - f16, - f32, - f64, - f80, - f128, anyopaque, bool, void, type, anyerror, comptime_int, - comptime_float, noreturn, @"anyframe", null, @@ -6377,7 +6467,6 @@ pub const Type = struct { pub const no_payload_count = @enumToInt(last_no_payload_tag) + 1; pub fn Type(comptime t: Tag) type { - // Keep in sync with tools/stage2_pretty_printers_common.py return switch (t) { .u1, .u8, @@ -6402,19 +6491,12 @@ pub const Type = struct { .c_ulong, .c_longlong, .c_ulonglong, - .c_longdouble, - .f16, - .f32, - .f64, - .f80, - .f128, .anyopaque, .bool, .void, .type, .anyerror, .comptime_int, - .comptime_float, .noreturn, .enum_literal, .null, @@ -6781,16 +6863,17 @@ pub const Type = struct { pub const @"i32" = initTag(.i32); pub const @"i64" = initTag(.i64); - pub const @"f16" = initTag(.f16); - pub const @"f32" = initTag(.f32); - pub const @"f64" = initTag(.f64); - pub const @"f80" = initTag(.f80); - pub const @"f128" = initTag(.f128); + pub const @"f16": Type = .{ .ip_index = .f16_type, .legacy = undefined }; + pub const @"f32": Type = .{ .ip_index = .f32_type, .legacy = undefined }; + pub const @"f64": Type = .{ .ip_index = .f64_type, .legacy = undefined }; + pub const @"f80": Type = .{ .ip_index = .f80_type, .legacy = undefined }; + pub const @"f128": Type = .{ .ip_index = .f128_type, .legacy = undefined }; pub const @"bool" = initTag(.bool); pub const @"usize" = initTag(.usize); pub const @"isize" = initTag(.isize); - pub const @"comptime_int" = initTag(.comptime_int); + pub const @"comptime_int": Type = .{ .ip_index = .comptime_int_type, .legacy = undefined }; + pub const @"comptime_float": Type = .{ .ip_index = .comptime_float_type, .legacy = undefined }; pub const @"void" 
= initTag(.void); pub const @"type" = initTag(.type); pub const @"anyerror" = initTag(.anyerror); @@ -6798,6 +6881,8 @@ pub const Type = struct { pub const @"null" = initTag(.null); pub const @"noreturn" = initTag(.noreturn); + pub const @"c_longdouble": Type = .{ .ip_index = .c_longdouble_type, .legacy = undefined }; + pub const err_int = Type.u16; pub fn ptr(arena: Allocator, mod: *Module, data: Payload.Pointer.Data) !Type { diff --git a/src/value.zig b/src/value.zig index 7f0e6006f0..8912209d5e 100644 --- a/src/value.zig +++ b/src/value.zig @@ -974,19 +974,19 @@ pub const Value = struct { .c_ulong_type => Type.initTag(.c_ulong), .c_longlong_type => Type.initTag(.c_longlong), .c_ulonglong_type => Type.initTag(.c_ulonglong), - .c_longdouble_type => Type.initTag(.c_longdouble), - .f16_type => Type.initTag(.f16), - .f32_type => Type.initTag(.f32), - .f64_type => Type.initTag(.f64), - .f80_type => Type.initTag(.f80), - .f128_type => Type.initTag(.f128), + .c_longdouble_type => Type.c_longdouble, + .f16_type => Type.f16, + .f32_type => Type.f32, + .f64_type => Type.f64, + .f80_type => Type.f80, + .f128_type => Type.f128, .anyopaque_type => Type.initTag(.anyopaque), .bool_type => Type.initTag(.bool), .void_type => Type.initTag(.void), .type_type => Type.initTag(.type), .anyerror_type => Type.initTag(.anyerror), .comptime_int_type => Type.initTag(.comptime_int), - .comptime_float_type => Type.initTag(.comptime_float), + .comptime_float_type => Type.comptime_float, .noreturn_type => Type.initTag(.noreturn), .null_type => Type.initTag(.null), .undefined_type => Type.initTag(.undefined), -- cgit v1.2.3 From bcd4bb8afbea84d86fd8758b581b141e7086b16b Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 3 May 2023 16:33:16 -0700 Subject: stage2: move named int types to InternPool --- src/Sema.zig | 51 +------- src/arch/aarch64/CodeGen.zig | 6 +- src/arch/arm/CodeGen.zig | 10 +- src/arch/riscv64/CodeGen.zig | 2 +- src/arch/sparc64/CodeGen.zig | 4 +- src/codegen.zig | 2 +- src/codegen/c/type.zig | 24 ++-- src/codegen/spirv.zig | 2 +- src/type.zig | 301 +++++++++---------------------------------- src/value.zig | 64 ++++----- 10 files changed, 121 insertions(+), 345 deletions(-) (limited to 'src/arch') diff --git a/src/Sema.zig b/src/Sema.zig index f93ceb19e6..d03460385e 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -6448,7 +6448,7 @@ fn checkCallArgumentCount( .Optional => { var buf: Type.Payload.ElemType = undefined; const opt_child = callee_ty.optionalChild(&buf); - if (opt_child.zigTypeTag(mod) == .Fn or (opt_child.isSinglePointer() and + if (opt_child.zigTypeTag(mod) == .Fn or (opt_child.isSinglePointer(mod) and opt_child.childType().zigTypeTag(mod) == .Fn)) { const msg = msg: { @@ -31421,6 +31421,8 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .enum_literal, .type_info, => true, + + .var_args_param => unreachable, }, .struct_type => @panic("TODO"), .union_type => @panic("TODO"), @@ -31443,17 +31445,6 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .i64, .u128, .i128, - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, .anyopaque, .bool, .void, @@ -31798,6 +31789,7 @@ pub fn resolveTypeFields(sema: *Sema, ty: Type) CompileError!Type { .bool_false => unreachable, .empty_struct => unreachable, .generic_poison => unreachable, + .var_args_param_type => unreachable, .type_info_type => return sema.getBuiltinType("Type"), .extern_options_type => return 
sema.getBuiltinType("ExternOptions"), @@ -32977,17 +32969,6 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .i64, .u128, .i128, - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, .bool, .type, .anyerror, @@ -33213,17 +33194,6 @@ pub fn addType(sema: *Sema, ty: Type) !Air.Inst.Ref { .i64 => return .i64_type, .u128 => return .u128_type, .i128 => return .i128_type, - .usize => return .usize_type, - .isize => return .isize_type, - .c_char => return .c_char_type, - .c_short => return .c_short_type, - .c_ushort => return .c_ushort_type, - .c_int => return .c_int_type, - .c_uint => return .c_uint_type, - .c_long => return .c_long_type, - .c_ulong => return .c_ulong_type, - .c_longlong => return .c_longlong_type, - .c_ulonglong => return .c_ulonglong_type, .anyopaque => return .anyopaque_type, .bool => return .bool_type, .void => return .void_type, @@ -33664,6 +33634,8 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .enum_literal, .type_info, => true, + + .var_args_param => unreachable, }, .struct_type => @panic("TODO"), .union_type => @panic("TODO"), @@ -33686,17 +33658,6 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .i64, .u128, .i128, - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, .anyopaque, .bool, .void, diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index 2846633275..4671866197 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -4326,7 +4326,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const atom = elf_file.getAtom(atom_index); _ = try atom.getOrCreateOffsetTableEntry(elf_file); const got_addr = @intCast(u32, atom.getOffsetTableAddress(elf_file)); - try self.genSetReg(Type.initTag(.usize), .x30, .{ .memory = got_addr }); + try self.genSetReg(Type.usize, .x30, .{ .memory = got_addr }); } else if (self.bin_file.cast(link.File.MachO)) |macho_file| { const atom = try macho_file.getOrCreateAtomForDecl(func.owner_decl); const sym_index = macho_file.getAtom(atom).getSymbolIndex().?; @@ -4353,7 +4353,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const got_addr = p9.bases.data; const got_index = decl_block.got_index.?; const fn_got_addr = got_addr + got_index * ptr_bytes; - try self.genSetReg(Type.initTag(.usize), .x30, .{ .memory = fn_got_addr }); + try self.genSetReg(Type.usize, .x30, .{ .memory = fn_got_addr }); } else unreachable; _ = try self.addInst(.{ @@ -5968,7 +5968,7 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void { const stack_offset = try self.allocMem(ptr_bytes * 2, ptr_bytes * 2, inst); try self.genSetStack(ptr_ty, stack_offset, ptr); - try self.genSetStack(Type.initTag(.usize), stack_offset - ptr_bytes, .{ .immediate = array_len }); + try self.genSetStack(Type.usize, stack_offset - ptr_bytes, .{ .immediate = array_len }); break :result MCValue{ .stack_offset = stack_offset }; }; return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index eb8cfa9707..ca4a3826aa 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -4308,7 +4308,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const atom = elf_file.getAtom(atom_index); _ = try 
atom.getOrCreateOffsetTableEntry(elf_file); const got_addr = @intCast(u32, atom.getOffsetTableAddress(elf_file)); - try self.genSetReg(Type.initTag(.usize), .lr, .{ .memory = got_addr }); + try self.genSetReg(Type.usize, .lr, .{ .memory = got_addr }); } else if (self.bin_file.cast(link.File.MachO)) |_| { unreachable; // unsupported architecture for MachO } else { @@ -4326,7 +4326,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier assert(ty.zigTypeTag(mod) == .Pointer); const mcv = try self.resolveInst(callee); - try self.genSetReg(Type.initTag(.usize), .lr, mcv); + try self.genSetReg(Type.usize, .lr, mcv); } // TODO: add Instruction.supportedOn @@ -5694,7 +5694,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void if (extra_offset) { const offset = if (off <= math.maxInt(u8)) blk: { break :blk Instruction.ExtraLoadStoreOffset.imm(@intCast(u8, off)); - } else Instruction.ExtraLoadStoreOffset.reg(try self.copyToTmpRegister(Type.initTag(.usize), MCValue{ .immediate = off })); + } else Instruction.ExtraLoadStoreOffset.reg(try self.copyToTmpRegister(Type.usize, MCValue{ .immediate = off })); _ = try self.addInst(.{ .tag = tag, @@ -5710,7 +5710,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void } else { const offset = if (off <= math.maxInt(u12)) blk: { break :blk Instruction.Offset.imm(@intCast(u12, off)); - } else Instruction.Offset.reg(try self.copyToTmpRegister(Type.initTag(.usize), MCValue{ .immediate = off }), .none); + } else Instruction.Offset.reg(try self.copyToTmpRegister(Type.usize, MCValue{ .immediate = off }), .none); _ = try self.addInst(.{ .tag = tag, @@ -5916,7 +5916,7 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void { const stack_offset = try self.allocMem(8, 8, inst); try self.genSetStack(ptr_ty, stack_offset, ptr); - try self.genSetStack(Type.initTag(.usize), stack_offset - 4, .{ .immediate = array_len }); + try self.genSetStack(Type.usize, stack_offset - 4, .{ .immediate = array_len }); break :result MCValue{ .stack_offset = stack_offset }; }; return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index 4ab798fe9c..488b937141 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -1749,7 +1749,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const atom = elf_file.getAtom(atom_index); _ = try atom.getOrCreateOffsetTableEntry(elf_file); const got_addr = @intCast(u32, atom.getOffsetTableAddress(elf_file)); - try self.genSetReg(Type.initTag(.usize), .ra, .{ .memory = got_addr }); + try self.genSetReg(Type.usize, .ra, .{ .memory = got_addr }); _ = try self.addInst(.{ .tag = .jalr, .data = .{ .i_type = .{ diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index e79a216315..343cc2f90e 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -883,7 +883,7 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void { const stack_offset = try self.allocMem(inst, ptr_bytes * 2, ptr_bytes * 2); try self.genSetStack(ptr_ty, stack_offset, ptr); - try self.genSetStack(Type.initTag(.usize), stack_offset - ptr_bytes, .{ .immediate = array_len }); + try self.genSetStack(Type.usize, stack_offset - ptr_bytes, .{ .immediate = array_len }); break :result MCValue{ .stack_offset = stack_offset }; }; return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); @@ -1352,7 +1352,7 @@ 
fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier break :blk @intCast(u32, atom.getOffsetTableAddress(elf_file)); } else unreachable; - try self.genSetReg(Type.initTag(.usize), .o7, .{ .memory = got_addr }); + try self.genSetReg(Type.usize, .o7, .{ .memory = got_addr }); _ = try self.addInst(.{ .tag = .jmpl, diff --git a/src/codegen.zig b/src/codegen.zig index 6846bebe6b..295409781e 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -371,7 +371,7 @@ pub fn generateSymbol( // generate length switch (try generateSymbol(bin_file, src_loc, .{ - .ty = Type.initTag(.usize), + .ty = Type.usize, .val = slice.len, }, code, debug_output, reloc_info)) { .ok => {}, diff --git a/src/codegen/c/type.zig b/src/codegen/c/type.zig index b964d16bd9..d248753670 100644 --- a/src/codegen/c/type.zig +++ b/src/codegen/c/type.zig @@ -1359,18 +1359,18 @@ pub const CType = extern union { self.* = undefined; if (!ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) self.init(.void) - else if (ty.isAbiInt(mod)) switch (ty.tag()) { - .usize => self.init(.uintptr_t), - .isize => self.init(.intptr_t), - .c_char => self.init(.char), - .c_short => self.init(.short), - .c_ushort => self.init(.@"unsigned short"), - .c_int => self.init(.int), - .c_uint => self.init(.@"unsigned int"), - .c_long => self.init(.long), - .c_ulong => self.init(.@"unsigned long"), - .c_longlong => self.init(.@"long long"), - .c_ulonglong => self.init(.@"unsigned long long"), + else if (ty.isAbiInt(mod)) switch (ty.ip_index) { + .usize_type => self.init(.uintptr_t), + .isize_type => self.init(.intptr_t), + .c_char_type => self.init(.char), + .c_short_type => self.init(.short), + .c_ushort_type => self.init(.@"unsigned short"), + .c_int_type => self.init(.int), + .c_uint_type => self.init(.@"unsigned int"), + .c_long_type => self.init(.long), + .c_ulong_type => self.init(.@"unsigned long"), + .c_longlong_type => self.init(.@"long long"), + .c_ulonglong_type => self.init(.@"unsigned long long"), else => switch (tagFromIntInfo(ty.intInfo(mod))) { .void => unreachable, else => |t| self.init(t), diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 41abbde1a0..90c2d93458 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -2499,7 +2499,7 @@ pub const DeclGen = struct { const elem_ty = ptr_ty.elemType2(mod); // use elemType() so that we get T for *[N]T. const elem_ty_ref = try self.resolveType(elem_ty, .direct); const elem_ptr_ty_ref = try self.spv.ptrType(elem_ty_ref, spvStorageClass(ptr_ty.ptrAddressSpace())); - if (ptr_ty.isSinglePointer()) { + if (ptr_ty.isSinglePointer(mod)) { // Pointer-to-array. In this case, the resulting pointer is not of the same type // as the ptr_ty (we want a *T, not a *[N]T), and hence we need to use accessChain. return try self.accessChain(elem_ptr_ty_ref, ptr_id, &.{index_id}); diff --git a/src/type.zig b/src/type.zig index 205a732710..4e8d0b9e20 100644 --- a/src/type.zig +++ b/src/type.zig @@ -121,17 +121,6 @@ pub const Type = struct { .i64, .u128, .i128, - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, => return .Int, .error_set, @@ -621,19 +610,6 @@ pub const Type = struct { switch (a.tag()) { .generic_poison => unreachable, - // Detect that e.g. u64 != usize, even if the bits match on a particular target. 
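// [editor's aside, not part of the patch] The tag cases deleted from `eql`
// around here were only needed to keep e.g. `u64` and `usize` distinct when
// their bit widths coincide on a target. Once these types are interned,
// identity is decided by comparing InternPool indices before any legacy tag
// is consulted, so the special cases become redundant. A self-contained
// model of that dispatch (all names below are hypothetical, for
// illustration only):
const std = @import("std");

const IpIndex = enum { none, usize_type, u64_type };

const TypeModel = struct {
    ip_index: IpIndex,
    legacy_tag: u32 = 0,

    fn eqlModel(a: TypeModel, b: TypeModel) bool {
        // Interned types compare by index: usize and u64 intern to
        // different indices even when both are 64 bits wide.
        if (a.ip_index != .none or b.ip_index != .none)
            return a.ip_index == b.ip_index;
        // Both legacy: fall back to the old tag/payload comparison.
        return a.legacy_tag == b.legacy_tag;
    }
};

test "u64 != usize without a named-int tag check" {
    const usize_ty = TypeModel{ .ip_index = .usize_type };
    const u64_ty = TypeModel{ .ip_index = .u64_type };
    try std.testing.expect(!usize_ty.eqlModel(u64_ty));
    try std.testing.expect(usize_ty.eqlModel(usize_ty));
}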
- .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, - .bool, .void, .type, @@ -1013,22 +989,6 @@ pub const Type = struct { switch (ty.tag()) { .generic_poison => unreachable, - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, - => |ty_tag| { - std.hash.autoHash(hasher, std.builtin.TypeId.Int); - std.hash.autoHash(hasher, ty_tag); - }, - .bool => std.hash.autoHash(hasher, std.builtin.TypeId.Bool), .void => std.hash.autoHash(hasher, std.builtin.TypeId.Void), .type => std.hash.autoHash(hasher, std.builtin.TypeId.Type), @@ -1345,17 +1305,6 @@ pub const Type = struct { .i64, .u128, .i128, - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, .anyopaque, .bool, .void, @@ -1631,17 +1580,6 @@ pub const Type = struct { .i64, .u128, .i128, - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, .anyopaque, .bool, .void, @@ -2020,17 +1958,6 @@ pub const Type = struct { .i64, .u128, .i128, - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, .anyopaque, .bool, .void, @@ -2322,17 +2249,6 @@ pub const Type = struct { .i32 => return Value.initTag(.i32_type), .u64 => return Value.initTag(.u64_type), .i64 => return Value.initTag(.i64_type), - .usize => return Value.initTag(.usize_type), - .isize => return Value.initTag(.isize_type), - .c_char => return Value.initTag(.c_char_type), - .c_short => return Value.initTag(.c_short_type), - .c_ushort => return Value.initTag(.c_ushort_type), - .c_int => return Value.initTag(.c_int_type), - .c_uint => return Value.initTag(.c_uint_type), - .c_long => return Value.initTag(.c_long_type), - .c_ulong => return Value.initTag(.c_ulong_type), - .c_longlong => return Value.initTag(.c_longlong_type), - .c_ulonglong => return Value.initTag(.c_ulonglong_type), .anyopaque => return Value.initTag(.anyopaque_type), .bool => return Value.initTag(.bool_type), .void => return Value.initTag(.void_type), @@ -2462,17 +2378,6 @@ pub const Type = struct { .i64, .u128, .i128, - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, .bool, .anyerror, .const_slice_u8, @@ -2713,6 +2618,8 @@ pub const Type = struct { .type_info, .generic_poison, => false, + + .var_args_param => unreachable, }, .struct_type => @panic("TODO"), .union_type => @panic("TODO"), @@ -2734,17 +2641,6 @@ pub const Type = struct { .i64, .u128, .i128, - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, .bool, .void, .manyptr_u8, @@ -3040,7 +2936,7 @@ pub const Type = struct { .export_options, .extern_options, .@"anyframe", - => return AbiAlignmentAdvanced{ .scalar = @divExact(target.cpu.arch.ptrBitWidth(), 8) }, + => return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }, .c_char => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.char) }, .c_short => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.short) }, @@ -3089,6 +2985,7 @@ pub const Type = struct { .noreturn => unreachable, .generic_poison => unreachable, + .var_args_param => unreachable, }, .struct_type => @panic("TODO"), .union_type => @panic("TODO"), @@ -3130,8 +3027,6 @@ pub 
const Type = struct { return AbiAlignmentAdvanced{ .scalar = target_util.defaultFunctionAlignment(target) }; }, - .isize, - .usize, .single_const_pointer_to_comptime_int, .const_slice_u8, .const_slice_u8_sentinel_0, @@ -3153,16 +3048,6 @@ pub const Type = struct { .anyframe_T, => return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }, - .c_char => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.char) }, - .c_short => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.short) }, - .c_ushort => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.ushort) }, - .c_int => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.int) }, - .c_uint => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.uint) }, - .c_long => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.long) }, - .c_ulong => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.ulong) }, - .c_longlong => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.longlong) }, - .c_ulonglong => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.ulonglong) }, - // TODO revisit this when we have the concept of the error tag type .anyerror_void_error_union, .anyerror, @@ -3491,7 +3376,7 @@ pub const Type = struct { .usize, .isize, .@"anyframe", - => return AbiSizeAdvanced{ .scalar = @divExact(target.cpu.arch.ptrBitWidth(), 8) }, + => return AbiSizeAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }, .c_char => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.char) }, .c_short => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.short) }, @@ -3524,6 +3409,7 @@ pub const Type = struct { .type_info => unreachable, .noreturn => unreachable, .generic_poison => unreachable, + .var_args_param => unreachable, }, .struct_type => @panic("TODO"), .union_type => @panic("TODO"), @@ -3666,8 +3552,6 @@ pub const Type = struct { return AbiSizeAdvanced{ .scalar = result }; }, - .isize, - .usize, .@"anyframe", .anyframe_T, .optional_single_const_pointer, @@ -3694,16 +3578,6 @@ pub const Type = struct { else => return AbiSizeAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }, }, - .c_char => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.char) }, - .c_short => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.short) }, - .c_ushort => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.ushort) }, - .c_int => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.int) }, - .c_uint => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.uint) }, - .c_long => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.long) }, - .c_ulong => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.ulong) }, - .c_longlong => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.longlong) }, - .c_ulonglong => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.ulonglong) }, - // TODO revisit this when we have the concept of the error tag type .anyerror_void_error_union, .anyerror, @@ -3856,7 +3730,7 @@ pub const Type = struct { .usize, .isize, .@"anyframe", - => return target.cpu.arch.ptrBitWidth(), + => return target.ptrBitWidth(), .c_char => return target.c_type_bit_size(.char), .c_short => return target.c_type_bit_size(.short), @@ -3896,6 +3770,7 @@ pub const Type = struct { .export_options => unreachable, // missing call to resolveTypeFields .extern_options => unreachable, // missing call to resolveTypeFields .type_info => unreachable, // missing call to 
resolveTypeFields + .var_args_param => unreachable, }, .struct_type => @panic("TODO"), .union_type => @panic("TODO"), @@ -4000,8 +3875,6 @@ pub const Type = struct { return payload.len * 8 * elem_size + elem_bit_size; }, - .isize, - .usize, .@"anyframe", .anyframe_T, => return target.ptrBitWidth(), @@ -4040,16 +3913,6 @@ pub const Type = struct { .manyptr_const_u8_sentinel_0, => return target.ptrBitWidth(), - .c_char => return target.c_type_bit_size(.char), - .c_short => return target.c_type_bit_size(.short), - .c_ushort => return target.c_type_bit_size(.ushort), - .c_int => return target.c_type_bit_size(.int), - .c_uint => return target.c_type_bit_size(.uint), - .c_long => return target.c_type_bit_size(.long), - .c_ulong => return target.c_type_bit_size(.ulong), - .c_longlong => return target.c_type_bit_size(.longlong), - .c_ulonglong => return target.c_type_bit_size(.ulonglong), - .error_set, .error_set_single, .anyerror_void_error_union, @@ -4876,12 +4739,6 @@ pub const Type = struct { }; return switch (ty.tag()) { .i8, - .isize, - .c_char, - .c_short, - .c_int, - .c_long, - .c_longlong, .i16, .i32, .i64, @@ -4903,11 +4760,6 @@ pub const Type = struct { else => return false, }; return switch (ty.tag()) { - .usize, - .c_ushort, - .c_uint, - .c_ulong, - .c_ulonglong, .u1, .u8, .u16, @@ -4938,13 +4790,26 @@ pub const Type = struct { if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) { .int_type => |int_type| return int_type, - .ptr_type => @panic("TODO"), - .array_type => @panic("TODO"), + .ptr_type => unreachable, + .array_type => unreachable, .vector_type => @panic("TODO"), - .optional_type => @panic("TODO"), - .error_union_type => @panic("TODO"), - .simple_type => @panic("TODO"), - .struct_type => unreachable, + .optional_type => unreachable, + .error_union_type => unreachable, + .simple_type => |t| switch (t) { + .usize => return .{ .signedness = .unsigned, .bits = target.ptrBitWidth() }, + .isize => return .{ .signedness = .signed, .bits = target.ptrBitWidth() }, + .c_char => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.char) }, + .c_short => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.short) }, + .c_ushort => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ushort) }, + .c_int => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.int) }, + .c_uint => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.uint) }, + .c_long => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.long) }, + .c_ulong => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ulong) }, + .c_longlong => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.longlong) }, + .c_ulonglong => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ulonglong) }, + else => unreachable, + }, + .struct_type => @panic("TODO"), .union_type => unreachable, .simple_value => unreachable, .extern_func => unreachable, @@ -4965,17 +4830,6 @@ pub const Type = struct { .i64 => return .{ .signedness = .signed, .bits = 64 }, .u128 => return .{ .signedness = .unsigned, .bits = 128 }, .i128 => return .{ .signedness = .signed, .bits = 128 }, - .usize => return .{ .signedness = .unsigned, .bits = target.ptrBitWidth() }, - .isize => return .{ .signedness = .signed, .bits = target.ptrBitWidth() }, - .c_char => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.char) }, - .c_short => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.short) }, - .c_ushort 
=> return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ushort) }, - .c_int => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.int) }, - .c_uint => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.uint) }, - .c_long => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.long) }, - .c_ulong => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ulong) }, - .c_longlong => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.longlong) }, - .c_ulonglong => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ulonglong) }, .enum_full, .enum_nonexhaustive => ty = ty.cast(Payload.EnumFull).?.data.tag_ty, .enum_numbered => ty = ty.castTag(.enum_numbered).?.data.tag_ty, @@ -5003,19 +4857,19 @@ pub const Type = struct { }; } - pub fn isNamedInt(self: Type) bool { - return switch (self.tag()) { - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, + pub fn isNamedInt(ty: Type) bool { + return switch (ty.ip_index) { + .usize_type, + .isize_type, + .c_char_type, + .c_short_type, + .c_ushort_type, + .c_int_type, + .c_uint_type, + .c_long_type, + .c_ulong_type, + .c_longlong_type, + .c_ulonglong_type, => true, else => false, @@ -5180,17 +5034,6 @@ pub const Type = struct { .i64, .u128, .i128, - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, => true, else => false, @@ -5284,17 +5127,6 @@ pub const Type = struct { .i64, .u128, .i128, - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, .bool, .type, .anyerror, @@ -5502,6 +5334,8 @@ pub const Type = struct { .enum_literal, .type_info, => true, + + .var_args_param => unreachable, }, .struct_type => @panic("TODO"), .union_type => @panic("TODO"), @@ -5524,17 +5358,6 @@ pub const Type = struct { .i64, .u128, .i128, - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, .anyopaque, .bool, .void, @@ -6372,17 +6195,6 @@ pub const Type = struct { i64, u128, i128, - usize, - isize, - c_char, - c_short, - c_ushort, - c_int, - c_uint, - c_long, - c_ulong, - c_longlong, - c_ulonglong, anyopaque, bool, void, @@ -6480,17 +6292,6 @@ pub const Type = struct { .i64, .u128, .i128, - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, .anyopaque, .bool, .void, @@ -6859,9 +6660,13 @@ pub const Type = struct { pub const @"u29" = initTag(.u29); pub const @"u32" = initTag(.u32); pub const @"u64" = initTag(.u64); + pub const @"u128" = initTag(.u128); + pub const @"i8" = initTag(.i8); + pub const @"i16" = initTag(.i16); pub const @"i32" = initTag(.i32); pub const @"i64" = initTag(.i64); + pub const @"i128" = initTag(.i128); pub const @"f16": Type = .{ .ip_index = .f16_type, .legacy = undefined }; pub const @"f32": Type = .{ .ip_index = .f32_type, .legacy = undefined }; @@ -6870,8 +6675,8 @@ pub const Type = struct { pub const @"f128": Type = .{ .ip_index = .f128_type, .legacy = undefined }; pub const @"bool" = initTag(.bool); - pub const @"usize" = initTag(.usize); - pub const @"isize" = initTag(.isize); + pub const @"usize": Type = .{ .ip_index = .usize_type, .legacy = undefined }; + pub const @"isize": Type = .{ .ip_index = .isize_type, .legacy = undefined }; pub const @"comptime_int": Type = .{ 
.ip_index = .comptime_int_type, .legacy = undefined }; pub const @"comptime_float": Type = .{ .ip_index = .comptime_float_type, .legacy = undefined }; pub const @"void" = initTag(.void); @@ -6879,8 +6684,18 @@ pub const Type = struct { pub const @"anyerror" = initTag(.anyerror); pub const @"anyopaque" = initTag(.anyopaque); pub const @"null" = initTag(.null); + pub const @"undefined" = initTag(.undefined); pub const @"noreturn" = initTag(.noreturn); + pub const @"c_char": Type = .{ .ip_index = .c_char_type, .legacy = undefined }; + pub const @"c_short": Type = .{ .ip_index = .c_short_type, .legacy = undefined }; + pub const @"c_ushort": Type = .{ .ip_index = .c_ushort_type, .legacy = undefined }; + pub const @"c_int": Type = .{ .ip_index = .c_int_type, .legacy = undefined }; + pub const @"c_uint": Type = .{ .ip_index = .c_uint_type, .legacy = undefined }; + pub const @"c_long": Type = .{ .ip_index = .c_long_type, .legacy = undefined }; + pub const @"c_ulong": Type = .{ .ip_index = .c_ulong_type, .legacy = undefined }; + pub const @"c_longlong": Type = .{ .ip_index = .c_longlong_type, .legacy = undefined }; + pub const @"c_ulonglong": Type = .{ .ip_index = .c_ulonglong_type, .legacy = undefined }; pub const @"c_longdouble": Type = .{ .ip_index = .c_longdouble_type, .legacy = undefined }; pub const err_int = Type.u16; diff --git a/src/value.zig b/src/value.zig index 8912209d5e..b0484dfc76 100644 --- a/src/value.zig +++ b/src/value.zig @@ -951,45 +951,45 @@ pub const Value = struct { } return switch (self.tag()) { .ty => self.castTag(.ty).?.data, - .u1_type => Type.initTag(.u1), - .u8_type => Type.initTag(.u8), - .i8_type => Type.initTag(.i8), - .u16_type => Type.initTag(.u16), - .i16_type => Type.initTag(.i16), - .u29_type => Type.initTag(.u29), - .u32_type => Type.initTag(.u32), - .i32_type => Type.initTag(.i32), - .u64_type => Type.initTag(.u64), - .i64_type => Type.initTag(.i64), - .u128_type => Type.initTag(.u128), - .i128_type => Type.initTag(.i128), - .usize_type => Type.initTag(.usize), - .isize_type => Type.initTag(.isize), - .c_char_type => Type.initTag(.c_char), - .c_short_type => Type.initTag(.c_short), - .c_ushort_type => Type.initTag(.c_ushort), - .c_int_type => Type.initTag(.c_int), - .c_uint_type => Type.initTag(.c_uint), - .c_long_type => Type.initTag(.c_long), - .c_ulong_type => Type.initTag(.c_ulong), - .c_longlong_type => Type.initTag(.c_longlong), - .c_ulonglong_type => Type.initTag(.c_ulonglong), + .u1_type => Type.u1, + .u8_type => Type.u8, + .i8_type => Type.i8, + .u16_type => Type.u16, + .i16_type => Type.i16, + .u29_type => Type.u29, + .u32_type => Type.u32, + .i32_type => Type.i32, + .u64_type => Type.u64, + .i64_type => Type.i64, + .u128_type => Type.u128, + .i128_type => Type.i128, + .usize_type => Type.usize, + .isize_type => Type.isize, + .c_char_type => Type.c_char, + .c_short_type => Type.c_short, + .c_ushort_type => Type.c_ushort, + .c_int_type => Type.c_int, + .c_uint_type => Type.c_uint, + .c_long_type => Type.c_long, + .c_ulong_type => Type.c_ulong, + .c_longlong_type => Type.c_longlong, + .c_ulonglong_type => Type.c_ulonglong, .c_longdouble_type => Type.c_longdouble, .f16_type => Type.f16, .f32_type => Type.f32, .f64_type => Type.f64, .f80_type => Type.f80, .f128_type => Type.f128, - .anyopaque_type => Type.initTag(.anyopaque), - .bool_type => Type.initTag(.bool), - .void_type => Type.initTag(.void), - .type_type => Type.initTag(.type), - .anyerror_type => Type.initTag(.anyerror), - .comptime_int_type => Type.initTag(.comptime_int), + .anyopaque_type => 
Type.anyopaque, + .bool_type => Type.bool, + .void_type => Type.void, + .type_type => Type.type, + .anyerror_type => Type.anyerror, + .comptime_int_type => Type.comptime_int, .comptime_float_type => Type.comptime_float, - .noreturn_type => Type.initTag(.noreturn), - .null_type => Type.initTag(.null), - .undefined_type => Type.initTag(.undefined), + .noreturn_type => Type.noreturn, + .null_type => Type.null, + .undefined_type => Type.undefined, .single_const_pointer_to_comptime_int_type => Type.initTag(.single_const_pointer_to_comptime_int), .anyframe_type => Type.initTag(.@"anyframe"), .const_slice_u8_type => Type.initTag(.const_slice_u8), -- cgit v1.2.3 From 836d8a1f64cb811641e621799429c54f222717eb Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 3 May 2023 18:11:07 -0700 Subject: stage2: move most simple types to InternPool --- src/Air.zig | 4 +- src/Module.zig | 51 +-- src/Sema.zig | 503 ++++++++++++----------------- src/arch/aarch64/CodeGen.zig | 7 +- src/arch/arm/CodeGen.zig | 7 +- src/arch/sparc64/CodeGen.zig | 2 +- src/arch/wasm/CodeGen.zig | 10 +- src/arch/x86_64/CodeGen.zig | 8 +- src/codegen/c.zig | 16 +- src/codegen/llvm.zig | 38 ++- src/codegen/spirv.zig | 6 +- src/print_air.zig | 1 - src/type.zig | 739 ++++++++----------------------------------- src/value.zig | 28 +- 14 files changed, 435 insertions(+), 985 deletions(-) (limited to 'src/arch') diff --git a/src/Air.zig b/src/Air.zig index be3ae119e4..1bc9d949e2 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -1333,7 +1333,7 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index, ip: InternPool) Type { .ret_load, .unreach, .trap, - => return Type.initTag(.noreturn), + => return Type.noreturn, .breakpoint, .dbg_stmt, @@ -1370,7 +1370,7 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index, ip: InternPool) Type { .wasm_memory_grow => return Type.i32, .wasm_memory_size => return Type.u32, - .bool_to_int => return Type.initTag(.u1), + .bool_to_int => return Type.u1, .tag_name, .error_name => return Type.initTag(.const_slice_u8_sentinel_0), diff --git a/src/Module.zig b/src/Module.zig index a4ae107bed..77c20fbcc6 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -1005,7 +1005,7 @@ pub const Struct = struct { /// If the layout is packed, this is the backing integer type of the packed struct. /// Whether zig chooses this type or the user specifies it, it is stored here. /// This will be set to the noreturn type until status is `have_layout`. 
- backing_int_ty: Type = Type.initTag(.noreturn), + backing_int_ty: Type = Type.noreturn, status: enum { none, field_types_wip, @@ -1705,31 +1705,34 @@ pub const Fn = struct { is_resolved: bool = false, pub fn addErrorSet(self: *InferredErrorSet, gpa: Allocator, err_set_ty: Type) !void { - switch (err_set_ty.tag()) { - .error_set => { - const names = err_set_ty.castTag(.error_set).?.data.names.keys(); - for (names) |name| { - try self.errors.put(gpa, name, {}); - } - }, - .error_set_single => { - const name = err_set_ty.castTag(.error_set_single).?.data; - try self.errors.put(gpa, name, {}); - }, - .error_set_inferred => { - const ies = err_set_ty.castTag(.error_set_inferred).?.data; - try self.inferred_error_sets.put(gpa, ies, {}); + switch (err_set_ty.ip_index) { + .anyerror_type => { + self.is_anyerror = true; }, - .error_set_merged => { - const names = err_set_ty.castTag(.error_set_merged).?.data.keys(); - for (names) |name| { + .none => switch (err_set_ty.tag()) { + .error_set => { + const names = err_set_ty.castTag(.error_set).?.data.names.keys(); + for (names) |name| { + try self.errors.put(gpa, name, {}); + } + }, + .error_set_single => { + const name = err_set_ty.castTag(.error_set_single).?.data; try self.errors.put(gpa, name, {}); - } - }, - .anyerror => { - self.is_anyerror = true; + }, + .error_set_inferred => { + const ies = err_set_ty.castTag(.error_set_inferred).?.data; + try self.inferred_error_sets.put(gpa, ies, {}); + }, + .error_set_merged => { + const names = err_set_ty.castTag(.error_set_merged).?.data.keys(); + for (names) |name| { + try self.errors.put(gpa, name, {}); + } + }, + else => unreachable, }, - else => unreachable, + else => @panic("TODO"), } } }; @@ -4566,7 +4569,7 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void { const struct_obj = try new_decl_arena_allocator.create(Module.Struct); const struct_ty = try Type.Tag.@"struct".create(new_decl_arena_allocator, struct_obj); const struct_val = try Value.Tag.ty.create(new_decl_arena_allocator, struct_ty); - const ty_ty = comptime Type.initTag(.type); + const ty_ty = comptime Type.type; struct_obj.* = .{ .owner_decl = undefined, // set below .fields = .{}, diff --git a/src/Sema.zig b/src/Sema.zig index d03460385e..ea8258717b 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -1776,7 +1776,7 @@ fn analyzeAsType( src: LazySrcLoc, air_inst: Air.Inst.Ref, ) !Type { - const wanted_type = Type.initTag(.type); + const wanted_type = Type.type; const coerced_inst = try sema.coerce(block, wanted_type, air_inst, src); const val = try sema.resolveConstValue(block, src, coerced_inst, "types must be comptime-known"); const ty = val.toType(); @@ -3132,7 +3132,7 @@ fn zirUnionDecl( errdefer mod.abortAnonDecl(new_decl_index); union_obj.* = .{ .owner_decl = new_decl_index, - .tag_ty = Type.initTag(.null), + .tag_ty = Type.null, .fields = .{}, .zir_index = inst, .layout = small.layout, @@ -6362,7 +6362,7 @@ fn zirCall( if (arg_index >= fn_params_len) break :inst Air.Inst.Ref.var_args_param_type; - if (func_ty_info.param_types[arg_index].tag() == .generic_poison) + if (func_ty_info.param_types[arg_index].isGenericPoison()) break :inst Air.Inst.Ref.generic_poison_type; break :inst try sema.addType(func_ty_info.param_types[arg_index]); @@ -8175,7 +8175,7 @@ fn zirMergeErrorSets(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr return sema.fail(block, rhs_src, "expected error set type, found '{}'", .{rhs_ty.fmt(sema.mod)}); // Anything merged with anyerror is anyerror. 
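// [editor's aside] The `addErrorSet` rewrite above shows the dispatch shape
// this series uses everywhere: switch on `ip_index` first, handle the
// already-interned cases (here `.anyerror_type`) directly, send `.none` to
// the old tag-based switch, and route any other interned index to a TODO
// until that corner migrates. A minimal standalone model (hypothetical enum
// and function names):
const IpIndex = enum { none, anyerror_type, other_interned };
const LegacyTag = enum { error_set, error_set_single, error_set_merged, error_set_inferred };
const Class = enum { any_error, concrete_set, inferred_set };

fn classifySketch(ip_index: IpIndex, legacy_tag: LegacyTag) Class {
    return switch (ip_index) {
        // Interned: the index alone identifies the type.
        .anyerror_type => .any_error,
        // Not interned yet: consult the legacy tag as before.
        .none => switch (legacy_tag) {
            .error_set, .error_set_single, .error_set_merged => .concrete_set,
            .error_set_inferred => .inferred_set,
        },
        // Interned but not yet handled on this path.
        .other_interned => @panic("TODO"),
    };
}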
- if (lhs_ty.tag() == .anyerror or rhs_ty.tag() == .anyerror) { + if (lhs_ty.ip_index == .anyerror_type or rhs_ty.ip_index == .anyerror_type) { return Air.Inst.Ref.anyerror_type; } @@ -8206,7 +8206,7 @@ fn zirEnumLiteral(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const inst_data = sema.code.instructions.items(.data)[inst].str_tok; const duped_name = try sema.arena.dupe(u8, inst_data.get(sema.code)); return sema.addConstant( - Type.initTag(.enum_literal), + .{ .ip_index = .enum_literal_type, .legacy = undefined }, try Value.Tag.enum_literal.create(sema.arena, duped_name), ); } @@ -8503,6 +8503,7 @@ fn analyzeErrUnionPayload( operand_src: LazySrcLoc, safety_check: bool, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const payload_ty = err_union_ty.errorUnionPayload(); if (try sema.resolveDefinedValue(block, operand_src, operand)) |val| { if (val.getError()) |name| { @@ -8516,7 +8517,7 @@ fn analyzeErrUnionPayload( // If the error set has no fields then no safety check is needed. if (safety_check and block.wantSafety() and - !err_union_ty.errorUnionSet().errorSetIsEmpty()) + !err_union_ty.errorUnionSet().errorSetIsEmpty(mod)) { try sema.panicUnwrapError(block, operand, .unwrap_errunion_err, .is_non_err); } @@ -8602,7 +8603,7 @@ fn analyzeErrUnionPayloadPtr( // If the error set has no fields then no safety check is needed. if (safety_check and block.wantSafety() and - !err_union_ty.errorUnionSet().errorSetIsEmpty()) + !err_union_ty.errorUnionSet().errorSetIsEmpty(mod)) { try sema.panicUnwrapError(block, operand, .unwrap_errunion_err_ptr, .is_non_err_ptr); } @@ -8701,7 +8702,7 @@ fn zirFunc( break :blk ret_ty; } else |err| switch (err) { error.GenericPoison => { - break :blk Type.initTag(.generic_poison); + break :blk Type.generic_poison; }, else => |e| return e, } @@ -8778,7 +8779,7 @@ fn resolveGenericBody( }; switch (err) { error.GenericPoison => { - if (dest_ty.tag() == .type) { + if (dest_ty.ip_index == .type_type) { return Value.initTag(.generic_poison_type); } else { return Value.initTag(.generic_poison); @@ -9319,7 +9320,7 @@ fn zirParam( // We result the param instruction with a poison value and // insert an anytype parameter. try block.params.append(sema.gpa, .{ - .ty = Type.initTag(.generic_poison), + .ty = Type.generic_poison, .is_comptime = comptime_syntax, .name = param_name, }); @@ -9340,7 +9341,7 @@ fn zirParam( // We result the param instruction with a poison value and // insert an anytype parameter. try block.params.append(sema.gpa, .{ - .ty = Type.initTag(.generic_poison), + .ty = Type.generic_poison, .is_comptime = comptime_syntax, .name = param_name, }); @@ -9438,7 +9439,7 @@ fn zirParamAnytype( // We are evaluating a generic function without any comptime args provided. 
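// [editor's aside] `generic_poison` acts as a placeholder type: parameters
// of a generic function whose types cannot be resolved yet are appended
// with the poison type, and later checks ask `isGenericPoison()` (as in the
// `zirCall` hunk earlier) rather than comparing a legacy tag. A tiny model
// of the append that follows (names are illustrative):
const std = @import("std");

const ParamModel = struct {
    is_poison: bool, // stands in for `ty.isGenericPoison()`
    is_comptime: bool,
    name: []const u8,
};

fn appendPoisonParam(
    params: *std.ArrayList(ParamModel),
    name: []const u8,
    is_comptime: bool,
) !void {
    // The type is unknown until the generic function is instantiated,
    // so record the placeholder and continue analysis.
    try params.append(.{ .is_poison = true, .is_comptime = is_comptime, .name = name });
}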
try block.params.append(sema.gpa, .{ - .ty = Type.initTag(.generic_poison), + .ty = Type.generic_poison, .is_comptime = comptime_syntax, .name = param_name, }); @@ -18877,7 +18878,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in }, .ErrorSet => { const payload_val = union_val.val.optionalValue(mod) orelse - return sema.addType(Type.initTag(.anyerror)); + return sema.addType(Type.anyerror); const slice_val = payload_val.castTag(.slice).?.data; const len = try sema.usizeCast(block, src, slice_val.len.toUnsignedInt(mod)); @@ -19150,7 +19151,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in errdefer mod.abortAnonDecl(new_decl_index); union_obj.* = .{ .owner_decl = new_decl_index, - .tag_ty = Type.initTag(.null), + .tag_ty = Type.null, .fields = .{}, .zir_index = inst, .layout = layout, @@ -22697,7 +22698,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A extra_index += 1; const ret_ty_tv = sema.resolveInstConst(block, ret_src, ret_ty_ref, "return type must be comptime-known") catch |err| switch (err) { error.GenericPoison => { - break :blk Type.initTag(.generic_poison); + break :blk Type.generic_poison; }, else => |e| return e, }; @@ -23022,7 +23023,7 @@ fn zirBuiltinExtern( new_decl.src_line = sema.owner_decl.src_line; // We only access this decl through the decl_ref with the correct type created // below, so this type doesn't matter - new_decl.ty = Type.Tag.init(.anyopaque); + new_decl.ty = Type.anyopaque; new_decl.val = try Value.Tag.variable.create(new_decl_arena_allocator, new_var); new_decl.@"align" = 0; new_decl.@"linksection" = null; @@ -24380,9 +24381,8 @@ fn fieldCallBind( decl_type.fnParamLen() >= 1) { const first_param_type = decl_type.fnParamType(0); - const first_param_tag = first_param_type.tag(); // zig fmt: off - if (first_param_tag == .generic_poison or ( + if (first_param_type.isGenericPoison() or ( first_param_type.zigTypeTag(mod) == .Pointer and (first_param_type.ptrSize() == .One or first_param_type.ptrSize() == .C) and @@ -25535,10 +25535,7 @@ fn coerceExtra( inst_src: LazySrcLoc, opts: CoerceOpts, ) CoersionError!Air.Inst.Ref { - switch (dest_ty_unresolved.tag()) { - .generic_poison => return inst, - else => {}, - } + if (dest_ty_unresolved.isGenericPoison()) return inst; const dest_ty_src = inst_src; // TODO better source location const dest_ty = try sema.resolveTypeFields(dest_ty_unresolved); const inst_ty = try sema.resolveTypeFields(sema.typeOf(inst)); @@ -25577,7 +25574,8 @@ fn coerceExtra( // cast from ?*T and ?[*]T to ?*anyopaque // but don't do it if the source type is a double pointer - if (dest_ty.isPtrLikeOptional(mod) and dest_ty.elemType2(mod).tag() == .anyopaque and + if (dest_ty.isPtrLikeOptional(mod) and + dest_ty.elemType2(mod).ip_index == .anyopaque_type and inst_ty.isPtrAtRuntime(mod)) anyopaque_check: { if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :optional; @@ -25715,7 +25713,7 @@ fn coerceExtra( // cast from *T and [*]T to *anyopaque // but don't do it if the source type is a double pointer - if (dest_info.pointee_type.tag() == .anyopaque and inst_ty.zigTypeTag(mod) == .Pointer) to_anyopaque: { + if (dest_info.pointee_type.ip_index == .anyopaque_type and inst_ty.zigTypeTag(mod) == .Pointer) to_anyopaque: { if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :pointer; const elem_ty = inst_ty.elemType2(mod); if (elem_ty.zigTypeTag(mod) == .Pointer or elem_ty.isPtrLikeOptional(mod)) { @@ -26759,6 
+26757,8 @@ fn coerceInMemoryAllowedErrorSets( dest_src: LazySrcLoc, src_src: LazySrcLoc, ) !InMemoryCoercionResult { + const mod = sema.mod; + // Coercion to `anyerror`. Note that this check can return false negatives // in case the error sets did not get resolved. if (dest_ty.isAnyError()) { @@ -26769,36 +26769,41 @@ fn coerceInMemoryAllowedErrorSets( const dst_ies = dst_payload.data; // We will make an effort to return `ok` without resolving either error set, to // avoid unnecessary "unable to resolve error set" dependency loop errors. - switch (src_ty.tag()) { - .error_set_inferred => { - // If both are inferred error sets of functions, and - // the dest includes the source function, the coercion is OK. - // This check is important because it works without forcing a full resolution - // of inferred error sets. - const src_ies = src_ty.castTag(.error_set_inferred).?.data; - - if (dst_ies.inferred_error_sets.contains(src_ies)) { - return .ok; - } - }, - .error_set_single => { - const name = src_ty.castTag(.error_set_single).?.data; - if (dst_ies.errors.contains(name)) return .ok; - }, - .error_set_merged => { - const names = src_ty.castTag(.error_set_merged).?.data.keys(); - for (names) |name| { - if (!dst_ies.errors.contains(name)) break; - } else return .ok; + switch (src_ty.ip_index) { + .none => switch (src_ty.tag()) { + .error_set_inferred => { + // If both are inferred error sets of functions, and + // the dest includes the source function, the coercion is OK. + // This check is important because it works without forcing a full resolution + // of inferred error sets. + const src_ies = src_ty.castTag(.error_set_inferred).?.data; + + if (dst_ies.inferred_error_sets.contains(src_ies)) { + return .ok; + } + }, + .error_set_single => { + const name = src_ty.castTag(.error_set_single).?.data; + if (dst_ies.errors.contains(name)) return .ok; + }, + .error_set_merged => { + const names = src_ty.castTag(.error_set_merged).?.data.keys(); + for (names) |name| { + if (!dst_ies.errors.contains(name)) break; + } else return .ok; + }, + .error_set => { + const names = src_ty.castTag(.error_set).?.data.names.keys(); + for (names) |name| { + if (!dst_ies.errors.contains(name)) break; + } else return .ok; + }, + else => unreachable, }, - .error_set => { - const names = src_ty.castTag(.error_set).?.data.names.keys(); - for (names) |name| { - if (!dst_ies.errors.contains(name)) break; - } else return .ok; + .anyerror_type => {}, + else => switch (mod.intern_pool.indexToKey(src_ty.ip_index)) { + else => @panic("TODO"), }, - .anyerror => {}, - else => unreachable, } if (dst_ies.func == sema.owner_func) { @@ -26818,79 +26823,87 @@ fn coerceInMemoryAllowedErrorSets( var missing_error_buf = std.ArrayList([]const u8).init(sema.gpa); defer missing_error_buf.deinit(); - switch (src_ty.tag()) { - .error_set_inferred => { - const src_data = src_ty.castTag(.error_set_inferred).?.data; + switch (src_ty.ip_index) { + .none => switch (src_ty.tag()) { + .error_set_inferred => { + const src_data = src_ty.castTag(.error_set_inferred).?.data; - try sema.resolveInferredErrorSet(block, src_src, src_data); - // src anyerror status might have changed after the resolution. - if (src_ty.isAnyError()) { - // dest_ty.isAnyError() == true is already checked for at this point. - return .from_anyerror; - } + try sema.resolveInferredErrorSet(block, src_src, src_data); + // src anyerror status might have changed after the resolution. 
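// [editor's aside] Note what the reshuffled code above preserves: when both
// sides are inferred error sets and the destination already records the
// source's set, coercion succeeds without resolving either one, which is
// exactly the "avoid dependency loop" fast path the original comment
// describes. Schematically (model types, not the real API):
const std = @import("std");

const IesModel = struct {
    // Inferred error sets this function's own set is known to include.
    children: std.AutoHashMapUnmanaged(*const IesModel, void) = .{},
};

fn fastPathOk(dst: *const IesModel, src: *const IesModel) ?bool {
    // Recorded containment answers the coercion without resolution.
    if (dst.children.contains(src)) return true;
    return null; // unknown: fall through to the resolving slow path
}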
+ if (src_ty.isAnyError()) { + // dest_ty.isAnyError() == true is already checked for at this point. + return .from_anyerror; + } - for (src_data.errors.keys()) |key| { - if (!dest_ty.errorSetHasField(key)) { - try missing_error_buf.append(key); + for (src_data.errors.keys()) |key| { + if (!dest_ty.errorSetHasField(key)) { + try missing_error_buf.append(key); + } } - } - if (missing_error_buf.items.len != 0) { - return InMemoryCoercionResult{ - .missing_error = try sema.arena.dupe([]const u8, missing_error_buf.items), - }; - } + if (missing_error_buf.items.len != 0) { + return InMemoryCoercionResult{ + .missing_error = try sema.arena.dupe([]const u8, missing_error_buf.items), + }; + } - return .ok; - }, - .error_set_single => { - const name = src_ty.castTag(.error_set_single).?.data; - if (dest_ty.errorSetHasField(name)) { return .ok; - } - const list = try sema.arena.alloc([]const u8, 1); - list[0] = name; - return InMemoryCoercionResult{ .missing_error = list }; - }, - .error_set_merged => { - const names = src_ty.castTag(.error_set_merged).?.data.keys(); - for (names) |name| { - if (!dest_ty.errorSetHasField(name)) { - try missing_error_buf.append(name); + }, + .error_set_single => { + const name = src_ty.castTag(.error_set_single).?.data; + if (dest_ty.errorSetHasField(name)) { + return .ok; + } + const list = try sema.arena.alloc([]const u8, 1); + list[0] = name; + return InMemoryCoercionResult{ .missing_error = list }; + }, + .error_set_merged => { + const names = src_ty.castTag(.error_set_merged).?.data.keys(); + for (names) |name| { + if (!dest_ty.errorSetHasField(name)) { + try missing_error_buf.append(name); + } } - } - if (missing_error_buf.items.len != 0) { - return InMemoryCoercionResult{ - .missing_error = try sema.arena.dupe([]const u8, missing_error_buf.items), - }; - } + if (missing_error_buf.items.len != 0) { + return InMemoryCoercionResult{ + .missing_error = try sema.arena.dupe([]const u8, missing_error_buf.items), + }; + } - return .ok; - }, - .error_set => { - const names = src_ty.castTag(.error_set).?.data.names.keys(); - for (names) |name| { - if (!dest_ty.errorSetHasField(name)) { - try missing_error_buf.append(name); + return .ok; + }, + .error_set => { + const names = src_ty.castTag(.error_set).?.data.names.keys(); + for (names) |name| { + if (!dest_ty.errorSetHasField(name)) { + try missing_error_buf.append(name); + } } - } - if (missing_error_buf.items.len != 0) { - return InMemoryCoercionResult{ - .missing_error = try sema.arena.dupe([]const u8, missing_error_buf.items), - }; - } + if (missing_error_buf.items.len != 0) { + return InMemoryCoercionResult{ + .missing_error = try sema.arena.dupe([]const u8, missing_error_buf.items), + }; + } - return .ok; - }, - .anyerror => switch (dest_ty.tag()) { - .error_set_inferred => unreachable, // Caught by dest_ty.isAnyError() above. - .error_set_single, .error_set_merged, .error_set => return .from_anyerror, - .anyerror => unreachable, // Filtered out above. + return .ok; + }, else => unreachable, }, - else => unreachable, + + .anyerror_type => switch (dest_ty.ip_index) { + .none => switch (dest_ty.tag()) { + .error_set_inferred => unreachable, // Caught by dest_ty.isAnyError() above. + .error_set_single, .error_set_merged, .error_set => return .from_anyerror, + else => unreachable, + }, + .anyerror_type => unreachable, // Filtered out above. 
+ else => @panic("TODO"), + }, + + else => @panic("TODO"), } unreachable; @@ -29355,42 +29368,49 @@ fn analyzeIsNonErrComptimeOnly( // exception if the error union error set is known to be empty, // we allow the comparison but always make it comptime-known. const set_ty = operand_ty.errorUnionSet(); - switch (set_ty.tag()) { - .anyerror => {}, - .error_set_inferred => blk: { - // If the error set is empty, we must return a comptime true or false. - // However we want to avoid unnecessarily resolving an inferred error set - // in case it is already non-empty. - const ies = set_ty.castTag(.error_set_inferred).?.data; - if (ies.is_anyerror) break :blk; - if (ies.errors.count() != 0) break :blk; - if (maybe_operand_val == null) { - // Try to avoid resolving inferred error set if possible. - if (ies.errors.count() != 0) break :blk; + switch (set_ty.ip_index) { + .none => switch (set_ty.tag()) { + .error_set_inferred => blk: { + // If the error set is empty, we must return a comptime true or false. + // However we want to avoid unnecessarily resolving an inferred error set + // in case it is already non-empty. + const ies = set_ty.castTag(.error_set_inferred).?.data; if (ies.is_anyerror) break :blk; - for (ies.inferred_error_sets.keys()) |other_ies| { - if (ies == other_ies) continue; - try sema.resolveInferredErrorSet(block, src, other_ies); - if (other_ies.is_anyerror) { - ies.is_anyerror = true; - ies.is_resolved = true; - break :blk; - } + if (ies.errors.count() != 0) break :blk; + if (maybe_operand_val == null) { + // Try to avoid resolving inferred error set if possible. + if (ies.errors.count() != 0) break :blk; + if (ies.is_anyerror) break :blk; + for (ies.inferred_error_sets.keys()) |other_ies| { + if (ies == other_ies) continue; + try sema.resolveInferredErrorSet(block, src, other_ies); + if (other_ies.is_anyerror) { + ies.is_anyerror = true; + ies.is_resolved = true; + break :blk; + } - if (other_ies.errors.count() != 0) break :blk; - } - if (ies.func == sema.owner_func) { - // We're checking the inferred errorset of the current function and none of - // its child inferred error sets contained any errors meaning that any value - // so far with this type can't contain errors either. - return Air.Inst.Ref.bool_true; + if (other_ies.errors.count() != 0) break :blk; + } + if (ies.func == sema.owner_func) { + // We're checking the inferred errorset of the current function and none of + // its child inferred error sets contained any errors meaning that any value + // so far with this type can't contain errors either. 
+ return Air.Inst.Ref.bool_true; + } + try sema.resolveInferredErrorSet(block, src, ies); + if (ies.is_anyerror) break :blk; + if (ies.errors.count() == 0) return Air.Inst.Ref.bool_true; } - try sema.resolveInferredErrorSet(block, src, ies); - if (ies.is_anyerror) break :blk; - if (ies.errors.count() == 0) return Air.Inst.Ref.bool_true; - } + }, + else => if (set_ty.errorSetNames().len == 0) return Air.Inst.Ref.bool_true, + }, + + .anyerror_type => {}, + + else => switch (mod.intern_pool.indexToKey(set_ty.ip_index)) { + else => @panic("TODO"), }, - else => if (set_ty.errorSetNames().len == 0) return Air.Inst.Ref.bool_true, } if (maybe_operand_val) |err_union| { @@ -30308,43 +30328,48 @@ fn wrapErrorUnionSet( const inst_ty = sema.typeOf(inst); const dest_err_set_ty = dest_ty.errorUnionSet(); if (try sema.resolveMaybeUndefVal(inst)) |val| { - switch (dest_err_set_ty.tag()) { - .anyerror => {}, - .error_set_single => ok: { - const expected_name = val.castTag(.@"error").?.data.name; - const n = dest_err_set_ty.castTag(.error_set_single).?.data; - if (mem.eql(u8, expected_name, n)) break :ok; - return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty); - }, - .error_set => { - const expected_name = val.castTag(.@"error").?.data.name; - const error_set = dest_err_set_ty.castTag(.error_set).?.data; - if (!error_set.names.contains(expected_name)) { + switch (dest_err_set_ty.ip_index) { + .anyerror_type => {}, + + .none => switch (dest_err_set_ty.tag()) { + .error_set_single => ok: { + const expected_name = val.castTag(.@"error").?.data.name; + const n = dest_err_set_ty.castTag(.error_set_single).?.data; + if (mem.eql(u8, expected_name, n)) break :ok; return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty); - } - }, - .error_set_inferred => ok: { - const expected_name = val.castTag(.@"error").?.data.name; - const ies = dest_err_set_ty.castTag(.error_set_inferred).?.data; - - // We carefully do this in an order that avoids unnecessarily - // resolving the destination error set type. - if (ies.is_anyerror) break :ok; - if (ies.errors.contains(expected_name)) break :ok; - if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, dest_err_set_ty, inst_ty, inst_src, inst_src)) { - break :ok; - } + }, + .error_set => { + const expected_name = val.castTag(.@"error").?.data.name; + const error_set = dest_err_set_ty.castTag(.error_set).?.data; + if (!error_set.names.contains(expected_name)) { + return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty); + } + }, + .error_set_inferred => ok: { + const expected_name = val.castTag(.@"error").?.data.name; + const ies = dest_err_set_ty.castTag(.error_set_inferred).?.data; + + // We carefully do this in an order that avoids unnecessarily + // resolving the destination error set type. 
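// [editor's aside] The ordering defended by the comment above runs the
// checks from cheapest to most expensive, so the destination's inferred
// error set is resolved only as a last resort, exactly as the lines that
// follow do. In miniature (hypothetical names; `slowCoerce` stands in for
// the full in-memory coercion):
const std = @import("std");

const IesModel = struct {
    is_anyerror: bool = false,
    errors: std.StringHashMapUnmanaged(void) = .{},
};

fn errorFits(ies: *const IesModel, name: []const u8) bool {
    if (ies.is_anyerror) return true; // flag test: free
    if (ies.errors.contains(name)) return true; // set lookup: cheap
    return slowCoerce(ies, name); // may force resolution: expensive
}

fn slowCoerce(ies: *const IesModel, name: []const u8) bool {
    _ = ies;
    _ = name;
    return false; // placeholder for coerceInMemoryAllowedErrorSets
}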
+ if (ies.is_anyerror) break :ok; + if (ies.errors.contains(expected_name)) break :ok; + if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, dest_err_set_ty, inst_ty, inst_src, inst_src)) { + break :ok; + } - return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty); - }, - .error_set_merged => { - const expected_name = val.castTag(.@"error").?.data.name; - const error_set = dest_err_set_ty.castTag(.error_set_merged).?.data; - if (!error_set.contains(expected_name)) { return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty); - } + }, + .error_set_merged => { + const expected_name = val.castTag(.@"error").?.data.name; + const error_set = dest_err_set_ty.castTag(.error_set_merged).?.data; + if (!error_set.contains(expected_name)) { + return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty); + } + }, + else => unreachable, }, - else => unreachable, + + else => @panic("TODO"), } return sema.addConstant(dest_ty, val); } @@ -30380,7 +30405,7 @@ fn resolvePeerTypes( ) !Type { const mod = sema.mod; switch (instructions.len) { - 0 => return Type.initTag(.noreturn), + 0 => return Type.noreturn, 1 => return sema.typeOf(instructions[0]), else => {}, } @@ -31445,24 +31470,7 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .i64, .u128, .i128, - .anyopaque, - .bool, - .void, - .anyerror, - .noreturn, - .@"anyframe", - .null, - .undefined, - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .modifier, - .prefetch_options, - .export_options, - .extern_options, + .manyptr_u8, .manyptr_const_u8, .manyptr_const_u8_sentinel_0, @@ -31476,17 +31484,12 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .error_set_inferred, .error_set_merged, .@"opaque", - .generic_poison, .array_u8, .array_u8_sentinel_0, .enum_simple, => false, .single_const_pointer_to_comptime_int, - .type, - .comptime_int, - .enum_literal, - .type_info, .function, => true, @@ -31709,17 +31712,6 @@ pub fn resolveTypeFields(sema: *Sema, ty: Type) CompileError!Type { try sema.resolveTypeFieldsUnion(ty, union_obj); return ty; }, - .type_info => return sema.getBuiltinType("Type"), - .extern_options => return sema.getBuiltinType("ExternOptions"), - .export_options => return sema.getBuiltinType("ExportOptions"), - .atomic_order => return sema.getBuiltinType("AtomicOrder"), - .atomic_rmw_op => return sema.getBuiltinType("AtomicRmwOp"), - .calling_convention => return sema.getBuiltinType("CallingConvention"), - .address_space => return sema.getBuiltinType("AddressSpace"), - .float_mode => return sema.getBuiltinType("FloatMode"), - .reduce_op => return sema.getBuiltinType("ReduceOp"), - .modifier => return sema.getBuiltinType("CallModifier"), - .prefetch_options => return sema.getBuiltinType("PrefetchOptions"), else => return ty, }, @@ -31772,6 +31764,7 @@ pub fn resolveTypeFields(sema: *Sema, ty: Type) CompileError!Type { .const_slice_u8_type, .anyerror_void_error_union_type, .generic_poison_type, + .var_args_param_type, .empty_struct_type, => return ty, @@ -31789,7 +31782,6 @@ pub fn resolveTypeFields(sema: *Sema, ty: Type) CompileError!Type { .bool_false => unreachable, .empty_struct => unreachable, .generic_poison => unreachable, - .var_args_param_type => unreachable, .type_info_type => return sema.getBuiltinType("Type"), .extern_options_type => return sema.getBuiltinType("ExternOptions"), @@ -32118,7 +32110,7 @@ fn semaStructFields(mod: *Module, 
struct_obj: *Module.Struct) CompileError!void return sema.failWithOwnedErrorMsg(msg); } gop.value_ptr.* = .{ - .ty = Type.initTag(.noreturn), + .ty = Type.noreturn, .abi_align = 0, .default_val = Value.initTag(.unreachable_value), .is_comptime = is_comptime, @@ -32552,7 +32544,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { const field_ty: Type = if (!has_type) Type.void else if (field_type_ref == .none) - Type.initTag(.noreturn) + Type.noreturn else sema.resolveType(&block_scope, .unneeded, field_type_ref) catch |err| switch (err) { error.NeededSourceLocation => { @@ -32956,7 +32948,6 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { }; switch (ty.tag()) { - .comptime_int, .u1, .u8, .i8, @@ -32969,9 +32960,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .i64, .u128, .i128, - .bool, - .type, - .anyerror, + .error_set_single, .error_set, .error_set_merged, @@ -32984,28 +32973,14 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .const_slice_u8_sentinel_0, .const_slice, .mut_slice, - .anyopaque, .optional_single_mut_pointer, .optional_single_const_pointer, - .enum_literal, .anyerror_void_error_union, .error_set_inferred, .@"opaque", .manyptr_u8, .manyptr_const_u8, .manyptr_const_u8_sentinel_0, - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .modifier, - .prefetch_options, - .export_options, - .extern_options, - .type_info, - .@"anyframe", .anyframe_T, .many_const_pointer, .many_mut_pointer, @@ -33138,10 +33113,6 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { }, .empty_struct, .empty_struct_literal => return Value.initTag(.empty_struct_value), - .void => return Value.void, - .noreturn => return Value.initTag(.unreachable_value), - .null => return Value.null, - .undefined => return Value.initTag(.undef), .vector, .array, .array_u8 => { if (ty.arrayLen() == 0) @@ -33154,7 +33125,6 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .inferred_alloc_const => unreachable, .inferred_alloc_mut => unreachable, - .generic_poison => return error.GenericPoison, } } @@ -33194,34 +33164,12 @@ pub fn addType(sema: *Sema, ty: Type) !Air.Inst.Ref { .i64 => return .i64_type, .u128 => return .u128_type, .i128 => return .i128_type, - .anyopaque => return .anyopaque_type, - .bool => return .bool_type, - .void => return .void_type, - .type => return .type_type, - .anyerror => return .anyerror_type, - .comptime_int => return .comptime_int_type, - .noreturn => return .noreturn_type, - .@"anyframe" => return .anyframe_type, - .null => return .null_type, - .undefined => return .undefined_type, - .enum_literal => return .enum_literal_type, - .atomic_order => return .atomic_order_type, - .atomic_rmw_op => return .atomic_rmw_op_type, - .calling_convention => return .calling_convention_type, - .address_space => return .address_space_type, - .float_mode => return .float_mode_type, - .reduce_op => return .reduce_op_type, - .modifier => return .call_modifier_type, - .prefetch_options => return .prefetch_options_type, - .export_options => return .export_options_type, - .extern_options => return .extern_options_type, - .type_info => return .type_info_type, + .manyptr_u8 => return .manyptr_u8_type, .manyptr_const_u8 => return .manyptr_const_u8_type, .single_const_pointer_to_comptime_int => return .single_const_pointer_to_comptime_int_type, .const_slice_u8 => return .const_slice_u8_type, 
.anyerror_void_error_union => return .anyerror_void_error_union_type, - .generic_poison => return .generic_poison_type, else => {}, } try sema.air_instructions.append(sema.gpa, .{ @@ -33658,22 +33606,7 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .i64, .u128, .i128, - .anyopaque, - .bool, - .void, - .anyerror, - .noreturn, - .@"anyframe", - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .modifier, - .prefetch_options, - .export_options, - .extern_options, + .manyptr_u8, .manyptr_const_u8, .manyptr_const_u8_sentinel_0, @@ -33687,19 +33620,12 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .error_set_inferred, .error_set_merged, .@"opaque", - .generic_poison, .array_u8, .array_u8_sentinel_0, .enum_simple, => false, .single_const_pointer_to_comptime_int, - .type, - .comptime_int, - .enum_literal, - .null, - .undefined, - .type_info, .function, => true, @@ -34476,17 +34402,6 @@ fn enumHasInt(sema: *Sema, ty: Type, int: Value) CompileError!bool { const tag_ty = try mod.intType(.unsigned, bits); return sema.intInRange(tag_ty, int, fields_len); }, - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .modifier, - .prefetch_options, - .export_options, - .extern_options, - => unreachable, else => unreachable, } diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index 4671866197..7098cf3f32 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -3071,7 +3071,7 @@ fn errUnionErr( const mod = self.bin_file.options.module.?; const err_ty = error_union_ty.errorUnionSet(); const payload_ty = error_union_ty.errorUnionPayload(); - if (err_ty.errorSetIsEmpty()) { + if (err_ty.errorSetIsEmpty(mod)) { return MCValue{ .immediate = 0 }; } if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { @@ -3151,7 +3151,7 @@ fn errUnionPayload( const mod = self.bin_file.options.module.?; const err_ty = error_union_ty.errorUnionSet(); const payload_ty = error_union_ty.errorUnionPayload(); - if (err_ty.errorSetIsEmpty()) { + if (err_ty.errorSetIsEmpty(mod)) { return try error_union_bind.resolveToMcv(self); } if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { @@ -4905,9 +4905,10 @@ fn isErr( error_union_bind: ReadArg.Bind, error_union_ty: Type, ) !MCValue { + const mod = self.bin_file.options.module.?; const error_type = error_union_ty.errorUnionSet(); - if (error_type.errorSetIsEmpty()) { + if (error_type.errorSetIsEmpty(mod)) { return MCValue{ .immediate = 0 }; // always false } diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index ca4a3826aa..bf94cf55a0 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -2047,7 +2047,7 @@ fn errUnionErr( const mod = self.bin_file.options.module.?; const err_ty = error_union_ty.errorUnionSet(); const payload_ty = error_union_ty.errorUnionPayload(); - if (err_ty.errorSetIsEmpty()) { + if (err_ty.errorSetIsEmpty(mod)) { return MCValue{ .immediate = 0 }; } if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { @@ -2124,7 +2124,7 @@ fn errUnionPayload( const mod = self.bin_file.options.module.?; const err_ty = error_union_ty.errorUnionSet(); const payload_ty = error_union_ty.errorUnionPayload(); - if (err_ty.errorSetIsEmpty()) { + if (err_ty.errorSetIsEmpty(mod)) { return try error_union_bind.resolveToMcv(self); } if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { @@ -4882,9 +4882,10 @@ fn isErr( error_union_bind: ReadArg.Bind, error_union_ty: 
Type, ) !MCValue { + const mod = self.bin_file.options.module.?; const error_type = error_union_ty.errorUnionSet(); - if (error_type.errorSetIsEmpty()) { + if (error_type.errorSetIsEmpty(mod)) { return MCValue{ .immediate = 0 }; // always false } diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index 343cc2f90e..a519b73235 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -3530,7 +3530,7 @@ fn errUnionPayload(self: *Self, error_union_mcv: MCValue, error_union_ty: Type) const mod = self.bin_file.options.module.?; const err_ty = error_union_ty.errorUnionSet(); const payload_ty = error_union_ty.errorUnionPayload(); - if (err_ty.errorSetIsEmpty()) { + if (err_ty.errorSetIsEmpty(mod)) { return error_union_mcv; } if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index bbba43d265..2c1e8aa36d 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -1036,8 +1036,8 @@ fn genValtype(ty: Type, mod: *Module) u8 { /// Differently from `genValtype` this also allows `void` to create a block /// with no return type fn genBlockType(ty: Type, mod: *Module) u8 { - return switch (ty.tag()) { - .void, .noreturn => wasm.block_empty, + return switch (ty.ip_index) { + .void_type, .noreturn_type => wasm.block_empty, else => genValtype(ty, mod), }; } @@ -3948,7 +3948,7 @@ fn airIsErr(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerErro const pl_ty = err_union_ty.errorUnionPayload(); const result = result: { - if (err_union_ty.errorUnionSet().errorSetIsEmpty()) { + if (err_union_ty.errorUnionSet().errorSetIsEmpty(mod)) { switch (opcode) { .i32_ne => break :result WValue{ .imm32 = 0 }, .i32_eq => break :result WValue{ .imm32 = 1 }, @@ -4013,7 +4013,7 @@ fn airUnwrapErrUnionError(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: bool) const payload_ty = err_ty.errorUnionPayload(); const result = result: { - if (err_ty.errorUnionSet().errorSetIsEmpty()) { + if (err_ty.errorUnionSet().errorSetIsEmpty(mod)) { break :result WValue{ .imm32 = 0 }; } @@ -6214,7 +6214,7 @@ fn lowerTry( const pl_ty = err_union_ty.errorUnionPayload(); const pl_has_bits = pl_ty.hasRuntimeBitsIgnoreComptime(mod); - if (!err_union_ty.errorUnionSet().errorSetIsEmpty()) { + if (!err_union_ty.errorUnionSet().errorSetIsEmpty(mod)) { // Block we can jump out of when error is not set try func.startBlock(.block, wasm.block_empty); diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 865ebe02f7..3e0ca4831b 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -3624,7 +3624,7 @@ fn airUnwrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void { const operand = try self.resolveInst(ty_op.operand); const result: MCValue = result: { - if (err_ty.errorSetIsEmpty()) { + if (err_ty.errorSetIsEmpty(mod)) { break :result MCValue{ .immediate = 0 }; } @@ -5811,7 +5811,7 @@ fn genUnOp(self: *Self, maybe_inst: ?Air.Inst.Index, tag: Air.Inst.Tag, src_air: switch (tag) { .not => { const limb_abi_size = @intCast(u16, @min(src_ty.abiSize(mod), 8)); - const int_info = if (src_ty.tag() == .bool) + const int_info = if (src_ty.ip_index == .bool_type) std.builtin.Type.Int{ .signedness = .unsigned, .bits = 1 } else src_ty.intInfo(mod); @@ -8716,7 +8716,7 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC try self.asmRegisterRegister(.{ ._, .@"test" }, alias_reg, alias_reg); return .{ .eflags = .z }; } - assert(some_info.ty.tag() == .bool); + 
assert(some_info.ty.ip_index == .bool_type); const opt_abi_size = @intCast(u32, opt_ty.abiSize(mod)); try self.asmRegisterImmediate( .{ ._, .bt }, @@ -8808,7 +8808,7 @@ fn isErr(self: *Self, maybe_inst: ?Air.Inst.Index, ty: Type, operand: MCValue) ! const mod = self.bin_file.options.module.?; const err_type = ty.errorUnionSet(); - if (err_type.errorSetIsEmpty()) { + if (err_type.errorSetIsEmpty(mod)) { return MCValue{ .immediate = 0 }; // always false } diff --git a/src/codegen/c.zig b/src/codegen/c.zig index f4daa56a6d..2e5e45d54c 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -1508,7 +1508,7 @@ pub const DeclGen = struct { } if (fn_decl.val.castTag(.function)) |func_payload| if (func_payload.data.is_cold) try w.writeAll("zig_cold "); - if (fn_info.return_type.tag() == .noreturn) try w.writeAll("zig_noreturn "); + if (fn_info.return_type.ip_index == .noreturn_type) try w.writeAll("zig_noreturn "); const trailing = try renderTypePrefix( dg.decl_index, @@ -3783,7 +3783,7 @@ fn airNot(f: *Function, inst: Air.Inst.Index) !CValue { const ty_op = f.air.instructions.items(.data)[inst].ty_op; const operand_ty = f.typeOf(ty_op.operand); const scalar_ty = operand_ty.scalarType(mod); - if (scalar_ty.tag() != .bool) return try airUnBuiltinCall(f, inst, "not", .bits); + if (scalar_ty.ip_index != .bool_type) return try airUnBuiltinCall(f, inst, "not", .bits); const op = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); @@ -4292,7 +4292,7 @@ fn airBlock(f: *Function, inst: Air.Inst.Index) !CValue { const writer = f.object.writer(); const inst_ty = f.typeOfIndex(inst); - const result = if (inst_ty.tag() != .void and !f.liveness.isUnused(inst)) + const result = if (inst_ty.ip_index != .void_type and !f.liveness.isUnused(inst)) try f.allocLocal(inst, inst_ty) else .none; @@ -4354,7 +4354,7 @@ fn lowerTry( const payload_ty = err_union_ty.errorUnionPayload(); const payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(mod); - if (!err_union_ty.errorUnionSet().errorSetIsEmpty()) { + if (!err_union_ty.errorUnionSet().errorSetIsEmpty(mod)) { try writer.writeAll("if ("); if (!payload_has_bits) { if (is_ptr) @@ -5549,7 +5549,7 @@ fn airUnwrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue { if (!payload_ty.hasRuntimeBits(mod)) { try f.writeCValue(writer, operand, .Other); } else { - if (!error_ty.errorSetIsEmpty()) + if (!error_ty.errorSetIsEmpty(mod)) if (operand_is_ptr) try f.writeCValueDerefMember(writer, operand, .{ .identifier = "error" }) else @@ -5768,7 +5768,7 @@ fn airIsErr(f: *Function, inst: Air.Inst.Index, is_ptr: bool, operator: []const try f.writeCValue(writer, local, .Other); try writer.writeAll(" = "); - if (!error_ty.errorSetIsEmpty()) + if (!error_ty.errorSetIsEmpty(mod)) if (payload_ty.hasRuntimeBits(mod)) if (is_ptr) try f.writeCValueDerefMember(writer, operand, .{ .identifier = "error" }) @@ -6032,7 +6032,7 @@ fn airCmpBuiltinCall( try writer.writeByte(')'); if (!ref_ret) try writer.print(" {s} {}", .{ compareOperatorC(operator), - try f.fmtIntLiteral(Type.initTag(.i32), Value.zero), + try f.fmtIntLiteral(Type.i32, Value.zero), }); try writer.writeAll(";\n"); try v.end(f, inst, writer); @@ -7749,7 +7749,7 @@ const LowerFnRetTyBuffer = struct { payload: Type.Payload.AnonStruct, }; fn lowerFnRetTy(ret_ty: Type, buffer: *LowerFnRetTyBuffer, mod: *const Module) Type { - if (ret_ty.zigTypeTag(mod) == .NoReturn) return Type.initTag(.noreturn); + if (ret_ty.zigTypeTag(mod) == .NoReturn) return Type.noreturn; if (lowersToArray(ret_ty, mod)) { buffer.names 
= [1][]const u8{"array"}; diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index ce78b06f2e..232cd9d42f 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -1730,7 +1730,7 @@ pub const Object = struct { return ptr_di_ty; }, .Opaque => { - if (ty.tag() == .anyopaque) { + if (ty.ip_index == .anyopaque_type) { const di_ty = dib.createBasicType("anyopaque", 0, DW.ATE.signed); gop.value_ptr.* = AnnotatedDITypePtr.initFull(di_ty); return di_ty; @@ -2847,25 +2847,23 @@ pub const DeclGen = struct { const llvm_addrspace = toLlvmAddressSpace(ptr_info.@"addrspace", target); return dg.context.pointerType(llvm_addrspace); }, - .Opaque => switch (t.tag()) { - .@"opaque" => { - const gop = try dg.object.type_map.getOrPutContext(gpa, t, .{ .mod = dg.module }); - if (gop.found_existing) return gop.value_ptr.*; + .Opaque => { + if (t.ip_index == .anyopaque_type) return dg.context.intType(8); - // The Type memory is ephemeral; since we want to store a longer-lived - // reference, we need to copy it here. - gop.key_ptr.* = try t.copy(dg.object.type_map_arena.allocator()); + const gop = try dg.object.type_map.getOrPutContext(gpa, t, .{ .mod = dg.module }); + if (gop.found_existing) return gop.value_ptr.*; - const opaque_obj = t.castTag(.@"opaque").?.data; - const name = try opaque_obj.getFullyQualifiedName(dg.module); - defer gpa.free(name); + // The Type memory is ephemeral; since we want to store a longer-lived + // reference, we need to copy it here. + gop.key_ptr.* = try t.copy(dg.object.type_map_arena.allocator()); - const llvm_struct_ty = dg.context.structCreateNamed(name); - gop.value_ptr.* = llvm_struct_ty; // must be done before any recursive calls - return llvm_struct_ty; - }, - .anyopaque => return dg.context.intType(8), - else => unreachable, + const opaque_obj = t.castTag(.@"opaque").?.data; + const name = try opaque_obj.getFullyQualifiedName(dg.module); + defer gpa.free(name); + + const llvm_struct_ty = dg.context.structCreateNamed(name); + gop.value_ptr.* = llvm_struct_ty; // must be done before any recursive calls + return llvm_struct_ty; }, .Array => { const elem_ty = t.childType(); @@ -5531,7 +5529,7 @@ pub const FuncGen = struct { const payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(mod); const err_union_llvm_ty = try fg.dg.lowerType(err_union_ty); - if (!err_union_ty.errorUnionSet().errorSetIsEmpty()) { + if (!err_union_ty.errorUnionSet().errorSetIsEmpty(mod)) { const is_err = err: { const err_set_ty = try fg.dg.lowerType(Type.anyerror); const zero = err_set_ty.constNull(); @@ -6715,7 +6713,7 @@ pub const FuncGen = struct { const err_set_ty = try self.dg.lowerType(Type.anyerror); const zero = err_set_ty.constNull(); - if (err_union_ty.errorUnionSet().errorSetIsEmpty()) { + if (err_union_ty.errorUnionSet().errorSetIsEmpty(mod)) { const llvm_i1 = self.context.intType(1); switch (op) { .EQ => return llvm_i1.constInt(1, .False), // 0 == 0 @@ -6864,7 +6862,7 @@ pub const FuncGen = struct { const operand = try self.resolveInst(ty_op.operand); const operand_ty = self.typeOf(ty_op.operand); const err_union_ty = if (operand_is_ptr) operand_ty.childType() else operand_ty; - if (err_union_ty.errorUnionSet().errorSetIsEmpty()) { + if (err_union_ty.errorUnionSet().errorSetIsEmpty(mod)) { const err_llvm_ty = try self.dg.lowerType(Type.anyerror); if (operand_is_ptr) { return operand; diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 90c2d93458..3a5f5d6f6a 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -2849,6 +2849,7 @@ pub const DeclGen = 
struct { } fn airTry(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { + const mod = self.module; const pl_op = self.air.instructions.items(.data)[inst].pl_op; const err_union_id = try self.resolve(pl_op.operand); const extra = self.air.extraData(Air.Try, pl_op.payload); @@ -2862,7 +2863,7 @@ pub const DeclGen = struct { const eu_layout = self.errorUnionLayout(payload_ty); - if (!err_union_ty.errorUnionSet().errorSetIsEmpty()) { + if (!err_union_ty.errorUnionSet().errorSetIsEmpty(mod)) { const err_id = if (eu_layout.payload_has_bits) try self.extractField(Type.anyerror, err_union_id, eu_layout.errorFieldIndex()) else @@ -2910,12 +2911,13 @@ pub const DeclGen = struct { fn airErrUnionErr(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { if (self.liveness.isUnused(inst)) return null; + const mod = self.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand_id = try self.resolve(ty_op.operand); const err_union_ty = self.typeOf(ty_op.operand); const err_ty_ref = try self.resolveType(Type.anyerror, .direct); - if (err_union_ty.errorUnionSet().errorSetIsEmpty()) { + if (err_union_ty.errorUnionSet().errorSetIsEmpty(mod)) { // No error possible, so just return undefined. return try self.spv.constUndef(err_ty_ref); } diff --git a/src/print_air.zig b/src/print_air.zig index 39a244e11f..f4a1aeae32 100644 --- a/src/print_air.zig +++ b/src/print_air.zig @@ -370,7 +370,6 @@ const Writer = struct { switch (t) { .inferred_alloc_const => try s.writeAll("(inferred_alloc_const)"), .inferred_alloc_mut => try s.writeAll("(inferred_alloc_mut)"), - .generic_poison => try s.writeAll("(generic_poison)"), else => try ty.print(s, w.module), } } diff --git a/src/type.zig b/src/type.zig index 4e8d0b9e20..7db7ad316b 100644 --- a/src/type.zig +++ b/src/type.zig @@ -85,9 +85,9 @@ pub const Type = struct { .address_space, .float_mode, .reduce_op, + .call_modifier, => return .Enum, - .call_modifier, .prefetch_options, .export_options, .extern_options, @@ -95,7 +95,7 @@ pub const Type = struct { .type_info => return .Union, - .generic_poison => unreachable, + .generic_poison => return error.GenericPoison, .var_args_param => unreachable, }, @@ -107,8 +107,6 @@ pub const Type = struct { } } switch (ty.tag()) { - .generic_poison => return error.GenericPoison, - .u1, .u8, .i8, @@ -125,19 +123,11 @@ pub const Type = struct { .error_set, .error_set_single, - .anyerror, .error_set_inferred, .error_set_merged, => return .ErrorSet, - .anyopaque, .@"opaque" => return .Opaque, - .bool => return .Bool, - .void => return .Void, - .type => return .Type, - .comptime_int => return .ComptimeInt, - .noreturn => return .NoReturn, - .null => return .Null, - .undefined => return .Undefined, + .@"opaque" => return .Opaque, .function => return .Fn, @@ -172,18 +162,14 @@ pub const Type = struct { .optional_single_const_pointer, .optional_single_mut_pointer, => return .Optional, - .enum_literal => return .EnumLiteral, .anyerror_void_error_union, .error_union => return .ErrorUnion, - .anyframe_T, .@"anyframe" => return .AnyFrame, + .anyframe_T => return .AnyFrame, .empty_struct, .empty_struct_literal, .@"struct", - .prefetch_options, - .export_options, - .extern_options, .tuple, .anon_struct, => return .Struct, @@ -192,19 +178,11 @@ pub const Type = struct { .enum_nonexhaustive, .enum_simple, .enum_numbered, - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .modifier, => return .Enum, .@"union", .union_safety_tagged, .union_tagged, - .type_info, => return .Union, } } 
@@ -393,7 +371,7 @@ pub const Type = struct { pub fn ptrInfo(self: Type) Payload.Pointer { switch (self.tag()) { .single_const_pointer_to_comptime_int => return .{ .data = .{ - .pointee_type = Type.initTag(.comptime_int), + .pointee_type = Type.comptime_int, .sentinel = null, .@"align" = 0, .@"addrspace" = .generic, @@ -405,7 +383,7 @@ pub const Type = struct { .size = .One, } }, .const_slice_u8 => return .{ .data = .{ - .pointee_type = Type.initTag(.u8), + .pointee_type = Type.u8, .sentinel = null, .@"align" = 0, .@"addrspace" = .generic, @@ -417,7 +395,7 @@ pub const Type = struct { .size = .Slice, } }, .const_slice_u8_sentinel_0 => return .{ .data = .{ - .pointee_type = Type.initTag(.u8), + .pointee_type = Type.u8, .sentinel = Value.zero, .@"align" = 0, .@"addrspace" = .generic, @@ -465,7 +443,7 @@ pub const Type = struct { .size = .Many, } }, .manyptr_const_u8 => return .{ .data = .{ - .pointee_type = Type.initTag(.u8), + .pointee_type = Type.u8, .sentinel = null, .@"align" = 0, .@"addrspace" = .generic, @@ -477,7 +455,7 @@ pub const Type = struct { .size = .Many, } }, .manyptr_const_u8_sentinel_0 => return .{ .data = .{ - .pointee_type = Type.initTag(.u8), + .pointee_type = Type.u8, .sentinel = Value.zero, .@"align" = 0, .@"addrspace" = .generic, @@ -501,7 +479,7 @@ pub const Type = struct { .size = .Many, } }, .manyptr_u8 => return .{ .data = .{ - .pointee_type = Type.initTag(.u8), + .pointee_type = Type.u8, .sentinel = null, .@"align" = 0, .@"addrspace" = .generic, @@ -608,23 +586,6 @@ pub const Type = struct { if (a.legacy.tag_if_small_enough == b.legacy.tag_if_small_enough) return true; switch (a.tag()) { - .generic_poison => unreachable, - - .bool, - .void, - .type, - .comptime_int, - .noreturn, - .null, - .undefined, - .anyopaque, - .@"anyframe", - .enum_literal, - => |a_tag| { - assert(a_tag != b.tag()); // because of the comparison at the top of the function. - return false; - }, - .u1, .u8, .i8, @@ -653,10 +614,6 @@ pub const Type = struct { return a_ies == b_ies; }, - .anyerror => { - return b.tag() == .anyerror; - }, - .error_set, .error_set_single, .error_set_merged, @@ -927,13 +884,6 @@ pub const Type = struct { return true; }, - // we can't compare these based on tags because it wouldn't detect if, - // for example, a was resolved into .@"struct" but b was one of these tags. - .prefetch_options, - .export_options, - .extern_options, - => unreachable, // needed to resolve the type before now - .enum_full, .enum_nonexhaustive => { const a_enum_obj = a.cast(Payload.EnumFull).?.data; const b_enum_obj = (b.cast(Payload.EnumFull) orelse return false).data; @@ -949,26 +899,12 @@ pub const Type = struct { const b_enum_obj = (b.cast(Payload.EnumNumbered) orelse return false).data; return a_enum_obj == b_enum_obj; }, - // we can't compare these based on tags because it wouldn't detect if, - // for example, a was resolved into .enum_simple but b was one of these tags. - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .modifier, - => unreachable, // needed to resolve the type before now .@"union", .union_safety_tagged, .union_tagged => { const a_union_obj = a.cast(Payload.Union).?.data; const b_union_obj = (b.cast(Payload.Union) orelse return false).data; return a_union_obj == b_union_obj; }, - // we can't compare these based on tags because it wouldn't detect if, - // for example, a was resolved into .union_tagged but b was one of these tags. 
- .type_info => unreachable, // needed to resolve the type before now - } } @@ -987,31 +923,6 @@ pub const Type = struct { return; } switch (ty.tag()) { - .generic_poison => unreachable, - - .bool => std.hash.autoHash(hasher, std.builtin.TypeId.Bool), - .void => std.hash.autoHash(hasher, std.builtin.TypeId.Void), - .type => std.hash.autoHash(hasher, std.builtin.TypeId.Type), - .comptime_int => std.hash.autoHash(hasher, std.builtin.TypeId.ComptimeInt), - .noreturn => std.hash.autoHash(hasher, std.builtin.TypeId.NoReturn), - .null => std.hash.autoHash(hasher, std.builtin.TypeId.Null), - .undefined => std.hash.autoHash(hasher, std.builtin.TypeId.Undefined), - - .anyopaque => { - std.hash.autoHash(hasher, std.builtin.TypeId.Opaque); - std.hash.autoHash(hasher, Tag.anyopaque); - }, - - .@"anyframe" => { - std.hash.autoHash(hasher, std.builtin.TypeId.AnyFrame); - std.hash.autoHash(hasher, Tag.@"anyframe"); - }, - - .enum_literal => { - std.hash.autoHash(hasher, std.builtin.TypeId.EnumLiteral); - std.hash.autoHash(hasher, Tag.enum_literal); - }, - .u1, .u8, .i8, @@ -1046,12 +957,6 @@ pub const Type = struct { for (names) |name| hasher.update(name); }, - .anyerror => { - // anyerror is distinct from other error sets - std.hash.autoHash(hasher, std.builtin.TypeId.ErrorSet); - std.hash.autoHash(hasher, Tag.anyerror); - }, - .error_set_inferred => { // inferred error sets are compared using their data pointer const ies: *Module.Fn.InferredErrorSet = ty.castTag(.error_set_inferred).?.data; @@ -1209,12 +1114,6 @@ pub const Type = struct { } }, - // we can't hash these based on tags because they wouldn't match the expanded version. - .prefetch_options, - .export_options, - .extern_options, - => unreachable, // needed to resolve the type before now - .enum_full, .enum_nonexhaustive => { const enum_obj: *const Module.EnumFull = ty.cast(Payload.EnumFull).?.data; std.hash.autoHash(hasher, std.builtin.TypeId.Enum); @@ -1230,24 +1129,12 @@ pub const Type = struct { std.hash.autoHash(hasher, std.builtin.TypeId.Enum); std.hash.autoHash(hasher, enum_obj); }, - // we can't hash these based on tags because they wouldn't match the expanded version. - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .modifier, - => unreachable, // needed to resolve the type before now .@"union", .union_safety_tagged, .union_tagged => { const union_obj: *const Module.Union = ty.cast(Payload.Union).?.data; std.hash.autoHash(hasher, std.builtin.TypeId.Union); std.hash.autoHash(hasher, union_obj); }, - // we can't hash these based on tags because they wouldn't match the expanded version. 
- .type_info => unreachable, // needed to resolve the type before now - } } @@ -1305,19 +1192,9 @@ pub const Type = struct { .i64, .u128, .i128, - .anyopaque, - .bool, - .void, - .type, - .anyerror, - .comptime_int, - .noreturn, - .null, - .undefined, .single_const_pointer_to_comptime_int, .const_slice_u8, .const_slice_u8_sentinel_0, - .enum_literal, .anyerror_void_error_union, .inferred_alloc_const, .inferred_alloc_mut, @@ -1325,19 +1202,6 @@ pub const Type = struct { .manyptr_u8, .manyptr_const_u8, .manyptr_const_u8_sentinel_0, - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .modifier, - .prefetch_options, - .export_options, - .extern_options, - .type_info, - .@"anyframe", - .generic_poison, => unreachable, .array_u8, @@ -1580,20 +1444,8 @@ pub const Type = struct { .i64, .u128, .i128, - .anyopaque, - .bool, - .void, - .type, - .anyerror, - .@"anyframe", - .comptime_int, - .noreturn, => return writer.writeAll(@tagName(t)), - .enum_literal => return writer.writeAll("@Type(.EnumLiteral)"), - .null => return writer.writeAll("@Type(.Null)"), - .undefined => return writer.writeAll("@Type(.Undefined)"), - .empty_struct, .empty_struct_literal => return writer.writeAll("struct {}"), .@"struct" => { @@ -1640,17 +1492,6 @@ pub const Type = struct { .manyptr_u8 => return writer.writeAll("[*]u8"), .manyptr_const_u8 => return writer.writeAll("[*]const u8"), .manyptr_const_u8_sentinel_0 => return writer.writeAll("[*:0]const u8"), - .atomic_order => return writer.writeAll("std.builtin.AtomicOrder"), - .atomic_rmw_op => return writer.writeAll("std.builtin.AtomicRmwOp"), - .calling_convention => return writer.writeAll("std.builtin.CallingConvention"), - .address_space => return writer.writeAll("std.builtin.AddressSpace"), - .float_mode => return writer.writeAll("std.builtin.FloatMode"), - .reduce_op => return writer.writeAll("std.builtin.ReduceOp"), - .modifier => return writer.writeAll("std.builtin.CallModifier"), - .prefetch_options => return writer.writeAll("std.builtin.PrefetchOptions"), - .export_options => return writer.writeAll("std.builtin.ExportOptions"), - .extern_options => return writer.writeAll("std.builtin.ExternOptions"), - .type_info => return writer.writeAll("std.builtin.Type"), .function => { const payload = ty.castTag(.function).?.data; try writer.writeAll("fn("); @@ -1889,7 +1730,6 @@ pub const Type = struct { }, .inferred_alloc_const => return writer.writeAll("(inferred_alloc_const)"), .inferred_alloc_mut => return writer.writeAll("(inferred_alloc_mut)"), - .generic_poison => return writer.writeAll("(generic poison)"), } unreachable; } @@ -1931,20 +1771,6 @@ pub const Type = struct { switch (t) { .inferred_alloc_const => unreachable, .inferred_alloc_mut => unreachable, - .generic_poison => unreachable, - - // TODO get rid of these Type.Tag values. 
- .atomic_order => unreachable, - .atomic_rmw_op => unreachable, - .calling_convention => unreachable, - .address_space => unreachable, - .float_mode => unreachable, - .reduce_op => unreachable, - .modifier => unreachable, - .prefetch_options => unreachable, - .export_options => unreachable, - .extern_options => unreachable, - .type_info => unreachable, .u1, .u8, @@ -1958,19 +1784,8 @@ pub const Type = struct { .i64, .u128, .i128, - .anyopaque, - .bool, - .void, - .type, - .anyerror, - .@"anyframe", - .comptime_int, - .noreturn, => try writer.writeAll(@tagName(t)), - .enum_literal => try writer.writeAll("@TypeOf(.enum_literal)"), - .null => try writer.writeAll("@TypeOf(null)"), - .undefined => try writer.writeAll("@TypeOf(undefined)"), .empty_struct_literal => try writer.writeAll("@TypeOf(.{})"), .empty_struct => { @@ -2249,34 +2064,12 @@ pub const Type = struct { .i32 => return Value.initTag(.i32_type), .u64 => return Value.initTag(.u64_type), .i64 => return Value.initTag(.i64_type), - .anyopaque => return Value.initTag(.anyopaque_type), - .bool => return Value.initTag(.bool_type), - .void => return Value.initTag(.void_type), - .type => return Value.initTag(.type_type), - .anyerror => return Value.initTag(.anyerror_type), - .@"anyframe" => return Value.initTag(.anyframe_type), - .comptime_int => return Value.initTag(.comptime_int_type), - .noreturn => return Value.initTag(.noreturn_type), - .null => return Value.initTag(.null_type), - .undefined => return Value.initTag(.undefined_type), .single_const_pointer_to_comptime_int => return Value.initTag(.single_const_pointer_to_comptime_int_type), .const_slice_u8 => return Value.initTag(.const_slice_u8_type), .const_slice_u8_sentinel_0 => return Value.initTag(.const_slice_u8_sentinel_0_type), - .enum_literal => return Value.initTag(.enum_literal_type), .manyptr_u8 => return Value.initTag(.manyptr_u8_type), .manyptr_const_u8 => return Value.initTag(.manyptr_const_u8_type), .manyptr_const_u8_sentinel_0 => return Value.initTag(.manyptr_const_u8_sentinel_0_type), - .atomic_order => return Value.initTag(.atomic_order_type), - .atomic_rmw_op => return Value.initTag(.atomic_rmw_op_type), - .calling_convention => return Value.initTag(.calling_convention_type), - .address_space => return Value.initTag(.address_space_type), - .float_mode => return Value.initTag(.float_mode_type), - .reduce_op => return Value.initTag(.reduce_op_type), - .modifier => return Value.initTag(.modifier_type), - .prefetch_options => return Value.initTag(.prefetch_options_type), - .export_options => return Value.initTag(.export_options_type), - .extern_options => return Value.initTag(.extern_options_type), - .type_info => return Value.initTag(.type_info_type), .inferred_alloc_const => unreachable, .inferred_alloc_mut => unreachable, else => return Value.Tag.ty.create(allocator, self), @@ -2378,8 +2171,7 @@ pub const Type = struct { .i64, .u128, .i128, - .bool, - .anyerror, + .const_slice_u8, .const_slice_u8_sentinel_0, .array_u8_sentinel_0, @@ -2388,18 +2180,7 @@ pub const Type = struct { .manyptr_u8, .manyptr_const_u8, .manyptr_const_u8_sentinel_0, - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .modifier, - .prefetch_options, - .export_options, - .extern_options, - .@"anyframe", - .anyopaque, + .@"opaque", .error_set_single, .error_union, @@ -2435,16 +2216,8 @@ pub const Type = struct { // These are false because they are comptime-only types. 
.single_const_pointer_to_comptime_int, - .void, - .type, - .comptime_int, - .noreturn, - .null, - .undefined, - .enum_literal, .empty_struct, .empty_struct_literal, - .type_info, // These are function *bodies*, not pointers. // Special exceptions have to be made when emitting functions due to // this returning false. @@ -2558,7 +2331,6 @@ pub const Type = struct { .inferred_alloc_const => unreachable, .inferred_alloc_mut => unreachable, - .generic_poison => unreachable, } } @@ -2641,8 +2413,7 @@ pub const Type = struct { .i64, .u128, .i128, - .bool, - .void, + .manyptr_u8, .manyptr_const_u8, .manyptr_const_u8_sentinel_0, @@ -2662,32 +2433,11 @@ pub const Type = struct { .optional_single_const_pointer, => true, - .anyopaque, - .anyerror, - .noreturn, - .null, - .@"anyframe", - .undefined, - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .modifier, - .prefetch_options, - .export_options, - .extern_options, .error_set, .error_set_single, .error_set_inferred, .error_set_merged, .@"opaque", - .generic_poison, - .type, - .comptime_int, - .enum_literal, - .type_info, // These are function bodies, not function pointers. .function, .const_slice_u8, @@ -2773,7 +2523,6 @@ pub const Type = struct { else => return false, @enumToInt(InternPool.Index.none) => switch (ty.tag()) { - .noreturn => return true, .error_set => { const err_set_obj = ty.castTag(.error_set).?.data; const names = err_set_obj.names.keys(); @@ -3003,21 +2752,10 @@ pub const Type = struct { .u1, .u8, .i8, - .bool, + .array_u8_sentinel_0, .array_u8, - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .modifier, - .prefetch_options, - .export_options, - .extern_options, .@"opaque", - .anyopaque, => return AbiAlignmentAdvanced{ .scalar = 1 }, // represents machine code; not a pointer @@ -3044,13 +2782,11 @@ pub const Type = struct { .manyptr_u8, .manyptr_const_u8, .manyptr_const_u8_sentinel_0, - .@"anyframe", .anyframe_T, => return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }, // TODO revisit this when we have the concept of the error tag type .anyerror_void_error_union, - .anyerror, .error_set_inferred, .error_set_single, .error_set, @@ -3229,22 +2965,12 @@ pub const Type = struct { }, .empty_struct, - .void, .empty_struct_literal, - .type, - .comptime_int, - .null, - .undefined, - .enum_literal, - .type_info, => return AbiAlignmentAdvanced{ .scalar = 0 }, - .noreturn, .inferred_alloc_const, .inferred_alloc_mut, => unreachable, - - .generic_poison => unreachable, } } @@ -3422,26 +3148,12 @@ pub const Type = struct { switch (ty.tag()) { .function => unreachable, // represents machine code; not a pointer .@"opaque" => unreachable, // no size available - .noreturn => unreachable, .inferred_alloc_const => unreachable, .inferred_alloc_mut => unreachable, - .generic_poison => unreachable, - .modifier => unreachable, // missing call to resolveTypeFields - .prefetch_options => unreachable, // missing call to resolveTypeFields - .export_options => unreachable, // missing call to resolveTypeFields - .extern_options => unreachable, // missing call to resolveTypeFields - .type_info => unreachable, // missing call to resolveTypeFields - - .anyopaque, - .type, - .comptime_int, - .null, - .undefined, - .enum_literal, + .single_const_pointer_to_comptime_int, .empty_struct_literal, .empty_struct, - .void, => return AbiSizeAdvanced{ .scalar = 0 }, .@"struct", .tuple, .anon_struct => switch (ty.containerLayout()) { @@ 
-3496,13 +3208,6 @@ pub const Type = struct { .u1, .u8, .i8, - .bool, - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, => return AbiSizeAdvanced{ .scalar = 1 }, .array_u8 => return AbiSizeAdvanced{ .scalar = ty.castTag(.array_u8).?.data }, @@ -3552,7 +3257,6 @@ pub const Type = struct { return AbiSizeAdvanced{ .scalar = result }; }, - .@"anyframe", .anyframe_T, .optional_single_const_pointer, .optional_single_mut_pointer, @@ -3580,7 +3284,6 @@ pub const Type = struct { // TODO revisit this when we have the concept of the error tag type .anyerror_void_error_union, - .anyerror, .error_set_inferred, .error_set, .error_set_merged, @@ -3758,6 +3461,7 @@ pub const Type = struct { .undefined => unreachable, .enum_literal => unreachable, .generic_poison => unreachable, + .var_args_param => unreachable, .atomic_order => unreachable, // missing call to resolveTypeFields .atomic_rmw_op => unreachable, // missing call to resolveTypeFields @@ -3770,7 +3474,6 @@ pub const Type = struct { .export_options => unreachable, // missing call to resolveTypeFields .extern_options => unreachable, // missing call to resolveTypeFields .type_info => unreachable, // missing call to resolveTypeFields - .var_args_param => unreachable, }, .struct_type => @panic("TODO"), .union_type => @panic("TODO"), @@ -3784,23 +3487,14 @@ pub const Type = struct { switch (ty.tag()) { .function => unreachable, // represents machine code; not a pointer - .anyopaque => unreachable, - .type => unreachable, - .comptime_int => unreachable, - .noreturn => unreachable, - .null => unreachable, - .undefined => unreachable, - .enum_literal => unreachable, .single_const_pointer_to_comptime_int => unreachable, .empty_struct => unreachable, .empty_struct_literal => unreachable, .inferred_alloc_const => unreachable, .inferred_alloc_mut => unreachable, .@"opaque" => unreachable, - .generic_poison => unreachable, - .void => return 0, - .bool, .u1 => return 1, + .u1 => return 1, .u8, .i8 => return 8, .i16, .u16 => return 16, .u29 => return 29, @@ -3875,9 +3569,7 @@ pub const Type = struct { return payload.len * 8 * elem_size + elem_bit_size; }, - .@"anyframe", - .anyframe_T, - => return target.ptrBitWidth(), + .anyframe_T => return target.ptrBitWidth(), .const_slice, .mut_slice, @@ -3916,7 +3608,6 @@ pub const Type = struct { .error_set, .error_set_single, .anyerror_void_error_union, - .anyerror, .error_set_inferred, .error_set_merged, => return 16, // TODO revisit this when we have the concept of the error tag type @@ -3926,19 +3617,6 @@ pub const Type = struct { // includes padding bits. 
return (try abiSizeAdvanced(ty, mod, strat)).scalar * 8; }, - - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .modifier, - .prefetch_options, - .export_options, - .extern_options, - .type_info, - => @panic("TODO at some point we gotta resolve builtin types"), } } @@ -4326,7 +4004,7 @@ pub const Type = struct { .manyptr_const_u8_sentinel_0, => Type.u8, - .single_const_pointer_to_comptime_int => Type.initTag(.comptime_int), + .single_const_pointer_to_comptime_int => Type.comptime_int, .pointer => ty.castTag(.pointer).?.data.pointee_type, else => unreachable, @@ -4372,7 +4050,7 @@ pub const Type = struct { .manyptr_const_u8_sentinel_0, => Type.u8, - .single_const_pointer_to_comptime_int => Type.initTag(.comptime_int), + .single_const_pointer_to_comptime_int => Type.comptime_int, .pointer => { const info = ty.castTag(.pointer).?.data; const child_ty = info.pointee_type; @@ -4387,7 +4065,6 @@ pub const Type = struct { .optional_single_const_pointer => ty.castPointer().?.data, .anyframe_T => ty.castTag(.anyframe_T).?.data, - .@"anyframe" => Type.void, else => unreachable, }; @@ -4468,19 +4145,6 @@ pub const Type = struct { return union_obj.tag_ty; }, - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .modifier, - .prefetch_options, - .export_options, - .extern_options, - .type_info, - => unreachable, // needed to call resolveTypeFields first - else => null, }; } @@ -4495,19 +4159,6 @@ pub const Type = struct { return union_obj.tag_ty; }, - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .modifier, - .prefetch_options, - .export_options, - .extern_options, - .type_info, - => unreachable, // needed to call resolveTypeFields first - else => null, }; } @@ -4572,7 +4223,7 @@ pub const Type = struct { /// Asserts that the type is an error union. pub fn errorUnionPayload(self: Type) Type { return switch (self.tag()) { - .anyerror_void_error_union => Type.initTag(.void), + .anyerror_void_error_union => Type.void, .error_union => self.castTag(.error_union).?.data.payload, else => unreachable, }; @@ -4580,33 +4231,38 @@ pub const Type = struct { pub fn errorUnionSet(self: Type) Type { return switch (self.tag()) { - .anyerror_void_error_union => Type.initTag(.anyerror), + .anyerror_void_error_union => Type.anyerror, .error_union => self.castTag(.error_union).?.data.error_set, else => unreachable, }; } /// Returns false for unresolved inferred error sets. - pub fn errorSetIsEmpty(ty: Type) bool { - switch (ty.tag()) { - .anyerror => return false, - .error_set_inferred => { - const inferred_error_set = ty.castTag(.error_set_inferred).?.data; - // Can't know for sure. - if (!inferred_error_set.is_resolved) return false; - if (inferred_error_set.is_anyerror) return false; - return inferred_error_set.errors.count() == 0; - }, - .error_set_single => return false, - .error_set => { - const err_set_obj = ty.castTag(.error_set).?.data; - return err_set_obj.names.count() == 0; + pub fn errorSetIsEmpty(ty: Type, mod: *const Module) bool { + switch (ty.ip_index) { + .none => switch (ty.tag()) { + .error_set_inferred => { + const inferred_error_set = ty.castTag(.error_set_inferred).?.data; + // Can't know for sure. 
+ if (!inferred_error_set.is_resolved) return false; + if (inferred_error_set.is_anyerror) return false; + return inferred_error_set.errors.count() == 0; + }, + .error_set_single => return false, + .error_set => { + const err_set_obj = ty.castTag(.error_set).?.data; + return err_set_obj.names.count() == 0; + }, + .error_set_merged => { + const name_map = ty.castTag(.error_set_merged).?.data; + return name_map.count() == 0; + }, + else => unreachable, }, - .error_set_merged => { - const name_map = ty.castTag(.error_set_merged).?.data; - return name_map.count() == 0; + .anyerror_type => return false, + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + else => @panic("TODO"), }, - else => unreachable, } } @@ -4614,9 +4270,13 @@ pub const Type = struct { /// Note that the result may be a false negative if the type did not get error set /// resolution prior to this call. pub fn isAnyError(ty: Type) bool { - return switch (ty.tag()) { - .anyerror => true, - .error_set_inferred => ty.castTag(.error_set_inferred).?.data.is_anyerror, + return switch (ty.ip_index) { + .none => switch (ty.tag()) { + .error_set_inferred => ty.castTag(.error_set_inferred).?.data.is_anyerror, + else => false, + }, + .anyerror_type => true, + // TODO handle error_set_inferred here else => false, }; } @@ -4788,72 +4448,75 @@ pub const Type = struct { const target = mod.getTarget(); var ty = starting_ty; - if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .int_type => |int_type| return int_type, - .ptr_type => unreachable, - .array_type => unreachable, - .vector_type => @panic("TODO"), - .optional_type => unreachable, - .error_union_type => unreachable, - .simple_type => |t| switch (t) { - .usize => return .{ .signedness = .unsigned, .bits = target.ptrBitWidth() }, - .isize => return .{ .signedness = .signed, .bits = target.ptrBitWidth() }, - .c_char => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.char) }, - .c_short => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.short) }, - .c_ushort => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ushort) }, - .c_int => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.int) }, - .c_uint => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.uint) }, - .c_long => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.long) }, - .c_ulong => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ulong) }, - .c_longlong => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.longlong) }, - .c_ulonglong => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ulonglong) }, - else => unreachable, - }, - .struct_type => @panic("TODO"), - .union_type => unreachable, - .simple_value => unreachable, - .extern_func => unreachable, - .int => unreachable, - .enum_tag => unreachable, // it's a value, not a type - }; + while (true) switch (ty.ip_index) { + .none => switch (ty.tag()) { + .u1 => return .{ .signedness = .unsigned, .bits = 1 }, + .u8 => return .{ .signedness = .unsigned, .bits = 8 }, + .i8 => return .{ .signedness = .signed, .bits = 8 }, + .u16 => return .{ .signedness = .unsigned, .bits = 16 }, + .i16 => return .{ .signedness = .signed, .bits = 16 }, + .u29 => return .{ .signedness = .unsigned, .bits = 29 }, + .u32 => return .{ .signedness = .unsigned, .bits = 32 }, + .i32 => return .{ .signedness = .signed, .bits = 32 }, + .u64 => return .{ .signedness = .unsigned, .bits = 64 }, + .i64 => return .{ .signedness = 
.signed, .bits = 64 }, + .u128 => return .{ .signedness = .unsigned, .bits = 128 }, + .i128 => return .{ .signedness = .signed, .bits = 128 }, + + .enum_full, .enum_nonexhaustive => ty = ty.cast(Payload.EnumFull).?.data.tag_ty, + .enum_numbered => ty = ty.castTag(.enum_numbered).?.data.tag_ty, + .enum_simple => { + const enum_obj = ty.castTag(.enum_simple).?.data; + const field_count = enum_obj.fields.count(); + if (field_count == 0) return .{ .signedness = .unsigned, .bits = 0 }; + return .{ .signedness = .unsigned, .bits = smallestUnsignedBits(field_count - 1) }; + }, - while (true) switch (ty.tag()) { - .u1 => return .{ .signedness = .unsigned, .bits = 1 }, - .u8 => return .{ .signedness = .unsigned, .bits = 8 }, - .i8 => return .{ .signedness = .signed, .bits = 8 }, - .u16 => return .{ .signedness = .unsigned, .bits = 16 }, - .i16 => return .{ .signedness = .signed, .bits = 16 }, - .u29 => return .{ .signedness = .unsigned, .bits = 29 }, - .u32 => return .{ .signedness = .unsigned, .bits = 32 }, - .i32 => return .{ .signedness = .signed, .bits = 32 }, - .u64 => return .{ .signedness = .unsigned, .bits = 64 }, - .i64 => return .{ .signedness = .signed, .bits = 64 }, - .u128 => return .{ .signedness = .unsigned, .bits = 128 }, - .i128 => return .{ .signedness = .signed, .bits = 128 }, - - .enum_full, .enum_nonexhaustive => ty = ty.cast(Payload.EnumFull).?.data.tag_ty, - .enum_numbered => ty = ty.castTag(.enum_numbered).?.data.tag_ty, - .enum_simple => { - const enum_obj = ty.castTag(.enum_simple).?.data; - const field_count = enum_obj.fields.count(); - if (field_count == 0) return .{ .signedness = .unsigned, .bits = 0 }; - return .{ .signedness = .unsigned, .bits = smallestUnsignedBits(field_count - 1) }; - }, + .error_set, .error_set_single, .error_set_inferred, .error_set_merged => { + // TODO revisit this when error sets support custom int types + return .{ .signedness = .unsigned, .bits = 16 }; + }, + + .vector => ty = ty.castTag(.vector).?.data.elem_type, + + .@"struct" => { + const struct_obj = ty.castTag(.@"struct").?.data; + assert(struct_obj.layout == .Packed); + ty = struct_obj.backing_int_ty; + }, - .error_set, .error_set_single, .anyerror, .error_set_inferred, .error_set_merged => { + else => unreachable, + }, + .anyerror_type => { // TODO revisit this when error sets support custom int types return .{ .signedness = .unsigned, .bits = 16 }; }, - - .vector => ty = ty.castTag(.vector).?.data.elem_type, - - .@"struct" => { - const struct_obj = ty.castTag(.@"struct").?.data; - assert(struct_obj.layout == .Packed); - ty = struct_obj.backing_int_ty; + .usize_type => return .{ .signedness = .unsigned, .bits = target.ptrBitWidth() }, + .isize_type => return .{ .signedness = .signed, .bits = target.ptrBitWidth() }, + .c_char_type => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.char) }, + .c_short_type => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.short) }, + .c_ushort_type => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ushort) }, + .c_int_type => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.int) }, + .c_uint_type => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.uint) }, + .c_long_type => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.long) }, + .c_ulong_type => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ulong) }, + .c_longlong_type => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.longlong) }, + .c_ulonglong_type => 
return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ulonglong) }, + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .int_type => |int_type| return int_type, + .ptr_type => unreachable, + .array_type => unreachable, + .vector_type => @panic("TODO"), + .optional_type => unreachable, + .error_union_type => unreachable, + .simple_type => unreachable, // handled via Index enum tag above + .struct_type => @panic("TODO"), + .union_type => unreachable, + .simple_value => unreachable, + .extern_func => unreachable, + .int => unreachable, + .enum_tag => unreachable, }, - - else => unreachable, }; } @@ -5021,7 +4684,6 @@ pub const Type = struct { else => false, }; return switch (ty.tag()) { - .comptime_int, .u1, .u8, .i8, @@ -5114,7 +4776,6 @@ pub const Type = struct { }; while (true) switch (ty.tag()) { - .comptime_int, .u1, .u8, .i8, @@ -5127,9 +4788,7 @@ pub const Type = struct { .i64, .u128, .i128, - .bool, - .type, - .anyerror, + .error_union, .error_set_single, .error_set, @@ -5142,28 +4801,14 @@ pub const Type = struct { .const_slice_u8_sentinel_0, .const_slice, .mut_slice, - .anyopaque, .optional_single_mut_pointer, .optional_single_const_pointer, - .enum_literal, .anyerror_void_error_union, .error_set_inferred, .@"opaque", .manyptr_u8, .manyptr_const_u8, .manyptr_const_u8_sentinel_0, - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .modifier, - .prefetch_options, - .export_options, - .extern_options, - .type_info, - .@"anyframe", .anyframe_T, .many_const_pointer, .many_mut_pointer, @@ -5258,10 +4903,6 @@ pub const Type = struct { }, .empty_struct, .empty_struct_literal => return Value.initTag(.empty_struct_value), - .void => return Value.initTag(.void_value), - .noreturn => return Value.initTag(.unreachable_value), - .null => return Value.initTag(.null_value), - .undefined => return Value.initTag(.undef), .vector, .array, .array_u8 => { if (ty.arrayLen() == 0) @@ -5273,7 +4914,6 @@ pub const Type = struct { .inferred_alloc_const => unreachable, .inferred_alloc_mut => unreachable, - .generic_poison => unreachable, }; } @@ -5358,22 +4998,7 @@ pub const Type = struct { .i64, .u128, .i128, - .anyopaque, - .bool, - .void, - .anyerror, - .noreturn, - .@"anyframe", - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .modifier, - .prefetch_options, - .export_options, - .extern_options, + .manyptr_u8, .manyptr_const_u8, .manyptr_const_u8_sentinel_0, @@ -5387,21 +5012,14 @@ pub const Type = struct { .error_set_inferred, .error_set_merged, .@"opaque", - .generic_poison, .array_u8, .array_u8_sentinel_0, .enum_simple, => false, .single_const_pointer_to_comptime_int, - .type, - .comptime_int, - .enum_literal, - .type_info, // These are function bodies, not function pointers. 
.function, - .null, - .undefined, => true, .inferred_alloc_mut => unreachable, @@ -5701,17 +5319,6 @@ pub const Type = struct { .enum_full, .enum_nonexhaustive => ty.cast(Payload.EnumFull).?.data.fields, .enum_simple => ty.castTag(.enum_simple).?.data.fields, .enum_numbered => ty.castTag(.enum_numbered).?.data.fields, - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .modifier, - .prefetch_options, - .export_options, - .extern_options, - => @panic("TODO resolve std.builtin types"), else => unreachable, }; } @@ -5779,17 +5386,6 @@ pub const Type = struct { const tag_ty = mod.intType(.unsigned, bits) catch @panic("TODO: handle OOM here"); return S.fieldWithRange(tag_ty, enum_tag, fields_len, mod); }, - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .modifier, - .prefetch_options, - .export_options, - .extern_options, - => @panic("TODO resolve std.builtin types"), else => unreachable, } } @@ -6102,18 +5698,6 @@ pub const Type = struct { const opaque_obj = ty.cast(Payload.Opaque).?.data; return opaque_obj.srcLoc(mod); }, - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .modifier, - .prefetch_options, - .export_options, - .extern_options, - .type_info, - => unreachable, // needed to call resolveTypeFields first else => return null, } @@ -6150,29 +5734,17 @@ pub const Type = struct { const opaque_obj = ty.cast(Payload.Opaque).?.data; return opaque_obj.owner_decl; }, - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .modifier, - .prefetch_options, - .export_options, - .extern_options, - .type_info, - => unreachable, // These need to be resolved earlier. else => return null, } } pub fn isGenericPoison(ty: Type) bool { - return switch (ty.ip_index) { - .generic_poison_type => true, - .none => ty.tag() == .generic_poison, - else => false, - }; + return ty.ip_index == .generic_poison_type; + } + + pub fn isVarArgsParam(ty: Type) bool { + return ty.ip_index == .none and ty.tag() == .var_args_param; } /// This enum does not directly correspond to `std.builtin.TypeId` because @@ -6195,28 +5767,7 @@ pub const Type = struct { i64, u128, i128, - anyopaque, - bool, - void, - type, - anyerror, - comptime_int, - noreturn, - @"anyframe", - null, - undefined, - enum_literal, - atomic_order, - atomic_rmw_op, - calling_convention, - address_space, - float_mode, - reduce_op, - modifier, - prefetch_options, - export_options, - extern_options, - type_info, + manyptr_u8, manyptr_const_u8, manyptr_const_u8_sentinel_0, @@ -6224,7 +5775,6 @@ pub const Type = struct { const_slice_u8, const_slice_u8_sentinel_0, anyerror_void_error_union, - generic_poison, /// Same as `empty_struct` except it has an empty namespace. 
empty_struct_literal, /// This is a special value that tracks a set of types that have been stored @@ -6292,39 +5842,17 @@ pub const Type = struct { .i64, .u128, .i128, - .anyopaque, - .bool, - .void, - .type, - .anyerror, - .comptime_int, - .noreturn, - .enum_literal, - .null, - .undefined, + .single_const_pointer_to_comptime_int, .anyerror_void_error_union, .const_slice_u8, .const_slice_u8_sentinel_0, - .generic_poison, .inferred_alloc_const, .inferred_alloc_mut, .empty_struct_literal, .manyptr_u8, .manyptr_const_u8, .manyptr_const_u8_sentinel_0, - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .modifier, - .prefetch_options, - .export_options, - .extern_options, - .type_info, - .@"anyframe", => @compileError("Type Tag " ++ @tagName(t) ++ " has no payload"), .array_u8, @@ -6674,18 +6202,19 @@ pub const Type = struct { pub const @"f80": Type = .{ .ip_index = .f80_type, .legacy = undefined }; pub const @"f128": Type = .{ .ip_index = .f128_type, .legacy = undefined }; - pub const @"bool" = initTag(.bool); + pub const @"bool": Type = .{ .ip_index = .bool_type, .legacy = undefined }; pub const @"usize": Type = .{ .ip_index = .usize_type, .legacy = undefined }; pub const @"isize": Type = .{ .ip_index = .isize_type, .legacy = undefined }; pub const @"comptime_int": Type = .{ .ip_index = .comptime_int_type, .legacy = undefined }; pub const @"comptime_float": Type = .{ .ip_index = .comptime_float_type, .legacy = undefined }; - pub const @"void" = initTag(.void); - pub const @"type" = initTag(.type); - pub const @"anyerror" = initTag(.anyerror); - pub const @"anyopaque" = initTag(.anyopaque); - pub const @"null" = initTag(.null); - pub const @"undefined" = initTag(.undefined); - pub const @"noreturn" = initTag(.noreturn); + pub const @"void": Type = .{ .ip_index = .void_type, .legacy = undefined }; + pub const @"type": Type = .{ .ip_index = .type_type, .legacy = undefined }; + pub const @"anyerror": Type = .{ .ip_index = .anyerror_type, .legacy = undefined }; + pub const @"anyopaque": Type = .{ .ip_index = .anyopaque_type, .legacy = undefined }; + pub const @"anyframe": Type = .{ .ip_index = .anyframe_type, .legacy = undefined }; + pub const @"null": Type = .{ .ip_index = .null_type, .legacy = undefined }; + pub const @"undefined": Type = .{ .ip_index = .undefined_type, .legacy = undefined }; + pub const @"noreturn": Type = .{ .ip_index = .noreturn_type, .legacy = undefined }; pub const @"c_char": Type = .{ .ip_index = .c_char_type, .legacy = undefined }; pub const @"c_short": Type = .{ .ip_index = .c_short_type, .legacy = undefined }; @@ -6698,6 +6227,8 @@ pub const Type = struct { pub const @"c_ulonglong": Type = .{ .ip_index = .c_ulonglong_type, .legacy = undefined }; pub const @"c_longdouble": Type = .{ .ip_index = .c_longdouble_type, .legacy = undefined }; + pub const generic_poison: Type = .{ .ip_index = .generic_poison_type, .legacy = undefined }; + pub const err_int = Type.u16; pub fn ptr(arena: Allocator, mod: *Module, data: Payload.Pointer.Data) !Type { diff --git a/src/value.zig b/src/value.zig index b0484dfc76..2f9f395017 100644 --- a/src/value.zig +++ b/src/value.zig @@ -991,26 +991,26 @@ pub const Value = struct { .null_type => Type.null, .undefined_type => Type.undefined, .single_const_pointer_to_comptime_int_type => Type.initTag(.single_const_pointer_to_comptime_int), - .anyframe_type => Type.initTag(.@"anyframe"), + .anyframe_type => Type.@"anyframe", .const_slice_u8_type => Type.initTag(.const_slice_u8), 
.const_slice_u8_sentinel_0_type => Type.initTag(.const_slice_u8_sentinel_0), .anyerror_void_error_union_type => Type.initTag(.anyerror_void_error_union), - .generic_poison_type => Type.initTag(.generic_poison), - .enum_literal_type => Type.initTag(.enum_literal), + .generic_poison_type => .{ .ip_index = .generic_poison_type, .legacy = undefined }, + .enum_literal_type => .{ .ip_index = .enum_literal_type, .legacy = undefined }, .manyptr_u8_type => Type.initTag(.manyptr_u8), .manyptr_const_u8_type => Type.initTag(.manyptr_const_u8), .manyptr_const_u8_sentinel_0_type => Type.initTag(.manyptr_const_u8_sentinel_0), - .atomic_order_type => Type.initTag(.atomic_order), - .atomic_rmw_op_type => Type.initTag(.atomic_rmw_op), - .calling_convention_type => Type.initTag(.calling_convention), - .address_space_type => Type.initTag(.address_space), - .float_mode_type => Type.initTag(.float_mode), - .reduce_op_type => Type.initTag(.reduce_op), - .modifier_type => Type.initTag(.modifier), - .prefetch_options_type => Type.initTag(.prefetch_options), - .export_options_type => Type.initTag(.export_options), - .extern_options_type => Type.initTag(.extern_options), - .type_info_type => Type.initTag(.type_info), + .atomic_order_type => .{ .ip_index = .atomic_order_type, .legacy = undefined }, + .atomic_rmw_op_type => .{ .ip_index = .atomic_rmw_op_type, .legacy = undefined }, + .calling_convention_type => .{ .ip_index = .calling_convention_type, .legacy = undefined }, + .address_space_type => .{ .ip_index = .address_space_type, .legacy = undefined }, + .float_mode_type => .{ .ip_index = .float_mode_type, .legacy = undefined }, + .reduce_op_type => .{ .ip_index = .reduce_op_type, .legacy = undefined }, + .modifier_type => .{ .ip_index = .call_modifier_type, .legacy = undefined }, + .prefetch_options_type => .{ .ip_index = .prefetch_options_type, .legacy = undefined }, + .export_options_type => .{ .ip_index = .export_options_type, .legacy = undefined }, + .extern_options_type => .{ .ip_index = .extern_options_type, .legacy = undefined }, + .type_info_type => .{ .ip_index = .type_info_type, .legacy = undefined }, else => unreachable, }; -- cgit v1.2.3 From 85c69c51945d7fb5d4cd2dea03fdb7915ecc55fa Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 3 May 2023 20:04:47 -0700 Subject: Type.isSlice: make it InternPool aware --- src/InternPool.zig | 50 ++++- src/Sema.zig | 56 +++--- src/TypedValue.zig | 2 +- src/arch/aarch64/abi.zig | 2 +- src/arch/arm/abi.zig | 2 +- src/arch/riscv64/abi.zig | 2 +- src/arch/wasm/CodeGen.zig | 20 +- src/arch/wasm/abi.zig | 2 +- src/arch/x86_64/CodeGen.zig | 4 +- src/codegen.zig | 10 +- src/codegen/c.zig | 16 +- src/codegen/llvm.zig | 15 +- src/codegen/spirv.zig | 4 +- src/link/Dwarf.zig | 2 +- src/type.zig | 433 ++++++++++++++++++++++---------------------- src/value.zig | 8 +- 16 files changed, 341 insertions(+), 287 deletions(-) (limited to 'src/arch') diff --git a/src/InternPool.zig b/src/InternPool.zig index 6345d36f26..3ecc18c426 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -629,7 +629,7 @@ pub const Tag = enum(u8) { /// A vector type. /// data is payload to Vector. type_vector, - /// A pointer type along with all its bells and whistles. + /// A fully explicitly specified pointer type. /// data is payload to Pointer. type_pointer, /// An optional type. @@ -682,13 +682,13 @@ pub const Tag = enum(u8) { /// An enum tag identified by a negative integer value. /// data is a limbs index to Int. enum_tag_negative, - /// A float value that can be represented by f32. 
+ /// An f32 value. /// data is float value bitcasted to u32. float_f32, - /// A float value that can be represented by f64. + /// An f64 value. /// data is payload index to Float64. float_f64, - /// A float value that can be represented by f128. + /// An f128 value. /// data is payload index to Float128. float_f128, /// An extern function. @@ -871,7 +871,47 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { .simple_type => .{ .simple_type = @intToEnum(SimpleType, data) }, .simple_value => .{ .simple_value = @intToEnum(SimpleValue, data) }, - else => @panic("TODO"), + .type_vector => { + const vector_info = ip.extraData(Vector, data); + return .{ .vector_type = .{ + .len = vector_info.len, + .child = vector_info.child, + } }; + }, + + .type_pointer => { + const ptr_info = ip.extraData(Pointer, data); + return .{ .ptr_type = .{ + .elem_type = ptr_info.child, + .sentinel = ptr_info.sentinel, + .alignment = ptr_info.flags.alignment, + .size = ptr_info.flags.size, + .is_const = ptr_info.flags.is_const, + .is_volatile = ptr_info.flags.is_volatile, + .is_allowzero = ptr_info.flags.is_allowzero, + .address_space = ptr_info.flags.address_space, + } }; + }, + + .type_optional => .{ .optional_type = .{ .payload_type = @intToEnum(Index, data) } }, + + .type_error_union => @panic("TODO"), + .type_enum_simple => @panic("TODO"), + .simple_internal => @panic("TODO"), + .int_small_u32 => @panic("TODO"), + .int_small_i32 => @panic("TODO"), + .int_small_usize => @panic("TODO"), + .int_small_comptime_unsigned => @panic("TODO"), + .int_small_comptime_signed => @panic("TODO"), + .int_positive => @panic("TODO"), + .int_negative => @panic("TODO"), + .enum_tag_positive => @panic("TODO"), + .enum_tag_negative => @panic("TODO"), + .float_f32 => @panic("TODO"), + .float_f64 => @panic("TODO"), + .float_f128 => @panic("TODO"), + .extern_func => @panic("TODO"), + .func => @panic("TODO"), }; } diff --git a/src/Sema.zig b/src/Sema.zig index 65475104aa..0eecda5d16 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -2030,7 +2030,7 @@ fn failWithArrayInitNotSupported(sema: *Sema, block: *Block, src: LazySrcLoc, ty ty.fmt(mod), }); errdefer msg.destroy(sema.gpa); - if (ty.isSlice()) { + if (ty.isSlice(mod)) { try sema.errNote(block, src, msg, "inferred array length is specified with an underscore: '[_]{}'", .{ty.elemType2(mod).fmt(mod)}); } break :msg msg; @@ -10359,7 +10359,7 @@ fn zirSwitchCond( .ErrorSet, .Enum, => { - if (operand_ty.isSlice()) { + if (operand_ty.isSlice(mod)) { return sema.fail(block, src, "switch on type '{}'", .{operand_ty.fmt(sema.mod)}); } if ((try sema.typeHasOnePossibleValue(operand_ty))) |opv| { @@ -12017,7 +12017,7 @@ fn zirHasField(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const ty = try sema.resolveTypeFields(unresolved_ty); const has_field = hf: { - if (ty.isSlice()) { + if (ty.isSlice(mod)) { if (mem.eql(u8, field_name, "ptr")) break :hf true; if (mem.eql(u8, field_name, "len")) break :hf true; break :hf false; @@ -20020,8 +20020,8 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air return sema.failWithOwnedErrorMsg(msg); } - const dest_is_slice = dest_ty.isSlice(); - const operand_is_slice = operand_ty.isSlice(); + const dest_is_slice = dest_ty.isSlice(mod); + const operand_is_slice = operand_ty.isSlice(mod); if (dest_is_slice and !operand_is_slice) { return sema.fail(block, dest_ty_src, "illegal pointer cast to slice", .{}); } @@ -20274,14 +20274,14 @@ fn zirAlignCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A 
Type.usize, Value.initPayload(&val_payload.base), ); - const actual_ptr = if (ptr_ty.isSlice()) + const actual_ptr = if (ptr_ty.isSlice(mod)) try sema.analyzeSlicePtr(block, ptr_src, ptr, ptr_ty) else ptr; const ptr_int = try block.addUnOp(.ptrtoint, actual_ptr); const remainder = try block.addBinOp(.bit_and, ptr_int, align_minus_1); const is_aligned = try block.addBinOp(.cmp_eq, remainder, .zero_usize); - const ok = if (ptr_ty.isSlice()) ok: { + const ok = if (ptr_ty.isSlice(mod)) ok: { const len = try sema.analyzeSliceLen(block, ptr_src, ptr); const len_zero = try block.addBinOp(.cmp_eq, len, .zero_usize); break :ok try block.addBinOp(.bit_or, len_zero, is_aligned); @@ -22336,7 +22336,7 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void // Change the src from slice to a many pointer, to avoid multiple ptr // slice extractions in AIR instructions. const new_src_ptr_ty = sema.typeOf(new_src_ptr); - if (new_src_ptr_ty.isSlice()) { + if (new_src_ptr_ty.isSlice(mod)) { new_src_ptr = try sema.analyzeSlicePtr(block, src_src, new_src_ptr, new_src_ptr_ty); } } else if (dest_len == .none and len_val == null) { @@ -22344,7 +22344,7 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void const dest_ptr_ptr = try sema.analyzeRef(block, dest_src, new_dest_ptr); new_dest_ptr = try sema.analyzeSlice(block, dest_src, dest_ptr_ptr, .zero, src_len, .none, .unneeded, dest_src, dest_src, dest_src, false); const new_src_ptr_ty = sema.typeOf(new_src_ptr); - if (new_src_ptr_ty.isSlice()) { + if (new_src_ptr_ty.isSlice(mod)) { new_src_ptr = try sema.analyzeSlicePtr(block, src_src, new_src_ptr, new_src_ptr_ty); } } @@ -22363,7 +22363,7 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void // Extract raw pointer from dest slice. The AIR instructions could support them, but // it would cause redundant machine code instructions. 
const new_dest_ptr_ty = sema.typeOf(new_dest_ptr); - const raw_dest_ptr = if (new_dest_ptr_ty.isSlice()) + const raw_dest_ptr = if (new_dest_ptr_ty.isSlice(mod)) try sema.analyzeSlicePtr(block, dest_src, new_dest_ptr, new_dest_ptr_ty) else new_dest_ptr; @@ -23383,7 +23383,7 @@ fn validateExternType( .Float, .AnyFrame, => return true, - .Pointer => return !(ty.isSlice() or try sema.typeRequiresComptime(ty)), + .Pointer => return !(ty.isSlice(mod) or try sema.typeRequiresComptime(ty)), .Int => switch (ty.intInfo(mod).bits) { 8, 16, 32, 64, 128 => return true, else => return false, @@ -23448,7 +23448,7 @@ fn explainWhyTypeIsNotExtern( => return, .Pointer => { - if (ty.isSlice()) { + if (ty.isSlice(mod)) { try mod.errNoteNonLazy(src_loc, msg, "slices have no guaranteed in-memory representation", .{}); } else { const pointee_ty = ty.childType(); @@ -23523,7 +23523,7 @@ fn validatePackedType(ty: Type, mod: *const Module) bool { .Vector, .Enum, => return true, - .Pointer => return !ty.isSlice(), + .Pointer => return !ty.isSlice(mod), .Struct, .Union => return ty.containerLayout() == .Packed, } } @@ -23803,7 +23803,7 @@ fn panicSentinelMismatch( const expected_sentinel = try sema.addConstant(sentinel_ty, expected_sentinel_val); const ptr_ty = sema.typeOf(ptr); - const actual_sentinel = if (ptr_ty.isSlice()) + const actual_sentinel = if (ptr_ty.isSlice(mod)) try parent_block.addBinOp(.slice_elem_val, ptr, sentinel_index) else blk: { const elem_ptr_ty = try sema.elemPtrType(ptr_ty, null); @@ -24064,7 +24064,7 @@ fn fieldVal( const msg = msg: { const msg = try sema.errMsg(block, src, "type '{}' has no members", .{child_type.fmt(sema.mod)}); errdefer msg.destroy(sema.gpa); - if (child_type.isSlice()) try sema.errNote(block, src, msg, "slice values have 'len' and 'ptr' members", .{}); + if (child_type.isSlice(mod)) try sema.errNote(block, src, msg, "slice values have 'len' and 'ptr' members", .{}); if (child_type.zigTypeTag(mod) == .Array) try sema.errNote(block, src, msg, "array values have 'len' member", .{}); break :msg msg; }; @@ -24140,7 +24140,7 @@ fn fieldPtr( ); } }, - .Pointer => if (inner_ty.isSlice()) { + .Pointer => if (inner_ty.isSlice(mod)) { const inner_ptr = if (is_pointer_to) try sema.analyzeLoad(block, src, object_ptr, object_ptr_src) else @@ -25743,8 +25743,8 @@ fn coerceExtra( } }; break :pointer; } - if (dest_ty.isSlice()) break :to_anyopaque; - if (inst_ty.isSlice()) { + if (dest_ty.isSlice(mod)) break :to_anyopaque; + if (inst_ty.isSlice(mod)) { in_memory_result = .{ .slice_to_anyopaque = .{ .actual = inst_ty, .wanted = dest_ty, @@ -25885,7 +25885,7 @@ fn coerceExtra( return sema.coerceTupleToSlicePtrs(block, dest_ty, dest_ty_src, inst, inst_src); }, .Many => p: { - if (!inst_ty.isSlice()) break :p; + if (!inst_ty.isSlice(mod)) break :p; if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :p; const inst_info = inst_ty.ptrInfo().data; @@ -26651,7 +26651,7 @@ fn coerceInMemoryAllowed( } // Slices - if (dest_ty.isSlice() and src_ty.isSlice()) { + if (dest_ty.isSlice(mod) and src_ty.isSlice(mod)) { return try sema.coerceInMemoryAllowedPtrs(block, dest_ty, src_ty, dest_ty, src_ty, dest_is_mut, target, dest_src, src_src); } @@ -27744,7 +27744,7 @@ fn beginComptimePtrMutation( ); }, .Pointer => { - assert(parent.ty.isSlice()); + assert(parent.ty.isSlice(mod)); val_ptr.* = try Value.Tag.slice.create(arena, .{ .ptr = Value.undef, .len = Value.undef, @@ -28187,7 +28187,7 @@ fn beginComptimePtrLoad( break :blk deref; } - if (field_ptr.container_ty.isSlice()) { + if 
(field_ptr.container_ty.isSlice(mod)) { const slice_val = tv.val.castTag(.slice).?.data; deref.pointee = switch (field_index) { Value.Payload.Slice.ptr_index => TypedValue{ @@ -28442,13 +28442,13 @@ fn coerceCompatiblePtrs( if (block.wantSafety() and inst_allows_zero and !dest_ty.ptrAllowsZero(mod) and (try sema.typeHasRuntimeBits(dest_ty.elemType2(mod)) or dest_ty.elemType2(mod).zigTypeTag(mod) == .Fn)) { - const actual_ptr = if (inst_ty.isSlice()) + const actual_ptr = if (inst_ty.isSlice(mod)) try sema.analyzeSlicePtr(block, inst_src, inst, inst_ty) else inst; const ptr_int = try block.addUnOp(.ptrtoint, actual_ptr); const is_non_zero = try block.addBinOp(.cmp_neq, ptr_int, .zero_usize); - const ok = if (inst_ty.isSlice()) ok: { + const ok = if (inst_ty.isSlice(mod)) ok: { const len = try sema.analyzeSliceLen(block, inst_src, inst); const len_zero = try block.addBinOp(.cmp_eq, len, .zero_usize); break :ok try block.addBinOp(.bit_or, len_zero, is_non_zero); @@ -29548,7 +29548,7 @@ fn analyzeSlice( else => return sema.fail(block, src, "slice of non-array type '{}'", .{ptr_ptr_child_ty.fmt(mod)}), } - const ptr = if (slice_ty.isSlice()) + const ptr = if (slice_ty.isSlice(mod)) try sema.analyzeSlicePtr(block, ptr_src, ptr_or_slice, slice_ty) else ptr_or_slice; @@ -29605,7 +29605,7 @@ fn analyzeSlice( } break :e try sema.addConstant(Type.usize, len_val); - } else if (slice_ty.isSlice()) { + } else if (slice_ty.isSlice(mod)) { if (!end_is_len) { const end = if (by_length) end: { const len = try sema.coerce(block, Type.usize, uncasted_end_opt, end_src); @@ -29778,7 +29778,7 @@ fn analyzeSlice( try sema.addSafetyCheck(block, is_non_null, .unwrap_null); } - if (slice_ty.isSlice()) { + if (slice_ty.isSlice(mod)) { const slice_len_inst = try block.addTyOp(.slice_len, Type.usize, ptr_or_slice); const actual_len = if (slice_ty.sentinel() == null) slice_len_inst @@ -29840,7 +29840,7 @@ fn analyzeSlice( // requirement: end <= len const opt_len_inst = if (array_ty.zigTypeTag(mod) == .Array) try sema.addIntUnsigned(Type.usize, array_ty.arrayLenIncludingSentinel()) - else if (slice_ty.isSlice()) blk: { + else if (slice_ty.isSlice(mod)) blk: { if (try sema.resolveDefinedValue(block, src, ptr_or_slice)) |slice_val| { // we don't need to add one for sentinels because the // underlying value data includes the sentinel diff --git a/src/TypedValue.zig b/src/TypedValue.zig index 7302f42e57..877a8f5f4c 100644 --- a/src/TypedValue.zig +++ b/src/TypedValue.zig @@ -259,7 +259,7 @@ pub fn print( } else if (field_ptr.container_ty.zigTypeTag(mod) == .Union) { const field_name = field_ptr.container_ty.unionFields().keys()[field_ptr.field_index]; return writer.print(".{s}", .{field_name}); - } else if (field_ptr.container_ty.isSlice()) { + } else if (field_ptr.container_ty.isSlice(mod)) { switch (field_ptr.field_index) { Value.Payload.Slice.ptr_index => return writer.writeAll(".ptr"), Value.Payload.Slice.len_index => return writer.writeAll(".len"), diff --git a/src/arch/aarch64/abi.zig b/src/arch/aarch64/abi.zig index cbfd6a1171..821afd27ae 100644 --- a/src/arch/aarch64/abi.zig +++ b/src/arch/aarch64/abi.zig @@ -52,7 +52,7 @@ pub fn classifyType(ty: Type, mod: *const Module) Class { return .byval; }, .Pointer => { - std.debug.assert(!ty.isSlice()); + std.debug.assert(!ty.isSlice(mod)); return .byval; }, .ErrorUnion, diff --git a/src/arch/arm/abi.zig b/src/arch/arm/abi.zig index ca7fff7d08..eee4b41eef 100644 --- a/src/arch/arm/abi.zig +++ b/src/arch/arm/abi.zig @@ -94,7 +94,7 @@ pub fn classifyType(ty: Type, mod: *const 
Module, ctx: Context) Class { return .byval; }, .Pointer => { - assert(!ty.isSlice()); + assert(!ty.isSlice(mod)); return .byval; }, .ErrorUnion, diff --git a/src/arch/riscv64/abi.zig b/src/arch/riscv64/abi.zig index c9e0873bce..ac0d8d3e32 100644 --- a/src/arch/riscv64/abi.zig +++ b/src/arch/riscv64/abi.zig @@ -52,7 +52,7 @@ pub fn classifyType(ty: Type, mod: *const Module) Class { return .byval; }, .Pointer => { - std.debug.assert(!ty.isSlice()); + std.debug.assert(!ty.isSlice(mod)); return .byval; }, .ErrorUnion, diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index 2c1e8aa36d..bb3f1f769d 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -1773,7 +1773,7 @@ fn isByRef(ty: Type, mod: *const Module) bool { }, .Pointer => { // Slices act like struct and will be passed by reference - if (ty.isSlice()) return true; + if (ty.isSlice(mod)) return true; return false; }, } @@ -2396,7 +2396,7 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE }, }, .Pointer => { - if (ty.isSlice()) { + if (ty.isSlice(mod)) { // store pointer first // lower it to the stack so we do not have to store rhs into a local first try func.emitWValue(lhs); @@ -3010,11 +3010,11 @@ fn lowerParentPtrDecl(func: *CodeGen, ptr_val: Value, decl_index: Module.Decl.In } fn lowerDeclRefValue(func: *CodeGen, tv: TypedValue, decl_index: Module.Decl.Index, offset: u32) InnerError!WValue { - if (tv.ty.isSlice()) { + const mod = func.bin_file.base.options.module.?; + if (tv.ty.isSlice(mod)) { return WValue{ .memory = try func.bin_file.lowerUnnamedConst(tv, decl_index) }; } - const mod = func.bin_file.base.options.module.?; const decl = mod.declPtr(decl_index); if (decl.ty.zigTypeTag(mod) != .Fn and !decl.ty.hasRuntimeBitsIgnoreComptime(mod)) { return WValue{ .imm32 = 0xaaaaaaaa }; @@ -4182,7 +4182,7 @@ fn isNull(func: *CodeGen, operand: WValue, optional_ty: Type, opcode: wasm.Opcod }; try func.addMemArg(.i32_load8_u, .{ .offset = operand.offset() + offset, .alignment = 1 }); } - } else if (payload_ty.isSlice()) { + } else if (payload_ty.isSlice(mod)) { switch (func.arch()) { .wasm32 => try func.addMemArg(.i32_load, .{ .offset = operand.offset(), .alignment = 4 }), .wasm64 => try func.addMemArg(.i64_load, .{ .offset = operand.offset(), .alignment = 8 }), @@ -4455,10 +4455,11 @@ fn airArrayToSlice(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airPtrToInt(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const un_op = func.air.instructions.items(.data)[inst].un_op; const operand = try func.resolveInst(un_op); const ptr_ty = func.typeOf(un_op); - const result = if (ptr_ty.isSlice()) + const result = if (ptr_ty.isSlice(mod)) try func.slicePtr(operand) else switch (operand) { // for stack offset, return a pointer to this offset. 
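// --- editor's sketch (illustrative, not part of the patch) -------------------
// Context for the `airPtrToInt` hunk above: a slice is a two-word
// { ptr, len } aggregate rather than a bare pointer, so pointer-flavored
// operations must first project the `ptr` word via `func.slicePtr`. A minimal
// model of that projection; `Slice` and `rawPtr` are hypothetical names used
// only for illustration:
const Slice = extern struct { ptr: [*]const u8, len: usize };
fn rawPtr(s: Slice) [*]const u8 {
    return s.ptr; // the word slicePtr() extracts before the pointer is reinterpreted as an int
}
// -----------------------------------------------------------------------------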
@@ -4479,7 +4480,7 @@ fn airPtrElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const elem_size = elem_ty.abiSize(mod); // load pointer onto the stack - if (ptr_ty.isSlice()) { + if (ptr_ty.isSlice(mod)) { _ = try func.load(ptr, Type.usize, 0); } else { try func.lowerToStack(ptr); @@ -4518,7 +4519,7 @@ fn airPtrElemPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const index = try func.resolveInst(bin_op.rhs); // load pointer onto the stack - if (ptr_ty.isSlice()) { + if (ptr_ty.isSlice(mod)) { _ = try func.load(ptr, Type.usize, 0); } else { try func.lowerToStack(ptr); @@ -5441,7 +5442,8 @@ fn airFieldParentPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn sliceOrArrayPtr(func: *CodeGen, ptr: WValue, ptr_ty: Type) InnerError!WValue { - if (ptr_ty.isSlice()) { + const mod = func.bin_file.base.options.module.?; + if (ptr_ty.isSlice(mod)) { return func.slicePtr(ptr); } else { return ptr; diff --git a/src/arch/wasm/abi.zig b/src/arch/wasm/abi.zig index 7dd4425c01..c7819b0fa6 100644 --- a/src/arch/wasm/abi.zig +++ b/src/arch/wasm/abi.zig @@ -60,7 +60,7 @@ pub fn classifyType(ty: Type, mod: *const Module) [2]Class { return direct; }, .Pointer => { - std.debug.assert(!ty.isSlice()); + std.debug.assert(!ty.isSlice(mod)); return direct; }, .Union => { diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 3e0ca4831b..ad67a0db3d 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -8688,7 +8688,7 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined; const some_info: struct { off: i32, ty: Type } = if (opt_ty.optionalReprIsPayload(mod)) - .{ .off = 0, .ty = if (pl_ty.isSlice()) pl_ty.slicePtrFieldType(&ptr_buf) else pl_ty } + .{ .off = 0, .ty = if (pl_ty.isSlice(mod)) pl_ty.slicePtrFieldType(&ptr_buf) else pl_ty } else .{ .off = @intCast(i32, pl_ty.abiSize(mod)), .ty = Type.bool }; @@ -8781,7 +8781,7 @@ fn isNullPtr(self: *Self, inst: Air.Inst.Index, ptr_ty: Type, ptr_mcv: MCValue) var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined; const some_info: struct { off: i32, ty: Type } = if (opt_ty.optionalReprIsPayload(mod)) - .{ .off = 0, .ty = if (pl_ty.isSlice()) pl_ty.slicePtrFieldType(&ptr_buf) else pl_ty } + .{ .off = 0, .ty = if (pl_ty.isSlice(mod)) pl_ty.slicePtrFieldType(&ptr_buf) else pl_ty } else .{ .off = @intCast(i32, pl_ty.abiSize(mod)), .ty = Type.bool }; diff --git a/src/codegen.zig b/src/codegen.zig index a3ecf88d50..c9e2c6c265 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -317,11 +317,11 @@ pub fn generateSymbol( switch (target.ptrBitWidth()) { 32 => { mem.writeInt(u32, try code.addManyAsArray(4), 0, endian); - if (typed_value.ty.isSlice()) try code.appendNTimes(0xaa, 4); + if (typed_value.ty.isSlice(mod)) try code.appendNTimes(0xaa, 4); }, 64 => { mem.writeInt(u64, try code.addManyAsArray(8), 0, endian); - if (typed_value.ty.isSlice()) try code.appendNTimes(0xaa, 8); + if (typed_value.ty.isSlice(mod)) try code.appendNTimes(0xaa, 8); }, else => unreachable, } @@ -845,7 +845,7 @@ fn lowerParentPtr( debug_output, reloc_info.offset(@intCast(u32, switch (field_ptr.container_ty.zigTypeTag(mod)) { .Pointer => offset: { - assert(field_ptr.container_ty.isSlice()); + assert(field_ptr.container_ty.isSlice(mod)); var buf: Type.SlicePtrFieldTypeBuffer = undefined; break :offset switch (field_ptr.field_index) { 0 => 0, @@ -946,7 +946,7 @@ fn lowerDeclRef( ) CodeGenError!Result { const target = bin_file.options.target; 
const mod = bin_file.options.module.?; - if (typed_value.ty.isSlice()) { + if (typed_value.ty.isSlice(mod)) { // generate ptr var buf: Type.SlicePtrFieldTypeBuffer = undefined; const slice_ptr_field_type = typed_value.ty.slicePtrFieldType(&buf); @@ -1174,7 +1174,7 @@ pub fn genTypedValue( const target = bin_file.options.target; const ptr_bits = target.ptrBitWidth(); - if (!typed_value.ty.isSlice()) { + if (!typed_value.ty.isSlice(mod)) { if (typed_value.val.castTag(.variable)) |payload| { return genDeclRef(bin_file, src_loc, typed_value, payload.data.owner_decl); } diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 327ccb0119..cd4f36e574 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -556,7 +556,7 @@ pub const DeclGen = struct { if (decl.val.castTag(.variable)) |var_payload| try dg.renderFwdDecl(decl_index, var_payload.data); - if (ty.isSlice()) { + if (ty.isSlice(mod)) { if (location == .StaticInitializer) { try writer.writeByte('{'); } else { @@ -603,7 +603,7 @@ pub const DeclGen = struct { fn renderParentPtr(dg: *DeclGen, writer: anytype, ptr_val: Value, ptr_ty: Type, location: ValueRenderLocation) error{ OutOfMemory, AnalysisFail }!void { const mod = dg.module; - if (!ptr_ty.isSlice()) { + if (!ptr_ty.isSlice(mod)) { try writer.writeByte('('); try dg.renderType(writer, ptr_ty); try writer.writeByte(')'); @@ -776,7 +776,7 @@ pub const DeclGen = struct { try dg.renderValue(writer, repr_ty, Value.undef, .FunctionArgument); return writer.writeByte(')'); }, - .Pointer => if (ty.isSlice()) { + .Pointer => if (ty.isSlice(mod)) { if (!location.isInitializer()) { try writer.writeByte('('); try dg.renderType(writer, ty); @@ -1045,7 +1045,7 @@ pub const DeclGen = struct { return; }, .Pointer => switch (val.tag()) { - .null_value, .zero => if (ty.isSlice()) { + .null_value, .zero => if (ty.isSlice(mod)) { var slice_pl = Value.Payload.Slice{ .base = .{ .tag = .slice }, .data = .{ .ptr = val, .len = Value.undef }, @@ -5073,7 +5073,7 @@ fn airIsNull( TypedValue{ .ty = optional_ty, .val = Value.null } else if (payload_ty.zigTypeTag(mod) == .ErrorSet) TypedValue{ .ty = payload_ty, .val = Value.zero } - else if (payload_ty.isSlice() and optional_ty.optionalReprIsPayload(mod)) rhs: { + else if (payload_ty.isSlice(mod) and optional_ty.optionalReprIsPayload(mod)) rhs: { try writer.writeAll(".ptr"); const slice_ptr_ty = payload_ty.slicePtrFieldType(&slice_ptr_buf); break :rhs TypedValue{ .ty = slice_ptr_ty, .val = Value.null }; @@ -5864,6 +5864,7 @@ fn airFloatCast(f: *Function, inst: Air.Inst.Index) !CValue { } fn airPtrToInt(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const un_op = f.air.instructions.items(.data)[inst].un_op; const operand = try f.resolveInst(un_op); @@ -5877,7 +5878,7 @@ fn airPtrToInt(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeAll(" = ("); try f.renderType(writer, inst_ty); try writer.writeByte(')'); - if (operand_ty.isSlice()) { + if (operand_ty.isSlice(mod)) { try f.writeCValueMember(writer, operand, .{ .identifier = "len" }); } else { try f.writeCValue(writer, operand, .Other); @@ -6272,7 +6273,8 @@ fn airAtomicStore(f: *Function, inst: Air.Inst.Index, order: [*:0]const u8) !CVa } fn writeSliceOrPtr(f: *Function, writer: anytype, ptr: CValue, ptr_ty: Type) !void { - if (ptr_ty.isSlice()) { + const mod = f.object.dg.module; + if (ptr_ty.isSlice(mod)) { try f.writeCValueMember(writer, ptr, .{ .identifier = "ptr" }); } else { try f.writeCValue(writer, ptr, .FunctionArgument); diff --git a/src/codegen/llvm.zig 
b/src/codegen/llvm.zig index 1a092dff69..5d9345c84f 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -1636,7 +1636,7 @@ pub const Object = struct { return ptr_di_ty; } - if (ty.isSlice()) { + if (ty.isSlice(mod)) { var buf: Type.SlicePtrFieldTypeBuffer = undefined; const ptr_ty = ty.slicePtrFieldType(&buf); const len_ty = Type.usize; @@ -2833,7 +2833,7 @@ pub const DeclGen = struct { }, .Bool => return dg.context.intType(1), .Pointer => { - if (t.isSlice()) { + if (t.isSlice(mod)) { var buf: Type.SlicePtrFieldTypeBuffer = undefined; const ptr_type = t.slicePtrFieldType(&buf); @@ -4110,7 +4110,7 @@ pub const DeclGen = struct { } }, .Pointer => { - assert(parent_ty.isSlice()); + assert(parent_ty.isSlice(mod)); const indices: [2]*llvm.Value = .{ llvm_u32.constInt(0, .False), llvm_u32.constInt(field_index, .False), @@ -4184,7 +4184,7 @@ pub const DeclGen = struct { decl_index: Module.Decl.Index, ) Error!*llvm.Value { const mod = self.module; - if (tv.ty.isSlice()) { + if (tv.ty.isSlice(mod)) { var buf: Type.SlicePtrFieldTypeBuffer = undefined; const ptr_ty = tv.ty.slicePtrFieldType(&buf); var slice_len: Value.Payload.U64 = .{ @@ -5794,7 +5794,8 @@ pub const FuncGen = struct { } fn sliceOrArrayPtr(fg: *FuncGen, ptr: *llvm.Value, ty: Type) *llvm.Value { - if (ty.isSlice()) { + const mod = fg.dg.module; + if (ty.isSlice(mod)) { return fg.builder.buildExtractValue(ptr, 0, ""); } else { return ptr; @@ -6669,7 +6670,7 @@ pub const FuncGen = struct { self.builder.buildLoad(optional_llvm_ty, operand, "") else operand; - if (payload_ty.isSlice()) { + if (payload_ty.isSlice(mod)) { const slice_ptr = self.builder.buildExtractValue(loaded, 0, ""); var slice_buf: Type.SlicePtrFieldTypeBuffer = undefined; const ptr_ty = try self.dg.lowerType(payload_ty.slicePtrFieldType(&slice_buf)); @@ -10864,7 +10865,7 @@ const ParamTypeIterator = struct { it.zig_index += 1; it.llvm_index += 1; var buf: Type.Payload.ElemType = undefined; - if (ty.isSlice() or (ty.zigTypeTag(mod) == .Optional and ty.optionalChild(&buf).isSlice())) { + if (ty.isSlice(mod) or (ty.zigTypeTag(mod) == .Optional and ty.optionalChild(&buf).isSlice(mod))) { it.llvm_index += 1; return .slice; } else if (isByRef(ty, mod)) { diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 417a8035b5..f69c6cb317 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -2980,12 +2980,12 @@ pub const DeclGen = struct { // Pointer payload represents nullability: pointer or slice. 
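// --- editor's sketch (illustrative, not part of the patch) -------------------
// The comment above refers to pointer-like optionals: when the payload is a
// pointer or slice, nullability is encoded in the pointer word itself, so the
// null check extracts field 0 of a slice and compares it against zero. A
// language-level model of the same check:
fn isNullModel(opt: ?[*]const u8) bool {
    return opt == null; // lowers to a pointer-vs-zero compare; no separate flag bit
}
// -----------------------------------------------------------------------------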
var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined; - const ptr_ty = if (payload_ty.isSlice()) + const ptr_ty = if (payload_ty.isSlice(mod)) payload_ty.slicePtrFieldType(&ptr_buf) else payload_ty; - const ptr_id = if (payload_ty.isSlice()) + const ptr_id = if (payload_ty.isSlice(mod)) try self.extractField(Type.bool, operand_id, 0) else operand_id; diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig index 9c6e54ea98..682431203e 100644 --- a/src/link/Dwarf.zig +++ b/src/link/Dwarf.zig @@ -258,7 +258,7 @@ pub const DeclState = struct { } }, .Pointer => { - if (ty.isSlice()) { + if (ty.isSlice(mod)) { // Slices are structs: struct { .ptr = *, .len = N } const ptr_bits = target.ptrBitWidth(); const ptr_bytes = @intCast(u8, @divExact(ptr_bits, 8)); diff --git a/src/type.zig b/src/type.zig index 0eff51251d..f05c5e15e8 100644 --- a/src/type.zig +++ b/src/type.zig @@ -229,7 +229,7 @@ pub const Type = struct { .Frame, => false, - .Pointer => !ty.isSlice() and (is_equality_cmp or ty.isCPtr()), + .Pointer => !ty.isSlice(mod) and (is_equality_cmp or ty.isCPtr()), .Optional => { if (!is_equality_cmp) return false; var buf: Payload.ElemType = undefined; @@ -369,209 +369,212 @@ pub const Type = struct { } pub fn ptrInfo(self: Type) Payload.Pointer { - switch (self.tag()) { - .single_const_pointer_to_comptime_int => return .{ .data = .{ - .pointee_type = Type.comptime_int, - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = false, - .@"volatile" = false, - .size = .One, - } }, - .const_slice_u8 => return .{ .data = .{ - .pointee_type = Type.u8, - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = false, - .@"volatile" = false, - .size = .Slice, - } }, - .const_slice_u8_sentinel_0 => return .{ .data = .{ - .pointee_type = Type.u8, - .sentinel = Value.zero, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = false, - .@"volatile" = false, - .size = .Slice, - } }, - .single_const_pointer => return .{ .data = .{ - .pointee_type = self.castPointer().?.data, - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = false, - .@"volatile" = false, - .size = .One, - } }, - .single_mut_pointer => return .{ .data = .{ - .pointee_type = self.castPointer().?.data, - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = true, - .@"volatile" = false, - .size = .One, - } }, - .many_const_pointer => return .{ .data = .{ - .pointee_type = self.castPointer().?.data, - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = false, - .@"volatile" = false, - .size = .Many, - } }, - .manyptr_const_u8 => return .{ .data = .{ - .pointee_type = Type.u8, - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = false, - .@"volatile" = false, - .size = .Many, - } }, - .manyptr_const_u8_sentinel_0 => return .{ .data = .{ - .pointee_type = Type.u8, - .sentinel = Value.zero, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = false, - .@"volatile" = false, - .size = .Many, - } }, - .many_mut_pointer => 
return .{ .data = .{ - .pointee_type = self.castPointer().?.data, - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = true, - .@"volatile" = false, - .size = .Many, - } }, - .manyptr_u8 => return .{ .data = .{ - .pointee_type = Type.u8, - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = true, - .@"volatile" = false, - .size = .Many, - } }, - .c_const_pointer => return .{ .data = .{ - .pointee_type = self.castPointer().?.data, - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = true, - .mutable = false, - .@"volatile" = false, - .size = .C, - } }, - .c_mut_pointer => return .{ .data = .{ - .pointee_type = self.castPointer().?.data, - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = true, - .mutable = true, - .@"volatile" = false, - .size = .C, - } }, - .const_slice => return .{ .data = .{ - .pointee_type = self.castPointer().?.data, - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = false, - .@"volatile" = false, - .size = .Slice, - } }, - .mut_slice => return .{ .data = .{ - .pointee_type = self.castPointer().?.data, - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = true, - .@"volatile" = false, - .size = .Slice, - } }, - - .pointer => return self.castTag(.pointer).?.*, - - .optional_single_mut_pointer => return .{ .data = .{ - .pointee_type = self.castPointer().?.data, - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = true, - .@"volatile" = false, - .size = .One, - } }, - .optional_single_const_pointer => return .{ .data = .{ - .pointee_type = self.castPointer().?.data, - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = false, - .@"volatile" = false, - .size = .One, - } }, - .optional => { - var buf: Payload.ElemType = undefined; - const child_type = self.optionalChild(&buf); - return child_type.ptrInfo(); - }, + switch (self.ip_index) { + .none => switch (self.tag()) { + .single_const_pointer_to_comptime_int => return .{ .data = .{ + .pointee_type = Type.comptime_int, + .sentinel = null, + .@"align" = 0, + .@"addrspace" = .generic, + .bit_offset = 0, + .host_size = 0, + .@"allowzero" = false, + .mutable = false, + .@"volatile" = false, + .size = .One, + } }, + .const_slice_u8 => return .{ .data = .{ + .pointee_type = Type.u8, + .sentinel = null, + .@"align" = 0, + .@"addrspace" = .generic, + .bit_offset = 0, + .host_size = 0, + .@"allowzero" = false, + .mutable = false, + .@"volatile" = false, + .size = .Slice, + } }, + .const_slice_u8_sentinel_0 => return .{ .data = .{ + .pointee_type = Type.u8, + .sentinel = Value.zero, + .@"align" = 0, + .@"addrspace" = .generic, + .bit_offset = 0, + .host_size = 0, + .@"allowzero" = false, + .mutable = false, + .@"volatile" = false, + .size = .Slice, + } }, + .single_const_pointer => return .{ .data = .{ + .pointee_type = self.castPointer().?.data, + .sentinel = null, + .@"align" = 0, + .@"addrspace" = .generic, + .bit_offset = 0, + .host_size = 0, + .@"allowzero" = false, + .mutable = 
false, + .@"volatile" = false, + .size = .One, + } }, + .single_mut_pointer => return .{ .data = .{ + .pointee_type = self.castPointer().?.data, + .sentinel = null, + .@"align" = 0, + .@"addrspace" = .generic, + .bit_offset = 0, + .host_size = 0, + .@"allowzero" = false, + .mutable = true, + .@"volatile" = false, + .size = .One, + } }, + .many_const_pointer => return .{ .data = .{ + .pointee_type = self.castPointer().?.data, + .sentinel = null, + .@"align" = 0, + .@"addrspace" = .generic, + .bit_offset = 0, + .host_size = 0, + .@"allowzero" = false, + .mutable = false, + .@"volatile" = false, + .size = .Many, + } }, + .manyptr_const_u8 => return .{ .data = .{ + .pointee_type = Type.u8, + .sentinel = null, + .@"align" = 0, + .@"addrspace" = .generic, + .bit_offset = 0, + .host_size = 0, + .@"allowzero" = false, + .mutable = false, + .@"volatile" = false, + .size = .Many, + } }, + .manyptr_const_u8_sentinel_0 => return .{ .data = .{ + .pointee_type = Type.u8, + .sentinel = Value.zero, + .@"align" = 0, + .@"addrspace" = .generic, + .bit_offset = 0, + .host_size = 0, + .@"allowzero" = false, + .mutable = false, + .@"volatile" = false, + .size = .Many, + } }, + .many_mut_pointer => return .{ .data = .{ + .pointee_type = self.castPointer().?.data, + .sentinel = null, + .@"align" = 0, + .@"addrspace" = .generic, + .bit_offset = 0, + .host_size = 0, + .@"allowzero" = false, + .mutable = true, + .@"volatile" = false, + .size = .Many, + } }, + .manyptr_u8 => return .{ .data = .{ + .pointee_type = Type.u8, + .sentinel = null, + .@"align" = 0, + .@"addrspace" = .generic, + .bit_offset = 0, + .host_size = 0, + .@"allowzero" = false, + .mutable = true, + .@"volatile" = false, + .size = .Many, + } }, + .c_const_pointer => return .{ .data = .{ + .pointee_type = self.castPointer().?.data, + .sentinel = null, + .@"align" = 0, + .@"addrspace" = .generic, + .bit_offset = 0, + .host_size = 0, + .@"allowzero" = true, + .mutable = false, + .@"volatile" = false, + .size = .C, + } }, + .c_mut_pointer => return .{ .data = .{ + .pointee_type = self.castPointer().?.data, + .sentinel = null, + .@"align" = 0, + .@"addrspace" = .generic, + .bit_offset = 0, + .host_size = 0, + .@"allowzero" = true, + .mutable = true, + .@"volatile" = false, + .size = .C, + } }, + .const_slice => return .{ .data = .{ + .pointee_type = self.castPointer().?.data, + .sentinel = null, + .@"align" = 0, + .@"addrspace" = .generic, + .bit_offset = 0, + .host_size = 0, + .@"allowzero" = false, + .mutable = false, + .@"volatile" = false, + .size = .Slice, + } }, + .mut_slice => return .{ .data = .{ + .pointee_type = self.castPointer().?.data, + .sentinel = null, + .@"align" = 0, + .@"addrspace" = .generic, + .bit_offset = 0, + .host_size = 0, + .@"allowzero" = false, + .mutable = true, + .@"volatile" = false, + .size = .Slice, + } }, + + .pointer => return self.castTag(.pointer).?.*, + + .optional_single_mut_pointer => return .{ .data = .{ + .pointee_type = self.castPointer().?.data, + .sentinel = null, + .@"align" = 0, + .@"addrspace" = .generic, + .bit_offset = 0, + .host_size = 0, + .@"allowzero" = false, + .mutable = true, + .@"volatile" = false, + .size = .One, + } }, + .optional_single_const_pointer => return .{ .data = .{ + .pointee_type = self.castPointer().?.data, + .sentinel = null, + .@"align" = 0, + .@"addrspace" = .generic, + .bit_offset = 0, + .host_size = 0, + .@"allowzero" = false, + .mutable = false, + .@"volatile" = false, + .size = .One, + } }, + .optional => { + var buf: Payload.ElemType = undefined; + const child_type = 
self.optionalChild(&buf); + return child_type.ptrInfo(); + }, - else => unreachable, + else => unreachable, + }, + else => @panic("TODO"), } } @@ -3712,17 +3715,23 @@ pub const Type = struct { }; } - pub fn isSlice(self: Type) bool { - return switch (self.tag()) { - .const_slice, - .mut_slice, - .const_slice_u8, - .const_slice_u8_sentinel_0, - => true, + pub fn isSlice(ty: Type, mod: *const Module) bool { + return switch (ty.ip_index) { + .none => switch (ty.tag()) { + .const_slice, + .mut_slice, + .const_slice_u8, + .const_slice_u8_sentinel_0, + => true, - .pointer => self.castTag(.pointer).?.data.size == .Slice, + .pointer => ty.castTag(.pointer).?.data.size == .Slice, - else => false, + else => false, + }, + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .ptr_type => |ptr_type| ptr_type.size == .Slice, + else => false, + }, }; } diff --git a/src/value.zig b/src/value.zig index 396aab2012..cbf18c672c 100644 --- a/src/value.zig +++ b/src/value.zig @@ -1144,7 +1144,7 @@ pub const Value = struct { }, }, .Pointer => { - if (ty.isSlice()) return error.IllDefinedMemoryLayout; + if (ty.isSlice(mod)) return error.IllDefinedMemoryLayout; if (val.isDeclRef()) return error.ReinterpretDeclRef; return val.writeToMemory(Type.usize, mod, buffer); }, @@ -1261,7 +1261,7 @@ pub const Value = struct { }, }, .Pointer => { - assert(!ty.isSlice()); // No well defined layout. + assert(!ty.isSlice(mod)); // No well defined layout. if (val.isDeclRef()) return error.ReinterpretDeclRef; return val.writeToPackedMemory(Type.usize, mod, buffer, bit_offset); }, @@ -1381,7 +1381,7 @@ pub const Value = struct { return Value.initPayload(&payload.base); }, .Pointer => { - assert(!ty.isSlice()); // No well defined layout. + assert(!ty.isSlice(mod)); // No well defined layout. return readFromMemory(Type.usize, mod, buffer, arena); }, .Optional => { @@ -1478,7 +1478,7 @@ pub const Value = struct { }, }, .Pointer => { - assert(!ty.isSlice()); // No well defined layout. + assert(!ty.isSlice(mod)); // No well defined layout. 
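// --- editor's sketch (illustrative, not part of the patch) -------------------
// The rewritten `isSlice` above is the template this commit applies throughout:
// dispatch on `ip_index`, keep the legacy tag switch for `.none`, and decode
// interned types through `mod.intern_pool.indexToKey`. A hypothetical predicate
// written in the same shape (`isManyPtr` is not in the patch; it only
// illustrates the pattern):
pub fn isManyPtr(ty: Type, mod: *const Module) bool {
    return switch (ty.ip_index) {
        .none => switch (ty.tag()) {
            .manyptr_u8, .manyptr_const_u8, .manyptr_const_u8_sentinel_0 => true,
            .pointer => ty.castTag(.pointer).?.data.size == .Many,
            else => false,
        },
        else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
            .ptr_type => |ptr_type| ptr_type.size == .Many,
            else => false,
        },
    };
}
// -----------------------------------------------------------------------------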
return readFromPackedMemory(Type.usize, mod, buffer, bit_offset, arena); }, .Optional => { -- cgit v1.2.3 From 9d422bff18dbb92d3a6b8705c3dae7404a34bba6 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 3 May 2023 20:40:54 -0700 Subject: stage2: move all integer types to InternPool --- src/AstGen.zig | 23 ++- src/Sema.zig | 52 ------- src/arch/aarch64/CodeGen.zig | 22 +-- src/arch/arm/CodeGen.zig | 24 +-- src/arch/sparc64/CodeGen.zig | 6 +- src/arch/wasm/CodeGen.zig | 18 +-- src/codegen/c/type.zig | 2 +- src/type.zig | 358 ++++++------------------------------------- 8 files changed, 106 insertions(+), 399 deletions(-) (limited to 'src/arch') diff --git a/src/AstGen.zig b/src/AstGen.zig index 6461b11d80..edd6099127 100644 --- a/src/AstGen.zig +++ b/src/AstGen.zig @@ -10271,6 +10271,8 @@ fn rvalue( as_ty | @enumToInt(Zir.Inst.Ref.i32_type), as_ty | @enumToInt(Zir.Inst.Ref.u64_type), as_ty | @enumToInt(Zir.Inst.Ref.i64_type), + as_ty | @enumToInt(Zir.Inst.Ref.u128_type), + as_ty | @enumToInt(Zir.Inst.Ref.i128_type), as_ty | @enumToInt(Zir.Inst.Ref.usize_type), as_ty | @enumToInt(Zir.Inst.Ref.isize_type), as_ty | @enumToInt(Zir.Inst.Ref.c_char_type), @@ -10296,11 +10298,30 @@ fn rvalue( as_ty | @enumToInt(Zir.Inst.Ref.comptime_int_type), as_ty | @enumToInt(Zir.Inst.Ref.comptime_float_type), as_ty | @enumToInt(Zir.Inst.Ref.noreturn_type), + as_ty | @enumToInt(Zir.Inst.Ref.anyframe_type), as_ty | @enumToInt(Zir.Inst.Ref.null_type), as_ty | @enumToInt(Zir.Inst.Ref.undefined_type), + as_ty | @enumToInt(Zir.Inst.Ref.enum_literal_type), + as_ty | @enumToInt(Zir.Inst.Ref.atomic_order_type), + as_ty | @enumToInt(Zir.Inst.Ref.atomic_rmw_op_type), + as_ty | @enumToInt(Zir.Inst.Ref.calling_convention_type), + as_ty | @enumToInt(Zir.Inst.Ref.address_space_type), + as_ty | @enumToInt(Zir.Inst.Ref.float_mode_type), + as_ty | @enumToInt(Zir.Inst.Ref.reduce_op_type), + as_ty | @enumToInt(Zir.Inst.Ref.call_modifier_type), + as_ty | @enumToInt(Zir.Inst.Ref.prefetch_options_type), + as_ty | @enumToInt(Zir.Inst.Ref.export_options_type), + as_ty | @enumToInt(Zir.Inst.Ref.extern_options_type), + as_ty | @enumToInt(Zir.Inst.Ref.type_info_type), + as_ty | @enumToInt(Zir.Inst.Ref.manyptr_u8_type), + as_ty | @enumToInt(Zir.Inst.Ref.manyptr_const_u8_type), + as_ty | @enumToInt(Zir.Inst.Ref.manyptr_const_u8_sentinel_0_type), as_ty | @enumToInt(Zir.Inst.Ref.single_const_pointer_to_comptime_int_type), as_ty | @enumToInt(Zir.Inst.Ref.const_slice_u8_type), - as_ty | @enumToInt(Zir.Inst.Ref.enum_literal_type), + as_ty | @enumToInt(Zir.Inst.Ref.const_slice_u8_sentinel_0_type), + as_ty | @enumToInt(Zir.Inst.Ref.anyerror_void_error_union_type), + as_ty | @enumToInt(Zir.Inst.Ref.generic_poison_type), + as_ty | @enumToInt(Zir.Inst.Ref.empty_struct_type), as_comptime_int | @enumToInt(Zir.Inst.Ref.zero), as_comptime_int | @enumToInt(Zir.Inst.Ref.one), as_bool | @enumToInt(Zir.Inst.Ref.bool_true), diff --git a/src/Sema.zig b/src/Sema.zig index 0eecda5d16..7389719301 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -31478,19 +31478,6 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { }; return switch (ty.tag()) { - .u1, - .u8, - .i8, - .u16, - .i16, - .u29, - .u32, - .i32, - .u64, - .i64, - .u128, - .i128, - .manyptr_u8, .manyptr_const_u8, .manyptr_const_u8_sentinel_0, @@ -32971,19 +32958,6 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { }; switch (ty.tag()) { - .u1, - .u8, - .i8, - .u16, - .i16, - .u29, - .u32, - .i32, - .u64, - .i64, - .u128, - .i128, - 
.error_set_single, .error_set, .error_set_merged, @@ -33175,19 +33149,6 @@ pub fn addType(sema: *Sema, ty: Type) !Air.Inst.Ref { return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1)); } switch (ty.tag()) { - .u1 => return .u1_type, - .u8 => return .u8_type, - .i8 => return .i8_type, - .u16 => return .u16_type, - .u29 => return .u29_type, - .i16 => return .i16_type, - .u32 => return .u32_type, - .i32 => return .i32_type, - .u64 => return .u64_type, - .i64 => return .i64_type, - .u128 => return .u128_type, - .i128 => return .i128_type, - .manyptr_u8 => return .manyptr_u8_type, .manyptr_const_u8 => return .manyptr_const_u8_type, .single_const_pointer_to_comptime_int => return .single_const_pointer_to_comptime_int_type, @@ -33617,19 +33578,6 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { } } return switch (ty.tag()) { - .u1, - .u8, - .i8, - .u16, - .i16, - .u29, - .u32, - .i32, - .u64, - .i64, - .u128, - .i128, - .manyptr_u8, .manyptr_const_u8, .manyptr_const_u8_sentinel_0, diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index 7098cf3f32..503bbdbb02 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -2577,7 +2577,7 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void { }); try self.genSetStack(lhs_ty, stack_offset, .{ .register = truncated_reg }); - try self.genSetStack(Type.initTag(.u1), stack_offset - overflow_bit_offset, .{ .compare_flags = .ne }); + try self.genSetStack(Type.u1, stack_offset - overflow_bit_offset, .{ .compare_flags = .ne }); break :result MCValue{ .stack_offset = stack_offset }; }, @@ -2720,7 +2720,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { } try self.genSetStack(lhs_ty, stack_offset, .{ .register = truncated_reg }); - try self.genSetStack(Type.initTag(.u1), stack_offset - overflow_bit_offset, .{ .compare_flags = .ne }); + try self.genSetStack(Type.u1, stack_offset - overflow_bit_offset, .{ .compare_flags = .ne }); break :result MCValue{ .stack_offset = stack_offset }; } else if (int_info.bits <= 64) { @@ -2860,7 +2860,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { try self.truncRegister(dest_reg, truncated_reg, int_info.signedness, int_info.bits); try self.genSetStack(lhs_ty, stack_offset, .{ .register = truncated_reg }); - try self.genSetStack(Type.initTag(.u1), stack_offset - overflow_bit_offset, .{ .compare_flags = .ne }); + try self.genSetStack(Type.u1, stack_offset - overflow_bit_offset, .{ .compare_flags = .ne }); break :result MCValue{ .stack_offset = stack_offset }; } else return self.fail("TODO implement mul_with_overflow for integers > u64/i64", .{}); @@ -2993,7 +2993,7 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { }); try self.genSetStack(lhs_ty, stack_offset, .{ .register = dest_reg }); - try self.genSetStack(Type.initTag(.u1), stack_offset - overflow_bit_offset, .{ .compare_flags = .ne }); + try self.genSetStack(Type.u1, stack_offset - overflow_bit_offset, .{ .compare_flags = .ne }); break :result MCValue{ .stack_offset = stack_offset }; } else { @@ -3780,7 +3780,7 @@ fn genInlineMemset( const val_reg = switch (val) { .register => |r| r, - else => try self.copyToTmpRegister(Type.initTag(.u8), val), + else => try self.copyToTmpRegister(Type.u8, val), }; const val_reg_lock = self.register_manager.lockReg(val_reg); defer if (val_reg_lock) |lock| self.register_manager.unlockReg(lock); @@ -4330,7 +4330,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier } else 
if (self.bin_file.cast(link.File.MachO)) |macho_file| { const atom = try macho_file.getOrCreateAtomForDecl(func.owner_decl); const sym_index = macho_file.getAtom(atom).getSymbolIndex().?; - try self.genSetReg(Type.initTag(.u64), .x30, .{ + try self.genSetReg(Type.u64, .x30, .{ .linker_load = .{ .type = .got, .sym_index = sym_index, @@ -4339,7 +4339,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier } else if (self.bin_file.cast(link.File.Coff)) |coff_file| { const atom = try coff_file.getOrCreateAtomForDecl(func.owner_decl); const sym_index = coff_file.getAtom(atom).getSymbolIndex().?; - try self.genSetReg(Type.initTag(.u64), .x30, .{ + try self.genSetReg(Type.u64, .x30, .{ .linker_load = .{ .type = .got, .sym_index = sym_index, @@ -4379,7 +4379,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier }); } else if (self.bin_file.cast(link.File.Coff)) |coff_file| { const sym_index = try coff_file.getGlobalSymbol(decl_name, lib_name); - try self.genSetReg(Type.initTag(.u64), .x30, .{ + try self.genSetReg(Type.u64, .x30, .{ .linker_load = .{ .type = .import, .sym_index = sym_index, @@ -4536,7 +4536,7 @@ fn cmp( var opt_buffer: Type.Payload.ElemType = undefined; const payload_ty = lhs_ty.optionalChild(&opt_buffer); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { - break :blk Type.initTag(.u1); + break :blk Type.u1; } else if (lhs_ty.isPtrLikeOptional(mod)) { break :blk Type.usize; } else { @@ -4546,9 +4546,9 @@ fn cmp( .Float => return self.fail("TODO ARM cmp floats", .{}), .Enum => lhs_ty.intTagType(), .Int => lhs_ty, - .Bool => Type.initTag(.u1), + .Bool => Type.u1, .Pointer => Type.usize, - .ErrorSet => Type.initTag(.u16), + .ErrorSet => Type.u16, else => unreachable, }; diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index bf94cf55a0..55ec0d4125 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -1637,7 +1637,7 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void { }); try self.genSetStack(lhs_ty, stack_offset, .{ .register = truncated_reg }); - try self.genSetStack(Type.initTag(.u1), stack_offset - overflow_bit_offset, .{ .cpsr_flags = .ne }); + try self.genSetStack(Type.u1, stack_offset - overflow_bit_offset, .{ .cpsr_flags = .ne }); break :result MCValue{ .stack_offset = stack_offset }; } else if (int_info.bits == 32) { @@ -1750,7 +1750,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { }); try self.genSetStack(lhs_ty, stack_offset, .{ .register = truncated_reg }); - try self.genSetStack(Type.initTag(.u1), stack_offset - overflow_bit_offset, .{ .cpsr_flags = .ne }); + try self.genSetStack(Type.u1, stack_offset - overflow_bit_offset, .{ .cpsr_flags = .ne }); break :result MCValue{ .stack_offset = stack_offset }; } else if (int_info.bits <= 32) { @@ -1848,7 +1848,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { }); // strb rdlo, [...] 
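// --- editor's sketch (illustrative, not part of the patch) -------------------
// The `genSetStack(Type.u1, stack_offset - overflow_bit_offset, ...)` calls in
// these hunks implement the tuple layout of the overflow builtins: the wrapped
// result is stored first and a one-bit flag is written just past it. The
// language-level operation being lowered, as a model (`mulOverflowModel` is a
// hypothetical name):
fn mulOverflowModel(a: u32, b: u32) struct { u32, u1 } {
    return @mulWithOverflow(a, b); // tuple of { wrapped value, overflow bit }
}
// -----------------------------------------------------------------------------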
- try self.genSetStack(Type.initTag(.u1), stack_offset - overflow_bit_offset, .{ .register = rdlo }); + try self.genSetStack(Type.u1, stack_offset - overflow_bit_offset, .{ .register = rdlo }); break :result MCValue{ .stack_offset = stack_offset }; } else { @@ -1983,7 +1983,7 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { }); try self.genSetStack(lhs_ty, stack_offset, .{ .register = dest_reg }); - try self.genSetStack(Type.initTag(.u1), stack_offset - overflow_bit_offset, .{ .cpsr_flags = .ne }); + try self.genSetStack(Type.u1, stack_offset - overflow_bit_offset, .{ .cpsr_flags = .ne }); break :result MCValue{ .stack_offset = stack_offset }; } else { @@ -4086,7 +4086,7 @@ fn genInlineMemset( const val_reg = switch (val) { .register => |r| r, - else => try self.copyToTmpRegister(Type.initTag(.u8), val), + else => try self.copyToTmpRegister(Type.u8, val), }; const val_reg_lock = self.register_manager.lockReg(val_reg); defer if (val_reg_lock) |lock| self.register_manager.unlockReg(lock); @@ -4485,7 +4485,7 @@ fn cmp( var opt_buffer: Type.Payload.ElemType = undefined; const payload_ty = lhs_ty.optionalChild(&opt_buffer); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { - break :blk Type.initTag(.u1); + break :blk Type.u1; } else if (lhs_ty.isPtrLikeOptional(mod)) { break :blk Type.usize; } else { @@ -4495,9 +4495,9 @@ fn cmp( .Float => return self.fail("TODO ARM cmp floats", .{}), .Enum => lhs_ty.intTagType(), .Int => lhs_ty, - .Bool => Type.initTag(.u1), + .Bool => Type.u1, .Pointer => Type.usize, - .ErrorSet => Type.initTag(.u16), + .ErrorSet => Type.u16, else => unreachable, }; @@ -5367,7 +5367,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro 1, 4 => { const offset = if (math.cast(u12, stack_offset)) |imm| blk: { break :blk Instruction.Offset.imm(imm); - } else Instruction.Offset.reg(try self.copyToTmpRegister(Type.initTag(.u32), MCValue{ .immediate = stack_offset }), .none); + } else Instruction.Offset.reg(try self.copyToTmpRegister(Type.u32, MCValue{ .immediate = stack_offset }), .none); const tag: Mir.Inst.Tag = switch (abi_size) { 1 => .strb, @@ -5390,7 +5390,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro 2 => { const offset = if (stack_offset <= math.maxInt(u8)) blk: { break :blk Instruction.ExtraLoadStoreOffset.imm(@intCast(u8, stack_offset)); - } else Instruction.ExtraLoadStoreOffset.reg(try self.copyToTmpRegister(Type.initTag(.u32), MCValue{ .immediate = stack_offset })); + } else Instruction.ExtraLoadStoreOffset.reg(try self.copyToTmpRegister(Type.u32, MCValue{ .immediate = stack_offset })); _ = try self.addInst(.{ .tag = .strh, @@ -5769,7 +5769,7 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I 1, 4 => { const offset = if (math.cast(u12, stack_offset)) |imm| blk: { break :blk Instruction.Offset.imm(imm); - } else Instruction.Offset.reg(try self.copyToTmpRegister(Type.initTag(.u32), MCValue{ .immediate = stack_offset }), .none); + } else Instruction.Offset.reg(try self.copyToTmpRegister(Type.u32, MCValue{ .immediate = stack_offset }), .none); const tag: Mir.Inst.Tag = switch (abi_size) { 1 => .strb, @@ -5789,7 +5789,7 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I 2 => { const offset = if (stack_offset <= math.maxInt(u8)) blk: { break :blk Instruction.ExtraLoadStoreOffset.imm(@intCast(u8, stack_offset)); - } else Instruction.ExtraLoadStoreOffset.reg(try self.copyToTmpRegister(Type.initTag(.u32), MCValue{ 
.immediate = stack_offset })); + } else Instruction.ExtraLoadStoreOffset.reg(try self.copyToTmpRegister(Type.u32, MCValue{ .immediate = stack_offset })); _ = try self.addInst(.{ .tag = .strh, diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index a519b73235..c565b6dc23 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -1436,14 +1436,14 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { .Vector => unreachable, // Handled by cmp_vector. .Enum => lhs_ty.intTagType(), .Int => lhs_ty, - .Bool => Type.initTag(.u1), + .Bool => Type.u1, .Pointer => Type.usize, - .ErrorSet => Type.initTag(.u16), + .ErrorSet => Type.u16, .Optional => blk: { var opt_buffer: Type.Payload.ElemType = undefined; const payload_ty = lhs_ty.optionalChild(&opt_buffer); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { - break :blk Type.initTag(.u1); + break :blk Type.u1; } else if (lhs_ty.isPtrLikeOptional(mod)) { break :blk Type.usize; } else { diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index bb3f1f769d..7fc5dbc825 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -4272,7 +4272,7 @@ fn airWrapOptional(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const result = result: { if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { - const non_null_bit = try func.allocStack(Type.initTag(.u1)); + const non_null_bit = try func.allocStack(Type.u1); try func.emitWValue(non_null_bit); try func.addImm32(1); try func.addMemArg(.i32_store8, .{ .offset = non_null_bit.offset(), .alignment = 1 }); @@ -5195,7 +5195,7 @@ fn cmpOptionals(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op: // We store the final result in here that will be validated // if the optional is truly equal. 
- var result = try func.ensureAllocLocal(Type.initTag(.i32)); + var result = try func.ensureAllocLocal(Type.i32); defer result.free(func); try func.startBlock(.block, wasm.block_empty); @@ -5658,7 +5658,7 @@ fn airAddSubWithOverflow(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerErro const result_ptr = try func.allocStack(func.typeOfIndex(inst)); try func.store(result_ptr, result, lhs_ty, 0); const offset = @intCast(u32, lhs_ty.abiSize(mod)); - try func.store(result_ptr, overflow_local, Type.initTag(.u1), offset); + try func.store(result_ptr, overflow_local, Type.u1, offset); func.finishAir(inst, result_ptr, &.{ extra.lhs, extra.rhs }); } @@ -5717,13 +5717,13 @@ fn addSubWithOverflowBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, break :blk WValue{ .stack = {} }; }; - var overflow_local = try overflow_bit.toLocal(func, Type.initTag(.u1)); + var overflow_local = try overflow_bit.toLocal(func, Type.u1); defer overflow_local.free(func); const result_ptr = try func.allocStack(result_ty); try func.store(result_ptr, high_op_res, Type.u64, 0); try func.store(result_ptr, tmp_op, Type.u64, 8); - try func.store(result_ptr, overflow_local, Type.initTag(.u1), 16); + try func.store(result_ptr, overflow_local, Type.u1, 16); return result_ptr; } @@ -5774,13 +5774,13 @@ fn airShlWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const shr = try func.binOp(result, rhs_final, lhs_ty, .shr); break :blk try func.cmp(.{ .stack = {} }, shr, lhs_ty, .neq); }; - var overflow_local = try overflow_bit.toLocal(func, Type.initTag(.u1)); + var overflow_local = try overflow_bit.toLocal(func, Type.u1); defer overflow_local.free(func); const result_ptr = try func.allocStack(func.typeOfIndex(inst)); try func.store(result_ptr, result, lhs_ty, 0); const offset = @intCast(u32, lhs_ty.abiSize(mod)); - try func.store(result_ptr, overflow_local, Type.initTag(.u1), offset); + try func.store(result_ptr, overflow_local, Type.u1, offset); func.finishAir(inst, result_ptr, &.{ extra.lhs, extra.rhs }); } @@ -5800,7 +5800,7 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { // We store the bit if it's overflowed or not in this. As it's zero-initialized // we only need to update it if an overflow (or underflow) occurred. 
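// --- editor's sketch (illustrative, not part of the patch) -------------------
// Why `Type.initTag(.u1)` keeps becoming `Type.u1` in these hunks: once the
// integer types live in the InternPool, each one can be exposed as a
// pre-interned constant instead of being constructed through a legacy tag.
// Assuming the declarations follow the pattern shown for `Type.bool` and
// friends in the earlier type.zig hunk:
pub const @"u1": Type = .{ .ip_index = .u1_type, .legacy = undefined };
pub const @"u16": Type = .{ .ip_index = .u16_type, .legacy = undefined };
pub const @"u32": Type = .{ .ip_index = .u32_type, .legacy = undefined };
// Call sites such as `cmp` and `genSetStack` then reference them directly.
// -----------------------------------------------------------------------------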
- var overflow_bit = try func.ensureAllocLocal(Type.initTag(.u1)); + var overflow_bit = try func.ensureAllocLocal(Type.u1); defer overflow_bit.free(func); const int_info = lhs_ty.intInfo(mod); @@ -5955,7 +5955,7 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const result_ptr = try func.allocStack(func.typeOfIndex(inst)); try func.store(result_ptr, bin_op_local, lhs_ty, 0); const offset = @intCast(u32, lhs_ty.abiSize(mod)); - try func.store(result_ptr, overflow_bit, Type.initTag(.u1), offset); + try func.store(result_ptr, overflow_bit, Type.u1, offset); func.finishAir(inst, result_ptr, &.{ extra.lhs, extra.rhs }); } diff --git a/src/codegen/c/type.zig b/src/codegen/c/type.zig index d248753670..27fa997fd3 100644 --- a/src/codegen/c/type.zig +++ b/src/codegen/c/type.zig @@ -1471,7 +1471,7 @@ pub const CType = extern union { else info.pointee_type; - if (if (info.size == .C and pointee_ty.tag() == .u8) + if (if (info.size == .C and pointee_ty.ip_index == .u8_type) Tag.char.toIndex() else try lookup.typeToIndex(pointee_ty, .forward)) |child_idx| diff --git a/src/type.zig b/src/type.zig index f05c5e15e8..868ae4231b 100644 --- a/src/type.zig +++ b/src/type.zig @@ -107,20 +107,6 @@ pub const Type = struct { } } switch (ty.tag()) { - .u1, - .u8, - .i8, - .u16, - .i16, - .u29, - .u32, - .i32, - .u64, - .i64, - .u128, - .i128, - => return .Int, - .error_set, .error_set_single, .error_set_inferred, @@ -589,26 +575,6 @@ pub const Type = struct { if (a.legacy.tag_if_small_enough == b.legacy.tag_if_small_enough) return true; switch (a.tag()) { - .u1, - .u8, - .i8, - .u16, - .i16, - .u29, - .u32, - .i32, - .u64, - .i64, - .u128, - .i128, - => { - if (b.zigTypeTag(mod) != .Int) return false; - if (b.isNamedInt()) return false; - const info_a = a.intInfo(mod); - const info_b = b.intInfo(mod); - return info_a.signedness == info_b.signedness and info_a.bits == info_b.bits; - }, - .error_set_inferred => { // Inferred error sets are only equal if both are inferred // and they share the same pointer. @@ -926,26 +892,6 @@ pub const Type = struct { return; } switch (ty.tag()) { - .u1, - .u8, - .i8, - .u16, - .i16, - .u29, - .u32, - .i32, - .u64, - .i64, - .u128, - .i128, - => { - // Arbitrary sized integers. 
- std.hash.autoHash(hasher, std.builtin.TypeId.Int); - const info = ty.intInfo(mod); - std.hash.autoHash(hasher, info.signedness); - std.hash.autoHash(hasher, info.bits); - }, - .error_set, .error_set_single, .error_set_merged, @@ -1183,18 +1129,6 @@ pub const Type = struct { .legacy = .{ .tag_if_small_enough = self.legacy.tag_if_small_enough }, }; } else switch (self.legacy.ptr_otherwise.tag) { - .u1, - .u8, - .i8, - .u16, - .i16, - .u29, - .u32, - .i32, - .u64, - .i64, - .u128, - .i128, .single_const_pointer_to_comptime_int, .const_slice_u8, .const_slice_u8_sentinel_0, @@ -1435,20 +1369,6 @@ pub const Type = struct { while (true) { const t = ty.tag(); switch (t) { - .u1, - .u8, - .i8, - .u16, - .i16, - .u29, - .u32, - .i32, - .u64, - .i64, - .u128, - .i128, - => return writer.writeAll(@tagName(t)), - .empty_struct, .empty_struct_literal => return writer.writeAll("struct {}"), .@"struct" => { @@ -1775,20 +1695,6 @@ pub const Type = struct { .inferred_alloc_const => unreachable, .inferred_alloc_mut => unreachable, - .u1, - .u8, - .i8, - .u16, - .i16, - .u29, - .u32, - .i32, - .u64, - .i64, - .u128, - .i128, - => try writer.writeAll(@tagName(t)), - .empty_struct_literal => try writer.writeAll("@TypeOf(.{})"), .empty_struct => { @@ -2057,16 +1963,6 @@ pub const Type = struct { pub fn toValue(self: Type, allocator: Allocator) Allocator.Error!Value { if (self.ip_index != .none) return self.ip_index.toValue(); switch (self.tag()) { - .u1 => return Value{ .ip_index = .u1_type, .legacy = undefined }, - .u8 => return Value{ .ip_index = .u8_type, .legacy = undefined }, - .i8 => return Value{ .ip_index = .i8_type, .legacy = undefined }, - .u16 => return Value{ .ip_index = .u16_type, .legacy = undefined }, - .u29 => return Value{ .ip_index = .u29_type, .legacy = undefined }, - .i16 => return Value{ .ip_index = .i16_type, .legacy = undefined }, - .u32 => return Value{ .ip_index = .u32_type, .legacy = undefined }, - .i32 => return Value{ .ip_index = .i32_type, .legacy = undefined }, - .u64 => return Value{ .ip_index = .u64_type, .legacy = undefined }, - .i64 => return Value{ .ip_index = .i64_type, .legacy = undefined }, .single_const_pointer_to_comptime_int => return Value{ .ip_index = .single_const_pointer_to_comptime_int_type, .legacy = undefined }, .const_slice_u8 => return Value{ .ip_index = .const_slice_u8_type, .legacy = undefined }, .const_slice_u8_sentinel_0 => return Value{ .ip_index = .const_slice_u8_sentinel_0_type, .legacy = undefined }, @@ -2162,19 +2058,6 @@ pub const Type = struct { .enum_tag => unreachable, // it's a value, not a type }; switch (ty.tag()) { - .u1, - .u8, - .i8, - .u16, - .i16, - .u29, - .u32, - .i32, - .u64, - .i64, - .u128, - .i128, - .const_slice_u8, .const_slice_u8_sentinel_0, .array_u8_sentinel_0, @@ -2404,19 +2287,6 @@ pub const Type = struct { .enum_tag => unreachable, // it's a value, not a type }; return switch (ty.tag()) { - .u1, - .u8, - .i8, - .u16, - .i16, - .u29, - .u32, - .i32, - .u64, - .i64, - .u128, - .i128, - .manyptr_u8, .manyptr_const_u8, .manyptr_const_u8_sentinel_0, @@ -2752,10 +2622,6 @@ pub const Type = struct { else => null, }; switch (ty.tag()) { - .u1, - .u8, - .i8, - .array_u8_sentinel_0, .array_u8, .@"opaque", @@ -2806,12 +2672,6 @@ pub const Type = struct { return AbiAlignmentAdvanced{ .scalar = @intCast(u32, alignment) }; }, - .i16, .u16 => return AbiAlignmentAdvanced{ .scalar = intAbiAlignment(16, target) }, - .u29 => return AbiAlignmentAdvanced{ .scalar = intAbiAlignment(29, target) }, - .i32, .u32 => return AbiAlignmentAdvanced{ .scalar 
= intAbiAlignment(32, target) }, - .i64, .u64 => return AbiAlignmentAdvanced{ .scalar = intAbiAlignment(64, target) }, - .u128, .i128 => return AbiAlignmentAdvanced{ .scalar = intAbiAlignment(128, target) }, - .optional => { var buf: Payload.ElemType = undefined; const child_type = ty.optionalChild(&buf); @@ -3208,11 +3068,6 @@ pub const Type = struct { return abiSizeAdvancedUnion(ty, mod, strat, union_obj, true); }, - .u1, - .u8, - .i8, - => return AbiSizeAdvanced{ .scalar = 1 }, - .array_u8 => return AbiSizeAdvanced{ .scalar = ty.castTag(.array_u8).?.data }, .array_u8_sentinel_0 => return AbiSizeAdvanced{ .scalar = ty.castTag(.array_u8_sentinel_0).?.data + 1 }, .array => { @@ -3293,12 +3148,6 @@ pub const Type = struct { .error_set_single, => return AbiSizeAdvanced{ .scalar = 2 }, - .i16, .u16 => return AbiSizeAdvanced{ .scalar = intAbiSize(16, target) }, - .u29 => return AbiSizeAdvanced{ .scalar = intAbiSize(29, target) }, - .i32, .u32 => return AbiSizeAdvanced{ .scalar = intAbiSize(32, target) }, - .i64, .u64 => return AbiSizeAdvanced{ .scalar = intAbiSize(64, target) }, - .u128, .i128 => return AbiSizeAdvanced{ .scalar = intAbiSize(128, target) }, - .optional => { var buf: Payload.ElemType = undefined; const child_type = ty.optionalChild(&buf); @@ -3497,14 +3346,6 @@ pub const Type = struct { .inferred_alloc_mut => unreachable, .@"opaque" => unreachable, - .u1 => return 1, - .u8, .i8 => return 8, - .i16, .u16 => return 16, - .u29 => return 29, - .i32, .u32 => return 32, - .i64, .u64 => return 64, - .u128, .i128 => return 128, - .@"struct" => { const struct_obj = ty.castTag(.@"struct").?.data; if (struct_obj.layout != .Packed) { @@ -4398,47 +4239,25 @@ pub const Type = struct { /// Returns true if and only if the type is a fixed-width, signed integer. pub fn isSignedInt(ty: Type, mod: *const Module) bool { - if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .int_type => |int_type| return int_type.signedness == .signed, - .simple_type => |s| return switch (s) { - .c_char, .isize, .c_short, .c_int, .c_long, .c_longlong => true, + return switch (ty.ip_index) { + .c_char_type, .isize_type, .c_short_type, .c_int_type, .c_long_type, .c_longlong_type => true, + .none => false, + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .int_type => |int_type| int_type.signedness == .signed, else => false, }, - else => return false, - }; - return switch (ty.tag()) { - .i8, - .i16, - .i32, - .i64, - .i128, - => true, - - else => false, }; } /// Returns true if and only if the type is a fixed-width, unsigned integer. 
pub fn isUnsignedInt(ty: Type, mod: *const Module) bool { - if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .int_type => |int_type| return int_type.signedness == .unsigned, - .simple_type => |s| return switch (s) { - .usize, .c_ushort, .c_uint, .c_ulong, .c_ulonglong => true, + return switch (ty.ip_index) { + .usize_type, .c_ushort_type, .c_uint_type, .c_ulong_type, .c_ulonglong_type => true, + .none => false, + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .int_type => |int_type| int_type.signedness == .unsigned, else => false, }, - else => return false, - }; - return switch (ty.tag()) { - .u1, - .u8, - .u16, - .u29, - .u32, - .u64, - .u128, - => true, - - else => false, }; } @@ -4459,19 +4278,6 @@ pub const Type = struct { while (true) switch (ty.ip_index) { .none => switch (ty.tag()) { - .u1 => return .{ .signedness = .unsigned, .bits = 1 }, - .u8 => return .{ .signedness = .unsigned, .bits = 8 }, - .i8 => return .{ .signedness = .signed, .bits = 8 }, - .u16 => return .{ .signedness = .unsigned, .bits = 16 }, - .i16 => return .{ .signedness = .signed, .bits = 16 }, - .u29 => return .{ .signedness = .unsigned, .bits = 29 }, - .u32 => return .{ .signedness = .unsigned, .bits = 32 }, - .i32 => return .{ .signedness = .signed, .bits = 32 }, - .u64 => return .{ .signedness = .unsigned, .bits = 64 }, - .i64 => return .{ .signedness = .signed, .bits = 64 }, - .u128 => return .{ .signedness = .unsigned, .bits = 128 }, - .i128 => return .{ .signedness = .signed, .bits = 128 }, - .enum_full, .enum_nonexhaustive => ty = ty.cast(Payload.EnumFull).?.data.tag_ty, .enum_numbered => ty = ty.castTag(.enum_numbered).?.data.tag_ty, .enum_simple => { @@ -4664,50 +4470,34 @@ pub const Type = struct { } pub fn isNumeric(ty: Type, mod: *const Module) bool { - if (ty.ip_index != .none) return switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .int_type => true, - .simple_type => |s| return switch (s) { - .f16, - .f32, - .f64, - .f80, - .f128, - .c_longdouble, - .comptime_int, - .comptime_float, - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, - => true, + return switch (ty.ip_index) { + .f16_type, + .f32_type, + .f64_type, + .f80_type, + .f128_type, + .c_longdouble_type, + .comptime_int_type, + .comptime_float_type, + .usize_type, + .isize_type, + .c_char_type, + .c_short_type, + .c_ushort_type, + .c_int_type, + .c_uint_type, + .c_long_type, + .c_ulong_type, + .c_longlong_type, + .c_ulonglong_type, + => true, + + .none => false, + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .int_type => true, else => false, }, - else => false, - }; - return switch (ty.tag()) { - .u1, - .u8, - .i8, - .u16, - .i16, - .u29, - .u32, - .i32, - .u64, - .i64, - .u128, - .i128, - => true, - - else => false, }; } @@ -4785,19 +4575,6 @@ pub const Type = struct { }; while (true) switch (ty.tag()) { - .u1, - .u8, - .i8, - .u16, - .i16, - .u29, - .u32, - .i32, - .u64, - .i64, - .u128, - .i128, - .error_union, .error_set_single, .error_set, @@ -4995,19 +4772,6 @@ pub const Type = struct { }; return switch (ty.tag()) { - .u1, - .u8, - .i8, - .u16, - .i16, - .u29, - .u32, - .i32, - .u64, - .i64, - .u128, - .i128, - .manyptr_u8, .manyptr_const_u8, .manyptr_const_u8_sentinel_0, @@ -5764,19 +5528,6 @@ pub const Type = struct { /// See `zigTypeTag` for the function that corresponds to `std.builtin.TypeId`. pub const Tag = enum(usize) { // The first section of this enum are tags that require no payload. 
- u1, - u8, - i8, - u16, - i16, - u29, - u32, - i32, - u64, - i64, - u128, - i128, - manyptr_u8, manyptr_const_u8, manyptr_const_u8_sentinel_0, @@ -5839,19 +5590,6 @@ pub const Type = struct { pub fn Type(comptime t: Tag) type { return switch (t) { - .u1, - .u8, - .i8, - .u16, - .i16, - .u29, - .u32, - .i32, - .u64, - .i64, - .u128, - .i128, - .single_const_pointer_to_comptime_int, .anyerror_void_error_union, .const_slice_u8, @@ -6203,19 +5941,19 @@ pub const Type = struct { }; }; - pub const @"u1" = initTag(.u1); - pub const @"u8" = initTag(.u8); - pub const @"u16" = initTag(.u16); - pub const @"u29" = initTag(.u29); - pub const @"u32" = initTag(.u32); - pub const @"u64" = initTag(.u64); - pub const @"u128" = initTag(.u128); - - pub const @"i8" = initTag(.i8); - pub const @"i16" = initTag(.i16); - pub const @"i32" = initTag(.i32); - pub const @"i64" = initTag(.i64); - pub const @"i128" = initTag(.i128); + pub const @"u1": Type = .{ .ip_index = .u1_type, .legacy = undefined }; + pub const @"u8": Type = .{ .ip_index = .u8_type, .legacy = undefined }; + pub const @"u16": Type = .{ .ip_index = .u16_type, .legacy = undefined }; + pub const @"u29": Type = .{ .ip_index = .u29_type, .legacy = undefined }; + pub const @"u32": Type = .{ .ip_index = .u32_type, .legacy = undefined }; + pub const @"u64": Type = .{ .ip_index = .u64_type, .legacy = undefined }; + pub const @"u128": Type = .{ .ip_index = .u128_type, .legacy = undefined }; + + pub const @"i8": Type = .{ .ip_index = .i8_type, .legacy = undefined }; + pub const @"i16": Type = .{ .ip_index = .i16_type, .legacy = undefined }; + pub const @"i32": Type = .{ .ip_index = .i32_type, .legacy = undefined }; + pub const @"i64": Type = .{ .ip_index = .i64_type, .legacy = undefined }; + pub const @"i128": Type = .{ .ip_index = .i128_type, .legacy = undefined }; pub const @"f16": Type = .{ .ip_index = .f16_type, .legacy = undefined }; pub const @"f32": Type = .{ .ip_index = .f32_type, .legacy = undefined }; -- cgit v1.2.3 From 5e636643d2a36c777a607b65cfd1abbb1822ad1e Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 4 May 2023 20:30:25 -0700 Subject: stage2: move many Type encodings to InternPool Notably, `vector`. Additionally, all alternate encodings of `pointer`, `optional`, and `array`. 
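
For illustration, a minimal sketch of what this migration looks like at
a call site (hypothetical names: `mod: *Module`, `sema.arena`, and
`elem_ty` are assumed to be in scope, and the concrete values are
placeholders, not taken from any hunk below):

    // Before: legacy Type.Tag encodings, arena-allocated per call site.
    const vec_ty = try Type.Tag.vector.create(sema.arena, .{
        .len = 4,
        .elem_type = Type.u8,
    });
    const ptr_ty = try Type.Tag.single_mut_pointer.create(sema.arena, elem_ty);

    // After: encodings interned (and thereby deduplicated) in the InternPool,
    // via the new Module helpers added in this commit.
    const vec_ty = try mod.vectorType(.{ .len = 4, .child = .u8_type });
    const ptr_ty = try mod.singleMutPtrType(elem_ty);

Since interned types are keyed in the pool, structurally identical types
now compare equal by index instead of requiring a payload walk.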
--- src/Air.zig | 13 +- src/InternPool.zig | 92 ++- src/Liveness.zig | 8 +- src/Liveness/Verify.zig | 2 +- src/Module.zig | 41 +- src/Sema.zig | 1075 +++++++++++++------------ src/TypedValue.zig | 32 +- src/arch/aarch64/CodeGen.zig | 80 +- src/arch/arm/CodeGen.zig | 83 +- src/arch/riscv64/CodeGen.zig | 14 +- src/arch/sparc64/CodeGen.zig | 37 +- src/arch/wasm/CodeGen.zig | 168 ++-- src/arch/x86_64/CodeGen.zig | 314 ++++---- src/arch/x86_64/abi.zig | 6 +- src/codegen.zig | 36 +- src/codegen/c.zig | 203 ++--- src/codegen/c/type.zig | 9 +- src/codegen/llvm.zig | 326 ++++---- src/codegen/spirv.zig | 64 +- src/codegen/spirv/Module.zig | 3 +- src/link/Dwarf.zig | 11 +- src/link/Wasm.zig | 4 +- src/print_air.zig | 6 +- src/type.zig | 1770 +++++++++++------------------------------- src/value.zig | 208 ++--- 25 files changed, 1834 insertions(+), 2771 deletions(-) (limited to 'src/arch') diff --git a/src/Air.zig b/src/Air.zig index 4124788605..64212d3b9a 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -1375,7 +1375,7 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index, ip: InternPool) Type { .bool_to_int => return Type.u1, - .tag_name, .error_name => return Type.initTag(.const_slice_u8_sentinel_0), + .tag_name, .error_name => return Type.const_slice_u8_sentinel_0, .call, .call_always_tail, .call_never_tail, .call_never_inline => { const callee_ty = air.typeOf(datas[inst].pl_op.operand, ip); @@ -1384,18 +1384,21 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index, ip: InternPool) Type { .slice_elem_val, .ptr_elem_val, .array_elem_val => { const ptr_ty = air.typeOf(datas[inst].bin_op.lhs, ip); - return ptr_ty.elemType(); + return ptr_ty.childTypeIp(ip); }, .atomic_load => { const ptr_ty = air.typeOf(datas[inst].atomic_load.ptr, ip); - return ptr_ty.elemType(); + return ptr_ty.childTypeIp(ip); }, .atomic_rmw => { const ptr_ty = air.typeOf(datas[inst].pl_op.operand, ip); - return ptr_ty.elemType(); + return ptr_ty.childTypeIp(ip); }, - .reduce, .reduce_optimized => return air.typeOf(datas[inst].reduce.operand, ip).childType(), + .reduce, .reduce_optimized => { + const operand_ty = air.typeOf(datas[inst].reduce.operand, ip); + return ip.indexToKey(operand_ty.ip_index).vector_type.child.toType(); + }, .mul_add => return air.typeOf(datas[inst].pl_op.operand, ip), .select => { diff --git a/src/InternPool.zig b/src/InternPool.zig index 3ecc18c426..295a694e2a 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -31,28 +31,10 @@ const KeyAdapter = struct { pub const Key = union(enum) { int_type: IntType, - ptr_type: struct { - elem_type: Index, - sentinel: Index = .none, - alignment: u16 = 0, - size: std.builtin.Type.Pointer.Size, - is_const: bool = false, - is_volatile: bool = false, - is_allowzero: bool = false, - address_space: std.builtin.AddressSpace = .generic, - }, - array_type: struct { - len: u64, - child: Index, - sentinel: Index, - }, - vector_type: struct { - len: u32, - child: Index, - }, - optional_type: struct { - payload_type: Index, - }, + ptr_type: PtrType, + array_type: ArrayType, + vector_type: VectorType, + opt_type: Index, error_union_type: struct { error_set_type: Index, payload_type: Index, @@ -87,6 +69,47 @@ pub const Key = union(enum) { pub const IntType = std.builtin.Type.Int; + pub const PtrType = struct { + elem_type: Index, + sentinel: Index = .none, + /// If zero use pointee_type.abiAlignment() + /// When creating pointer types, if alignment is equal to pointee type + /// abi alignment, this value should be set to 0 instead. 
+ alignment: u16 = 0, + /// If this is non-zero it means the pointer points to a sub-byte + /// range of data, which is backed by a "host integer" with this + /// number of bytes. + /// When host_size=pointee_abi_size and bit_offset=0, this must be + /// represented with host_size=0 instead. + host_size: u16 = 0, + bit_offset: u16 = 0, + vector_index: VectorIndex = .none, + size: std.builtin.Type.Pointer.Size = .One, + is_const: bool = false, + is_volatile: bool = false, + is_allowzero: bool = false, + /// See src/target.zig defaultAddressSpace function for how to obtain + /// an appropriate value for this field. + address_space: std.builtin.AddressSpace = .generic, + + pub const VectorIndex = enum(u32) { + none = std.math.maxInt(u32), + runtime = std.math.maxInt(u32) - 1, + _, + }; + }; + + pub const ArrayType = struct { + len: u64, + child: Index, + sentinel: Index, + }; + + pub const VectorType = struct { + len: u32, + child: Index, + }; + pub fn hash32(key: Key) u32 { return @truncate(u32, key.hash64()); } @@ -106,7 +129,7 @@ pub const Key = union(enum) { .ptr_type, .array_type, .vector_type, - .optional_type, + .opt_type, .error_union_type, .simple_type, .simple_value, @@ -159,8 +182,8 @@ pub const Key = union(enum) { const b_info = b.vector_type; return std.meta.eql(a_info, b_info); }, - .optional_type => |a_info| { - const b_info = b.optional_type; + .opt_type => |a_info| { + const b_info = b.opt_type; return std.meta.eql(a_info, b_info); }, .error_union_type => |a_info| { @@ -220,7 +243,7 @@ pub const Key = union(enum) { .ptr_type, .array_type, .vector_type, - .optional_type, + .opt_type, .error_union_type, .simple_type, .struct_type, @@ -630,6 +653,7 @@ pub const Tag = enum(u8) { /// data is payload to Vector. type_vector, /// A fully explicitly specified pointer type. + /// TODO actually this is missing some stuff like bit_offset /// data is payload to Pointer. type_pointer, /// An optional type. 
@@ -893,7 +917,7 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { } }; }, - .type_optional => .{ .optional_type = .{ .payload_type = @intToEnum(Index, data) } }, + .type_optional => .{ .opt_type = @intToEnum(Index, data) }, .type_error_union => @panic("TODO"), .type_enum_simple => @panic("TODO"), @@ -971,10 +995,10 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { }), }); }, - .optional_type => |optional_type| { + .opt_type => |opt_type| { ip.items.appendAssumeCapacity(.{ .tag = .type_optional, - .data = @enumToInt(optional_type.payload_type), + .data = @enumToInt(opt_type), }); }, .error_union_type => |error_union_type| { @@ -1192,3 +1216,13 @@ test "basic usage" { } }); try std.testing.expect(another_array_i32 == array_i32); } + +pub fn childType(ip: InternPool, i: Index) Index { + return switch (ip.indexToKey(i)) { + .ptr_type => |ptr_type| ptr_type.elem_type, + .vector_type => |vector_type| vector_type.child, + .array_type => |array_type| array_type.child, + .opt_type => |child| child, + else => unreachable, + }; +} diff --git a/src/Liveness.zig b/src/Liveness.zig index 01fbee9e36..19659940af 100644 --- a/src/Liveness.zig +++ b/src/Liveness.zig @@ -225,6 +225,7 @@ pub fn categorizeOperand( air: Air, inst: Air.Inst.Index, operand: Air.Inst.Index, + ip: InternPool, ) OperandCategory { const air_tags = air.instructions.items(.tag); const air_datas = air.instructions.items(.data); @@ -534,7 +535,7 @@ pub fn categorizeOperand( .aggregate_init => { const ty_pl = air_datas[inst].ty_pl; const aggregate_ty = air.getRefType(ty_pl.ty); - const len = @intCast(usize, aggregate_ty.arrayLen()); + const len = @intCast(usize, aggregate_ty.arrayLenIp(ip)); const elements = @ptrCast([]const Air.Inst.Ref, air.extra[ty_pl.payload..][0..len]); if (elements.len <= bpi - 1) { @@ -625,7 +626,7 @@ pub fn categorizeOperand( var operand_live: bool = true; for (air.extra[cond_extra.end..][0..2]) |cond_inst| { - if (l.categorizeOperand(air, cond_inst, operand) == .tomb) + if (l.categorizeOperand(air, cond_inst, operand, ip) == .tomb) operand_live = false; switch (air_tags[cond_inst]) { @@ -872,6 +873,7 @@ fn analyzeInst( data: *LivenessPassData(pass), inst: Air.Inst.Index, ) Allocator.Error!void { + const ip = a.intern_pool; const inst_tags = a.air.instructions.items(.tag); const inst_datas = a.air.instructions.items(.data); @@ -1140,7 +1142,7 @@ fn analyzeInst( .aggregate_init => { const ty_pl = inst_datas[inst].ty_pl; const aggregate_ty = a.air.getRefType(ty_pl.ty); - const len = @intCast(usize, aggregate_ty.arrayLen()); + const len = @intCast(usize, aggregate_ty.arrayLenIp(ip.*)); const elements = @ptrCast([]const Air.Inst.Ref, a.air.extra[ty_pl.payload..][0..len]); if (elements.len <= bpi - 1) { diff --git a/src/Liveness/Verify.zig b/src/Liveness/Verify.zig index e05f1814ce..7059fec507 100644 --- a/src/Liveness/Verify.zig +++ b/src/Liveness/Verify.zig @@ -325,7 +325,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void { .aggregate_init => { const ty_pl = data[inst].ty_pl; const aggregate_ty = self.air.getRefType(ty_pl.ty); - const len = @intCast(usize, aggregate_ty.arrayLen()); + const len = @intCast(usize, aggregate_ty.arrayLenIp(ip.*)); const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]); var bt = self.liveness.iterateBigTomb(inst); diff --git a/src/Module.zig b/src/Module.zig index 5c84b123c1..67ca91266c 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -5805,7 +5805,7 @@ pub fn analyzeFnBody(mod: *Module, 
func: *Fn, arena: Allocator) SemaError!Air { // is unused so it just has to be a no-op. sema.air_instructions.set(ptr_inst.*, .{ .tag = .alloc, - .data = .{ .ty = Type.initTag(.single_const_pointer_to_comptime_int) }, + .data = .{ .ty = Type.single_const_pointer_to_comptime_int }, }); } } @@ -6545,7 +6545,7 @@ pub fn populateTestFunctions( } const decl = mod.declPtr(decl_index); var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const tmp_test_fn_ty = decl.ty.slicePtrFieldType(&buf).elemType(); + const tmp_test_fn_ty = decl.ty.slicePtrFieldType(&buf).childType(mod); const array_decl_index = d: { // Add mod.test_functions to an array decl then make the test_functions @@ -6575,7 +6575,7 @@ pub fn populateTestFunctions( errdefer name_decl_arena.deinit(); const bytes = try name_decl_arena.allocator().dupe(u8, test_name_slice); const test_name_decl_index = try mod.createAnonymousDeclFromDecl(array_decl, array_decl.src_namespace, null, .{ - .ty = try Type.Tag.array_u8.create(name_decl_arena.allocator(), bytes.len), + .ty = try Type.array(name_decl_arena.allocator(), bytes.len, null, Type.u8, mod), .val = try Value.Tag.bytes.create(name_decl_arena.allocator(), bytes), }); try mod.declPtr(test_name_decl_index).finalizeNewArena(&name_decl_arena); @@ -6609,7 +6609,12 @@ pub fn populateTestFunctions( { // This copy accesses the old Decl Type/Value so it must be done before `clearValues`. - const new_ty = try Type.Tag.const_slice.create(arena, try tmp_test_fn_ty.copy(arena)); + const new_ty = try Type.ptr(arena, mod, .{ + .size = .Slice, + .pointee_type = try tmp_test_fn_ty.copy(arena), + .mutable = false, + .@"addrspace" = .generic, + }); const new_var = try gpa.create(Var); errdefer gpa.destroy(new_var); new_var.* = decl.val.castTag(.variable).?.data.*; @@ -6819,6 +6824,34 @@ pub fn intType(mod: *Module, signedness: std.builtin.Signedness, bits: u16) Allo return i.toType(); } +pub fn arrayType(mod: *Module, info: InternPool.Key.ArrayType) Allocator.Error!Type { + const i = try intern(mod, .{ .array_type = info }); + return i.toType(); +} + +pub fn vectorType(mod: *Module, info: InternPool.Key.VectorType) Allocator.Error!Type { + const i = try intern(mod, .{ .vector_type = info }); + return i.toType(); +} + +pub fn optionalType(mod: *Module, child_type: InternPool.Index) Allocator.Error!Type { + const i = try intern(mod, .{ .opt_type = child_type }); + return i.toType(); +} + +pub fn ptrType(mod: *Module, info: InternPool.Key.PtrType) Allocator.Error!Type { + const i = try intern(mod, .{ .ptr_type = info }); + return i.toType(); +} + +pub fn singleMutPtrType(mod: *Module, child_type: Type) Allocator.Error!Type { + return ptrType(mod, .{ .elem_type = child_type.ip_index }); +} + +pub fn singleConstPtrType(mod: *Module, child_type: Type) Allocator.Error!Type { + return ptrType(mod, .{ .elem_type = child_type.ip_index, .is_const = true }); +} + pub fn smallestUnsignedInt(mod: *Module, max: u64) Allocator.Error!Type { return intType(mod, .unsigned, Type.smallestUnsignedBits(max)); } diff --git a/src/Sema.zig b/src/Sema.zig index 7389719301..87df2f23e1 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -585,13 +585,18 @@ pub const Block = struct { } fn addCmpVector(block: *Block, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref, cmp_op: std.math.CompareOperator) !Air.Inst.Ref { + const sema = block.sema; + const mod = sema.mod; return block.addInst(.{ .tag = if (block.float_mode == .Optimized) .cmp_vector_optimized else .cmp_vector, .data = .{ .ty_pl = .{ - .ty = try block.sema.addType( - try 
Type.vector(block.sema.arena, block.sema.typeOf(lhs).vectorLen(), Type.bool), + .ty = try sema.addType( + try mod.vectorType(.{ + .len = sema.typeOf(lhs).vectorLen(mod), + .child = .bool_type, + }), ), - .payload = try block.sema.addExtra(Air.VectorCmp{ + .payload = try sema.addExtra(Air.VectorCmp{ .lhs = lhs, .rhs = rhs, .op = Air.VectorCmp.encodeOp(cmp_op), @@ -1760,7 +1765,7 @@ pub fn resolveConstString( reason: []const u8, ) ![]u8 { const air_inst = try sema.resolveInst(zir_ref); - const wanted_type = Type.initTag(.const_slice_u8); + const wanted_type = Type.const_slice_u8; const coerced_inst = try sema.coerce(block, wanted_type, air_inst, src); const val = try sema.resolveConstValue(block, src, coerced_inst, reason); return val.toAllocatedBytes(wanted_type, sema.arena, sema.mod); @@ -1788,7 +1793,8 @@ fn analyzeAsType( } pub fn setupErrorReturnTrace(sema: *Sema, block: *Block, last_arg_index: usize) !void { - if (!sema.mod.backendSupportsFeature(.error_return_trace)) return; + const mod = sema.mod; + if (!mod.backendSupportsFeature(.error_return_trace)) return; assert(!block.is_comptime); var err_trace_block = block.makeSubBlock(); @@ -1798,13 +1804,13 @@ pub fn setupErrorReturnTrace(sema: *Sema, block: *Block, last_arg_index: usize) // var addrs: [err_return_trace_addr_count]usize = undefined; const err_return_trace_addr_count = 32; - const addr_arr_ty = try Type.array(sema.arena, err_return_trace_addr_count, null, Type.usize, sema.mod); - const addrs_ptr = try err_trace_block.addTy(.alloc, try Type.Tag.single_mut_pointer.create(sema.arena, addr_arr_ty)); + const addr_arr_ty = try Type.array(sema.arena, err_return_trace_addr_count, null, Type.usize, mod); + const addrs_ptr = try err_trace_block.addTy(.alloc, try mod.singleMutPtrType(addr_arr_ty)); // var st: StackTrace = undefined; const unresolved_stack_trace_ty = try sema.getBuiltinType("StackTrace"); const stack_trace_ty = try sema.resolveTypeFields(unresolved_stack_trace_ty); - const st_ptr = try err_trace_block.addTy(.alloc, try Type.Tag.single_mut_pointer.create(sema.arena, stack_trace_ty)); + const st_ptr = try err_trace_block.addTy(.alloc, try mod.singleMutPtrType(stack_trace_ty)); // st.instruction_addresses = &addrs; const addr_field_ptr = try sema.fieldPtr(&err_trace_block, src, st_ptr, "instruction_addresses", src, true); @@ -2101,11 +2107,10 @@ fn failWithUseOfAsync(sema: *Sema, block: *Block, src: LazySrcLoc) CompileError fn failWithInvalidFieldAccess(sema: *Sema, block: *Block, src: LazySrcLoc, object_ty: Type, field_name: []const u8) CompileError { const mod = sema.mod; - const inner_ty = if (object_ty.isSinglePointer(mod)) object_ty.childType() else object_ty; + const inner_ty = if (object_ty.isSinglePointer(mod)) object_ty.childType(mod) else object_ty; if (inner_ty.zigTypeTag(mod) == .Optional) opt: { - var buf: Type.Payload.ElemType = undefined; - const child_ty = inner_ty.optionalChild(&buf); + const child_ty = inner_ty.optionalChild(mod); if (!typeSupportsFieldAccess(mod, child_ty, field_name)) break :opt; const msg = msg: { const msg = try sema.errMsg(block, src, "optional type '{}' does not support field access", .{object_ty.fmt(sema.mod)}); @@ -2132,7 +2137,7 @@ fn typeSupportsFieldAccess(mod: *const Module, ty: Type, field_name: []const u8) switch (ty.zigTypeTag(mod)) { .Array => return mem.eql(u8, field_name, "len"), .Pointer => { - const ptr_info = ty.ptrInfo().data; + const ptr_info = ty.ptrInfo(mod); if (ptr_info.size == .Slice) { return mem.eql(u8, field_name, "ptr") or mem.eql(u8, field_name, "len"); } 
else if (ptr_info.pointee_type.zigTypeTag(mod) == .Array) { @@ -2504,6 +2509,7 @@ fn coerceResultPtr( dummy_operand: Air.Inst.Ref, trash_block: *Block, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const target = sema.mod.getTarget(); const addr_space = target_util.defaultAddressSpace(target, .local); const pointee_ty = sema.typeOf(dummy_operand); @@ -2547,7 +2553,7 @@ fn coerceResultPtr( return sema.addConstant(ptr_ty, ptr_val); } if (pointee_ty.eql(Type.null, sema.mod)) { - const opt_ty = sema.typeOf(new_ptr).childType(); + const opt_ty = sema.typeOf(new_ptr).childType(mod); const null_inst = try sema.addConstant(opt_ty, Value.null); _ = try block.addBinOp(.store, new_ptr, null_inst); return Air.Inst.Ref.void_value; @@ -3394,7 +3400,7 @@ fn zirEnsureErrUnionPayloadVoid(sema: *Sema, block: *Block, inst: Zir.Inst.Index const operand = try sema.resolveInst(inst_data.operand); const operand_ty = sema.typeOf(operand); const err_union_ty = if (operand_ty.zigTypeTag(mod) == .Pointer) - operand_ty.childType() + operand_ty.childType(mod) else operand_ty; if (err_union_ty.zigTypeTag(mod) != .ErrorUnion) return; @@ -3430,7 +3436,7 @@ fn indexablePtrLen( const mod = sema.mod; const object_ty = sema.typeOf(object); const is_pointer_to = object_ty.isSinglePointer(mod); - const indexable_ty = if (is_pointer_to) object_ty.childType() else object_ty; + const indexable_ty = if (is_pointer_to) object_ty.childType(mod) else object_ty; try checkIndexable(sema, block, src, indexable_ty); return sema.fieldVal(block, src, object, "len", src); } @@ -3441,9 +3447,10 @@ fn indexablePtrLenOrNone( src: LazySrcLoc, operand: Air.Inst.Ref, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const operand_ty = sema.typeOf(operand); try checkMemOperand(sema, block, src, operand_ty); - if (operand_ty.ptrSize() == .Many) return .none; + if (operand_ty.ptrSize(mod) == .Many) return .none; return sema.fieldVal(block, src, operand, "len", src); } @@ -3529,11 +3536,12 @@ fn zirAllocComptime(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErr } fn zirMakePtrConst(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const alloc = try sema.resolveInst(inst_data.operand); const alloc_ty = sema.typeOf(alloc); - var ptr_info = alloc_ty.ptrInfo().data; + var ptr_info = alloc_ty.ptrInfo(mod); const elem_ty = ptr_info.pointee_type; // Detect if all stores to an `.alloc` were comptime-known. 
@@ -3589,9 +3597,10 @@ fn zirMakePtrConst(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro } fn makePtrConst(sema: *Sema, block: *Block, alloc: Air.Inst.Ref) CompileError!Air.Inst.Ref { + const mod = sema.mod; const alloc_ty = sema.typeOf(alloc); - var ptr_info = alloc_ty.ptrInfo().data; + var ptr_info = alloc_ty.ptrInfo(mod); ptr_info.mutable = false; const const_ptr_ty = try Type.ptr(sema.arena, sema.mod, ptr_info); @@ -3947,13 +3956,13 @@ fn zirArrayBasePtr( const start_ptr = try sema.resolveInst(inst_data.operand); var base_ptr = start_ptr; - while (true) switch (sema.typeOf(base_ptr).childType().zigTypeTag(mod)) { + while (true) switch (sema.typeOf(base_ptr).childType(mod).zigTypeTag(mod)) { .ErrorUnion => base_ptr = try sema.analyzeErrUnionPayloadPtr(block, src, base_ptr, false, true), .Optional => base_ptr = try sema.analyzeOptionalPayloadPtr(block, src, base_ptr, false, true), else => break, }; - const elem_ty = sema.typeOf(base_ptr).childType(); + const elem_ty = sema.typeOf(base_ptr).childType(mod); switch (elem_ty.zigTypeTag(mod)) { .Array, .Vector => return base_ptr, .Struct => if (elem_ty.isTuple()) { @@ -3962,7 +3971,7 @@ fn zirArrayBasePtr( }, else => {}, } - return sema.failWithArrayInitNotSupported(block, src, sema.typeOf(start_ptr).childType()); + return sema.failWithArrayInitNotSupported(block, src, sema.typeOf(start_ptr).childType(mod)); } fn zirFieldBasePtr( @@ -3976,18 +3985,18 @@ fn zirFieldBasePtr( const start_ptr = try sema.resolveInst(inst_data.operand); var base_ptr = start_ptr; - while (true) switch (sema.typeOf(base_ptr).childType().zigTypeTag(mod)) { + while (true) switch (sema.typeOf(base_ptr).childType(mod).zigTypeTag(mod)) { .ErrorUnion => base_ptr = try sema.analyzeErrUnionPayloadPtr(block, src, base_ptr, false, true), .Optional => base_ptr = try sema.analyzeOptionalPayloadPtr(block, src, base_ptr, false, true), else => break, }; - const elem_ty = sema.typeOf(base_ptr).childType(); + const elem_ty = sema.typeOf(base_ptr).childType(mod); switch (elem_ty.zigTypeTag(mod)) { .Struct, .Union => return base_ptr, else => {}, } - return sema.failWithStructInitNotSupported(block, src, sema.typeOf(start_ptr).childType()); + return sema.failWithStructInitNotSupported(block, src, sema.typeOf(start_ptr).childType(mod)); } fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -4129,7 +4138,7 @@ fn validateArrayInitTy( switch (ty.zigTypeTag(mod)) { .Array => { - const array_len = ty.arrayLen(); + const array_len = ty.arrayLen(mod); if (extra.init_count != array_len) { return sema.fail(block, src, "expected {d} array elements; found {d}", .{ array_len, extra.init_count, @@ -4138,7 +4147,7 @@ fn validateArrayInitTy( return; }, .Vector => { - const array_len = ty.arrayLen(); + const array_len = ty.arrayLen(mod); if (extra.init_count != array_len) { return sema.fail(block, src, "expected {d} vector elements; found {d}", .{ array_len, extra.init_count, @@ -4148,7 +4157,7 @@ fn validateArrayInitTy( }, .Struct => if (ty.isTuple()) { _ = try sema.resolveTypeFields(ty); - const array_len = ty.arrayLen(); + const array_len = ty.arrayLen(mod); if (extra.init_count > array_len) { return sema.fail(block, src, "expected at most {d} tuple fields; found {d}", .{ array_len, extra.init_count, @@ -4194,7 +4203,7 @@ fn zirValidateStructInit( const field_ptr_data = sema.code.instructions.items(.data)[instrs[0]].pl_node; const field_ptr_extra = sema.code.extraData(Zir.Inst.Field, field_ptr_data.payload_index).data; const object_ptr = try 
sema.resolveInst(field_ptr_extra.lhs); - const agg_ty = sema.typeOf(object_ptr).childType(); + const agg_ty = sema.typeOf(object_ptr).childType(mod); switch (agg_ty.zigTypeTag(mod)) { .Struct => return sema.validateStructInit( block, @@ -4350,6 +4359,7 @@ fn validateStructInit( init_src: LazySrcLoc, instrs: []const Zir.Inst.Index, ) CompileError!void { + const mod = sema.mod; const gpa = sema.gpa; // Maps field index to field_ptr index of where it was already initialized. @@ -4425,14 +4435,13 @@ fn validateStructInit( try sema.tupleFieldPtr(block, init_src, struct_ptr, field_src, @intCast(u32, i), true) else try sema.structFieldPtrByIndex(block, init_src, struct_ptr, @intCast(u32, i), field_src, struct_ty, true); - const field_ty = sema.typeOf(default_field_ptr).childType(); + const field_ty = sema.typeOf(default_field_ptr).childType(mod); const init = try sema.addConstant(field_ty, default_val); try sema.storePtr2(block, init_src, default_field_ptr, init_src, init, field_src, .store); } if (root_msg) |msg| { if (struct_ty.castTag(.@"struct")) |struct_obj| { - const mod = sema.mod; const fqn = try struct_obj.data.getFullyQualifiedName(mod); defer gpa.free(fqn); try mod.errNoteNonLazy( @@ -4605,7 +4614,7 @@ fn validateStructInit( try sema.tupleFieldPtr(block, init_src, struct_ptr, field_src, @intCast(u32, i), true) else try sema.structFieldPtrByIndex(block, init_src, struct_ptr, @intCast(u32, i), field_src, struct_ty, true); - const field_ty = sema.typeOf(default_field_ptr).childType(); + const field_ty = sema.typeOf(default_field_ptr).childType(mod); const init = try sema.addConstant(field_ty, field_values[i]); try sema.storePtr2(block, init_src, default_field_ptr, init_src, init, field_src, .store); } @@ -4624,8 +4633,8 @@ fn zirValidateArrayInit( const first_elem_ptr_data = sema.code.instructions.items(.data)[instrs[0]].pl_node; const elem_ptr_extra = sema.code.extraData(Zir.Inst.ElemPtrImm, first_elem_ptr_data.payload_index).data; const array_ptr = try sema.resolveInst(elem_ptr_extra.ptr); - const array_ty = sema.typeOf(array_ptr).childType(); - const array_len = array_ty.arrayLen(); + const array_ty = sema.typeOf(array_ptr).childType(mod); + const array_len = array_ty.arrayLen(mod); if (instrs.len != array_len) switch (array_ty.zigTypeTag(mod)) { .Struct => { @@ -4670,10 +4679,10 @@ fn zirValidateArrayInit( // at comptime so we have almost nothing to do here. However, in case of a // sentinel-terminated array, the sentinel will not have been populated by // any ZIR instructions at comptime; we need to do that here. - if (array_ty.sentinel()) |sentinel_val| { + if (array_ty.sentinel(mod)) |sentinel_val| { const array_len_ref = try sema.addIntUnsigned(Type.usize, array_len); const sentinel_ptr = try sema.elemPtrArray(block, init_src, init_src, array_ptr, init_src, array_len_ref, true, true); - const sentinel = try sema.addConstant(array_ty.childType(), sentinel_val); + const sentinel = try sema.addConstant(array_ty.childType(mod), sentinel_val); try sema.storePtr2(block, init_src, sentinel_ptr, init_src, sentinel, init_src, .store); } return; @@ -4685,7 +4694,7 @@ fn zirValidateArrayInit( // Collect the comptime element values in case the array literal ends up // being comptime-known. 
- const array_len_s = try sema.usizeCast(block, init_src, array_ty.arrayLenIncludingSentinel()); + const array_len_s = try sema.usizeCast(block, init_src, array_ty.arrayLenIncludingSentinel(mod)); const element_vals = try sema.arena.alloc(Value, array_len_s); const opt_opv = try sema.typeHasOnePossibleValue(array_ty); const air_tags = sema.air_instructions.items(.tag); @@ -4784,7 +4793,7 @@ fn zirValidateArrayInit( // Our task is to delete all the `elem_ptr` and `store` instructions, and insert // instead a single `store` to the array_ptr with a comptime struct value. // Also to populate the sentinel value, if any. - if (array_ty.sentinel()) |sentinel_val| { + if (array_ty.sentinel(mod)) |sentinel_val| { element_vals[instrs.len] = sentinel_val; } @@ -4806,13 +4815,13 @@ fn zirValidateDeref(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErr if (operand_ty.zigTypeTag(mod) != .Pointer) { return sema.fail(block, src, "cannot dereference non-pointer type '{}'", .{operand_ty.fmt(sema.mod)}); - } else switch (operand_ty.ptrSize()) { + } else switch (operand_ty.ptrSize(mod)) { .One, .C => {}, .Many => return sema.fail(block, src, "index syntax required for unknown-length pointer type '{}'", .{operand_ty.fmt(sema.mod)}), .Slice => return sema.fail(block, src, "index syntax required for slice type '{}'", .{operand_ty.fmt(sema.mod)}), } - if ((try sema.typeHasOnePossibleValue(operand_ty.childType())) != null) { + if ((try sema.typeHasOnePossibleValue(operand_ty.childType(mod))) != null) { // No need to validate the actual pointer value, we don't need it! return; } @@ -5132,7 +5141,7 @@ fn addStrLit(sema: *Sema, block: *Block, zir_bytes: []const u8) CompileError!Air defer anon_decl.deinit(); const decl_index = try anon_decl.finish( - try Type.Tag.array_u8_sentinel_0.create(anon_decl.arena(), gop.key_ptr.len), + try Type.array(anon_decl.arena(), gop.key_ptr.len, Value.zero, Type.u8, mod), try Value.Tag.str_lit.create(anon_decl.arena(), gop.key_ptr.*), 0, // default alignment ); @@ -6003,10 +6012,11 @@ fn addDbgVar( air_tag: Air.Inst.Tag, name: []const u8, ) CompileError!void { + const mod = sema.mod; const operand_ty = sema.typeOf(operand); switch (air_tag) { .dbg_var_ptr => { - if (!(try sema.typeHasRuntimeBits(operand_ty.childType()))) return; + if (!(try sema.typeHasRuntimeBits(operand_ty.childType(mod)))) return; }, .dbg_var_val => { if (!(try sema.typeHasRuntimeBits(operand_ty))) return; @@ -6238,7 +6248,7 @@ fn popErrorReturnTrace( const unresolved_stack_trace_ty = try sema.getBuiltinType("StackTrace"); const stack_trace_ty = try sema.resolveTypeFields(unresolved_stack_trace_ty); - const ptr_stack_trace_ty = try Type.Tag.single_mut_pointer.create(sema.arena, stack_trace_ty); + const ptr_stack_trace_ty = try mod.singleMutPtrType(stack_trace_ty); const err_return_trace = try block.addTy(.err_return_trace, ptr_stack_trace_ty); const field_ptr = try sema.structFieldPtr(block, src, err_return_trace, "index", src, stack_trace_ty, true); try sema.storePtr2(block, src, field_ptr, src, saved_error_trace_index, src, .store); @@ -6263,7 +6273,7 @@ fn popErrorReturnTrace( // If non-error, then pop the error return trace by restoring the index. 
const unresolved_stack_trace_ty = try sema.getBuiltinType("StackTrace"); const stack_trace_ty = try sema.resolveTypeFields(unresolved_stack_trace_ty); - const ptr_stack_trace_ty = try Type.Tag.single_mut_pointer.create(sema.arena, stack_trace_ty); + const ptr_stack_trace_ty = try mod.singleMutPtrType(stack_trace_ty); const err_return_trace = try then_block.addTy(.err_return_trace, ptr_stack_trace_ty); const field_ptr = try sema.structFieldPtr(&then_block, src, err_return_trace, "index", src, stack_trace_ty, true); try sema.storePtr2(&then_block, src, field_ptr, src, saved_error_trace_index, src, .store); @@ -6456,16 +6466,15 @@ fn checkCallArgumentCount( switch (callee_ty.zigTypeTag(mod)) { .Fn => break :func_ty callee_ty, .Pointer => { - const ptr_info = callee_ty.ptrInfo().data; + const ptr_info = callee_ty.ptrInfo(mod); if (ptr_info.size == .One and ptr_info.pointee_type.zigTypeTag(mod) == .Fn) { break :func_ty ptr_info.pointee_type; } }, .Optional => { - var buf: Type.Payload.ElemType = undefined; - const opt_child = callee_ty.optionalChild(&buf); + const opt_child = callee_ty.optionalChild(mod); if (opt_child.zigTypeTag(mod) == .Fn or (opt_child.isSinglePointer(mod) and - opt_child.childType().zigTypeTag(mod) == .Fn)) + opt_child.childType(mod).zigTypeTag(mod) == .Fn)) { const msg = msg: { const msg = try sema.errMsg(block, func_src, "cannot call optional type '{}'", .{ @@ -6529,7 +6538,7 @@ fn callBuiltin( switch (callee_ty.zigTypeTag(mod)) { .Fn => break :func_ty callee_ty, .Pointer => { - const ptr_info = callee_ty.ptrInfo().data; + const ptr_info = callee_ty.ptrInfo(mod); if (ptr_info.size == .One and ptr_info.pointee_type.zigTypeTag(mod) == .Fn) { break :func_ty ptr_info.pointee_type; } @@ -7929,7 +7938,7 @@ fn zirOptionalType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro } else if (child_type.zigTypeTag(mod) == .Null) { return sema.fail(block, operand_src, "type '{}' cannot be optional", .{child_type.fmt(sema.mod)}); } - const opt_type = try Type.optional(sema.arena, child_type); + const opt_type = try Type.optional(sema.arena, child_type, mod); return sema.addType(opt_type); } @@ -7949,16 +7958,17 @@ fn zirElemTypeIndex(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErr } fn zirVectorType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const elem_type_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const len_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; - const len = try sema.resolveInt(block, len_src, extra.lhs, Type.u32, "vector length must be comptime-known"); + const len = @intCast(u32, try sema.resolveInt(block, len_src, extra.lhs, Type.u32, "vector length must be comptime-known")); const elem_type = try sema.resolveType(block, elem_type_src, extra.rhs); try sema.checkVectorElemType(block, elem_type_src, elem_type); - const vector_type = try Type.Tag.vector.create(sema.arena, .{ - .len = @intCast(u32, len), - .elem_type = elem_type, + const vector_type = try mod.vectorType(.{ + .len = len, + .child = elem_type.ip_index, }); return sema.addType(vector_type); } @@ -8377,16 +8387,16 @@ fn analyzeOptionalPayloadPtr( const optional_ptr_ty = sema.typeOf(optional_ptr); assert(optional_ptr_ty.zigTypeTag(mod) == .Pointer); - const opt_type = optional_ptr_ty.elemType(); + const opt_type = 
optional_ptr_ty.childType(mod); if (opt_type.zigTypeTag(mod) != .Optional) { return sema.fail(block, src, "expected optional type, found '{}'", .{opt_type.fmt(sema.mod)}); } - const child_type = try opt_type.optionalChildAlloc(sema.arena); + const child_type = opt_type.optionalChild(mod); const child_pointer = try Type.ptr(sema.arena, sema.mod, .{ .pointee_type = child_type, .mutable = !optional_ptr_ty.isConstPtr(), - .@"addrspace" = optional_ptr_ty.ptrAddressSpace(), + .@"addrspace" = optional_ptr_ty.ptrAddressSpace(mod), }); if (try sema.resolveDefinedValue(block, src, optional_ptr)) |ptr_val| { @@ -8401,7 +8411,7 @@ fn analyzeOptionalPayloadPtr( child_pointer, try Value.Tag.opt_payload_ptr.create(sema.arena, .{ .container_ptr = ptr_val, - .container_ty = optional_ptr_ty.childType(), + .container_ty = optional_ptr_ty.childType(mod), }), ); } @@ -8414,7 +8424,7 @@ fn analyzeOptionalPayloadPtr( child_pointer, try Value.Tag.opt_payload_ptr.create(sema.arena, .{ .container_ptr = ptr_val, - .container_ty = optional_ptr_ty.childType(), + .container_ty = optional_ptr_ty.childType(mod), }), ); } @@ -8448,14 +8458,14 @@ fn zirOptionalPayload( const operand = try sema.resolveInst(inst_data.operand); const operand_ty = sema.typeOf(operand); const result_ty = switch (operand_ty.zigTypeTag(mod)) { - .Optional => try operand_ty.optionalChildAlloc(sema.arena), + .Optional => operand_ty.optionalChild(mod), .Pointer => t: { - if (operand_ty.ptrSize() != .C) { + if (operand_ty.ptrSize(mod) != .C) { return sema.failWithExpectedOptionalType(block, src, operand_ty); } // TODO https://github.com/ziglang/zig/issues/6597 if (true) break :t operand_ty; - const ptr_info = operand_ty.ptrInfo().data; + const ptr_info = operand_ty.ptrInfo(mod); break :t try Type.ptr(sema.arena, sema.mod, .{ .pointee_type = try ptr_info.pointee_type.copy(sema.arena), .@"align" = ptr_info.@"align", @@ -8569,18 +8579,18 @@ fn analyzeErrUnionPayloadPtr( const operand_ty = sema.typeOf(operand); assert(operand_ty.zigTypeTag(mod) == .Pointer); - if (operand_ty.elemType().zigTypeTag(mod) != .ErrorUnion) { + if (operand_ty.childType(mod).zigTypeTag(mod) != .ErrorUnion) { return sema.fail(block, src, "expected error union type, found '{}'", .{ - operand_ty.elemType().fmt(sema.mod), + operand_ty.childType(mod).fmt(sema.mod), }); } - const err_union_ty = operand_ty.elemType(); + const err_union_ty = operand_ty.childType(mod); const payload_ty = err_union_ty.errorUnionPayload(); const operand_pointer_ty = try Type.ptr(sema.arena, sema.mod, .{ .pointee_type = payload_ty, .mutable = !operand_ty.isConstPtr(), - .@"addrspace" = operand_ty.ptrAddressSpace(), + .@"addrspace" = operand_ty.ptrAddressSpace(mod), }); if (try sema.resolveDefinedValue(block, src, operand)) |ptr_val| { @@ -8596,7 +8606,7 @@ fn analyzeErrUnionPayloadPtr( operand_pointer_ty, try Value.Tag.eu_payload_ptr.create(sema.arena, .{ .container_ptr = ptr_val, - .container_ty = operand_ty.elemType(), + .container_ty = operand_ty.childType(mod), }), ); } @@ -8609,7 +8619,7 @@ fn analyzeErrUnionPayloadPtr( operand_pointer_ty, try Value.Tag.eu_payload_ptr.create(sema.arena, .{ .container_ptr = ptr_val, - .container_ty = operand_ty.elemType(), + .container_ty = operand_ty.childType(mod), }), ); } @@ -8674,13 +8684,13 @@ fn zirErrUnionCodePtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE const operand_ty = sema.typeOf(operand); assert(operand_ty.zigTypeTag(mod) == .Pointer); - if (operand_ty.elemType().zigTypeTag(mod) != .ErrorUnion) { + if 
(operand_ty.childType(mod).zigTypeTag(mod) != .ErrorUnion) { return sema.fail(block, src, "expected error union type, found '{}'", .{ - operand_ty.elemType().fmt(sema.mod), + operand_ty.childType(mod).fmt(sema.mod), }); } - const result_ty = operand_ty.elemType().errorUnionSet(); + const result_ty = operand_ty.childType(mod).errorUnionSet(); if (try sema.resolveDefinedValue(block, src, operand)) |pointer_val| { if (try sema.pointerDeref(block, src, pointer_val, operand_ty)) |val| { @@ -10119,7 +10129,7 @@ fn zirSwitchCapture( const operand_is_ref = cond_tag == .switch_cond_ref; const operand_ptr = try sema.resolveInst(cond_info.operand); const operand_ptr_ty = sema.typeOf(operand_ptr); - const operand_ty = if (operand_is_ref) operand_ptr_ty.childType() else operand_ptr_ty; + const operand_ty = if (operand_is_ref) operand_ptr_ty.childType(mod) else operand_ptr_ty; if (block.inline_case_capture != .none) { const item_val = sema.resolveConstValue(block, .unneeded, block.inline_case_capture, undefined) catch unreachable; @@ -10131,9 +10141,9 @@ fn zirSwitchCapture( if (is_ref) { const ptr_field_ty = try Type.ptr(sema.arena, sema.mod, .{ .pointee_type = field_ty, - .mutable = operand_ptr_ty.ptrIsMutable(), + .mutable = operand_ptr_ty.ptrIsMutable(mod), .@"volatile" = operand_ptr_ty.isVolatilePtr(), - .@"addrspace" = operand_ptr_ty.ptrAddressSpace(), + .@"addrspace" = operand_ptr_ty.ptrAddressSpace(mod), }); return sema.addConstant( ptr_field_ty, @@ -10150,9 +10160,9 @@ fn zirSwitchCapture( if (is_ref) { const ptr_field_ty = try Type.ptr(sema.arena, sema.mod, .{ .pointee_type = field_ty, - .mutable = operand_ptr_ty.ptrIsMutable(), + .mutable = operand_ptr_ty.ptrIsMutable(mod), .@"volatile" = operand_ptr_ty.isVolatilePtr(), - .@"addrspace" = operand_ptr_ty.ptrAddressSpace(), + .@"addrspace" = operand_ptr_ty.ptrAddressSpace(mod), }); return block.addStructFieldPtr(operand_ptr, field_index, ptr_field_ty); } else { @@ -10235,7 +10245,7 @@ fn zirSwitchCapture( const field_ty_ptr = try Type.ptr(sema.arena, sema.mod, .{ .pointee_type = first_field.ty, .@"addrspace" = .generic, - .mutable = operand_ptr_ty.ptrIsMutable(), + .mutable = operand_ptr_ty.ptrIsMutable(mod), }); if (try sema.resolveDefinedValue(block, operand_src, operand_ptr)) |op_ptr_val| { @@ -10311,7 +10321,7 @@ fn zirSwitchCaptureTag(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compile const cond_data = zir_datas[Zir.refToIndex(inst_data.operand).?].un_node; const operand_ptr = try sema.resolveInst(cond_data.operand); const operand_ptr_ty = sema.typeOf(operand_ptr); - const operand_ty = if (is_ref) operand_ptr_ty.childType() else operand_ptr_ty; + const operand_ty = if (is_ref) operand_ptr_ty.childType(mod) else operand_ptr_ty; if (operand_ty.zigTypeTag(mod) != .Union) { const msg = msg: { @@ -10448,7 +10458,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const cond_index = Zir.refToIndex(extra.data.operand).?; const raw_operand = sema.resolveInst(zir_data[cond_index].un_node.operand) catch unreachable; const target_ty = sema.typeOf(raw_operand); - break :blk if (zir_tags[cond_index] == .switch_cond_ref) target_ty.elemType() else target_ty; + break :blk if (zir_tags[cond_index] == .switch_cond_ref) target_ty.childType(mod) else target_ty; }; const union_originally = maybe_union_ty.zigTypeTag(mod) == .Union; @@ -12132,7 +12142,7 @@ fn zirEmbedFile(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A // into the final binary, and never loads the data into memory. 
// - When a Decl is destroyed, it can free the `*Module.EmbedFile`. embed_file.owner_decl = try anon_decl.finish( - try Type.Tag.array_u8_sentinel_0.create(anon_decl.arena(), embed_file.bytes.len), + try Type.array(anon_decl.arena(), embed_file.bytes.len, Value.zero, Type.u8, mod), try Value.Tag.bytes.create(anon_decl.arena(), bytes_including_null), 0, // default alignment ); @@ -12200,7 +12210,7 @@ fn zirShl( const bit_value = Value.initPayload(&bits_payload.base); if (rhs_ty.zigTypeTag(mod) == .Vector) { var i: usize = 0; - while (i < rhs_ty.vectorLen()) : (i += 1) { + while (i < rhs_ty.vectorLen(mod)) : (i += 1) { var elem_value_buf: Value.ElemValueBuffer = undefined; const rhs_elem = rhs_val.elemValueBuffer(sema.mod, i, &elem_value_buf); if (rhs_elem.compareHetero(.gte, bit_value, mod)) { @@ -12220,7 +12230,7 @@ fn zirShl( } if (rhs_ty.zigTypeTag(mod) == .Vector) { var i: usize = 0; - while (i < rhs_ty.vectorLen()) : (i += 1) { + while (i < rhs_ty.vectorLen(mod)) : (i += 1) { var elem_value_buf: Value.ElemValueBuffer = undefined; const rhs_elem = rhs_val.elemValueBuffer(sema.mod, i, &elem_value_buf); if (rhs_elem.compareHetero(.lt, Value.zero, mod)) { @@ -12388,7 +12398,7 @@ fn zirShr( const bit_value = Value.initPayload(&bits_payload.base); if (rhs_ty.zigTypeTag(mod) == .Vector) { var i: usize = 0; - while (i < rhs_ty.vectorLen()) : (i += 1) { + while (i < rhs_ty.vectorLen(mod)) : (i += 1) { var elem_value_buf: Value.ElemValueBuffer = undefined; const rhs_elem = rhs_val.elemValueBuffer(sema.mod, i, &elem_value_buf); if (rhs_elem.compareHetero(.gte, bit_value, mod)) { @@ -12408,7 +12418,7 @@ fn zirShr( } if (rhs_ty.zigTypeTag(mod) == .Vector) { var i: usize = 0; - while (i < rhs_ty.vectorLen()) : (i += 1) { + while (i < rhs_ty.vectorLen(mod)) : (i += 1) { var elem_value_buf: Value.ElemValueBuffer = undefined; const rhs_elem = rhs_val.elemValueBuffer(sema.mod, i, &elem_value_buf); if (rhs_elem.compareHetero(.lt, Value.zero, mod)) { @@ -12571,7 +12581,7 @@ fn zirBitNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. if (val.isUndef()) { return sema.addConstUndef(operand_type); } else if (operand_type.zigTypeTag(mod) == .Vector) { - const vec_len = try sema.usizeCast(block, operand_src, operand_type.vectorLen()); + const vec_len = try sema.usizeCast(block, operand_src, operand_type.vectorLen(mod)); var elem_val_buf: Value.ElemValueBuffer = undefined; const elems = try sema.arena.alloc(Value, vec_len); for (elems, 0..) 
|*elem, i| { @@ -12768,8 +12778,8 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const result_ty = try Type.array(sema.arena, result_len, res_sent_val, resolved_elem_ty, sema.mod); const mod = sema.mod; const ptr_addrspace = p: { - if (lhs_ty.zigTypeTag(mod) == .Pointer) break :p lhs_ty.ptrAddressSpace(); - if (rhs_ty.zigTypeTag(mod) == .Pointer) break :p rhs_ty.ptrAddressSpace(); + if (lhs_ty.zigTypeTag(mod) == .Pointer) break :p lhs_ty.ptrAddressSpace(mod); + if (rhs_ty.zigTypeTag(mod) == .Pointer) break :p rhs_ty.ptrAddressSpace(mod); break :p null; }; @@ -12883,9 +12893,9 @@ fn getArrayCatInfo(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Ins const mod = sema.mod; const operand_ty = sema.typeOf(operand); switch (operand_ty.zigTypeTag(mod)) { - .Array => return operand_ty.arrayInfo(), + .Array => return operand_ty.arrayInfo(mod), .Pointer => { - const ptr_info = operand_ty.ptrInfo().data; + const ptr_info = operand_ty.ptrInfo(mod); switch (ptr_info.size) { // TODO: in the Many case here this should only work if the type // has a sentinel, and this code should compute the length based @@ -12900,7 +12910,7 @@ fn getArrayCatInfo(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Ins }, .One => { if (ptr_info.pointee_type.zigTypeTag(mod) == .Array) { - return ptr_info.pointee_type.arrayInfo(); + return ptr_info.pointee_type.arrayInfo(mod); } }, .C => {}, @@ -12912,7 +12922,7 @@ fn getArrayCatInfo(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Ins return .{ .elem_type = peer_ty.elemType2(mod), .sentinel = null, - .len = operand_ty.arrayLen(), + .len = operand_ty.arrayLen(mod), }; } }, @@ -13035,7 +13045,7 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const result_ty = try Type.array(sema.arena, result_len, lhs_info.sentinel, lhs_info.elem_type, sema.mod); - const ptr_addrspace = if (lhs_ty.zigTypeTag(mod) == .Pointer) lhs_ty.ptrAddressSpace() else null; + const ptr_addrspace = if (lhs_ty.zigTypeTag(mod) == .Pointer) lhs_ty.ptrAddressSpace(mod) else null; const lhs_len = try sema.usizeCast(block, lhs_src, lhs_info.len); if (try sema.resolveDefinedValue(block, lhs_src, lhs)) |lhs_val| { @@ -14022,7 +14032,7 @@ fn intRem( ) CompileError!Value { const mod = sema.mod; if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try sema.arena.alloc(Value, ty.vectorLen()); + const result_data = try sema.arena.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) 
         for (result_data, 0..) |*scalar, i| {
             var lhs_buf: Value.ElemValueBuffer = undefined;
             var rhs_buf: Value.ElemValueBuffer = undefined;
@@ -14484,7 +14494,10 @@ fn maybeRepeated(sema: *Sema, ty: Type, val: Value) !Value {
 
 fn overflowArithmeticTupleType(sema: *Sema, ty: Type) !Type {
     const mod = sema.mod;
-    const ov_ty = if (ty.zigTypeTag(mod) == .Vector) try Type.vector(sema.arena, ty.vectorLen(), Type.u1) else Type.u1;
+    const ov_ty = if (ty.zigTypeTag(mod) == .Vector) try mod.vectorType(.{
+        .len = ty.vectorLen(mod),
+        .child = .u1_type,
+    }) else Type.u1;
 
     const types = try sema.arena.alloc(Type, 2);
     const values = try sema.arena.alloc(Value, 2);
@@ -14520,7 +14533,7 @@ fn analyzeArithmetic(
     const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod);
     try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
 
-    if (lhs_zig_ty_tag == .Pointer) switch (lhs_ty.ptrSize()) {
+    if (lhs_zig_ty_tag == .Pointer) switch (lhs_ty.ptrSize(mod)) {
         .One, .Slice => {},
         .Many, .C => {
             const air_tag: Air.Inst.Tag = switch (zir_tag) {
@@ -14993,9 +15006,9 @@ fn analyzePtrArithmetic(
     const opt_ptr_val = try sema.resolveMaybeUndefVal(ptr);
     const opt_off_val = try sema.resolveDefinedValue(block, offset_src, offset);
     const ptr_ty = sema.typeOf(ptr);
-    const ptr_info = ptr_ty.ptrInfo().data;
+    const ptr_info = ptr_ty.ptrInfo(mod);
     const elem_ty = if (ptr_info.size == .One and ptr_info.pointee_type.zigTypeTag(mod) == .Array)
-        ptr_info.pointee_type.childType()
+        ptr_info.pointee_type.childType(mod)
     else
         ptr_info.pointee_type;
@@ -15466,7 +15479,10 @@ fn cmpSelf(
     if (rhs_val.isUndef()) return sema.addConstUndef(Type.bool);
 
     if (resolved_type.zigTypeTag(mod) == .Vector) {
-        const result_ty = try Type.vector(sema.arena, resolved_type.vectorLen(), Type.bool);
+        const result_ty = try mod.vectorType(.{
+            .len = resolved_type.vectorLen(mod),
+            .child = .bool_type,
+        });
         const cmp_val = try sema.compareVector(lhs_val, op, rhs_val, resolved_type);
         return sema.addConstant(result_ty, cmp_val);
     }
@@ -15767,6 +15783,7 @@ fn zirBuiltinSrc(
     const tracy = trace(@src());
     defer tracy.end();
 
+    const mod = sema.mod;
     const extra = sema.code.extraData(Zir.Inst.Src, extended.operand).data;
     const src = LazySrcLoc.nodeOffset(extra.node);
     const func = sema.func orelse return sema.fail(block, src, "@src outside function", .{});
@@ -15778,7 +15795,7 @@ fn zirBuiltinSrc(
         const name = std.mem.span(fn_owner_decl.name);
         const bytes = try anon_decl.arena().dupe(u8, name[0 .. name.len + 1]);
         const new_decl = try anon_decl.finish(
-            try Type.Tag.array_u8_sentinel_0.create(anon_decl.arena(), bytes.len - 1),
+            try Type.array(anon_decl.arena(), bytes.len - 1, Value.zero, Type.u8, mod),
             try Value.Tag.bytes.create(anon_decl.arena(), bytes),
             0, // default alignment
         );
@@ -15791,7 +15808,7 @@ fn zirBuiltinSrc(
         // The compiler must not call realpath anywhere.
         const name = try fn_owner_decl.getFileScope().fullPathZ(anon_decl.arena());
         const new_decl = try anon_decl.finish(
-            try Type.Tag.array_u8_sentinel_0.create(anon_decl.arena(), name.len),
+            try Type.array(anon_decl.arena(), name.len, Value.zero, Type.u8, mod),
             try Value.Tag.bytes.create(anon_decl.arena(), name[0 .. name.len + 1]),
             0, // default alignment
         );
@@ -16024,7 +16041,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
             );
         },
         .Pointer => {
-            const info = ty.ptrInfo().data;
+            const info = ty.ptrInfo(mod);
             const alignment = if (info.@"align" != 0)
                 try Value.Tag.int_u64.create(sema.arena, info.@"align")
             else
@@ -16059,7 +16076,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
             );
         },
         .Array => {
-            const info = ty.arrayInfo();
+            const info = ty.arrayInfo(mod);
             const field_values = try sema.arena.alloc(Value, 3);
             // len: comptime_int,
             field_values[0] = try Value.Tag.int_u64.create(sema.arena, info.len);
@@ -16077,7 +16094,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
             );
         },
         .Vector => {
-            const info = ty.arrayInfo();
+            const info = ty.arrayInfo(mod);
             const field_values = try sema.arena.alloc(Value, 2);
             // len: comptime_int,
             field_values[0] = try Value.Tag.int_u64.create(sema.arena, info.len);
@@ -16095,7 +16112,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
         .Optional => {
             const field_values = try sema.arena.alloc(Value, 1);
             // child: type,
-            field_values[0] = try Value.Tag.ty.create(sema.arena, try ty.optionalChildAlloc(sema.arena));
+            field_values[0] = try Value.Tag.ty.create(sema.arena, ty.optionalChild(mod));
 
             return sema.addConstant(
                 type_info_ty,
@@ -16141,7 +16158,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                     defer anon_decl.deinit();
                     const bytes = try anon_decl.arena().dupeZ(u8, name);
                     const new_decl = try anon_decl.finish(
-                        try Type.Tag.array_u8_sentinel_0.create(anon_decl.arena(), bytes.len),
+                        try Type.array(anon_decl.arena(), bytes.len, Value.zero, Type.u8, mod),
                         try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]),
                         0, // default alignment
                     );
@@ -16250,7 +16267,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 defer anon_decl.deinit();
                 const bytes = try anon_decl.arena().dupeZ(u8, name);
                 const new_decl = try anon_decl.finish(
-                    try Type.Tag.array_u8_sentinel_0.create(anon_decl.arena(), bytes.len),
+                    try Type.array(anon_decl.arena(), bytes.len, Value.zero, Type.u8, mod),
                     try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]),
                     0, // default alignment
                 );
@@ -16338,7 +16355,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                     defer anon_decl.deinit();
                     const bytes = try anon_decl.arena().dupeZ(u8, name);
                     const new_decl = try anon_decl.finish(
-                        try Type.Tag.array_u8_sentinel_0.create(anon_decl.arena(), bytes.len),
+                        try Type.array(anon_decl.arena(), bytes.len, Value.zero, Type.u8, mod),
                        try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]),
                         0, // default alignment
                     );
@@ -16448,7 +16465,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 else
                     try std.fmt.allocPrintZ(anon_decl.arena(), "{d}", .{i});
                 const new_decl = try anon_decl.finish(
-                    try Type.Tag.array_u8_sentinel_0.create(anon_decl.arena(), bytes.len),
+                    try Type.array(anon_decl.arena(), bytes.len, Value.zero, Type.u8, mod),
                     try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]),
                     0, // default alignment
                 );
@@ -16490,7 +16507,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 defer anon_decl.deinit();
                 const bytes = try anon_decl.arena().dupeZ(u8, name);
                 const new_decl = try anon_decl.finish(
-                    try Type.Tag.array_u8_sentinel_0.create(anon_decl.arena(), bytes.len),
+                    try Type.array(anon_decl.arena(), bytes.len, Value.zero, Type.u8, mod),
                     try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]),
                     0, // default alignment
                 );
@@ -16666,14 +16683,15 @@ fn typeInfoNamespaceDecls(
     decl_vals: *std.ArrayList(Value),
     seen_namespaces: *std.AutoHashMap(*Namespace, void),
 ) !void {
+    const mod = sema.mod;
     const gop = try seen_namespaces.getOrPut(namespace);
     if (gop.found_existing) return;
     const decls = namespace.decls.keys();
     for (decls) |decl_index| {
-        const decl = sema.mod.declPtr(decl_index);
+        const decl = mod.declPtr(decl_index);
         if (decl.kind == .@"usingnamespace") {
             if (decl.analysis == .in_progress) continue;
-            try sema.mod.ensureDeclAnalyzed(decl_index);
+            try mod.ensureDeclAnalyzed(decl_index);
             const new_ns = decl.val.toType().getNamespace().?;
             try sema.typeInfoNamespaceDecls(block, decls_anon_decl, new_ns, decl_vals, seen_namespaces);
             continue;
@@ -16684,7 +16702,7 @@ fn typeInfoNamespaceDecls(
         defer anon_decl.deinit();
         const bytes = try anon_decl.arena().dupeZ(u8, mem.sliceTo(decl.name, 0));
         const new_decl = try anon_decl.finish(
-            try Type.Tag.array_u8_sentinel_0.create(anon_decl.arena(), bytes.len),
+            try Type.array(anon_decl.arena(), bytes.len, Value.zero, Type.u8, mod),
             try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]),
             0, // default alignment
         );
@@ -16770,9 +16788,9 @@ fn log2IntType(sema: *Sema, block: *Block, operand: Type, src: LazySrcLoc) Compi
         .Vector => {
             const elem_ty = operand.elemType2(mod);
             const log2_elem_ty = try sema.log2IntType(block, elem_ty, src);
-            return Type.Tag.vector.create(sema.arena, .{
-                .len = operand.vectorLen(),
-                .elem_type = log2_elem_ty,
+            return mod.vectorType(.{
+                .len = operand.vectorLen(mod),
+                .child = log2_elem_ty.ip_index,
             });
         },
         else => {},
@@ -17207,7 +17225,7 @@ fn zirTryPtr(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileErr
     _ = try sema.analyzeBodyInner(&sub_block, body);
 
     const operand_ty = sema.typeOf(operand);
-    const ptr_info = operand_ty.ptrInfo().data;
+    const ptr_info = operand_ty.ptrInfo(mod);
     const res_ty = try Type.ptr(sema.arena, sema.mod, .{
         .pointee_type = err_union_ty.errorUnionPayload(),
         .@"addrspace" = ptr_info.@"addrspace",
@@ -17398,6 +17416,7 @@ fn retWithErrTracing(
     ret_tag: Air.Inst.Tag,
     operand: Air.Inst.Ref,
 ) CompileError!Zir.Inst.Index {
+    const mod = sema.mod;
     const need_check = switch (is_non_err) {
         .bool_true => {
             _ = try block.addUnOp(ret_tag, operand);
@@ -17409,7 +17428,7 @@ fn retWithErrTracing(
     const gpa = sema.gpa;
     const unresolved_stack_trace_ty = try sema.getBuiltinType("StackTrace");
     const stack_trace_ty = try sema.resolveTypeFields(unresolved_stack_trace_ty);
-    const ptr_stack_trace_ty = try Type.Tag.single_mut_pointer.create(sema.arena, stack_trace_ty);
+    const ptr_stack_trace_ty = try mod.singleMutPtrType(stack_trace_ty);
     const err_return_trace = try block.addTy(.err_return_trace, ptr_stack_trace_ty);
     const return_err_fn = try sema.getBuiltin("returnError");
     const args: [1]Air.Inst.Ref = .{err_return_trace};
@@ -17755,7 +17774,7 @@ fn structInitEmpty(
 
 fn arrayInitEmpty(sema: *Sema, block: *Block, src: LazySrcLoc, obj_ty: Type) CompileError!Air.Inst.Ref {
     const mod = sema.mod;
-    const arr_len = obj_ty.arrayLen();
+    const arr_len = obj_ty.arrayLen(mod);
     if (arr_len != 0) {
         if (obj_ty.zigTypeTag(mod) == .Array) {
             return sema.fail(block, src, "expected {d} array elements; found 0", .{arr_len});
@@ -17763,7 +17782,7 @@ fn arrayInitEmpty(sema: *Sema, block: *Block, src: LazySrcLoc, obj_ty: Type) Com
             return sema.fail(block, src, "expected {d} vector elements; found 0", .{arr_len});
         }
     }
-    if (obj_ty.sentinel()) |sentinel| {
+    if (obj_ty.sentinel(mod)) |sentinel| {
         const val = try Value.Tag.empty_array_sentinel.create(sema.arena, sentinel);
         return sema.addConstant(obj_ty, val);
     } else {
@@ -18199,6 +18218,7 @@ fn zirArrayInit(
     inst: Zir.Inst.Index,
     is_ref: bool,
 ) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const gpa = sema.gpa;
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
@@ -18208,8 +18228,7 @@ fn zirArrayInit(
     assert(args.len >= 2); // array_ty + at least one element
 
     const array_ty = try sema.resolveType(block, src, args[0]);
-    const sentinel_val = array_ty.sentinel();
-    const mod = sema.mod;
+    const sentinel_val = array_ty.sentinel(mod);
 
     const resolved_args = try gpa.alloc(Air.Inst.Ref, args.len - 1 + @boolToInt(sentinel_val != null));
     defer gpa.free(resolved_args);
@@ -18489,14 +18508,16 @@ fn zirErrorReturnTrace(sema: *Sema, block: *Block) CompileError!Air.Inst.Ref {
 }
 
 fn getErrorReturnTrace(sema: *Sema, block: *Block) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const unresolved_stack_trace_ty = try sema.getBuiltinType("StackTrace");
     const stack_trace_ty = try sema.resolveTypeFields(unresolved_stack_trace_ty);
-    const opt_ptr_stack_trace_ty = try Type.Tag.optional_single_mut_pointer.create(sema.arena, stack_trace_ty);
+    const ptr_stack_trace_ty = try mod.singleMutPtrType(stack_trace_ty);
+    const opt_ptr_stack_trace_ty = try Type.optional(sema.arena, ptr_stack_trace_ty, mod);
 
     if (sema.owner_func != null and
         sema.owner_func.?.calls_or_awaits_errorable_fn and
-        sema.mod.comp.bin_file.options.error_return_tracing and
-        sema.mod.backendSupportsFeature(.error_return_trace))
+        mod.comp.bin_file.options.error_return_tracing and
+        mod.backendSupportsFeature(.error_return_trace))
     {
         return block.addTy(.err_return_trace, opt_ptr_stack_trace_ty);
     }
@@ -18585,8 +18606,11 @@ fn zirUnaryMath(
     switch (operand_ty.zigTypeTag(mod)) {
         .Vector => {
             const scalar_ty = operand_ty.scalarType(mod);
-            const vec_len = operand_ty.vectorLen();
-            const result_ty = try Type.vector(sema.arena, vec_len, scalar_ty);
+            const vec_len = operand_ty.vectorLen(mod);
+            const result_ty = try mod.vectorType(.{
+                .len = vec_len,
+                .child = scalar_ty.ip_index,
+            });
             if (try sema.resolveMaybeUndefVal(operand)) |val| {
                 if (val.isUndef())
                     return sema.addConstUndef(result_ty);
@@ -18730,12 +18754,15 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in
             const len_val = struct_val[0];
             const child_val = struct_val[1];
 
-            const len = len_val.toUnsignedInt(mod);
+            const len = @intCast(u32, len_val.toUnsignedInt(mod));
             const child_ty = child_val.toType();
 
             try sema.checkVectorElemType(block, src, child_ty);
 
-            const ty = try Type.vector(sema.arena, len, try child_ty.copy(sema.arena));
+            const ty = try mod.vectorType(.{
+                .len = len,
+                .child = child_ty.ip_index,
+            });
             return sema.addType(ty);
         },
         .Float => {
@@ -18872,7 +18899,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in
 
             const child_ty = try child_val.toType().copy(sema.arena);
 
-            const ty = try Type.optional(sema.arena, child_ty);
+            const ty = try Type.optional(sema.arena, child_ty, mod);
             return sema.addType(ty);
         },
         .ErrorUnion => {
@@ -18912,7 +18939,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in
                 // TODO use reflection instead of magic numbers here
                 // error_set: type,
                 const name_val = struct_val[0];
-                const name_str = try name_val.toAllocatedBytes(Type.initTag(.const_slice_u8), sema.arena, sema.mod);
+                const name_str = try name_val.toAllocatedBytes(Type.const_slice_u8, sema.arena, sema.mod);
 
                 const kv = try mod.getErrorValue(name_str);
                 const gop = names.getOrPutAssumeCapacity(kv.key);
@@ -19038,7 +19065,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in
                 const value_val = field_struct_val[1];
 
                 const field_name = try name_val.toAllocatedBytes(
-                    Type.initTag(.const_slice_u8),
+                    Type.const_slice_u8,
                     new_decl_arena_allocator,
                     sema.mod,
                 );
@@ -19215,7 +19242,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in
                 const alignment_val = field_struct_val[2];
 
                 const field_name = try name_val.toAllocatedBytes(
-                    Type.initTag(.const_slice_u8),
+                    Type.const_slice_u8,
                     new_decl_arena_allocator,
                     sema.mod,
                 );
@@ -19482,7 +19509,7 @@ fn reifyStruct(
         }
 
         const field_name = try name_val.toAllocatedBytes(
-            Type.initTag(.const_slice_u8),
+            Type.const_slice_u8,
             new_decl_arena_allocator,
             mod,
         );
@@ -19626,7 +19653,7 @@ fn zirAddrSpaceCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Inst
 
     try sema.checkPtrOperand(block, ptr_src, ptr_ty);
 
-    var ptr_info = ptr_ty.ptrInfo().data;
+    var ptr_info = ptr_ty.ptrInfo(mod);
     const src_addrspace = ptr_info.@"addrspace";
     if (!target_util.addrSpaceCastIsValid(sema.mod.getTarget(), src_addrspace, dest_addrspace)) {
         const msg = msg: {
@@ -19641,7 +19668,7 @@ fn zirAddrSpaceCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Inst
     ptr_info.@"addrspace" = dest_addrspace;
     const dest_ptr_ty = try Type.ptr(sema.arena, sema.mod, ptr_info);
     const dest_ty = if (ptr_ty.zigTypeTag(mod) == .Optional)
-        try Type.optional(sema.arena, dest_ptr_ty)
+        try Type.optional(sema.arena, dest_ptr_ty, mod)
     else
         dest_ptr_ty;
@@ -19731,6 +19758,7 @@ fn zirCVaStart(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData)
 }
 
 fn zirTypeName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
     const ty = try sema.resolveType(block, ty_src, inst_data.operand);
@@ -19738,10 +19766,10 @@ fn zirTypeName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     var anon_decl = try block.startAnonDecl();
     defer anon_decl.deinit();
 
-    const bytes = try ty.nameAllocArena(anon_decl.arena(), sema.mod);
+    const bytes = try ty.nameAllocArena(anon_decl.arena(), mod);
 
     const new_decl = try anon_decl.finish(
-        try Type.Tag.array_u8_sentinel_0.create(anon_decl.arena(), bytes.len),
+        try Type.array(anon_decl.arena(), bytes.len, Value.zero, Type.u8, mod),
         try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]),
         0, // default alignment
     );
@@ -19842,7 +19870,7 @@ fn zirIntToPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     const elem_ty = ptr_ty.elemType2(mod);
     const ptr_align = try ptr_ty.ptrAlignmentAdvanced(mod, sema);
 
-    if (ptr_ty.isSlice()) {
+    if (ptr_ty.isSlice(mod)) {
         const msg = msg: {
             const msg = try sema.errMsg(block, type_src, "integer cannot be converted to slice type '{}'", .{ptr_ty.fmt(sema.mod)});
             errdefer msg.destroy(sema.gpa);
@@ -19987,8 +20015,8 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
     try sema.checkPtrType(block, dest_ty_src, dest_ty);
     try sema.checkPtrOperand(block, operand_src, operand_ty);
 
-    const operand_info = operand_ty.ptrInfo().data;
-    const dest_info = dest_ty.ptrInfo().data;
+    const operand_info = operand_ty.ptrInfo(mod);
+    const dest_info = dest_ty.ptrInfo(mod);
     if (!operand_info.mutable and dest_info.mutable) {
         const msg = msg: {
             const msg = try sema.errMsg(block, src, "cast discards const qualifier", .{});
@@ -20042,12 +20070,11 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
     const aligned_dest_ty = if (operand_align <= dest_align) dest_ty else blk: {
         // Unwrap the pointer (or pointer-like optional) type, set alignment, and re-wrap into result
         if (dest_ty.zigTypeTag(mod) == .Optional) {
-            var buf: Type.Payload.ElemType = undefined;
-            var dest_ptr_info = dest_ty.optionalChild(&buf).ptrInfo().data;
+            var dest_ptr_info = dest_ty.optionalChild(mod).ptrInfo(mod);
             dest_ptr_info.@"align" = operand_align;
-            break :blk try Type.optional(sema.arena, try Type.ptr(sema.arena, sema.mod, dest_ptr_info));
+            break :blk try Type.optional(sema.arena, try Type.ptr(sema.arena, sema.mod, dest_ptr_info), mod);
         } else {
-            var dest_ptr_info = dest_ty.ptrInfo().data;
+            var dest_ptr_info = dest_ty.ptrInfo(mod);
             dest_ptr_info.@"align" = operand_align;
             break :blk try Type.ptr(sema.arena, sema.mod, dest_ptr_info);
         }
@@ -20110,6 +20137,7 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
 }
 
 fn zirConstCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
     const src = LazySrcLoc.nodeOffset(extra.node);
     const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
@@ -20117,7 +20145,7 @@ fn zirConstCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData
     const operand_ty = sema.typeOf(operand);
     try sema.checkPtrOperand(block, operand_src, operand_ty);
 
-    var ptr_info = operand_ty.ptrInfo().data;
+    var ptr_info = operand_ty.ptrInfo(mod);
     ptr_info.mutable = true;
     const dest_ty = try Type.ptr(sema.arena, sema.mod, ptr_info);
 
@@ -20130,6 +20158,7 @@ fn zirConstCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData
 }
 
 fn zirVolatileCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
     const src = LazySrcLoc.nodeOffset(extra.node);
     const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
@@ -20137,7 +20166,7 @@ fn zirVolatileCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstD
     const operand_ty = sema.typeOf(operand);
     try sema.checkPtrOperand(block, operand_src, operand_ty);
 
-    var ptr_info = operand_ty.ptrInfo().data;
+    var ptr_info = operand_ty.ptrInfo(mod);
     ptr_info.@"volatile" = false;
     const dest_ty = try Type.ptr(sema.arena, sema.mod, ptr_info);
@@ -20163,7 +20192,10 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     const operand_scalar_ty = try sema.checkIntOrVectorAllowComptime(block, operand_ty, operand_src);
     const is_vector = operand_ty.zigTypeTag(mod) == .Vector;
     const dest_ty = if (is_vector)
-        try Type.vector(sema.arena, operand_ty.vectorLen(), dest_scalar_ty)
+        try mod.vectorType(.{
+            .len = operand_ty.vectorLen(mod),
+            .child = dest_scalar_ty.ip_index,
+        })
     else
         dest_scalar_ty;
@@ -20218,7 +20250,7 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
             );
         }
         var elem_buf: Value.ElemValueBuffer = undefined;
-        const elems = try sema.arena.alloc(Value, operand_ty.vectorLen());
+        const elems = try sema.arena.alloc(Value, operand_ty.vectorLen(mod));
         for (elems, 0..) |*elem, i| {
             const elem_val = val.elemValueBuffer(sema.mod, i, &elem_buf);
             elem.* = try elem_val.intTrunc(operand_scalar_ty, sema.arena, dest_info.signedness, dest_info.bits, sema.mod);
@@ -20245,7 +20277,7 @@ fn zirAlignCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
 
     try sema.checkPtrOperand(block, ptr_src, ptr_ty);
 
-    var ptr_info = ptr_ty.ptrInfo().data;
+    var ptr_info = ptr_ty.ptrInfo(mod);
     ptr_info.@"align" = dest_align;
     var dest_ty = try Type.ptr(sema.arena, sema.mod, ptr_info);
     if (ptr_ty.zigTypeTag(mod) == .Optional) {
@@ -20314,8 +20346,11 @@ fn zirBitCount(
     const result_scalar_ty = try mod.smallestUnsignedInt(bits);
     switch (operand_ty.zigTypeTag(mod)) {
         .Vector => {
-            const vec_len = operand_ty.vectorLen();
-            const result_ty = try Type.vector(sema.arena, vec_len, result_scalar_ty);
+            const vec_len = operand_ty.vectorLen(mod);
+            const result_ty = try mod.vectorType(.{
+                .len = vec_len,
+                .child = result_scalar_ty.ip_index,
+            });
             if (try sema.resolveMaybeUndefVal(operand)) |val| {
                 if (val.isUndef()) return sema.addConstUndef(result_ty);
@@ -20388,7 +20423,7 @@ fn zirByteSwap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
             if (val.isUndef())
                 return sema.addConstUndef(operand_ty);
 
-            const vec_len = operand_ty.vectorLen();
+            const vec_len = operand_ty.vectorLen(mod);
             var elem_buf: Value.ElemValueBuffer = undefined;
             const elems = try sema.arena.alloc(Value, vec_len);
             for (elems, 0..) |*elem, i| {
@@ -20437,7 +20472,7 @@ fn zirBitReverse(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
             if (val.isUndef())
                 return sema.addConstUndef(operand_ty);
 
-            const vec_len = operand_ty.vectorLen();
+            const vec_len = operand_ty.vectorLen(mod);
             var elem_buf: Value.ElemValueBuffer = undefined;
             const elems = try sema.arena.alloc(Value, vec_len);
             for (elems, 0..) |*elem, i| {
@@ -20546,7 +20581,7 @@ fn checkInvalidPtrArithmetic(
 ) CompileError!void {
     const mod = sema.mod;
     switch (try ty.zigTypeTagOrPoison(mod)) {
-        .Pointer => switch (ty.ptrSize()) {
+        .Pointer => switch (ty.ptrSize(mod)) {
             .One, .Slice => return,
             .Many, .C => return sema.fail(
                 block,
@@ -20676,7 +20711,7 @@ fn checkNumericType(
     const mod = sema.mod;
     switch (ty.zigTypeTag(mod)) {
         .ComptimeFloat, .Float, .ComptimeInt, .Int => {},
-        .Vector => switch (ty.childType().zigTypeTag(mod)) {
+        .Vector => switch (ty.childType(mod).zigTypeTag(mod)) {
            .ComptimeFloat, .Float, .ComptimeInt, .Int => {},
             else => |t| return sema.fail(block, ty_src, "expected number, found '{}'", .{t}),
         },
@@ -20726,7 +20761,7 @@ fn checkAtomicPtrOperand(
 
     const ptr_ty = sema.typeOf(ptr);
     const ptr_data = switch (try ptr_ty.zigTypeTagOrPoison(mod)) {
-        .Pointer => ptr_ty.ptrInfo().data,
+        .Pointer => ptr_ty.ptrInfo(mod),
         else => {
             const wanted_ptr_ty = try Type.ptr(sema.arena, sema.mod, wanted_ptr_data);
             _ = try sema.coerce(block, wanted_ptr_ty, ptr, ptr_src);
@@ -20797,7 +20832,7 @@ fn checkIntOrVector(
     switch (try operand_ty.zigTypeTagOrPoison(mod)) {
         .Int => return operand_ty,
         .Vector => {
-            const elem_ty = operand_ty.childType();
+            const elem_ty = operand_ty.childType(mod);
             switch (try elem_ty.zigTypeTagOrPoison(mod)) {
                 .Int => return elem_ty,
                 else => return sema.fail(block, operand_src, "expected vector of integers; found vector of '{}'", .{
@@ -20821,7 +20856,7 @@ fn checkIntOrVectorAllowComptime(
     switch (try operand_ty.zigTypeTagOrPoison(mod)) {
         .Int, .ComptimeInt => return operand_ty,
         .Vector => {
-            const elem_ty = operand_ty.childType();
+            const elem_ty = operand_ty.childType(mod);
             switch (try elem_ty.zigTypeTagOrPoison(mod)) {
                 .Int, .ComptimeInt => return elem_ty,
                 else => return sema.fail(block, operand_src, "expected vector of integers; found vector of '{}'", .{
@@ -20870,7 +20905,7 @@ fn checkSimdBinOp(
     const rhs_ty = sema.typeOf(uncasted_rhs);
 
     try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
-    var vec_len: ?usize = if (lhs_ty.zigTypeTag(mod) == .Vector) lhs_ty.vectorLen() else null;
+    var vec_len: ?usize = if (lhs_ty.zigTypeTag(mod) == .Vector) lhs_ty.vectorLen(mod) else null;
     const result_ty = try sema.resolvePeerTypes(block, src, &.{ uncasted_lhs, uncasted_rhs }, .{
         .override = &[_]?LazySrcLoc{ lhs_src, rhs_src },
     });
@@ -20912,8 +20947,8 @@ fn checkVectorizableBinaryOperands(
     };
 
     if (lhs_is_vector and rhs_is_vector) {
-        const lhs_len = lhs_ty.arrayLen();
-        const rhs_len = rhs_ty.arrayLen();
+        const lhs_len = lhs_ty.arrayLen(mod);
+        const rhs_len = rhs_ty.arrayLen(mod);
         if (lhs_len != rhs_len) {
             const msg = msg: {
                 const msg = try sema.errMsg(block, src, "vector length mismatch", .{});
@@ -20966,7 +21001,7 @@ fn resolveExportOptions(
 
     const name_operand = try sema.fieldVal(block, src, options, "name", name_src);
     const name_val = try sema.resolveConstValue(block, name_src, name_operand, "name of exported value must be comptime-known");
-    const name_ty = Type.initTag(.const_slice_u8);
+    const name_ty = Type.const_slice_u8;
     const name = try name_val.toAllocatedBytes(name_ty, sema.arena, mod);
 
     const linkage_operand = try sema.fieldVal(block, src, options, "linkage", linkage_src);
@@ -20975,7 +21010,7 @@ fn resolveExportOptions(
 
     const section_operand = try sema.fieldVal(block, src, options, "section", section_src);
     const section_opt_val = try sema.resolveConstValue(block, section_src, section_operand, "linksection of exported value must be comptime-known");
-    const section_ty = Type.initTag(.const_slice_u8);
+    const section_ty = Type.const_slice_u8;
     const section = if (section_opt_val.optionalValue(mod)) |section_val|
         try section_val.toAllocatedBytes(section_ty, sema.arena, mod)
     else
@@ -21087,7 +21122,7 @@ fn zirCmpxchg(
         return sema.fail(block, failure_order_src, "failure atomic ordering must not be Release or AcqRel", .{});
     }
 
-    const result_ty = try Type.optional(sema.arena, elem_ty);
+    const result_ty = try Type.optional(sema.arena, elem_ty, mod);
 
     // special case zero bit types
     if ((try sema.typeHasOnePossibleValue(elem_ty)) != null) {
@@ -21133,6 +21168,7 @@ fn zirCmpxchg(
 }
 
 fn zirSplat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
     const len_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
@@ -21141,9 +21177,9 @@ fn zirSplat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I
     const scalar = try sema.resolveInst(extra.rhs);
     const scalar_ty = sema.typeOf(scalar);
     try sema.checkVectorElemType(block, scalar_src, scalar_ty);
-    const vector_ty = try Type.Tag.vector.create(sema.arena, .{
+    const vector_ty = try mod.vectorType(.{
         .len = len,
-        .elem_type = scalar_ty,
+        .child = scalar_ty.ip_index,
     });
     if (try sema.resolveMaybeUndefVal(scalar)) |scalar_val| {
         if (scalar_val.isUndef()) return sema.addConstUndef(vector_ty);
@@ -21172,7 +21208,7 @@ fn zirReduce(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
         return sema.fail(block, operand_src, "expected vector, found '{}'", .{operand_ty.fmt(mod)});
     }
 
-    const scalar_ty = operand_ty.childType();
+    const scalar_ty = operand_ty.childType(mod);
 
     // Type-check depending on operation.
     switch (operation) {
@@ -21190,7 +21226,7 @@ fn zirReduce(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
         },
     }
 
-    const vec_len = operand_ty.vectorLen();
+    const vec_len = operand_ty.vectorLen(mod);
     if (vec_len == 0) {
         // TODO re-evaluate if we should introduce a "neutral value" for some operations,
         // e.g. zero for add and one for mul.
@@ -21243,12 +21279,12 @@ fn zirShuffle(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
     var mask_ty = sema.typeOf(mask);
 
     const mask_len = switch (sema.typeOf(mask).zigTypeTag(mod)) {
-        .Array, .Vector => sema.typeOf(mask).arrayLen(),
+        .Array, .Vector => sema.typeOf(mask).arrayLen(mod),
         else => return sema.fail(block, mask_src, "expected vector or array, found '{}'", .{sema.typeOf(mask).fmt(sema.mod)}),
     };
-    mask_ty = try Type.Tag.vector.create(sema.arena, .{
-        .len = mask_len,
-        .elem_type = Type.i32,
+    mask_ty = try mod.vectorType(.{
+        .len = @intCast(u32, mask_len),
+        .child = .i32_type,
     });
     mask = try sema.coerce(block, mask_ty, mask, mask_src);
     const mask_val = try sema.resolveConstMaybeUndefVal(block, mask_src, mask, "shuffle mask must be comptime-known");
@@ -21272,13 +21308,13 @@ fn analyzeShuffle(
     var a = a_arg;
     var b = b_arg;
 
-    const res_ty = try Type.Tag.vector.create(sema.arena, .{
+    const res_ty = try mod.vectorType(.{
         .len = mask_len,
-        .elem_type = elem_ty,
+        .child = elem_ty.ip_index,
     });
 
     var maybe_a_len = switch (sema.typeOf(a).zigTypeTag(mod)) {
-        .Array, .Vector => sema.typeOf(a).arrayLen(),
+        .Array, .Vector => sema.typeOf(a).arrayLen(mod),
         .Undefined => null,
         else => return sema.fail(block, a_src, "expected vector or array with element type '{}', found '{}'", .{
             elem_ty.fmt(sema.mod),
@@ -21286,7 +21322,7 @@ fn analyzeShuffle(
         }),
     };
     var maybe_b_len = switch (sema.typeOf(b).zigTypeTag(mod)) {
-        .Array, .Vector => sema.typeOf(b).arrayLen(),
+        .Array, .Vector => sema.typeOf(b).arrayLen(mod),
         .Undefined => null,
         else => return sema.fail(block, b_src, "expected vector or array with element type '{}', found '{}'", .{
             elem_ty.fmt(sema.mod),
@@ -21296,16 +21332,16 @@ fn analyzeShuffle(
     if (maybe_a_len == null and maybe_b_len == null) {
         return sema.addConstUndef(res_ty);
     }
-    const a_len = maybe_a_len orelse maybe_b_len.?;
-    const b_len = maybe_b_len orelse a_len;
+    const a_len = @intCast(u32, maybe_a_len orelse maybe_b_len.?);
+    const b_len = @intCast(u32, maybe_b_len orelse a_len);
 
-    const a_ty = try Type.Tag.vector.create(sema.arena, .{
+    const a_ty = try mod.vectorType(.{
         .len = a_len,
-        .elem_type = elem_ty,
+        .child = elem_ty.ip_index,
     });
-    const b_ty = try Type.Tag.vector.create(sema.arena, .{
+    const b_ty = try mod.vectorType(.{
         .len = b_len,
-        .elem_type = elem_ty,
+        .child = elem_ty.ip_index,
     });
 
     if (maybe_a_len == null) a = try sema.addConstUndef(a_ty) else a = try sema.coerce(block, a_ty, a, a_src);
@@ -21437,15 +21473,21 @@ fn zirSelect(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) C
     const pred_ty = sema.typeOf(pred_uncoerced);
 
     const vec_len_u64 = switch (try pred_ty.zigTypeTagOrPoison(mod)) {
-        .Vector, .Array => pred_ty.arrayLen(),
+        .Vector, .Array => pred_ty.arrayLen(mod),
         else => return sema.fail(block, pred_src, "expected vector or array, found '{}'", .{pred_ty.fmt(sema.mod)}),
     };
-    const vec_len = try sema.usizeCast(block, pred_src, vec_len_u64);
+    const vec_len = @intCast(u32, try sema.usizeCast(block, pred_src, vec_len_u64));
 
-    const bool_vec_ty = try Type.vector(sema.arena, vec_len, Type.bool);
+    const bool_vec_ty = try mod.vectorType(.{
+        .len = vec_len,
+        .child = .bool_type,
+    });
     const pred = try sema.coerce(block, bool_vec_ty, pred_uncoerced, pred_src);
 
-    const vec_ty = try Type.vector(sema.arena, vec_len, elem_ty);
+    const vec_ty = try mod.vectorType(.{
+        .len = vec_len,
+        .child = elem_ty.ip_index,
+    });
     const a = try sema.coerce(block, vec_ty, try sema.resolveInst(extra.a), a_src);
     const b = try sema.coerce(block, vec_ty, try sema.resolveInst(extra.b), b_src);
@@ -21854,7 +21896,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr
     }
 
     try sema.checkPtrOperand(block, ptr_src, field_ptr_ty);
-    const field_ptr_ty_info = field_ptr_ty.ptrInfo().data;
+    const field_ptr_ty_info = field_ptr_ty.ptrInfo(mod);
 
     var ptr_ty_data: Type.Payload.Pointer.Data = .{
         .pointee_type = parent_ty.structFieldType(field_index),
@@ -22052,8 +22094,8 @@ fn analyzeMinMax(
     }
 
     const refined_ty = if (orig_ty.zigTypeTag(mod) == .Vector) blk: {
-        const elem_ty = orig_ty.childType();
-        const len = orig_ty.vectorLen();
+        const elem_ty = orig_ty.childType(mod);
+        const len = orig_ty.vectorLen(mod);
         if (len == 0) break :blk orig_ty;
         if (elem_ty.isAnyFloat()) break :blk orig_ty; // can't refine floats
 
@@ -22068,7 +22110,10 @@ fn analyzeMinMax(
         }
 
         const refined_elem_ty = try mod.intFittingRange(cur_min, cur_max);
-        break :blk try Type.vector(sema.arena, len, refined_elem_ty);
+        break :blk try mod.vectorType(.{
+            .len = len,
+            .child = refined_elem_ty.ip_index,
+        });
     } else blk: {
         if (orig_ty.isAnyFloat()) break :blk orig_ty; // can't refine floats
         if (val.isUndef()) break :blk orig_ty; // can't refine undef
@@ -22129,8 +22174,8 @@ fn analyzeMinMax(
         if (known_undef) break :refine; // can't refine undef
         const unrefined_ty = sema.typeOf(cur_minmax.?);
         const is_vector = unrefined_ty.zigTypeTag(mod) == .Vector;
-        const comptime_elem_ty = if (is_vector) comptime_ty.childType() else comptime_ty;
-        const unrefined_elem_ty = if (is_vector) unrefined_ty.childType() else unrefined_ty;
+        const comptime_elem_ty = if (is_vector) comptime_ty.childType(mod) else comptime_ty;
+        const unrefined_elem_ty = if (is_vector) unrefined_ty.childType(mod) else unrefined_ty;
 
         if (unrefined_elem_ty.isAnyFloat()) break :refine; // we can't refine floats
 
@@ -22150,7 +22195,10 @@ fn analyzeMinMax(
         const final_elem_ty = try mod.intFittingRange(min_val, max_val);
 
         const final_ty = if (is_vector)
-            try Type.vector(sema.arena, unrefined_ty.vectorLen(), final_elem_ty)
+            try mod.vectorType(.{
+                .len = unrefined_ty.vectorLen(mod),
+                .child = final_elem_ty.ip_index,
+            })
         else
             final_elem_ty;
 
@@ -22165,7 +22213,7 @@ fn analyzeMinMax(
 
 fn upgradeToArrayPtr(sema: *Sema, block: *Block, ptr: Air.Inst.Ref, len: u64) !Air.Inst.Ref {
     const mod = sema.mod;
-    const info = sema.typeOf(ptr).ptrInfo().data;
+    const info = sema.typeOf(ptr).ptrInfo(mod);
     if (info.size == .One) {
         // Already an array pointer.
         return ptr;
@@ -22659,7 +22707,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
         const body = sema.code.extra[extra_index..][0..body_len];
         extra_index += body.len;
 
-        const ty = Type.initTag(.const_slice_u8);
+        const ty = Type.const_slice_u8;
         const val = try sema.resolveGenericBody(block, section_src, body, inst, ty, "linksection must be comptime-known");
         if (val.isGenericPoison()) {
             break :blk FuncLinkSection{ .generic = {} };
@@ -22943,7 +22991,7 @@ fn resolveExternOptions(
 
     const name_ref = try sema.fieldVal(block, src, options, "name", name_src);
     const name_val = try sema.resolveConstValue(block, name_src, name_ref, "name of the extern symbol must be comptime-known");
-    const name = try name_val.toAllocatedBytes(Type.initTag(.const_slice_u8), sema.arena, mod);
+    const name = try name_val.toAllocatedBytes(Type.const_slice_u8, sema.arena, mod);
 
     const library_name_inst = try sema.fieldVal(block, src, options, "library_name", library_src);
     const library_name_val = try sema.resolveConstValue(block, library_src, library_name_inst, "library in which extern symbol is must be comptime-known");
@@ -22957,7 +23005,7 @@ fn resolveExternOptions(
 
     const library_name = if (!library_name_val.isNull(mod)) blk: {
         const payload = library_name_val.castTag(.opt_payload).?.data;
-        const library_name = try payload.toAllocatedBytes(Type.initTag(.const_slice_u8), sema.arena, mod);
+        const library_name = try payload.toAllocatedBytes(Type.const_slice_u8, sema.arena, mod);
         if (library_name.len == 0) {
             return sema.fail(block, library_src, "library name cannot be empty", .{});
         }
@@ -22994,7 +23042,7 @@ fn zirBuiltinExtern(
     if (!ty.isPtrAtRuntime(mod)) {
         return sema.fail(block, ty_src, "expected (optional) pointer", .{});
     }
-    if (!try sema.validateExternType(ty.childType(), .other)) {
+    if (!try sema.validateExternType(ty.childType(mod), .other)) {
         const msg = msg: {
             const msg = try sema.errMsg(block, ty_src, "extern symbol cannot have type '{}'", .{ty.fmt(mod)});
             errdefer msg.destroy(sema.gpa);
@@ -23014,7 +23062,7 @@ fn zirBuiltinExtern(
     };
 
     if (options.linkage == .Weak and !ty.ptrAllowsZero(mod)) {
-        ty = try Type.optional(sema.arena, ty);
+        ty = try Type.optional(sema.arena, ty, mod);
     }
 
     // TODO check duplicate extern
@@ -23194,7 +23242,7 @@ fn validateRunTimeType(
         => return false,
 
         .Pointer => {
-            const elem_ty = ty.childType();
+            const elem_ty = ty.childType(mod);
             switch (elem_ty.zigTypeTag(mod)) {
                 .Opaque => return true,
                 .Fn => return elem_ty.isFnOrHasRuntimeBits(mod),
@@ -23204,11 +23252,10 @@ fn validateRunTimeType(
         .Opaque => return is_extern,
 
         .Optional => {
-            var buf: Type.Payload.ElemType = undefined;
-            const child_ty = ty.optionalChild(&buf);
+            const child_ty = ty.optionalChild(mod);
             return sema.validateRunTimeType(child_ty, is_extern);
         },
-        .Array, .Vector => ty = ty.elemType(),
+        .Array, .Vector => ty = ty.childType(mod),
 
         .ErrorUnion => ty = ty.errorUnionPayload(),
@@ -23277,7 +23324,7 @@ fn explainWhyTypeIsComptimeInner(
         },
 
         .Array, .Vector => {
-            try sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty.elemType(), type_set);
+            try sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty.childType(mod), type_set);
        },
        .Pointer => {
             const elem_ty = ty.elemType2(mod);
@@ -23295,12 +23342,11 @@ fn explainWhyTypeIsComptimeInner(
                 }
                 return;
             }
-            try sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty.elemType(), type_set);
+            try sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty.childType(mod), type_set);
         },

         .Optional => {
-            var buf: Type.Payload.ElemType = undefined;
-            try sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty.optionalChild(&buf), type_set);
+            try sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty.optionalChild(mod), type_set);
         },
         .ErrorUnion => {
             try sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty.errorUnionPayload(), type_set);
@@ -23451,7 +23497,7 @@ fn explainWhyTypeIsNotExtern(
             if (ty.isSlice(mod)) {
                 try mod.errNoteNonLazy(src_loc, msg, "slices have no guaranteed in-memory representation", .{});
             } else {
-                const pointee_ty = ty.childType();
+                const pointee_ty = ty.childType(mod);
                 try mod.errNoteNonLazy(src_loc, msg, "pointer to comptime-only type '{}'", .{pointee_ty.fmt(sema.mod)});
                 try sema.explainWhyTypeIsComptime(msg, src_loc, pointee_ty);
             }
@@ -23698,7 +23744,7 @@ fn panicWithMsg(
         .@"addrspace" = target_util.defaultAddressSpace(target, .global_constant), // TODO might need a place that is more dynamic
     });
     const null_stack_trace = try sema.addConstant(
-        try Type.optional(arena, ptr_stack_trace_ty),
+        try Type.optional(arena, ptr_stack_trace_ty, mod),
         Value.null,
     );
     const args: [3]Air.Inst.Ref = .{ msg_inst, null_stack_trace, .null_value };
@@ -23927,7 +23973,7 @@ fn fieldVal(
     const is_pointer_to = object_ty.isSinglePointer(mod);
 
     const inner_ty = if (is_pointer_to)
-        object_ty.childType()
+        object_ty.childType(mod)
     else
         object_ty;
 
@@ -23936,12 +23982,12 @@ fn fieldVal(
             if (mem.eql(u8, field_name, "len")) {
                 return sema.addConstant(
                     Type.usize,
-                    try Value.Tag.int_u64.create(arena, inner_ty.arrayLen()),
+                    try Value.Tag.int_u64.create(arena, inner_ty.arrayLen(mod)),
                 );
             } else if (mem.eql(u8, field_name, "ptr") and is_pointer_to) {
-                const ptr_info = object_ty.ptrInfo().data;
+                const ptr_info = object_ty.ptrInfo(mod);
                 const result_ty = try Type.ptr(sema.arena, sema.mod, .{
-                    .pointee_type = ptr_info.pointee_type.childType(),
+                    .pointee_type = ptr_info.pointee_type.childType(mod),
                     .sentinel = ptr_info.sentinel,
                     .@"align" = ptr_info.@"align",
                     .@"addrspace" = ptr_info.@"addrspace",
@@ -23964,7 +24010,7 @@ fn fieldVal(
             }
         },
         .Pointer => {
-            const ptr_info = inner_ty.ptrInfo().data;
+            const ptr_info = inner_ty.ptrInfo(mod);
             if (ptr_info.size == .Slice) {
                 if (mem.eql(u8, field_name, "ptr")) {
                     const slice = if (is_pointer_to)
@@ -24107,7 +24153,7 @@ fn fieldPtr(
     const object_ptr_src = src; // TODO better source location
     const object_ptr_ty = sema.typeOf(object_ptr);
     const object_ty = switch (object_ptr_ty.zigTypeTag(mod)) {
-        .Pointer => object_ptr_ty.elemType(),
+        .Pointer => object_ptr_ty.childType(mod),
         else => return sema.fail(block, object_ptr_src, "expected pointer, found '{}'", .{object_ptr_ty.fmt(sema.mod)}),
     };
 
@@ -24117,7 +24163,7 @@ fn fieldPtr(
     const is_pointer_to = object_ty.isSinglePointer(mod);
 
     const inner_ty = if (is_pointer_to)
-        object_ty.childType()
+        object_ty.childType(mod)
     else
         object_ty;
 
@@ -24128,7 +24174,7 @@ fn fieldPtr(
                 defer anon_decl.deinit();
                 return sema.analyzeDeclRef(try anon_decl.finish(
                     Type.usize,
-                    try Value.Tag.int_u64.create(anon_decl.arena(), inner_ty.arrayLen()),
+                    try Value.Tag.int_u64.create(anon_decl.arena(), inner_ty.arrayLen(mod)),
                     0, // default alignment
                 ));
             } else {
@@ -24154,9 +24200,9 @@ fn fieldPtr(
 
                 const result_ty = try Type.ptr(sema.arena, sema.mod, .{
                     .pointee_type = slice_ptr_ty,
-                    .mutable = attr_ptr_ty.ptrIsMutable(),
+                    .mutable = attr_ptr_ty.ptrIsMutable(mod),
                     .@"volatile" = attr_ptr_ty.isVolatilePtr(),
-                    .@"addrspace" = attr_ptr_ty.ptrAddressSpace(),
+                    .@"addrspace" = attr_ptr_ty.ptrAddressSpace(mod),
                 });
 
                 if (try sema.resolveDefinedValue(block, object_ptr_src, inner_ptr)) |val| {
@@ -24175,9 +24221,9 @@ fn fieldPtr(
             } else if (mem.eql(u8, field_name, "len")) {
                 const result_ty = try Type.ptr(sema.arena, sema.mod, .{
                     .pointee_type = Type.usize,
-                    .mutable = attr_ptr_ty.ptrIsMutable(),
+                    .mutable = attr_ptr_ty.ptrIsMutable(mod),
                     .@"volatile" = attr_ptr_ty.isVolatilePtr(),
-                    .@"addrspace" = attr_ptr_ty.ptrAddressSpace(),
+                    .@"addrspace" = attr_ptr_ty.ptrAddressSpace(mod),
                 });
 
                 if (try sema.resolveDefinedValue(block, object_ptr_src, inner_ptr)) |val| {
@@ -24329,14 +24375,14 @@ fn fieldCallBind(
     const mod = sema.mod;
     const raw_ptr_src = src; // TODO better source location
     const raw_ptr_ty = sema.typeOf(raw_ptr);
-    const inner_ty = if (raw_ptr_ty.zigTypeTag(mod) == .Pointer and (raw_ptr_ty.ptrSize() == .One or raw_ptr_ty.ptrSize() == .C))
-        raw_ptr_ty.childType()
+    const inner_ty = if (raw_ptr_ty.zigTypeTag(mod) == .Pointer and (raw_ptr_ty.ptrSize(mod) == .One or raw_ptr_ty.ptrSize(mod) == .C))
+        raw_ptr_ty.childType(mod)
     else
         return sema.fail(block, raw_ptr_src, "expected single pointer, found '{}'", .{raw_ptr_ty.fmt(sema.mod)});
 
     // Optionally dereference a second pointer to get the concrete type.
-    const is_double_ptr = inner_ty.zigTypeTag(mod) == .Pointer and inner_ty.ptrSize() == .One;
-    const concrete_ty = if (is_double_ptr) inner_ty.childType() else inner_ty;
+    const is_double_ptr = inner_ty.zigTypeTag(mod) == .Pointer and inner_ty.ptrSize(mod) == .One;
+    const concrete_ty = if (is_double_ptr) inner_ty.childType(mod) else inner_ty;
     const ptr_ty = if (is_double_ptr) inner_ty else raw_ptr_ty;
     const object_ptr = if (is_double_ptr)
         try sema.analyzeLoad(block, src, raw_ptr, src)
@@ -24404,9 +24450,9 @@ fn fieldCallBind(
                     // zig fmt: off
                     if (first_param_type.isGenericPoison() or (
                             first_param_type.zigTypeTag(mod) == .Pointer and
-                            (first_param_type.ptrSize() == .One or
-                            first_param_type.ptrSize() == .C) and
-                            first_param_type.childType().eql(concrete_ty, sema.mod)))
+                            (first_param_type.ptrSize(mod) == .One or
+                            first_param_type.ptrSize(mod) == .C) and
+                            first_param_type.childType(mod).eql(concrete_ty, sema.mod)))
                     {
                         // zig fmt: on
                         // Note that if the param type is generic poison, we know that it must
@@ -24425,8 +24471,7 @@ fn fieldCallBind(
                             .arg0_inst = deref,
                         } };
                     } else if (first_param_type.zigTypeTag(mod) == .Optional) {
-                        var opt_buf: Type.Payload.ElemType = undefined;
-                        const child = first_param_type.optionalChild(&opt_buf);
+                        const child = first_param_type.optionalChild(mod);
                         if (child.eql(concrete_ty, sema.mod)) {
                             const deref = try sema.analyzeLoad(block, src, object_ptr, src);
                             return .{ .method = .{
@@ -24434,8 +24479,8 @@ fn fieldCallBind(
                                 .arg0_inst = deref,
                             } };
                         } else if (child.zigTypeTag(mod) == .Pointer and
-                            child.ptrSize() == .One and
-                            child.childType().eql(concrete_ty, sema.mod))
+                            child.ptrSize(mod) == .One and
+                            child.childType(mod).eql(concrete_ty, sema.mod))
                         {
                             return .{ .method = .{
                                 .func_inst = decl_val,
@@ -24482,15 +24527,15 @@ fn finishFieldCallBind(
     field_index: u32,
     object_ptr: Air.Inst.Ref,
 ) CompileError!ResolvedFieldCallee {
+    const mod = sema.mod;
     const arena = sema.arena;
     const ptr_field_ty = try Type.ptr(arena, sema.mod, .{
         .pointee_type = field_ty,
-        .mutable = ptr_ty.ptrIsMutable(),
-        .@"addrspace" = ptr_ty.ptrAddressSpace(),
+        .mutable = ptr_ty.ptrIsMutable(mod),
+        .@"addrspace" = ptr_ty.ptrAddressSpace(mod),
     });
 
-    const mod = sema.mod;
-    const container_ty = ptr_ty.childType();
+    const container_ty = ptr_ty.childType(mod);
     if (container_ty.zigTypeTag(mod) == .Struct) {
         if (container_ty.structFieldValueComptime(mod, field_index)) |default_val| {
             return .{ .direct = try sema.addConstant(field_ty, default_val) };
@@ -24618,7 +24663,7 @@ fn structFieldPtrByIndex(
     const struct_obj = struct_ty.castTag(.@"struct").?.data;
     const field = struct_obj.fields.values()[field_index];
     const struct_ptr_ty = sema.typeOf(struct_ptr);
-    const struct_ptr_ty_info = struct_ptr_ty.ptrInfo().data;
+    const struct_ptr_ty_info = struct_ptr_ty.ptrInfo(mod);
 
     var ptr_ty_data: Type.Payload.Pointer.Data = .{
         .pointee_type = field.ty,
@@ -24696,7 +24741,7 @@ fn structFieldPtrByIndex(
             ptr_field_ty,
             try Value.Tag.field_ptr.create(sema.arena, .{
                 .container_ptr = struct_ptr_val,
-                .container_ty = struct_ptr_ty.childType(),
+                .container_ty = struct_ptr_ty.childType(mod),
                 .field_index = field_index,
             }),
         );
@@ -24846,9 +24891,9 @@ fn unionFieldPtr(
     const field = union_obj.fields.values()[field_index];
     const ptr_field_ty = try Type.ptr(arena, sema.mod, .{
         .pointee_type = field.ty,
-        .mutable = union_ptr_ty.ptrIsMutable(),
+        .mutable = union_ptr_ty.ptrIsMutable(mod),
         .@"volatile" = union_ptr_ty.isVolatilePtr(),
-        .@"addrspace" = union_ptr_ty.ptrAddressSpace(),
+        .@"addrspace" = union_ptr_ty.ptrAddressSpace(mod),
     });
     const enum_field_index = @intCast(u32, union_obj.tag_ty.enumFieldIndex(field_name).?);
@@ -25009,7 +25054,7 @@ fn elemPtr(
 
     const indexable_ptr_ty = sema.typeOf(indexable_ptr);
     const indexable_ty = switch (indexable_ptr_ty.zigTypeTag(mod)) {
-        .Pointer => indexable_ptr_ty.elemType(),
+        .Pointer => indexable_ptr_ty.childType(mod),
         else => return sema.fail(block, indexable_ptr_src, "expected pointer, found '{}'", .{indexable_ptr_ty.fmt(sema.mod)}),
     };
     try checkIndexable(sema, block, src, indexable_ty);
@@ -25046,7 +25091,7 @@ fn elemPtrOneLayerOnly(
 
     try checkIndexable(sema, block, src, indexable_ty);
 
-    switch (indexable_ty.ptrSize()) {
+    switch (indexable_ty.ptrSize(mod)) {
         .Slice => return sema.elemPtrSlice(block, src, indexable_src, indexable, elem_index_src, elem_index, oob_safety),
         .Many, .C => {
             const maybe_ptr_val = try sema.resolveDefinedValue(block, indexable_src, indexable);
@@ -25065,7 +25110,7 @@ fn elemPtrOneLayerOnly(
             return block.addPtrElemPtr(indexable, elem_index, result_ty);
         },
         .One => {
-            assert(indexable_ty.childType().zigTypeTag(mod) == .Array); // Guaranteed by checkIndexable
+            assert(indexable_ty.childType(mod).zigTypeTag(mod) == .Array); // Guaranteed by checkIndexable
            return sema.elemPtrArray(block, src, indexable_src, indexable, elem_index_src, elem_index, init, oob_safety);
        },
    }
@@ -25091,7 +25136,7 @@ fn elemVal(
     const elem_index = try sema.coerce(block, Type.usize, elem_index_uncasted, elem_index_src);
 
     switch (indexable_ty.zigTypeTag(mod)) {
-        .Pointer => switch (indexable_ty.ptrSize()) {
+        .Pointer => switch (indexable_ty.ptrSize(mod)) {
             .Slice => return sema.elemValSlice(block, src, indexable_src, indexable, elem_index_src, elem_index, oob_safety),
             .Many, .C => {
                 const maybe_indexable_val = try sema.resolveDefinedValue(block, indexable_src, indexable);
@@ -25112,7 +25157,7 @@ fn elemVal(
                 return block.addBinOp(.ptr_elem_val, indexable, elem_index);
             },
             .One => {
-                assert(indexable_ty.childType().zigTypeTag(mod) == .Array); // Guaranteed by checkIndexable
+                assert(indexable_ty.childType(mod).zigTypeTag(mod) == .Array); // Guaranteed by checkIndexable
                 const elem_ptr = try sema.elemPtr(block, indexable_src, indexable, elem_index, elem_index_src, false, oob_safety);
                 return sema.analyzeLoad(block, indexable_src, elem_ptr, elem_index_src);
             },
@@ -25171,7 +25216,7 @@ fn tupleFieldPtr(
 ) CompileError!Air.Inst.Ref {
     const mod = sema.mod;
     const tuple_ptr_ty = sema.typeOf(tuple_ptr);
-    const tuple_ty = tuple_ptr_ty.childType();
+    const tuple_ty = tuple_ptr_ty.childType(mod);
     _ = try sema.resolveTypeFields(tuple_ty);
     const field_count = tuple_ty.structFieldCount();
@@ -25188,9 +25233,9 @@ fn tupleFieldPtr(
     const field_ty = tuple_ty.structFieldType(field_index);
     const ptr_field_ty = try Type.ptr(sema.arena, sema.mod, .{
         .pointee_type = field_ty,
-        .mutable = tuple_ptr_ty.ptrIsMutable(),
+        .mutable = tuple_ptr_ty.ptrIsMutable(mod),
         .@"volatile" = tuple_ptr_ty.isVolatilePtr(),
-        .@"addrspace" = tuple_ptr_ty.ptrAddressSpace(),
+        .@"addrspace" = tuple_ptr_ty.ptrAddressSpace(mod),
     });
 
     if (tuple_ty.structFieldValueComptime(mod, field_index)) |default_val| {
@@ -25271,10 +25316,10 @@ fn elemValArray(
 ) CompileError!Air.Inst.Ref {
     const mod = sema.mod;
     const array_ty = sema.typeOf(array);
-    const array_sent = array_ty.sentinel();
-    const array_len = array_ty.arrayLen();
+    const array_sent = array_ty.sentinel(mod);
+    const array_len = array_ty.arrayLen(mod);
     const array_len_s = array_len + @boolToInt(array_sent != null);
-    const elem_ty = array_ty.childType();
+    const elem_ty = array_ty.childType(mod);
 
     if (array_len_s == 0) {
         return sema.fail(block, array_src, "indexing into empty array is not allowed", .{});
@@ -25335,9 +25380,9 @@ fn elemPtrArray(
 ) CompileError!Air.Inst.Ref {
     const mod = sema.mod;
     const array_ptr_ty = sema.typeOf(array_ptr);
-    const array_ty = array_ptr_ty.childType();
-    const array_sent = array_ty.sentinel() != null;
-    const array_len = array_ty.arrayLen();
+    const array_ty = array_ptr_ty.childType(mod);
+    const array_sent = array_ty.sentinel(mod) != null;
+    const array_len = array_ty.arrayLen(mod);
     const array_len_s = array_len + @boolToInt(array_sent);
 
     if (array_len_s == 0) {
@@ -25396,7 +25441,7 @@ fn elemValSlice(
 ) CompileError!Air.Inst.Ref {
     const mod = sema.mod;
     const slice_ty = sema.typeOf(slice);
-    const slice_sent = slice_ty.sentinel() != null;
+    const slice_sent = slice_ty.sentinel(mod) != null;
     const elem_ty = slice_ty.elemType2(mod);
 
     var runtime_src = slice_src;
@@ -25453,7 +25498,7 @@ fn elemPtrSlice(
 ) CompileError!Air.Inst.Ref {
     const mod = sema.mod;
     const slice_ty = sema.typeOf(slice);
-    const slice_sent = slice_ty.sentinel() != null;
+    const slice_sent = slice_ty.sentinel(mod) != null;
 
     const maybe_undef_slice_val = try sema.resolveMaybeUndefVal(slice);
     // The index must not be undefined since it can be out of bounds.
@@ -25614,7 +25659,7 @@ fn coerceExtra(
             }
 
             // T to ?T
-            const child_type = try dest_ty.optionalChildAlloc(sema.arena);
+            const child_type = dest_ty.optionalChild(mod);
             const intermediate = sema.coerceExtra(block, child_type, inst, inst_src, .{ .report_err = false }) catch |err| switch (err) {
                 error.NotCoercible => {
                     if (in_memory_result == .no_match) {
@@ -25628,7 +25673,7 @@ fn coerceExtra(
             return try sema.wrapOptional(block, dest_ty, intermediate, inst_src);
         },
         .Pointer => pointer: {
-            const dest_info = dest_ty.ptrInfo().data;
+            const dest_info = dest_ty.ptrInfo(mod);
 
             // Function body to function pointer.
             if (inst_ty.zigTypeTag(mod) == .Fn) {
@@ -25643,11 +25688,11 @@ fn coerceExtra(
                 if (dest_info.size != .One) break :single_item;
                 if (!inst_ty.isSinglePointer(mod)) break :single_item;
                 if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :pointer;
-                const ptr_elem_ty = inst_ty.childType();
+                const ptr_elem_ty = inst_ty.childType(mod);
                 const array_ty = dest_info.pointee_type;
                 if (array_ty.zigTypeTag(mod) != .Array) break :single_item;
-                const array_elem_ty = array_ty.childType();
-                if (array_ty.arrayLen() != 1) break :single_item;
+                const array_elem_ty = array_ty.childType(mod);
+                if (array_ty.arrayLen(mod) != 1) break :single_item;
                 const dest_is_mut = dest_info.mutable;
                 switch (try sema.coerceInMemoryAllowed(block, array_elem_ty, ptr_elem_ty, dest_is_mut, target, dest_ty_src, inst_src)) {
                     .ok => {},
@@ -25660,9 +25705,9 @@ fn coerceExtra(
             src_array_ptr: {
                 if (!inst_ty.isSinglePointer(mod)) break :src_array_ptr;
                 if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :pointer;
-                const array_ty = inst_ty.childType();
+                const array_ty = inst_ty.childType(mod);
                 if (array_ty.zigTypeTag(mod) != .Array) break :src_array_ptr;
-                const array_elem_type = array_ty.childType();
+                const array_elem_type = array_ty.childType(mod);
                 const dest_is_mut = dest_info.mutable;
 
                 const dst_elem_type = dest_info.pointee_type;
@@ -25680,7 +25725,7 @@ fn coerceExtra(
                 }
 
                 if (dest_info.sentinel) |dest_sent| {
-                    if (array_ty.sentinel()) |inst_sent| {
+                    if (array_ty.sentinel(mod)) |inst_sent| {
                         if (!dest_sent.eql(inst_sent, dst_elem_type, sema.mod)) {
                             in_memory_result = .{ .ptr_sentinel = .{
                                 .actual = inst_sent,
@@ -25721,7 +25766,7 @@ fn coerceExtra(
                 if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :src_c_ptr;
                 // In this case we must add a safety check because the C pointer
                 // could be null.
-                const src_elem_ty = inst_ty.childType();
+                const src_elem_ty = inst_ty.childType(mod);
                 const dest_is_mut = dest_info.mutable;
                 const dst_elem_type = dest_info.pointee_type;
                 switch (try sema.coerceInMemoryAllowed(block, dst_elem_type, src_elem_ty, dest_is_mut, target, dest_ty_src, inst_src)) {
@@ -25784,7 +25829,7 @@ fn coerceExtra(
                 },
                 .Pointer => p: {
                     if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :p;
-                    const inst_info = inst_ty.ptrInfo().data;
+                    const inst_info = inst_ty.ptrInfo(mod);
                     switch (try sema.coerceInMemoryAllowed(
                         block,
                         dest_info.pointee_type,
@@ -25814,7 +25859,7 @@ fn coerceExtra(
                 .Union => {
                     // pointer to anonymous struct to pointer to union
                     if (inst_ty.isSinglePointer(mod) and
-                        inst_ty.childType().isAnonStruct() and
+                        inst_ty.childType(mod).isAnonStruct() and
                         sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result))
                     {
                         return sema.coerceAnonStructToUnionPtrs(block, dest_ty, dest_ty_src, inst, inst_src);
@@ -25823,7 +25868,7 @@ fn coerceExtra(
                 .Struct => {
                     // pointer to anonymous struct to pointer to struct
                     if (inst_ty.isSinglePointer(mod) and
-                        inst_ty.childType().isAnonStruct() and
+                        inst_ty.childType(mod).isAnonStruct() and
                         sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result))
                     {
                         return sema.coerceAnonStructToStructPtrs(block, dest_ty, dest_ty_src, inst, inst_src) catch |err| switch (err) {
@@ -25835,7 +25880,7 @@ fn coerceExtra(
                 .Array => {
                     // pointer to tuple to pointer to array
                     if (inst_ty.isSinglePointer(mod) and
-                        inst_ty.childType().isTuple() and
+                        inst_ty.childType(mod).isTuple() and
                         sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result))
                     {
                         return sema.coerceTupleToArrayPtrs(block, dest_ty, dest_ty_src, inst, inst_src);
@@ -25854,7 +25899,7 @@ fn coerceExtra(
                     }
 
                     if (!inst_ty.isSinglePointer(mod)) break :to_slice;
-                    const inst_child_ty = inst_ty.childType();
+                    const inst_child_ty = inst_ty.childType(mod);
                     if (!inst_child_ty.isTuple()) break :to_slice;
 
                     // empty tuple to zero-length slice
@@ -25887,7 +25932,7 @@ fn coerceExtra(
                 .Many => p: {
                     if (!inst_ty.isSlice(mod)) break :p;
                     if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :p;
-                    const inst_info = inst_ty.ptrInfo().data;
+                    const inst_info = inst_ty.ptrInfo(mod);
 
                     switch (try sema.coerceInMemoryAllowed(
                         block,
@@ -26196,9 +26241,8 @@ fn coerceExtra(
     }
 
     // ?T to T
-    var buf: Type.Payload.ElemType = undefined;
     if (inst_ty.zigTypeTag(mod) == .Optional and
-        (try sema.coerceInMemoryAllowed(block, inst_ty.optionalChild(&buf), dest_ty, false, target, dest_ty_src, inst_src)) == .ok)
+        (try sema.coerceInMemoryAllowed(block, inst_ty.optionalChild(mod), dest_ty, false, target, dest_ty_src, inst_src)) == .ok)
     {
         try sema.errNote(block, inst_src, msg, "cannot convert optional to payload type", .{});
         try sema.errNote(block, inst_src, msg, "consider using '.?', 'orelse', or 'if'", .{});
@@ -26399,10 +26443,8 @@ const InMemoryCoercionResult = union(enum) {
                 cur = pair.child;
             },
             .optional_shape => |pair| {
-                var buf_actual: Type.Payload.ElemType = undefined;
-                var buf_wanted: Type.Payload.ElemType = undefined;
                 try sema.errNote(block, src, msg, "optional type child '{}' cannot cast into optional type child '{}'", .{
-                    pair.actual.optionalChild(&buf_actual).fmt(sema.mod), pair.wanted.optionalChild(&buf_wanted).fmt(sema.mod),
+                    pair.actual.optionalChild(mod).fmt(sema.mod), pair.wanted.optionalChild(mod).fmt(sema.mod),
                 });
                 break;
             },
@@ -26640,10 +26682,8 @@ fn coerceInMemoryAllowed(
     }
 
     // Pointers / Pointer-like Optionals
-    var dest_buf: Type.Payload.ElemType = undefined;
var src_buf: Type.Payload.ElemType = undefined; - const maybe_dest_ptr_ty = try sema.typePtrOrOptionalPtrTy(dest_ty, &dest_buf); - const maybe_src_ptr_ty = try sema.typePtrOrOptionalPtrTy(src_ty, &src_buf); + const maybe_dest_ptr_ty = try sema.typePtrOrOptionalPtrTy(dest_ty); + const maybe_src_ptr_ty = try sema.typePtrOrOptionalPtrTy(src_ty); if (maybe_dest_ptr_ty) |dest_ptr_ty| { if (maybe_src_ptr_ty) |src_ptr_ty| { return try sema.coerceInMemoryAllowedPtrs(block, dest_ty, src_ty, dest_ptr_ty, src_ptr_ty, dest_is_mut, target, dest_src, src_src); @@ -26685,8 +26725,8 @@ fn coerceInMemoryAllowed( // Arrays if (dest_tag == .Array and src_tag == .Array) { - const dest_info = dest_ty.arrayInfo(); - const src_info = src_ty.arrayInfo(); + const dest_info = dest_ty.arrayInfo(mod); + const src_info = src_ty.arrayInfo(mod); if (dest_info.len != src_info.len) { return InMemoryCoercionResult{ .array_len = .{ .actual = src_info.len, @@ -26717,8 +26757,8 @@ fn coerceInMemoryAllowed( // Vectors if (dest_tag == .Vector and src_tag == .Vector) { - const dest_len = dest_ty.vectorLen(); - const src_len = src_ty.vectorLen(); + const dest_len = dest_ty.vectorLen(mod); + const src_len = src_ty.vectorLen(mod); if (dest_len != src_len) { return InMemoryCoercionResult{ .vector_len = .{ .actual = src_len, @@ -26748,8 +26788,8 @@ fn coerceInMemoryAllowed( .wanted = dest_ty, } }; } - const dest_child_type = dest_ty.optionalChild(&dest_buf); - const src_child_type = src_ty.optionalChild(&src_buf); + const dest_child_type = dest_ty.optionalChild(mod); + const src_child_type = src_ty.optionalChild(mod); const child = try sema.coerceInMemoryAllowed(block, dest_child_type, src_child_type, dest_is_mut, target, dest_src, src_src); if (child != .ok) { @@ -27019,8 +27059,8 @@ fn coerceInMemoryAllowedPtrs( src_src: LazySrcLoc, ) !InMemoryCoercionResult { const mod = sema.mod; - const dest_info = dest_ptr_ty.ptrInfo().data; - const src_info = src_ptr_ty.ptrInfo().data; + const dest_info = dest_ptr_ty.ptrInfo(mod); + const src_info = src_ptr_ty.ptrInfo(mod); const ok_ptr_size = src_info.size == dest_info.size or src_info.size == .C or dest_info.size == .C; @@ -27206,11 +27246,12 @@ fn storePtr2( operand_src: LazySrcLoc, air_tag: Air.Inst.Tag, ) CompileError!void { + const mod = sema.mod; const ptr_ty = sema.typeOf(ptr); if (ptr_ty.isConstPtr()) return sema.fail(block, ptr_src, "cannot assign to constant", .{}); - const elem_ty = ptr_ty.childType(); + const elem_ty = ptr_ty.childType(mod); // To generate better code for tuples, we detect a tuple operand here, and // analyze field loads and stores directly. This avoids an extra allocation + memcpy @@ -27221,7 +27262,6 @@ fn storePtr2( // this code does not handle tuple-to-struct coercion which requires dealing with missing // fields. 
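The comment above describes the tuple fast path in `storePtr2`: a tuple operand stored through an array pointer is decomposed into per-field stores instead of an extra allocation plus memcpy. A hedged user-level sketch of code that would take this path (values illustrative):

const std = @import("std");

test "tuple operand stored through an array pointer" {
    var arr: [2]u32 = undefined;
    // The anonymous tuple on the right is stored field-by-field into
    // `arr` rather than being materialized and copied.
    arr = .{ 1, 2 };
    try std.testing.expect(arr[0] == 1 and arr[1] == 2);
}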
const operand_ty = sema.typeOf(uncasted_operand); - const mod = sema.mod; if (operand_ty.isTuple() and elem_ty.zigTypeTag(mod) == .Array) { const field_count = operand_ty.structFieldCount(); var i: u32 = 0; @@ -27247,7 +27287,7 @@ fn storePtr2( // as well as working around an LLVM bug: // https://github.com/ziglang/zig/issues/11154 if (sema.obtainBitCastedVectorPtr(ptr)) |vector_ptr| { - const vector_ty = sema.typeOf(vector_ptr).childType(); + const vector_ty = sema.typeOf(vector_ptr).childType(mod); const vector = sema.coerceExtra(block, vector_ty, uncasted_operand, operand_src, .{ .is_ret = is_ret }) catch |err| switch (err) { error.NotCoercible => unreachable, else => |e| return e, @@ -27288,7 +27328,7 @@ fn storePtr2( try sema.requireRuntimeBlock(block, src, runtime_src); try sema.queueFullTypeResolution(elem_ty); - if (ptr_ty.ptrInfo().data.vector_index == .runtime) { + if (ptr_ty.ptrInfo(mod).vector_index == .runtime) { const ptr_inst = Air.refToIndex(ptr).?; const air_tags = sema.air_instructions.items(.tag); if (air_tags[ptr_inst] == .ptr_elem_ptr) { @@ -27322,8 +27362,8 @@ fn storePtr2( /// pointer. Only if the final element type matches the vector element type, and the /// lengths match. fn obtainBitCastedVectorPtr(sema: *Sema, ptr: Air.Inst.Ref) ?Air.Inst.Ref { - const array_ty = sema.typeOf(ptr).childType(); const mod = sema.mod; + const array_ty = sema.typeOf(ptr).childType(mod); if (array_ty.zigTypeTag(mod) != .Array) return null; var ptr_inst = Air.refToIndex(ptr) orelse return null; const air_datas = sema.air_instructions.items(.data); @@ -27332,7 +27372,6 @@ fn obtainBitCastedVectorPtr(sema: *Sema, ptr: Air.Inst.Ref) ?Air.Inst.Ref { const prev_ptr = air_datas[ptr_inst].ty_op.operand; const prev_ptr_ty = sema.typeOf(prev_ptr); const prev_ptr_child_ty = switch (prev_ptr_ty.tag()) { - .single_mut_pointer => prev_ptr_ty.castTag(.single_mut_pointer).?.data, .pointer => prev_ptr_ty.castTag(.pointer).?.data.pointee_type, else => return null, }; @@ -27342,9 +27381,9 @@ fn obtainBitCastedVectorPtr(sema: *Sema, ptr: Air.Inst.Ref) ?Air.Inst.Ref { // We have a pointer-to-array and a pointer-to-vector. If the elements and // lengths match, return the result. - const vector_ty = sema.typeOf(prev_ptr).childType(); - if (array_ty.childType().eql(vector_ty.childType(), sema.mod) and - array_ty.arrayLen() == vector_ty.vectorLen()) + const vector_ty = sema.typeOf(prev_ptr).childType(mod); + if (array_ty.childType(mod).eql(vector_ty.childType(mod), sema.mod) and + array_ty.arrayLen(mod) == vector_ty.vectorLen(mod)) { return prev_ptr; } else { @@ -27476,14 +27515,14 @@ fn beginComptimePtrMutation( switch (parent.pointee) { .direct => |val_ptr| switch (parent.ty.zigTypeTag(mod)) { .Array, .Vector => { - const check_len = parent.ty.arrayLenIncludingSentinel(); + const check_len = parent.ty.arrayLenIncludingSentinel(mod); if (elem_ptr.index >= check_len) { // TODO have the parent include the decl so we can say "declared here" return sema.fail(block, src, "comptime store of index {d} out of bounds of array length {d}", .{ elem_ptr.index, check_len, }); } - const elem_ty = parent.ty.childType(); + const elem_ty = parent.ty.childType(mod); // We might have a pointer to multiple elements of the array (e.g. a pointer // to a sub-array). 
In this case, we just have to reinterpret the relevant @@ -27510,7 +27549,7 @@ fn beginComptimePtrMutation( defer parent.finishArena(sema.mod); const array_len_including_sentinel = - try sema.usizeCast(block, src, parent.ty.arrayLenIncludingSentinel()); + try sema.usizeCast(block, src, parent.ty.arrayLenIncludingSentinel(mod)); const elems = try arena.alloc(Value, array_len_including_sentinel); @memset(elems, Value.undef); @@ -27536,7 +27575,7 @@ fn beginComptimePtrMutation( defer parent.finishArena(sema.mod); const bytes = val_ptr.castTag(.bytes).?.data; - const dest_len = parent.ty.arrayLenIncludingSentinel(); + const dest_len = parent.ty.arrayLenIncludingSentinel(mod); // bytes.len may be one greater than dest_len because of the case when // assigning `[N:S]T` to `[N]T`. This is allowed; the sentinel is omitted. assert(bytes.len >= dest_len); @@ -27567,13 +27606,13 @@ fn beginComptimePtrMutation( defer parent.finishArena(sema.mod); const str_lit = val_ptr.castTag(.str_lit).?.data; - const dest_len = parent.ty.arrayLenIncludingSentinel(); + const dest_len = parent.ty.arrayLenIncludingSentinel(mod); const bytes = sema.mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; const elems = try arena.alloc(Value, @intCast(usize, dest_len)); for (bytes, 0..) |byte, i| { elems[i] = try Value.Tag.int_u64.create(arena, byte); } - if (parent.ty.sentinel()) |sent_val| { + if (parent.ty.sentinel(mod)) |sent_val| { assert(elems.len == bytes.len + 1); elems[bytes.len] = sent_val; } @@ -27603,7 +27642,7 @@ fn beginComptimePtrMutation( const repeated_val = try val_ptr.castTag(.repeated).?.data.copy(arena); const array_len_including_sentinel = - try sema.usizeCast(block, src, parent.ty.arrayLenIncludingSentinel()); + try sema.usizeCast(block, src, parent.ty.arrayLenIncludingSentinel(mod)); const elems = try arena.alloc(Value, array_len_including_sentinel); if (elems.len > 0) elems[0] = repeated_val; for (elems[1..]) |*elem| { @@ -27906,12 +27945,12 @@ fn beginComptimePtrMutation( }, .opt_payload_ptr => { const opt_ptr = if (ptr_val.castTag(.opt_payload_ptr)) |some| some.data else { - return sema.beginComptimePtrMutation(block, src, ptr_val, try ptr_elem_ty.optionalChildAlloc(sema.arena)); + return sema.beginComptimePtrMutation(block, src, ptr_val, ptr_elem_ty.optionalChild(mod)); }; var parent = try sema.beginComptimePtrMutation(block, src, opt_ptr.container_ptr, opt_ptr.container_ty); switch (parent.pointee) { .direct => |val_ptr| { - const payload_ty = try parent.ty.optionalChildAlloc(sema.arena); + const payload_ty = parent.ty.optionalChild(mod); switch (val_ptr.tag()) { .undef, .null_value => { // An optional has been initialized to undefined at comptime and now we @@ -27984,7 +28023,7 @@ fn beginComptimePtrMutationInner( // Handle the case that the decl is an array and we're actually trying to point to an element. 
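`beginComptimePtrMutation` above is what makes comptime stores through derived pointers work. A minimal sketch of the element-pointer case it handles, assuming a compiler of this vintage:

test "comptime store through an element pointer" {
    comptime {
        var arr = [_]u8{ 1, 2, 3 };
        const p = &arr[1]; // elem_ptr into a comptime-mutable array
        p.* = 9; // routed through beginComptimePtrMutation
        if (arr[1] != 9) unreachable;
    }
}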
if (decl_ty.isArrayOrVector(mod)) { - const decl_elem_ty = decl_ty.childType(); + const decl_elem_ty = decl_ty.childType(mod); if ((try sema.coerceInMemoryAllowed(block, ptr_elem_ty, decl_elem_ty, true, target, src, src)) == .ok) { return ComptimePtrMutationKit{ .decl_ref_mut = decl_ref_mut, @@ -28105,7 +28144,7 @@ fn beginComptimePtrLoad( // If we're loading an elem_ptr that was derived from a different type // than the true type of the underlying decl, we cannot deref directly const ty_matches = if (deref.pointee != null and deref.pointee.?.ty.isArrayOrVector(mod)) x: { - const deref_elem_ty = deref.pointee.?.ty.childType(); + const deref_elem_ty = deref.pointee.?.ty.childType(mod); break :x (try sema.coerceInMemoryAllowed(block, deref_elem_ty, elem_ty, false, target, src, src)) == .ok or (try sema.coerceInMemoryAllowed(block, elem_ty, deref_elem_ty, false, target, src, src)) == .ok; } else false; @@ -28115,12 +28154,12 @@ fn beginComptimePtrLoad( } var array_tv = deref.pointee.?; - const check_len = array_tv.ty.arrayLenIncludingSentinel(); + const check_len = array_tv.ty.arrayLenIncludingSentinel(mod); if (maybe_array_ty) |load_ty| { // It's possible that we're loading a [N]T, in which case we'd like to slice // the pointee array directly from our parent array. - if (load_ty.isArrayOrVector(mod) and load_ty.childType().eql(elem_ty, sema.mod)) { - const N = try sema.usizeCast(block, src, load_ty.arrayLenIncludingSentinel()); + if (load_ty.isArrayOrVector(mod) and load_ty.childType(mod).eql(elem_ty, sema.mod)) { + const N = try sema.usizeCast(block, src, load_ty.arrayLenIncludingSentinel(mod)); deref.pointee = if (elem_ptr.index + N <= check_len) TypedValue{ .ty = try Type.array(sema.arena, N, null, elem_ty, sema.mod), .val = try array_tv.val.sliceArray(sema.mod, sema.arena, elem_ptr.index, elem_ptr.index + N), @@ -28134,7 +28173,7 @@ fn beginComptimePtrLoad( break :blk deref; } if (elem_ptr.index == check_len - 1) { - if (array_tv.ty.sentinel()) |sent| { + if (array_tv.ty.sentinel(mod)) |sent| { deref.pointee = TypedValue{ .ty = elem_ty, .val = sent, @@ -28226,7 +28265,7 @@ fn beginComptimePtrLoad( const payload_ptr = ptr_val.cast(Value.Payload.PayloadPtr).?.data; const payload_ty = switch (ptr_val.tag()) { .eu_payload_ptr => payload_ptr.container_ty.errorUnionPayload(), - .opt_payload_ptr => try payload_ptr.container_ty.optionalChildAlloc(sema.arena), + .opt_payload_ptr => payload_ptr.container_ty.optionalChild(mod), else => unreachable, }; var deref = try sema.beginComptimePtrLoad(block, src, payload_ptr.container_ptr, payload_ptr.container_ty); @@ -28357,12 +28396,13 @@ fn coerceArrayPtrToSlice( inst: Air.Inst.Ref, inst_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; if (try sema.resolveMaybeUndefVal(inst)) |val| { const ptr_array_ty = sema.typeOf(inst); - const array_ty = ptr_array_ty.childType(); + const array_ty = ptr_array_ty.childType(mod); const slice_val = try Value.Tag.slice.create(sema.arena, .{ .ptr = val, - .len = try Value.Tag.int_u64.create(sema.arena, array_ty.arrayLen()), + .len = try Value.Tag.int_u64.create(sema.arena, array_ty.arrayLen(mod)), }); return sema.addConstant(dest_ty, slice_val); } @@ -28371,11 +28411,11 @@ fn coerceArrayPtrToSlice( } fn checkPtrAttributes(sema: *Sema, dest_ty: Type, inst_ty: Type, in_memory_result: *InMemoryCoercionResult) bool { - const dest_info = dest_ty.ptrInfo().data; - const inst_info = inst_ty.ptrInfo().data; const mod = sema.mod; - const len0 = (inst_info.pointee_type.zigTypeTag(mod) == .Array and 
(inst_info.pointee_type.arrayLenIncludingSentinel() == 0 or - (inst_info.pointee_type.arrayLen() == 0 and dest_info.sentinel == null and dest_info.size != .C and dest_info.size != .Many))) or + const dest_info = dest_ty.ptrInfo(mod); + const inst_info = inst_ty.ptrInfo(mod); + const len0 = (inst_info.pointee_type.zigTypeTag(mod) == .Array and (inst_info.pointee_type.arrayLenIncludingSentinel(mod) == 0 or + (inst_info.pointee_type.arrayLen(mod) == 0 and dest_info.sentinel == null and dest_info.size != .C and dest_info.size != .Many))) or (inst_info.pointee_type.isTuple() and inst_info.pointee_type.structFieldCount() == 0); const ok_cv_qualifiers = @@ -28647,7 +28687,8 @@ fn coerceAnonStructToUnionPtrs( ptr_anon_struct: Air.Inst.Ref, anon_struct_src: LazySrcLoc, ) !Air.Inst.Ref { - const union_ty = ptr_union_ty.childType(); + const mod = sema.mod; + const union_ty = ptr_union_ty.childType(mod); const anon_struct = try sema.analyzeLoad(block, anon_struct_src, ptr_anon_struct, anon_struct_src); const union_inst = try sema.coerceAnonStructToUnion(block, union_ty, union_ty_src, anon_struct, anon_struct_src); return sema.analyzeRef(block, union_ty_src, union_inst); @@ -28661,7 +28702,8 @@ fn coerceAnonStructToStructPtrs( ptr_anon_struct: Air.Inst.Ref, anon_struct_src: LazySrcLoc, ) !Air.Inst.Ref { - const struct_ty = ptr_struct_ty.childType(); + const mod = sema.mod; + const struct_ty = ptr_struct_ty.childType(mod); const anon_struct = try sema.analyzeLoad(block, anon_struct_src, ptr_anon_struct, anon_struct_src); const struct_inst = try sema.coerceTupleToStruct(block, struct_ty, anon_struct, anon_struct_src); return sema.analyzeRef(block, struct_ty_src, struct_inst); @@ -28676,15 +28718,16 @@ fn coerceArrayLike( inst: Air.Inst.Ref, inst_src: LazySrcLoc, ) !Air.Inst.Ref { + const mod = sema.mod; const inst_ty = sema.typeOf(inst); - const inst_len = inst_ty.arrayLen(); - const dest_len = try sema.usizeCast(block, dest_ty_src, dest_ty.arrayLen()); - const target = sema.mod.getTarget(); + const inst_len = inst_ty.arrayLen(mod); + const dest_len = try sema.usizeCast(block, dest_ty_src, dest_ty.arrayLen(mod)); + const target = mod.getTarget(); if (dest_len != inst_len) { const msg = msg: { const msg = try sema.errMsg(block, inst_src, "expected type '{}', found '{}'", .{ - dest_ty.fmt(sema.mod), inst_ty.fmt(sema.mod), + dest_ty.fmt(mod), inst_ty.fmt(mod), }); errdefer msg.destroy(sema.gpa); try sema.errNote(block, dest_ty_src, msg, "destination has length {d}", .{dest_len}); @@ -28694,8 +28737,8 @@ fn coerceArrayLike( return sema.failWithOwnedErrorMsg(msg); } - const dest_elem_ty = dest_ty.childType(); - const inst_elem_ty = inst_ty.childType(); + const dest_elem_ty = dest_ty.childType(mod); + const inst_elem_ty = inst_ty.childType(mod); const in_memory_result = try sema.coerceInMemoryAllowed(block, dest_elem_ty, inst_elem_ty, false, target, dest_ty_src, inst_src); if (in_memory_result == .ok) { if (try sema.resolveMaybeUndefVal(inst)) |inst_val| { @@ -28749,9 +28792,10 @@ fn coerceTupleToArray( inst: Air.Inst.Ref, inst_src: LazySrcLoc, ) !Air.Inst.Ref { + const mod = sema.mod; const inst_ty = sema.typeOf(inst); - const inst_len = inst_ty.arrayLen(); - const dest_len = dest_ty.arrayLen(); + const inst_len = inst_ty.arrayLen(mod); + const dest_len = dest_ty.arrayLen(mod); if (dest_len != inst_len) { const msg = msg: { @@ -28766,16 +28810,16 @@ fn coerceTupleToArray( return sema.failWithOwnedErrorMsg(msg); } - const dest_elems = try sema.usizeCast(block, dest_ty_src, dest_ty.arrayLenIncludingSentinel()); 
+ const dest_elems = try sema.usizeCast(block, dest_ty_src, dest_ty.arrayLenIncludingSentinel(mod)); const element_vals = try sema.arena.alloc(Value, dest_elems); const element_refs = try sema.arena.alloc(Air.Inst.Ref, dest_elems); - const dest_elem_ty = dest_ty.childType(); + const dest_elem_ty = dest_ty.childType(mod); var runtime_src: ?LazySrcLoc = null; for (element_vals, 0..) |*elem, i_usize| { const i = @intCast(u32, i_usize); if (i_usize == inst_len) { - elem.* = dest_ty.sentinel().?; + elem.* = dest_ty.sentinel(mod).?; element_refs[i] = try sema.addConstant(dest_elem_ty, elem.*); break; } @@ -28812,9 +28856,10 @@ fn coerceTupleToSlicePtrs( ptr_tuple: Air.Inst.Ref, tuple_src: LazySrcLoc, ) !Air.Inst.Ref { - const tuple_ty = sema.typeOf(ptr_tuple).childType(); + const mod = sema.mod; + const tuple_ty = sema.typeOf(ptr_tuple).childType(mod); const tuple = try sema.analyzeLoad(block, tuple_src, ptr_tuple, tuple_src); - const slice_info = slice_ty.ptrInfo().data; + const slice_info = slice_ty.ptrInfo(mod); const array_ty = try Type.array(sema.arena, tuple_ty.structFieldCount(), slice_info.sentinel, slice_info.pointee_type, sema.mod); const array_inst = try sema.coerceTupleToArray(block, array_ty, slice_ty_src, tuple, tuple_src); if (slice_info.@"align" != 0) { @@ -28833,8 +28878,9 @@ fn coerceTupleToArrayPtrs( ptr_tuple: Air.Inst.Ref, tuple_src: LazySrcLoc, ) !Air.Inst.Ref { + const mod = sema.mod; const tuple = try sema.analyzeLoad(block, tuple_src, ptr_tuple, tuple_src); - const ptr_info = ptr_array_ty.ptrInfo().data; + const ptr_info = ptr_array_ty.ptrInfo(mod); const array_ty = ptr_info.pointee_type; const array_inst = try sema.coerceTupleToArray(block, array_ty, array_ty_src, tuple, tuple_src); if (ptr_info.@"align" != 0) { @@ -29231,7 +29277,7 @@ fn analyzeLoad( const mod = sema.mod; const ptr_ty = sema.typeOf(ptr); const elem_ty = switch (ptr_ty.zigTypeTag(mod)) { - .Pointer => ptr_ty.childType(), + .Pointer => ptr_ty.childType(mod), else => return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ty.fmt(sema.mod)}), }; @@ -29245,7 +29291,7 @@ fn analyzeLoad( } } - if (ptr_ty.ptrInfo().data.vector_index == .runtime) { + if (ptr_ty.ptrInfo(mod).vector_index == .runtime) { const ptr_inst = Air.refToIndex(ptr).?; const air_tags = sema.air_instructions.items(.tag); if (air_tags[ptr_inst] == .ptr_elem_ptr) { @@ -29318,8 +29364,7 @@ fn analyzeIsNull( const inverted_non_null_res = if (invert_logic) Air.Inst.Ref.bool_true else Air.Inst.Ref.bool_false; const operand_ty = sema.typeOf(operand); - var buf: Type.Payload.ElemType = undefined; - if (operand_ty.zigTypeTag(mod) == .Optional and operand_ty.optionalChild(&buf).zigTypeTag(mod) == .NoReturn) { + if (operand_ty.zigTypeTag(mod) == .Optional and operand_ty.optionalChild(mod).zigTypeTag(mod) == .NoReturn) { return inverted_non_null_res; } if (operand_ty.zigTypeTag(mod) != .Optional and !operand_ty.isPtrLikeOptional(mod)) { @@ -29339,7 +29384,7 @@ fn analyzePtrIsNonErrComptimeOnly( const mod = sema.mod; const ptr_ty = sema.typeOf(operand); assert(ptr_ty.zigTypeTag(mod) == .Pointer); - const child_ty = ptr_ty.childType(); + const child_ty = ptr_ty.childType(mod); const child_tag = child_ty.zigTypeTag(mod); if (child_tag != .ErrorSet and child_tag != .ErrorUnion) return Air.Inst.Ref.bool_true; @@ -29495,7 +29540,7 @@ fn analyzeSlice( // the slice operand to be a pointer. In the case of a non-array, it will be a double pointer. 
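`analyzeSlice` below normalizes its operand to a pointer; for a pointer to an array (a `.One`-sized pointer whose child is an `.Array`) the bounds are checked against the array length. A user-level sketch of that case, illustrative only:

const std = @import("std");

test "slicing a pointer to an array" {
    var arr = [_]u8{ 1, 2, 3, 4 };
    const ptr = &arr; // *[4]u8: the .One pointer-to-array case
    const s = ptr[1..3]; // end <= array length is enforced
    try std.testing.expect(s.len == 2 and s[0] == 2);
}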
const ptr_ptr_ty = sema.typeOf(ptr_ptr); const ptr_ptr_child_ty = switch (ptr_ptr_ty.zigTypeTag(mod)) { - .Pointer => ptr_ptr_ty.elemType(), + .Pointer => ptr_ptr_ty.childType(mod), else => return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ptr_ty.fmt(sema.mod)}), }; @@ -29506,30 +29551,30 @@ fn analyzeSlice( var ptr_sentinel: ?Value = null; switch (ptr_ptr_child_ty.zigTypeTag(mod)) { .Array => { - ptr_sentinel = ptr_ptr_child_ty.sentinel(); - elem_ty = ptr_ptr_child_ty.childType(); + ptr_sentinel = ptr_ptr_child_ty.sentinel(mod); + elem_ty = ptr_ptr_child_ty.childType(mod); }, - .Pointer => switch (ptr_ptr_child_ty.ptrSize()) { + .Pointer => switch (ptr_ptr_child_ty.ptrSize(mod)) { .One => { - const double_child_ty = ptr_ptr_child_ty.childType(); + const double_child_ty = ptr_ptr_child_ty.childType(mod); if (double_child_ty.zigTypeTag(mod) == .Array) { - ptr_sentinel = double_child_ty.sentinel(); + ptr_sentinel = double_child_ty.sentinel(mod); ptr_or_slice = try sema.analyzeLoad(block, src, ptr_ptr, ptr_src); slice_ty = ptr_ptr_child_ty; array_ty = double_child_ty; - elem_ty = double_child_ty.childType(); + elem_ty = double_child_ty.childType(mod); } else { return sema.fail(block, src, "slice of single-item pointer", .{}); } }, .Many, .C => { - ptr_sentinel = ptr_ptr_child_ty.sentinel(); + ptr_sentinel = ptr_ptr_child_ty.sentinel(mod); ptr_or_slice = try sema.analyzeLoad(block, src, ptr_ptr, ptr_src); slice_ty = ptr_ptr_child_ty; array_ty = ptr_ptr_child_ty; - elem_ty = ptr_ptr_child_ty.childType(); + elem_ty = ptr_ptr_child_ty.childType(mod); - if (ptr_ptr_child_ty.ptrSize() == .C) { + if (ptr_ptr_child_ty.ptrSize(mod) == .C) { if (try sema.resolveDefinedValue(block, ptr_src, ptr_or_slice)) |ptr_val| { if (ptr_val.isNull(mod)) { return sema.fail(block, src, "slice of null pointer", .{}); @@ -29538,11 +29583,11 @@ fn analyzeSlice( } }, .Slice => { - ptr_sentinel = ptr_ptr_child_ty.sentinel(); + ptr_sentinel = ptr_ptr_child_ty.sentinel(mod); ptr_or_slice = try sema.analyzeLoad(block, src, ptr_ptr, ptr_src); slice_ty = ptr_ptr_child_ty; array_ty = ptr_ptr_child_ty; - elem_ty = ptr_ptr_child_ty.childType(); + elem_ty = ptr_ptr_child_ty.childType(mod); }, }, else => return sema.fail(block, src, "slice of non-array type '{}'", .{ptr_ptr_child_ty.fmt(mod)}), @@ -29563,7 +29608,7 @@ fn analyzeSlice( var end_is_len = uncasted_end_opt == .none; const end = e: { if (array_ty.zigTypeTag(mod) == .Array) { - const len_val = try Value.Tag.int_u64.create(sema.arena, array_ty.arrayLen()); + const len_val = try Value.Tag.int_u64.create(sema.arena, array_ty.arrayLen(mod)); if (!end_is_len) { const end = if (by_length) end: { @@ -29574,10 +29619,10 @@ fn analyzeSlice( if (try sema.resolveMaybeUndefVal(end)) |end_val| { const len_s_val = try Value.Tag.int_u64.create( sema.arena, - array_ty.arrayLenIncludingSentinel(), + array_ty.arrayLenIncludingSentinel(mod), ); if (!(try sema.compareAll(end_val, .lte, len_s_val, Type.usize))) { - const sentinel_label: []const u8 = if (array_ty.sentinel() != null) + const sentinel_label: []const u8 = if (array_ty.sentinel(mod) != null) " +1 (sentinel)" else ""; @@ -29617,7 +29662,7 @@ fn analyzeSlice( if (slice_val.isUndef()) { return sema.fail(block, src, "slice of undefined", .{}); } - const has_sentinel = slice_ty.sentinel() != null; + const has_sentinel = slice_ty.sentinel(mod) != null; var int_payload: Value.Payload.U64 = .{ .base = .{ .tag = .int_u64 }, .data = slice_val.sliceLen(mod) + @boolToInt(has_sentinel), @@ -29751,8 +29796,8 @@ fn 
analyzeSlice( try sema.analyzeArithmetic(block, .sub, end, start, src, end_src, start_src, false); const opt_new_len_val = try sema.resolveDefinedValue(block, src, new_len); - const new_ptr_ty_info = sema.typeOf(new_ptr).ptrInfo().data; - const new_allowzero = new_ptr_ty_info.@"allowzero" and sema.typeOf(ptr).ptrSize() != .C; + const new_ptr_ty_info = sema.typeOf(new_ptr).ptrInfo(mod); + const new_allowzero = new_ptr_ty_info.@"allowzero" and sema.typeOf(ptr).ptrSize(mod) != .C; if (opt_new_len_val) |new_len_val| { const new_len_int = new_len_val.toUnsignedInt(mod); @@ -29780,7 +29825,7 @@ fn analyzeSlice( if (slice_ty.isSlice(mod)) { const slice_len_inst = try block.addTyOp(.slice_len, Type.usize, ptr_or_slice); - const actual_len = if (slice_ty.sentinel() == null) + const actual_len = if (slice_ty.sentinel(mod) == null) slice_len_inst else try sema.analyzeArithmetic(block, .add, slice_len_inst, .one, src, end_src, end_src, true); @@ -29839,7 +29884,7 @@ fn analyzeSlice( // requirement: end <= len const opt_len_inst = if (array_ty.zigTypeTag(mod) == .Array) - try sema.addIntUnsigned(Type.usize, array_ty.arrayLenIncludingSentinel()) + try sema.addIntUnsigned(Type.usize, array_ty.arrayLenIncludingSentinel(mod)) else if (slice_ty.isSlice(mod)) blk: { if (try sema.resolveDefinedValue(block, src, ptr_or_slice)) |slice_val| { // we don't need to add one for sentinels because the @@ -29848,7 +29893,7 @@ fn analyzeSlice( } const slice_len_inst = try block.addTyOp(.slice_len, Type.usize, ptr_or_slice); - if (slice_ty.sentinel() == null) break :blk slice_len_inst; + if (slice_ty.sentinel(mod) == null) break :blk slice_len_inst; // we have to add one because slice lengths don't include the sentinel break :blk try sema.analyzeArithmetic(block, .add, slice_len_inst, .one, src, end_src, end_src, true); @@ -30284,7 +30329,10 @@ fn cmpVector( const casted_lhs = try sema.coerce(block, resolved_ty, lhs, lhs_src); const casted_rhs = try sema.coerce(block, resolved_ty, rhs, rhs_src); - const result_ty = try Type.vector(sema.arena, lhs_ty.vectorLen(), Type.bool); + const result_ty = try mod.vectorType(.{ + .len = lhs_ty.vectorLen(mod), + .child = .bool_type, + }); const runtime_src: LazySrcLoc = src: { if (try sema.resolveMaybeUndefVal(casted_lhs)) |lhs_val| { @@ -30484,12 +30532,12 @@ fn resolvePeerTypes( } continue; }, - .Pointer => if (chosen_ty.ptrSize() == .C) continue, + .Pointer => if (chosen_ty.ptrSize(mod) == .C) continue, else => {}, }, .ComptimeInt => switch (chosen_ty_tag) { .Int, .Float, .ComptimeFloat => continue, - .Pointer => if (chosen_ty.ptrSize() == .C) continue, + .Pointer => if (chosen_ty.ptrSize(mod) == .C) continue, else => {}, }, .Float => switch (chosen_ty_tag) { @@ -30654,10 +30702,10 @@ fn resolvePeerTypes( }, }, .Pointer => { - const cand_info = candidate_ty.ptrInfo().data; + const cand_info = candidate_ty.ptrInfo(mod); switch (chosen_ty_tag) { .Pointer => { - const chosen_info = chosen_ty.ptrInfo().data; + const chosen_info = chosen_ty.ptrInfo(mod); seen_const = seen_const or !chosen_info.mutable or !cand_info.mutable; @@ -30690,8 +30738,8 @@ fn resolvePeerTypes( chosen_info.pointee_type.zigTypeTag(mod) == .Array and cand_info.pointee_type.zigTypeTag(mod) == .Array) { - const chosen_elem_ty = chosen_info.pointee_type.childType(); - const cand_elem_ty = cand_info.pointee_type.childType(); + const chosen_elem_ty = chosen_info.pointee_type.childType(mod); + const cand_elem_ty = cand_info.pointee_type.childType(mod); const chosen_ok = .ok == try sema.coerceInMemoryAllowed(block, 
chosen_elem_ty, cand_elem_ty, chosen_info.mutable, target, src, src); if (chosen_ok) { @@ -30757,10 +30805,9 @@ fn resolvePeerTypes( } }, .Optional => { - var opt_child_buf: Type.Payload.ElemType = undefined; - const chosen_ptr_ty = chosen_ty.optionalChild(&opt_child_buf); + const chosen_ptr_ty = chosen_ty.optionalChild(mod); if (chosen_ptr_ty.zigTypeTag(mod) == .Pointer) { - const chosen_info = chosen_ptr_ty.ptrInfo().data; + const chosen_info = chosen_ptr_ty.ptrInfo(mod); seen_const = seen_const or !chosen_info.mutable or !cand_info.mutable; @@ -30777,7 +30824,7 @@ fn resolvePeerTypes( .ErrorUnion => { const chosen_ptr_ty = chosen_ty.errorUnionPayload(); if (chosen_ptr_ty.zigTypeTag(mod) == .Pointer) { - const chosen_info = chosen_ptr_ty.ptrInfo().data; + const chosen_info = chosen_ptr_ty.ptrInfo(mod); seen_const = seen_const or !chosen_info.mutable or !cand_info.mutable; @@ -30802,8 +30849,7 @@ fn resolvePeerTypes( } }, .Optional => { - var opt_child_buf: Type.Payload.ElemType = undefined; - const opt_child_ty = candidate_ty.optionalChild(&opt_child_buf); + const opt_child_ty = candidate_ty.optionalChild(mod); if ((try sema.coerceInMemoryAllowed(block, chosen_ty, opt_child_ty, false, target, src, src)) == .ok) { seen_const = seen_const or opt_child_ty.isConstPtr(); any_are_null = true; @@ -30818,13 +30864,13 @@ fn resolvePeerTypes( }, .Vector => switch (chosen_ty_tag) { .Vector => { - const chosen_len = chosen_ty.vectorLen(); - const candidate_len = candidate_ty.vectorLen(); + const chosen_len = chosen_ty.vectorLen(mod); + const candidate_len = candidate_ty.vectorLen(mod); if (chosen_len != candidate_len) continue; - const chosen_child_ty = chosen_ty.childType(); - const candidate_child_ty = candidate_ty.childType(); + const chosen_child_ty = chosen_ty.childType(mod); + const candidate_child_ty = candidate_ty.childType(mod); if (chosen_child_ty.zigTypeTag(mod) == .Int and candidate_child_ty.zigTypeTag(mod) == .Int) { const chosen_info = chosen_child_ty.intInfo(mod); const candidate_info = candidate_child_ty.intInfo(mod); @@ -30853,8 +30899,8 @@ fn resolvePeerTypes( .Vector => continue, else => {}, }, - .Fn => if (chosen_ty.isSinglePointer(mod) and chosen_ty.isConstPtr() and chosen_ty.childType().zigTypeTag(mod) == .Fn) { - if (.ok == try sema.coerceInMemoryAllowedFns(block, chosen_ty.childType(), candidate_ty, target, src, src)) { + .Fn => if (chosen_ty.isSinglePointer(mod) and chosen_ty.isConstPtr() and chosen_ty.childType(mod).zigTypeTag(mod) == .Fn) { + if (.ok == try sema.coerceInMemoryAllowedFns(block, chosen_ty.childType(mod), candidate_ty, target, src, src)) { continue; } }, @@ -30874,8 +30920,7 @@ fn resolvePeerTypes( continue; }, .Optional => { - var opt_child_buf: Type.Payload.ElemType = undefined; - const opt_child_ty = chosen_ty.optionalChild(&opt_child_buf); + const opt_child_ty = chosen_ty.optionalChild(mod); if ((try sema.coerceInMemoryAllowed(block, opt_child_ty, candidate_ty, false, target, src, src)) == .ok) { continue; } @@ -30949,16 +30994,16 @@ fn resolvePeerTypes( if (convert_to_slice) { // turn *[N]T => []T - const chosen_child_ty = chosen_ty.childType(); - var info = chosen_ty.ptrInfo(); - info.data.sentinel = chosen_child_ty.sentinel(); - info.data.size = .Slice; - info.data.mutable = !(seen_const or chosen_child_ty.isConstPtr()); - info.data.pointee_type = chosen_child_ty.elemType2(mod); - - const new_ptr_ty = try Type.ptr(sema.arena, mod, info.data); + const chosen_child_ty = chosen_ty.childType(mod); + var info = chosen_ty.ptrInfo(mod); + info.sentinel = 
chosen_child_ty.sentinel(mod); + info.size = .Slice; + info.mutable = !(seen_const or chosen_child_ty.isConstPtr()); + info.pointee_type = chosen_child_ty.elemType2(mod); + + const new_ptr_ty = try Type.ptr(sema.arena, mod, info); const opt_ptr_ty = if (any_are_null) - try Type.optional(sema.arena, new_ptr_ty) + try Type.optional(sema.arena, new_ptr_ty, mod) else new_ptr_ty; const set_ty = err_set_ty orelse return opt_ptr_ty; @@ -30970,22 +31015,22 @@ fn resolvePeerTypes( switch (chosen_ty.zigTypeTag(mod)) { .ErrorUnion => { const ptr_ty = chosen_ty.errorUnionPayload(); - var info = ptr_ty.ptrInfo(); - info.data.mutable = false; - const new_ptr_ty = try Type.ptr(sema.arena, mod, info.data); + var info = ptr_ty.ptrInfo(mod); + info.mutable = false; + const new_ptr_ty = try Type.ptr(sema.arena, mod, info); const opt_ptr_ty = if (any_are_null) - try Type.optional(sema.arena, new_ptr_ty) + try Type.optional(sema.arena, new_ptr_ty, mod) else new_ptr_ty; const set_ty = err_set_ty orelse chosen_ty.errorUnionSet(); return try Type.errorUnion(sema.arena, set_ty, opt_ptr_ty, mod); }, .Pointer => { - var info = chosen_ty.ptrInfo(); - info.data.mutable = false; - const new_ptr_ty = try Type.ptr(sema.arena, mod, info.data); + var info = chosen_ty.ptrInfo(mod); + info.mutable = false; + const new_ptr_ty = try Type.ptr(sema.arena, mod, info); const opt_ptr_ty = if (any_are_null) - try Type.optional(sema.arena, new_ptr_ty) + try Type.optional(sema.arena, new_ptr_ty, mod) else new_ptr_ty; const set_ty = err_set_ty orelse return opt_ptr_ty; @@ -30998,7 +31043,7 @@ fn resolvePeerTypes( if (any_are_null) { const opt_ty = switch (chosen_ty.zigTypeTag(mod)) { .Null, .Optional => chosen_ty, - else => try Type.optional(sema.arena, chosen_ty), + else => try Type.optional(sema.arena, chosen_ty, mod), }; const set_ty = err_set_ty orelse return opt_ty; return try Type.errorUnion(sema.arena, set_ty, opt_ty, mod); @@ -31077,13 +31122,12 @@ pub fn resolveTypeLayout(sema: *Sema, ty: Type) CompileError!void { .Struct => return sema.resolveStructLayout(ty), .Union => return sema.resolveUnionLayout(ty), .Array => { - if (ty.arrayLenIncludingSentinel() == 0) return; - const elem_ty = ty.childType(); + if (ty.arrayLenIncludingSentinel(mod) == 0) return; + const elem_ty = ty.childType(mod); return sema.resolveTypeLayout(elem_ty); }, .Optional => { - var buf: Type.Payload.ElemType = undefined; - const payload_ty = ty.optionalChild(&buf); + const payload_ty = ty.optionalChild(mod); // In case of querying the ABI alignment of this optional, we will ask // for hasRuntimeBits() of the payload type, so we need "requires comptime" // to be known already before this function returns. 
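The `.Optional` case above matters because layout depends on whether the payload has runtime bits and is pointer-like. A small sketch of the observable consequence; this is true of Zig generally and shown only for orientation:

const std = @import("std");

test "pointer-like optionals add no storage" {
    // A non-allowzero pointer payload lets ?T encode null as address zero.
    try std.testing.expect(@sizeOf(?*u8) == @sizeOf(*u8));
    // A by-value payload needs a separate has-value flag.
    try std.testing.expect(@sizeOf(?u8) > @sizeOf(u8));
}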
@@ -31343,10 +31387,10 @@ fn checkIndexable(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void { fn checkMemOperand(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void { const mod = sema.mod; if (ty.zigTypeTag(mod) == .Pointer) { - switch (ty.ptrSize()) { + switch (ty.ptrSize(mod)) { .Slice, .Many, .C => return, .One => { - const elem_ty = ty.childType(); + const elem_ty = ty.childType(mod); if (elem_ty.zigTypeTag(mod) == .Array) return; // TODO https://github.com/ziglang/zig/issues/15479 // if (elem_ty.isTuple()) return; @@ -31418,8 +31462,8 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .int_type => false, .ptr_type => @panic("TODO"), .array_type => @panic("TODO"), - .vector_type => @panic("TODO"), - .optional_type => @panic("TODO"), + .vector_type => |vector_type| return sema.resolveTypeRequiresComptime(vector_type.child.toType()), + .opt_type => @panic("TODO"), .error_union_type => @panic("TODO"), .simple_type => |t| switch (t) { .f16, @@ -31478,12 +31522,6 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { }; return switch (ty.tag()) { - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, - .const_slice_u8, - .const_slice_u8_sentinel_0, - .anyerror_void_error_union, .empty_struct_literal, .empty_struct, .error_set, @@ -31491,34 +31529,20 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .error_set_inferred, .error_set_merged, .@"opaque", - .array_u8, - .array_u8_sentinel_0, .enum_simple, => false, - .single_const_pointer_to_comptime_int, - .function, - => true, + .function => true, .inferred_alloc_mut => unreachable, .inferred_alloc_const => unreachable, .array, .array_sentinel, - .vector, - => return sema.resolveTypeRequiresComptime(ty.childType()), + => return sema.resolveTypeRequiresComptime(ty.childType(mod)), - .pointer, - .single_const_pointer, - .single_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - .const_slice, - .mut_slice, - => { - const child_ty = ty.childType(); + .pointer => { + const child_ty = ty.childType(mod); if (child_ty.zigTypeTag(mod) == .Fn) { return child_ty.fnInfo().is_generic; } else { @@ -31526,12 +31550,8 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { } }, - .optional, - .optional_single_mut_pointer, - .optional_single_const_pointer, - => { - var buf: Type.Payload.ElemType = undefined; - return sema.resolveTypeRequiresComptime(ty.optionalChild(&buf)); + .optional => { + return sema.resolveTypeRequiresComptime(ty.optionalChild(mod)); }, .tuple, .anon_struct => { @@ -31609,7 +31629,7 @@ pub fn resolveTypeFully(sema: *Sema, ty: Type) CompileError!void { const mod = sema.mod; switch (ty.zigTypeTag(mod)) { .Pointer => { - const child_ty = try sema.resolveTypeFields(ty.childType()); + const child_ty = try sema.resolveTypeFields(ty.childType(mod)); return sema.resolveTypeFully(child_ty); }, .Struct => switch (ty.tag()) { @@ -31624,10 +31644,9 @@ pub fn resolveTypeFully(sema: *Sema, ty: Type) CompileError!void { else => {}, }, .Union => return sema.resolveUnionFully(ty), - .Array => return sema.resolveTypeFully(ty.childType()), + .Array => return sema.resolveTypeFully(ty.childType(mod)), .Optional => { - var buf: Type.Payload.ElemType = undefined; - return sema.resolveTypeFully(ty.optionalChild(&buf)); + return sema.resolveTypeFully(ty.optionalChild(mod)); }, .ErrorUnion => return sema.resolveTypeFully(ty.errorUnionPayload()), .Fn => { @@ -32897,10 
+32916,14 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { return null; } }, - .ptr_type => @panic("TODO"), + .ptr_type => return null, .array_type => @panic("TODO"), - .vector_type => @panic("TODO"), - .optional_type => @panic("TODO"), + .vector_type => |vector_type| { + if (vector_type.len == 0) return Value.initTag(.empty_array); + if (try sema.typeHasOnePossibleValue(vector_type.child.toType())) |v| return v; + return null; + }, + .opt_type => @panic("TODO"), .error_union_type => @panic("TODO"), .simple_type => |t| switch (t) { .f16, @@ -32963,34 +32986,15 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .error_set_merged, .error_union, .function, - .single_const_pointer_to_comptime_int, .array_sentinel, - .array_u8_sentinel_0, - .const_slice_u8, - .const_slice_u8_sentinel_0, - .const_slice, - .mut_slice, - .optional_single_mut_pointer, - .optional_single_const_pointer, - .anyerror_void_error_union, .error_set_inferred, .@"opaque", - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, .anyframe_T, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - .single_const_pointer, - .single_mut_pointer, .pointer, => return null, .optional => { - var buf: Type.Payload.ElemType = undefined; - const child_ty = ty.optionalChild(&buf); + const child_ty = ty.optionalChild(mod); if (child_ty.isNoReturn()) { return Value.null; } else { @@ -33111,10 +33115,10 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .empty_struct, .empty_struct_literal => return Value.initTag(.empty_struct_value), - .vector, .array, .array_u8 => { - if (ty.arrayLen() == 0) + .array => { + if (ty.arrayLen(mod) == 0) return Value.initTag(.empty_array); - if ((try sema.typeHasOnePossibleValue(ty.elemType())) != null) { + if ((try sema.typeHasOnePossibleValue(ty.childType(mod))) != null) { return Value.initTag(.the_only_possible_value); } return null; @@ -33147,20 +33151,13 @@ pub fn addType(sema: *Sema, ty: Type) !Air.Inst.Ref { .data = .{ .interned = ty.ip_index }, }); return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1)); + } else { + try sema.air_instructions.append(sema.gpa, .{ + .tag = .const_ty, + .data = .{ .ty = ty }, + }); + return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1)); } - switch (ty.tag()) { - .manyptr_u8 => return .manyptr_u8_type, - .manyptr_const_u8 => return .manyptr_const_u8_type, - .single_const_pointer_to_comptime_int => return .single_const_pointer_to_comptime_int_type, - .const_slice_u8 => return .const_slice_u8_type, - .anyerror_void_error_union => return .anyerror_void_error_union_type, - else => {}, - } - try sema.air_instructions.append(sema.gpa, .{ - .tag = .const_ty, - .data = .{ .ty = ty }, - }); - return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1)); } fn addIntUnsigned(sema: *Sema, ty: Type, int: u64) CompileError!Air.Inst.Ref { @@ -33173,6 +33170,15 @@ fn addConstUndef(sema: *Sema, ty: Type) CompileError!Air.Inst.Ref { pub fn addConstant(sema: *Sema, ty: Type, val: Value) SemaError!Air.Inst.Ref { const gpa = sema.gpa; + if (val.ip_index != .none) { + if (@enumToInt(val.ip_index) < Air.ref_start_index) + return @intToEnum(Air.Inst.Ref, @enumToInt(val.ip_index)); + try sema.air_instructions.append(gpa, .{ + .tag = .interned, + .data = .{ .interned = val.ip_index }, + }); + return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1)); + } const ty_inst = try sema.addType(ty); try sema.air_values.append(gpa, val); 
try sema.air_instructions.append(gpa, .{ @@ -33331,7 +33337,8 @@ pub fn analyzeAddressSpace( /// Asserts the value is a pointer and dereferences it. /// Returns `null` if the pointer contents cannot be loaded at comptime. fn pointerDeref(sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value, ptr_ty: Type) CompileError!?Value { - const load_ty = ptr_ty.childType(); + const mod = sema.mod; + const load_ty = ptr_ty.childType(mod); const res = try sema.pointerDerefExtra(block, src, ptr_val, load_ty, true); switch (res) { .runtime_load => return null, @@ -33422,11 +33429,7 @@ fn usizeCast(sema: *Sema, block: *Block, src: LazySrcLoc, int: u64) CompileError /// This can return `error.AnalysisFail` because it sometimes requires resolving whether /// a type has zero bits, which can cause a "foo depends on itself" compile error. /// This logic must be kept in sync with `Type.isPtrLikeOptional`. -fn typePtrOrOptionalPtrTy( - sema: *Sema, - ty: Type, - buf: *Type.Payload.ElemType, -) !?Type { +fn typePtrOrOptionalPtrTy(sema: *Sema, ty: Type) !?Type { const mod = sema.mod; if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) { @@ -33435,14 +33438,14 @@ fn typePtrOrOptionalPtrTy( .C => return ptr_type.elem_type.toType(), .One, .Many => return ty, }, - .optional_type => |o| switch (mod.intern_pool.indexToKey(o.payload_type)) { + .opt_type => |opt_child| switch (mod.intern_pool.indexToKey(opt_child)) { .ptr_type => |ptr_type| switch (ptr_type.size) { .Slice, .C => return null, .Many, .One => { if (ptr_type.is_allowzero) return null; // optionals of zero sized types behave like bools, not pointers - const payload_ty = o.payload_type.toType(); + const payload_ty = opt_child.toType(); if ((try sema.typeHasOnePossibleValue(payload_ty)) != null) { return null; } @@ -33456,25 +33459,9 @@ fn typePtrOrOptionalPtrTy( }; switch (ty.tag()) { - .optional_single_const_pointer, - .optional_single_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - => return ty.optionalChild(buf), - - .single_const_pointer_to_comptime_int, - .single_const_pointer, - .single_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, - => return ty, - - .pointer => switch (ty.ptrSize()) { + .pointer => switch (ty.ptrSize(mod)) { .Slice => return null, - .C => return ty.optionalChild(buf), + .C => return ty.optionalChild(mod), else => return ty, }, @@ -33482,10 +33469,10 @@ fn typePtrOrOptionalPtrTy( .inferred_alloc_mut => unreachable, .optional => { - const child_type = ty.optionalChild(buf); + const child_type = ty.optionalChild(mod); if (child_type.zigTypeTag(mod) != .Pointer) return null; - const info = child_type.ptrInfo().data; + const info = child_type.ptrInfo(mod); switch (info.size) { .Slice, .C => return null, .Many, .One => { @@ -33518,8 +33505,8 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .int_type => return false, .ptr_type => @panic("TODO"), .array_type => @panic("TODO"), - .vector_type => @panic("TODO"), - .optional_type => @panic("TODO"), + .vector_type => |vector_type| return sema.typeRequiresComptime(vector_type.child.toType()), + .opt_type => @panic("TODO"), .error_union_type => @panic("TODO"), .simple_type => |t| return switch (t) { .f16, @@ -33578,12 +33565,6 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { } } return switch (ty.tag()) { - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, - .const_slice_u8, - .const_slice_u8_sentinel_0, - 
.anyerror_void_error_union, .empty_struct_literal, .empty_struct, .error_set, @@ -33591,34 +33572,20 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .error_set_inferred, .error_set_merged, .@"opaque", - .array_u8, - .array_u8_sentinel_0, .enum_simple, => false, - .single_const_pointer_to_comptime_int, - .function, - => true, + .function => true, .inferred_alloc_mut => unreachable, .inferred_alloc_const => unreachable, .array, .array_sentinel, - .vector, - => return sema.typeRequiresComptime(ty.childType()), + => return sema.typeRequiresComptime(ty.childType(mod)), - .pointer, - .single_const_pointer, - .single_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - .const_slice, - .mut_slice, - => { - const child_ty = ty.childType(); + .pointer => { + const child_ty = ty.childType(mod); if (child_ty.zigTypeTag(mod) == .Fn) { return child_ty.fnInfo().is_generic; } else { @@ -33626,12 +33593,8 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { } }, - .optional, - .optional_single_mut_pointer, - .optional_single_const_pointer, - => { - var buf: Type.Payload.ElemType = undefined; - return sema.typeRequiresComptime(ty.optionalChild(&buf)); + .optional => { + return sema.typeRequiresComptime(ty.optionalChild(mod)); }, .tuple, .anon_struct => { @@ -33814,7 +33777,7 @@ fn queueFullTypeResolution(sema: *Sema, ty: Type) !void { fn intAdd(sema: *Sema, lhs: Value, rhs: Value, ty: Type) !Value { const mod = sema.mod; if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try sema.arena.alloc(Value, ty.vectorLen()); + const result_data = try sema.arena.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; @@ -33874,7 +33837,7 @@ fn intSub( ) !Value { const mod = sema.mod; if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try sema.arena.alloc(Value, ty.vectorLen()); + const result_data = try sema.arena.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; @@ -33934,7 +33897,7 @@ fn floatAdd( ) !Value { const mod = sema.mod; if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try sema.arena.alloc(Value, float_type.vectorLen()); + const result_data = try sema.arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; @@ -33992,7 +33955,7 @@ fn floatSub( ) !Value { const mod = sema.mod; if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try sema.arena.alloc(Value, float_type.vectorLen()); + const result_data = try sema.arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; @@ -34050,8 +34013,8 @@ fn intSubWithOverflow( ) !Value.OverflowArithmeticResult { const mod = sema.mod; if (ty.zigTypeTag(mod) == .Vector) { - const overflowed_data = try sema.arena.alloc(Value, ty.vectorLen()); - const result_data = try sema.arena.alloc(Value, ty.vectorLen()); + const overflowed_data = try sema.arena.alloc(Value, ty.vectorLen(mod)); + const result_data = try sema.arena.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) 
|*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; @@ -34105,8 +34068,8 @@ fn floatToInt( ) CompileError!Value { const mod = sema.mod; if (float_ty.zigTypeTag(mod) == .Vector) { - const elem_ty = float_ty.childType(); - const result_data = try sema.arena.alloc(Value, float_ty.vectorLen()); + const elem_ty = float_ty.childType(mod); + const result_data = try sema.arena.alloc(Value, float_ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(sema.mod, i, &buf); @@ -34383,8 +34346,8 @@ fn intAddWithOverflow( ) !Value.OverflowArithmeticResult { const mod = sema.mod; if (ty.zigTypeTag(mod) == .Vector) { - const overflowed_data = try sema.arena.alloc(Value, ty.vectorLen()); - const result_data = try sema.arena.alloc(Value, ty.vectorLen()); + const overflowed_data = try sema.arena.alloc(Value, ty.vectorLen(mod)); + const result_data = try sema.arena.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; @@ -34442,7 +34405,7 @@ fn compareAll( const mod = sema.mod; if (ty.zigTypeTag(mod) == .Vector) { var i: usize = 0; - while (i < ty.vectorLen()) : (i += 1) { + while (i < ty.vectorLen(mod)) : (i += 1) { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf); @@ -34490,7 +34453,7 @@ fn compareVector( ) !Value { const mod = sema.mod; assert(ty.zigTypeTag(mod) == .Vector); - const result_data = try sema.arena.alloc(Value, ty.vectorLen()); + const result_data = try sema.arena.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; @@ -34511,10 +34474,10 @@ fn compareVector( /// This code is duplicated in `analyzePtrArithmetic`. 
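`compareAll` and `compareVector` above lower element-wise comparisons: the former answers whether every lane satisfies the operator, the latter produces one boolean lane per element. A user-level sketch of those semantics (values illustrative):

const std = @import("std");

test "vector comparison produces a bool vector" {
    const a: @Vector(4, i32) = .{ 1, 2, 3, 4 };
    const b: @Vector(4, i32) = .{ 1, 0, 3, 9 };
    const m = a == b; // one bool lane per element (compareVector)
    try std.testing.expect(m[0] and m[2]);
    try std.testing.expect(!@reduce(.And, m)); // compareAll-style check
}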
fn elemPtrType(sema: *Sema, ptr_ty: Type, offset: ?usize) !Type { const mod = sema.mod; - const ptr_info = ptr_ty.ptrInfo().data; + const ptr_info = ptr_ty.ptrInfo(mod); const elem_ty = ptr_ty.elemType2(mod); const allow_zero = ptr_info.@"allowzero" and (offset orelse 0) == 0; - const parent_ty = ptr_ty.childType(); + const parent_ty = ptr_ty.childType(mod); const VI = Type.Payload.Pointer.Data.VectorIndex; @@ -34522,14 +34485,14 @@ fn elemPtrType(sema: *Sema, ptr_ty: Type, offset: ?usize) !Type { host_size: u16 = 0, alignment: u32 = 0, vector_index: VI = .none, - } = if (parent_ty.tag() == .vector and ptr_info.size == .One) blk: { + } = if (parent_ty.isVector(mod) and ptr_info.size == .One) blk: { const elem_bits = elem_ty.bitSize(mod); if (elem_bits == 0) break :blk .{}; const is_packed = elem_bits < 8 or !std.math.isPowerOfTwo(elem_bits); if (!is_packed) break :blk .{}; break :blk .{ - .host_size = @intCast(u16, parent_ty.arrayLen()), + .host_size = @intCast(u16, parent_ty.arrayLen(mod)), .alignment = @intCast(u16, parent_ty.abiAlignment(mod)), .vector_index = if (offset) |some| @intToEnum(VI, some) else .runtime, }; diff --git a/src/TypedValue.zig b/src/TypedValue.zig index 877a8f5f4c..7f599caafb 100644 --- a/src/TypedValue.zig +++ b/src/TypedValue.zig @@ -77,15 +77,6 @@ pub fn print( return writer.writeAll("(variable)"); while (true) switch (val.tag()) { - .single_const_pointer_to_comptime_int_type => return writer.writeAll("*const comptime_int"), - .const_slice_u8_type => return writer.writeAll("[]const u8"), - .const_slice_u8_sentinel_0_type => return writer.writeAll("[:0]const u8"), - .anyerror_void_error_union_type => return writer.writeAll("anyerror!void"), - - .manyptr_u8_type => return writer.writeAll("[*]u8"), - .manyptr_const_u8_type => return writer.writeAll("[*]const u8"), - .manyptr_const_u8_sentinel_0_type => return writer.writeAll("[*:0]const u8"), - .empty_struct_value, .aggregate => { if (level == 0) { return writer.writeAll(".{ ... 
}"); @@ -112,7 +103,7 @@ pub fn print( return writer.writeAll("}"); } else { const elem_ty = ty.elemType2(mod); - const len = ty.arrayLen(); + const len = ty.arrayLen(mod); if (elem_ty.eql(Type.u8, mod)) str: { const max_len = @intCast(usize, std.math.min(len, max_string_len)); @@ -288,7 +279,7 @@ pub fn print( .ty = ty.elemType2(mod), .val = val.castTag(.repeated).?.data, }; - const len = ty.arrayLen(); + const len = ty.arrayLen(mod); const max_len = std.math.min(len, max_aggregate_items); while (i < max_len) : (i += 1) { if (i != 0) try writer.writeAll(", "); @@ -306,7 +297,7 @@ pub fn print( try writer.writeAll(".{ "); try print(.{ .ty = ty.elemType2(mod), - .val = ty.sentinel().?, + .val = ty.sentinel(mod).?, }, writer, level - 1, mod); return writer.writeAll(" }"); }, @@ -364,8 +355,7 @@ pub fn print( }, .opt_payload => { val = val.castTag(.opt_payload).?.data; - var buf: Type.Payload.ElemType = undefined; - ty = ty.optionalChild(&buf); + ty = ty.optionalChild(mod); return print(.{ .ty = ty, .val = val }, writer, level, mod); }, .eu_payload_ptr => { @@ -386,13 +376,8 @@ pub fn print( try writer.writeAll(", &(payload of "); - var ptr_ty: Type.Payload.ElemType = .{ - .base = .{ .tag = .single_mut_pointer }, - .data = data.container_ty, - }; - try print(.{ - .ty = Type.initPayload(&ptr_ty.base), + .ty = mod.singleMutPtrType(data.container_ty) catch @panic("OOM"), .val = data.container_ptr, }, writer, level - 1, mod); @@ -415,13 +400,8 @@ pub fn print( try writer.writeAll(", &(payload of "); - var ptr_ty: Type.Payload.ElemType = .{ - .base = .{ .tag = .single_mut_pointer }, - .data = data.container_ty, - }; - try print(.{ - .ty = Type.initPayload(&ptr_ty.base), + .ty = mod.singleMutPtrType(data.container_ty) catch @panic("OOM"), .val = data.container_ptr, }, writer, level - 1, mod); diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index 503bbdbb02..81169750c1 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -1030,7 +1030,7 @@ fn allocMem( /// Use a pointer instruction as the basis for allocating stack memory. fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 { const mod = self.bin_file.options.module.?; - const elem_ty = self.typeOfIndex(inst).elemType(); + const elem_ty = self.typeOfIndex(inst).childType(mod); if (!elem_ty.hasRuntimeBits(mod)) { // return the stack offset 0. 
Stack offset 0 will be where all @@ -1140,17 +1140,14 @@ fn airAlloc(self: *Self, inst: Air.Inst.Index) !void { } fn airRetPtr(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const result: MCValue = switch (self.ret_mcv) { .none, .register => .{ .ptr_stack_offset = try self.allocMemPtr(inst) }, .stack_offset => blk: { // self.ret_mcv is an address to where this function // should store its result into const ret_ty = self.fn_type.fnReturnType(); - var ptr_ty_payload: Type.Payload.ElemType = .{ - .base = .{ .tag = .single_mut_pointer }, - .data = ret_ty, - }; - const ptr_ty = Type.initPayload(&ptr_ty_payload.base); + const ptr_ty = try mod.singleMutPtrType(ret_ty); // addr_reg will contain the address of where to store the // result into @@ -2406,9 +2403,9 @@ fn ptrArithmetic( assert(rhs_ty.eql(Type.usize, mod)); const ptr_ty = lhs_ty; - const elem_ty = switch (ptr_ty.ptrSize()) { - .One => ptr_ty.childType().childType(), // ptr to array, so get array element type - else => ptr_ty.childType(), + const elem_ty = switch (ptr_ty.ptrSize(mod)) { + .One => ptr_ty.childType(mod).childType(mod), // ptr to array, so get array element type + else => ptr_ty.childType(mod), }; const elem_size = elem_ty.abiSize(mod); @@ -3024,8 +3021,7 @@ fn airOptionalPayload(self: *Self, inst: Air.Inst.Index) !void { fn optionalPayload(self: *Self, inst: Air.Inst.Index, mcv: MCValue, optional_ty: Type) !MCValue { const mod = self.bin_file.options.module.?; - var opt_buf: Type.Payload.ElemType = undefined; - const payload_ty = optional_ty.optionalChild(&opt_buf); + const payload_ty = optional_ty.optionalChild(mod); if (!payload_ty.hasRuntimeBits(mod)) return MCValue.none; if (optional_ty.isPtrLikeOptional(mod)) { // TODO should we reuse the operand here? 
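`optionalPayload` above extracts the payload from an optional `MCValue`; the user-level operation it backs is `.?` unwrapping. A minimal sketch, illustrative only:

const std = @import("std");

test "optional payload unwrap" {
    var maybe: ?u32 = null;
    maybe = 42;
    // Lowered via optionalPayload; for pointer-like optionals the payload
    // shares storage with the optional itself.
    try std.testing.expect(maybe.? == 42);
}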
@@ -3459,7 +3455,7 @@ fn ptrElemVal(
     maybe_inst: ?Air.Inst.Index,
 ) !MCValue {
     const mod = self.bin_file.options.module.?;
-    const elem_ty = ptr_ty.childType();
+    const elem_ty = ptr_ty.childType(mod);
     const elem_size = @intCast(u32, elem_ty.abiSize(mod));
 
     // TODO optimize for elem_sizes of 1, 2, 4, 8
@@ -3617,7 +3613,7 @@ fn reuseOperand(
 
 fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!void {
     const mod = self.bin_file.options.module.?;
-    const elem_ty = ptr_ty.elemType();
+    const elem_ty = ptr_ty.childType(mod);
     const elem_size = elem_ty.abiSize(mod);
 
     switch (ptr) {
@@ -3773,7 +3769,7 @@ fn genInlineMemset(
 ) !void {
     const dst_reg = switch (dst) {
         .register => |r| r,
-        else => try self.copyToTmpRegister(Type.initTag(.manyptr_u8), dst),
+        else => try self.copyToTmpRegister(Type.manyptr_u8, dst),
     };
     const dst_reg_lock = self.register_manager.lockReg(dst_reg);
     defer if (dst_reg_lock) |lock| self.register_manager.unlockReg(lock);
@@ -4096,7 +4092,7 @@ fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, inde
     const mod = self.bin_file.options.module.?;
     const mcv = try self.resolveInst(operand);
     const ptr_ty = self.typeOf(operand);
-    const struct_ty = ptr_ty.childType();
+    const struct_ty = ptr_ty.childType(mod);
     const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod));
     switch (mcv) {
         .ptr_stack_offset => |off| {
@@ -4173,7 +4169,7 @@ fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void {
     const extra = self.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const field_ptr = try self.resolveInst(extra.field_ptr);
-        const struct_ty = self.air.getRefType(ty_pl.ty).childType();
+        const struct_ty = self.air.getRefType(ty_pl.ty).childType(mod);
         const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(extra.field_index, mod));
         switch (field_ptr) {
             .ptr_stack_offset => |off| {
@@ -4254,7 +4250,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
 
     const fn_ty = switch (ty.zigTypeTag(mod)) {
         .Fn => ty,
-        .Pointer => ty.childType(),
+        .Pointer => ty.childType(mod),
         else => unreachable,
     };
 
@@ -4280,11 +4276,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
 
             const ret_ptr_reg = self.registerAlias(.x0, Type.usize);
 
-            var ptr_ty_payload: Type.Payload.ElemType = .{
-                .base = .{ .tag = .single_mut_pointer },
-                .data = ret_ty,
-            };
-            const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+            const ptr_ty = try mod.singleMutPtrType(ret_ty);
 
             try self.register_manager.getReg(ret_ptr_reg, null);
             try self.genSetReg(ptr_ty, ret_ptr_reg, .{ .ptr_stack_offset = stack_offset });
@@ -4453,11 +4445,7 @@ fn airRet(self: *Self, inst: Air.Inst.Index) !void {
             //
             // self.ret_mcv is an address to where this function
             // should store its result into
-            var ptr_ty_payload: Type.Payload.ElemType = .{
-                .base = .{ .tag = .single_mut_pointer },
-                .data = ret_ty,
-            };
-            const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+            const ptr_ty = try mod.singleMutPtrType(ret_ty);
             try self.store(self.ret_mcv, operand, ptr_ty, ret_ty);
         },
         else => unreachable,
@@ -4533,8 +4521,7 @@ fn cmp(
     const mod = self.bin_file.options.module.?;
     const int_ty = switch (lhs_ty.zigTypeTag(mod)) {
         .Optional => blk: {
-            var opt_buffer: Type.Payload.ElemType = undefined;
-            const payload_ty = lhs_ty.optionalChild(&opt_buffer);
+            const payload_ty = lhs_ty.optionalChild(mod);
             if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
                 break :blk Type.u1;
             } else if (lhs_ty.isPtrLikeOptional(mod)) {
@@ -4850,8 +4837,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
 
 fn isNull(self: *Self, operand_bind: ReadArg.Bind, operand_ty: Type) !MCValue {
     const mod = self.bin_file.options.module.?;
     const sentinel: struct { ty: Type, bind: ReadArg.Bind } = if (!operand_ty.isPtrLikeOptional(mod)) blk: {
-        var buf: Type.Payload.ElemType = undefined;
-        const payload_ty = operand_ty.optionalChild(&buf);
+        const payload_ty = operand_ty.optionalChild(mod);
         if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod))
             break :blk .{ .ty = operand_ty, .bind = operand_bind };
 
@@ -4947,11 +4933,12 @@ fn airIsNull(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airIsNullPtr(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const un_op = self.air.instructions.items(.data)[inst].un_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const operand_ptr = try self.resolveInst(un_op);
         const ptr_ty = self.typeOf(un_op);
-        const elem_ty = ptr_ty.elemType();
+        const elem_ty = ptr_ty.childType(mod);
 
         const operand = try self.allocRegOrMem(elem_ty, true, null);
         try self.load(operand, operand_ptr, ptr_ty);
@@ -4973,11 +4960,12 @@ fn airIsNonNull(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airIsNonNullPtr(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const un_op = self.air.instructions.items(.data)[inst].un_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const operand_ptr = try self.resolveInst(un_op);
         const ptr_ty = self.typeOf(un_op);
-        const elem_ty = ptr_ty.elemType();
+        const elem_ty = ptr_ty.childType(mod);
 
         const operand = try self.allocRegOrMem(elem_ty, true, null);
         try self.load(operand, operand_ptr, ptr_ty);
@@ -4999,11 +4987,12 @@ fn airIsErr(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const un_op = self.air.instructions.items(.data)[inst].un_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const operand_ptr = try self.resolveInst(un_op);
         const ptr_ty = self.typeOf(un_op);
-        const elem_ty = ptr_ty.elemType();
+        const elem_ty = ptr_ty.childType(mod);
 
         const operand = try self.allocRegOrMem(elem_ty, true, null);
         try self.load(operand, operand_ptr, ptr_ty);
@@ -5025,11 +5014,12 @@ fn airIsNonErr(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const un_op = self.air.instructions.items(.data)[inst].un_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const operand_ptr = try self.resolveInst(un_op);
         const ptr_ty = self.typeOf(un_op);
-        const elem_ty = ptr_ty.elemType();
+        const elem_ty = ptr_ty.childType(mod);
 
         const operand = try self.allocRegOrMem(elem_ty, true, null);
         try self.load(operand, operand_ptr, ptr_ty);
@@ -5511,11 +5501,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
                 const reg = try self.copyToTmpRegister(ty, mcv);
                 return self.genSetStack(ty, stack_offset, MCValue{ .register = reg });
             } else {
-                var ptr_ty_payload: Type.Payload.ElemType = .{
-                    .base = .{ .tag = .single_mut_pointer },
-                    .data = ty,
-                };
-                const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+                const ptr_ty = try mod.singleMutPtrType(ty);
 
                 // TODO call extern memcpy
                 const regs = try self.register_manager.allocRegs(5, .{ null, null, null, null, null }, gp);
@@ -5833,11 +5819,7 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I
                 const reg = try self.copyToTmpRegister(ty, mcv);
                 return self.genSetStackArgument(ty, stack_offset, MCValue{ .register = reg });
             } else {
-                var ptr_ty_payload: Type.Payload.ElemType = .{
-                    .base = .{ .tag = .single_mut_pointer },
-                    .data = ty,
-                };
-                const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+                const ptr_ty = try mod.singleMutPtrType(ty);
 
                 // TODO call extern memcpy
                 const regs = try self.register_manager.allocRegs(5, .{ null, null, null, null, null }, gp);
@@ -5957,12 +5939,13 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const ptr_ty = self.typeOf(ty_op.operand);
         const ptr = try self.resolveInst(ty_op.operand);
-        const array_ty = ptr_ty.childType();
-        const array_len = @intCast(u32, array_ty.arrayLen());
+        const array_ty = ptr_ty.childType(mod);
+        const array_len = @intCast(u32, array_ty.arrayLen(mod));
 
         const ptr_bits = self.target.ptrBitWidth();
         const ptr_bytes = @divExact(ptr_bits, 8);
@@ -6079,8 +6062,9 @@ fn airReduce(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const vector_ty = self.typeOfIndex(inst);
-    const len = vector_ty.vectorLen();
+    const len = vector_ty.vectorLen(mod);
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
     const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]);
     const result: MCValue = res: {
diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig
index 55ec0d4125..c08cb58c48 100644
--- a/src/arch/arm/CodeGen.zig
+++ b/src/arch/arm/CodeGen.zig
@@ -1010,7 +1010,7 @@ fn allocMem(
 
 /// Use a pointer instruction as the basis for allocating stack memory.
 fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
     const mod = self.bin_file.options.module.?;
-    const elem_ty = self.typeOfIndex(inst).elemType();
+    const elem_ty = self.typeOfIndex(inst).childType(mod);
 
     if (!elem_ty.hasRuntimeBits(mod)) {
         // As this stack item will never be dereferenced at runtime,
@@ -1117,17 +1117,14 @@ fn airAlloc(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airRetPtr(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const result: MCValue = switch (self.ret_mcv) {
         .none, .register => .{ .ptr_stack_offset = try self.allocMemPtr(inst) },
         .stack_offset => blk: {
             // self.ret_mcv is an address to where this function
             // should store its result into
             const ret_ty = self.fn_type.fnReturnType();
-            var ptr_ty_payload: Type.Payload.ElemType = .{
-                .base = .{ .tag = .single_mut_pointer },
-                .data = ret_ty,
-            };
-            const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+            const ptr_ty = try mod.singleMutPtrType(ret_ty);
 
             // addr_reg will contain the address of where to store the
             // result into
@@ -2372,8 +2369,8 @@ fn ptrElemVal(
     ptr_ty: Type,
     maybe_inst: ?Air.Inst.Index,
 ) !MCValue {
-    const elem_ty = ptr_ty.childType();
     const mod = self.bin_file.options.module.?;
+    const elem_ty = ptr_ty.childType(mod);
     const elem_size = @intCast(u32, elem_ty.abiSize(mod));
 
     switch (elem_size) {
@@ -2474,7 +2471,8 @@ fn arrayElemVal(
     array_ty: Type,
     maybe_inst: ?Air.Inst.Index,
 ) InnerError!MCValue {
-    const elem_ty = array_ty.childType();
+    const mod = self.bin_file.options.module.?;
+    const elem_ty = array_ty.childType(mod);
 
     const mcv = try array_bind.resolveToMcv(self);
     switch (mcv) {
@@ -2508,11 +2506,7 @@ fn arrayElemVal(
 
             const base_bind: ReadArg.Bind = .{ .mcv = ptr_to_mcv };
 
-            var ptr_ty_payload: Type.Payload.ElemType = .{
-                .base = .{ .tag = .single_mut_pointer },
-                .data = elem_ty,
-            };
-            const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+            const ptr_ty = try mod.singleMutPtrType(elem_ty);
 
             return try self.ptrElemVal(base_bind, index_bind, ptr_ty, maybe_inst);
         },
@@ -2659,8 +2653,8 @@ fn reuseOperand(
 }
 
 fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!void {
-    const elem_ty = ptr_ty.elemType();
     const mod = self.bin_file.options.module.?;
+    const elem_ty = ptr_ty.childType(mod);
     const elem_size = @intCast(u32, elem_ty.abiSize(mod));
 
     switch (ptr) {
@@ -2888,7 +2882,7 @@ fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, inde
     const mod = self.bin_file.options.module.?;
     const mcv = try self.resolveInst(operand);
     const ptr_ty = self.typeOf(operand);
-    const struct_ty = ptr_ty.childType();
+    const struct_ty = ptr_ty.childType(mod);
     const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod));
     switch (mcv) {
         .ptr_stack_offset => |off| {
@@ -3004,7 +2998,7 @@ fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void {
     const extra = self.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const field_ptr = try self.resolveInst(extra.field_ptr);
-        const struct_ty = self.air.getRefType(ty_pl.ty).childType();
+        const struct_ty = self.air.getRefType(ty_pl.ty).childType(mod);
 
         if (struct_ty.zigTypeTag(mod) == .Union) {
             return self.fail("TODO implement @fieldParentPtr codegen for unions", .{});
@@ -3898,9 +3892,9 @@ fn ptrArithmetic(
     assert(rhs_ty.eql(Type.usize, mod));
 
     const ptr_ty = lhs_ty;
-    const elem_ty = switch (ptr_ty.ptrSize()) {
-        .One => ptr_ty.childType().childType(), // ptr to array, so get array element type
-        else => ptr_ty.childType(),
+    const elem_ty = switch (ptr_ty.ptrSize(mod)) {
+        .One => ptr_ty.childType(mod).childType(mod), // ptr to array, so get array element type
+        else => ptr_ty.childType(mod),
     };
     const elem_size = @intCast(u32, elem_ty.abiSize(mod));
 
@@ -4079,7 +4073,7 @@ fn genInlineMemset(
 ) !void {
     const dst_reg = switch (dst) {
         .register => |r| r,
-        else => try self.copyToTmpRegister(Type.initTag(.manyptr_u8), dst),
+        else => try self.copyToTmpRegister(Type.manyptr_u8, dst),
     };
     const dst_reg_lock = self.register_manager.lockReg(dst_reg);
     defer if (dst_reg_lock) |lock| self.register_manager.unlockReg(lock);
@@ -4229,7 +4223,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
 
     const fn_ty = switch (ty.zigTypeTag(mod)) {
         .Fn => ty,
-        .Pointer => ty.childType(),
+        .Pointer => ty.childType(mod),
         else => unreachable,
     };
 
@@ -4259,11 +4253,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
             const ret_abi_align = @intCast(u32, ret_ty.abiAlignment(mod));
             const stack_offset = try self.allocMem(ret_abi_size, ret_abi_align, inst);
 
-            var ptr_ty_payload: Type.Payload.ElemType = .{
-                .base = .{ .tag = .single_mut_pointer },
-                .data = ret_ty,
-            };
-            const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+            const ptr_ty = try mod.singleMutPtrType(ret_ty);
 
             try self.register_manager.getReg(.r0, null);
             try self.genSetReg(ptr_ty, .r0, .{ .ptr_stack_offset = stack_offset });
@@ -4401,11 +4391,7 @@ fn airRet(self: *Self, inst: Air.Inst.Index) !void {
             //
             // self.ret_mcv is an address to where this function
             // should store its result into
-            var ptr_ty_payload: Type.Payload.ElemType = .{
-                .base = .{ .tag = .single_mut_pointer },
-                .data = ret_ty,
-            };
-            const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+            const ptr_ty = try mod.singleMutPtrType(ret_ty);
             try self.store(self.ret_mcv, operand, ptr_ty, ret_ty);
         },
         else => unreachable, // invalid return result
@@ -4482,8 +4468,7 @@ fn cmp(
     const mod = self.bin_file.options.module.?;
     const int_ty = switch (lhs_ty.zigTypeTag(mod)) {
         .Optional => blk: {
-            var opt_buffer: Type.Payload.ElemType = undefined;
-            const payload_ty = lhs_ty.optionalChild(&opt_buffer);
+            const payload_ty = lhs_ty.optionalChild(mod);
             if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
                 break :blk Type.u1;
             } else if (lhs_ty.isPtrLikeOptional(mod)) {
@@ -4837,11 +4822,12 @@ fn airIsNull(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airIsNullPtr(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const un_op = self.air.instructions.items(.data)[inst].un_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const operand_ptr = try self.resolveInst(un_op);
         const ptr_ty = self.typeOf(un_op);
-        const elem_ty = ptr_ty.elemType();
+        const elem_ty = ptr_ty.childType(mod);
 
         const operand = try self.allocRegOrMem(elem_ty, true, null);
         try self.load(operand, operand_ptr, ptr_ty);
@@ -4863,11 +4849,12 @@ fn airIsNonNull(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airIsNonNullPtr(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const un_op = self.air.instructions.items(.data)[inst].un_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const operand_ptr = try self.resolveInst(un_op);
         const ptr_ty = self.typeOf(un_op);
-        const elem_ty = ptr_ty.elemType();
+        const elem_ty = ptr_ty.childType(mod);
 
         const operand = try self.allocRegOrMem(elem_ty, true, null);
         try self.load(operand, operand_ptr, ptr_ty);
@@ -4924,11 +4911,12 @@ fn airIsErr(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const un_op = self.air.instructions.items(.data)[inst].un_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const operand_ptr = try self.resolveInst(un_op);
         const ptr_ty = self.typeOf(un_op);
-        const elem_ty = ptr_ty.elemType();
+        const elem_ty = ptr_ty.childType(mod);
 
         const operand = try self.allocRegOrMem(elem_ty, true, null);
         try self.load(operand, operand_ptr, ptr_ty);
@@ -4950,11 +4938,12 @@ fn airIsNonErr(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const un_op = self.air.instructions.items(.data)[inst].un_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const operand_ptr = try self.resolveInst(un_op);
         const ptr_ty = self.typeOf(un_op);
-        const elem_ty = ptr_ty.elemType();
+        const elem_ty = ptr_ty.childType(mod);
 
         const operand = try self.allocRegOrMem(elem_ty, true, null);
         try self.load(operand, operand_ptr, ptr_ty);
@@ -5455,11 +5444,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
                 const reg = try self.copyToTmpRegister(ty, mcv);
                 return self.genSetStack(ty, stack_offset, MCValue{ .register = reg });
             } else {
-                var ptr_ty_payload: Type.Payload.ElemType = .{
-                    .base = .{ .tag = .single_mut_pointer },
-                    .data = ty,
-                };
-                const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+                const ptr_ty = try mod.singleMutPtrType(ty);
 
                 // TODO call extern memcpy
                 const regs = try self.register_manager.allocRegs(5, .{ null, null, null, null, null }, gp);
@@ -5816,11 +5801,7 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I
                 const reg = try self.copyToTmpRegister(ty, mcv);
                 return self.genSetStackArgument(ty, stack_offset, MCValue{ .register = reg });
             } else {
-                var ptr_ty_payload: Type.Payload.ElemType = .{
-                    .base = .{ .tag = .single_mut_pointer },
-                    .data = ty,
-                };
-                const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+                const ptr_ty = try mod.singleMutPtrType(ty);
 
                 // TODO call extern memcpy
                 const regs = try self.register_manager.allocRegs(5, .{ null, null, null, null, null }, gp);
@@ -5908,12 +5889,13 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const ptr_ty = self.typeOf(ty_op.operand);
         const ptr = try self.resolveInst(ty_op.operand);
-        const array_ty = ptr_ty.childType();
-        const array_len = @intCast(u32, array_ty.arrayLen());
+        const array_ty = ptr_ty.childType(mod);
+        const array_len = @intCast(u32, array_ty.arrayLen(mod));
 
         const stack_offset = try self.allocMem(8, 8, inst);
         try self.genSetStack(ptr_ty, stack_offset, ptr);
@@ -6026,8 +6008,9 @@ fn airReduce(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const vector_ty = self.typeOfIndex(inst);
-    const len = vector_ty.vectorLen();
+    const len = vector_ty.vectorLen(mod);
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
     const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]);
     const result: MCValue = res: {
diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig
index 488b937141..1e5858a948 100644
--- a/src/arch/riscv64/CodeGen.zig
+++ b/src/arch/riscv64/CodeGen.zig
@@ -807,7 +807,7 @@ fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: u32) !u
 
 /// Use a pointer instruction as the basis for allocating stack memory.
 fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
     const mod = self.bin_file.options.module.?;
-    const elem_ty = self.typeOfIndex(inst).elemType();
+    const elem_ty = self.typeOfIndex(inst).childType(mod);
     const abi_size = math.cast(u32, elem_ty.abiSize(mod)) orelse {
         return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)});
     };
@@ -1099,9 +1099,9 @@ fn binOp(
     switch (lhs_ty.zigTypeTag(mod)) {
         .Pointer => {
             const ptr_ty = lhs_ty;
-            const elem_ty = switch (ptr_ty.ptrSize()) {
-                .One => ptr_ty.childType().childType(), // ptr to array, so get array element type
-                else => ptr_ty.childType(),
+            const elem_ty = switch (ptr_ty.ptrSize(mod)) {
+                .One => ptr_ty.childType(mod).childType(mod), // ptr to array, so get array element type
+                else => ptr_ty.childType(mod),
             };
             const elem_size = elem_ty.abiSize(mod);
@@ -1502,7 +1502,8 @@ fn reuseOperand(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, op_ind
 }
 
 fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!void {
-    const elem_ty = ptr_ty.elemType();
+    const mod = self.bin_file.options.module.?;
+    const elem_ty = ptr_ty.childType(mod);
     switch (ptr) {
         .none => unreachable,
         .undef => unreachable,
@@ -2496,8 +2497,9 @@ fn airReduce(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const vector_ty = self.typeOfIndex(inst);
-    const len = vector_ty.vectorLen();
+    const len = vector_ty.vectorLen(mod);
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
     const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]);
     const result: MCValue = res: {
diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig
index c565b6dc23..f8a62f9798 100644
--- a/src/arch/sparc64/CodeGen.zig
+++ b/src/arch/sparc64/CodeGen.zig
@@ -838,8 +838,9 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const vector_ty = self.typeOfIndex(inst);
-    const len = vector_ty.vectorLen();
+    const len = vector_ty.vectorLen(mod);
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
     const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]);
     const result: MCValue = res: {
@@ -871,12 +872,13 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const ptr_ty = self.typeOf(ty_op.operand);
         const ptr = try self.resolveInst(ty_op.operand);
-        const array_ty = ptr_ty.childType();
-        const array_len = @intCast(u32, array_ty.arrayLen());
+        const array_ty = ptr_ty.childType(mod);
+        const array_len = @intCast(u32, array_ty.arrayLen(mod));
 
         const ptr_bits = self.target.ptrBitWidth();
         const ptr_bytes = @divExact(ptr_bits, 8);
@@ -1300,7 +1302,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
     const mod = self.bin_file.options.module.?;
     const fn_ty = switch (ty.zigTypeTag(mod)) {
         .Fn => ty,
-        .Pointer => ty.childType(),
+        .Pointer => ty.childType(mod),
         else => unreachable,
     };
 
@@ -1440,8 +1442,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
         .Pointer => Type.usize,
         .ErrorSet => Type.u16,
         .Optional => blk: {
-            var opt_buffer: Type.Payload.ElemType = undefined;
-            const payload_ty = lhs_ty.optionalChild(&opt_buffer);
+            const payload_ty = lhs_ty.optionalChild(mod);
             if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
                 break :blk Type.u1;
             } else if (lhs_ty.isPtrLikeOptional(mod)) {
@@ -2447,6 +2448,7 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const is_volatile = false; // TODO
     const bin_op = self.air.instructions.items(.data)[inst].bin_op;
 
@@ -2456,8 +2458,7 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
         const index_mcv = try self.resolveInst(bin_op.rhs);
 
         const slice_ty = self.typeOf(bin_op.lhs);
-        const elem_ty = slice_ty.childType();
-        const mod = self.bin_file.options.module.?;
+        const elem_ty = slice_ty.childType(mod);
         const elem_size = elem_ty.abiSize(mod);
 
         var buf: Type.SlicePtrFieldTypeBuffer = undefined;
@@ -2797,7 +2798,7 @@ fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: u32) !u
 
 /// Use a pointer instruction as the basis for allocating stack memory.
 fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
     const mod = self.bin_file.options.module.?;
-    const elem_ty = self.typeOfIndex(inst).elemType();
+    const elem_ty = self.typeOfIndex(inst).childType(mod);
 
     if (!elem_ty.hasRuntimeBits(mod)) {
         // As this stack item will never be dereferenced at runtime,
@@ -3001,9 +3002,9 @@ fn binOp(
     switch (lhs_ty.zigTypeTag(mod)) {
         .Pointer => {
             const ptr_ty = lhs_ty;
-            const elem_ty = switch (ptr_ty.ptrSize()) {
-                .One => ptr_ty.childType().childType(), // ptr to array, so get array element type
-                else => ptr_ty.childType(),
+            const elem_ty = switch (ptr_ty.ptrSize(mod)) {
+                .One => ptr_ty.childType(mod).childType(mod), // ptr to array, so get array element type
+                else => ptr_ty.childType(mod),
             };
             const elem_size = elem_ty.abiSize(mod);
@@ -3019,7 +3020,7 @@ fn binOp(
                 // multiplying it with elem_size
                 const offset = try self.binOp(.mul, rhs, .{ .immediate = elem_size }, Type.usize, Type.usize, null);
 
-                const addr = try self.binOp(tag, lhs, offset, Type.initTag(.manyptr_u8), Type.usize, null);
+                const addr = try self.binOp(tag, lhs, offset, Type.manyptr_u8, Type.usize, null);
                 return addr;
             }
         },
@@ -4042,11 +4043,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
                 const reg = try self.copyToTmpRegister(ty, mcv);
                 return self.genSetStack(ty, stack_offset, MCValue{ .register = reg });
             } else {
-                var ptr_ty_payload: Type.Payload.ElemType = .{
-                    .base = .{ .tag = .single_mut_pointer },
-                    .data = ty,
-                };
-                const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+                const ptr_ty = try mod.singleMutPtrType(ty);
 
                 const regs = try self.register_manager.allocRegs(4, .{ null, null, null, null }, gp);
                 const regs_locks = self.register_manager.lockRegsAssumeUnused(4, regs);
@@ -4269,7 +4266,7 @@ fn jump(self: *Self, inst: Mir.Inst.Index) !void {
 
 fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!void {
     const mod = self.bin_file.options.module.?;
-    const elem_ty = ptr_ty.elemType();
+    const elem_ty = ptr_ty.childType(mod);
     const elem_size = elem_ty.abiSize(mod);
 
     switch (ptr) {
@@ -4729,7 +4726,7 @@ fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, inde
     const mod = self.bin_file.options.module.?;
     const mcv = try self.resolveInst(operand);
     const ptr_ty = self.typeOf(operand);
-    const struct_ty = ptr_ty.childType();
+    const struct_ty = ptr_ty.childType(mod);
     const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod));
     switch (mcv) {
         .ptr_stack_offset => |off| {
diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig
index 7fc5dbc825..96304628e9 100644
--- a/src/arch/wasm/CodeGen.zig
+++ b/src/arch/wasm/CodeGen.zig
@@ -1542,7 +1542,7 @@ fn allocStack(func: *CodeGen, ty: Type) !WValue {
 fn allocStackPtr(func: *CodeGen, inst: Air.Inst.Index) !WValue {
     const mod = func.bin_file.base.options.module.?;
     const ptr_ty = func.typeOfIndex(inst);
-    const pointee_ty = ptr_ty.childType();
+    const pointee_ty = ptr_ty.childType(mod);
 
     if (func.initial_stack_value == .none) {
         try func.initializeStack();
     }
@@ -1766,8 +1766,7 @@ fn isByRef(ty: Type, mod: *const Module) bool {
         },
         .Optional => {
             if (ty.isPtrLikeOptional(mod)) return false;
-            var buf: Type.Payload.ElemType = undefined;
-            const pl_type = ty.optionalChild(&buf);
+            const pl_type = ty.optionalChild(mod);
             if (pl_type.zigTypeTag(mod) == .ErrorSet) return false;
             return pl_type.hasRuntimeBitsIgnoreComptime(mod);
         },
@@ -2139,7 +2138,7 @@ fn airRet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 
 fn airRetPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const mod = func.bin_file.base.options.module.?;
-    const child_type = func.typeOfIndex(inst).childType();
+    const child_type = func.typeOfIndex(inst).childType(mod);
 
     var result = result: {
         if (!child_type.isFnOrHasRuntimeBitsIgnoreComptime(mod)) {
@@ -2161,7 +2160,7 @@ fn airRetLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const mod = func.bin_file.base.options.module.?;
     const un_op = func.air.instructions.items(.data)[inst].un_op;
     const operand = try func.resolveInst(un_op);
-    const ret_ty = func.typeOf(un_op).childType();
+    const ret_ty = func.typeOf(un_op).childType(mod);
     const fn_info = func.decl.ty.fnInfo();
 
     if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) {
@@ -2188,7 +2187,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
     const mod = func.bin_file.base.options.module.?;
     const fn_ty = switch (ty.zigTypeTag(mod)) {
         .Fn => ty,
-        .Pointer => ty.childType(),
+        .Pointer => ty.childType(mod),
         else => unreachable,
     };
     const ret_ty = fn_ty.fnReturnType();
@@ -2301,8 +2300,8 @@ fn airStore(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void
     const lhs = try func.resolveInst(bin_op.lhs);
     const rhs = try func.resolveInst(bin_op.rhs);
     const ptr_ty = func.typeOf(bin_op.lhs);
-    const ptr_info = ptr_ty.ptrInfo().data;
-    const ty = ptr_ty.childType();
+    const ptr_info = ptr_ty.ptrInfo(mod);
+    const ty = ptr_ty.childType(mod);
 
     if (ptr_info.host_size == 0) {
         try func.store(lhs, rhs, ty, 0);
@@ -2360,8 +2359,7 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE
             if (ty.isPtrLikeOptional(mod)) {
                 return func.store(lhs, rhs, Type.usize, 0);
             }
-            var buf: Type.Payload.ElemType = undefined;
-            const pl_ty = ty.optionalChild(&buf);
+            const pl_ty = ty.optionalChild(mod);
             if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) {
                 return func.store(lhs, rhs, Type.u8, 0);
             }
@@ -2454,7 +2452,7 @@ fn airLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const operand = try func.resolveInst(ty_op.operand);
     const ty = func.air.getRefType(ty_op.ty);
     const ptr_ty = func.typeOf(ty_op.operand);
-    const ptr_info = ptr_ty.ptrInfo().data;
+    const ptr_info = ptr_ty.ptrInfo(mod);
 
     if (!ty.hasRuntimeBitsIgnoreComptime(mod)) return func.finishAir(inst, .none, &.{ty_op.operand});
 
@@ -2971,7 +2969,7 @@ fn lowerParentPtr(func: *CodeGen, ptr_val: Value, offset: u32) InnerError!WValue
                     break :blk field_offset;
                 },
             },
-            .Pointer => switch (parent_ty.ptrSize()) {
+            .Pointer => switch (parent_ty.ptrSize(mod)) {
                 .Slice => switch (field_ptr.field_index) {
                     0 => 0,
                     1 => func.ptrSize(),
@@ -3001,11 +2999,7 @@ fn lowerParentPtrDecl(func: *CodeGen, ptr_val: Value, decl_index: Module.Decl.In
     const mod = func.bin_file.base.options.module.?;
     const decl = mod.declPtr(decl_index);
     mod.markDeclAlive(decl);
-    var ptr_ty_payload: Type.Payload.ElemType = .{
-        .base = .{ .tag = .single_mut_pointer },
-        .data = decl.ty,
-    };
-    const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+    const ptr_ty = try mod.singleMutPtrType(decl.ty);
 
     return func.lowerDeclRefValue(.{ .ty = ptr_ty, .val = ptr_val }, decl_index, offset);
 }
@@ -3145,8 +3139,7 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue {
             return func.fail("Wasm TODO: lowerConstant error union with non-zero-bit payload type", .{});
         },
         .Optional => if (ty.optionalReprIsPayload(mod)) {
-            var buf: Type.Payload.ElemType = undefined;
-            const pl_ty = ty.optionalChild(&buf);
+            const pl_ty = ty.optionalChild(mod);
             if (val.castTag(.opt_payload)) |payload| {
                 return func.lowerConstant(payload.data, pl_ty);
             } else if (val.isNull(mod)) {
@@ -3217,8 +3210,7 @@ fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue {
            else => unreachable,
         },
         .Optional => {
-            var buf: Type.Payload.ElemType = undefined;
-            const pl_ty = ty.optionalChild(&buf);
+            const pl_ty = ty.optionalChild(mod);
             if (ty.optionalReprIsPayload(mod)) {
                 return func.emitUndefined(pl_ty);
             }
@@ -3403,8 +3395,7 @@ fn cmp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: std.math.CompareO
     assert(!(lhs != .stack and rhs == .stack));
     const mod = func.bin_file.base.options.module.?;
     if (ty.zigTypeTag(mod) == .Optional and !ty.optionalReprIsPayload(mod)) {
-        var buf: Type.Payload.ElemType = undefined;
-        const payload_ty = ty.optionalChild(&buf);
+        const payload_ty = ty.optionalChild(mod);
         if (payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
             // When we hit this case, we must check the value of optionals
             // that are not pointers. This means first checking against non-null for
@@ -3609,19 +3600,21 @@ fn bitcast(func: *CodeGen, wanted_ty: Type, given_ty: Type, operand: WValue) Inn
 }
 
 fn airStructFieldPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const mod = func.bin_file.base.options.module.?;
     const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
     const extra = func.air.extraData(Air.StructField, ty_pl.payload);
     const struct_ptr = try func.resolveInst(extra.data.struct_operand);
-    const struct_ty = func.typeOf(extra.data.struct_operand).childType();
+    const struct_ty = func.typeOf(extra.data.struct_operand).childType(mod);
 
     const result = try func.structFieldPtr(inst, extra.data.struct_operand, struct_ptr, struct_ty, extra.data.field_index);
     func.finishAir(inst, result, &.{extra.data.struct_operand});
 }
 
 fn airStructFieldPtrIndex(func: *CodeGen, inst: Air.Inst.Index, index: u32) InnerError!void {
+    const mod = func.bin_file.base.options.module.?;
     const ty_op = func.air.instructions.items(.data)[inst].ty_op;
     const struct_ptr = try func.resolveInst(ty_op.operand);
-    const struct_ty = func.typeOf(ty_op.operand).childType();
+    const struct_ty = func.typeOf(ty_op.operand).childType(mod);
 
     const result = try func.structFieldPtr(inst, ty_op.operand, struct_ptr, struct_ty, index);
     func.finishAir(inst, result, &.{ty_op.operand});
@@ -3640,7 +3633,7 @@ fn structFieldPtr(
     const offset = switch (struct_ty.containerLayout()) {
         .Packed => switch (struct_ty.zigTypeTag(mod)) {
             .Struct => offset: {
-                if (result_ty.ptrInfo().data.host_size != 0) {
+                if (result_ty.ptrInfo(mod).host_size != 0) {
                     break :offset @as(u32, 0);
                 }
                 break :offset struct_ty.packedStructFieldByteOffset(index, mod);
@@ -3981,7 +3974,7 @@ fn airUnwrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: boo
 
     const operand = try func.resolveInst(ty_op.operand);
     const op_ty = func.typeOf(ty_op.operand);
-    const err_ty = if (op_is_ptr) op_ty.childType() else op_ty;
+    const err_ty = if (op_is_ptr) op_ty.childType(mod) else op_ty;
     const payload_ty = err_ty.errorUnionPayload();
 
     const result = result: {
@@ -4009,7 +4002,7 @@ fn airUnwrapErrUnionError(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: bool)
 
     const operand = try func.resolveInst(ty_op.operand);
     const op_ty = func.typeOf(ty_op.operand);
-    const err_ty = if (op_is_ptr) op_ty.childType() else op_ty;
+    const err_ty = if (op_is_ptr) op_ty.childType(mod) else op_ty;
     const payload_ty = err_ty.errorUnionPayload();
 
     const result = result: {
@@ -4156,11 +4149,12 @@ fn intcast(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerErro
 }
 
 fn airIsNull(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode, op_kind: enum { value, ptr }) InnerError!void {
+    const mod = func.bin_file.base.options.module.?;
     const un_op = func.air.instructions.items(.data)[inst].un_op;
     const operand = try func.resolveInst(un_op);
 
     const op_ty = func.typeOf(un_op);
-    const optional_ty = if (op_kind == .ptr) op_ty.childType() else op_ty;
+    const optional_ty = if (op_kind == .ptr) op_ty.childType(mod) else op_ty;
     const is_null = try func.isNull(operand, optional_ty, opcode);
     const result = try is_null.toLocal(func, optional_ty);
 
     func.finishAir(inst, result, &.{un_op});
@@ -4171,8 +4165,7 @@ fn airIsNull(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode, op_kind:
 fn isNull(func: *CodeGen, operand: WValue, optional_ty: Type, opcode: wasm.Opcode) InnerError!WValue {
     const mod = func.bin_file.base.options.module.?;
     try func.emitWValue(operand);
-    var buf: Type.Payload.ElemType = undefined;
-    const payload_ty = optional_ty.optionalChild(&buf);
+    const payload_ty = optional_ty.optionalChild(mod);
     if (!optional_ty.optionalReprIsPayload(mod)) {
         // When payload is zero-bits, we can treat operand as a value, rather than
         // a pointer to the stack value
@@ -4221,14 +4214,13 @@ fn airOptionalPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 }
 
 fn airOptionalPayloadPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const mod = func.bin_file.base.options.module.?;
     const ty_op = func.air.instructions.items(.data)[inst].ty_op;
     const operand = try func.resolveInst(ty_op.operand);
-    const opt_ty = func.typeOf(ty_op.operand).childType();
+    const opt_ty = func.typeOf(ty_op.operand).childType(mod);
 
-    const mod = func.bin_file.base.options.module.?;
     const result = result: {
-        var buf: Type.Payload.ElemType = undefined;
-        const payload_ty = opt_ty.optionalChild(&buf);
+        const payload_ty = opt_ty.optionalChild(mod);
         if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod) or opt_ty.optionalReprIsPayload(mod)) {
             break :result func.reuseOperand(ty_op.operand, operand);
        }
@@ -4242,9 +4234,8 @@ fn airOptionalPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!voi
     const mod = func.bin_file.base.options.module.?;
     const ty_op = func.air.instructions.items(.data)[inst].ty_op;
     const operand = try func.resolveInst(ty_op.operand);
-    const opt_ty = func.typeOf(ty_op.operand).childType();
-    var buf: Type.Payload.ElemType = undefined;
-    const payload_ty = opt_ty.optionalChild(&buf);
+    const opt_ty = func.typeOf(ty_op.operand).childType(mod);
+    const payload_ty = opt_ty.optionalChild(mod);
     if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
         return func.fail("TODO: Implement OptionalPayloadPtrSet for optional with zero-sized type {}", .{payload_ty.fmtDebug()});
     }
@@ -4325,13 +4316,13 @@ fn airSliceLen(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 }
 
 fn airSliceElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const mod = func.bin_file.base.options.module.?;
     const bin_op = func.air.instructions.items(.data)[inst].bin_op;
 
     const slice_ty = func.typeOf(bin_op.lhs);
     const slice = try func.resolveInst(bin_op.lhs);
     const index = try func.resolveInst(bin_op.rhs);
-    const elem_ty = slice_ty.childType();
-    const mod = func.bin_file.base.options.module.?;
+    const elem_ty = slice_ty.childType(mod);
     const elem_size = elem_ty.abiSize(mod);
 
     // load pointer onto stack
@@ -4355,11 +4346,11 @@ fn airSliceElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 }
 
 fn airSliceElemPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const mod = func.bin_file.base.options.module.?;
     const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
     const bin_op = func.air.extraData(Air.Bin, ty_pl.payload).data;
 
-    const elem_ty = func.air.getRefType(ty_pl.ty).childType();
-    const mod = func.bin_file.base.options.module.?;
+    const elem_ty = func.air.getRefType(ty_pl.ty).childType(mod);
     const elem_size = elem_ty.abiSize(mod);
 
     const slice = try func.resolveInst(bin_op.lhs);
@@ -4436,7 +4427,7 @@ fn airArrayToSlice(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 
     const ty_op = func.air.instructions.items(.data)[inst].ty_op;
     const operand = try func.resolveInst(ty_op.operand);
-    const array_ty = func.typeOf(ty_op.operand).childType();
+    const array_ty = func.typeOf(ty_op.operand).childType(mod);
     const slice_ty = func.air.getRefType(ty_op.ty);
 
     // create a slice on the stack
@@ -4448,7 +4439,7 @@ fn airArrayToSlice(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     }
 
     // store the length of the array in the slice
-    const len = WValue{ .imm32 = @intCast(u32, array_ty.arrayLen()) };
+    const len = WValue{ .imm32 = @intCast(u32, array_ty.arrayLen(mod)) };
     try func.store(slice_local, len, Type.usize, func.ptrSize());
 
     func.finishAir(inst, slice_local, &.{ty_op.operand});
@@ -4470,13 +4461,13 @@ fn airPtrToInt(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 }
 
 fn airPtrElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const mod = func.bin_file.base.options.module.?;
     const bin_op = func.air.instructions.items(.data)[inst].bin_op;
 
     const ptr_ty = func.typeOf(bin_op.lhs);
     const ptr = try func.resolveInst(bin_op.lhs);
     const index = try func.resolveInst(bin_op.rhs);
-    const elem_ty = ptr_ty.childType();
-    const mod = func.bin_file.base.options.module.?;
+    const elem_ty = ptr_ty.childType(mod);
     const elem_size = elem_ty.abiSize(mod);
 
     // load pointer onto the stack
@@ -4507,12 +4498,12 @@ fn airPtrElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 }
 
 fn airPtrElemPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const mod = func.bin_file.base.options.module.?;
     const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
     const bin_op = func.air.extraData(Air.Bin, ty_pl.payload).data;
 
     const ptr_ty = func.typeOf(bin_op.lhs);
-    const elem_ty = func.air.getRefType(ty_pl.ty).childType();
-    const mod = func.bin_file.base.options.module.?;
+    const elem_ty = func.air.getRefType(ty_pl.ty).childType(mod);
     const elem_size = elem_ty.abiSize(mod);
 
     const ptr = try func.resolveInst(bin_op.lhs);
@@ -4544,9 +4535,9 @@ fn airPtrBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
     const ptr = try func.resolveInst(bin_op.lhs);
     const offset = try func.resolveInst(bin_op.rhs);
     const ptr_ty = func.typeOf(bin_op.lhs);
-    const pointee_ty = switch (ptr_ty.ptrSize()) {
-        .One => ptr_ty.childType().childType(), // ptr to array, so get array element type
-        else => ptr_ty.childType(),
+    const pointee_ty = switch (ptr_ty.ptrSize(mod)) {
+        .One => ptr_ty.childType(mod).childType(mod), // ptr to array, so get array element type
+        else => ptr_ty.childType(mod),
     };
 
     const valtype = typeToValtype(Type.usize, mod);
@@ -4565,6 +4556,7 @@ fn airPtrBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
 }
 
 fn airMemset(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void {
+    const mod = func.bin_file.base.options.module.?;
     if (safety) {
         // TODO if the value is undef, write 0xaa bytes to dest
     } else {
@@ -4575,16 +4567,16 @@ fn airMemset(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void
     const ptr = try func.resolveInst(bin_op.lhs);
     const ptr_ty = func.typeOf(bin_op.lhs);
     const value = try func.resolveInst(bin_op.rhs);
-    const len = switch (ptr_ty.ptrSize()) {
+    const len = switch (ptr_ty.ptrSize(mod)) {
         .Slice => try func.sliceLen(ptr),
-        .One => @as(WValue, .{ .imm32 = @intCast(u32, ptr_ty.childType().arrayLen()) }),
+        .One => @as(WValue, .{ .imm32 = @intCast(u32, ptr_ty.childType(mod).arrayLen(mod)) }),
         .C, .Many => unreachable,
     };
 
-    const elem_ty = if (ptr_ty.ptrSize() == .One)
-        ptr_ty.childType().childType()
+    const elem_ty = if (ptr_ty.ptrSize(mod) == .One)
+        ptr_ty.childType(mod).childType(mod)
     else
-        ptr_ty.childType();
+        ptr_ty.childType(mod);
 
     const dst_ptr = try func.sliceOrArrayPtr(ptr, ptr_ty);
     try func.memset(elem_ty, dst_ptr, len, value);
@@ -4686,13 +4678,13 @@ fn memset(func: *CodeGen, elem_ty: Type, ptr: WValue, len: WValue, value: WValue
 }
 
 fn airArrayElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const mod = func.bin_file.base.options.module.?;
     const bin_op = func.air.instructions.items(.data)[inst].bin_op;
 
     const array_ty = func.typeOf(bin_op.lhs);
     const array = try func.resolveInst(bin_op.lhs);
     const index = try func.resolveInst(bin_op.rhs);
-    const elem_ty = array_ty.childType();
-    const mod = func.bin_file.base.options.module.?;
+    const elem_ty = array_ty.childType(mod);
     const elem_size = elem_ty.abiSize(mod);
 
     if (isByRef(array_ty, mod)) {
@@ -4810,7 +4802,7 @@ fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const ty_op = func.air.instructions.items(.data)[inst].ty_op;
     const operand = try func.resolveInst(ty_op.operand);
     const ty = func.typeOfIndex(inst);
-    const elem_ty = ty.childType();
+    const elem_ty = ty.childType(mod);
 
     if (determineSimdStoreStrategy(ty, mod) == .direct) blk: {
         switch (operand) {
@@ -4859,7 +4851,7 @@ fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
         }
     }
     const elem_size = elem_ty.bitSize(mod);
-    const vector_len = @intCast(usize, ty.vectorLen());
+    const vector_len = @intCast(usize, ty.vectorLen(mod));
     if ((!std.math.isPowerOfTwo(elem_size) or elem_size % 8 != 0) and vector_len > 1) {
         return func.fail("TODO: WebAssembly `@splat` for arbitrary element bitsize {d}", .{elem_size});
     }
@@ -4895,7 +4887,7 @@ fn airShuffle(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const mask = func.air.values[extra.mask];
     const mask_len = extra.mask_len;
 
-    const child_ty = inst_ty.childType();
+    const child_ty = inst_ty.childType(mod);
     const elem_size = child_ty.abiSize(mod);
 
     // TODO: One of them could be by ref; handle in loop
@@ -4959,16 +4951,16 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const mod = func.bin_file.base.options.module.?;
     const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
     const result_ty = func.typeOfIndex(inst);
-    const len = @intCast(usize, result_ty.arrayLen());
+    const len = @intCast(usize, result_ty.arrayLen(mod));
     const elements = @ptrCast([]const Air.Inst.Ref, func.air.extra[ty_pl.payload..][0..len]);
 
     const result: WValue = result_value: {
         switch (result_ty.zigTypeTag(mod)) {
             .Array => {
                 const result = try func.allocStack(result_ty);
-                const elem_ty = result_ty.childType();
+                const elem_ty = result_ty.childType(mod);
                 const elem_size = @intCast(u32, elem_ty.abiSize(mod));
-                const sentinel = if (result_ty.sentinel()) |sent| blk: {
+                const sentinel = if (result_ty.sentinel(mod)) |sent| blk: {
                     break :blk try func.lowerConstant(sent, elem_ty);
                 } else null;
 
@@ -5190,8 +5182,7 @@ fn cmpOptionals(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op:
     const mod = func.bin_file.base.options.module.?;
     assert(operand_ty.hasRuntimeBitsIgnoreComptime(mod));
     assert(op == .eq or op == .neq);
-    var buf: Type.Payload.ElemType = undefined;
-    const payload_ty = operand_ty.optionalChild(&buf);
+    const payload_ty = operand_ty.optionalChild(mod);
 
     // We store the final result in here that will be validated
     // if the optional is truly equal.
@@ -5268,7 +5259,7 @@ fn cmpBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op: std
 fn airSetUnionTag(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const mod = func.bin_file.base.options.module.?;
     const bin_op = func.air.instructions.items(.data)[inst].bin_op;
-    const un_ty = func.typeOf(bin_op.lhs).childType();
+    const un_ty = func.typeOf(bin_op.lhs).childType(mod);
     const tag_ty = func.typeOf(bin_op.rhs);
     const layout = un_ty.unionGetLayout(mod);
     if (layout.tag_size == 0) return func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
@@ -5398,7 +5389,7 @@ fn airErrUnionPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!voi
     const mod = func.bin_file.base.options.module.?;
     const ty_op = func.air.instructions.items(.data)[inst].ty_op;
 
-    const err_set_ty = func.typeOf(ty_op.operand).childType();
+    const err_set_ty = func.typeOf(ty_op.operand).childType(mod);
     const payload_ty = err_set_ty.errorUnionPayload();
     const operand = try func.resolveInst(ty_op.operand);
 
@@ -5426,7 +5417,7 @@ fn airFieldParentPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const extra = func.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
 
     const field_ptr = try func.resolveInst(extra.field_ptr);
-    const parent_ty = func.air.getRefType(ty_pl.ty).childType();
+    const parent_ty = func.air.getRefType(ty_pl.ty).childType(mod);
     const field_offset = parent_ty.structFieldOffset(extra.field_index, mod);
 
     const result = if (field_offset != 0) result: {
@@ -5455,10 +5446,10 @@ fn airMemcpy(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const bin_op = func.air.instructions.items(.data)[inst].bin_op;
     const dst = try func.resolveInst(bin_op.lhs);
     const dst_ty = func.typeOf(bin_op.lhs);
-    const ptr_elem_ty = dst_ty.childType();
+    const ptr_elem_ty = dst_ty.childType(mod);
     const src = try func.resolveInst(bin_op.rhs);
     const src_ty = func.typeOf(bin_op.rhs);
-    const len = switch (dst_ty.ptrSize()) {
+    const len = switch (dst_ty.ptrSize(mod)) {
         .Slice => blk: {
             const slice_len = try func.sliceLen(dst);
             if (ptr_elem_ty.abiSize(mod) != 1) {
@@ -5470,7 +5461,7 @@ fn airMemcpy(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
             break :blk slice_len;
         },
         .One => @as(WValue, .{
-            .imm32 = @intCast(u32, ptr_elem_ty.arrayLen() * ptr_elem_ty.childType().abiSize(mod)),
+            .imm32 = @intCast(u32, ptr_elem_ty.arrayLen(mod) * ptr_elem_ty.childType(mod).abiSize(mod)),
         }),
         .C, .Many => unreachable,
     };
@@ -5551,7 +5542,7 @@ fn airErrorName(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     // As the names are global and the slice elements are constant, we do not have
     // to make a copy of the ptr+value but can point towards them directly.
     const error_table_symbol = try func.bin_file.getErrorTableSymbol();
-    const name_ty = Type.initTag(.const_slice_u8_sentinel_0);
+    const name_ty = Type.const_slice_u8_sentinel_0;
     const mod = func.bin_file.base.options.module.?;
     const abi_size = name_ty.abiSize(mod);
 
@@ -5857,7 +5848,7 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
             try func.addLabel(.local_set, overflow_bit.local.value);
             break :blk try func.wrapOperand(bin_op, lhs_ty);
         } else if (int_info.bits == 64 and int_info.signedness == .unsigned) blk: {
-            const new_ty = Type.initTag(.u128);
+            const new_ty = Type.u128;
             var lhs_upcast = try (try func.intcast(lhs, lhs_ty, new_ty)).toLocal(func, lhs_ty);
             defer lhs_upcast.free(func);
             var rhs_upcast = try (try func.intcast(rhs, lhs_ty, new_ty)).toLocal(func, lhs_ty);
@@ -5878,7 +5869,7 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
             const bin_op = try func.callIntrinsic(
                 "__multi3",
                 &[_]Type{Type.i64} ** 4,
-                Type.initTag(.i128),
+                Type.i128,
                 &.{ lhs, lhs_shifted, rhs, rhs_shifted },
             );
             const res = try func.allocLocal(lhs_ty);
@@ -5902,19 +5893,19 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
             const mul1 = try func.callIntrinsic(
                 "__multi3",
                 &[_]Type{Type.i64} ** 4,
-                Type.initTag(.i128),
+                Type.i128,
                 &.{ lhs_lsb, zero, rhs_msb, zero },
             );
             const mul2 = try func.callIntrinsic(
                 "__multi3",
                 &[_]Type{Type.i64} ** 4,
-                Type.initTag(.i128),
+                Type.i128,
                 &.{ rhs_lsb, zero, lhs_msb, zero },
             );
             const mul3 = try func.callIntrinsic(
                 "__multi3",
                 &[_]Type{Type.i64} ** 4,
-                Type.initTag(.i128),
+                Type.i128,
                 &.{ lhs_msb, zero, rhs_msb, zero },
             );
 
@@ -5942,7 +5933,7 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
             _ = try func.binOp(lsb_or, mul_add_lt, Type.bool, .@"or");
             try func.addLabel(.local_set, overflow_bit.local.value);
 
-            const tmp_result = try func.allocStack(Type.initTag(.u128));
+            const tmp_result = try func.allocStack(Type.u128);
             try func.emitWValue(tmp_result);
             const mul3_msb = try func.load(mul3, Type.u64, 0);
             try func.store(.stack, mul3_msb, Type.u64, tmp_result.offset());
@@ -6191,11 +6182,12 @@ fn airTry(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 }
 
 fn airTryPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const mod = func.bin_file.base.options.module.?;
     const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
     const extra = func.air.extraData(Air.TryPtr, ty_pl.payload);
     const err_union_ptr = try func.resolveInst(extra.data.ptr);
     const body = func.air.extra[extra.end..][0..extra.data.body_len];
-    const err_union_ty = func.typeOf(extra.data.ptr).childType();
+    const err_union_ty = func.typeOf(extra.data.ptr).childType(mod);
     const result = try lowerTry(func, inst, err_union_ptr, body, err_union_ty, true);
     func.finishAir(inst, result, &.{extra.data.ptr});
 }
@@ -6845,11 +6837,11 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 {
     for (enum_ty.enumFields().keys(), 0..) |tag_name, field_index| {
         // for each tag name, create an unnamed const,
         // and then get a pointer to its value.
-        var name_ty_payload: Type.Payload.Len = .{
-            .base = .{ .tag = .array_u8_sentinel_0 },
-            .data = @intCast(u64, tag_name.len),
-        };
-        const name_ty = Type.initPayload(&name_ty_payload.base);
+        const name_ty = try mod.arrayType(.{
+            .len = tag_name.len,
+            .child = .u8_type,
+            .sentinel = .zero_u8,
+        });
         const string_bytes = &mod.string_literal_bytes;
         try string_bytes.ensureUnusedCapacity(mod.gpa, tag_name.len);
         const gop = try mod.string_literal_table.getOrPutContextAdapted(mod.gpa, tag_name, Module.StringLiteralAdapter{
@@ -6972,7 +6964,7 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 {
     // finish function body
     try writer.writeByte(std.wasm.opcode(.end));
 
-    const slice_ty = Type.initTag(.const_slice_u8_sentinel_0);
+    const slice_ty = Type.const_slice_u8_sentinel_0;
     const func_type = try genFunctype(arena, .Unspecified, &.{int_tag_ty}, slice_ty, mod);
     return func.bin_file.createFunction(func_name, func_type, &body_list, &relocs);
 }
@@ -7068,7 +7060,7 @@ fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const extra = func.air.extraData(Air.Cmpxchg, ty_pl.payload).data;
 
     const ptr_ty = func.typeOf(extra.ptr);
-    const ty = ptr_ty.childType();
+    const ty = ptr_ty.childType(mod);
     const result_ty = func.typeOfIndex(inst);
 
     const ptr_operand = try func.resolveInst(extra.ptr);
@@ -7355,7 +7347,7 @@ fn airAtomicStore(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const ptr = try func.resolveInst(bin_op.lhs);
     const operand = try func.resolveInst(bin_op.rhs);
     const ptr_ty = func.typeOf(bin_op.lhs);
-    const ty = ptr_ty.childType();
+    const ty = ptr_ty.childType(mod);
 
     if (func.useAtomicFeature()) {
         const tag: wasm.AtomicsOpcode = switch (ty.abiSize(mod)) {
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index ad67a0db3d..f6304a0ff3 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -2259,7 +2259,7 @@ fn allocFrameIndex(self: *Self, alloc: FrameAlloc) !FrameIndex {
 fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !FrameIndex {
     const mod = self.bin_file.options.module.?;
     const ptr_ty = self.typeOfIndex(inst);
-    const val_ty = ptr_ty.childType();
+    const val_ty = ptr_ty.childType(mod);
     return self.allocFrameIndex(FrameAlloc.init(.{
         .size = math.cast(u32, val_ty.abiSize(mod)) orelse {
             return self.fail("type '{}' too big to fit into stack frame", .{val_ty.fmt(mod)});
@@ -2289,8 +2289,8 @@ fn allocRegOrMemAdvanced(self: *Self, ty: Type, inst: ?Air.Inst.Index, reg_ok: b
                 80 => break :need_mem,
                 else => unreachable,
             },
-            .Vector => switch (ty.childType().zigTypeTag(mod)) {
-                .Float => switch (ty.childType().floatBits(self.target.*)) {
+            .Vector => switch (ty.childType(mod).zigTypeTag(mod)) {
+                .Float => switch (ty.childType(mod).floatBits(self.target.*)) {
                     16, 32, 64, 128 => if (self.hasFeature(.avx)) 32 else 16,
                     80 => break :need_mem,
                     else => unreachable,
@@ -2727,12 +2727,12 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void {
         try self.copyToRegisterWithInstTracking(inst, dst_ty, src_mcv);
 
     if (dst_ty.zigTypeTag(mod) == .Vector) {
-        assert(src_ty.zigTypeTag(mod) == .Vector and dst_ty.vectorLen() == src_ty.vectorLen());
-        const dst_info = dst_ty.childType().intInfo(mod);
-        const src_info = src_ty.childType().intInfo(mod);
+        assert(src_ty.zigTypeTag(mod) == .Vector and dst_ty.vectorLen(mod) == src_ty.vectorLen(mod));
+        const dst_info = dst_ty.childType(mod).intInfo(mod);
+        const src_info = src_ty.childType(mod).intInfo(mod);
         const mir_tag = if (@as(?Mir.Inst.FixedTag, switch (dst_info.bits) {
             8 => switch (src_info.bits) {
-                16 => switch (dst_ty.vectorLen()) {
+                16 => switch (dst_ty.vectorLen(mod)) {
                     1...8 => if (self.hasFeature(.avx)) .{ .vp_b, .ackusw } else .{ .p_b, .ackusw },
                     9...16 => if (self.hasFeature(.avx2)) .{ .vp_b, .ackusw } else null,
                     else => null,
@@ -2740,7 +2740,7 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void {
                 else => null,
             },
             16 => switch (src_info.bits) {
-                32 => switch (dst_ty.vectorLen()) {
+                32 => switch (dst_ty.vectorLen(mod)) {
                     1...4 => if (self.hasFeature(.avx))
                         .{ .vp_w, .ackusd }
                     else if (self.hasFeature(.sse4_1))
@@ -2769,14 +2769,10 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void {
         };
         const splat_val = Value.initPayload(&splat_pl.base);
 
-        var full_pl = Type.Payload.Array{
-            .base = .{ .tag = .vector },
-            .data = .{
-                .len = @divExact(@as(u64, if (src_abi_size > 16) 256 else 128), src_info.bits),
-                .elem_type = src_ty.childType(),
-            },
-        };
-        const full_ty = Type.initPayload(&full_pl.base);
+        const full_ty = try mod.vectorType(.{
+            .len = @intCast(u32, @divExact(@as(u64, if (src_abi_size > 16) 256 else 128), src_info.bits)),
+            .child = src_ty.childType(mod).ip_index,
+        });
 
         const full_abi_size = @intCast(u32, full_ty.abiSize(mod));
         const splat_mcv = try self.genTypedValue(.{ .ty = full_ty, .val = splat_val });
@@ -3587,7 +3583,7 @@ fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
     const result = result: {
         const dst_ty = self.typeOfIndex(inst);
         const src_ty = self.typeOf(ty_op.operand);
-        const opt_ty = src_ty.childType();
+        const opt_ty = src_ty.childType(mod);
         const src_mcv = try self.resolveInst(ty_op.operand);
 
         if (opt_ty.optionalReprIsPayload(mod)) {
@@ -3607,7 +3603,7 @@ fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
         else
             try self.copyToRegisterWithInstTracking(inst, dst_ty, src_mcv);
 
-        const pl_ty = dst_ty.childType();
+        const pl_ty = dst_ty.childType(mod);
         const pl_abi_size = @intCast(i32, pl_ty.abiSize(mod));
         try self.genSetMem(.{ .reg = dst_mcv.getReg().? }, pl_abi_size, Type.bool, .{ .immediate = 1 });
         break :result if (self.liveness.isUnused(inst)) .unreach else dst_mcv;
@@ -3737,7 +3733,7 @@ fn airUnwrapErrUnionErrPtr(self: *Self, inst: Air.Inst.Index) !void {
     const dst_lock = self.register_manager.lockRegAssumeUnused(dst_reg);
     defer self.register_manager.unlockReg(dst_lock);
 
-    const eu_ty = src_ty.childType();
+    const eu_ty = src_ty.childType(mod);
     const pl_ty = eu_ty.errorUnionPayload();
     const err_ty = eu_ty.errorUnionSet();
     const err_off = @intCast(i32, errUnionErrorOffset(pl_ty, mod));
@@ -3777,7 +3773,7 @@ fn airUnwrapErrUnionPayloadPtr(self: *Self, inst: Air.Inst.Index) !void {
     const dst_lock = self.register_manager.lockReg(dst_reg);
     defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);
 
-    const eu_ty = src_ty.childType();
+    const eu_ty = src_ty.childType(mod);
     const pl_ty = eu_ty.errorUnionPayload();
     const pl_off = @intCast(i32, errUnionPayloadOffset(pl_ty, mod));
     const dst_abi_size = @intCast(u32, dst_ty.abiSize(mod));
@@ -3803,7 +3799,7 @@ fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
     const src_lock = self.register_manager.lockRegAssumeUnused(src_reg);
     defer self.register_manager.unlockReg(src_lock);
 
-    const eu_ty = src_ty.childType();
+    const eu_ty = src_ty.childType(mod);
     const pl_ty = eu_ty.errorUnionPayload();
     const err_ty = eu_ty.errorUnionSet();
     const err_off = @intCast(i32, errUnionErrorOffset(pl_ty, mod));
@@ -4057,7 +4053,7 @@ fn genSliceElemPtr(self: *Self, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref) !MCValue {
     };
     defer if (slice_mcv_lock) |lock| self.register_manager.unlockReg(lock);
 
-    const elem_ty = slice_ty.childType();
+    const elem_ty = slice_ty.childType(mod);
     const elem_size = elem_ty.abiSize(mod);
     var buf: Type.SlicePtrFieldTypeBuffer = undefined;
     const slice_ptr_field_type = slice_ty.slicePtrFieldType(&buf);
@@ -4116,7 +4112,7 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
     };
     defer if (array_lock) |lock| self.register_manager.unlockReg(lock);
 
-    const elem_ty = array_ty.childType();
+    const elem_ty = array_ty.childType(mod);
     const elem_abi_size = elem_ty.abiSize(mod);
     const index_ty = self.typeOf(bin_op.rhs);
@@ -4253,7 +4249,7 @@ fn airSetUnionTag(self: *Self, inst: Air.Inst.Index) !void {
     const mod = self.bin_file.options.module.?;
     const bin_op = self.air.instructions.items(.data)[inst].bin_op;
     const ptr_union_ty = self.typeOf(bin_op.lhs);
-    const union_ty = ptr_union_ty.childType();
+    const union_ty = ptr_union_ty.childType(mod);
     const tag_ty = self.typeOf(bin_op.rhs);
     const layout = union_ty.unionGetLayout(mod);
 
@@ -4287,7 +4283,9 @@ fn airSetUnionTag(self: *Self, inst: Air.Inst.Index) !void {
         break :blk MCValue{ .register = reg };
     } else ptr;
 
-    var ptr_tag_pl = ptr_union_ty.ptrInfo();
+    var ptr_tag_pl: Type.Payload.Pointer = .{
+        .data = ptr_union_ty.ptrInfo(mod),
+    };
     ptr_tag_pl.data.pointee_type = tag_ty;
     const ptr_tag_ty = Type.initPayload(&ptr_tag_pl.base);
     try self.store(ptr_tag_ty, adjusted_ptr, tag);
@@ -4924,14 +4922,11 @@ fn airFloatSign(self: *Self, inst: Air.Inst.Index) !void {
     var stack align(@alignOf(ExpectedContents)) =
         std.heap.stackFallback(@sizeOf(ExpectedContents), arena.allocator());
 
-    var vec_pl = Type.Payload.Array{
-        .base = .{ .tag = .vector },
-        .data = .{
-            .len = @divExact(abi_size * 8, scalar_bits),
-            .elem_type = try mod.intType(.signed, scalar_bits),
-        },
-    };
-    const vec_ty = Type.initPayload(&vec_pl.base);
+    const vec_ty = try mod.vectorType(.{
+        .len = @divExact(abi_size * 8, scalar_bits),
+        .child = (try mod.intType(.signed, scalar_bits)).ip_index,
+    });
+
     const sign_val = switch (tag) {
         .neg => try vec_ty.minInt(stack.get(), mod),
         .fabs => try vec_ty.maxInt(stack.get(), mod),
@@ -5034,15 +5029,15 @@ fn genRound(self: *Self, ty: Type, dst_reg: Register, src_mcv: MCValue, mode: u4
             16, 80, 128 => null,
             else => unreachable,
         },
-        .Vector => switch (ty.childType().zigTypeTag(mod)) {
-            .Float => switch (ty.childType().floatBits(self.target.*)) {
-                32 => switch (ty.vectorLen()) {
+        .Vector => switch (ty.childType(mod).zigTypeTag(mod)) {
+            .Float => switch (ty.childType(mod).floatBits(self.target.*)) {
+                32 => switch (ty.vectorLen(mod)) {
                     1 => if (self.hasFeature(.avx)) .{ .v_ss, .round } else .{ ._ss, .round },
                     2...4 => if (self.hasFeature(.avx)) .{ .v_ps, .round } else .{ ._ps, .round },
                     5...8 => if (self.hasFeature(.avx)) .{ .v_ps, .round } else null,
                     else => null,
                 },
-                64 => switch (ty.vectorLen()) {
+                64 => switch (ty.vectorLen(mod)) {
                     1 => if (self.hasFeature(.avx)) .{ .v_sd, .round } else .{ ._sd, .round },
                     2 => if (self.hasFeature(.avx)) .{ .v_pd, .round } else .{ ._pd, .round },
                     3...4 => if (self.hasFeature(.avx)) .{ .v_pd, .round } else null,
@@ -5131,9 +5126,9 @@ fn airSqrt(self: *Self, inst: Air.Inst.Index) !void {
             80, 128 => null,
             else => unreachable,
         },
-        .Vector => switch (ty.childType().zigTypeTag(mod)) {
-            .Float => switch (ty.childType().floatBits(self.target.*)) {
-                16 => if (self.hasFeature(.f16c)) switch (ty.vectorLen()) {
+        .Vector => switch (ty.childType(mod).zigTypeTag(mod)) {
+            .Float => switch (ty.childType(mod).floatBits(self.target.*)) {
+                16 => if (self.hasFeature(.f16c)) switch (ty.vectorLen(mod)) {
                     1 => {
                         try self.asmRegisterRegister(
                             .{ .v_ps, .cvtph2 },
@@ -5184,13 +5179,13 @@ fn airSqrt(self: *Self, inst: Air.Inst.Index) !void {
                     },
                     else => null,
                 } else null,
-                32 => switch (ty.vectorLen()) {
+                32 => switch (ty.vectorLen(mod)) {
                     1 => if (self.hasFeature(.avx)) .{ .v_ss, .sqrt } else .{ ._ss, .sqrt },
                     2...4 => if (self.hasFeature(.avx)) .{ .v_ps, .sqrt } else .{ ._ps, .sqrt },
                     5...8 => if (self.hasFeature(.avx)) .{ .v_ps, .sqrt } else null,
                     else => null,
                 },
-                64 => switch (ty.vectorLen()) {
+                64 => switch (ty.vectorLen(mod)) {
                     1 => if (self.hasFeature(.avx)) .{ .v_sd, .sqrt } else .{ ._sd, .sqrt },
                     2 => if (self.hasFeature(.avx)) .{ .v_pd, .sqrt } else .{ ._pd, .sqrt },
                     3...4 => if (self.hasFeature(.avx)) .{ .v_pd, .sqrt } else null,
@@ -5292,7 +5287,7 @@ fn reuseOperandAdvanced(
 
 fn packedLoad(self: *Self, dst_mcv: MCValue, ptr_ty: Type, ptr_mcv: MCValue) InnerError!void {
     const mod = self.bin_file.options.module.?;
-    const ptr_info = ptr_ty.ptrInfo().data;
+    const ptr_info = ptr_ty.ptrInfo(mod);
 
     const val_ty = ptr_info.pointee_type;
     const val_abi_size = @intCast(u32, val_ty.abiSize(mod));
@@ -5365,7 +5360,8 @@ fn packedLoad(self: *Self, dst_mcv: MCValue, ptr_ty: Type, ptr_mcv: MCValue) Inn
 }
 
 fn load(self: *Self, dst_mcv: MCValue, ptr_ty: Type, ptr_mcv: MCValue) InnerError!void {
-    const dst_ty = ptr_ty.childType();
+    const mod = self.bin_file.options.module.?;
+    const dst_ty = ptr_ty.childType(mod);
     switch (ptr_mcv) {
         .none,
         .unreach,
@@ -5424,7 +5420,7 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
         else
             try self.allocRegOrMem(inst, true);
 
-        if (ptr_ty.ptrInfo().data.host_size > 0) {
+        if (ptr_ty.ptrInfo(mod).host_size > 0) {
             try self.packedLoad(dst_mcv, ptr_ty, ptr_mcv);
         } else {
             try self.load(dst_mcv, ptr_ty, ptr_mcv);
@@ -5436,8 +5432,8 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
 
 fn packedStore(self: *Self, ptr_ty: Type, ptr_mcv: MCValue, src_mcv: MCValue) InnerError!void {
     const mod = self.bin_file.options.module.?;
-    const ptr_info = ptr_ty.ptrInfo().data;
-    const src_ty = ptr_ty.childType();
+    const ptr_info = ptr_ty.ptrInfo(mod);
+    const src_ty = ptr_ty.childType(mod);
 
     const limb_abi_size: u16 = @min(ptr_info.host_size, 8);
     const limb_abi_bits = limb_abi_size * 8;
@@ -5509,7 +5505,8 @@ fn packedStore(self: *Self, ptr_ty: Type, ptr_mcv: MCValue, src_mcv: MCValue) In
 }
 
 fn store(self: *Self, ptr_ty: Type, ptr_mcv: MCValue, src_mcv: MCValue) InnerError!void {
-    const src_ty = ptr_ty.childType();
+    const mod = self.bin_file.options.module.?;
+    const src_ty = ptr_ty.childType(mod);
     switch (ptr_mcv) {
         .none,
         .unreach,
@@ -5544,6 +5541,7 @@ fn store(self: *Self, ptr_ty: Type, ptr_mcv: MCValue, src_mcv: MCValue) InnerErr
 }
 
 fn airStore(self: *Self, inst: Air.Inst.Index, safety: bool) !void {
+    const mod = self.bin_file.options.module.?;
     if (safety) {
         // TODO if the value is undef, write 0xaa bytes to dest
     } else {
@@ -5553,7 +5551,7 @@ fn airStore(self: *Self, inst: Air.Inst.Index, safety: bool) !void {
     const ptr_mcv = try self.resolveInst(bin_op.lhs);
     const ptr_ty = self.typeOf(bin_op.lhs);
     const src_mcv = try self.resolveInst(bin_op.rhs);
-    if (ptr_ty.ptrInfo().data.host_size > 0) {
+    if (ptr_ty.ptrInfo(mod).host_size > 0) {
         try self.packedStore(ptr_ty, ptr_mcv, src_mcv);
     } else {
         try self.store(ptr_ty, ptr_mcv, src_mcv);
@@ -5578,11 +5576,11 @@ fn fieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32
     const mod = self.bin_file.options.module.?;
     const ptr_field_ty = self.typeOfIndex(inst);
     const ptr_container_ty = self.typeOf(operand);
-    const container_ty = ptr_container_ty.childType();
+    const container_ty = ptr_container_ty.childType(mod);
     const field_offset = @intCast(i32, switch (container_ty.containerLayout()) {
         .Auto, .Extern => container_ty.structFieldOffset(index, mod),
         .Packed => if (container_ty.zigTypeTag(mod) == .Struct and
-            ptr_field_ty.ptrInfo().data.host_size == 0)
+            ptr_field_ty.ptrInfo(mod).host_size == 0)
             container_ty.packedStructFieldByteOffset(index, mod)
         else
             0,
@@ -5760,7 +5758,7 @@ fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void {
     const extra = self.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
 
     const inst_ty = self.typeOfIndex(inst);
-    const parent_ty = inst_ty.childType();
+    const parent_ty = inst_ty.childType(mod);
     const field_offset = @intCast(i32, parent_ty.structFieldOffset(extra.field_index, mod));
 
     const src_mcv = try self.resolveInst(extra.field_ptr);
@@ -6680,10 +6678,10 @@ fn genBinOp(
         80, 128 => null,
         else => unreachable,
     },
-    .Vector => switch (lhs_ty.childType().zigTypeTag(mod)) {
+    .Vector => switch (lhs_ty.childType(mod).zigTypeTag(mod)) {
         else => null,
-        .Int => switch (lhs_ty.childType().intInfo(mod).bits) {
-            8 => switch (lhs_ty.vectorLen()) {
+        .Int => switch (lhs_ty.childType(mod).intInfo(mod).bits) {
+            8 => switch (lhs_ty.vectorLen(mod)) {
                 1...16 => switch (air_tag) {
                     .add,
                     .addwrap,
@@ -6694,7 +6692,7 @@ fn genBinOp(
                     .bit_and => if (self.hasFeature(.avx)) .{ .vp_, .@"and" } else .{ .p_, .@"and" },
                     .bit_or => if (self.hasFeature(.avx)) .{ .vp_, .@"or" } else .{ .p_, .@"or" },
                     .xor => if (self.hasFeature(.avx)) .{ .vp_, .xor } else .{ .p_, .xor },
-                    .min => switch (lhs_ty.childType().intInfo(mod).signedness) {
+                    .min => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
                         .signed => if (self.hasFeature(.avx))
                             .{ .vp_b, .mins }
                         else if (self.hasFeature(.sse4_1))
@@ -6708,7 +6706,7 @@ fn genBinOp(
                         else
                             null,
                     },
-                    .max => switch
(lhs_ty.childType().intInfo(mod).signedness) { + .max => switch (lhs_ty.childType(mod).intInfo(mod).signedness) { .signed => if (self.hasFeature(.avx)) .{ .vp_b, .maxs } else if (self.hasFeature(.sse4_1)) @@ -6734,11 +6732,11 @@ fn genBinOp( .bit_and => if (self.hasFeature(.avx2)) .{ .vp_, .@"and" } else null, .bit_or => if (self.hasFeature(.avx2)) .{ .vp_, .@"or" } else null, .xor => if (self.hasFeature(.avx2)) .{ .vp_, .xor } else null, - .min => switch (lhs_ty.childType().intInfo(mod).signedness) { + .min => switch (lhs_ty.childType(mod).intInfo(mod).signedness) { .signed => if (self.hasFeature(.avx2)) .{ .vp_b, .mins } else null, .unsigned => if (self.hasFeature(.avx)) .{ .vp_b, .minu } else null, }, - .max => switch (lhs_ty.childType().intInfo(mod).signedness) { + .max => switch (lhs_ty.childType(mod).intInfo(mod).signedness) { .signed => if (self.hasFeature(.avx2)) .{ .vp_b, .maxs } else null, .unsigned => if (self.hasFeature(.avx2)) .{ .vp_b, .maxu } else null, }, @@ -6746,7 +6744,7 @@ fn genBinOp( }, else => null, }, - 16 => switch (lhs_ty.vectorLen()) { + 16 => switch (lhs_ty.vectorLen(mod)) { 1...8 => switch (air_tag) { .add, .addwrap, @@ -6760,7 +6758,7 @@ fn genBinOp( .bit_and => if (self.hasFeature(.avx)) .{ .vp_, .@"and" } else .{ .p_, .@"and" }, .bit_or => if (self.hasFeature(.avx)) .{ .vp_, .@"or" } else .{ .p_, .@"or" }, .xor => if (self.hasFeature(.avx)) .{ .vp_, .xor } else .{ .p_, .xor }, - .min => switch (lhs_ty.childType().intInfo(mod).signedness) { + .min => switch (lhs_ty.childType(mod).intInfo(mod).signedness) { .signed => if (self.hasFeature(.avx)) .{ .vp_w, .mins } else @@ -6770,7 +6768,7 @@ fn genBinOp( else .{ .p_w, .minu }, }, - .max => switch (lhs_ty.childType().intInfo(mod).signedness) { + .max => switch (lhs_ty.childType(mod).intInfo(mod).signedness) { .signed => if (self.hasFeature(.avx)) .{ .vp_w, .maxs } else @@ -6795,11 +6793,11 @@ fn genBinOp( .bit_and => if (self.hasFeature(.avx2)) .{ .vp_, .@"and" } else null, .bit_or => if (self.hasFeature(.avx2)) .{ .vp_, .@"or" } else null, .xor => if (self.hasFeature(.avx2)) .{ .vp_, .xor } else null, - .min => switch (lhs_ty.childType().intInfo(mod).signedness) { + .min => switch (lhs_ty.childType(mod).intInfo(mod).signedness) { .signed => if (self.hasFeature(.avx2)) .{ .vp_w, .mins } else null, .unsigned => if (self.hasFeature(.avx)) .{ .vp_w, .minu } else null, }, - .max => switch (lhs_ty.childType().intInfo(mod).signedness) { + .max => switch (lhs_ty.childType(mod).intInfo(mod).signedness) { .signed => if (self.hasFeature(.avx2)) .{ .vp_w, .maxs } else null, .unsigned => if (self.hasFeature(.avx2)) .{ .vp_w, .maxu } else null, }, @@ -6807,7 +6805,7 @@ fn genBinOp( }, else => null, }, - 32 => switch (lhs_ty.vectorLen()) { + 32 => switch (lhs_ty.vectorLen(mod)) { 1...4 => switch (air_tag) { .add, .addwrap, @@ -6826,7 +6824,7 @@ fn genBinOp( .bit_and => if (self.hasFeature(.avx)) .{ .vp_, .@"and" } else .{ .p_, .@"and" }, .bit_or => if (self.hasFeature(.avx)) .{ .vp_, .@"or" } else .{ .p_, .@"or" }, .xor => if (self.hasFeature(.avx)) .{ .vp_, .xor } else .{ .p_, .xor }, - .min => switch (lhs_ty.childType().intInfo(mod).signedness) { + .min => switch (lhs_ty.childType(mod).intInfo(mod).signedness) { .signed => if (self.hasFeature(.avx)) .{ .vp_d, .mins } else if (self.hasFeature(.sse4_1)) @@ -6840,7 +6838,7 @@ fn genBinOp( else null, }, - .max => switch (lhs_ty.childType().intInfo(mod).signedness) { + .max => switch (lhs_ty.childType(mod).intInfo(mod).signedness) { .signed => if (self.hasFeature(.avx)) .{ .vp_d, 
.maxs } else if (self.hasFeature(.sse4_1)) @@ -6869,11 +6867,11 @@ fn genBinOp( .bit_and => if (self.hasFeature(.avx2)) .{ .vp_, .@"and" } else null, .bit_or => if (self.hasFeature(.avx2)) .{ .vp_, .@"or" } else null, .xor => if (self.hasFeature(.avx2)) .{ .vp_, .xor } else null, - .min => switch (lhs_ty.childType().intInfo(mod).signedness) { + .min => switch (lhs_ty.childType(mod).intInfo(mod).signedness) { .signed => if (self.hasFeature(.avx2)) .{ .vp_d, .mins } else null, .unsigned => if (self.hasFeature(.avx)) .{ .vp_d, .minu } else null, }, - .max => switch (lhs_ty.childType().intInfo(mod).signedness) { + .max => switch (lhs_ty.childType(mod).intInfo(mod).signedness) { .signed => if (self.hasFeature(.avx2)) .{ .vp_d, .maxs } else null, .unsigned => if (self.hasFeature(.avx2)) .{ .vp_d, .maxu } else null, }, @@ -6881,7 +6879,7 @@ fn genBinOp( }, else => null, }, - 64 => switch (lhs_ty.vectorLen()) { + 64 => switch (lhs_ty.vectorLen(mod)) { 1...2 => switch (air_tag) { .add, .addwrap, @@ -6910,8 +6908,8 @@ fn genBinOp( }, else => null, }, - .Float => switch (lhs_ty.childType().floatBits(self.target.*)) { - 16 => if (self.hasFeature(.f16c)) switch (lhs_ty.vectorLen()) { + .Float => switch (lhs_ty.childType(mod).floatBits(self.target.*)) { + 16 => if (self.hasFeature(.f16c)) switch (lhs_ty.vectorLen(mod)) { 1 => { const tmp_reg = (try self.register_manager.allocReg(null, sse)).to128(); const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg); @@ -7086,7 +7084,7 @@ fn genBinOp( }, else => null, } else null, - 32 => switch (lhs_ty.vectorLen()) { + 32 => switch (lhs_ty.vectorLen(mod)) { 1 => switch (air_tag) { .add => if (self.hasFeature(.avx)) .{ .v_ss, .add } else .{ ._ss, .add }, .sub => if (self.hasFeature(.avx)) .{ .v_ss, .sub } else .{ ._ss, .sub }, @@ -7124,7 +7122,7 @@ fn genBinOp( } else null, else => null, }, - 64 => switch (lhs_ty.vectorLen()) { + 64 => switch (lhs_ty.vectorLen(mod)) { 1 => switch (air_tag) { .add => if (self.hasFeature(.avx)) .{ .v_sd, .add } else .{ ._sd, .add }, .sub => if (self.hasFeature(.avx)) .{ .v_sd, .sub } else .{ ._sd, .sub }, @@ -7236,14 +7234,14 @@ fn genBinOp( 16, 80, 128 => null, else => unreachable, }, - .Vector => switch (lhs_ty.childType().zigTypeTag(mod)) { - .Float => switch (lhs_ty.childType().floatBits(self.target.*)) { - 32 => switch (lhs_ty.vectorLen()) { + .Vector => switch (lhs_ty.childType(mod).zigTypeTag(mod)) { + .Float => switch (lhs_ty.childType(mod).floatBits(self.target.*)) { + 32 => switch (lhs_ty.vectorLen(mod)) { 1 => .{ .v_ss, .cmp }, 2...8 => .{ .v_ps, .cmp }, else => null, }, - 64 => switch (lhs_ty.vectorLen()) { + 64 => switch (lhs_ty.vectorLen(mod)) { 1 => .{ .v_sd, .cmp }, 2...4 => .{ .v_pd, .cmp }, else => null, @@ -7270,13 +7268,13 @@ fn genBinOp( 16, 80, 128 => null, else => unreachable, }, - .Vector => switch (lhs_ty.childType().zigTypeTag(mod)) { - .Float => switch (lhs_ty.childType().floatBits(self.target.*)) { - 32 => switch (lhs_ty.vectorLen()) { + .Vector => switch (lhs_ty.childType(mod).zigTypeTag(mod)) { + .Float => switch (lhs_ty.childType(mod).floatBits(self.target.*)) { + 32 => switch (lhs_ty.vectorLen(mod)) { 1...8 => .{ .v_ps, .blendv }, else => null, }, - 64 => switch (lhs_ty.vectorLen()) { + 64 => switch (lhs_ty.vectorLen(mod)) { 1...4 => .{ .v_pd, .blendv }, else => null, }, @@ -7304,14 +7302,14 @@ fn genBinOp( 16, 80, 128 => null, else => unreachable, }, - .Vector => switch (lhs_ty.childType().zigTypeTag(mod)) { - .Float => switch (lhs_ty.childType().floatBits(self.target.*)) { - 32 => switch 
(lhs_ty.vectorLen()) { + .Vector => switch (lhs_ty.childType(mod).zigTypeTag(mod)) { + .Float => switch (lhs_ty.childType(mod).floatBits(self.target.*)) { + 32 => switch (lhs_ty.vectorLen(mod)) { 1 => .{ ._ss, .cmp }, 2...4 => .{ ._ps, .cmp }, else => null, }, - 64 => switch (lhs_ty.vectorLen()) { + 64 => switch (lhs_ty.vectorLen(mod)) { 1 => .{ ._sd, .cmp }, 2 => .{ ._pd, .cmp }, else => null, @@ -7337,13 +7335,13 @@ fn genBinOp( 16, 80, 128 => null, else => unreachable, }, - .Vector => switch (lhs_ty.childType().zigTypeTag(mod)) { - .Float => switch (lhs_ty.childType().floatBits(self.target.*)) { - 32 => switch (lhs_ty.vectorLen()) { + .Vector => switch (lhs_ty.childType(mod).zigTypeTag(mod)) { + .Float => switch (lhs_ty.childType(mod).floatBits(self.target.*)) { + 32 => switch (lhs_ty.vectorLen(mod)) { 1...4 => .{ ._ps, .blendv }, else => null, }, - 64 => switch (lhs_ty.vectorLen()) { + 64 => switch (lhs_ty.vectorLen(mod)) { 1...2 => .{ ._pd, .blendv }, else => null, }, @@ -7368,13 +7366,13 @@ fn genBinOp( 16, 80, 128 => null, else => unreachable, }, - .Vector => switch (lhs_ty.childType().zigTypeTag(mod)) { - .Float => switch (lhs_ty.childType().floatBits(self.target.*)) { - 32 => switch (lhs_ty.vectorLen()) { + .Vector => switch (lhs_ty.childType(mod).zigTypeTag(mod)) { + .Float => switch (lhs_ty.childType(mod).floatBits(self.target.*)) { + 32 => switch (lhs_ty.vectorLen(mod)) { 1...4 => .{ ._ps, .@"and" }, else => null, }, - 64 => switch (lhs_ty.vectorLen()) { + 64 => switch (lhs_ty.vectorLen(mod)) { 1...2 => .{ ._pd, .@"and" }, else => null, }, @@ -7398,13 +7396,13 @@ fn genBinOp( 16, 80, 128 => null, else => unreachable, }, - .Vector => switch (lhs_ty.childType().zigTypeTag(mod)) { - .Float => switch (lhs_ty.childType().floatBits(self.target.*)) { - 32 => switch (lhs_ty.vectorLen()) { + .Vector => switch (lhs_ty.childType(mod).zigTypeTag(mod)) { + .Float => switch (lhs_ty.childType(mod).floatBits(self.target.*)) { + 32 => switch (lhs_ty.vectorLen(mod)) { 1...4 => .{ ._ps, .andn }, else => null, }, - 64 => switch (lhs_ty.vectorLen()) { + 64 => switch (lhs_ty.vectorLen(mod)) { 1...2 => .{ ._pd, .andn }, else => null, }, @@ -7428,13 +7426,13 @@ fn genBinOp( 16, 80, 128 => null, else => unreachable, }, - .Vector => switch (lhs_ty.childType().zigTypeTag(mod)) { - .Float => switch (lhs_ty.childType().floatBits(self.target.*)) { - 32 => switch (lhs_ty.vectorLen()) { + .Vector => switch (lhs_ty.childType(mod).zigTypeTag(mod)) { + .Float => switch (lhs_ty.childType(mod).floatBits(self.target.*)) { + 32 => switch (lhs_ty.vectorLen(mod)) { 1...4 => .{ ._ps, .@"or" }, else => null, }, - 64 => switch (lhs_ty.vectorLen()) { + 64 => switch (lhs_ty.vectorLen(mod)) { 1...2 => .{ ._pd, .@"or" }, else => null, }, @@ -7586,11 +7584,7 @@ fn genBinOpMir( .load_got, .load_tlv, => { - var ptr_pl = Type.Payload.ElemType{ - .base = .{ .tag = .single_const_pointer }, - .data = ty, - }; - const ptr_ty = Type.initPayload(&ptr_pl.base); + const ptr_ty = try mod.singleConstPtrType(ty); const addr_reg = try self.copyToTmpRegister(ptr_ty, src_mcv.address()); return self.genBinOpMir(mir_tag, ty, dst_mcv, .{ .indirect = .{ .reg = addr_reg }, @@ -8058,7 +8052,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const fn_ty = switch (ty.zigTypeTag(mod)) { .Fn => ty, - .Pointer => ty.childType(), + .Pointer => ty.childType(mod), else => unreachable, }; @@ -8506,10 +8500,11 @@ fn airTry(self: *Self, inst: Air.Inst.Index) !void { } fn airTryPtr(self: *Self, inst: Air.Inst.Index) !void { + 
const mod = self.bin_file.options.module.?; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.TryPtr, ty_pl.payload); const body = self.air.extra[extra.end..][0..extra.data.body_len]; - const err_union_ty = self.typeOf(extra.data.ptr).childType(); + const err_union_ty = self.typeOf(extra.data.ptr).childType(mod); const result = try self.genTry(inst, extra.data.ptr, body, err_union_ty, true); return self.finishAir(inst, result, .{ .none, .none, .none }); } @@ -8683,8 +8678,7 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC try self.spillEflagsIfOccupied(); self.eflags_inst = inst; - var pl_buf: Type.Payload.ElemType = undefined; - const pl_ty = opt_ty.optionalChild(&pl_buf); + const pl_ty = opt_ty.optionalChild(mod); var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined; const some_info: struct { off: i32, ty: Type } = if (opt_ty.optionalReprIsPayload(mod)) @@ -8775,9 +8769,8 @@ fn isNullPtr(self: *Self, inst: Air.Inst.Index, ptr_ty: Type, ptr_mcv: MCValue) try self.spillEflagsIfOccupied(); self.eflags_inst = inst; - const opt_ty = ptr_ty.childType(); - var pl_buf: Type.Payload.ElemType = undefined; - const pl_ty = opt_ty.optionalChild(&pl_buf); + const opt_ty = ptr_ty.childType(mod); + const pl_ty = opt_ty.optionalChild(mod); var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined; const some_info: struct { off: i32, ty: Type } = if (opt_ty.optionalReprIsPayload(mod)) @@ -8919,6 +8912,7 @@ fn airIsErr(self: *Self, inst: Air.Inst.Index) !void { } fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const un_op = self.air.instructions.items(.data)[inst].un_op; const operand_ptr = try self.resolveInst(un_op); @@ -8939,7 +8933,7 @@ fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void { const ptr_ty = self.typeOf(un_op); try self.load(operand, ptr_ty, operand_ptr); - const result = try self.isErr(inst, ptr_ty.childType(), operand); + const result = try self.isErr(inst, ptr_ty.childType(mod), operand); return self.finishAir(inst, result, .{ un_op, .none, .none }); } @@ -8953,6 +8947,7 @@ fn airIsNonErr(self: *Self, inst: Air.Inst.Index) !void { } fn airIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const un_op = self.air.instructions.items(.data)[inst].un_op; const operand_ptr = try self.resolveInst(un_op); @@ -8973,7 +8968,7 @@ fn airIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !void { const ptr_ty = self.typeOf(un_op); try self.load(operand, ptr_ty, operand_ptr); - const result = try self.isNonErr(inst, ptr_ty.childType(), operand); + const result = try self.isNonErr(inst, ptr_ty.childType(mod), operand); return self.finishAir(inst, result, .{ un_op, .none, .none }); } @@ -9452,9 +9447,9 @@ fn moveStrategy(self: *Self, ty: Type, aligned: bool) !MoveStrategy { else if (aligned) .{ ._, .movdqa } else .{ ._, .movdqu } }, else => {}, }, - .Vector => switch (ty.childType().zigTypeTag(mod)) { - .Int => switch (ty.childType().intInfo(mod).bits) { - 8 => switch (ty.vectorLen()) { + .Vector => switch (ty.childType(mod).zigTypeTag(mod)) { + .Int => switch (ty.childType(mod).intInfo(mod).bits) { + 8 => switch (ty.vectorLen(mod)) { 1 => if (self.hasFeature(.avx)) return .{ .vex_insert_extract = .{ .insert = .{ .vp_b, .insr }, .extract = .{ .vp_b, .extr }, @@ -9484,7 +9479,7 @@ fn moveStrategy(self: *Self, ty: Type, aligned: bool) !MoveStrategy { return .{ .move = if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu } 
}, else => {}, }, - 16 => switch (ty.vectorLen()) { + 16 => switch (ty.vectorLen(mod)) { 1 => return if (self.hasFeature(.avx)) .{ .vex_insert_extract = .{ .insert = .{ .vp_w, .insr }, .extract = .{ .vp_w, .extr }, @@ -9507,7 +9502,7 @@ fn moveStrategy(self: *Self, ty: Type, aligned: bool) !MoveStrategy { return .{ .move = if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu } }, else => {}, }, - 32 => switch (ty.vectorLen()) { + 32 => switch (ty.vectorLen(mod)) { 1 => return .{ .move = if (self.hasFeature(.avx)) .{ .v_d, .mov } else @@ -9523,7 +9518,7 @@ fn moveStrategy(self: *Self, ty: Type, aligned: bool) !MoveStrategy { return .{ .move = if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu } }, else => {}, }, - 64 => switch (ty.vectorLen()) { + 64 => switch (ty.vectorLen(mod)) { 1 => return .{ .move = if (self.hasFeature(.avx)) .{ .v_q, .mov } else @@ -9535,7 +9530,7 @@ fn moveStrategy(self: *Self, ty: Type, aligned: bool) !MoveStrategy { return .{ .move = if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu } }, else => {}, }, - 128 => switch (ty.vectorLen()) { + 128 => switch (ty.vectorLen(mod)) { 1 => return .{ .move = if (self.hasFeature(.avx)) if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu } else if (aligned) .{ ._, .movdqa } else .{ ._, .movdqu } }, @@ -9543,15 +9538,15 @@ fn moveStrategy(self: *Self, ty: Type, aligned: bool) !MoveStrategy { return .{ .move = if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu } }, else => {}, }, - 256 => switch (ty.vectorLen()) { + 256 => switch (ty.vectorLen(mod)) { 1 => if (self.hasFeature(.avx)) return .{ .move = if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu } }, else => {}, }, else => {}, }, - .Float => switch (ty.childType().floatBits(self.target.*)) { - 16 => switch (ty.vectorLen()) { + .Float => switch (ty.childType(mod).floatBits(self.target.*)) { + 16 => switch (ty.vectorLen(mod)) { 1 => return if (self.hasFeature(.avx)) .{ .vex_insert_extract = .{ .insert = .{ .vp_w, .insr }, .extract = .{ .vp_w, .extr }, @@ -9574,7 +9569,7 @@ fn moveStrategy(self: *Self, ty: Type, aligned: bool) !MoveStrategy { return .{ .move = if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu } }, else => {}, }, - 32 => switch (ty.vectorLen()) { + 32 => switch (ty.vectorLen(mod)) { 1 => return .{ .move = if (self.hasFeature(.avx)) .{ .v_ss, .mov } else @@ -9590,7 +9585,7 @@ fn moveStrategy(self: *Self, ty: Type, aligned: bool) !MoveStrategy { return .{ .move = if (aligned) .{ .v_ps, .mova } else .{ .v_ps, .movu } }, else => {}, }, - 64 => switch (ty.vectorLen()) { + 64 => switch (ty.vectorLen(mod)) { 1 => return .{ .move = if (self.hasFeature(.avx)) .{ .v_sd, .mov } else @@ -9602,7 +9597,7 @@ fn moveStrategy(self: *Self, ty: Type, aligned: bool) !MoveStrategy { return .{ .move = if (aligned) .{ .v_pd, .mova } else .{ .v_pd, .movu } }, else => {}, }, - 128 => switch (ty.vectorLen()) { + 128 => switch (ty.vectorLen(mod)) { 1 => return .{ .move = if (self.hasFeature(.avx)) if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu } else if (aligned) .{ ._, .movdqa } else .{ ._, .movdqu } }, @@ -10248,8 +10243,8 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void { const slice_ty = self.typeOfIndex(inst); const ptr_ty = self.typeOf(ty_op.operand); const ptr = try self.resolveInst(ty_op.operand); - const array_ty = ptr_ty.childType(); - const array_len = array_ty.arrayLen(); + const array_ty = ptr_ty.childType(mod); + const array_len = array_ty.arrayLen(mod); const frame_index = try self.allocFrameIndex(FrameAlloc.initType(slice_ty, mod)); try 
self.genSetMem(.{ .frame = frame_index }, 0, ptr_ty, ptr); @@ -10790,16 +10785,16 @@ fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void { const elem_abi_size = @intCast(u31, elem_ty.abiSize(mod)); if (elem_abi_size == 1) { - const ptr: MCValue = switch (dst_ptr_ty.ptrSize()) { + const ptr: MCValue = switch (dst_ptr_ty.ptrSize(mod)) { // TODO: this only handles slices stored in the stack .Slice => dst_ptr, .One => dst_ptr, .C, .Many => unreachable, }; - const len: MCValue = switch (dst_ptr_ty.ptrSize()) { + const len: MCValue = switch (dst_ptr_ty.ptrSize(mod)) { // TODO: this only handles slices stored in the stack .Slice => dst_ptr.address().offset(8).deref(), - .One => .{ .immediate = dst_ptr_ty.childType().arrayLen() }, + .One => .{ .immediate = dst_ptr_ty.childType(mod).arrayLen(mod) }, .C, .Many => unreachable, }; const len_lock: ?RegisterLock = switch (len) { @@ -10815,7 +10810,7 @@ fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void { // Store the first element, and then rely on memcpy copying forwards. // Length zero requires a runtime check - so we handle arrays specially // here to elide it. - switch (dst_ptr_ty.ptrSize()) { + switch (dst_ptr_ty.ptrSize(mod)) { .Slice => { var buf: Type.SlicePtrFieldTypeBuffer = undefined; const slice_ptr_ty = dst_ptr_ty.slicePtrFieldType(&buf); @@ -10858,13 +10853,9 @@ fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void { try self.performReloc(skip_reloc); }, .One => { - var elem_ptr_pl = Type.Payload.ElemType{ - .base = .{ .tag = .single_mut_pointer }, - .data = elem_ty, - }; - const elem_ptr_ty = Type.initPayload(&elem_ptr_pl.base); + const elem_ptr_ty = try mod.singleMutPtrType(elem_ty); - const len = dst_ptr_ty.childType().arrayLen(); + const len = dst_ptr_ty.childType(mod).arrayLen(mod); assert(len != 0); // prevented by Sema try self.store(elem_ptr_ty, dst_ptr, src_val); @@ -10889,6 +10880,7 @@ fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void { } fn airMemcpy(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const dst_ptr = try self.resolveInst(bin_op.lhs); @@ -10906,9 +10898,9 @@ fn airMemcpy(self: *Self, inst: Air.Inst.Index) !void { }; defer if (src_ptr_lock) |lock| self.register_manager.unlockReg(lock); - const len: MCValue = switch (dst_ptr_ty.ptrSize()) { + const len: MCValue = switch (dst_ptr_ty.ptrSize(mod)) { .Slice => dst_ptr.address().offset(8).deref(), - .One => .{ .immediate = dst_ptr_ty.childType().arrayLen() }, + .One => .{ .immediate = dst_ptr_ty.childType(mod).arrayLen(mod) }, .C, .Many => unreachable, }; const len_lock: ?RegisterLock = switch (len) { @@ -11059,7 +11051,7 @@ fn airSplat(self: *Self, inst: Air.Inst.Index) !void { switch (scalar_ty.zigTypeTag(mod)) { else => {}, .Float => switch (scalar_ty.floatBits(self.target.*)) { - 32 => switch (vector_ty.vectorLen()) { + 32 => switch (vector_ty.vectorLen(mod)) { 1 => { if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) break :result src_mcv; const dst_reg = try self.register_manager.allocReg(inst, dst_rc); @@ -11139,7 +11131,7 @@ fn airSplat(self: *Self, inst: Air.Inst.Index) !void { }, else => {}, }, - 64 => switch (vector_ty.vectorLen()) { + 64 => switch (vector_ty.vectorLen(mod)) { 1 => { if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) break :result src_mcv; const dst_reg = try self.register_manager.allocReg(inst, dst_rc); @@ -11205,7 +11197,7 @@ fn airSplat(self: *Self, inst: 
Air.Inst.Index) !void { }, else => {}, }, - 128 => switch (vector_ty.vectorLen()) { + 128 => switch (vector_ty.vectorLen(mod)) { 1 => { if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) break :result src_mcv; const dst_reg = try self.register_manager.allocReg(inst, dst_rc); @@ -11271,7 +11263,7 @@ fn airReduce(self: *Self, inst: Air.Inst.Index) !void { fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const result_ty = self.typeOfIndex(inst); - const len = @intCast(usize, result_ty.arrayLen()); + const len = @intCast(usize, result_ty.arrayLen(mod)); const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]); const result: MCValue = result: { @@ -11375,7 +11367,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { .Array => { const frame_index = try self.allocFrameIndex(FrameAlloc.initType(result_ty, mod)); - const elem_ty = result_ty.childType(); + const elem_ty = result_ty.childType(mod); const elem_size = @intCast(u32, elem_ty.abiSize(mod)); for (elements, 0..) |elem, elem_i| { @@ -11387,7 +11379,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { const elem_off = @intCast(i32, elem_size * elem_i); try self.genSetMem(.{ .frame = frame_index }, elem_off, elem_ty, mat_elem_mcv); } - if (result_ty.sentinel()) |sentinel| try self.genSetMem( + if (result_ty.sentinel(mod)) |sentinel| try self.genSetMem( .{ .frame = frame_index }, @intCast(i32, elem_size * elements.len), elem_ty, @@ -11512,14 +11504,14 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void { 16, 80, 128 => null, else => unreachable, }, - .Vector => switch (ty.childType().zigTypeTag(mod)) { - .Float => switch (ty.childType().floatBits(self.target.*)) { - 32 => switch (ty.vectorLen()) { + .Vector => switch (ty.childType(mod).zigTypeTag(mod)) { + .Float => switch (ty.childType(mod).floatBits(self.target.*)) { + 32 => switch (ty.vectorLen(mod)) { 1 => .{ .v_ss, .fmadd132 }, 2...8 => .{ .v_ps, .fmadd132 }, else => null, }, - 64 => switch (ty.vectorLen()) { + 64 => switch (ty.vectorLen(mod)) { 1 => .{ .v_sd, .fmadd132 }, 2...4 => .{ .v_pd, .fmadd132 }, else => null, @@ -11539,14 +11531,14 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void { 16, 80, 128 => null, else => unreachable, }, - .Vector => switch (ty.childType().zigTypeTag(mod)) { - .Float => switch (ty.childType().floatBits(self.target.*)) { - 32 => switch (ty.vectorLen()) { + .Vector => switch (ty.childType(mod).zigTypeTag(mod)) { + .Float => switch (ty.childType(mod).floatBits(self.target.*)) { + 32 => switch (ty.vectorLen(mod)) { 1 => .{ .v_ss, .fmadd213 }, 2...8 => .{ .v_ps, .fmadd213 }, else => null, }, - 64 => switch (ty.vectorLen()) { + 64 => switch (ty.vectorLen(mod)) { 1 => .{ .v_sd, .fmadd213 }, 2...4 => .{ .v_pd, .fmadd213 }, else => null, @@ -11566,14 +11558,14 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void { 16, 80, 128 => null, else => unreachable, }, - .Vector => switch (ty.childType().zigTypeTag(mod)) { - .Float => switch (ty.childType().floatBits(self.target.*)) { - 32 => switch (ty.vectorLen()) { + .Vector => switch (ty.childType(mod).zigTypeTag(mod)) { + .Float => switch (ty.childType(mod).floatBits(self.target.*)) { + 32 => switch (ty.vectorLen(mod)) { 1 => .{ .v_ss, .fmadd231 }, 2...8 => .{ .v_ps, .fmadd231 }, else => null, }, - 64 => switch (ty.vectorLen()) { + 64 => switch (ty.vectorLen(mod)) { 1 => .{ .v_sd, .fmadd231 }, 2...4 => .{ .v_pd, .fmadd231 }, 
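Reader's note (not part of the patch): every CodeGen.zig hunk above applies one mechanical migration. Type queries that used to read an inline payload hanging off the Type value (childType(), vectorLen(), ptrInfo(), sentinel(), arrayLen(), ...) now take the owning *Module, because the type data has moved into module-owned interned storage addressed by the new ip_index field. The sketch below is illustrative only; its struct layouts are invented stand-ins and far simpler than the compiler's real Module and InternPool.

    const std = @import("std");

    // Hypothetical stand-ins for the real Module/InternPool.
    const Key = union(enum) {
        int: struct { bits: u16 },
        vector: struct { len: u32, child: usize },
    };

    const Module = struct {
        // Interned type descriptions, indexed by ip_index.
        items: []const Key,
    };

    const Type = struct {
        ip_index: usize,

        // Before: ty.childType() read a payload stored in the Type itself.
        // After: the data lives in the Module, so every query threads it.
        fn childType(ty: Type, mod: *const Module) Type {
            return .{ .ip_index = mod.items[ty.ip_index].vector.child };
        }

        fn vectorLen(ty: Type, mod: *const Module) u32 {
            return mod.items[ty.ip_index].vector.len;
        }
    };

    test "threaded type queries" {
        const mod = Module{ .items = &.{
            .{ .int = .{ .bits = 8 } }, // ip_index 0: u8
            .{ .vector = .{ .len = 16, .child = 0 } }, // ip_index 1: @Vector(16, u8)
        } };
        const vec: Type = .{ .ip_index = 1 };
        try std.testing.expectEqual(@as(u32, 16), vec.vectorLen(&mod));
        try std.testing.expectEqual(@as(usize, 0), vec.childType(&mod).ip_index);
    }

The same reasoning explains the diffs that delete stack-allocated Type.Payload buffers in favor of constructors such as mod.vectorType, mod.singleConstPtrType, and mod.singleMutPtrType: once types are interned, they are created through the Module rather than assembled in caller-owned memory.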
diff --git a/src/arch/x86_64/abi.zig b/src/arch/x86_64/abi.zig
index c8d20c73fa..ea75a1f4d2 100644
--- a/src/arch/x86_64/abi.zig
+++ b/src/arch/x86_64/abi.zig
@@ -76,7 +76,7 @@ pub fn classifySystemV(ty: Type, mod: *const Module, ctx: Context) [8]Class {
     };
     var result = [1]Class{.none} ** 8;
     switch (ty.zigTypeTag(mod)) {
-        .Pointer => switch (ty.ptrSize()) {
+        .Pointer => switch (ty.ptrSize(mod)) {
             .Slice => {
                 result[0] = .integer;
                 result[1] = .integer;
@@ -158,8 +158,8 @@ pub fn classifySystemV(ty: Type, mod: *const Module, ctx: Context) [8]Class {
             else => unreachable,
         },
         .Vector => {
-            const elem_ty = ty.childType();
-            const bits = elem_ty.bitSize(mod) * ty.arrayLen();
+            const elem_ty = ty.childType(mod);
+            const bits = elem_ty.bitSize(mod) * ty.arrayLen(mod);
             if (bits <= 64) return .{
                 .sse,  .none, .none, .none,
                 .none, .none, .none, .none,
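Reader's note (not part of the patch): the classifySystemV hunk computes a vector's total width as elem_ty.bitSize(mod) * ty.arrayLen(mod), and a vector of at most 64 bits classifies as a single SSE eightbyte. A minimal sketch of just that rule follows; the two-way result type is invented for brevity, since the real classifier fills an eight-slot [8]Class array and handles many larger cases.

    // Sketch only: models the `bits <= 64` branch from the hunk above.
    fn classifySmallVector(elem_bits: u64, len: u64) enum { sse, needs_more } {
        const bits = elem_bits * len;
        if (bits <= 64) return .sse; // whole vector fits in one SSE eightbyte
        return .needs_more; // wider vectors take the later branches
    }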
diff --git a/src/codegen.zig b/src/codegen.zig
index c9e2c6c265..a807400502 100644
--- a/src/codegen.zig
+++ b/src/codegen.zig
@@ -230,7 +230,7 @@ pub fn generateSymbol(
         .Array => switch (typed_value.val.tag()) {
             .bytes => {
                 const bytes = typed_value.val.castTag(.bytes).?.data;
-                const len = @intCast(usize, typed_value.ty.arrayLenIncludingSentinel());
+                const len = @intCast(usize, typed_value.ty.arrayLenIncludingSentinel(mod));
                 // The bytes payload already includes the sentinel, if any
                 try code.ensureUnusedCapacity(len);
                 code.appendSliceAssumeCapacity(bytes[0..len]);
@@ -241,7 +241,7 @@ pub fn generateSymbol(
                 const bytes = mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len];
                 try code.ensureUnusedCapacity(bytes.len + 1);
                 code.appendSliceAssumeCapacity(bytes);
-                if (typed_value.ty.sentinel()) |sent_val| {
+                if (typed_value.ty.sentinel(mod)) |sent_val| {
                     const byte = @intCast(u8, sent_val.toUnsignedInt(mod));
                     code.appendAssumeCapacity(byte);
                 }
@@ -249,8 +249,8 @@ pub fn generateSymbol(
             },
             .aggregate => {
                 const elem_vals = typed_value.val.castTag(.aggregate).?.data;
-                const elem_ty = typed_value.ty.elemType();
-                const len = @intCast(usize, typed_value.ty.arrayLenIncludingSentinel());
+                const elem_ty = typed_value.ty.childType(mod);
+                const len = @intCast(usize, typed_value.ty.arrayLenIncludingSentinel(mod));
                 for (elem_vals[0..len]) |elem_val| {
                     switch (try generateSymbol(bin_file, src_loc, .{
                         .ty = elem_ty,
@@ -264,9 +264,9 @@ pub fn generateSymbol(
             },
             .repeated => {
                 const array = typed_value.val.castTag(.repeated).?.data;
-                const elem_ty = typed_value.ty.childType();
-                const sentinel = typed_value.ty.sentinel();
-                const len = typed_value.ty.arrayLen();
+                const elem_ty = typed_value.ty.childType(mod);
+                const sentinel = typed_value.ty.sentinel(mod);
+                const len = typed_value.ty.arrayLen(mod);

                 var index: u64 = 0;
                 while (index < len) : (index += 1) {
@@ -292,8 +292,8 @@ pub fn generateSymbol(
                 return Result.ok;
             },
             .empty_array_sentinel => {
-                const elem_ty = typed_value.ty.childType();
-                const sentinel_val = typed_value.ty.sentinel().?;
+                const elem_ty = typed_value.ty.childType(mod);
+                const sentinel_val = typed_value.ty.sentinel(mod).?;
                 switch (try generateSymbol(bin_file, src_loc, .{
                     .ty = elem_ty,
                     .val = sentinel_val,
@@ -618,8 +618,7 @@ pub fn generateSymbol(
             return Result.ok;
         },
         .Optional => {
-            var opt_buf: Type.Payload.ElemType = undefined;
-            const payload_type = typed_value.ty.optionalChild(&opt_buf);
+            const payload_type = typed_value.ty.optionalChild(mod);
             const is_pl = !typed_value.val.isNull(mod);
             const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse return error.Overflow;
@@ -751,7 +750,7 @@ pub fn generateSymbol(
         .Vector => switch (typed_value.val.tag()) {
             .bytes => {
                 const bytes = typed_value.val.castTag(.bytes).?.data;
-                const len = math.cast(usize, typed_value.ty.arrayLen()) orelse return error.Overflow;
+                const len = math.cast(usize, typed_value.ty.arrayLen(mod)) orelse return error.Overflow;
                 const padding = math.cast(usize, typed_value.ty.abiSize(mod) - len) orelse
                     return error.Overflow;
                 try code.ensureUnusedCapacity(len + padding);
@@ -761,8 +760,8 @@ pub fn generateSymbol(
             },
             .aggregate => {
                 const elem_vals = typed_value.val.castTag(.aggregate).?.data;
-                const elem_ty = typed_value.ty.elemType();
-                const len = math.cast(usize, typed_value.ty.arrayLen()) orelse return error.Overflow;
+                const elem_ty = typed_value.ty.childType(mod);
+                const len = math.cast(usize, typed_value.ty.arrayLen(mod)) orelse return error.Overflow;
                 const padding = math.cast(usize, typed_value.ty.abiSize(mod) -
                     (math.divCeil(u64, elem_ty.bitSize(mod) * len, 8) catch |err| switch (err) {
                     error.DivisionByZero => unreachable,
@@ -782,8 +781,8 @@ pub fn generateSymbol(
             },
             .repeated => {
                 const array = typed_value.val.castTag(.repeated).?.data;
-                const elem_ty = typed_value.ty.childType();
-                const len = typed_value.ty.arrayLen();
+                const elem_ty = typed_value.ty.childType(mod);
+                const len = typed_value.ty.arrayLen(mod);
                 const padding = math.cast(usize, typed_value.ty.abiSize(mod) -
                     (math.divCeil(u64, elem_ty.bitSize(mod) * len, 8) catch |err| switch (err) {
                     error.DivisionByZero => unreachable,
@@ -1188,7 +1187,7 @@ pub fn genTypedValue(

     switch (typed_value.ty.zigTypeTag(mod)) {
         .Void => return GenResult.mcv(.none),
-        .Pointer => switch (typed_value.ty.ptrSize()) {
+        .Pointer => switch (typed_value.ty.ptrSize(mod)) {
             .Slice => {},
             else => {
                 switch (typed_value.val.tag()) {
@@ -1219,9 +1218,8 @@ pub fn genTypedValue(
             if (typed_value.ty.isPtrLikeOptional(mod)) {
                 if (typed_value.val.tag() == .null_value) return GenResult.mcv(.{ .immediate = 0 });

-                var buf: Type.Payload.ElemType = undefined;
                 return genTypedValue(bin_file, src_loc, .{
-                    .ty = typed_value.ty.optionalChild(&buf),
+                    .ty = typed_value.ty.optionalChild(mod),
                     .val = if (typed_value.val.castTag(.opt_payload)) |pl| pl.data else typed_value.val,
                 }, owner_decl_index);
             } else if (typed_value.ty.abiSize(mod) == 1) {
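Reader's note (not part of the patch): one pattern worth calling out in the generateSymbol hunks above is the .repeated case, which lowers an array or vector initialized from a single repeated value by emitting that value's bytes len times, followed by the optional sentinel. The loop below is a simplified model only; appendSlice stands in for the recursive generateSymbol call, and the byte-slice parameters are hypothetical.

    const std = @import("std");

    // Sketch: emit `len` copies of one element's bytes, then the sentinel.
    fn emitRepeated(
        code: *std.ArrayList(u8),
        elem_bytes: []const u8,
        len: u64,
        sentinel_bytes: ?[]const u8,
    ) !void {
        var index: u64 = 0;
        while (index < len) : (index += 1) {
            try code.appendSlice(elem_bytes); // one copy of the repeated element
        }
        if (sentinel_bytes) |s| try code.appendSlice(s); // sentinel, if any
    }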
diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index cd4f36e574..e6ec461e43 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -625,7 +625,9 @@ pub const DeclGen = struct {
                 // Ensure complete type definition is visible before accessing fields.
                 _ = try dg.typeToIndex(field_ptr.container_ty, .complete);

-                var container_ptr_pl = ptr_ty.ptrInfo();
+                var container_ptr_pl: Type.Payload.Pointer = .{
+                    .data = ptr_ty.ptrInfo(mod),
+                };
                 container_ptr_pl.data.pointee_type = field_ptr.container_ty;
                 const container_ptr_ty = Type.initPayload(&container_ptr_pl.base);
@@ -653,7 +655,9 @@ pub const DeclGen = struct {
                         try dg.writeCValue(writer, field);
                     },
                     .byte_offset => |byte_offset| {
-                        var u8_ptr_pl = ptr_ty.ptrInfo();
+                        var u8_ptr_pl: Type.Payload.Pointer = .{
+                            .data = ptr_ty.ptrInfo(mod),
+                        };
                         u8_ptr_pl.data.pointee_type = Type.u8;
                         const u8_ptr_ty = Type.initPayload(&u8_ptr_pl.base);
@@ -692,11 +696,10 @@ pub const DeclGen = struct {
             },
             .elem_ptr => {
                 const elem_ptr = ptr_val.castTag(.elem_ptr).?.data;
-                var elem_ptr_ty_pl: Type.Payload.ElemType = .{
-                    .base = .{ .tag = .c_mut_pointer },
-                    .data = elem_ptr.elem_ty,
-                };
-                const elem_ptr_ty = Type.initPayload(&elem_ptr_ty_pl.base);
+                const elem_ptr_ty = try mod.ptrType(.{
+                    .size = .C,
+                    .elem_type = elem_ptr.elem_ty.ip_index,
+                });

                 try writer.writeAll("&(");
                 try dg.renderParentPtr(writer, elem_ptr.array_ptr, elem_ptr_ty, location);
@@ -704,11 +707,10 @@ pub const DeclGen = struct {
             .opt_payload_ptr, .eu_payload_ptr => {
                 const payload_ptr = ptr_val.cast(Value.Payload.PayloadPtr).?.data;
-                var container_ptr_ty_pl: Type.Payload.ElemType = .{
-                    .base = .{ .tag = .c_mut_pointer },
-                    .data = payload_ptr.container_ty,
-                };
-                const container_ptr_ty = Type.initPayload(&container_ptr_ty_pl.base);
+                const container_ptr_ty = try mod.ptrType(.{
+                    .elem_type = payload_ptr.container_ty.ip_index,
+                    .size = .C,
+                });

                 // Ensure complete type definition is visible before accessing fields.
                 _ = try dg.typeToIndex(payload_ptr.container_ty, .complete);
@@ -794,8 +796,7 @@ pub const DeclGen = struct {
                 return writer.print("){x})", .{try dg.fmtIntLiteral(Type.usize, val, .Other)});
             },
             .Optional => {
-                var opt_buf: Type.Payload.ElemType = undefined;
-                const payload_ty = ty.optionalChild(&opt_buf);
+                const payload_ty = ty.optionalChild(mod);

                 if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
                     return dg.renderValue(writer, Type.bool, val, location);
@@ -889,11 +890,11 @@ pub const DeclGen = struct {
                 return writer.writeAll(" }");
             },
             .Array, .Vector => {
-                const ai = ty.arrayInfo();
+                const ai = ty.arrayInfo(mod);
                 if (ai.elem_type.eql(Type.u8, dg.module)) {
                     var literal = stringLiteral(writer);
                     try literal.start();
-                    const c_len = ty.arrayLenIncludingSentinel();
+                    const c_len = ty.arrayLenIncludingSentinel(mod);
                     var index: u64 = 0;
                     while (index < c_len) : (index += 1)
                         try literal.writeChar(0xaa);
@@ -906,11 +907,11 @@ pub const DeclGen = struct {
                     }

                     try writer.writeByte('{');
-                    const c_len = ty.arrayLenIncludingSentinel();
+                    const c_len = ty.arrayLenIncludingSentinel(mod);
                     var index: u64 = 0;
                     while (index < c_len) : (index += 1) {
                         if (index > 0) try writer.writeAll(", ");
-                        try dg.renderValue(writer, ty.childType(), val, initializer_type);
+                        try dg.renderValue(writer, ty.childType(mod), val, initializer_type);
                     }
                     return writer.writeByte('}');
                 }
@@ -1110,7 +1111,7 @@ pub const DeclGen = struct {
                 // First try specific tag representations for more efficiency.
                 switch (val.tag()) {
                     .undef, .empty_struct_value, .empty_array => {
-                        const ai = ty.arrayInfo();
+                        const ai = ty.arrayInfo(mod);
                         try writer.writeByte('{');
                         if (ai.sentinel) |s| {
                             try dg.renderValue(writer, ai.elem_type, s, initializer_type);
@@ -1128,9 +1129,9 @@ pub const DeclGen = struct {
                             },
                             else => unreachable,
                         };
-                        const sentinel = if (ty.sentinel()) |sentinel| @intCast(u8, sentinel.toUnsignedInt(mod)) else null;
+                        const sentinel = if (ty.sentinel(mod)) |sentinel| @intCast(u8, sentinel.toUnsignedInt(mod)) else null;
                         try writer.print("{s}", .{
-                            fmtStringLiteral(bytes[0..@intCast(usize, ty.arrayLen())], sentinel),
+                            fmtStringLiteral(bytes[0..@intCast(usize, ty.arrayLen(mod))], sentinel),
                         });
                     },
                     else => {
@@ -1142,7 +1143,7 @@ pub const DeclGen = struct {
                 // MSVC throws C2078 if an array of size 65536 or greater is initialized with a string literal
                 const max_string_initializer_len = 65535;

-                const ai = ty.arrayInfo();
+                const ai = ty.arrayInfo(mod);
                 if (ai.elem_type.eql(Type.u8, dg.module)) {
                     if (ai.len <= max_string_initializer_len) {
                         var literal = stringLiteral(writer);
@@ -1198,8 +1199,7 @@ pub const DeclGen = struct {
                 }
             },
             .Optional => {
-                var opt_buf: Type.Payload.ElemType = undefined;
-                const payload_ty = ty.optionalChild(&opt_buf);
+                const payload_ty = ty.optionalChild(mod);

                 const is_null_val = Value.makeBool(val.tag() == .null_value);
                 if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod))
@@ -2410,12 +2410,13 @@ pub fn genGlobalAsm(mod: *Module, writer: anytype) !void {
 }

 pub fn genErrDecls(o: *Object) !void {
+    const mod = o.dg.module;
     const writer = o.writer();

     try writer.writeAll("enum {\n");
     o.indent_writer.pushIndent();
     var max_name_len: usize = 0;
-    for (o.dg.module.error_name_list.items, 0..) |name, value| {
+    for (mod.error_name_list.items, 0..) |name, value| {
         max_name_len = std.math.max(name.len, max_name_len);
         var err_pl = Value.Payload.Error{ .data = .{ .name = name } };
         try o.dg.renderValue(writer, Type.anyerror, Value.initPayload(&err_pl.base), .Other);
@@ -2430,12 +2431,15 @@ pub fn genErrDecls(o: *Object) !void {
     defer o.dg.gpa.free(name_buf);

     @memcpy(name_buf[0..name_prefix.len], name_prefix);
-    for (o.dg.module.error_name_list.items) |name| {
+    for (mod.error_name_list.items) |name| {
         @memcpy(name_buf[name_prefix.len..][0..name.len], name);
         const identifier = name_buf[0 .. name_prefix.len + name.len];

-        var name_ty_pl = Type.Payload.Len{ .base = .{ .tag = .array_u8_sentinel_0 }, .data = name.len };
-        const name_ty = Type.initPayload(&name_ty_pl.base);
+        const name_ty = try mod.arrayType(.{
+            .len = name.len,
+            .child = .u8_type,
+            .sentinel = .zero_u8,
+        });

         var name_pl = Value.Payload.Bytes{ .base = .{ .tag = .bytes }, .data = name };
         const name_val = Value.initPayload(&name_pl.base);
@@ -2448,15 +2452,15 @@ pub fn genErrDecls(o: *Object) !void {
     }

     var name_array_ty_pl = Type.Payload.Array{ .base = .{ .tag = .array }, .data = .{
-        .len = o.dg.module.error_name_list.items.len,
-        .elem_type = Type.initTag(.const_slice_u8_sentinel_0),
+        .len = mod.error_name_list.items.len,
+        .elem_type = Type.const_slice_u8_sentinel_0,
     } };
     const name_array_ty = Type.initPayload(&name_array_ty_pl.base);

     try writer.writeAll("static ");
     try o.dg.renderTypeAndName(writer, name_array_ty, .{ .identifier = array_identifier }, Const, 0, .complete);
     try writer.writeAll(" = {");
-    for (o.dg.module.error_name_list.items, 0..) |name, value| {
+    for (mod.error_name_list.items, 0..) |name, value| {
         if (value != 0) try writer.writeByte(',');

         var len_pl = Value.Payload.U64{ .base = .{ .tag = .int_u64 }, .data = name.len };
@@ -2487,6 +2491,7 @@ fn genExports(o: *Object) !void {
 }

 pub fn genLazyFn(o: *Object, lazy_fn: LazyFnMap.Entry) !void {
+    const mod = o.dg.module;
     const w = o.writer();
     const key = lazy_fn.key_ptr.*;
     const val = lazy_fn.value_ptr;
@@ -2495,7 +2500,7 @@ pub fn genLazyFn(o: *Object, lazy_fn: LazyFnMap.Entry) !void {
         .tag_name => {
             const enum_ty = val.data.tag_name;

-            const name_slice_ty = Type.initTag(.const_slice_u8_sentinel_0);
+            const name_slice_ty = Type.const_slice_u8_sentinel_0;

             try w.writeAll("static ");
             try o.dg.renderType(w, name_slice_ty);
@@ -2514,11 +2519,11 @@ pub fn genLazyFn(o: *Object, lazy_fn: LazyFnMap.Entry) !void {
                 var int_pl: Value.Payload.U64 = undefined;
                 const int_val = tag_val.enumToInt(enum_ty, &int_pl);

-                var name_ty_pl = Type.Payload.Len{
-                    .base = .{ .tag = .array_u8_sentinel_0 },
-                    .data = name.len,
-                };
-                const name_ty = Type.initPayload(&name_ty_pl.base);
+                const name_ty = try mod.arrayType(.{
+                    .len = name.len,
+                    .child = .u8_type,
+                    .sentinel = .zero_u8,
+                });

                 var name_pl = Value.Payload.Bytes{ .base = .{ .tag = .bytes }, .data = name };
                 const name_val = Value.initPayload(&name_pl.base);
@@ -2547,7 +2552,7 @@ pub fn genLazyFn(o: *Object, lazy_fn: LazyFnMap.Entry) !void {
             try w.writeAll("}\n");
         },
         .never_tail, .never_inline => |fn_decl_index| {
-            const fn_decl = o.dg.module.declPtr(fn_decl_index);
+            const fn_decl = mod.declPtr(fn_decl_index);
             const fn_cty = try o.dg.typeToCType(fn_decl.ty, .complete);
             const fn_info = fn_cty.cast(CType.Payload.Function).?.data;
@@ -3150,7 +3155,7 @@ fn airPtrElemPtr(f: *Function, inst: Air.Inst.Index) !CValue {

     const inst_ty = f.typeOfIndex(inst);
     const ptr_ty = f.typeOf(bin_op.lhs);
-    const elem_ty = ptr_ty.childType();
+    const elem_ty = ptr_ty.childType(mod);
     const elem_has_bits = elem_ty.hasRuntimeBitsIgnoreComptime(mod);

     const ptr = try f.resolveInst(bin_op.lhs);
@@ -3166,7 +3171,7 @@ fn airPtrElemPtr(f: *Function, inst: Air.Inst.Index) !CValue {
     try f.renderType(writer, inst_ty);
     try writer.writeByte(')');
     if (elem_has_bits) try writer.writeByte('&');
-    if (elem_has_bits and ptr_ty.ptrSize() == .One) {
+    if (elem_has_bits and ptr_ty.ptrSize(mod) == .One) {
         // It's a pointer to an array, so we need to de-reference.
         try f.writeCValueDeref(writer, ptr);
     } else try f.writeCValue(writer, ptr, .Other);
@@ -3264,7 +3269,7 @@ fn airArrayElemVal(f: *Function, inst: Air.Inst.Index) !CValue {
 fn airAlloc(f: *Function, inst: Air.Inst.Index) !CValue {
     const mod = f.object.dg.module;
     const inst_ty = f.typeOfIndex(inst);
-    const elem_type = inst_ty.elemType();
+    const elem_type = inst_ty.childType(mod);
     if (!elem_type.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return .{ .undef = inst_ty };

     const local = try f.allocLocalValue(
@@ -3280,7 +3285,7 @@ fn airRetPtr(f: *Function, inst: Air.Inst.Index) !CValue {
     const mod = f.object.dg.module;
     const inst_ty = f.typeOfIndex(inst);
-    const elem_ty = inst_ty.elemType();
+    const elem_ty = inst_ty.childType(mod);
     if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return .{ .undef = inst_ty };

     const local = try f.allocLocalValue(
@@ -3323,7 +3328,7 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {
     const ptr_ty = f.typeOf(ty_op.operand);
     const ptr_scalar_ty = ptr_ty.scalarType(mod);
-    const ptr_info = ptr_scalar_ty.ptrInfo().data;
+    const ptr_info = ptr_scalar_ty.ptrInfo(mod);
     const src_ty = ptr_info.pointee_type;

     if (!src_ty.hasRuntimeBitsIgnoreComptime(mod)) {
@@ -3412,7 +3417,7 @@ fn airRet(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValue {
     const writer = f.object.writer();
     const op_inst = Air.refToIndex(un_op);
     const op_ty = f.typeOf(un_op);
-    const ret_ty = if (is_ptr) op_ty.childType() else op_ty;
+    const ret_ty = if (is_ptr) op_ty.childType(mod) else op_ty;
     var lowered_ret_buf: LowerFnRetTyBuffer = undefined;
     const lowered_ret_ty = lowerFnRetTy(ret_ty, &lowered_ret_buf, mod);
@@ -3601,7 +3606,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
     const ptr_ty = f.typeOf(bin_op.lhs);
     const ptr_scalar_ty = ptr_ty.scalarType(mod);
-    const ptr_info = ptr_scalar_ty.ptrInfo().data;
+    const ptr_info = ptr_scalar_ty.ptrInfo(mod);

     const ptr_val = try f.resolveInst(bin_op.lhs);
     const src_ty = f.typeOf(bin_op.rhs);
@@ -4156,7 +4161,7 @@ fn airCall(
     const callee_ty = f.typeOf(pl_op.operand);
     const fn_ty = switch (callee_ty.zigTypeTag(mod)) {
         .Fn => callee_ty,
-        .Pointer => callee_ty.childType(),
+        .Pointer => callee_ty.childType(mod),
         else => unreachable,
     };
@@ -4331,10 +4336,11 @@ fn airTry(f: *Function, inst: Air.Inst.Index) !CValue {
 }

 fn airTryPtr(f: *Function, inst: Air.Inst.Index) !CValue {
+    const mod = f.object.dg.module;
     const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
     const extra = f.air.extraData(Air.TryPtr, ty_pl.payload);
     const body = f.air.extra[extra.end..][0..extra.data.body_len];
-    const err_union_ty = f.typeOf(extra.data.ptr).childType();
+    const err_union_ty = f.typeOf(extra.data.ptr).childType(mod);
     return lowerTry(f, inst, extra.data.ptr, body, err_union_ty, true);
 }
@@ -4826,7 +4832,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
             const is_reg = constraint[1] == '{';
             if (is_reg) {
-                const output_ty = if (output == .none) inst_ty else f.typeOf(output).childType();
+                const output_ty = if (output == .none) inst_ty else f.typeOf(output).childType(mod);
                 try writer.writeAll("register ");
                 const alignment = 0;
                 const local_value = try f.allocLocalValue(output_ty, alignment);
@@ -5061,9 +5067,8 @@ fn airIsNull(
     }

     const operand_ty = f.typeOf(un_op);
-    const optional_ty = if (is_ptr) operand_ty.childType() else operand_ty;
-    var payload_buf: Type.Payload.ElemType = undefined;
-    const payload_ty = optional_ty.optionalChild(&payload_buf);
+    const optional_ty = if (is_ptr) operand_ty.childType(mod) else operand_ty;
+    const payload_ty = optional_ty.optionalChild(mod);
     var slice_ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined;

     const rhs = if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod))
@@ -5097,8 +5102,7 @@ fn airOptionalPayload(f: *Function, inst: Air.Inst.Index) !CValue {
     try reap(f, inst, &.{ty_op.operand});

     const opt_ty = f.typeOf(ty_op.operand);
-    var buf: Type.Payload.ElemType = undefined;
-    const payload_ty = opt_ty.optionalChild(&buf);
+    const payload_ty = opt_ty.optionalChild(mod);

     if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
         return .none;
@@ -5132,10 +5136,10 @@ fn airOptionalPayloadPtr(f: *Function, inst: Air.Inst.Index) !CValue {
     const operand = try f.resolveInst(ty_op.operand);
     try reap(f, inst, &.{ty_op.operand});
     const ptr_ty = f.typeOf(ty_op.operand);
-    const opt_ty = ptr_ty.childType();
+    const opt_ty = ptr_ty.childType(mod);
     const inst_ty = f.typeOfIndex(inst);

-    if (!inst_ty.childType().hasRuntimeBitsIgnoreComptime(mod)) {
+    if (!inst_ty.childType(mod).hasRuntimeBitsIgnoreComptime(mod)) {
         return .{ .undef = inst_ty };
     }
@@ -5163,7 +5167,7 @@ fn airOptionalPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue {
     try reap(f, inst, &.{ty_op.operand});

     const operand_ty = f.typeOf(ty_op.operand);
-    const opt_ty = operand_ty.elemType();
+    const opt_ty = operand_ty.childType(mod);

     const inst_ty = f.typeOfIndex(inst);
@@ -5221,7 +5225,7 @@ fn fieldLocation(
             else
                 .{ .identifier = container_ty.structFieldName(next_field_index) } };
         } else if (container_ty.hasRuntimeBitsIgnoreComptime(mod)) .end else .begin,
-        .Packed => if (field_ptr_ty.ptrInfo().data.host_size == 0)
+        .Packed => if (field_ptr_ty.ptrInfo(mod).host_size == 0)
             .{ .byte_offset = container_ty.packedStructFieldByteOffset(field_index, mod) }
         else
             .begin,
@@ -5243,7 +5247,7 @@ fn fieldLocation(
         },
         .Packed => .begin,
     },
-    .Pointer => switch (container_ty.ptrSize()) {
+    .Pointer => switch (container_ty.ptrSize(mod)) {
         .Slice => switch (field_index) {
             0 => .{ .field = .{ .identifier = "ptr" } },
             1 => .{ .field = .{ .identifier = "len" } },
@@ -5280,7 +5284,7 @@ fn airFieldParentPtr(f: *Function, inst: Air.Inst.Index) !CValue {
     const extra = f.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;

     const container_ptr_ty = f.typeOfIndex(inst);
-    const container_ty = container_ptr_ty.childType();
+    const container_ty = container_ptr_ty.childType(mod);

     const field_ptr_ty = f.typeOf(extra.field_ptr);
     const field_ptr_val = try f.resolveInst(extra.field_ptr);
@@ -5296,7 +5300,9 @@ fn airFieldParentPtr(f: *Function, inst: Air.Inst.Index) !CValue {
     switch (fieldLocation(container_ty, field_ptr_ty, extra.field_index, mod)) {
         .begin => try f.writeCValue(writer, field_ptr_val, .Initializer),
         .field => |field| {
-            var u8_ptr_pl = field_ptr_ty.ptrInfo();
+            var u8_ptr_pl: Type.Payload.Pointer = .{
+                .data = field_ptr_ty.ptrInfo(mod),
+            };
             u8_ptr_pl.data.pointee_type = Type.u8;
             const u8_ptr_ty = Type.initPayload(&u8_ptr_pl.base);
@@ -5311,7 +5317,9 @@ fn airFieldParentPtr(f: *Function, inst: Air.Inst.Index) !CValue {
             try writer.writeAll("))");
         },
         .byte_offset => |byte_offset| {
-            var u8_ptr_pl = field_ptr_ty.ptrInfo();
+            var u8_ptr_pl: Type.Payload.Pointer = .{
+                .data = field_ptr_ty.ptrInfo(mod),
+            };
             u8_ptr_pl.data.pointee_type = Type.u8;
             const u8_ptr_ty = Type.initPayload(&u8_ptr_pl.base);
@@ -5345,7 +5353,7 @@ fn fieldPtr(
     field_index: u32,
 ) !CValue {
     const mod = f.object.dg.module;
-    const container_ty = container_ptr_ty.elemType();
+    const container_ty = container_ptr_ty.childType(mod);
     const field_ptr_ty = f.typeOfIndex(inst);

     // Ensure complete type definition is visible before accessing fields.
@@ -5365,7 +5373,9 @@ fn fieldPtr(
             try f.writeCValueDerefMember(writer, container_ptr_val, field);
         },
         .byte_offset => |byte_offset| {
-            var u8_ptr_pl = field_ptr_ty.ptrInfo();
+            var u8_ptr_pl: Type.Payload.Pointer = .{
+                .data = field_ptr_ty.ptrInfo(mod),
+            };
             u8_ptr_pl.data.pointee_type = Type.u8;
             const u8_ptr_ty = Type.initPayload(&u8_ptr_pl.base);
@@ -5532,7 +5542,7 @@ fn airUnwrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue {
     try reap(f, inst, &.{ty_op.operand});

     const operand_is_ptr = operand_ty.zigTypeTag(mod) == .Pointer;
-    const error_union_ty = if (operand_is_ptr) operand_ty.childType() else operand_ty;
+    const error_union_ty = if (operand_is_ptr) operand_ty.childType(mod) else operand_ty;
     const error_ty = error_union_ty.errorUnionSet();
     const payload_ty = error_union_ty.errorUnionPayload();
     const local = try f.allocLocal(inst, inst_ty);
@@ -5569,7 +5579,7 @@ fn airUnwrapErrUnionPay(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValu
     const operand = try f.resolveInst(ty_op.operand);
     try reap(f, inst, &.{ty_op.operand});
     const operand_ty = f.typeOf(ty_op.operand);
-    const error_union_ty = if (is_ptr) operand_ty.childType() else operand_ty;
+    const error_union_ty = if (is_ptr) operand_ty.childType(mod) else operand_ty;

     const writer = f.object.writer();
     if (!error_union_ty.errorUnionPayload().hasRuntimeBits(mod)) {
@@ -5673,7 +5683,7 @@ fn airErrUnionPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue {
     const writer = f.object.writer();
     const ty_op = f.air.instructions.items(.data)[inst].ty_op;
     const operand = try f.resolveInst(ty_op.operand);
-    const error_union_ty = f.typeOf(ty_op.operand).childType();
+    const error_union_ty = f.typeOf(ty_op.operand).childType(mod);

     const error_ty = error_union_ty.errorUnionSet();
     const payload_ty = error_union_ty.errorUnionPayload();
@@ -5761,7 +5771,7 @@ fn airIsErr(f: *Function, inst: Air.Inst.Index, is_ptr: bool, operator: []const
     try reap(f, inst, &.{un_op});
     const operand_ty = f.typeOf(un_op);
     const local = try f.allocLocal(inst, Type.bool);
-    const err_union_ty = if (is_ptr) operand_ty.childType() else operand_ty;
+    const err_union_ty = if (is_ptr) operand_ty.childType(mod) else operand_ty;
     const payload_ty = err_union_ty.errorUnionPayload();
     const error_ty = err_union_ty.errorUnionSet();
@@ -5795,7 +5805,7 @@ fn airArrayToSlice(f: *Function, inst: Air.Inst.Index) !CValue {
     const inst_ty = f.typeOfIndex(inst);
     const writer = f.object.writer();
     const local = try f.allocLocal(inst, inst_ty);
-    const array_ty = f.typeOf(ty_op.operand).childType();
+    const array_ty = f.typeOf(ty_op.operand).childType(mod);

     try f.writeCValueMember(writer, local, .{ .identifier = "ptr" });
     try writer.writeAll(" = ");
@@ -5811,7 +5821,7 @@ fn airArrayToSlice(f: *Function, inst: Air.Inst.Index) !CValue {
     } else try f.writeCValue(writer, operand, .Initializer);
     try writer.writeAll("; ");

-    const array_len = array_ty.arrayLen();
+    const array_len = array_ty.arrayLen(mod);
     var len_pl: Value.Payload.U64 = .{ .base = .{ .tag = .int_u64 }, .data = array_len };
     const len_val = Value.initPayload(&len_pl.base);
     try f.writeCValueMember(writer, local, .{ .identifier = "len" });
@@ -6050,7 +6060,7 @@ fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue
     const expected_value = try f.resolveInst(extra.expected_value);
     const new_value = try f.resolveInst(extra.new_value);
     const ptr_ty = f.typeOf(extra.ptr);
-    const ty = ptr_ty.childType();
+    const ty = ptr_ty.childType(mod);

     const writer = f.object.writer();
     const new_value_mat = try Materialize.start(f, inst, writer, ty, new_value);
@@ -6152,7 +6162,7 @@ fn airAtomicRmw(f: *Function, inst: Air.Inst.Index) !CValue {
     const extra = f.air.extraData(Air.AtomicRmw, pl_op.payload).data;
     const inst_ty = f.typeOfIndex(inst);
     const ptr_ty = f.typeOf(pl_op.operand);
-    const ty = ptr_ty.childType();
+    const ty = ptr_ty.childType(mod);

     const ptr = try f.resolveInst(pl_op.operand);
     const operand = try f.resolveInst(extra.operand);
@@ -6207,7 +6217,7 @@ fn airAtomicLoad(f: *Function, inst: Air.Inst.Index) !CValue {
     const ptr = try f.resolveInst(atomic_load.ptr);
     try reap(f, inst, &.{atomic_load.ptr});
     const ptr_ty = f.typeOf(atomic_load.ptr);
-    const ty = ptr_ty.childType();
+    const ty = ptr_ty.childType(mod);

     const repr_ty = if (ty.isRuntimeFloat())
         mod.intType(.unsigned, @intCast(u16, ty.abiSize(mod) * 8)) catch unreachable
@@ -6241,7 +6251,7 @@ fn airAtomicStore(f: *Function, inst: Air.Inst.Index, order: [*:0]const u8) !CVa
     const mod = f.object.dg.module;
     const bin_op = f.air.instructions.items(.data)[inst].bin_op;
     const ptr_ty = f.typeOf(bin_op.lhs);
-    const ty = ptr_ty.childType();
+    const ty = ptr_ty.childType(mod);

     const ptr = try f.resolveInst(bin_op.lhs);
     const element = try f.resolveInst(bin_op.rhs);
@@ -6299,7 +6309,7 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
         }

         try writer.writeAll("memset(");
-        switch (dest_ty.ptrSize()) {
+        switch (dest_ty.ptrSize(mod)) {
             .Slice => {
                 try f.writeCValueMember(writer, dest_slice, .{ .identifier = "ptr" });
                 try writer.writeAll(", 0xaa, ");
@@ -6311,8 +6321,8 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
                 }
             },
             .One => {
-                const array_ty = dest_ty.childType();
-                const len = array_ty.arrayLen() * elem_abi_size;
+                const array_ty = dest_ty.childType(mod);
+                const len = array_ty.arrayLen(mod) * elem_abi_size;

                 try f.writeCValue(writer, dest_slice, .FunctionArgument);
                 try writer.print(", 0xaa, {d});\n", .{len});
@@ -6327,11 +6337,10 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
         // For the assignment in this loop, the array pointer needs to get
         // casted to a regular pointer, otherwise an error like this occurs:
         // error: array type 'uint32_t[20]' (aka 'unsigned int[20]') is not assignable
-        var elem_ptr_ty_pl: Type.Payload.ElemType = .{
-            .base = .{ .tag = .c_mut_pointer },
-            .data = elem_ty,
-        };
-        const elem_ptr_ty = Type.initPayload(&elem_ptr_ty_pl.base);
+        const elem_ptr_ty = try mod.ptrType(.{
+            .size = .C,
+            .elem_type = elem_ty.ip_index,
+        });

         const index = try f.allocLocal(inst, Type.usize);
@@ -6342,13 +6351,13 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
         try writer.writeAll("; ");
         try f.writeCValue(writer, index, .Other);
         try writer.writeAll(" != ");
-        switch (dest_ty.ptrSize()) {
+        switch (dest_ty.ptrSize(mod)) {
             .Slice => {
                 try f.writeCValueMember(writer, dest_slice, .{ .identifier = "len" });
             },
             .One => {
-                const array_ty = dest_ty.childType();
-                try writer.print("{d}", .{array_ty.arrayLen()});
+                const array_ty = dest_ty.childType(mod);
+                try writer.print("{d}", .{array_ty.arrayLen(mod)});
             },
             .Many, .C => unreachable,
         }
@@ -6377,7 +6386,7 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
     const bitcasted = try bitcast(f, Type.u8, value, elem_ty);

     try writer.writeAll("memset(");
-    switch (dest_ty.ptrSize()) {
+    switch (dest_ty.ptrSize(mod)) {
        .Slice => {
             try f.writeCValueMember(writer, dest_slice, .{ .identifier = "ptr" });
             try writer.writeAll(", ");
@@ -6387,8 +6396,8 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
             try writer.writeAll(");\n");
         },
         .One => {
-            const array_ty = dest_ty.childType();
-            const len = array_ty.arrayLen() * elem_abi_size;
+            const array_ty = dest_ty.childType(mod);
+            const len = array_ty.arrayLen(mod) * elem_abi_size;

             try f.writeCValue(writer, dest_slice, .FunctionArgument);
             try writer.writeAll(", ");
@@ -6416,9 +6425,9 @@ fn airMemcpy(f: *Function, inst: Air.Inst.Index) !CValue {
     try writer.writeAll(", ");
     try writeSliceOrPtr(f, writer, src_ptr, src_ty);
     try writer.writeAll(", ");
-    switch (dest_ty.ptrSize()) {
+    switch (dest_ty.ptrSize(mod)) {
         .Slice => {
-            const elem_ty = dest_ty.childType();
+            const elem_ty = dest_ty.childType(mod);
             const elem_abi_size = elem_ty.abiSize(mod);
             try f.writeCValueMember(writer, dest_ptr, .{ .identifier = "len" });
             if (elem_abi_size > 1) {
@@ -6428,10 +6437,10 @@ fn airMemcpy(f: *Function, inst: Air.Inst.Index) !CValue {
             }
         },
         .One => {
-            const array_ty = dest_ty.childType();
-            const elem_ty = array_ty.childType();
+            const array_ty = dest_ty.childType(mod);
+            const elem_ty = array_ty.childType(mod);
             const elem_abi_size = elem_ty.abiSize(mod);
-            const len = array_ty.arrayLen() * elem_abi_size;
+            const len = array_ty.arrayLen(mod) * elem_abi_size;
             try writer.print("{d});\n", .{len});
         },
         .Many, .C => unreachable,
@@ -6448,7 +6457,7 @@ fn airSetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue {
     const new_tag = try f.resolveInst(bin_op.rhs);
     try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });

-    const union_ty = f.typeOf(bin_op.lhs).childType();
+    const union_ty = f.typeOf(bin_op.lhs).childType(mod);
     const layout = union_ty.unionGetLayout(mod);
     if (layout.tag_size == 0) return .none;
     const tag_ty = union_ty.unionTagTypeSafety().?;
@@ -6777,7 +6786,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
     const mod = f.object.dg.module;
     const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
     const inst_ty = f.typeOfIndex(inst);
-    const len = @intCast(usize, inst_ty.arrayLen());
+    const len = @intCast(usize, inst_ty.arrayLen(mod));
     const elements = @ptrCast([]const Air.Inst.Ref, f.air.extra[ty_pl.payload..][0..len]);
     const gpa = f.object.dg.gpa;
     const resolved_elements = try gpa.alloc(CValue, elements.len);
@@ -6796,7 +6805,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
     const local = try f.allocLocal(inst, inst_ty);
     switch (inst_ty.zigTypeTag(mod)) {
         .Array, .Vector => {
-            const elem_ty = inst_ty.childType();
+            const elem_ty = inst_ty.childType(mod);
             const a = try Assignment.init(f, elem_ty);
             for (resolved_elements, 0..)
|element, i| { try a.restart(f, writer); @@ -6806,7 +6815,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { try f.writeCValue(writer, element, .Other); try a.end(f, writer); } - if (inst_ty.sentinel()) |sentinel| { + if (inst_ty.sentinel(mod)) |sentinel| { try a.restart(f, writer); try f.writeCValue(writer, local, .Other); try writer.print("[{d}]", .{resolved_elements.len}); @@ -7708,7 +7717,7 @@ const Vectorize = struct { pub fn start(f: *Function, inst: Air.Inst.Index, writer: anytype, ty: Type) !Vectorize { const mod = f.object.dg.module; return if (ty.zigTypeTag(mod) == .Vector) index: { - var len_pl = Value.Payload.U64{ .base = .{ .tag = .int_u64 }, .data = ty.vectorLen() }; + var len_pl = Value.Payload.U64{ .base = .{ .tag = .int_u64 }, .data = ty.vectorLen(mod) }; const local = try f.allocLocal(inst, Type.usize); diff --git a/src/codegen/c/type.zig b/src/codegen/c/type.zig index 27fa997fd3..9e6de6cb21 100644 --- a/src/codegen/c/type.zig +++ b/src/codegen/c/type.zig @@ -1423,7 +1423,7 @@ pub const CType = extern union { }), .Pointer => { - const info = ty.ptrInfo().data; + const info = ty.ptrInfo(mod); switch (info.size) { .Slice => { if (switch (kind) { @@ -1625,9 +1625,9 @@ pub const CType = extern union { .Vector => .vector, else => unreachable, }; - if (try lookup.typeToIndex(ty.childType(), kind)) |child_idx| { + if (try lookup.typeToIndex(ty.childType(mod), kind)) |child_idx| { self.storage = .{ .seq = .{ .base = .{ .tag = t }, .data = .{ - .len = ty.arrayLenIncludingSentinel(), + .len = ty.arrayLenIncludingSentinel(mod), .elem_type = child_idx, } } }; self.value = .{ .cty = initPayload(&self.storage.seq) }; @@ -1639,8 +1639,7 @@ pub const CType = extern union { }, .Optional => { - var buf: Type.Payload.ElemType = undefined; - const payload_ty = ty.optionalChild(&buf); + const payload_ty = ty.optionalChild(mod); if (payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { if (ty.optionalReprIsPayload(mod)) { try self.initType(payload_ty, kind, lookup); diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 5d9345c84f..f45a63df72 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -597,7 +597,7 @@ pub const Object = struct { llvm_usize_ty, }; const llvm_slice_ty = self.context.structType(&type_fields, type_fields.len, .False); - const slice_ty = Type.initTag(.const_slice_u8_sentinel_0); + const slice_ty = Type.const_slice_u8_sentinel_0; const slice_alignment = slice_ty.abiAlignment(mod); const error_name_list = mod.error_name_list.items; @@ -1071,7 +1071,7 @@ pub const Object = struct { .slice => { assert(!it.byval_attr); const param_ty = fn_info.param_types[it.zig_index - 1]; - const ptr_info = param_ty.ptrInfo().data; + const ptr_info = param_ty.ptrInfo(mod); if (math.cast(u5, it.zig_index - 1)) |i| { if (@truncate(u1, fn_info.noalias_bits >> i) != 0) { @@ -1596,7 +1596,7 @@ pub const Object = struct { }, .Pointer => { // Normalize everything that the debug info does not represent. - const ptr_info = ty.ptrInfo().data; + const ptr_info = ty.ptrInfo(mod); if (ptr_info.sentinel != null or ptr_info.@"addrspace" != .generic or @@ -1755,8 +1755,8 @@ pub const Object = struct { const array_di_ty = dib.createArrayType( ty.abiSize(mod) * 8, ty.abiAlignment(mod) * 8, - try o.lowerDebugType(ty.childType(), .full), - @intCast(c_int, ty.arrayLen()), + try o.lowerDebugType(ty.childType(mod), .full), + @intCast(c_int, ty.arrayLen(mod)), ); // The recursive call to `lowerDebugType` means we can't use `gop` anymore. 
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(array_di_ty), .{ .mod = o.module }); @@ -1781,14 +1781,14 @@ pub const Object = struct { break :blk dib.createBasicType(name, info.bits, dwarf_encoding); }, .Bool => dib.createBasicType("bool", 1, DW.ATE.boolean), - else => try o.lowerDebugType(ty.childType(), .full), + else => try o.lowerDebugType(ty.childType(mod), .full), }; const vector_di_ty = dib.createVectorType( ty.abiSize(mod) * 8, ty.abiAlignment(mod) * 8, elem_di_type, - ty.vectorLen(), + ty.vectorLen(mod), ); // The recursive call to `lowerDebugType` means we can't use `gop` anymore. try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(vector_di_ty), .{ .mod = o.module }); @@ -1797,8 +1797,7 @@ pub const Object = struct { .Optional => { const name = try ty.nameAlloc(gpa, o.module); defer gpa.free(name); - var buf: Type.Payload.ElemType = undefined; - const child_ty = ty.optionalChild(&buf); + const child_ty = ty.optionalChild(mod); if (!child_ty.hasRuntimeBitsIgnoreComptime(mod)) { const di_bits = 8; // lldb cannot handle non-byte sized types const di_ty = dib.createBasicType(name, di_bits, DW.ATE.boolean); @@ -2350,11 +2349,7 @@ pub const Object = struct { try param_di_types.append(try o.lowerDebugType(di_ret_ty, .full)); if (sret) { - var ptr_ty_payload: Type.Payload.ElemType = .{ - .base = .{ .tag = .single_mut_pointer }, - .data = fn_info.return_type, - }; - const ptr_ty = Type.initPayload(&ptr_ty_payload.base); + const ptr_ty = try mod.singleMutPtrType(fn_info.return_type); try param_di_types.append(try o.lowerDebugType(ptr_ty, .full)); } } else { @@ -2364,11 +2359,7 @@ pub const Object = struct { if (fn_info.return_type.isError(mod) and o.module.comp.bin_file.options.error_return_tracing) { - var ptr_ty_payload: Type.Payload.ElemType = .{ - .base = .{ .tag = .single_mut_pointer }, - .data = o.getStackTraceType(), - }; - const ptr_ty = Type.initPayload(&ptr_ty_payload.base); + const ptr_ty = try mod.singleMutPtrType(o.getStackTraceType()); try param_di_types.append(try o.lowerDebugType(ptr_ty, .full)); } @@ -2376,11 +2367,7 @@ pub const Object = struct { if (!param_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; if (isByRef(param_ty, mod)) { - var ptr_ty_payload: Type.Payload.ElemType = .{ - .base = .{ .tag = .single_mut_pointer }, - .data = param_ty, - }; - const ptr_ty = Type.initPayload(&ptr_ty_payload.base); + const ptr_ty = try mod.singleMutPtrType(param_ty); try param_di_types.append(try o.lowerDebugType(ptr_ty, .full)); } else { try param_di_types.append(try o.lowerDebugType(param_ty, .full)); @@ -2843,7 +2830,7 @@ pub const DeclGen = struct { }; return dg.context.structType(&fields, fields.len, .False); } - const ptr_info = t.ptrInfo().data; + const ptr_info = t.ptrInfo(mod); const llvm_addrspace = toLlvmAddressSpace(ptr_info.@"addrspace", target); return dg.context.pointerType(llvm_addrspace); }, @@ -2866,19 +2853,18 @@ pub const DeclGen = struct { return llvm_struct_ty; }, .Array => { - const elem_ty = t.childType(); + const elem_ty = t.childType(mod); assert(elem_ty.onePossibleValue(mod) == null); const elem_llvm_ty = try dg.lowerType(elem_ty); - const total_len = t.arrayLen() + @boolToInt(t.sentinel() != null); + const total_len = t.arrayLen(mod) + @boolToInt(t.sentinel(mod) != null); return elem_llvm_ty.arrayType(@intCast(c_uint, total_len)); }, .Vector => { - const elem_type = try dg.lowerType(t.childType()); - return elem_type.vectorType(t.vectorLen()); + const elem_type = try dg.lowerType(t.childType(mod)); + return 
elem_type.vectorType(t.vectorLen(mod)); }, .Optional => { - var buf: Type.Payload.ElemType = undefined; - const child_ty = t.optionalChild(&buf); + const child_ty = t.optionalChild(mod); if (!child_ty.hasRuntimeBitsIgnoreComptime(mod)) { return dg.context.intType(8); } @@ -3173,11 +3159,7 @@ pub const DeclGen = struct { if (fn_info.return_type.isError(mod) and mod.comp.bin_file.options.error_return_tracing) { - var ptr_ty_payload: Type.Payload.ElemType = .{ - .base = .{ .tag = .single_mut_pointer }, - .data = dg.object.getStackTraceType(), - }; - const ptr_ty = Type.initPayload(&ptr_ty_payload.base); + const ptr_ty = try mod.singleMutPtrType(dg.object.getStackTraceType()); try llvm_params.append(try dg.lowerType(ptr_ty)); } @@ -3199,9 +3181,8 @@ pub const DeclGen = struct { .slice => { const param_ty = fn_info.param_types[it.zig_index - 1]; var buf: Type.SlicePtrFieldTypeBuffer = undefined; - var opt_buf: Type.Payload.ElemType = undefined; const ptr_ty = if (param_ty.zigTypeTag(mod) == .Optional) - param_ty.optionalChild(&opt_buf).slicePtrFieldType(&buf) + param_ty.optionalChild(mod).slicePtrFieldType(&buf) else param_ty.slicePtrFieldType(&buf); const ptr_llvm_ty = try dg.lowerType(ptr_ty); @@ -3247,7 +3228,7 @@ pub const DeclGen = struct { const lower_elem_ty = switch (elem_ty.zigTypeTag(mod)) { .Opaque => true, .Fn => !elem_ty.fnInfo().is_generic, - .Array => elem_ty.childType().hasRuntimeBitsIgnoreComptime(mod), + .Array => elem_ty.childType(mod).hasRuntimeBitsIgnoreComptime(mod), else => elem_ty.hasRuntimeBitsIgnoreComptime(mod), }; const llvm_elem_ty = if (lower_elem_ty) @@ -3417,7 +3398,7 @@ pub const DeclGen = struct { return llvm_int.constIntToPtr(try dg.lowerType(tv.ty)); }, .field_ptr, .opt_payload_ptr, .eu_payload_ptr, .elem_ptr => { - return dg.lowerParentPtr(tv.val, tv.ty.ptrInfo().data.bit_offset % 8 == 0); + return dg.lowerParentPtr(tv.val, tv.ty.ptrInfo(mod).bit_offset % 8 == 0); }, .null_value, .zero => { const llvm_type = try dg.lowerType(tv.ty); @@ -3425,7 +3406,7 @@ pub const DeclGen = struct { }, .opt_payload => { const payload = tv.val.castTag(.opt_payload).?.data; - return dg.lowerParentPtr(payload, tv.ty.ptrInfo().data.bit_offset % 8 == 0); + return dg.lowerParentPtr(payload, tv.ty.ptrInfo(mod).bit_offset % 8 == 0); }, else => |tag| return dg.todo("implement const of pointer type '{}' ({})", .{ tv.ty.fmtDebug(), tag, @@ -3436,14 +3417,14 @@ pub const DeclGen = struct { const bytes = tv.val.castTag(.bytes).?.data; return dg.context.constString( bytes.ptr, - @intCast(c_uint, tv.ty.arrayLenIncludingSentinel()), + @intCast(c_uint, tv.ty.arrayLenIncludingSentinel(mod)), .True, // Don't null terminate. Bytes has the sentinel, if any. 
); }, .str_lit => { const str_lit = tv.val.castTag(.str_lit).?.data; const bytes = dg.module.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; - if (tv.ty.sentinel()) |sent_val| { + if (tv.ty.sentinel(mod)) |sent_val| { const byte = @intCast(u8, sent_val.toUnsignedInt(mod)); if (byte == 0 and bytes.len > 0) { return dg.context.constString( @@ -3472,9 +3453,9 @@ pub const DeclGen = struct { }, .aggregate => { const elem_vals = tv.val.castTag(.aggregate).?.data; - const elem_ty = tv.ty.elemType(); + const elem_ty = tv.ty.childType(mod); const gpa = dg.gpa; - const len = @intCast(usize, tv.ty.arrayLenIncludingSentinel()); + const len = @intCast(usize, tv.ty.arrayLenIncludingSentinel(mod)); const llvm_elems = try gpa.alloc(*llvm.Value, len); defer gpa.free(llvm_elems); var need_unnamed = false; @@ -3498,9 +3479,9 @@ pub const DeclGen = struct { }, .repeated => { const val = tv.val.castTag(.repeated).?.data; - const elem_ty = tv.ty.elemType(); - const sentinel = tv.ty.sentinel(); - const len = @intCast(usize, tv.ty.arrayLen()); + const elem_ty = tv.ty.childType(mod); + const sentinel = tv.ty.sentinel(mod); + const len = @intCast(usize, tv.ty.arrayLen(mod)); const len_including_sent = len + @boolToInt(sentinel != null); const gpa = dg.gpa; const llvm_elems = try gpa.alloc(*llvm.Value, len_including_sent); @@ -3534,8 +3515,8 @@ pub const DeclGen = struct { } }, .empty_array_sentinel => { - const elem_ty = tv.ty.elemType(); - const sent_val = tv.ty.sentinel().?; + const elem_ty = tv.ty.childType(mod); + const sent_val = tv.ty.sentinel(mod).?; const sentinel = try dg.lowerValue(.{ .ty = elem_ty, .val = sent_val }); const llvm_elems: [1]*llvm.Value = .{sentinel}; const need_unnamed = dg.isUnnamedType(elem_ty, llvm_elems[0]); @@ -3550,8 +3531,7 @@ pub const DeclGen = struct { }, .Optional => { comptime assert(optional_layout_version == 3); - var buf: Type.Payload.ElemType = undefined; - const payload_ty = tv.ty.optionalChild(&buf); + const payload_ty = tv.ty.optionalChild(mod); const llvm_i8 = dg.context.intType(8); const is_pl = !tv.val.isNull(mod); @@ -3897,10 +3877,10 @@ pub const DeclGen = struct { .bytes => { // Note, sentinel is not stored even if the type has a sentinel. const bytes = tv.val.castTag(.bytes).?.data; - const vector_len = @intCast(usize, tv.ty.arrayLen()); + const vector_len = @intCast(usize, tv.ty.arrayLen(mod)); assert(vector_len == bytes.len or vector_len + 1 == bytes.len); - const elem_ty = tv.ty.elemType(); + const elem_ty = tv.ty.childType(mod); const llvm_elems = try dg.gpa.alloc(*llvm.Value, vector_len); defer dg.gpa.free(llvm_elems); for (llvm_elems, 0..) |*elem, i| { @@ -3923,9 +3903,9 @@ pub const DeclGen = struct { // Note, sentinel is not stored even if the type has a sentinel. // The value includes the sentinel in those cases. const elem_vals = tv.val.castTag(.aggregate).?.data; - const vector_len = @intCast(usize, tv.ty.arrayLen()); + const vector_len = @intCast(usize, tv.ty.arrayLen(mod)); assert(vector_len == elem_vals.len or vector_len + 1 == elem_vals.len); - const elem_ty = tv.ty.elemType(); + const elem_ty = tv.ty.childType(mod); const llvm_elems = try dg.gpa.alloc(*llvm.Value, vector_len); defer dg.gpa.free(llvm_elems); for (llvm_elems, 0..) |*elem, i| { @@ -3939,8 +3919,8 @@ pub const DeclGen = struct { .repeated => { // Note, sentinel is not stored even if the type has a sentinel. 
const val = tv.val.castTag(.repeated).?.data; - const elem_ty = tv.ty.elemType(); - const len = @intCast(usize, tv.ty.arrayLen()); + const elem_ty = tv.ty.childType(mod); + const len = @intCast(usize, tv.ty.arrayLen(mod)); const llvm_elems = try dg.gpa.alloc(*llvm.Value, len); defer dg.gpa.free(llvm_elems); for (llvm_elems) |*elem| { @@ -3955,10 +3935,10 @@ pub const DeclGen = struct { // Note, sentinel is not stored const str_lit = tv.val.castTag(.str_lit).?.data; const bytes = dg.module.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; - const vector_len = @intCast(usize, tv.ty.arrayLen()); + const vector_len = @intCast(usize, tv.ty.arrayLen(mod)); assert(vector_len == bytes.len); - const elem_ty = tv.ty.elemType(); + const elem_ty = tv.ty.childType(mod); const llvm_elems = try dg.gpa.alloc(*llvm.Value, vector_len); defer dg.gpa.free(llvm_elems); for (llvm_elems, 0..) |*elem, i| { @@ -4006,13 +3986,10 @@ pub const DeclGen = struct { ptr_val: Value, decl_index: Module.Decl.Index, ) Error!*llvm.Value { - const decl = dg.module.declPtr(decl_index); - dg.module.markDeclAlive(decl); - var ptr_ty_payload: Type.Payload.ElemType = .{ - .base = .{ .tag = .single_mut_pointer }, - .data = decl.ty, - }; - const ptr_ty = Type.initPayload(&ptr_ty_payload.base); + const mod = dg.module; + const decl = mod.declPtr(decl_index); + mod.markDeclAlive(decl); + const ptr_ty = try mod.singleMutPtrType(decl.ty); return try dg.lowerDeclRefValue(.{ .ty = ptr_ty, .val = ptr_val }, decl_index); } @@ -4135,9 +4112,8 @@ pub const DeclGen = struct { .opt_payload_ptr => { const opt_payload_ptr = ptr_val.castTag(.opt_payload_ptr).?.data; const parent_llvm_ptr = try dg.lowerParentPtr(opt_payload_ptr.container_ptr, true); - var buf: Type.Payload.ElemType = undefined; - const payload_ty = opt_payload_ptr.container_ty.optionalChild(&buf); + const payload_ty = opt_payload_ptr.container_ty.optionalChild(mod); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod) or payload_ty.optionalReprIsPayload(mod)) { @@ -4251,7 +4227,8 @@ pub const DeclGen = struct { } fn lowerPtrToVoid(dg: *DeclGen, ptr_ty: Type) !*llvm.Value { - const alignment = ptr_ty.ptrInfo().data.@"align"; + const mod = dg.module; + const alignment = ptr_ty.ptrInfo(mod).@"align"; // Even though we are pointing at something which has zero bits (e.g. `void`), // Pointers are defined to have bits. So we must return something here. 
// The value cannot be undefined, because we use the `nonnull` annotation @@ -4374,7 +4351,7 @@ pub const DeclGen = struct { ) void { const mod = dg.module; if (param_ty.isPtrAtRuntime(mod)) { - const ptr_info = param_ty.ptrInfo().data; + const ptr_info = param_ty.ptrInfo(mod); if (math.cast(u5, param_index)) |i| { if (@truncate(u1, fn_info.noalias_bits >> i) != 0) { dg.addArgAttr(llvm_fn, llvm_arg_i, "noalias"); @@ -4786,7 +4763,7 @@ pub const FuncGen = struct { const mod = self.dg.module; const zig_fn_ty = switch (callee_ty.zigTypeTag(mod)) { .Fn => callee_ty, - .Pointer => callee_ty.childType(), + .Pointer => callee_ty.childType(mod), else => unreachable, }; const fn_info = zig_fn_ty.fnInfo(); @@ -5014,7 +4991,7 @@ pub const FuncGen = struct { .slice => { assert(!it.byval_attr); const param_ty = fn_info.param_types[it.zig_index - 1]; - const ptr_info = param_ty.ptrInfo().data; + const ptr_info = param_ty.ptrInfo(mod); const llvm_arg_i = it.llvm_index - 2; if (math.cast(u5, it.zig_index - 1)) |i| { @@ -5098,11 +5075,7 @@ pub const FuncGen = struct { const ret_ty = self.typeOf(un_op); if (self.ret_ptr) |ret_ptr| { const operand = try self.resolveInst(un_op); - var ptr_ty_payload: Type.Payload.ElemType = .{ - .base = .{ .tag = .single_mut_pointer }, - .data = ret_ty, - }; - const ptr_ty = Type.initPayload(&ptr_ty_payload.base); + const ptr_ty = try mod.singleMutPtrType(ret_ty); try self.store(ret_ptr, ptr_ty, operand, .NotAtomic); _ = self.builder.buildRetVoid(); return null; @@ -5150,11 +5123,11 @@ pub const FuncGen = struct { } fn airRetLoad(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const un_op = self.air.instructions.items(.data)[inst].un_op; const ptr_ty = self.typeOf(un_op); - const ret_ty = ptr_ty.childType(); + const ret_ty = ptr_ty.childType(mod); const fn_info = self.dg.decl.ty.fnInfo(); - const mod = self.dg.module; if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) { if (fn_info.return_type.isError(mod)) { // Functions with an empty error set are emitted with an error code @@ -5301,15 +5274,13 @@ pub const FuncGen = struct { operand_ty: Type, op: math.CompareOperator, ) Allocator.Error!*llvm.Value { - var opt_buffer: Type.Payload.ElemType = undefined; - const mod = self.dg.module; const scalar_ty = operand_ty.scalarType(mod); const int_ty = switch (scalar_ty.zigTypeTag(mod)) { .Enum => scalar_ty.intTagType(), .Int, .Bool, .Pointer, .ErrorSet => scalar_ty, .Optional => blk: { - const payload_ty = operand_ty.optionalChild(&opt_buffer); + const payload_ty = operand_ty.optionalChild(mod); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod) or operand_ty.optionalReprIsPayload(mod)) { @@ -5506,11 +5477,12 @@ pub const FuncGen = struct { } fn airTryPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.TryPtr, ty_pl.payload); const err_union_ptr = try self.resolveInst(extra.data.ptr); const body = self.air.extra[extra.end..][0..extra.data.body_len]; - const err_union_ty = self.typeOf(extra.data.ptr).childType(); + const err_union_ty = self.typeOf(extra.data.ptr).childType(mod); const is_unused = self.liveness.isUnused(inst); return lowerTry(self, err_union_ptr, body, err_union_ty, true, true, is_unused); } @@ -5661,9 +5633,9 @@ pub const FuncGen = struct { const mod = self.dg.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand_ty = self.typeOf(ty_op.operand); - const array_ty = 
operand_ty.childType(); + const array_ty = operand_ty.childType(mod); const llvm_usize = try self.dg.lowerType(Type.usize); - const len = llvm_usize.constInt(array_ty.arrayLen(), .False); + const len = llvm_usize.constInt(array_ty.arrayLen(mod), .False); const slice_llvm_ty = try self.dg.lowerType(self.typeOfIndex(inst)); const operand = try self.resolveInst(ty_op.operand); if (!array_ty.hasRuntimeBitsIgnoreComptime(mod)) { @@ -5806,20 +5778,20 @@ pub const FuncGen = struct { const mod = fg.dg.module; const target = mod.getTarget(); const llvm_usize_ty = fg.context.intType(target.ptrBitWidth()); - switch (ty.ptrSize()) { + switch (ty.ptrSize(mod)) { .Slice => { const len = fg.builder.buildExtractValue(ptr, 1, ""); - const elem_ty = ty.childType(); + const elem_ty = ty.childType(mod); const abi_size = elem_ty.abiSize(mod); if (abi_size == 1) return len; const abi_size_llvm_val = llvm_usize_ty.constInt(abi_size, .False); return fg.builder.buildMul(len, abi_size_llvm_val, ""); }, .One => { - const array_ty = ty.childType(); - const elem_ty = array_ty.childType(); + const array_ty = ty.childType(mod); + const elem_ty = array_ty.childType(mod); const abi_size = elem_ty.abiSize(mod); - return llvm_usize_ty.constInt(array_ty.arrayLen() * abi_size, .False); + return llvm_usize_ty.constInt(array_ty.arrayLen(mod) * abi_size, .False); }, .Many, .C => unreachable, } @@ -5832,10 +5804,11 @@ pub const FuncGen = struct { } fn airPtrSliceFieldPtr(self: *FuncGen, inst: Air.Inst.Index, index: c_uint) !?*llvm.Value { + const mod = self.dg.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const slice_ptr = try self.resolveInst(ty_op.operand); const slice_ptr_ty = self.typeOf(ty_op.operand); - const slice_llvm_ty = try self.dg.lowerPtrElemTy(slice_ptr_ty.childType()); + const slice_llvm_ty = try self.dg.lowerPtrElemTy(slice_ptr_ty.childType(mod)); return self.builder.buildStructGEP(slice_llvm_ty, slice_ptr, index, ""); } @@ -5847,7 +5820,7 @@ pub const FuncGen = struct { const slice_ty = self.typeOf(bin_op.lhs); const slice = try self.resolveInst(bin_op.lhs); const index = try self.resolveInst(bin_op.rhs); - const elem_ty = slice_ty.childType(); + const elem_ty = slice_ty.childType(mod); const llvm_elem_ty = try self.dg.lowerPtrElemTy(elem_ty); const base_ptr = self.builder.buildExtractValue(slice, 0, ""); const indices: [1]*llvm.Value = .{index}; @@ -5863,13 +5836,14 @@ pub const FuncGen = struct { } fn airSliceElemPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const slice_ty = self.typeOf(bin_op.lhs); const slice = try self.resolveInst(bin_op.lhs); const index = try self.resolveInst(bin_op.rhs); - const llvm_elem_ty = try self.dg.lowerPtrElemTy(slice_ty.childType()); + const llvm_elem_ty = try self.dg.lowerPtrElemTy(slice_ty.childType(mod)); const base_ptr = self.builder.buildExtractValue(slice, 0, ""); const indices: [1]*llvm.Value = .{index}; return self.builder.buildInBoundsGEP(llvm_elem_ty, base_ptr, &indices, indices.len, ""); @@ -5884,7 +5858,7 @@ pub const FuncGen = struct { const array_llvm_val = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); const array_llvm_ty = try self.dg.lowerType(array_ty); - const elem_ty = array_ty.childType(); + const elem_ty = array_ty.childType(mod); if (isByRef(array_ty, mod)) { const indices: [2]*llvm.Value = .{ self.context.intType(32).constNull(), rhs 
}; if (isByRef(elem_ty, mod)) { @@ -5923,7 +5897,7 @@ pub const FuncGen = struct { const inst = body_tail[0]; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const ptr_ty = self.typeOf(bin_op.lhs); - const elem_ty = ptr_ty.childType(); + const elem_ty = ptr_ty.childType(mod); const llvm_elem_ty = try self.dg.lowerPtrElemTy(elem_ty); const base_ptr = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); @@ -5951,14 +5925,14 @@ pub const FuncGen = struct { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const ptr_ty = self.typeOf(bin_op.lhs); - const elem_ty = ptr_ty.childType(); + const elem_ty = ptr_ty.childType(mod); if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) return self.dg.lowerPtrToVoid(ptr_ty); const base_ptr = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); const elem_ptr = self.air.getRefType(ty_pl.ty); - if (elem_ptr.ptrInfo().data.vector_index != .none) return base_ptr; + if (elem_ptr.ptrInfo(mod).vector_index != .none) return base_ptr; const llvm_elem_ty = try self.dg.lowerPtrElemTy(elem_ty); if (ptr_ty.isSinglePointer(mod)) { @@ -6098,7 +6072,7 @@ pub const FuncGen = struct { const field_ptr = try self.resolveInst(extra.field_ptr); const target = self.dg.module.getTarget(); - const parent_ty = self.air.getRefType(ty_pl.ty).childType(); + const parent_ty = self.air.getRefType(ty_pl.ty).childType(mod); const field_offset = parent_ty.structFieldOffset(extra.field_index, mod); const res_ty = try self.dg.lowerType(self.air.getRefType(ty_pl.ty)); @@ -6232,6 +6206,7 @@ pub const FuncGen = struct { } fn airDbgVarPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const dib = self.dg.object.di_builder orelse return null; const pl_op = self.air.instructions.items(.data)[inst].pl_op; const operand = try self.resolveInst(pl_op.operand); @@ -6243,7 +6218,7 @@ pub const FuncGen = struct { name.ptr, self.di_file.?, self.prev_dbg_line, - try self.dg.object.lowerDebugType(ptr_ty.childType(), .full), + try self.dg.object.lowerDebugType(ptr_ty.childType(mod), .full), true, // always preserve 0, // flags ); @@ -6365,7 +6340,7 @@ pub const FuncGen = struct { const output_inst = try self.resolveInst(output); const output_ty = self.typeOf(output); assert(output_ty.zigTypeTag(mod) == .Pointer); - const elem_llvm_ty = try self.dg.lowerPtrElemTy(output_ty.childType()); + const elem_llvm_ty = try self.dg.lowerPtrElemTy(output_ty.childType(mod)); if (llvm_ret_indirect[i]) { // Pass the result by reference as an indirect output (e.g. "=*m") @@ -6466,7 +6441,7 @@ pub const FuncGen = struct { // an elementtype() attribute. 
if (constraint[0] == '*') { llvm_param_attrs[llvm_param_i] = llvm_elem_ty orelse - try self.dg.lowerPtrElemTy(arg_ty.childType()); + try self.dg.lowerPtrElemTy(arg_ty.childType(mod)); } else { llvm_param_attrs[llvm_param_i] = null; } @@ -6657,14 +6632,13 @@ pub const FuncGen = struct { operand_is_ptr: bool, pred: llvm.IntPredicate, ) !?*llvm.Value { + const mod = self.dg.module; const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); const operand_ty = self.typeOf(un_op); - const optional_ty = if (operand_is_ptr) operand_ty.childType() else operand_ty; + const optional_ty = if (operand_is_ptr) operand_ty.childType(mod) else operand_ty; const optional_llvm_ty = try self.dg.lowerType(optional_ty); - var buf: Type.Payload.ElemType = undefined; - const payload_ty = optional_ty.optionalChild(&buf); - const mod = self.dg.module; + const payload_ty = optional_ty.optionalChild(mod); if (optional_ty.optionalReprIsPayload(mod)) { const loaded = if (operand_is_ptr) self.builder.buildLoad(optional_llvm_ty, operand, "") @@ -6709,7 +6683,7 @@ pub const FuncGen = struct { const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); const operand_ty = self.typeOf(un_op); - const err_union_ty = if (operand_is_ptr) operand_ty.childType() else operand_ty; + const err_union_ty = if (operand_is_ptr) operand_ty.childType(mod) else operand_ty; const payload_ty = err_union_ty.errorUnionPayload(); const err_set_ty = try self.dg.lowerType(Type.anyerror); const zero = err_set_ty.constNull(); @@ -6748,9 +6722,8 @@ pub const FuncGen = struct { const mod = self.dg.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); - const optional_ty = self.typeOf(ty_op.operand).childType(); - var buf: Type.Payload.ElemType = undefined; - const payload_ty = optional_ty.optionalChild(&buf); + const optional_ty = self.typeOf(ty_op.operand).childType(mod); + const payload_ty = optional_ty.optionalChild(mod); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { // We have a pointer to a zero-bit value and we need to return // a pointer to a zero-bit value. @@ -6770,9 +6743,8 @@ pub const FuncGen = struct { const mod = self.dg.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); - const optional_ty = self.typeOf(ty_op.operand).childType(); - var buf: Type.Payload.ElemType = undefined; - const payload_ty = optional_ty.optionalChild(&buf); + const optional_ty = self.typeOf(ty_op.operand).childType(mod); + const payload_ty = optional_ty.optionalChild(mod); const non_null_bit = self.context.intType(8).constInt(1, .False); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { // We have a pointer to a i8. We need to set it to 1 and then return the same pointer. 
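
[Reading aid, not part of the patch: the hunks above and below all apply one mechanical migration, so the before/after shape is collected once here as an illustrative Zig fragment. It reuses the names visible in the surrounding hunks (`opt_ty`, `ptr_ty`, `payload_ty`, and a `mod: *Module` in scope); the "before" and "after" halves are alternatives, not one compilable scope.

    // Before: type queries decode payload data off the Type tag, and
    // optional queries need caller-supplied scratch storage that the
    // returned Type points into.
    var buf: Type.Payload.ElemType = undefined;
    const payload_ty = opt_ty.optionalChild(&buf);
    const elem_ty = ptr_ty.childType();
    const info = ptr_ty.ptrInfo().data;

    // After: the same queries take the Module and resolve through the
    // intern pool, so the scratch buffer and the `.data` indirection
    // disappear.
    const payload_ty = opt_ty.optionalChild(mod);
    const elem_ty = ptr_ty.childType(mod);
    const info = ptr_ty.ptrInfo(mod);

    // Hand-rolled single-mutable-pointer types move behind a Module helper:
    var ptr_ty_payload: Type.Payload.ElemType = .{
        .base = .{ .tag = .single_mut_pointer },
        .data = payload_ty,
    };
    const ptr_ty_before = Type.initPayload(&ptr_ty_payload.base);
    const ptr_ty_after = try mod.singleMutPtrType(payload_ty);

The same substitution recurs in the remaining hunks for `arrayLen(mod)`, `arrayLenIncludingSentinel(mod)`, `vectorLen(mod)`, `sentinel(mod)`, `ptrSize(mod)`, and `ptrAddressSpace(mod)`.]
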
@@ -6827,9 +6799,9 @@ pub const FuncGen = struct { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); const operand_ty = self.typeOf(ty_op.operand); - const err_union_ty = if (operand_is_ptr) operand_ty.childType() else operand_ty; + const err_union_ty = if (operand_is_ptr) operand_ty.childType(mod) else operand_ty; const result_ty = self.typeOfIndex(inst); - const payload_ty = if (operand_is_ptr) result_ty.childType() else result_ty; + const payload_ty = if (operand_is_ptr) result_ty.childType(mod) else result_ty; if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return if (operand_is_ptr) operand else null; @@ -6862,7 +6834,7 @@ pub const FuncGen = struct { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); const operand_ty = self.typeOf(ty_op.operand); - const err_union_ty = if (operand_is_ptr) operand_ty.childType() else operand_ty; + const err_union_ty = if (operand_is_ptr) operand_ty.childType(mod) else operand_ty; if (err_union_ty.errorUnionSet().errorSetIsEmpty(mod)) { const err_llvm_ty = try self.dg.lowerType(Type.anyerror); if (operand_is_ptr) { @@ -6895,7 +6867,7 @@ pub const FuncGen = struct { const mod = self.dg.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); - const err_union_ty = self.typeOf(ty_op.operand).childType(); + const err_union_ty = self.typeOf(ty_op.operand).childType(mod); const payload_ty = err_union_ty.errorUnionPayload(); const non_error_val = try self.dg.lowerValue(.{ .ty = Type.anyerror, .val = Value.zero }); @@ -6961,11 +6933,7 @@ pub const FuncGen = struct { if (isByRef(optional_ty, mod)) { const optional_ptr = self.buildAlloca(llvm_optional_ty, optional_ty.abiAlignment(mod)); const payload_ptr = self.builder.buildStructGEP(llvm_optional_ty, optional_ptr, 0, ""); - var ptr_ty_payload: Type.Payload.ElemType = .{ - .base = .{ .tag = .single_mut_pointer }, - .data = payload_ty, - }; - const payload_ptr_ty = Type.initPayload(&ptr_ty_payload.base); + const payload_ptr_ty = try mod.singleMutPtrType(payload_ty); try self.store(payload_ptr, payload_ptr_ty, operand, .NotAtomic); const non_null_ptr = self.builder.buildStructGEP(llvm_optional_ty, optional_ptr, 1, ""); _ = self.builder.buildStore(non_null_bit, non_null_ptr); @@ -6995,11 +6963,7 @@ pub const FuncGen = struct { const store_inst = self.builder.buildStore(ok_err_code, err_ptr); store_inst.setAlignment(Type.anyerror.abiAlignment(mod)); const payload_ptr = self.builder.buildStructGEP(err_un_llvm_ty, result_ptr, payload_offset, ""); - var ptr_ty_payload: Type.Payload.ElemType = .{ - .base = .{ .tag = .single_mut_pointer }, - .data = payload_ty, - }; - const payload_ptr_ty = Type.initPayload(&ptr_ty_payload.base); + const payload_ptr_ty = try mod.singleMutPtrType(payload_ty); try self.store(payload_ptr, payload_ptr_ty, operand, .NotAtomic); return result_ptr; } @@ -7027,11 +6991,7 @@ pub const FuncGen = struct { const store_inst = self.builder.buildStore(operand, err_ptr); store_inst.setAlignment(Type.anyerror.abiAlignment(mod)); const payload_ptr = self.builder.buildStructGEP(err_un_llvm_ty, result_ptr, payload_offset, ""); - var ptr_ty_payload: Type.Payload.ElemType = .{ - .base = .{ .tag = .single_mut_pointer }, - .data = payload_ty, - }; - const payload_ptr_ty = Type.initPayload(&ptr_ty_payload.base); + const payload_ptr_ty = try mod.singleMutPtrType(payload_ty); // TODO store undef to payload_ptr _ = 
payload_ptr; _ = payload_ptr_ty; @@ -7076,7 +7036,7 @@ pub const FuncGen = struct { const operand = try self.resolveInst(extra.rhs); const loaded_vector = blk: { - const elem_llvm_ty = try self.dg.lowerType(vector_ptr_ty.childType()); + const elem_llvm_ty = try self.dg.lowerType(vector_ptr_ty.childType(mod)); const load_inst = self.builder.buildLoad(elem_llvm_ty, vector_ptr, ""); load_inst.setAlignment(vector_ptr_ty.ptrAlignment(mod)); load_inst.setVolatile(llvm.Bool.fromBool(vector_ptr_ty.isVolatilePtr())); @@ -7287,7 +7247,7 @@ pub const FuncGen = struct { const inst_llvm_ty = try self.dg.lowerType(inst_ty); const scalar_bit_size_minus_one = scalar_ty.bitSize(mod) - 1; const bit_size_minus_one = if (inst_ty.zigTypeTag(mod) == .Vector) const_vector: { - const vec_len = inst_ty.vectorLen(); + const vec_len = inst_ty.vectorLen(mod); const scalar_llvm_ty = try self.dg.lowerType(scalar_ty); const shifts = try self.gpa.alloc(*llvm.Value, vec_len); @@ -7361,7 +7321,7 @@ pub const FuncGen = struct { if (scalar_ty.isSignedInt(mod)) { const scalar_bit_size_minus_one = scalar_ty.bitSize(mod) - 1; const bit_size_minus_one = if (inst_ty.zigTypeTag(mod) == .Vector) const_vector: { - const vec_len = inst_ty.vectorLen(); + const vec_len = inst_ty.vectorLen(mod); const scalar_llvm_ty = try self.dg.lowerType(scalar_ty); const shifts = try self.gpa.alloc(*llvm.Value, vec_len); @@ -7384,13 +7344,14 @@ pub const FuncGen = struct { } fn airPtrAdd(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const ptr = try self.resolveInst(bin_op.lhs); const offset = try self.resolveInst(bin_op.rhs); const ptr_ty = self.typeOf(bin_op.lhs); - const llvm_elem_ty = try self.dg.lowerPtrElemTy(ptr_ty.childType()); - switch (ptr_ty.ptrSize()) { + const llvm_elem_ty = try self.dg.lowerPtrElemTy(ptr_ty.childType(mod)); + switch (ptr_ty.ptrSize(mod)) { .One => { // It's a pointer to an array, so according to LLVM we need an extra GEP index. const indices: [2]*llvm.Value = .{ self.context.intType(32).constNull(), offset }; @@ -7409,14 +7370,15 @@ pub const FuncGen = struct { } fn airPtrSub(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const ptr = try self.resolveInst(bin_op.lhs); const offset = try self.resolveInst(bin_op.rhs); const negative_offset = self.builder.buildNeg(offset, ""); const ptr_ty = self.typeOf(bin_op.lhs); - const llvm_elem_ty = try self.dg.lowerPtrElemTy(ptr_ty.childType()); - switch (ptr_ty.ptrSize()) { + const llvm_elem_ty = try self.dg.lowerPtrElemTy(ptr_ty.childType(mod)); + switch (ptr_ty.ptrSize(mod)) { .One => { // It's a pointer to an array, so according to LLVM we need an extra GEP index. 
const indices: [2]*llvm.Value = .{
@@ -7587,7 +7549,7 @@ pub const FuncGen = struct {
         };
 
         if (ty.zigTypeTag(mod) == .Vector) {
-            const vec_len = ty.vectorLen();
+            const vec_len = ty.vectorLen(mod);
             const vector_result_ty = llvm_i32.vectorType(vec_len);
 
             var result = vector_result_ty.getUndef();
@@ -7672,8 +7634,8 @@ pub const FuncGen = struct {
         const shift_amt = int_llvm_ty.constInt(float_bits - 1, .False);
         const sign_mask = one.constShl(shift_amt);
         const result = if (ty.zigTypeTag(mod) == .Vector) blk: {
-            const splat_sign_mask = self.builder.buildVectorSplat(ty.vectorLen(), sign_mask, "");
-            const cast_ty = int_llvm_ty.vectorType(ty.vectorLen());
+            const splat_sign_mask = self.builder.buildVectorSplat(ty.vectorLen(mod), sign_mask, "");
+            const cast_ty = int_llvm_ty.vectorType(ty.vectorLen(mod));
             const bitcasted_operand = self.builder.buildBitCast(params[0], cast_ty, "");
             break :blk self.builder.buildXor(bitcasted_operand, splat_sign_mask, "");
         } else blk: {
@@ -7720,7 +7682,7 @@ pub const FuncGen = struct {
             const libc_fn = self.getLibcFunction(fn_name, param_types[0..params.len], scalar_llvm_ty);
             if (ty.zigTypeTag(mod) == .Vector) {
                 const result = llvm_ty.getUndef();
-                return self.buildElementwiseCall(libc_fn, &params, result, ty.vectorLen());
+                return self.buildElementwiseCall(libc_fn, &params, result, ty.vectorLen(mod));
             }
 
             break :b libc_fn;
@@ -7887,7 +7849,7 @@ pub const FuncGen = struct {
         const bits = lhs_scalar_llvm_ty.constInt(lhs_bits, .False);
         const lhs_max = lhs_scalar_llvm_ty.constAllOnes();
         if (rhs_ty.zigTypeTag(mod) == .Vector) {
-            const vec_len = rhs_ty.vectorLen();
+            const vec_len = rhs_ty.vectorLen(mod);
             const bits_vec = self.builder.buildVectorSplat(vec_len, bits, "");
             const lhs_max_vec = self.builder.buildVectorSplat(vec_len, lhs_max, "");
             const in_range = self.builder.buildICmp(.ULT, rhs, bits_vec, "");
@@ -8059,7 +8021,7 @@ pub const FuncGen = struct {
         }
 
         if (operand_ty.zigTypeTag(mod) == .Vector and inst_ty.zigTypeTag(mod) == .Array) {
-            const elem_ty = operand_ty.childType();
+            const elem_ty = operand_ty.childType(mod);
             if (!result_is_ref) {
                 return self.dg.todo("implement bitcast vector to non-ref array", .{});
             }
@@ -8074,7 +8036,7 @@ pub const FuncGen = struct {
             const llvm_usize = try self.dg.lowerType(Type.usize);
             const llvm_u32 = self.context.intType(32);
             const zero = llvm_usize.constNull();
-            const vector_len = operand_ty.arrayLen();
+            const vector_len = operand_ty.arrayLen(mod);
             var i: u64 = 0;
             while (i < vector_len) : (i += 1) {
                 const index_usize = llvm_usize.constInt(i, .False);
@@ -8087,7 +8049,7 @@ pub const FuncGen = struct {
             }
             return array_ptr;
         } else if (operand_ty.zigTypeTag(mod) == .Array and inst_ty.zigTypeTag(mod) == .Vector) {
-            const elem_ty = operand_ty.childType();
+            const elem_ty = operand_ty.childType(mod);
             const llvm_vector_ty = try self.dg.lowerType(inst_ty);
             if (!operand_is_ref) {
                 return self.dg.todo("implement bitcast non-ref array to vector", .{});
            }
@@ -8108,7 +8070,7 @@ pub const FuncGen = struct {
             const llvm_usize = try self.dg.lowerType(Type.usize);
             const llvm_u32 = self.context.intType(32);
             const zero = llvm_usize.constNull();
-            const vector_len = operand_ty.arrayLen();
+            const vector_len = operand_ty.arrayLen(mod);
             var vector = llvm_vector_ty.getUndef();
             var i: u64 = 0;
             while (i < vector_len) : (i += 1) {
@@ -8207,7 +8169,7 @@ pub const FuncGen = struct {
     fn airAlloc(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
         const mod = self.dg.module;
         const ptr_ty = self.typeOfIndex(inst);
-        const pointee_type = ptr_ty.childType();
+        const 
pointee_type = ptr_ty.childType(mod); if (!pointee_type.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return self.dg.lowerPtrToVoid(ptr_ty); const pointee_llvm_ty = try self.dg.lowerType(pointee_type); @@ -8218,7 +8180,7 @@ pub const FuncGen = struct { fn airRetPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { const mod = self.dg.module; const ptr_ty = self.typeOfIndex(inst); - const ret_ty = ptr_ty.childType(); + const ret_ty = ptr_ty.childType(mod); if (!ret_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return self.dg.lowerPtrToVoid(ptr_ty); if (self.ret_ptr) |ret_ptr| return ret_ptr; const ret_llvm_ty = try self.dg.lowerType(ret_ty); @@ -8232,11 +8194,11 @@ pub const FuncGen = struct { } fn airStore(self: *FuncGen, inst: Air.Inst.Index, safety: bool) !?*llvm.Value { + const mod = self.dg.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const dest_ptr = try self.resolveInst(bin_op.lhs); const ptr_ty = self.typeOf(bin_op.lhs); - const operand_ty = ptr_ty.childType(); - const mod = self.dg.module; + const operand_ty = ptr_ty.childType(mod); const val_is_undef = if (self.air.value(bin_op.rhs, mod)) |val| val.isUndefDeep() else false; if (val_is_undef) { @@ -8271,8 +8233,10 @@ pub const FuncGen = struct { /// /// The first instruction of `body_tail` is the one whose copy we want to elide. fn canElideLoad(fg: *FuncGen, body_tail: []const Air.Inst.Index) bool { + const mod = fg.dg.module; + const ip = &mod.intern_pool; for (body_tail[1..]) |body_inst| { - switch (fg.liveness.categorizeOperand(fg.air, body_inst, body_tail[0])) { + switch (fg.liveness.categorizeOperand(fg.air, body_inst, body_tail[0], ip.*)) { .none => continue, .write, .noret, .complex => return false, .tomb => return true, @@ -8288,7 +8252,7 @@ pub const FuncGen = struct { const inst = body_tail[0]; const ty_op = fg.air.instructions.items(.data)[inst].ty_op; const ptr_ty = fg.typeOf(ty_op.operand); - const ptr_info = ptr_ty.ptrInfo().data; + const ptr_info = ptr_ty.ptrInfo(mod); const ptr = try fg.resolveInst(ty_op.operand); elide: { @@ -8363,7 +8327,7 @@ pub const FuncGen = struct { const ptr = try self.resolveInst(extra.ptr); var expected_value = try self.resolveInst(extra.expected_value); var new_value = try self.resolveInst(extra.new_value); - const operand_ty = self.typeOf(extra.ptr).elemType(); + const operand_ty = self.typeOf(extra.ptr).childType(mod); const opt_abi_ty = self.dg.getAtomicAbiType(operand_ty, false); if (opt_abi_ty) |abi_ty| { // operand needs widening and truncating @@ -8409,7 +8373,7 @@ pub const FuncGen = struct { const extra = self.air.extraData(Air.AtomicRmw, pl_op.payload).data; const ptr = try self.resolveInst(pl_op.operand); const ptr_ty = self.typeOf(pl_op.operand); - const operand_ty = ptr_ty.elemType(); + const operand_ty = ptr_ty.childType(mod); const operand = try self.resolveInst(extra.operand); const is_signed_int = operand_ty.isSignedInt(mod); const is_float = operand_ty.isRuntimeFloat(); @@ -8464,7 +8428,7 @@ pub const FuncGen = struct { const atomic_load = self.air.instructions.items(.data)[inst].atomic_load; const ptr = try self.resolveInst(atomic_load.ptr); const ptr_ty = self.typeOf(atomic_load.ptr); - const ptr_info = ptr_ty.ptrInfo().data; + const ptr_info = ptr_ty.ptrInfo(mod); const elem_ty = ptr_info.pointee_type; if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) return null; @@ -8497,7 +8461,7 @@ pub const FuncGen = struct { const mod = self.dg.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const ptr_ty = self.typeOf(bin_op.lhs); - 
const operand_ty = ptr_ty.childType(); + const operand_ty = ptr_ty.childType(mod); if (!operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return null; const ptr = try self.resolveInst(bin_op.lhs); var element = try self.resolveInst(bin_op.rhs); @@ -8595,9 +8559,9 @@ pub const FuncGen = struct { const end_block = self.context.appendBasicBlock(self.llvm_func, "InlineMemsetEnd"); const llvm_usize_ty = self.context.intType(target.ptrBitWidth()); - const len = switch (ptr_ty.ptrSize()) { + const len = switch (ptr_ty.ptrSize(mod)) { .Slice => self.builder.buildExtractValue(dest_slice, 1, ""), - .One => llvm_usize_ty.constInt(ptr_ty.childType().arrayLen(), .False), + .One => llvm_usize_ty.constInt(ptr_ty.childType(mod).arrayLen(mod), .False), .Many, .C => unreachable, }; const elem_llvm_ty = try self.dg.lowerType(elem_ty); @@ -8665,7 +8629,7 @@ pub const FuncGen = struct { fn airSetUnionTag(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { const mod = self.dg.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const un_ty = self.typeOf(bin_op.lhs).childType(); + const un_ty = self.typeOf(bin_op.lhs).childType(mod); const layout = un_ty.unionGetLayout(mod); if (layout.tag_size == 0) return null; const union_ptr = try self.resolveInst(bin_op.lhs); @@ -8791,7 +8755,7 @@ pub const FuncGen = struct { // The truncated result at the end will be the correct bswap const scalar_llvm_ty = self.context.intType(bits + 8); if (operand_ty.zigTypeTag(mod) == .Vector) { - const vec_len = operand_ty.vectorLen(); + const vec_len = operand_ty.vectorLen(mod); operand_llvm_ty = scalar_llvm_ty.vectorType(vec_len); const shifts = try self.gpa.alloc(*llvm.Value, vec_len); @@ -8980,7 +8944,7 @@ pub const FuncGen = struct { defer self.gpa.free(fqn); const llvm_fn_name = try std.fmt.allocPrintZ(arena, "__zig_tag_name_{s}", .{fqn}); - const slice_ty = Type.initTag(.const_slice_u8_sentinel_0); + const slice_ty = Type.const_slice_u8_sentinel_0; const llvm_ret_ty = try self.dg.lowerType(slice_ty); const usize_llvm_ty = try self.dg.lowerType(Type.usize); const slice_alignment = slice_ty.abiAlignment(mod); @@ -9097,10 +9061,11 @@ pub const FuncGen = struct { } fn airSplat(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const scalar = try self.resolveInst(ty_op.operand); const vector_ty = self.typeOfIndex(inst); - const len = vector_ty.vectorLen(); + const len = vector_ty.vectorLen(mod); return self.builder.buildVectorSplat(len, scalar, ""); } @@ -9122,7 +9087,7 @@ pub const FuncGen = struct { const b = try self.resolveInst(extra.b); const mask = self.air.values[extra.mask]; const mask_len = extra.mask_len; - const a_len = self.typeOf(extra.a).vectorLen(); + const a_len = self.typeOf(extra.a).vectorLen(mod); // LLVM uses integers larger than the length of the first array to // index into the second array. 
This was deemed unnecessarily fragile @@ -9298,14 +9263,14 @@ pub const FuncGen = struct { .ty = scalar_ty, .val = Value.initPayload(&init_value_payload.base), }); - return self.buildReducedCall(libc_fn, operand, operand_ty.vectorLen(), init_value); + return self.buildReducedCall(libc_fn, operand, operand_ty.vectorLen(mod), init_value); } fn airAggregateInit(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { const mod = self.dg.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const result_ty = self.typeOfIndex(inst); - const len = @intCast(usize, result_ty.arrayLen()); + const len = @intCast(usize, result_ty.arrayLen(mod)); const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]); const llvm_result_ty = try self.dg.lowerType(result_ty); @@ -9400,7 +9365,7 @@ pub const FuncGen = struct { const llvm_usize = try self.dg.lowerType(Type.usize); const alloca_inst = self.buildAlloca(llvm_result_ty, result_ty.abiAlignment(mod)); - const array_info = result_ty.arrayInfo(); + const array_info = result_ty.arrayInfo(mod); var elem_ptr_payload: Type.Payload.Pointer = .{ .data = .{ .pointee_type = array_info.elem_type, @@ -9720,7 +9685,7 @@ pub const FuncGen = struct { } const mod = self.dg.module; - const slice_ty = Type.initTag(.const_slice_u8_sentinel_0); + const slice_ty = Type.const_slice_u8_sentinel_0; const slice_alignment = slice_ty.abiAlignment(mod); const llvm_slice_ptr_ty = self.context.pointerType(0); // TODO: Address space @@ -9763,9 +9728,8 @@ pub const FuncGen = struct { opt_ty: Type, can_elide_load: bool, ) !*llvm.Value { - var buf: Type.Payload.ElemType = undefined; - const payload_ty = opt_ty.optionalChild(&buf); const mod = fg.dg.module; + const payload_ty = opt_ty.optionalChild(mod); if (isByRef(opt_ty, mod)) { // We have a pointer and we need to return a pointer to the first field. @@ -9827,13 +9791,13 @@ pub const FuncGen = struct { struct_ptr_ty: Type, field_index: u32, ) !?*llvm.Value { - const struct_ty = struct_ptr_ty.childType(); const mod = self.dg.module; + const struct_ty = struct_ptr_ty.childType(mod); switch (struct_ty.zigTypeTag(mod)) { .Struct => switch (struct_ty.containerLayout()) { .Packed => { const result_ty = self.typeOfIndex(inst); - const result_ty_info = result_ty.ptrInfo().data; + const result_ty_info = result_ty.ptrInfo(mod); if (result_ty_info.host_size != 0) { // From LLVM's perspective, a pointer to a packed struct and a pointer @@ -9919,7 +9883,7 @@ pub const FuncGen = struct { /// For isByRef=false types, it creates a load instruction and returns it. 
fn load(self: *FuncGen, ptr: *llvm.Value, ptr_ty: Type) !?*llvm.Value { const mod = self.dg.module; - const info = ptr_ty.ptrInfo().data; + const info = ptr_ty.ptrInfo(mod); if (!info.pointee_type.hasRuntimeBitsIgnoreComptime(mod)) return null; const ptr_alignment = info.alignment(mod); @@ -9954,7 +9918,7 @@ pub const FuncGen = struct { containing_int.setAlignment(ptr_alignment); containing_int.setVolatile(ptr_volatile); - const elem_bits = @intCast(c_uint, ptr_ty.elemType().bitSize(mod)); + const elem_bits = @intCast(c_uint, ptr_ty.childType(mod).bitSize(mod)); const shift_amt = containing_int.typeOf().constInt(info.bit_offset, .False); const shifted_value = self.builder.buildLShr(containing_int, shift_amt, ""); const elem_llvm_ty = try self.dg.lowerType(info.pointee_type); @@ -9992,9 +9956,9 @@ pub const FuncGen = struct { elem: *llvm.Value, ordering: llvm.AtomicOrdering, ) !void { - const info = ptr_ty.ptrInfo().data; - const elem_ty = info.pointee_type; const mod = self.dg.module; + const info = ptr_ty.ptrInfo(mod); + const elem_ty = info.pointee_type; if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) { return; } @@ -10026,7 +9990,7 @@ pub const FuncGen = struct { assert(ordering == .NotAtomic); containing_int.setAlignment(ptr_alignment); containing_int.setVolatile(ptr_volatile); - const elem_bits = @intCast(c_uint, ptr_ty.elemType().bitSize(mod)); + const elem_bits = @intCast(c_uint, ptr_ty.childType(mod).bitSize(mod)); const containing_int_ty = containing_int.typeOf(); const shift_amt = containing_int_ty.constInt(info.bit_offset, .False); // Convert to equally-sized integer type in order to perform the bit @@ -10864,8 +10828,7 @@ const ParamTypeIterator = struct { .Unspecified, .Inline => { it.zig_index += 1; it.llvm_index += 1; - var buf: Type.Payload.ElemType = undefined; - if (ty.isSlice(mod) or (ty.zigTypeTag(mod) == .Optional and ty.optionalChild(&buf).isSlice(mod))) { + if (ty.isSlice(mod) or (ty.zigTypeTag(mod) == .Optional and ty.optionalChild(mod).isSlice(mod))) { it.llvm_index += 1; return .slice; } else if (isByRef(ty, mod)) { @@ -11185,8 +11148,7 @@ fn isByRef(ty: Type, mod: *const Module) bool { return true; }, .Optional => { - var buf: Type.Payload.ElemType = undefined; - const payload_ty = ty.optionalChild(&buf); + const payload_ty = ty.optionalChild(mod); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return false; } diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index f69c6cb317..9de2c03142 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -625,20 +625,20 @@ pub const DeclGen = struct { .Array => switch (val.tag()) { .aggregate => { const elem_vals = val.castTag(.aggregate).?.data; - const elem_ty = ty.elemType(); - const len = @intCast(u32, ty.arrayLenIncludingSentinel()); // TODO: limit spir-v to 32 bit arrays in a more elegant way. + const elem_ty = ty.childType(mod); + const len = @intCast(u32, ty.arrayLenIncludingSentinel(mod)); // TODO: limit spir-v to 32 bit arrays in a more elegant way. 
for (elem_vals[0..len]) |elem_val| { try self.lower(elem_ty, elem_val); } }, .repeated => { const elem_val = val.castTag(.repeated).?.data; - const elem_ty = ty.elemType(); - const len = @intCast(u32, ty.arrayLen()); + const elem_ty = ty.childType(mod); + const len = @intCast(u32, ty.arrayLen(mod)); for (0..len) |_| { try self.lower(elem_ty, elem_val); } - if (ty.sentinel()) |sentinel| { + if (ty.sentinel(mod)) |sentinel| { try self.lower(elem_ty, sentinel); } }, @@ -646,7 +646,7 @@ pub const DeclGen = struct { const str_lit = val.castTag(.str_lit).?.data; const bytes = dg.module.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; try self.addBytes(bytes); - if (ty.sentinel()) |sentinel| { + if (ty.sentinel(mod)) |sentinel| { try self.addByte(@intCast(u8, sentinel.toUnsignedInt(mod))); } }, @@ -706,8 +706,7 @@ pub const DeclGen = struct { } }, .Optional => { - var opt_buf: Type.Payload.ElemType = undefined; - const payload_ty = ty.optionalChild(&opt_buf); + const payload_ty = ty.optionalChild(mod); const has_payload = !val.isNull(mod); const abi_size = ty.abiSize(mod); @@ -1216,10 +1215,10 @@ pub const DeclGen = struct { return try self.spv.resolve(.{ .float_type = .{ .bits = bits } }); }, .Array => { - const elem_ty = ty.childType(); + const elem_ty = ty.childType(mod); const elem_ty_ref = try self.resolveType(elem_ty, .direct); - const total_len = std.math.cast(u32, ty.arrayLenIncludingSentinel()) orelse { - return self.fail("array type of {} elements is too large", .{ty.arrayLenIncludingSentinel()}); + const total_len = std.math.cast(u32, ty.arrayLenIncludingSentinel(mod)) orelse { + return self.fail("array type of {} elements is too large", .{ty.arrayLenIncludingSentinel(mod)}); }; return self.spv.arrayType(total_len, elem_ty_ref); }, @@ -1248,7 +1247,7 @@ pub const DeclGen = struct { }, }, .Pointer => { - const ptr_info = ty.ptrInfo().data; + const ptr_info = ty.ptrInfo(mod); const storage_class = spvStorageClass(ptr_info.@"addrspace"); const child_ty_ref = try self.resolveType(ptr_info.pointee_type, .indirect); @@ -1280,8 +1279,8 @@ pub const DeclGen = struct { // TODO: Properly verify sizes and child type. return try self.spv.resolve(.{ .vector_type = .{ - .component_type = try self.resolveType(ty.elemType(), repr), - .component_count = @intCast(u32, ty.vectorLen()), + .component_type = try self.resolveType(ty.childType(mod), repr), + .component_count = @intCast(u32, ty.vectorLen(mod)), } }); }, .Struct => { @@ -1335,8 +1334,7 @@ pub const DeclGen = struct { } }); }, .Optional => { - var buf: Type.Payload.ElemType = undefined; - const payload_ty = ty.optionalChild(&buf); + const payload_ty = ty.optionalChild(mod); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { // Just use a bool. 
// Note: Always generate the bool with indirect format, to save on some sanity @@ -1685,7 +1683,8 @@ pub const DeclGen = struct { } fn load(self: *DeclGen, ptr_ty: Type, ptr_id: IdRef) !IdRef { - const value_ty = ptr_ty.childType(); + const mod = self.module; + const value_ty = ptr_ty.childType(mod); const indirect_value_ty_ref = try self.resolveType(value_ty, .indirect); const result_id = self.spv.allocId(); const access = spec.MemoryAccess.Extended{ @@ -1701,7 +1700,8 @@ pub const DeclGen = struct { } fn store(self: *DeclGen, ptr_ty: Type, ptr_id: IdRef, value_id: IdRef) !void { - const value_ty = ptr_ty.childType(); + const mod = self.module; + const value_ty = ptr_ty.childType(mod); const indirect_value_id = try self.convertToIndirect(value_ty, value_id); const access = spec.MemoryAccess.Extended{ .Volatile = ptr_ty.isVolatilePtr(), @@ -2072,7 +2072,7 @@ pub const DeclGen = struct { const b = try self.resolve(extra.b); const mask = self.air.values[extra.mask]; const mask_len = extra.mask_len; - const a_len = self.typeOf(extra.a).vectorLen(); + const a_len = self.typeOf(extra.a).vectorLen(mod); const result_id = self.spv.allocId(); const result_type_id = try self.resolveTypeId(ty); @@ -2138,9 +2138,10 @@ pub const DeclGen = struct { } fn ptrAdd(self: *DeclGen, result_ty: Type, ptr_ty: Type, ptr_id: IdRef, offset_id: IdRef) !IdRef { + const mod = self.module; const result_ty_ref = try self.resolveType(result_ty, .direct); - switch (ptr_ty.ptrSize()) { + switch (ptr_ty.ptrSize(mod)) { .One => { // Pointer to array // TODO: Is this correct? @@ -2498,7 +2499,7 @@ pub const DeclGen = struct { // Construct new pointer type for the resulting pointer const elem_ty = ptr_ty.elemType2(mod); // use elemType() so that we get T for *[N]T. const elem_ty_ref = try self.resolveType(elem_ty, .direct); - const elem_ptr_ty_ref = try self.spv.ptrType(elem_ty_ref, spvStorageClass(ptr_ty.ptrAddressSpace())); + const elem_ptr_ty_ref = try self.spv.ptrType(elem_ty_ref, spvStorageClass(ptr_ty.ptrAddressSpace(mod))); if (ptr_ty.isSinglePointer(mod)) { // Pointer-to-array. In this case, the resulting pointer is not of the same type // as the ptr_ty (we want a *T, not a *[N]T), and hence we need to use accessChain. @@ -2516,7 +2517,7 @@ pub const DeclGen = struct { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const ptr_ty = self.typeOf(bin_op.lhs); - const elem_ty = ptr_ty.childType(); + const elem_ty = ptr_ty.childType(mod); // TODO: Make this return a null ptr or something if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) return null; @@ -2526,6 +2527,7 @@ pub const DeclGen = struct { } fn airPtrElemVal(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { + const mod = self.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const ptr_ty = self.typeOf(bin_op.lhs); const ptr_id = try self.resolve(bin_op.lhs); @@ -2536,9 +2538,9 @@ pub const DeclGen = struct { // If we have a pointer-to-array, construct an element pointer to use with load() // If we pass ptr_ty directly, it will attempt to load the entire array rather than // just an element. 
- var elem_ptr_info = ptr_ty.ptrInfo(); - elem_ptr_info.data.size = .One; - const elem_ptr_ty = Type.initPayload(&elem_ptr_info.base); + var elem_ptr_info = ptr_ty.ptrInfo(mod); + elem_ptr_info.size = .One; + const elem_ptr_ty = try Type.ptr(undefined, mod, elem_ptr_info); return try self.load(elem_ptr_ty, elem_ptr_id); } @@ -2586,7 +2588,7 @@ pub const DeclGen = struct { field_index: u32, ) !?IdRef { const mod = self.module; - const object_ty = object_ptr_ty.childType(); + const object_ty = object_ptr_ty.childType(mod); switch (object_ty.zigTypeTag(mod)) { .Struct => switch (object_ty.containerLayout()) { .Packed => unreachable, // TODO @@ -2662,9 +2664,10 @@ pub const DeclGen = struct { fn airAlloc(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { if (self.liveness.isUnused(inst)) return null; + const mod = self.module; const ptr_ty = self.typeOfIndex(inst); - assert(ptr_ty.ptrAddressSpace() == .generic); - const child_ty = ptr_ty.childType(); + assert(ptr_ty.ptrAddressSpace(mod) == .generic); + const child_ty = ptr_ty.childType(mod); const child_ty_ref = try self.resolveType(child_ty, .indirect); return try self.alloc(child_ty_ref, null); } @@ -2834,7 +2837,7 @@ pub const DeclGen = struct { const mod = self.module; const un_op = self.air.instructions.items(.data)[inst].un_op; const ptr_ty = self.typeOf(un_op); - const ret_ty = ptr_ty.childType(); + const ret_ty = ptr_ty.childType(mod); if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) { try self.func.body.emit(self.spv.gpa, .OpReturn, {}); @@ -2971,8 +2974,7 @@ pub const DeclGen = struct { const operand_id = try self.resolve(un_op); const optional_ty = self.typeOf(un_op); - var buf: Type.Payload.ElemType = undefined; - const payload_ty = optional_ty.optionalChild(&buf); + const payload_ty = optional_ty.optionalChild(mod); const bool_ty_ref = try self.resolveType(Type.bool, .direct); diff --git a/src/codegen/spirv/Module.zig b/src/codegen/spirv/Module.zig index 1d4840aeb7..c5ba429ec9 100644 --- a/src/codegen/spirv/Module.zig +++ b/src/codegen/spirv/Module.zig @@ -11,7 +11,8 @@ const std = @import("std"); const Allocator = std.mem.Allocator; const assert = std.debug.assert; -const ZigDecl = @import("../../Module.zig").Decl; +const ZigModule = @import("../../Module.zig"); +const ZigDecl = ZigModule.Decl; const spec = @import("spec.zig"); const Word = spec.Word; diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig index 682431203e..178f9fa64c 100644 --- a/src/link/Dwarf.zig +++ b/src/link/Dwarf.zig @@ -219,8 +219,7 @@ pub const DeclState = struct { try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(mod)}); } else { // Non-pointer optionals are structs: struct { .maybe = *, .val = * } - var buf = try arena.create(Type.Payload.ElemType); - const payload_ty = ty.optionalChild(buf); + const payload_ty = ty.optionalChild(mod); // DW.AT.structure_type try dbg_info_buffer.append(@enumToInt(AbbrevKind.struct_type)); // DW.AT.byte_size, DW.FORM.udata @@ -304,7 +303,7 @@ pub const DeclState = struct { // DW.AT.type, DW.FORM.ref4 const index = dbg_info_buffer.items.len; try dbg_info_buffer.resize(index + 4); - try self.addTypeRelocGlobal(atom_index, ty.childType(), @intCast(u32, index)); + try self.addTypeRelocGlobal(atom_index, ty.childType(mod), @intCast(u32, index)); } }, .Array => { @@ -315,7 +314,7 @@ pub const DeclState = struct { // DW.AT.type, DW.FORM.ref4 var index = dbg_info_buffer.items.len; try dbg_info_buffer.resize(index + 4); - try self.addTypeRelocGlobal(atom_index, ty.childType(), @intCast(u32, index)); + try 
self.addTypeRelocGlobal(atom_index, ty.childType(mod), @intCast(u32, index));
                 // DW.AT.subrange_type
                 try dbg_info_buffer.append(@enumToInt(AbbrevKind.array_dim));
                 // DW.AT.type, DW.FORM.ref4
@@ -323,7 +322,7 @@ pub const DeclState = struct {
                 try dbg_info_buffer.resize(index + 4);
                 try self.addTypeRelocGlobal(atom_index, Type.usize, @intCast(u32, index));
                 // DW.AT.count, DW.FORM.udata
-                const len = ty.arrayLenIncludingSentinel();
+                const len = ty.arrayLenIncludingSentinel(mod);
                 try leb128.writeULEB128(dbg_info_buffer.writer(), len);
                 // DW.AT.array_type delimit children
                 try dbg_info_buffer.append(0);
@@ -688,7 +687,7 @@ pub const DeclState = struct {
         const mod = self.mod;
         const target = mod.getTarget();
         const endian = target.cpu.arch.endian();
-        const child_ty = if (is_ptr) ty.childType() else ty;
+        const child_ty = if (is_ptr) ty.childType(mod) else ty;
 
         switch (loc) {
             .register => |reg| {
diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig
index 0154207368..fb7ca3a87f 100644
--- a/src/link/Wasm.zig
+++ b/src/link/Wasm.zig
@@ -2931,7 +2931,7 @@ pub fn getErrorTableSymbol(wasm: *Wasm) !u32 {
     const atom_index = try wasm.createAtom();
     const atom = wasm.getAtomPtr(atom_index);
-    const slice_ty = Type.initTag(.const_slice_u8_sentinel_0);
+    const slice_ty = Type.const_slice_u8_sentinel_0;
     const mod = wasm.base.options.module.?;
     atom.alignment = slice_ty.abiAlignment(mod);
     const sym_index = atom.sym_index;
@@ -2988,7 +2988,7 @@ fn populateErrorNameTable(wasm: *Wasm) !void {
     for (mod.error_name_list.items) |error_name| {
         const len = @intCast(u32, error_name.len + 1); // names are 0-terminated
-        const slice_ty = Type.initTag(.const_slice_u8_sentinel_0);
+        const slice_ty = Type.const_slice_u8_sentinel_0;
         const offset = @intCast(u32, atom.code.items.len);
         // first we create the data for the slice of the name
         try atom.code.appendNTimes(wasm.base.allocator, 0, 4); // ptr to name, will be relocated
diff --git a/src/print_air.zig b/src/print_air.zig
index f4a1aeae32..8717bdc6bf 100644
--- a/src/print_air.zig
+++ b/src/print_air.zig
@@ -433,9 +433,10 @@ const Writer = struct {
     }
 
     fn writeAggregateInit(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
+        const mod = w.module;
         const ty_pl = w.air.instructions.items(.data)[inst].ty_pl;
         const vector_ty = w.air.getRefType(ty_pl.ty);
-        const len = @intCast(usize, vector_ty.arrayLen());
+        const len = @intCast(usize, vector_ty.arrayLen(mod));
         const elements = @ptrCast([]const Air.Inst.Ref, w.air.extra[ty_pl.payload..][0..len]);
 
         try w.writeType(s, vector_ty);
@@ -512,10 +513,11 @@ const Writer = struct {
     }
 
     fn writeSelect(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
+        const mod = w.module;
         const pl_op = w.air.instructions.items(.data)[inst].pl_op;
         const extra = w.air.extraData(Air.Bin, pl_op.payload).data;
 
-        const elem_ty = w.typeOfIndex(inst).childType();
+        const elem_ty = w.typeOfIndex(inst).childType(mod);
         try w.writeType(s, elem_ty);
         try s.writeAll(", ");
         try w.writeOperand(s, inst, 0, pl_op.operand);
diff --git a/src/type.zig b/src/type.zig
index 868ae4231b..1f970919c9 100644
--- a/src/type.zig
+++ b/src/type.zig
@@ -40,7 +40,7 @@ pub const Type = struct {
             .ptr_type => return .Pointer,
             .array_type => return .Array,
             .vector_type => return .Vector,
-            .optional_type => return .Optional,
+            .opt_type => return .Optional,
             .error_union_type => return .ErrorUnion,
             .struct_type => return .Struct,
             .union_type => return .Union,
@@ -118,38 +118,17 @@ pub const Type = struct {
             .function => return .Fn,
 
             .array,
-            .array_u8_sentinel_0,
-
.array_u8, .array_sentinel, => return .Array, - .vector => return .Vector, - - .single_const_pointer_to_comptime_int, - .const_slice_u8, - .const_slice_u8_sentinel_0, - .single_const_pointer, - .single_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - .const_slice, - .mut_slice, .pointer, .inferred_alloc_const, .inferred_alloc_mut, - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, => return .Pointer, - .optional, - .optional_single_const_pointer, - .optional_single_mut_pointer, - => return .Optional, + .optional => return .Optional, - .anyerror_void_error_union, .error_union => return .ErrorUnion, + .error_union => return .ErrorUnion, .anyframe_T => return .AnyFrame, @@ -177,8 +156,7 @@ pub const Type = struct { return switch (self.zigTypeTag(mod)) { .ErrorUnion => self.errorUnionPayload().baseZigTypeTag(mod), .Optional => { - var buf: Payload.ElemType = undefined; - return self.optionalChild(&buf).baseZigTypeTag(mod); + return self.optionalChild(mod).baseZigTypeTag(mod); }, else => |t| t, }; @@ -218,8 +196,7 @@ pub const Type = struct { .Pointer => !ty.isSlice(mod) and (is_equality_cmp or ty.isCPtr()), .Optional => { if (!is_equality_cmp) return false; - var buf: Payload.ElemType = undefined; - return ty.optionalChild(&buf).isSelfComparable(mod, is_equality_cmp); + return ty.optionalChild(mod).isSelfComparable(mod, is_equality_cmp); }, }; } @@ -275,9 +252,8 @@ pub const Type = struct { } pub fn castTag(self: Type, comptime t: Tag) ?*t.Type() { - if (self.ip_index != .none) { - return null; - } + assert(self.ip_index == .none); + if (@enumToInt(self.legacy.tag_if_small_enough) < Tag.no_payload_count) return null; @@ -287,281 +263,61 @@ pub const Type = struct { return null; } - pub fn castPointer(self: Type) ?*Payload.ElemType { - return switch (self.tag()) { - .single_const_pointer, - .single_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - .const_slice, - .mut_slice, - .optional_single_const_pointer, - .optional_single_mut_pointer, - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, - => self.cast(Payload.ElemType), - - .inferred_alloc_const => unreachable, - .inferred_alloc_mut => unreachable, - - else => null, - }; - } - /// If it is a function pointer, returns the function type. Otherwise returns null. 
pub fn castPtrToFn(ty: Type, mod: *const Module) ?Type { if (ty.zigTypeTag(mod) != .Pointer) return null; - const elem_ty = ty.childType(); + const elem_ty = ty.childType(mod); if (elem_ty.zigTypeTag(mod) != .Fn) return null; return elem_ty; } - pub fn ptrIsMutable(ty: Type) bool { - return switch (ty.tag()) { - .single_const_pointer_to_comptime_int, - .const_slice_u8, - .const_slice_u8_sentinel_0, - .single_const_pointer, - .many_const_pointer, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, - .c_const_pointer, - .const_slice, - => false, - - .single_mut_pointer, - .many_mut_pointer, - .manyptr_u8, - .c_mut_pointer, - .mut_slice, - => true, - - .pointer => ty.castTag(.pointer).?.data.mutable, - - else => unreachable, + pub fn ptrIsMutable(ty: Type, mod: *const Module) bool { + return switch (ty.ip_index) { + .none => switch (ty.tag()) { + .pointer => ty.castTag(.pointer).?.data.mutable, + else => unreachable, + }, + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .ptr_type => |ptr_type| !ptr_type.is_const, + else => unreachable, + }, }; } - pub const ArrayInfo = struct { elem_type: Type, sentinel: ?Value = null, len: u64 }; - pub fn arrayInfo(self: Type) ArrayInfo { + pub const ArrayInfo = struct { + elem_type: Type, + sentinel: ?Value = null, + len: u64, + }; + + pub fn arrayInfo(self: Type, mod: *const Module) ArrayInfo { return .{ - .len = self.arrayLen(), - .sentinel = self.sentinel(), - .elem_type = self.elemType(), + .len = self.arrayLen(mod), + .sentinel = self.sentinel(mod), + .elem_type = self.childType(mod), }; } - pub fn ptrInfo(self: Type) Payload.Pointer { - switch (self.ip_index) { - .none => switch (self.tag()) { - .single_const_pointer_to_comptime_int => return .{ .data = .{ - .pointee_type = Type.comptime_int, - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = false, - .@"volatile" = false, - .size = .One, - } }, - .const_slice_u8 => return .{ .data = .{ - .pointee_type = Type.u8, - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = false, - .@"volatile" = false, - .size = .Slice, - } }, - .const_slice_u8_sentinel_0 => return .{ .data = .{ - .pointee_type = Type.u8, - .sentinel = Value.zero, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = false, - .@"volatile" = false, - .size = .Slice, - } }, - .single_const_pointer => return .{ .data = .{ - .pointee_type = self.castPointer().?.data, - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = false, - .@"volatile" = false, - .size = .One, - } }, - .single_mut_pointer => return .{ .data = .{ - .pointee_type = self.castPointer().?.data, - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = true, - .@"volatile" = false, - .size = .One, - } }, - .many_const_pointer => return .{ .data = .{ - .pointee_type = self.castPointer().?.data, - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = false, - .@"volatile" = false, - .size = .Many, - } }, - .manyptr_const_u8 => return .{ .data = .{ - .pointee_type = Type.u8, - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - 
.@"allowzero" = false, - .mutable = false, - .@"volatile" = false, - .size = .Many, - } }, - .manyptr_const_u8_sentinel_0 => return .{ .data = .{ - .pointee_type = Type.u8, - .sentinel = Value.zero, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = false, - .@"volatile" = false, - .size = .Many, - } }, - .many_mut_pointer => return .{ .data = .{ - .pointee_type = self.castPointer().?.data, - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = true, - .@"volatile" = false, - .size = .Many, - } }, - .manyptr_u8 => return .{ .data = .{ - .pointee_type = Type.u8, - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = true, - .@"volatile" = false, - .size = .Many, - } }, - .c_const_pointer => return .{ .data = .{ - .pointee_type = self.castPointer().?.data, - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = true, - .mutable = false, - .@"volatile" = false, - .size = .C, - } }, - .c_mut_pointer => return .{ .data = .{ - .pointee_type = self.castPointer().?.data, - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = true, - .mutable = true, - .@"volatile" = false, - .size = .C, - } }, - .const_slice => return .{ .data = .{ - .pointee_type = self.castPointer().?.data, - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = false, - .@"volatile" = false, - .size = .Slice, - } }, - .mut_slice => return .{ .data = .{ - .pointee_type = self.castPointer().?.data, - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = true, - .@"volatile" = false, - .size = .Slice, - } }, - - .pointer => return self.castTag(.pointer).?.*, - - .optional_single_mut_pointer => return .{ .data = .{ - .pointee_type = self.castPointer().?.data, - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = true, - .@"volatile" = false, - .size = .One, - } }, - .optional_single_const_pointer => return .{ .data = .{ - .pointee_type = self.castPointer().?.data, - .sentinel = null, - .@"align" = 0, - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = false, - .@"volatile" = false, - .size = .One, - } }, - .optional => { - var buf: Payload.ElemType = undefined; - const child_type = self.optionalChild(&buf); - return child_type.ptrInfo(); + pub fn ptrInfo(ty: Type, mod: *const Module) Payload.Pointer.Data { + return switch (ty.ip_index) { + .none => switch (ty.tag()) { + .pointer => ty.castTag(.pointer).?.data, + .optional => b: { + const child_type = ty.optionalChild(mod); + break :b child_type.ptrInfo(mod); }, else => unreachable, }, - else => @panic("TODO"), - } + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .ptr_type => |p| Payload.Pointer.Data.fromKey(p), + .opt_type => |child| switch (mod.intern_pool.indexToKey(child)) { + .ptr_type => |p| Payload.Pointer.Data.fromKey(p), + else => unreachable, + }, + else => unreachable, + }, + }; } pub fn eql(a: Type, b: Type, mod: *Module) bool { @@ -658,20 +414,17 @@ pub const Type = struct { }, .array, - 
.array_u8_sentinel_0, - .array_u8, .array_sentinel, - .vector, => { if (a.zigTypeTag(mod) != b.zigTypeTag(mod)) return false; - if (a.arrayLen() != b.arrayLen()) + if (a.arrayLen(mod) != b.arrayLen(mod)) return false; - const elem_ty = a.elemType(); - if (!elem_ty.eql(b.elemType(), mod)) + const elem_ty = a.childType(mod); + if (!elem_ty.eql(b.childType(mod), mod)) return false; - const sentinel_a = a.sentinel(); - const sentinel_b = b.sentinel(); + const sentinel_a = a.sentinel(mod); + const sentinel_b = b.sentinel(mod); if (sentinel_a) |sa| { if (sentinel_b) |sb| { return sa.eql(sb, elem_ty, mod); @@ -683,28 +436,14 @@ pub const Type = struct { } }, - .single_const_pointer_to_comptime_int, - .const_slice_u8, - .const_slice_u8_sentinel_0, - .single_const_pointer, - .single_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - .const_slice, - .mut_slice, .pointer, .inferred_alloc_const, .inferred_alloc_mut, - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, => { if (b.zigTypeTag(mod) != .Pointer) return false; - const info_a = a.ptrInfo().data; - const info_b = b.ptrInfo().data; + const info_a = a.ptrInfo(mod); + const info_b = b.ptrInfo(mod); if (!info_a.pointee_type.eql(info_b.pointee_type, mod)) return false; if (info_a.@"align" != info_b.@"align") @@ -743,18 +482,13 @@ pub const Type = struct { return true; }, - .optional, - .optional_single_const_pointer, - .optional_single_mut_pointer, - => { + .optional => { if (b.zigTypeTag(mod) != .Optional) return false; - var buf_a: Payload.ElemType = undefined; - var buf_b: Payload.ElemType = undefined; - return a.optionalChild(&buf_a).eql(b.optionalChild(&buf_b), mod); + return a.optionalChild(mod).eql(b.optionalChild(mod), mod); }, - .anyerror_void_error_union, .error_union => { + .error_union => { if (b.zigTypeTag(mod) != .ErrorUnion) return false; const a_set = a.errorUnionSet(); @@ -947,47 +681,23 @@ pub const Type = struct { }, .array, - .array_u8_sentinel_0, - .array_u8, .array_sentinel, => { std.hash.autoHash(hasher, std.builtin.TypeId.Array); - const elem_ty = ty.elemType(); - std.hash.autoHash(hasher, ty.arrayLen()); + const elem_ty = ty.childType(mod); + std.hash.autoHash(hasher, ty.arrayLen(mod)); hashWithHasher(elem_ty, hasher, mod); - hashSentinel(ty.sentinel(), elem_ty, hasher, mod); + hashSentinel(ty.sentinel(mod), elem_ty, hasher, mod); }, - .vector => { - std.hash.autoHash(hasher, std.builtin.TypeId.Vector); - - const elem_ty = ty.elemType(); - std.hash.autoHash(hasher, ty.vectorLen()); - hashWithHasher(elem_ty, hasher, mod); - }, - - .single_const_pointer_to_comptime_int, - .const_slice_u8, - .const_slice_u8_sentinel_0, - .single_const_pointer, - .single_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - .const_slice, - .mut_slice, .pointer, .inferred_alloc_const, .inferred_alloc_mut, - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, => { std.hash.autoHash(hasher, std.builtin.TypeId.Pointer); - const info = ty.ptrInfo().data; + const info = ty.ptrInfo(mod); hashWithHasher(info.pointee_type, hasher, mod); hashSentinel(info.sentinel, info.pointee_type, hasher, mod); std.hash.autoHash(hasher, info.@"align"); @@ -1001,17 +711,13 @@ pub const Type = struct { std.hash.autoHash(hasher, info.size); }, - .optional, - .optional_single_const_pointer, - .optional_single_mut_pointer, - => { + .optional => { std.hash.autoHash(hasher, std.builtin.TypeId.Optional); - var buf: Payload.ElemType = undefined; - 
hashWithHasher(ty.optionalChild(&buf), hasher, mod); + hashWithHasher(ty.optionalChild(mod), hasher, mod); }, - .anyerror_void_error_union, .error_union => { + .error_union => { std.hash.autoHash(hasher, std.builtin.TypeId.ErrorUnion); const set_ty = ty.errorUnionSet(); @@ -1023,7 +729,7 @@ pub const Type = struct { .anyframe_T => { std.hash.autoHash(hasher, std.builtin.TypeId.AnyFrame); - hashWithHasher(ty.childType(), hasher, mod); + hashWithHasher(ty.childType(mod), hasher, mod); }, .empty_struct => { @@ -1129,33 +835,12 @@ pub const Type = struct { .legacy = .{ .tag_if_small_enough = self.legacy.tag_if_small_enough }, }; } else switch (self.legacy.ptr_otherwise.tag) { - .single_const_pointer_to_comptime_int, - .const_slice_u8, - .const_slice_u8_sentinel_0, - .anyerror_void_error_union, .inferred_alloc_const, .inferred_alloc_mut, .empty_struct_literal, - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, => unreachable, - .array_u8, - .array_u8_sentinel_0, - => return self.copyPayloadShallow(allocator, Payload.Len), - - .single_const_pointer, - .single_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - .const_slice, - .mut_slice, .optional, - .optional_single_mut_pointer, - .optional_single_const_pointer, .anyframe_T, => { const payload = self.cast(Payload.ElemType).?; @@ -1170,13 +855,6 @@ pub const Type = struct { }; }, - .vector => { - const payload = self.castTag(.vector).?.data; - return Tag.vector.create(allocator, .{ - .len = payload.len, - .elem_type = try payload.elem_type.copy(allocator), - }); - }, .array => { const payload = self.castTag(.array).?.data; return Tag.array.create(allocator, .{ @@ -1408,13 +1086,6 @@ pub const Type = struct { }); }, - .anyerror_void_error_union => return writer.writeAll("anyerror!void"), - .const_slice_u8 => return writer.writeAll("[]const u8"), - .const_slice_u8_sentinel_0 => return writer.writeAll("[:0]const u8"), - .single_const_pointer_to_comptime_int => return writer.writeAll("*const comptime_int"), - .manyptr_u8 => return writer.writeAll("[*]u8"), - .manyptr_const_u8 => return writer.writeAll("[*]const u8"), - .manyptr_const_u8_sentinel_0 => return writer.writeAll("[*:0]const u8"), .function => { const payload = ty.castTag(.function).?.data; try writer.writeAll("fn("); @@ -1447,20 +1118,6 @@ pub const Type = struct { ty = return_type; continue; }, - .array_u8 => { - const len = ty.castTag(.array_u8).?.data; - return writer.print("[{d}]u8", .{len}); - }, - .array_u8_sentinel_0 => { - const len = ty.castTag(.array_u8_sentinel_0).?.data; - return writer.print("[{d}:0]u8", .{len}); - }, - .vector => { - const payload = ty.castTag(.vector).?.data; - try writer.print("@Vector({d}, ", .{payload.len}); - try payload.elem_type.dump("", .{}, writer); - return writer.writeAll(")"); - }, .array => { const payload = ty.castTag(.array).?.data; try writer.print("[{d}]", .{payload.len}); @@ -1512,72 +1169,12 @@ pub const Type = struct { try writer.writeAll("}"); return; }, - .single_const_pointer => { - const pointee_type = ty.castTag(.single_const_pointer).?.data; - try writer.writeAll("*const "); - ty = pointee_type; - continue; - }, - .single_mut_pointer => { - const pointee_type = ty.castTag(.single_mut_pointer).?.data; - try writer.writeAll("*"); - ty = pointee_type; - continue; - }, - .many_const_pointer => { - const pointee_type = ty.castTag(.many_const_pointer).?.data; - try writer.writeAll("[*]const "); - ty = pointee_type; - continue; - }, - .many_mut_pointer => { - const pointee_type 
= ty.castTag(.many_mut_pointer).?.data; - try writer.writeAll("[*]"); - ty = pointee_type; - continue; - }, - .c_const_pointer => { - const pointee_type = ty.castTag(.c_const_pointer).?.data; - try writer.writeAll("[*c]const "); - ty = pointee_type; - continue; - }, - .c_mut_pointer => { - const pointee_type = ty.castTag(.c_mut_pointer).?.data; - try writer.writeAll("[*c]"); - ty = pointee_type; - continue; - }, - .const_slice => { - const pointee_type = ty.castTag(.const_slice).?.data; - try writer.writeAll("[]const "); - ty = pointee_type; - continue; - }, - .mut_slice => { - const pointee_type = ty.castTag(.mut_slice).?.data; - try writer.writeAll("[]"); - ty = pointee_type; - continue; - }, .optional => { const child_type = ty.castTag(.optional).?.data; try writer.writeByte('?'); ty = child_type; continue; }, - .optional_single_const_pointer => { - const pointee_type = ty.castTag(.optional_single_const_pointer).?.data; - try writer.writeAll("?*const "); - ty = pointee_type; - continue; - }, - .optional_single_mut_pointer => { - const pointee_type = ty.castTag(.optional_single_mut_pointer).?.data; - try writer.writeAll("?*"); - ty = pointee_type; - continue; - }, .pointer => { const payload = ty.castTag(.pointer).?.data; @@ -1680,7 +1277,7 @@ pub const Type = struct { .ptr_type => @panic("TODO"), .array_type => @panic("TODO"), .vector_type => @panic("TODO"), - .optional_type => @panic("TODO"), + .opt_type => @panic("TODO"), .error_union_type => @panic("TODO"), .simple_type => |s| return writer.writeAll(@tagName(s)), .struct_type => @panic("TODO"), @@ -1733,14 +1330,6 @@ pub const Type = struct { try decl.renderFullyQualifiedName(mod, writer); }, - .anyerror_void_error_union => try writer.writeAll("anyerror!void"), - .const_slice_u8 => try writer.writeAll("[]const u8"), - .const_slice_u8_sentinel_0 => try writer.writeAll("[:0]const u8"), - .single_const_pointer_to_comptime_int => try writer.writeAll("*const comptime_int"), - .manyptr_u8 => try writer.writeAll("[*]u8"), - .manyptr_const_u8 => try writer.writeAll("[*]const u8"), - .manyptr_const_u8_sentinel_0 => try writer.writeAll("[*:0]const u8"), - .error_set_inferred => { const func = ty.castTag(.error_set_inferred).?.data.func; @@ -1799,20 +1388,6 @@ pub const Type = struct { try print(error_union.payload, writer, mod); }, - .array_u8 => { - const len = ty.castTag(.array_u8).?.data; - try writer.print("[{d}]u8", .{len}); - }, - .array_u8_sentinel_0 => { - const len = ty.castTag(.array_u8_sentinel_0).?.data; - try writer.print("[{d}:0]u8", .{len}); - }, - .vector => { - const payload = ty.castTag(.vector).?.data; - try writer.print("@Vector({d}, ", .{payload.len}); - try print(payload.elem_type, writer, mod); - try writer.writeAll(")"); - }, .array => { const payload = ty.castTag(.array).?.data; try writer.print("[{d}]", .{payload.len}); @@ -1865,17 +1440,8 @@ pub const Type = struct { try writer.writeAll("}"); }, - .pointer, - .single_const_pointer, - .single_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - .const_slice, - .mut_slice, - => { - const info = ty.ptrInfo().data; + .pointer => { + const info = ty.ptrInfo(mod); if (info.sentinel) |s| switch (info.size) { .One, .C => unreachable, @@ -1920,16 +1486,6 @@ pub const Type = struct { try writer.writeByte('?'); try print(child_type, writer, mod); }, - .optional_single_mut_pointer => { - const pointee_type = ty.castTag(.optional_single_mut_pointer).?.data; - try writer.writeAll("?*"); - try print(pointee_type, writer, mod); - }, - 
.optional_single_const_pointer => { - const pointee_type = ty.castTag(.optional_single_const_pointer).?.data; - try writer.writeAll("?*const "); - try print(pointee_type, writer, mod); - }, .anyframe_T => { const return_type = ty.castTag(.anyframe_T).?.data; try writer.print("anyframe->", .{}); @@ -1963,12 +1519,6 @@ pub const Type = struct { pub fn toValue(self: Type, allocator: Allocator) Allocator.Error!Value { if (self.ip_index != .none) return self.ip_index.toValue(); switch (self.tag()) { - .single_const_pointer_to_comptime_int => return Value{ .ip_index = .single_const_pointer_to_comptime_int_type, .legacy = undefined }, - .const_slice_u8 => return Value{ .ip_index = .const_slice_u8_type, .legacy = undefined }, - .const_slice_u8_sentinel_0 => return Value{ .ip_index = .const_slice_u8_sentinel_0_type, .legacy = undefined }, - .manyptr_u8 => return Value{ .ip_index = .manyptr_u8_type, .legacy = undefined }, - .manyptr_const_u8 => return Value{ .ip_index = .manyptr_const_u8_type, .legacy = undefined }, - .manyptr_const_u8_sentinel_0 => return Value{ .ip_index = .manyptr_const_u8_sentinel_0_type, .legacy = undefined }, .inferred_alloc_const => unreachable, .inferred_alloc_mut => unreachable, else => return Value.Tag.ty.create(allocator, self), @@ -1996,10 +1546,41 @@ pub const Type = struct { ) RuntimeBitsError!bool { if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) { .int_type => |int_type| return int_type.bits != 0, - .ptr_type => @panic("TODO"), - .array_type => @panic("TODO"), - .vector_type => @panic("TODO"), - .optional_type => @panic("TODO"), + .ptr_type => |ptr_type| { + // Pointers to zero-bit types still have a runtime address; however, pointers + // to comptime-only types do not, with the exception of function pointers. + if (ignore_comptime_only) return true; + const child_ty = ptr_type.elem_type.toType(); + if (child_ty.zigTypeTag(mod) == .Fn) return !child_ty.fnInfo().is_generic; + if (strat == .sema) return !(try strat.sema.typeRequiresComptime(ty)); + return !comptimeOnly(ty, mod); + }, + .array_type => |array_type| { + if (array_type.sentinel != .none) { + return array_type.child.toType().hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat); + } else { + return array_type.len > 0 and + try array_type.child.toType().hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat); + } + }, + .vector_type => |vector_type| { + return vector_type.len > 0 and + try vector_type.child.toType().hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat); + }, + .opt_type => |child| { + const child_ty = child.toType(); + if (child_ty.isNoReturn()) { + // Then the optional is comptime-known to be null. + return false; + } + if (ignore_comptime_only) { + return true; + } else if (strat == .sema) { + return !(try strat.sema.typeRequiresComptime(child_ty)); + } else { + return !comptimeOnly(child_ty, mod); + } + }, .error_union_type => @panic("TODO"), .simple_type => |t| return switch (t) { .f16, @@ -2058,14 +1639,7 @@ pub const Type = struct { .enum_tag => unreachable, // it's a value, not a type }; switch (ty.tag()) { - .const_slice_u8, - .const_slice_u8_sentinel_0, - .array_u8_sentinel_0, - .anyerror_void_error_union, .error_set_inferred, - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, .@"opaque", .error_set_single, @@ -2077,22 +1651,12 @@ pub const Type = struct { // Pointers to zero-bit types still have a runtime address; however, pointers // to comptime-only types do not, with the exception of function pointers. 
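            // Illustrative cases (not from the upstream source): `*u0` keeps
            // runtime bits because the address itself is real; `*comptime_int`
            // has none because the pointee is comptime-only; a non-generic
            // `*const fn () void` keeps runtime bits under the function-pointer
            // exception noted above.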
.anyframe_T, - .optional_single_mut_pointer, - .optional_single_const_pointer, - .single_const_pointer, - .single_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - .const_slice, - .mut_slice, .pointer, => { if (ignore_comptime_only) { return true; - } else if (ty.childType().zigTypeTag(mod) == .Fn) { - return !ty.childType().fnInfo().is_generic; + } else if (ty.childType(mod).zigTypeTag(mod) == .Fn) { + return !ty.childType(mod).fnInfo().is_generic; } else if (strat == .sema) { return !(try strat.sema.typeRequiresComptime(ty)); } else { @@ -2101,7 +1665,6 @@ pub const Type = struct { }, // These are false because they are comptime-only types. - .single_const_pointer_to_comptime_int, .empty_struct, .empty_struct_literal, // These are function *bodies*, not pointers. @@ -2111,8 +1674,7 @@ pub const Type = struct { => return false, .optional => { - var buf: Payload.ElemType = undefined; - const child_ty = ty.optionalChild(&buf); + const child_ty = ty.optionalChild(mod); if (child_ty.isNoReturn()) { // Then the optional is comptime-known to be null. return false; @@ -2200,10 +1762,9 @@ pub const Type = struct { } }, - .array, .vector => return ty.arrayLen() != 0 and - try ty.elemType().hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat), - .array_u8 => return ty.arrayLen() != 0, - .array_sentinel => return ty.childType().hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat), + .array => return ty.arrayLen(mod) != 0 and + try ty.childType(mod).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat), + .array_sentinel => return ty.childType(mod).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat), .tuple, .anon_struct => { const tuple = ty.tupleFields(); @@ -2224,14 +1785,14 @@ pub const Type = struct { /// readFrom/writeToMemory are supported only for types with a well- /// defined memory layout pub fn hasWellDefinedLayout(ty: Type, mod: *const Module) bool { - if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .int_type => return true, - .ptr_type => @panic("TODO"), - .array_type => @panic("TODO"), - .vector_type => @panic("TODO"), - .optional_type => @panic("TODO"), - .error_union_type => @panic("TODO"), - .simple_type => |t| return switch (t) { + if (ty.ip_index != .none) return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .int_type => true, + .ptr_type => true, + .array_type => |array_type| array_type.child.toType().hasWellDefinedLayout(mod), + .vector_type => true, + .opt_type => |child| child.toType().isPtrLikeOptional(mod), + .error_union_type => false, + .simple_type => |t| switch (t) { .f16, .f32, .f64, @@ -2287,23 +1848,8 @@ pub const Type = struct { .enum_tag => unreachable, // it's a value, not a type }; return switch (ty.tag()) { - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, - .array_u8, - .array_u8_sentinel_0, .pointer, - .single_const_pointer, - .single_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - .single_const_pointer_to_comptime_int, .enum_numbered, - .vector, - .optional_single_mut_pointer, - .optional_single_const_pointer, => true, .error_set, @@ -2313,13 +1859,8 @@ pub const Type = struct { .@"opaque", // These are function bodies, not function pointers. 
.function, - .const_slice_u8, - .const_slice_u8_sentinel_0, - .const_slice, - .mut_slice, .enum_simple, .error_union, - .anyerror_void_error_union, .anyframe_T, .tuple, .anon_struct, @@ -2336,7 +1877,7 @@ pub const Type = struct { .array, .array_sentinel, - => ty.childType().hasWellDefinedLayout(mod), + => ty.childType(mod).hasWellDefinedLayout(mod), .optional => ty.isPtrLikeOptional(mod), .@"struct" => ty.castTag(.@"struct").?.data.layout != .Auto, @@ -2417,76 +1958,36 @@ pub const Type = struct { } pub fn ptrAlignmentAdvanced(ty: Type, mod: *const Module, opt_sema: ?*Sema) !u32 { - switch (ty.tag()) { - .single_const_pointer, - .single_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - .const_slice, - .mut_slice, - .optional_single_const_pointer, - .optional_single_mut_pointer, - => { - const child_type = ty.cast(Payload.ElemType).?.data; - if (opt_sema) |sema| { - const res = try child_type.abiAlignmentAdvanced(mod, .{ .sema = sema }); - return res.scalar; - } - return (child_type.abiAlignmentAdvanced(mod, .eager) catch unreachable).scalar; - }, - - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, - .const_slice_u8, - .const_slice_u8_sentinel_0, - => return 1, + switch (ty.ip_index) { + .none => switch (ty.tag()) { + .pointer => { + const ptr_info = ty.castTag(.pointer).?.data; + if (ptr_info.@"align" != 0) { + return ptr_info.@"align"; + } else if (opt_sema) |sema| { + const res = try ptr_info.pointee_type.abiAlignmentAdvanced(mod, .{ .sema = sema }); + return res.scalar; + } else { + return (ptr_info.pointee_type.abiAlignmentAdvanced(mod, .eager) catch unreachable).scalar; + } + }, + .optional => return ty.castTag(.optional).?.data.ptrAlignmentAdvanced(mod, opt_sema), - .pointer => { - const ptr_info = ty.castTag(.pointer).?.data; - if (ptr_info.@"align" != 0) { - return ptr_info.@"align"; - } else if (opt_sema) |sema| { - const res = try ptr_info.pointee_type.abiAlignmentAdvanced(mod, .{ .sema = sema }); - return res.scalar; - } else { - return (ptr_info.pointee_type.abiAlignmentAdvanced(mod, .eager) catch unreachable).scalar; - } + else => unreachable, + }, + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + else => @panic("TODO"), }, - .optional => return ty.castTag(.optional).?.data.ptrAlignmentAdvanced(mod, opt_sema), - - else => unreachable, } } - pub fn ptrAddressSpace(self: Type) std.builtin.AddressSpace { + pub fn ptrAddressSpace(self: Type, mod: *const Module) std.builtin.AddressSpace { return switch (self.tag()) { - .single_const_pointer_to_comptime_int, - .const_slice_u8, - .const_slice_u8_sentinel_0, - .single_const_pointer, - .single_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - .const_slice, - .mut_slice, - .inferred_alloc_const, - .inferred_alloc_mut, - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, - => .generic, - .pointer => self.castTag(.pointer).?.data.@"addrspace", .optional => { - var buf: Payload.ElemType = undefined; - const child_type = self.optionalChild(&buf); - return child_type.ptrAddressSpace(); + const child_type = self.optionalChild(mod); + return child_type.ptrAddressSpace(mod); }, else => unreachable, @@ -2530,15 +2031,31 @@ pub const Type = struct { ) Module.CompileError!AbiAlignmentAdvanced { const target = mod.getTarget(); + const opt_sema = switch (strat) { + .sema => |sema| sema, + else => null, + }; + if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) { .int_type => |int_type| { 
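                // For illustration, assuming `intAbiAlignment` rounds the byte
                // size up to a power of two and caps it at the target's maximum
                // integer alignment: u8 -> 1, u16 -> 2, u24 -> 4, u64 -> 8.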
if (int_type.bits == 0) return AbiAlignmentAdvanced{ .scalar = 0 }; return AbiAlignmentAdvanced{ .scalar = intAbiAlignment(int_type.bits, target) }; }, - .ptr_type => @panic("TODO"), - .array_type => @panic("TODO"), - .vector_type => @panic("TODO"), - .optional_type => @panic("TODO"), + .ptr_type => { + return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }; + }, + .array_type => |array_type| { + return array_type.child.toType().abiAlignmentAdvanced(mod, strat); + }, + .vector_type => |vector_type| { + const bits_u64 = try bitSizeAdvanced(vector_type.child.toType(), mod, opt_sema); + const bits = @intCast(u32, bits_u64); + const bytes = ((bits * vector_type.len) + 7) / 8; + const alignment = std.math.ceilPowerOfTwoAssert(u32, bytes); + return AbiAlignmentAdvanced{ .scalar = alignment }; + }, + + .opt_type => @panic("TODO"), .error_union_type => @panic("TODO"), .simple_type => |t| switch (t) { .bool, @@ -2617,15 +2134,8 @@ pub const Type = struct { .enum_tag => unreachable, // it's a value, not a type }; - const opt_sema = switch (strat) { - .sema => |sema| sema, - else => null, - }; switch (ty.tag()) { - .array_u8_sentinel_0, - .array_u8, - .@"opaque", - => return AbiAlignmentAdvanced{ .scalar = 1 }, + .@"opaque" => return AbiAlignmentAdvanced{ .scalar = 1 }, // represents machine code; not a pointer .function => { @@ -2634,47 +2144,21 @@ pub const Type = struct { return AbiAlignmentAdvanced{ .scalar = target_util.defaultFunctionAlignment(target) }; }, - .single_const_pointer_to_comptime_int, - .const_slice_u8, - .const_slice_u8_sentinel_0, - .single_const_pointer, - .single_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - .const_slice, - .mut_slice, - .optional_single_const_pointer, - .optional_single_mut_pointer, .pointer, - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, .anyframe_T, => return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }, // TODO revisit this when we have the concept of the error tag type - .anyerror_void_error_union, .error_set_inferred, .error_set_single, .error_set, .error_set_merged, => return AbiAlignmentAdvanced{ .scalar = 2 }, - .array, .array_sentinel => return ty.elemType().abiAlignmentAdvanced(mod, strat), - - .vector => { - const len = ty.arrayLen(); - const bits = try bitSizeAdvanced(ty.elemType(), mod, opt_sema); - const bytes = ((bits * len) + 7) / 8; - const alignment = std.math.ceilPowerOfTwoAssert(u64, bytes); - return AbiAlignmentAdvanced{ .scalar = @intCast(u32, alignment) }; - }, + .array, .array_sentinel => return ty.childType(mod).abiAlignmentAdvanced(mod, strat), .optional => { - var buf: Payload.ElemType = undefined; - const child_type = ty.optionalChild(&buf); + const child_type = ty.optionalChild(mod); switch (child_type.zigTypeTag(mod)) { .Pointer => return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }, @@ -2933,8 +2417,29 @@ pub const Type = struct { }, .ptr_type => @panic("TODO"), .array_type => @panic("TODO"), - .vector_type => @panic("TODO"), - .optional_type => @panic("TODO"), + .vector_type => |vector_type| { + const opt_sema = switch (strat) { + .sema => |sema| sema, + .eager => null, + .lazy => |arena| return AbiSizeAdvanced{ + .val = try Value.Tag.lazy_size.create(arena, ty), + }, + }; + const elem_bits_u64 = try vector_type.child.toType().bitSizeAdvanced(mod, opt_sema); + const elem_bits = @intCast(u32, elem_bits_u64); + const total_bits = elem_bits * vector_type.len; + const total_bytes = (total_bits 
+ 7) / 8; + const alignment = switch (try ty.abiAlignmentAdvanced(mod, strat)) { + .scalar => |x| x, + .val => return AbiSizeAdvanced{ + .val = try Value.Tag.lazy_size.create(strat.lazy, ty), + }, + }; + const result = std.mem.alignForwardGeneric(u32, total_bytes, alignment); + return AbiSizeAdvanced{ .scalar = result }; + }, + + .opt_type => @panic("TODO"), .error_union_type => @panic("TODO"), .simple_type => |t| switch (t) { .bool, @@ -3014,7 +2519,6 @@ pub const Type = struct { .inferred_alloc_const => unreachable, .inferred_alloc_mut => unreachable, - .single_const_pointer_to_comptime_int, .empty_struct_literal, .empty_struct, => return AbiSizeAdvanced{ .scalar = 0 }, @@ -3068,8 +2572,6 @@ pub const Type = struct { return abiSizeAdvancedUnion(ty, mod, strat, union_obj, true); }, - .array_u8 => return AbiSizeAdvanced{ .scalar = ty.castTag(.array_u8).?.data }, - .array_u8_sentinel_0 => return AbiSizeAdvanced{ .scalar = ty.castTag(.array_u8_sentinel_0).?.data + 1 }, .array => { const payload = ty.castTag(.array).?.data; switch (try payload.elem_type.abiSizeAdvanced(mod, strat)) { @@ -3093,47 +2595,7 @@ pub const Type = struct { } }, - .vector => { - const payload = ty.castTag(.vector).?.data; - const opt_sema = switch (strat) { - .sema => |sema| sema, - .eager => null, - .lazy => |arena| return AbiSizeAdvanced{ - .val = try Value.Tag.lazy_size.create(arena, ty), - }, - }; - const elem_bits = try payload.elem_type.bitSizeAdvanced(mod, opt_sema); - const total_bits = elem_bits * payload.len; - const total_bytes = (total_bits + 7) / 8; - const alignment = switch (try ty.abiAlignmentAdvanced(mod, strat)) { - .scalar => |x| x, - .val => return AbiSizeAdvanced{ - .val = try Value.Tag.lazy_size.create(strat.lazy, ty), - }, - }; - const result = std.mem.alignForwardGeneric(u64, total_bytes, alignment); - return AbiSizeAdvanced{ .scalar = result }; - }, - - .anyframe_T, - .optional_single_const_pointer, - .optional_single_mut_pointer, - .single_const_pointer, - .single_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, - => return AbiSizeAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }, - - .const_slice, - .mut_slice, - .const_slice_u8, - .const_slice_u8_sentinel_0, - => return AbiSizeAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) * 2 }, + .anyframe_T => return AbiSizeAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }, .pointer => switch (ty.castTag(.pointer).?.data.size) { .Slice => return AbiSizeAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) * 2 }, @@ -3141,7 +2603,6 @@ pub const Type = struct { }, // TODO revisit this when we have the concept of the error tag type - .anyerror_void_error_union, .error_set_inferred, .error_set, .error_set_merged, @@ -3149,8 +2610,7 @@ pub const Type = struct { => return AbiSizeAdvanced{ .scalar = 2 }, .optional => { - var buf: Payload.ElemType = undefined; - const child_type = ty.optionalChild(&buf); + const child_type = ty.optionalChild(mod); if (child_type.isNoReturn()) { return AbiSizeAdvanced{ .scalar = 0 }; @@ -3272,8 +2732,12 @@ pub const Type = struct { .int_type => |int_type| return int_type.bits, .ptr_type => @panic("TODO"), .array_type => @panic("TODO"), - .vector_type => @panic("TODO"), - .optional_type => @panic("TODO"), + .vector_type => |vector_type| { + const child_ty = vector_type.child.toType(); + const elem_bit_size = try bitSizeAdvanced(child_ty, mod, opt_sema); + return elem_bit_size * vector_type.len; + 
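                // Worked example: @Vector(3, u10) has bitSize 10 * 3 = 30 here,
                // while the abiSize arm above packs it into (30 + 7) / 8 = 4 bytes
                // and rounds up to the vector alignment ceilPowerOfTwo(4) = 4,
                // giving abiSize 4.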
}, + .opt_type => @panic("TODO"), .error_union_type => @panic("TODO"), .simple_type => |t| switch (t) { .f16 => return 16, @@ -3339,7 +2803,6 @@ pub const Type = struct { switch (ty.tag()) { .function => unreachable, // represents machine code; not a pointer - .single_const_pointer_to_comptime_int => unreachable, .empty_struct => unreachable, .empty_struct_literal => unreachable, .inferred_alloc_const => unreachable, @@ -3388,13 +2851,6 @@ pub const Type = struct { return size; }, - .vector => { - const payload = ty.castTag(.vector).?.data; - const elem_bit_size = try bitSizeAdvanced(payload.elem_type, mod, opt_sema); - return elem_bit_size * payload.len; - }, - .array_u8 => return 8 * ty.castTag(.array_u8).?.data, - .array_u8_sentinel_0 => return 8 * (ty.castTag(.array_u8_sentinel_0).?.data + 1), .array => { const payload = ty.castTag(.array).?.data; const elem_size = std.math.max(payload.elem_type.abiAlignment(mod), payload.elem_type.abiSize(mod)); @@ -3415,43 +2871,13 @@ pub const Type = struct { .anyframe_T => return target.ptrBitWidth(), - .const_slice, - .mut_slice, - => return target.ptrBitWidth() * 2, - - .const_slice_u8, - .const_slice_u8_sentinel_0, - => return target.ptrBitWidth() * 2, - - .optional_single_const_pointer, - .optional_single_mut_pointer, - => { - return target.ptrBitWidth(); - }, - - .single_const_pointer, - .single_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - => { - return target.ptrBitWidth(); - }, - .pointer => switch (ty.castTag(.pointer).?.data.size) { .Slice => return target.ptrBitWidth() * 2, else => return target.ptrBitWidth(), }, - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, - => return target.ptrBitWidth(), - .error_set, .error_set_single, - .anyerror_void_error_union, .error_set_inferred, .error_set_merged, => return 16, // TODO revisit this when we have the concept of the error tag type @@ -3481,12 +2907,11 @@ pub const Type = struct { return true; }, .Array => { - if (ty.arrayLenIncludingSentinel() == 0) return true; - return ty.childType().layoutIsResolved(mod); + if (ty.arrayLenIncludingSentinel(mod) == 0) return true; + return ty.childType(mod).layoutIsResolved(mod); }, .Optional => { - var buf: Type.Payload.ElemType = undefined; - const payload_ty = ty.optionalChild(&buf); + const payload_ty = ty.optionalChild(mod); return payload_ty.layoutIsResolved(mod); }, .ErrorUnion => { @@ -3500,9 +2925,6 @@ pub const Type = struct { pub fn isSinglePointer(ty: Type, mod: *const Module) bool { switch (ty.ip_index) { .none => return switch (ty.tag()) { - .single_const_pointer, - .single_mut_pointer, - .single_const_pointer_to_comptime_int, .inferred_alloc_const, .inferred_alloc_mut, => true, @@ -3519,54 +2941,33 @@ pub const Type = struct { } /// Asserts `ty` is a pointer. - pub fn ptrSize(ty: Type) std.builtin.Type.Pointer.Size { - return ptrSizeOrNull(ty).?; + pub fn ptrSize(ty: Type, mod: *const Module) std.builtin.Type.Pointer.Size { + return ptrSizeOrNull(ty, mod).?; } /// Returns `null` if `ty` is not a pointer. 
- pub fn ptrSizeOrNull(ty: Type) ?std.builtin.Type.Pointer.Size { - return switch (ty.tag()) { - .const_slice, - .mut_slice, - .const_slice_u8, - .const_slice_u8_sentinel_0, - => .Slice, - - .many_const_pointer, - .many_mut_pointer, - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, - => .Many, - - .c_const_pointer, - .c_mut_pointer, - => .C, - - .single_const_pointer, - .single_mut_pointer, - .single_const_pointer_to_comptime_int, - .inferred_alloc_const, - .inferred_alloc_mut, - => .One, + pub fn ptrSizeOrNull(ty: Type, mod: *const Module) ?std.builtin.Type.Pointer.Size { + return switch (ty.ip_index) { + .none => switch (ty.tag()) { + .inferred_alloc_const, + .inferred_alloc_mut, + => .One, - .pointer => ty.castTag(.pointer).?.data.size, + .pointer => ty.castTag(.pointer).?.data.size, - else => null, + else => null, + }, + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .ptr_type => |ptr_info| ptr_info.size, + else => null, + }, }; } pub fn isSlice(ty: Type, mod: *const Module) bool { return switch (ty.ip_index) { .none => switch (ty.tag()) { - .const_slice, - .mut_slice, - .const_slice_u8, - .const_slice_u8_sentinel_0, - => true, - .pointer => ty.castTag(.pointer).?.data.size == .Slice, - else => false, }, else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { @@ -3583,78 +2984,28 @@ pub const Type = struct { pub fn slicePtrFieldType(self: Type, buffer: *SlicePtrFieldTypeBuffer) Type { switch (self.tag()) { - .const_slice_u8 => return Type.initTag(.manyptr_const_u8), - .const_slice_u8_sentinel_0 => return Type.initTag(.manyptr_const_u8_sentinel_0), - - .const_slice => { - const elem_type = self.castTag(.const_slice).?.data; - buffer.* = .{ - .elem_type = .{ - .base = .{ .tag = .many_const_pointer }, - .data = elem_type, - }, - }; - return Type.initPayload(&buffer.elem_type.base); - }, - .mut_slice => { - const elem_type = self.castTag(.mut_slice).?.data; - buffer.* = .{ - .elem_type = .{ - .base = .{ .tag = .many_mut_pointer }, - .data = elem_type, - }, - }; - return Type.initPayload(&buffer.elem_type.base); - }, - .pointer => { const payload = self.castTag(.pointer).?.data; assert(payload.size == .Slice); - if (payload.sentinel != null or - payload.@"align" != 0 or - payload.@"addrspace" != .generic or - payload.bit_offset != 0 or - payload.host_size != 0 or - payload.vector_index != .none or - payload.@"allowzero" or - payload.@"volatile") - { - buffer.* = .{ - .pointer = .{ - .data = .{ - .pointee_type = payload.pointee_type, - .sentinel = payload.sentinel, - .@"align" = payload.@"align", - .@"addrspace" = payload.@"addrspace", - .bit_offset = payload.bit_offset, - .host_size = payload.host_size, - .vector_index = payload.vector_index, - .@"allowzero" = payload.@"allowzero", - .mutable = payload.mutable, - .@"volatile" = payload.@"volatile", - .size = .Many, - }, - }, - }; - return Type.initPayload(&buffer.pointer.base); - } else if (payload.mutable) { - buffer.* = .{ - .elem_type = .{ - .base = .{ .tag = .many_mut_pointer }, - .data = payload.pointee_type, - }, - }; - return Type.initPayload(&buffer.elem_type.base); - } else { - buffer.* = .{ - .elem_type = .{ - .base = .{ .tag = .many_const_pointer }, - .data = payload.pointee_type, + buffer.* = .{ + .pointer = .{ + .data = .{ + .pointee_type = payload.pointee_type, + .sentinel = payload.sentinel, + .@"align" = payload.@"align", + .@"addrspace" = payload.@"addrspace", + .bit_offset = payload.bit_offset, + .host_size = payload.host_size, + .vector_index = payload.vector_index, + .@"allowzero" = 
payload.@"allowzero", + .mutable = payload.mutable, + .@"volatile" = payload.@"volatile", + .size = .Many, }, - }; - return Type.initPayload(&buffer.elem_type.base); - } + }, + }; + return Type.initPayload(&buffer.pointer.base); }, else => unreachable, @@ -3663,19 +3014,7 @@ pub const Type = struct { pub fn isConstPtr(self: Type) bool { return switch (self.tag()) { - .single_const_pointer, - .many_const_pointer, - .c_const_pointer, - .single_const_pointer_to_comptime_int, - .const_slice_u8, - .const_slice_u8_sentinel_0, - .const_slice, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, - => true, - .pointer => !self.castTag(.pointer).?.data.mutable, - else => false, }; } @@ -3702,49 +3041,46 @@ pub const Type = struct { pub fn isCPtr(self: Type) bool { return switch (self.tag()) { - .c_const_pointer, - .c_mut_pointer, - => return true, - .pointer => self.castTag(.pointer).?.data.size == .C, else => return false, }; } - pub fn isPtrAtRuntime(self: Type, mod: *const Module) bool { - switch (self.tag()) { - .c_const_pointer, - .c_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, - .manyptr_u8, - .optional_single_const_pointer, - .optional_single_mut_pointer, - .single_const_pointer, - .single_const_pointer_to_comptime_int, - .single_mut_pointer, - => return true, + pub fn isPtrAtRuntime(ty: Type, mod: *const Module) bool { + switch (ty.ip_index) { + .none => switch (ty.tag()) { + .pointer => switch (ty.castTag(.pointer).?.data.size) { + .Slice => return false, + .One, .Many, .C => return true, + }, - .pointer => switch (self.castTag(.pointer).?.data.size) { - .Slice => return false, - .One, .Many, .C => return true, - }, + .optional => { + const child_type = ty.optionalChild(mod); + if (child_type.zigTypeTag(mod) != .Pointer) return false; + const info = child_type.ptrInfo(mod); + switch (info.size) { + .Slice, .C => return false, + .Many, .One => return !info.@"allowzero", + } + }, - .optional => { - var buf: Payload.ElemType = undefined; - const child_type = self.optionalChild(&buf); - if (child_type.zigTypeTag(mod) != .Pointer) return false; - const info = child_type.ptrInfo().data; - switch (info.size) { - .Slice, .C => return false, - .Many, .One => return !info.@"allowzero", - } + else => return false, + }, + else => return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .ptr_type => |ptr_type| switch (ptr_type.size) { + .Slice => false, + .One, .Many, .C => true, + }, + .opt_type => |child| switch (mod.intern_pool.indexToKey(child)) { + .ptr_type => |p| switch (p.size) { + .Slice, .C => false, + .Many, .One => !p.is_allowzero, + }, + else => false, + }, + else => false, }, - - else => return false, } } @@ -3754,23 +3090,17 @@ pub const Type = struct { if (ty.isPtrLikeOptional(mod)) { return true; } - return ty.ptrInfo().data.@"allowzero"; + return ty.ptrInfo(mod).@"allowzero"; } /// See also `isPtrLikeOptional`. 
pub fn optionalReprIsPayload(ty: Type, mod: *const Module) bool { switch (ty.tag()) { - .optional_single_const_pointer, - .optional_single_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - => return true, - .optional => { const child_ty = ty.castTag(.optional).?.data; switch (child_ty.zigTypeTag(mod)) { .Pointer => { - const info = child_ty.ptrInfo().data; + const info = child_ty.ptrInfo(mod); switch (info.size) { .C => return false, .Slice, .Many, .One => return !info.@"allowzero", @@ -3793,7 +3123,7 @@ pub const Type = struct { pub fn isPtrLikeOptional(ty: Type, mod: *const Module) bool { if (ty.ip_index != .none) return switch (mod.intern_pool.indexToKey(ty.ip_index)) { .ptr_type => |ptr_type| ptr_type.size == .C, - .optional_type => |o| switch (mod.intern_pool.indexToKey(o.payload_type)) { + .opt_type => |child| switch (mod.intern_pool.indexToKey(child)) { .ptr_type => |ptr_type| switch (ptr_type.size) { .Slice, .C => false, .Many, .One => !ptr_type.is_allowzero, @@ -3803,16 +3133,10 @@ pub const Type = struct { else => false, }; switch (ty.tag()) { - .optional_single_const_pointer, - .optional_single_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - => return true, - .optional => { const child_ty = ty.castTag(.optional).?.data; if (child_ty.zigTypeTag(mod) != .Pointer) return false; - const info = child_ty.ptrInfo().data; + const info = child_ty.ptrInfo(mod); switch (info.size) { .Slice, .C => return false, .Many, .One => return !info.@"allowzero", @@ -3828,43 +3152,24 @@ pub const Type = struct { /// For *[N]T, returns [N]T. /// For *T, returns T. /// For [*]T, returns T. - pub fn childType(ty: Type) Type { - return switch (ty.tag()) { - .vector => ty.castTag(.vector).?.data.elem_type, - .array => ty.castTag(.array).?.data.elem_type, - .array_sentinel => ty.castTag(.array_sentinel).?.data.elem_type, - .optional_single_mut_pointer, - .optional_single_const_pointer, - .single_const_pointer, - .single_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - .const_slice, - .mut_slice, - => ty.castPointer().?.data, - - .array_u8, - .array_u8_sentinel_0, - .const_slice_u8, - .const_slice_u8_sentinel_0, - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, - => Type.u8, - - .single_const_pointer_to_comptime_int => Type.comptime_int, - .pointer => ty.castTag(.pointer).?.data.pointee_type, + pub fn childType(ty: Type, mod: *const Module) Type { + return childTypeIp(ty, mod.intern_pool); + } - else => unreachable, + pub fn childTypeIp(ty: Type, ip: InternPool) Type { + return switch (ty.ip_index) { + .none => switch (ty.tag()) { + .array => ty.castTag(.array).?.data.elem_type, + .array_sentinel => ty.castTag(.array_sentinel).?.data.elem_type, + + .pointer => ty.castTag(.pointer).?.data.pointee_type, + + else => unreachable, + }, + else => ip.childType(ty.ip_index).toType(), }; } - /// Asserts the type is a pointer or array type. - /// TODO this is deprecated in favor of `childType`. - pub const elemType = childType; - /// For *[N]T, returns T. /// For ?*T, returns T. /// For ?*[N]T, returns T. @@ -3875,54 +3180,42 @@ pub const Type = struct { /// For []T, returns T. /// For anyframe->T, returns T. 
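
One rename threads through these hunks: the intern pool's optional key is now .opt_type, whose capture is the child Index directly, where the previous .optional_type wrapped it in a struct with a payload_type field. A hedged sketch of reading a pointer-like optional through the new key (the function name is hypothetical; the types are the ones this patch uses):

    fn optPtrInfoSketch(ty: Type, mod: *const Module) ?InternPool.Key.PtrType {
        return switch (mod.intern_pool.indexToKey(ty.ip_index)) {
            // The capture is the child Index itself, so it feeds straight
            // back into indexToKey with no intermediate field access.
            .opt_type => |child| switch (mod.intern_pool.indexToKey(child)) {
                .ptr_type => |p| p,
                else => null,
            },
            else => null,
        };
    }
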
pub fn elemType2(ty: Type, mod: *const Module) Type { - return switch (ty.tag()) { - .vector => ty.castTag(.vector).?.data.elem_type, - .array => ty.castTag(.array).?.data.elem_type, - .array_sentinel => ty.castTag(.array_sentinel).?.data.elem_type, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - .const_slice, - .mut_slice, - => ty.castPointer().?.data, - - .single_const_pointer, - .single_mut_pointer, - => ty.castPointer().?.data.shallowElemType(mod), - - .array_u8, - .array_u8_sentinel_0, - .const_slice_u8, - .const_slice_u8_sentinel_0, - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, - => Type.u8, - - .single_const_pointer_to_comptime_int => Type.comptime_int, - .pointer => { - const info = ty.castTag(.pointer).?.data; - const child_ty = info.pointee_type; - if (info.size == .One) { - return child_ty.shallowElemType(mod); - } else { - return child_ty; - } - }, - .optional => ty.castTag(.optional).?.data.childType(), - .optional_single_mut_pointer => ty.castPointer().?.data, - .optional_single_const_pointer => ty.castPointer().?.data, + return switch (ty.ip_index) { + .none => switch (ty.tag()) { + .array => ty.castTag(.array).?.data.elem_type, + .array_sentinel => ty.castTag(.array_sentinel).?.data.elem_type, + + .pointer => { + const info = ty.castTag(.pointer).?.data; + const child_ty = info.pointee_type; + if (info.size == .One) { + return child_ty.shallowElemType(mod); + } else { + return child_ty; + } + }, + .optional => ty.castTag(.optional).?.data.childType(mod), - .anyframe_T => ty.castTag(.anyframe_T).?.data, + .anyframe_T => ty.castTag(.anyframe_T).?.data, - else => unreachable, + else => unreachable, + }, + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .ptr_type => |ptr_type| switch (ptr_type.size) { + .One => ptr_type.elem_type.toType().shallowElemType(mod), + .Many, .C, .Slice => ptr_type.elem_type.toType(), + }, + .vector_type => |vector_type| vector_type.child.toType(), + .array_type => |array_type| array_type.child.toType(), + .opt_type => |child| mod.intern_pool.childType(child).toType(), + else => unreachable, + }, }; } fn shallowElemType(child_ty: Type, mod: *const Module) Type { return switch (child_ty.zigTypeTag(mod)) { - .Array, .Vector => child_ty.childType(), + .Array, .Vector => child_ty.childType(mod), else => child_ty, }; } @@ -3930,7 +3223,7 @@ pub const Type = struct { /// For vectors, returns the element type. Otherwise returns self. pub fn scalarType(ty: Type, mod: *const Module) Type { return switch (ty.zigTypeTag(mod)) { - .Vector => ty.childType(), + .Vector => ty.childType(mod), else => ty, }; } @@ -3938,51 +3231,25 @@ pub const Type = struct { /// Asserts that the type is an optional. /// Resulting `Type` will have inner memory referencing `buf`. /// Note that for C pointers this returns the type unmodified. 
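
Note that the deprecated elemType alias is deleted outright rather than migrated, so every call site changes shape at once. A sketch of the migration, assuming only what the hunks above show:

    // before: buffer-free, but legacy-tag-only
    //     const elem = ty.childType();
    //     const elem = ty.elemType(); // removed alias
    // after: the Module (or just its InternPool) is threaded in
    //     const elem = ty.childType(mod);
    //     const elem = ty.childTypeIp(mod.intern_pool);
    fn scalarTypeSketch(ty: Type, mod: *const Module) Type {
        return switch (ty.zigTypeTag(mod)) {
            .Vector => ty.childType(mod), // mirrors scalarType above
            else => ty,
        };
    }
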
- pub fn optionalChild(ty: Type, buf: *Payload.ElemType) Type { - return switch (ty.tag()) { - .optional => ty.castTag(.optional).?.data, - .optional_single_mut_pointer => { - buf.* = .{ - .base = .{ .tag = .single_mut_pointer }, - .data = ty.castPointer().?.data, - }; - return Type.initPayload(&buf.base); - }, - .optional_single_const_pointer => { - buf.* = .{ - .base = .{ .tag = .single_const_pointer }, - .data = ty.castPointer().?.data, - }; - return Type.initPayload(&buf.base); - }, + pub fn optionalChild(ty: Type, mod: *const Module) Type { + return switch (ty.ip_index) { + .none => switch (ty.tag()) { + .optional => ty.castTag(.optional).?.data, - .pointer, // here we assume it is a C pointer - .c_const_pointer, - .c_mut_pointer, - => return ty, + .pointer, // here we assume it is a C pointer + => return ty, - else => unreachable, - }; - } - - /// Asserts that the type is an optional. - /// Same as `optionalChild` but allocates the buffer if needed. - pub fn optionalChildAlloc(ty: Type, allocator: Allocator) !Type { - switch (ty.tag()) { - .optional => return ty.castTag(.optional).?.data, - .optional_single_mut_pointer => { - return Tag.single_mut_pointer.create(allocator, ty.castPointer().?.data); + else => unreachable, }, - .optional_single_const_pointer => { - return Tag.single_const_pointer.create(allocator, ty.castPointer().?.data); + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .opt_type => |child| child.toType(), + .ptr_type => |ptr_type| b: { + assert(ptr_type.size == .C); + break :b ty; + }, + else => unreachable, }, - .pointer, // here we assume it is a C pointer - .c_const_pointer, - .c_mut_pointer, - => return ty, - - else => unreachable, - } + }; } /// Returns the tag type of a union, if the type is a union and it has a tag type. @@ -4071,19 +3338,25 @@ pub const Type = struct { } /// Asserts that the type is an error union. - pub fn errorUnionPayload(self: Type) Type { - return switch (self.tag()) { - .anyerror_void_error_union => Type.void, - .error_union => self.castTag(.error_union).?.data.payload, - else => unreachable, + pub fn errorUnionPayload(ty: Type) Type { + return switch (ty.ip_index) { + .anyerror_void_error_union_type => Type.void, + .none => switch (ty.tag()) { + .error_union => ty.castTag(.error_union).?.data.payload, + else => unreachable, + }, + else => @panic("TODO"), }; } - pub fn errorUnionSet(self: Type) Type { - return switch (self.tag()) { - .anyerror_void_error_union => Type.anyerror, - .error_union => self.castTag(.error_union).?.data.error_set, - else => unreachable, + pub fn errorUnionSet(ty: Type) Type { + return switch (ty.ip_index) { + .anyerror_void_error_union_type => Type.anyerror, + .none => switch (ty.tag()) { + .error_union => ty.castTag(.error_union).?.data.error_set, + else => unreachable, + }, + else => @panic("TODO"), }; } @@ -4168,67 +3441,73 @@ pub const Type = struct { } /// Asserts the type is an array or vector or struct. 
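
optionalChild losing its buffer parameter (and optionalChildAlloc disappearing entirely) is the call-site change repeated throughout the value.zig hunks further down. The migration, taken verbatim from those hunks:

    // before:
    //     var buf: Type.Payload.ElemType = undefined;
    //     const child = ty.optionalChild(&buf);
    // after:
    //     const child = ty.optionalChild(mod);
    // optionalChildAlloc has no direct replacement: interned optionals no
    // longer need an allocator to materialize their child type.
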
- pub fn arrayLen(ty: Type) u64 { - return switch (ty.tag()) { - .vector => ty.castTag(.vector).?.data.len, - .array => ty.castTag(.array).?.data.len, - .array_sentinel => ty.castTag(.array_sentinel).?.data.len, - .array_u8 => ty.castTag(.array_u8).?.data, - .array_u8_sentinel_0 => ty.castTag(.array_u8_sentinel_0).?.data, - .tuple => ty.castTag(.tuple).?.data.types.len, - .anon_struct => ty.castTag(.anon_struct).?.data.types.len, - .@"struct" => ty.castTag(.@"struct").?.data.fields.count(), - .empty_struct, .empty_struct_literal => 0, + pub fn arrayLen(ty: Type, mod: *const Module) u64 { + return arrayLenIp(ty, mod.intern_pool); + } - else => unreachable, + pub fn arrayLenIp(ty: Type, ip: InternPool) u64 { + return switch (ty.ip_index) { + .none => switch (ty.tag()) { + .array => ty.castTag(.array).?.data.len, + .array_sentinel => ty.castTag(.array_sentinel).?.data.len, + .tuple => ty.castTag(.tuple).?.data.types.len, + .anon_struct => ty.castTag(.anon_struct).?.data.types.len, + .@"struct" => ty.castTag(.@"struct").?.data.fields.count(), + .empty_struct, .empty_struct_literal => 0, + + else => unreachable, + }, + else => switch (ip.indexToKey(ty.ip_index)) { + .vector_type => |vector_type| vector_type.len, + .array_type => |array_type| array_type.len, + else => unreachable, + }, }; } - pub fn arrayLenIncludingSentinel(ty: Type) u64 { - return ty.arrayLen() + @boolToInt(ty.sentinel() != null); + pub fn arrayLenIncludingSentinel(ty: Type, mod: *const Module) u64 { + return ty.arrayLen(mod) + @boolToInt(ty.sentinel(mod) != null); } - pub fn vectorLen(ty: Type) u32 { - return switch (ty.tag()) { - .vector => @intCast(u32, ty.castTag(.vector).?.data.len), - .tuple => @intCast(u32, ty.castTag(.tuple).?.data.types.len), - .anon_struct => @intCast(u32, ty.castTag(.anon_struct).?.data.types.len), - else => unreachable, + pub fn vectorLen(ty: Type, mod: *const Module) u32 { + return switch (ty.ip_index) { + .none => switch (ty.tag()) { + .tuple => @intCast(u32, ty.castTag(.tuple).?.data.types.len), + .anon_struct => @intCast(u32, ty.castTag(.anon_struct).?.data.types.len), + else => unreachable, + }, + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .vector_type => |vector_type| vector_type.len, + else => unreachable, + }, }; } /// Asserts the type is an array, pointer or vector. 
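
All of the length queries now take the Module (or, via arrayLenIp, a bare InternPool) so that interned vector and array types can be decoded. A minimal sketch restating arrayLenIncludingSentinel from this hunk:

    fn lenWithSentinelSketch(ty: Type, mod: *const Module) u64 {
        // Both calls thread mod through; @boolToInt turns the optional
        // sentinel check into the +0/+1 adjustment.
        return ty.arrayLen(mod) + @boolToInt(ty.sentinel(mod) != null);
    }
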
- pub fn sentinel(self: Type) ?Value { - return switch (self.tag()) { - .single_const_pointer, - .single_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - .single_const_pointer_to_comptime_int, - .vector, - .array, - .array_u8, - .manyptr_u8, - .manyptr_const_u8, - .const_slice_u8, - .const_slice, - .mut_slice, - .tuple, - .empty_struct_literal, - .@"struct", - => return null, + pub fn sentinel(ty: Type, mod: *const Module) ?Value { + return switch (ty.ip_index) { + .none => switch (ty.tag()) { + .array, + .tuple, + .empty_struct_literal, + .@"struct", + => null, - .pointer => return self.castTag(.pointer).?.data.sentinel, - .array_sentinel => return self.castTag(.array_sentinel).?.data.sentinel, + .pointer => ty.castTag(.pointer).?.data.sentinel, + .array_sentinel => ty.castTag(.array_sentinel).?.data.sentinel, - .array_u8_sentinel_0, - .const_slice_u8_sentinel_0, - .manyptr_const_u8_sentinel_0, - => return Value.zero, + else => unreachable, + }, + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .vector_type, + .struct_type, + => null, - else => unreachable, + .array_type => |t| if (t.sentinel != .none) t.sentinel.toValue() else null, + .ptr_type => |t| if (t.sentinel != .none) t.sentinel.toValue() else null, + + else => unreachable, + }, }; } @@ -4292,8 +3571,6 @@ pub const Type = struct { return .{ .signedness = .unsigned, .bits = 16 }; }, - .vector => ty = ty.castTag(.vector).?.data.elem_type, - .@"struct" => { const struct_obj = ty.castTag(.@"struct").?.data; assert(struct_obj.layout == .Packed); @@ -4321,8 +3598,9 @@ pub const Type = struct { .int_type => |int_type| return int_type, .ptr_type => unreachable, .array_type => unreachable, - .vector_type => @panic("TODO"), - .optional_type => unreachable, + .vector_type => |vector_type| ty = vector_type.child.toType(), + + .opt_type => unreachable, .error_union_type => unreachable, .simple_type => unreachable, // handled via Index enum tag above .struct_type => @panic("TODO"), @@ -4426,7 +3704,11 @@ pub const Type = struct { /// Asserts the type is a function or a function pointer. 
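
In the intern pool, "no sentinel" is encoded as the Index value .none rather than a null optional, so the nullable Value is reconstructed at the boundary. A sketch of just the interned arm (the legacy-tag arm and the unreachable cases from the hunk are collapsed here for brevity):

    fn sentinelSketch(ty: Type, mod: *const Module) ?Value {
        return switch (mod.intern_pool.indexToKey(ty.ip_index)) {
            .array_type => |t| if (t.sentinel != .none) t.sentinel.toValue() else null,
            .ptr_type => |t| if (t.sentinel != .none) t.sentinel.toValue() else null,
            // The real function returns null for vector/struct keys and
            // hits unreachable otherwise.
            else => null,
        };
    }
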
pub fn fnReturnType(ty: Type) Type { - const fn_ty = if (ty.castPointer()) |p| p.data else ty; + const fn_ty = switch (ty.tag()) { + .pointer => ty.castTag(.pointer).?.data.pointee_type, + .function => ty, + else => unreachable, + }; return fn_ty.castTag(.function).?.data.return_type; } @@ -4516,8 +3798,12 @@ pub const Type = struct { }, .ptr_type => @panic("TODO"), .array_type => @panic("TODO"), - .vector_type => @panic("TODO"), - .optional_type => @panic("TODO"), + .vector_type => |vector_type| { + if (vector_type.len == 0) return Value.initTag(.empty_array); + if (vector_type.child.toType().onePossibleValue(mod)) |v| return v; + return null; + }, + .opt_type => @panic("TODO"), .error_union_type => @panic("TODO"), .simple_type => |t| switch (t) { .f16, @@ -4580,34 +3866,15 @@ pub const Type = struct { .error_set, .error_set_merged, .function, - .single_const_pointer_to_comptime_int, .array_sentinel, - .array_u8_sentinel_0, - .const_slice_u8, - .const_slice_u8_sentinel_0, - .const_slice, - .mut_slice, - .optional_single_mut_pointer, - .optional_single_const_pointer, - .anyerror_void_error_union, .error_set_inferred, .@"opaque", - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, .anyframe_T, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - .single_const_pointer, - .single_mut_pointer, .pointer, => return null, .optional => { - var buf: Payload.ElemType = undefined; - const child_ty = ty.optionalChild(&buf); + const child_ty = ty.optionalChild(mod); if (child_ty.isNoReturn()) { return Value.null; } else { @@ -4690,10 +3957,10 @@ pub const Type = struct { .empty_struct, .empty_struct_literal => return Value.initTag(.empty_struct_value), - .vector, .array, .array_u8 => { - if (ty.arrayLen() == 0) + .array => { + if (ty.arrayLen(mod) == 0) return Value.initTag(.empty_array); - if (ty.elemType().onePossibleValue(mod) != null) + if (ty.childType(mod).onePossibleValue(mod) != null) return Value.initTag(.the_only_possible_value); return null; }, @@ -4711,9 +3978,9 @@ pub const Type = struct { if (ty.ip_index != .none) return switch (mod.intern_pool.indexToKey(ty.ip_index)) { .int_type => false, .ptr_type => @panic("TODO"), - .array_type => @panic("TODO"), - .vector_type => @panic("TODO"), - .optional_type => @panic("TODO"), + .array_type => |array_type| return array_type.child.toType().comptimeOnly(mod), + .vector_type => |vector_type| return vector_type.child.toType().comptimeOnly(mod), + .opt_type => @panic("TODO"), .error_union_type => @panic("TODO"), .simple_type => |t| switch (t) { .f16, @@ -4772,12 +4039,6 @@ pub const Type = struct { }; return switch (ty.tag()) { - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, - .const_slice_u8, - .const_slice_u8_sentinel_0, - .anyerror_void_error_union, .empty_struct_literal, .empty_struct, .error_set, @@ -4785,35 +4046,21 @@ pub const Type = struct { .error_set_inferred, .error_set_merged, .@"opaque", - .array_u8, - .array_u8_sentinel_0, .enum_simple, => false, - .single_const_pointer_to_comptime_int, // These are function bodies, not function pointers. 
- .function, - => true, + .function => true, .inferred_alloc_mut => unreachable, .inferred_alloc_const => unreachable, .array, .array_sentinel, - .vector, - => return ty.childType().comptimeOnly(mod), + => return ty.childType(mod).comptimeOnly(mod), - .pointer, - .single_const_pointer, - .single_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - .const_slice, - .mut_slice, - => { - const child_ty = ty.childType(); + .pointer => { + const child_ty = ty.childType(mod); if (child_ty.zigTypeTag(mod) == .Fn) { return false; } else { @@ -4821,12 +4068,8 @@ pub const Type = struct { } }, - .optional, - .optional_single_mut_pointer, - .optional_single_const_pointer, - => { - var buf: Type.Payload.ElemType = undefined; - return ty.optionalChild(&buf).comptimeOnly(mod); + .optional => { + return ty.optionalChild(mod).comptimeOnly(mod); }, .tuple, .anon_struct => { @@ -4882,6 +4125,10 @@ pub const Type = struct { }; } + pub fn isVector(ty: Type, mod: *const Module) bool { + return ty.zigTypeTag(mod) == .Vector; + } + pub fn isArrayOrVector(ty: Type, mod: *const Module) bool { return switch (ty.zigTypeTag(mod)) { .Array, .Vector => true, @@ -4892,9 +4139,9 @@ pub const Type = struct { pub fn isIndexable(ty: Type, mod: *const Module) bool { return switch (ty.zigTypeTag(mod)) { .Array, .Vector => true, - .Pointer => switch (ty.ptrSize()) { + .Pointer => switch (ty.ptrSize(mod)) { .Slice, .Many, .C => true, - .One => ty.elemType().zigTypeTag(mod) == .Array, + .One => ty.childType(mod).zigTypeTag(mod) == .Array, }, .Struct => ty.isTuple(), else => false, @@ -4904,10 +4151,10 @@ pub const Type = struct { pub fn indexableHasLen(ty: Type, mod: *const Module) bool { return switch (ty.zigTypeTag(mod)) { .Array, .Vector => true, - .Pointer => switch (ty.ptrSize()) { + .Pointer => switch (ty.ptrSize(mod)) { .Many, .C => false, .Slice => true, - .One => ty.elemType().zigTypeTag(mod) == .Array, + .One => ty.childType(mod).zigTypeTag(mod) == .Array, }, .Struct => ty.isTuple(), else => false, @@ -5527,14 +4774,6 @@ pub const Type = struct { /// with different enum tags, because the the former requires more payload data than the latter. /// See `zigTypeTag` for the function that corresponds to `std.builtin.TypeId`. pub const Tag = enum(usize) { - // The first section of this enum are tags that require no payload. - manyptr_u8, - manyptr_const_u8, - manyptr_const_u8_sentinel_0, - single_const_pointer_to_comptime_int, - const_slice_u8, - const_slice_u8_sentinel_0, - anyerror_void_error_union, /// Same as `empty_struct` except it has an empty namespace. empty_struct_literal, /// This is a special value that tracks a set of types that have been stored @@ -5545,28 +4784,15 @@ pub const Type = struct { inferred_alloc_const, // See last_no_payload_tag below. // After this, the tag requires a payload. 
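
The small isVector helper added above makes call sites read uniformly alongside isArrayOrVector; indexability checks, for instance, now take the shape of this sketch (assembled from the isIndexable hunk above):

    fn isIndexableSketch(ty: Type, mod: *const Module) bool {
        return switch (ty.zigTypeTag(mod)) {
            .Array, .Vector => true,
            .Pointer => switch (ty.ptrSize(mod)) {
                .Slice, .Many, .C => true,
                // Single-item pointers are indexable only when they point
                // at an array.
                .One => ty.childType(mod).zigTypeTag(mod) == .Array,
            },
            .Struct => ty.isTuple(),
            else => false,
        };
    }
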
- array_u8, - array_u8_sentinel_0, array, array_sentinel, - vector, /// Possible Value tags for this: @"struct" tuple, /// Possible Value tags for this: @"struct" anon_struct, pointer, - single_const_pointer, - single_mut_pointer, - many_const_pointer, - many_mut_pointer, - c_const_pointer, - c_mut_pointer, - const_slice, - mut_slice, function, optional, - optional_single_mut_pointer, - optional_single_const_pointer, error_union, anyframe_T, error_set, @@ -5590,33 +4816,12 @@ pub const Type = struct { pub fn Type(comptime t: Tag) type { return switch (t) { - .single_const_pointer_to_comptime_int, - .anyerror_void_error_union, - .const_slice_u8, - .const_slice_u8_sentinel_0, .inferred_alloc_const, .inferred_alloc_mut, .empty_struct_literal, - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, => @compileError("Type Tag " ++ @tagName(t) ++ " has no payload"), - .array_u8, - .array_u8_sentinel_0, - => Payload.Len, - - .single_const_pointer, - .single_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - .const_slice, - .mut_slice, .optional, - .optional_single_mut_pointer, - .optional_single_const_pointer, .anyframe_T, => Payload.ElemType, @@ -5624,7 +4829,7 @@ pub const Type = struct { .error_set_inferred => Payload.ErrorSetInferred, .error_set_merged => Payload.ErrorSetMerged, - .array, .vector => Payload.Array, + .array => Payload.Array, .array_sentinel => Payload.ArraySentinel, .pointer => Payload.Pointer, .function => Payload.Function, @@ -5847,15 +5052,28 @@ pub const Type = struct { @"volatile": bool = false, size: std.builtin.Type.Pointer.Size = .One, - pub const VectorIndex = enum(u32) { - none = std.math.maxInt(u32), - runtime = std.math.maxInt(u32) - 1, - _, - }; + pub const VectorIndex = InternPool.Key.PtrType.VectorIndex; + pub fn alignment(data: Data, mod: *const Module) u32 { if (data.@"align" != 0) return data.@"align"; return abiAlignment(data.pointee_type, mod); } + + pub fn fromKey(p: InternPool.Key.PtrType) Data { + return .{ + .pointee_type = p.elem_type.toType(), + .sentinel = if (p.sentinel != .none) p.sentinel.toValue() else null, + .@"align" = p.alignment, + .@"addrspace" = p.address_space, + .bit_offset = p.bit_offset, + .host_size = p.host_size, + .vector_index = p.vector_index, + .@"allowzero" = p.is_allowzero, + .mutable = !p.is_const, + .@"volatile" = p.is_volatile, + .size = p.size, + }; + } }; }; @@ -5986,6 +5204,17 @@ pub const Type = struct { pub const @"c_ulonglong": Type = .{ .ip_index = .c_ulonglong_type, .legacy = undefined }; pub const @"c_longdouble": Type = .{ .ip_index = .c_longdouble_type, .legacy = undefined }; + pub const const_slice_u8: Type = .{ .ip_index = .const_slice_u8_type, .legacy = undefined }; + pub const manyptr_u8: Type = .{ .ip_index = .manyptr_u8_type, .legacy = undefined }; + pub const single_const_pointer_to_comptime_int: Type = .{ + .ip_index = .single_const_pointer_to_comptime_int_type, + .legacy = undefined, + }; + pub const const_slice_u8_sentinel_0: Type = .{ + .ip_index = .const_slice_u8_sentinel_0_type, + .legacy = undefined, + }; + pub const generic_poison: Type = .{ .ip_index = .generic_poison_type, .legacy = undefined }; pub const err_int = Type.u16; @@ -6019,50 +5248,6 @@ pub const Type = struct { } } - if (d.@"align" == 0 and d.@"addrspace" == .generic and - d.bit_offset == 0 and d.host_size == 0 and d.vector_index == .none and - !d.@"allowzero" and !d.@"volatile") - { - if (d.sentinel) |sent| { - if (!d.mutable and d.pointee_type.eql(Type.u8, mod)) { - switch 
(d.size) { - .Slice => { - if (sent.compareAllWithZero(.eq, mod)) { - return Type.initTag(.const_slice_u8_sentinel_0); - } - }, - .Many => { - if (sent.compareAllWithZero(.eq, mod)) { - return Type.initTag(.manyptr_const_u8_sentinel_0); - } - }, - else => {}, - } - } - } else if (!d.mutable and d.pointee_type.eql(Type.u8, mod)) { - switch (d.size) { - .Slice => return Type.initTag(.const_slice_u8), - .Many => return Type.initTag(.manyptr_const_u8), - else => {}, - } - } else { - const T = Type.Tag; - const type_payload = try arena.create(Type.Payload.ElemType); - type_payload.* = .{ - .base = .{ - .tag = switch (d.size) { - .One => if (d.mutable) T.single_mut_pointer else T.single_const_pointer, - .Many => if (d.mutable) T.many_mut_pointer else T.many_const_pointer, - .C => if (d.mutable) T.c_mut_pointer else T.c_const_pointer, - .Slice => if (d.mutable) T.mut_slice else T.const_slice, - }, - }, - .data = d.pointee_type, - }; - return Type.initPayload(&type_payload.base); - } - } - return Type.Tag.pointer.create(arena, d); } @@ -6073,13 +5258,21 @@ pub const Type = struct { elem_type: Type, mod: *Module, ) Allocator.Error!Type { - if (elem_type.eql(Type.u8, mod)) { - if (sent) |some| { - if (some.eql(Value.zero, elem_type, mod)) { - return Tag.array_u8_sentinel_0.create(arena, len); + if (elem_type.ip_index != .none) { + if (sent) |s| { + if (s.ip_index != .none) { + return mod.arrayType(.{ + .len = len, + .child = elem_type.ip_index, + .sentinel = s.ip_index, + }); } } else { - return Tag.array_u8.create(arena, len); + return mod.arrayType(.{ + .len = len, + .child = elem_type.ip_index, + .sentinel = .none, + }); } } @@ -6097,24 +5290,11 @@ pub const Type = struct { }); } - pub fn vector(arena: Allocator, len: u64, elem_type: Type) Allocator.Error!Type { - return Tag.vector.create(arena, .{ - .len = len, - .elem_type = elem_type, - }); - } - - pub fn optional(arena: Allocator, child_type: Type) Allocator.Error!Type { - switch (child_type.tag()) { - .single_const_pointer => return Type.Tag.optional_single_const_pointer.create( - arena, - child_type.elemType(), - ), - .single_mut_pointer => return Type.Tag.optional_single_mut_pointer.create( - arena, - child_type.elemType(), - ), - else => return Type.Tag.optional.create(arena, child_type), + pub fn optional(arena: Allocator, child_type: Type, mod: *Module) Allocator.Error!Type { + if (child_type.ip_index != .none) { + return mod.optionalType(child_type.ip_index); + } else { + return Type.Tag.optional.create(arena, child_type); } } @@ -6125,12 +5305,6 @@ pub const Type = struct { mod: *Module, ) Allocator.Error!Type { assert(error_set.zigTypeTag(mod) == .ErrorSet); - if (error_set.eql(Type.anyerror, mod) and - payload.eql(Type.void, mod)) - { - return Type.initTag(.anyerror_void_error_union); - } - return Type.Tag.error_union.create(arena, .{ .error_set = error_set, .payload = payload, diff --git a/src/value.zig b/src/value.zig index cbf18c672c..6f7210c884 100644 --- a/src/value.zig +++ b/src/value.zig @@ -33,14 +33,6 @@ pub const Value = struct { // Keep in sync with tools/stage2_pretty_printers_common.py pub const Tag = enum(usize) { // The first section of this enum are tags that require no payload. 
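
With the canonical u8-slice and pointer tags gone, type construction routes through the Module's intern pool whenever the operands are themselves interned; mod.arrayType and mod.optionalType replace the old tag canonicalization shown deleted above. A sketch of the interned-array fast path, assuming both element and sentinel are already interned (the legacy arena fallback from the hunk is elided):

    fn internedArraySketch(len: u64, sent: ?Value, elem_type: Type, mod: *Module) !Type {
        assert(elem_type.ip_index != .none); // interned element assumed
        const sentinel: InternPool.Index = if (sent) |s| blk: {
            assert(s.ip_index != .none); // interned sentinel assumed
            break :blk s.ip_index;
        } else .none;
        return mod.arrayType(.{
            .len = len,
            .child = elem_type.ip_index,
            .sentinel = sentinel,
        });
    }
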
- manyptr_u8_type, - manyptr_const_u8_type, - manyptr_const_u8_sentinel_0_type, - single_const_pointer_to_comptime_int_type, - const_slice_u8_type, - const_slice_u8_sentinel_0_type, - anyerror_void_error_union_type, - undef, zero, one, @@ -140,11 +132,6 @@ pub const Value = struct { pub fn Type(comptime t: Tag) type { return switch (t) { - .single_const_pointer_to_comptime_int_type, - .const_slice_u8_type, - .const_slice_u8_sentinel_0_type, - .anyerror_void_error_union_type, - .undef, .zero, .one, @@ -153,9 +140,6 @@ pub const Value = struct { .empty_struct_value, .empty_array, .null_value, - .manyptr_u8_type, - .manyptr_const_u8_type, - .manyptr_const_u8_sentinel_0_type, => @compileError("Value Tag " ++ @tagName(t) ++ " has no payload"), .int_big_positive, @@ -280,9 +264,7 @@ pub const Value = struct { } pub fn castTag(self: Value, comptime t: Tag) ?*t.Type() { - if (self.ip_index != .none) { - return null; - } + assert(self.ip_index == .none); if (@enumToInt(self.legacy.tag_if_small_enough) < Tag.no_payload_count) return null; @@ -305,11 +287,6 @@ pub const Value = struct { .legacy = .{ .tag_if_small_enough = self.legacy.tag_if_small_enough }, }; } else switch (self.legacy.ptr_otherwise.tag) { - .single_const_pointer_to_comptime_int_type, - .const_slice_u8_type, - .const_slice_u8_sentinel_0_type, - .anyerror_void_error_union_type, - .undef, .zero, .one, @@ -318,9 +295,6 @@ pub const Value = struct { .empty_array, .null_value, .empty_struct_value, - .manyptr_u8_type, - .manyptr_const_u8_type, - .manyptr_const_u8_sentinel_0_type, => unreachable, .ty, .lazy_align, .lazy_size => { @@ -553,14 +527,6 @@ pub const Value = struct { } var val = start_val; while (true) switch (val.tag()) { - .single_const_pointer_to_comptime_int_type => return out_stream.writeAll("*const comptime_int"), - .const_slice_u8_type => return out_stream.writeAll("[]const u8"), - .const_slice_u8_sentinel_0_type => return out_stream.writeAll("[:0]const u8"), - .anyerror_void_error_union_type => return out_stream.writeAll("anyerror!void"), - .manyptr_u8_type => return out_stream.writeAll("[*]u8"), - .manyptr_const_u8_type => return out_stream.writeAll("[*]const u8"), - .manyptr_const_u8_sentinel_0_type => return out_stream.writeAll("[*:0]const u8"), - .empty_struct_value => return out_stream.writeAll("struct {}{}"), .aggregate => { return out_stream.writeAll("(aggregate)"); @@ -674,7 +640,7 @@ pub const Value = struct { switch (val.tag()) { .bytes => { const bytes = val.castTag(.bytes).?.data; - const adjusted_len = bytes.len - @boolToInt(ty.sentinel() != null); + const adjusted_len = bytes.len - @boolToInt(ty.sentinel(mod) != null); const adjusted_bytes = bytes[0..adjusted_len]; return allocator.dupe(u8, adjusted_bytes); }, @@ -686,7 +652,7 @@ pub const Value = struct { .enum_literal => return allocator.dupe(u8, val.castTag(.enum_literal).?.data), .repeated => { const byte = @intCast(u8, val.castTag(.repeated).?.data.toUnsignedInt(mod)); - const result = try allocator.alloc(u8, @intCast(usize, ty.arrayLen())); + const result = try allocator.alloc(u8, @intCast(usize, ty.arrayLen(mod))); @memset(result, byte); return result; }, @@ -701,7 +667,7 @@ pub const Value = struct { const slice = val.castTag(.slice).?.data; return arrayToAllocatedBytes(slice.ptr, slice.len.toUnsignedInt(mod), allocator, mod); }, - else => return arrayToAllocatedBytes(val, ty.arrayLen(), allocator, mod), + else => return arrayToAllocatedBytes(val, ty.arrayLen(mod), allocator, mod), } } @@ -720,13 +686,6 @@ pub const Value = struct { if (self.ip_index 
!= .none) return self.ip_index.toType(); return switch (self.tag()) { .ty => self.castTag(.ty).?.data, - .single_const_pointer_to_comptime_int_type => Type.initTag(.single_const_pointer_to_comptime_int), - .const_slice_u8_type => Type.initTag(.const_slice_u8), - .const_slice_u8_sentinel_0_type => Type.initTag(.const_slice_u8_sentinel_0), - .anyerror_void_error_union_type => Type.initTag(.anyerror_void_error_union), - .manyptr_u8_type => Type.initTag(.manyptr_u8), - .manyptr_const_u8_type => Type.initTag(.manyptr_const_u8), - .manyptr_const_u8_sentinel_0_type => Type.initTag(.manyptr_const_u8_sentinel_0), else => unreachable, }; @@ -1096,8 +1055,8 @@ pub const Value = struct { else => unreachable, }, .Array => { - const len = ty.arrayLen(); - const elem_ty = ty.childType(); + const len = ty.arrayLen(mod); + const elem_ty = ty.childType(mod); const elem_size = @intCast(usize, elem_ty.abiSize(mod)); var elem_i: usize = 0; var elem_value_buf: ElemValueBuffer = undefined; @@ -1150,8 +1109,7 @@ pub const Value = struct { }, .Optional => { if (!ty.isPtrLikeOptional(mod)) return error.IllDefinedMemoryLayout; - var buf: Type.Payload.ElemType = undefined; - const child = ty.optionalChild(&buf); + const child = ty.optionalChild(mod); const opt_val = val.optionalValue(mod); if (opt_val) |some| { return some.writeToMemory(child, mod, buffer); @@ -1220,9 +1178,9 @@ pub const Value = struct { else => unreachable, }, .Vector => { - const elem_ty = ty.childType(); + const elem_ty = ty.childType(mod); const elem_bit_size = @intCast(u16, elem_ty.bitSize(mod)); - const len = @intCast(usize, ty.arrayLen()); + const len = @intCast(usize, ty.arrayLen(mod)); var bits: u16 = 0; var elem_i: usize = 0; @@ -1267,8 +1225,7 @@ pub const Value = struct { }, .Optional => { assert(ty.isPtrLikeOptional(mod)); - var buf: Type.Payload.ElemType = undefined; - const child = ty.optionalChild(&buf); + const child = ty.optionalChild(mod); const opt_val = val.optionalValue(mod); if (opt_val) |some| { return some.writeToPackedMemory(child, mod, buffer, bit_offset); @@ -1335,9 +1292,9 @@ pub const Value = struct { else => unreachable, }, .Array => { - const elem_ty = ty.childType(); + const elem_ty = ty.childType(mod); const elem_size = elem_ty.abiSize(mod); - const elems = try arena.alloc(Value, @intCast(usize, ty.arrayLen())); + const elems = try arena.alloc(Value, @intCast(usize, ty.arrayLen(mod))); var offset: usize = 0; for (elems) |*elem| { elem.* = try readFromMemory(elem_ty, mod, buffer[offset..], arena); @@ -1386,8 +1343,7 @@ pub const Value = struct { }, .Optional => { assert(ty.isPtrLikeOptional(mod)); - var buf: Type.Payload.ElemType = undefined; - const child = ty.optionalChild(&buf); + const child = ty.optionalChild(mod); return readFromMemory(child, mod, buffer, arena); }, else => @panic("TODO implement readFromMemory for more types"), @@ -1449,8 +1405,8 @@ pub const Value = struct { else => unreachable, }, .Vector => { - const elem_ty = ty.childType(); - const elems = try arena.alloc(Value, @intCast(usize, ty.arrayLen())); + const elem_ty = ty.childType(mod); + const elems = try arena.alloc(Value, @intCast(usize, ty.arrayLen(mod))); var bits: u16 = 0; const elem_bit_size = @intCast(u16, elem_ty.bitSize(mod)); @@ -1483,8 +1439,7 @@ pub const Value = struct { }, .Optional => { assert(ty.isPtrLikeOptional(mod)); - var buf: Type.Payload.ElemType = undefined; - const child = ty.optionalChild(&buf); + const child = ty.optionalChild(mod); return readFromPackedMemory(child, mod, buffer, bit_offset, arena); }, else => 
@panic("TODO implement readFromPackedMemory for more types"), @@ -1956,7 +1911,7 @@ pub const Value = struct { pub fn compareAll(lhs: Value, op: std.math.CompareOperator, rhs: Value, ty: Type, mod: *Module) bool { if (ty.zigTypeTag(mod) == .Vector) { var i: usize = 0; - while (i < ty.vectorLen()) : (i += 1) { + while (i < ty.vectorLen(mod)) : (i += 1) { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); @@ -2092,8 +2047,7 @@ pub const Value = struct { .opt_payload => { const a_payload = a.castTag(.opt_payload).?.data; const b_payload = b.castTag(.opt_payload).?.data; - var buffer: Type.Payload.ElemType = undefined; - const payload_ty = ty.optionalChild(&buffer); + const payload_ty = ty.optionalChild(mod); return eqlAdvanced(a_payload, payload_ty, b_payload, payload_ty, mod, opt_sema); }, .slice => { @@ -2175,7 +2129,7 @@ pub const Value = struct { return true; } - const elem_ty = ty.childType(); + const elem_ty = ty.childType(mod); for (a_field_vals, 0..) |a_elem, i| { const b_elem = b_field_vals[i]; @@ -2239,8 +2193,8 @@ pub const Value = struct { return eqlAdvanced(a_val, int_ty, b_val, int_ty, mod, opt_sema); }, .Array, .Vector => { - const len = ty.arrayLen(); - const elem_ty = ty.childType(); + const len = ty.arrayLen(mod); + const elem_ty = ty.childType(mod); var i: usize = 0; var a_buf: ElemValueBuffer = undefined; var b_buf: ElemValueBuffer = undefined; @@ -2253,11 +2207,11 @@ pub const Value = struct { } return true; }, - .Pointer => switch (ty.ptrSize()) { + .Pointer => switch (ty.ptrSize(mod)) { .Slice => { - const a_len = switch (a_ty.ptrSize()) { + const a_len = switch (a_ty.ptrSize(mod)) { .Slice => a.sliceLen(mod), - .One => a_ty.childType().arrayLen(), + .One => a_ty.childType(mod).arrayLen(mod), else => unreachable, }; if (a_len != b.sliceLen(mod)) { @@ -2266,7 +2220,7 @@ pub const Value = struct { var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined; const ptr_ty = ty.slicePtrFieldType(&ptr_buf); - const a_ptr = switch (a_ty.ptrSize()) { + const a_ptr = switch (a_ty.ptrSize(mod)) { .Slice => a.slicePtr(), .One => a, else => unreachable, @@ -2412,8 +2366,8 @@ pub const Value = struct { else => return hashPtr(val, hasher, mod), }, .Array, .Vector => { - const len = ty.arrayLen(); - const elem_ty = ty.childType(); + const len = ty.arrayLen(mod); + const elem_ty = ty.childType(mod); var index: usize = 0; var elem_value_buf: ElemValueBuffer = undefined; while (index < len) : (index += 1) { @@ -2438,8 +2392,7 @@ pub const Value = struct { if (val.castTag(.opt_payload)) |payload| { std.hash.autoHash(hasher, true); // non-null const sub_val = payload.data; - var buffer: Type.Payload.ElemType = undefined; - const sub_ty = ty.optionalChild(&buffer); + const sub_ty = ty.optionalChild(mod); sub_val.hash(sub_ty, hasher, mod); } else { std.hash.autoHash(hasher, false); // null @@ -2534,8 +2487,8 @@ pub const Value = struct { else => val.hashPtr(hasher, mod), }, .Array, .Vector => { - const len = ty.arrayLen(); - const elem_ty = ty.childType(); + const len = ty.arrayLen(mod); + const elem_ty = ty.childType(mod); var index: usize = 0; var elem_value_buf: ElemValueBuffer = undefined; while (index < len) : (index += 1) { @@ -2544,8 +2497,7 @@ pub const Value = struct { } }, .Optional => if (val.castTag(.opt_payload)) |payload| { - var buf: Type.Payload.ElemType = undefined; - const child_ty = ty.optionalChild(&buf); + const child_ty = ty.optionalChild(mod); 
payload.data.hashUncoerced(child_ty, hasher, mod); } else std.hash.autoHash(hasher, std.builtin.TypeId.Null), .ErrorSet, .ErrorUnion => if (val.getError()) |err| hasher.update(err) else { @@ -2720,7 +2672,7 @@ pub const Value = struct { const decl_index = val.castTag(.decl_ref).?.data; const decl = mod.declPtr(decl_index); if (decl.ty.zigTypeTag(mod) == .Array) { - return decl.ty.arrayLen(); + return decl.ty.arrayLen(mod); } else { return 1; } @@ -2729,7 +2681,7 @@ pub const Value = struct { const decl_index = val.castTag(.decl_ref_mut).?.data.decl_index; const decl = mod.declPtr(decl_index); if (decl.ty.zigTypeTag(mod) == .Array) { - return decl.ty.arrayLen(); + return decl.ty.arrayLen(mod); } else { return 1; } @@ -2737,7 +2689,7 @@ pub const Value = struct { .comptime_field_ptr => { const payload = val.castTag(.comptime_field_ptr).?.data; if (payload.field_ty.zigTypeTag(mod) == .Array) { - return payload.field_ty.arrayLen(); + return payload.field_ty.arrayLen(mod); } else { return 1; } @@ -3137,7 +3089,7 @@ pub const Value = struct { pub fn intToFloatAdvanced(val: Value, arena: Allocator, int_ty: Type, float_ty: Type, mod: *Module, opt_sema: ?*Sema) !Value { if (int_ty.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, int_ty.vectorLen()); + const result_data = try arena.alloc(Value, int_ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); @@ -3250,7 +3202,7 @@ pub const Value = struct { mod: *Module, ) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, ty.vectorLen()); + const result_data = try arena.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; @@ -3298,7 +3250,7 @@ pub const Value = struct { mod: *Module, ) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, ty.vectorLen()); + const result_data = try arena.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; @@ -3345,8 +3297,8 @@ pub const Value = struct { mod: *Module, ) !OverflowArithmeticResult { if (ty.zigTypeTag(mod) == .Vector) { - const overflowed_data = try arena.alloc(Value, ty.vectorLen()); - const result_data = try arena.alloc(Value, ty.vectorLen()); + const overflowed_data = try arena.alloc(Value, ty.vectorLen(mod)); + const result_data = try arena.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; @@ -3408,7 +3360,7 @@ pub const Value = struct { mod: *Module, ) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, ty.vectorLen()); + const result_data = try arena.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; @@ -3452,7 +3404,7 @@ pub const Value = struct { mod: *Module, ) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, ty.vectorLen()); + const result_data = try arena.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) 
|*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; @@ -3527,7 +3479,7 @@ pub const Value = struct { /// operands must be (vectors of) integers; handles undefined scalars. pub fn bitwiseNot(val: Value, ty: Type, arena: Allocator, mod: *Module) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, ty.vectorLen()); + const result_data = try arena.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); @@ -3565,7 +3517,7 @@ pub const Value = struct { /// operands must be (vectors of) integers; handles undefined scalars. pub fn bitwiseAnd(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try allocator.alloc(Value, ty.vectorLen()); + const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; @@ -3601,7 +3553,7 @@ pub const Value = struct { /// operands must be (vectors of) integers; handles undefined scalars. pub fn bitwiseNand(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, ty.vectorLen()); + const result_data = try arena.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; @@ -3631,7 +3583,7 @@ pub const Value = struct { /// operands must be (vectors of) integers; handles undefined scalars. pub fn bitwiseOr(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try allocator.alloc(Value, ty.vectorLen()); + const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; @@ -3666,7 +3618,7 @@ pub const Value = struct { /// operands must be (vectors of) integers; handles undefined scalars. pub fn bitwiseXor(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try allocator.alloc(Value, ty.vectorLen()); + const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; @@ -3701,7 +3653,7 @@ pub const Value = struct { pub fn intDiv(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try allocator.alloc(Value, ty.vectorLen()); + const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; @@ -3741,7 +3693,7 @@ pub const Value = struct { pub fn intDivFloor(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try allocator.alloc(Value, ty.vectorLen()); + const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) 
|*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; @@ -3781,7 +3733,7 @@ pub const Value = struct { pub fn intMod(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try allocator.alloc(Value, ty.vectorLen()); + const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; @@ -3857,7 +3809,7 @@ pub const Value = struct { pub fn floatRem(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen()); + const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; @@ -3904,7 +3856,7 @@ pub const Value = struct { pub fn floatMod(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen()); + const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; @@ -3950,7 +3902,7 @@ pub const Value = struct { pub fn intMul(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try allocator.alloc(Value, ty.vectorLen()); + const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; @@ -3986,7 +3938,7 @@ pub const Value = struct { pub fn intTrunc(val: Value, ty: Type, allocator: Allocator, signedness: std.builtin.Signedness, bits: u16, mod: *Module) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try allocator.alloc(Value, ty.vectorLen()); + const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); @@ -4007,7 +3959,7 @@ pub const Value = struct { mod: *Module, ) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try allocator.alloc(Value, ty.vectorLen()); + const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); @@ -4038,7 +3990,7 @@ pub const Value = struct { pub fn shl(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try allocator.alloc(Value, ty.vectorLen()); + const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) 
|*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; @@ -4078,8 +4030,8 @@ pub const Value = struct { mod: *Module, ) !OverflowArithmeticResult { if (ty.zigTypeTag(mod) == .Vector) { - const overflowed_data = try allocator.alloc(Value, ty.vectorLen()); - const result_data = try allocator.alloc(Value, ty.vectorLen()); + const overflowed_data = try allocator.alloc(Value, ty.vectorLen(mod)); + const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; @@ -4136,7 +4088,7 @@ pub const Value = struct { mod: *Module, ) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, ty.vectorLen()); + const result_data = try arena.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; @@ -4184,7 +4136,7 @@ pub const Value = struct { mod: *Module, ) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, ty.vectorLen()); + const result_data = try arena.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; @@ -4212,7 +4164,7 @@ pub const Value = struct { pub fn shr(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try allocator.alloc(Value, ty.vectorLen()); + const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; @@ -4264,7 +4216,7 @@ pub const Value = struct { ) !Value { const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen()); + const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); @@ -4300,7 +4252,7 @@ pub const Value = struct { ) !Value { const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen()); + const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; @@ -4359,7 +4311,7 @@ pub const Value = struct { ) !Value { const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen()); + const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; @@ -4418,7 +4370,7 @@ pub const Value = struct { ) !Value { const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen()); + const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) 
|*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; @@ -4477,7 +4429,7 @@ pub const Value = struct { ) !Value { const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen()); + const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var lhs_buf: Value.ElemValueBuffer = undefined; var rhs_buf: Value.ElemValueBuffer = undefined; @@ -4530,7 +4482,7 @@ pub const Value = struct { pub fn sqrt(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen()); + const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); @@ -4570,7 +4522,7 @@ pub const Value = struct { pub fn sin(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen()); + const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); @@ -4610,7 +4562,7 @@ pub const Value = struct { pub fn cos(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen()); + const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); @@ -4650,7 +4602,7 @@ pub const Value = struct { pub fn tan(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen()); + const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); @@ -4690,7 +4642,7 @@ pub const Value = struct { pub fn exp(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen()); + const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); @@ -4730,7 +4682,7 @@ pub const Value = struct { pub fn exp2(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen()); + const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) 
|*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); @@ -4770,7 +4722,7 @@ pub const Value = struct { pub fn log(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen()); + const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); @@ -4810,7 +4762,7 @@ pub const Value = struct { pub fn log2(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen()); + const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); @@ -4850,7 +4802,7 @@ pub const Value = struct { pub fn log10(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen()); + const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); @@ -4890,7 +4842,7 @@ pub const Value = struct { pub fn fabs(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen()); + const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); @@ -4930,7 +4882,7 @@ pub const Value = struct { pub fn floor(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen()); + const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); @@ -4970,7 +4922,7 @@ pub const Value = struct { pub fn ceil(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen()); + const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { var buf: Value.ElemValueBuffer = undefined; const elem_val = val.elemValueBuffer(mod, i, &buf); @@ -5010,7 +4962,7 @@ pub const Value = struct { pub fn round(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen()); + const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) 
                 var buf: Value.ElemValueBuffer = undefined;
                 const elem_val = val.elemValueBuffer(mod, i, &buf);
@@ -5050,7 +5002,7 @@ pub const Value = struct {
     pub fn trunc(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
         const target = mod.getTarget();
         if (float_type.zigTypeTag(mod) == .Vector) {
-            const result_data = try arena.alloc(Value, float_type.vectorLen());
+            const result_data = try arena.alloc(Value, float_type.vectorLen(mod));
             for (result_data, 0..) |*scalar, i| {
                 var buf: Value.ElemValueBuffer = undefined;
                 const elem_val = val.elemValueBuffer(mod, i, &buf);
@@ -5097,7 +5049,7 @@ pub const Value = struct {
     ) !Value {
         const target = mod.getTarget();
         if (float_type.zigTypeTag(mod) == .Vector) {
-            const result_data = try arena.alloc(Value, float_type.vectorLen());
+            const result_data = try arena.alloc(Value, float_type.vectorLen(mod));
             for (result_data, 0..) |*scalar, i| {
                 var mulend1_buf: Value.ElemValueBuffer = undefined;
                 const mulend1_elem = mulend1.elemValueBuffer(mod, i, &mulend1_buf);
-- cgit v1.2.3

From 6ab8b6f8b273356ce248a075b6a0657bfea33c79 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Thu, 4 May 2023 21:40:35 -0700
Subject: stage2: move undef, unreach, null values to InternPool

---
 src/Module.zig            |    8 +-
 src/Sema.zig              | 1102 +++++++++++++++++++++++----------------------
 src/TypedValue.zig        |  575 +++++++++++------------
 src/arch/wasm/CodeGen.zig |   14 +-
 src/codegen.zig           |  150 +++---
 src/codegen/c.zig         |  242 +++++-----
 src/codegen/llvm.zig      |  119 ++---
 src/codegen/spirv.zig     |   11 +-
 src/type.zig              |   58 +--
 src/value.zig             |  494 ++++++++++----------
 10 files changed, 1440 insertions(+), 1333 deletions(-)

(limited to 'src/arch')
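The hunks that follow apply one mechanical pattern throughout Sema and its neighbors: values that have been migrated into the InternPool are recognized by `ip_index` alone, and only legacy values (those with `ip_index == .none`) still dispatch on `Value.tag()`. A condensed sketch of that dispatch shape, modeled on the `resolveConstValue` hunk below (illustrative only, not part of the patch):

    switch (val.ip_index) {
        // Interned special values are matched directly on the index.
        .generic_poison => return error.GenericPoison,
        .undef => return sema.failWithUseOfUndef(block, src),
        // Not yet interned: fall back to the legacy tag dispatch.
        .none => switch (val.tag()) {
            .variable => return sema.failWithNeededComptime(block, src, reason),
            else => return val,
        },
        else => return val,
    }

The same shape explains why several large switches below gain a nested `.none => switch (val.tag()) { ... }` arm wrapped around their previous bodies.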
diff --git a/src/Module.zig b/src/Module.zig
index 67ca91266c..b1cbd88297 100644
--- a/src/Module.zig
+++ b/src/Module.zig
@@ -932,7 +932,7 @@ pub const Decl = struct {
         assert(decl.has_tv);
         return switch (decl.val.tag()) {
             .extern_fn => true,
-            .variable => decl.val.castTag(.variable).?.data.init.tag() == .unreachable_value,
+            .variable => decl.val.castTag(.variable).?.data.init.ip_index == .unreachable_value,
             else => false,
         };
     }
@@ -4849,6 +4849,8 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool {
     var is_extern = false;
     switch (decl_tv.val.ip_index) {
         .generic_poison => unreachable,
+        .unreachable_value => unreachable,
+
         .none => switch (decl_tv.val.tag()) {
             .variable => {
                 const variable = decl_tv.val.castTag(.variable).?.data;
@@ -4869,8 +4871,6 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool {
             }
         },
 
-        .unreachable_value => unreachable,
-
         .function => {},
 
         else => {
@@ -6592,7 +6592,7 @@ pub fn populateTestFunctions(
             .len = try Value.Tag.int_u64.create(arena, test_name_slice.len),
         }), // name
         try Value.Tag.decl_ref.create(arena, test_decl_index), // func
-        Value.initTag(.null_value), // async_frame_size
+        Value.null, // async_frame_size
     };
     test_fn_vals[i] = try Value.Tag.aggregate.create(arena, field_vals);
 }
diff --git a/src/Sema.zig b/src/Sema.zig
index 87df2f23e1..3406d0d80f 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -1877,8 +1877,8 @@ fn resolveConstValue(
     if (try sema.resolveMaybeUndefValAllowVariables(air_ref)) |val| {
         switch (val.ip_index) {
             .generic_poison => return error.GenericPoison,
+            .undef => return sema.failWithUseOfUndef(block, src),
             .none => switch (val.tag()) {
-                .undef => return sema.failWithUseOfUndef(block, src),
                 .variable => return sema.failWithNeededComptime(block, src, reason),
                 else => return val,
             },
@@ -4409,7 +4409,7 @@ fn validateStructInit(
             if (field_ptr != 0) continue;

             const default_val = struct_ty.structFieldDefaultValue(i);
-            if (default_val.tag() == .unreachable_value) {
+            if (default_val.ip_index == .unreachable_value) {
                 if (struct_ty.isTuple()) {
                     const template = "missing tuple field with index {d}";
                     if (root_msg) |msg| {
@@ -4554,7 +4554,7 @@ fn validateStructInit(
         }

         const default_val = struct_ty.structFieldDefaultValue(i);
-        if (default_val.tag() == .unreachable_value) {
+        if (default_val.ip_index == .unreachable_value) {
            if (struct_ty.isTuple()) {
                 const template = "missing tuple field with index {d}";
                 if (root_msg) |msg| {
@@ -4644,7 +4644,7 @@ fn zirValidateArrayInit(
     var i = instrs.len;
     while (i < array_len) : (i += 1) {
         const default_val = array_ty.structFieldDefaultValue(i);
-        if (default_val.tag() == .unreachable_value) {
+        if (default_val.ip_index == .unreachable_value) {
             const template = "missing tuple field with index {d}";
             if (root_msg) |msg| {
                 try sema.errNote(block, init_src, msg, template, .{i});
@@ -7885,7 +7885,7 @@ fn resolveTupleLazyValues(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type)
     const tuple = ty.tupleFields();
     for (tuple.values, 0..) |field_val, i| {
         try sema.resolveTupleLazyValues(block, src, tuple.types[i]);
-        if (field_val.tag() == .unreachable_value) continue;
+        if (field_val.ip_index == .unreachable_value) continue;
         try sema.resolveLazyValue(field_val);
     }
 }
@@ -12641,7 +12641,7 @@ fn analyzeTupleCat(
         const default_val = lhs_ty.structFieldDefaultValue(i);
         values[i] = default_val;
         const operand_src = lhs_src; // TODO better source location
-        if (default_val.tag() == .unreachable_value) {
+        if (default_val.ip_index == .unreachable_value) {
             runtime_src = operand_src;
         }
     }
@@ -12651,7 +12651,7 @@ fn analyzeTupleCat(
         const default_val = rhs_ty.structFieldDefaultValue(i);
         values[i + lhs_len] = default_val;
         const operand_src = rhs_src; // TODO better source location
-        if (default_val.tag() == .unreachable_value) {
+        if (default_val.ip_index == .unreachable_value) {
             runtime_src = operand_src;
         }
     }
@@ -12809,8 +12809,8 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
         while (elem_i < lhs_len) : (elem_i += 1) {
             const lhs_elem_i = elem_i;
             const elem_ty = if (lhs_is_tuple) lhs_ty.structFieldType(lhs_elem_i) else lhs_info.elem_type;
-            const elem_default_val = if (lhs_is_tuple) lhs_ty.structFieldDefaultValue(lhs_elem_i) else Value.initTag(.unreachable_value);
-            const elem_val = if (elem_default_val.tag() == .unreachable_value) try lhs_sub_val.elemValue(sema.mod, sema.arena, lhs_elem_i) else elem_default_val;
+            const elem_default_val = if (lhs_is_tuple) lhs_ty.structFieldDefaultValue(lhs_elem_i) else Value.@"unreachable";
+            const elem_val = if (elem_default_val.ip_index == .unreachable_value) try lhs_sub_val.elemValue(sema.mod, sema.arena, lhs_elem_i) else elem_default_val;
             const elem_val_inst = try sema.addConstant(elem_ty, elem_val);
             const coerced_elem_val_inst = try sema.coerce(block, resolved_elem_ty, elem_val_inst, .unneeded);
             const coerced_elem_val = try sema.resolveConstMaybeUndefVal(block, .unneeded, coerced_elem_val_inst, "");
@@ -12819,8 +12819,8 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
         while (elem_i < result_len) : (elem_i += 1) {
             const rhs_elem_i = elem_i - lhs_len;
             const elem_ty = if (rhs_is_tuple) rhs_ty.structFieldType(rhs_elem_i) else rhs_info.elem_type;
-            const elem_default_val = if (rhs_is_tuple) rhs_ty.structFieldDefaultValue(rhs_elem_i) else Value.initTag(.unreachable_value);
-            const elem_val = if (elem_default_val.tag() == .unreachable_value) try rhs_sub_val.elemValue(sema.mod, sema.arena, rhs_elem_i) else elem_default_val;
+            const elem_default_val = if (rhs_is_tuple) rhs_ty.structFieldDefaultValue(rhs_elem_i) else Value.@"unreachable";
+            const elem_val = if (elem_default_val.ip_index == .unreachable_value) try rhs_sub_val.elemValue(sema.mod, sema.arena, rhs_elem_i) else elem_default_val;
             const elem_val_inst = try sema.addConstant(elem_ty, elem_val);
             const coerced_elem_val_inst = try sema.coerce(block, resolved_elem_ty, elem_val_inst, .unneeded);
             const coerced_elem_val = try sema.resolveConstMaybeUndefVal(block, .unneeded, coerced_elem_val_inst, "");
@@ -12962,7 +12962,7 @@ fn analyzeTupleMul(
         types[i] = operand_ty.structFieldType(i);
         values[i] = operand_ty.structFieldDefaultValue(i);
         const operand_src = lhs_src; // TODO better source location
-        if (values[i].tag() == .unreachable_value) {
+        if (values[i].ip_index == .unreachable_value) {
             runtime_src = operand_src;
         }
     }
@@ -14332,7 +14332,7 @@ fn zirOverflowArithmetic(

     var result: struct {
         inst: Air.Inst.Ref = .none,
-        wrapped: Value = Value.initTag(.unreachable_value),
+        wrapped: Value = Value.@"unreachable",
         overflow_bit: Value,
     } = result: {
         switch (zir_tag) {
@@ -14508,8 +14508,8 @@ fn overflowArithmeticTupleType(sema: *Sema, ty: Type) !Type {
     types[0] = ty;
     types[1] = ov_ty;
-    values[0] = Value.initTag(.unreachable_value);
-    values[1] = Value.initTag(.unreachable_value);
+    values[0] = Value.@"unreachable";
+    values[1] = Value.@"unreachable";

     return tuple_ty;
 }
@@ -15647,7 +15647,7 @@ fn zirClosureCapture(
     // value only. In such case we preserve the type and use a dummy runtime value.
     const operand = try sema.resolveInst(inst_data.operand);
     const val = (try sema.resolveMaybeUndefValAllowVariables(operand)) orelse
-        Value.initTag(.unreachable_value);
+        Value.@"unreachable";

     try block.wip_capture_scope.captures.putNoClobber(sema.gpa, inst, .{
         .ty = try sema.typeOf(operand).copy(sema.perm_arena),
@@ -15684,7 +15684,7 @@ fn zirClosureGet(
         scope = scope.parent.?;
     };

-    if (tv.val.tag() == .unreachable_value and !block.is_typeof and sema.func == null) {
+    if (tv.val.ip_index == .unreachable_value and !block.is_typeof and sema.func == null) {
         const msg = msg: {
             const name = name: {
                 const file = sema.owner_decl.getFileScope();
@@ -15712,7 +15712,7 @@ fn zirClosureGet(
         return sema.failWithOwnedErrorMsg(msg);
     }

-    if (tv.val.tag() == .unreachable_value and !block.is_typeof and !block.is_comptime and sema.func != null) {
+    if (tv.val.ip_index == .unreachable_value and !block.is_typeof and !block.is_comptime and sema.func != null) {
         const msg = msg: {
             const name = name: {
                 const file = sema.owner_decl.getFileScope();
@@ -15742,7 +15742,7 @@ fn zirClosureGet(
         return sema.failWithOwnedErrorMsg(msg);
     }

-    if (tv.val.tag() == .unreachable_value) {
+    if (tv.val.ip_index == .unreachable_value) {
         assert(block.is_typeof);
         // We need a dummy runtime instruction with the correct type.
         return block.addTy(.alloc, tv.ty);
@@ -16477,7 +16477,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                     const struct_field_fields = try fields_anon_decl.arena().create([5]Value);
                     const field_val = tuple.values[i];
-                    const is_comptime = field_val.tag() != .unreachable_value;
+                    const is_comptime = field_val.ip_index != .unreachable_value;
                     const opt_default_val = if (is_comptime) field_val else null;
                     const default_val_ptr = try sema.optRefValue(block, field_ty, opt_default_val);
                     struct_field_fields.* = .{
@@ -16518,7 +16518,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                     };

                     const struct_field_fields = try fields_anon_decl.arena().create([5]Value);
-                    const opt_default_val = if (field.default_val.tag() == .unreachable_value)
+                    const opt_default_val = if (field.default_val.ip_index == .unreachable_value)
                         null
                     else
                         field.default_val;
@@ -16570,7 +16570,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                     const backing_int_ty_val = try Value.Tag.ty.create(sema.arena, struct_obj.backing_int_ty);
                     break :blk try Value.Tag.opt_payload.create(sema.arena, backing_int_ty_val);
                 } else {
-                    break :blk Value.initTag(.null_value);
+                    break :blk Value.null;
                 }
             };
@@ -17974,7 +17974,7 @@ fn finishStructInit(
             for (struct_obj.values, 0..) |default_val, i| {
                 if (field_inits[i] != .none) continue;

-                if (default_val.tag() == .unreachable_value) {
+                if (default_val.ip_index == .unreachable_value) {
                     const field_name = struct_obj.names[i];
                     const template = "missing struct field: {s}";
                     const args = .{field_name};
@@ -17994,7 +17994,7 @@ fn finishStructInit(
                 if (field_inits[i] != .none) continue;

                 const default_val = struct_ty.structFieldDefaultValue(i);
-                if (default_val.tag() == .unreachable_value) {
+                if (default_val.ip_index == .unreachable_value) {
                     const template = "missing tuple field with index {d}";
                     if (root_msg) |msg| {
                         try sema.errNote(block, init_src, msg, template, .{i});
@@ -18010,7 +18010,7 @@ fn finishStructInit(
             for (struct_obj.fields.values(), 0..) |field, i| {
                 if (field_inits[i] != .none) continue;

-                if (field.default_val.tag() == .unreachable_value) {
+                if (field.default_val.ip_index == .unreachable_value) {
                     const field_name = struct_obj.fields.keys()[i];
                     const template = "missing struct field: {s}";
                     const args = .{field_name};
@@ -18145,7 +18145,7 @@ fn zirStructInitAnon(
         if (try sema.resolveMaybeUndefVal(init)) |init_val| {
             values[i] = init_val;
         } else {
-            values[i] = Value.initTag(.unreachable_value);
+            values[i] = Value.@"unreachable";
             runtime_index = i;
         }
     }
@@ -18191,7 +18191,7 @@ fn zirStructInitAnon(
             .@"addrspace" = target_util.defaultAddressSpace(target, .local),
             .pointee_type = field_ty,
         });
-        if (values[i].tag() == .unreachable_value) {
+        if (values[i].ip_index == .unreachable_value) {
             const init = try sema.resolveInst(item.data.init);
             const field_ptr = try block.addStructFieldPtr(alloc, i, field_ptr_ty);
             _ = try block.addBinOp(.store, field_ptr, init);
@@ -18357,7 +18357,7 @@ fn zirArrayInitAnon(
         if (try sema.resolveMaybeUndefVal(elem)) |val| {
             values[i] = val;
         } else {
-            values[i] = Value.initTag(.unreachable_value);
+            values[i] = Value.@"unreachable";
             runtime_src = operand_src;
         }
     }
@@ -18390,7 +18390,7 @@ fn zirArrayInitAnon(
             .@"addrspace" = target_util.defaultAddressSpace(target, .local),
             .pointee_type = types[i],
         });
-        if (values[i].tag() == .unreachable_value) {
+        if (values[i].ip_index == .unreachable_value) {
             const field_ptr = try block.addStructFieldPtr(alloc, i, field_ptr_ty);
             _ = try block.addBinOp(.store, field_ptr, try sema.resolveInst(operand));
         }
@@ -19545,8 +19545,8 @@ fn reifyStruct(
             else
                 opt_val;
             break :blk try payload_val.copy(new_decl_arena_allocator);
-        } else Value.initTag(.unreachable_value);
-        if (is_comptime_val.toBool(mod) and default_val.tag() == .unreachable_value) {
+        } else Value.@"unreachable";
+        if (is_comptime_val.toBool(mod) and default_val.ip_index == .unreachable_value) {
             return sema.fail(block, src, "comptime field without default initialization value", .{});
         }

@@ -22579,7 +22579,7 @@ fn zirVarExtended(
         break :blk (try sema.resolveMaybeUndefVal(init)) orelse
             return sema.failWithNeededComptime(block, init_src, "container level variable initializers must be comptime-known");
-    } else Value.initTag(.unreachable_value);
+    } else Value.@"unreachable";

     try sema.validateVarType(block, ty_src, var_ty, small.is_extern);

@@ -23080,7 +23080,7 @@ fn zirBuiltinExtern(
     const new_var = try new_decl_arena_allocator.create(Module.Var);
     new_var.* = .{
         .owner_decl = sema.owner_decl_index,
-        .init = Value.initTag(.unreachable_value),
+        .init = Value.@"unreachable",
         .is_extern = true,
         .is_mutable = false,
         .is_threadlocal = options.is_thread_local,
@@ -25736,7 +25736,7 @@ fn coerceExtra(
                 }
             } else {
                 in_memory_result = .{ .ptr_sentinel = .{
-                    .actual = Value.initTag(.unreachable_value),
+                    .actual = Value.@"unreachable",
                     .wanted = dest_sent,
                     .ty = dst_elem_type,
                 } };
@@ -26116,26 +26116,28 @@ fn coerceExtra(
         .ErrorUnion => switch (inst_ty.zigTypeTag(mod)) {
             .ErrorUnion => eu: {
                 if (maybe_inst_val) |inst_val| {
-                    switch (inst_val.tag()) {
+                    switch (inst_val.ip_index) {
                         .undef => return sema.addConstUndef(dest_ty),
-                        .eu_payload => {
-                            const payload = try sema.addConstant(
-                                inst_ty.errorUnionPayload(),
-                                inst_val.castTag(.eu_payload).?.data,
-                            );
-                            return sema.wrapErrorUnionPayload(block, dest_ty, payload, inst_src) catch |err| switch (err) {
-                                error.NotCoercible => break :eu,
-                                else => |e| return e,
-                            };
-                        },
-                        else => {
-                            const error_set = try sema.addConstant(
-                                inst_ty.errorUnionSet(),
-                                inst_val,
-                            );
-                            return sema.wrapErrorUnionSet(block, dest_ty, error_set, inst_src);
+                        .none => switch (inst_val.tag()) {
+                            .eu_payload => {
+                                const payload = try sema.addConstant(
+                                    inst_ty.errorUnionPayload(),
+                                    inst_val.castTag(.eu_payload).?.data,
+                                );
+                                return sema.wrapErrorUnionPayload(block, dest_ty, payload, inst_src) catch |err| switch (err) {
+                                    error.NotCoercible => break :eu,
+                                    else => |e| return e,
+                                };
+                            },
+                            else => {},
                         },
+                        else => {},
                     }
+                    const error_set = try sema.addConstant(
+                        inst_ty.errorUnionSet(),
+                        inst_val,
+                    );
+                    return sema.wrapErrorUnionSet(block, dest_ty, error_set, inst_src);
                 }
             },
             .ErrorSet => {
@@ -26413,7 +26415,7 @@ const InMemoryCoercionResult = union(enum) {
                 break;
             },
             .array_sentinel => |sentinel| {
-                if (sentinel.actual.tag() != .unreachable_value) {
+                if (sentinel.actual.ip_index != .unreachable_value) {
                     try sema.errNote(block, src, msg, "array sentinel '{}' cannot cast into array sentinel '{}'", .{
                         sentinel.actual.fmtValue(sentinel.ty, sema.mod), sentinel.wanted.fmtValue(sentinel.ty, sema.mod),
                     });
@@ -26539,7 +26541,7 @@ const InMemoryCoercionResult = union(enum) {
                 break;
             },
             .ptr_sentinel => |sentinel| {
-                if (sentinel.actual.tag() != .unreachable_value) {
+                if (sentinel.actual.ip_index != .unreachable_value) {
                     try sema.errNote(block, src, msg, "pointer sentinel '{}' cannot cast into pointer sentinel '{}'", .{
                         sentinel.actual.fmtValue(sentinel.ty, sema.mod), sentinel.wanted.fmtValue(sentinel.ty, sema.mod),
                     });
@@ -26747,8 +26749,8 @@ fn coerceInMemoryAllowed(
         dest_info.sentinel.?.eql(src_info.sentinel.?, dest_info.elem_type, mod));
     if (!ok_sent) {
         return InMemoryCoercionResult{ .array_sentinel = .{
-            .actual = src_info.sentinel orelse Value.initTag(.unreachable_value),
-            .wanted = dest_info.sentinel orelse Value.initTag(.unreachable_value),
+            .actual = src_info.sentinel orelse Value.@"unreachable",
+            .wanted = dest_info.sentinel orelse Value.@"unreachable",
             .ty = dest_info.elem_type,
         } };
     }
@@ -27129,8 +27131,8 @@ fn coerceInMemoryAllowedPtrs(
         dest_info.sentinel.?.eql(src_info.sentinel.?, dest_info.pointee_type, sema.mod));
     if (!ok_sent) {
         return InMemoryCoercionResult{ .ptr_sentinel = .{
-            .actual = src_info.sentinel orelse Value.initTag(.unreachable_value),
-            .wanted = dest_info.sentinel orelse Value.initTag(.unreachable_value),
+            .actual = src_info.sentinel orelse Value.@"unreachable",
+            .wanted = dest_info.sentinel orelse Value.@"unreachable",
             .ty = dest_info.pointee_type,
         } };
     }
@@ -27540,7 +27542,7 @@ fn beginComptimePtrMutation(
                         };
                     }

-                    switch (val_ptr.tag()) {
+                    switch (val_ptr.ip_index) {
                         .undef => {
                             // An array has been initialized to undefined at comptime and now we
                             // are for the first time setting an element. We must change the representation
@@ -27565,127 +27567,130 @@ fn beginComptimePtrMutation(
                                 parent.decl_ref_mut,
                             );
                         },
-                        .bytes => {
-                            // An array is memory-optimized to store a slice of bytes, but we are about
-                            // to modify an individual field and the representation has to change.
-                            // If we wanted to avoid this, there would need to be special detection
-                            // elsewhere to identify when writing a value to an array element that is stored
-                            // using the `bytes` tag, and handle it without making a call to this function.
-                            const arena = parent.beginArena(sema.mod);
-                            defer parent.finishArena(sema.mod);
-
-                            const bytes = val_ptr.castTag(.bytes).?.data;
-                            const dest_len = parent.ty.arrayLenIncludingSentinel(mod);
-                            // bytes.len may be one greater than dest_len because of the case when
-                            // assigning `[N:S]T` to `[N]T`. This is allowed; the sentinel is omitted.
-                            assert(bytes.len >= dest_len);
-                            const elems = try arena.alloc(Value, @intCast(usize, dest_len));
-                            for (elems, 0..) |*elem, i| {
-                                elem.* = try Value.Tag.int_u64.create(arena, bytes[i]);
-                            }
-
-                            val_ptr.* = try Value.Tag.aggregate.create(arena, elems);
+                        .none => switch (val_ptr.tag()) {
+                            .bytes => {
+                                // An array is memory-optimized to store a slice of bytes, but we are about
+                                // to modify an individual field and the representation has to change.
+                                // If we wanted to avoid this, there would need to be special detection
+                                // elsewhere to identify when writing a value to an array element that is stored
+                                // using the `bytes` tag, and handle it without making a call to this function.
+                                const arena = parent.beginArena(sema.mod);
+                                defer parent.finishArena(sema.mod);
+
+                                const bytes = val_ptr.castTag(.bytes).?.data;
+                                const dest_len = parent.ty.arrayLenIncludingSentinel(mod);
+                                // bytes.len may be one greater than dest_len because of the case when
+                                // assigning `[N:S]T` to `[N]T`. This is allowed; the sentinel is omitted.
+                                assert(bytes.len >= dest_len);
+                                const elems = try arena.alloc(Value, @intCast(usize, dest_len));
+                                for (elems, 0..) |*elem, i| {
+                                    elem.* = try Value.Tag.int_u64.create(arena, bytes[i]);
+                                }

-                            return beginComptimePtrMutationInner(
-                                sema,
-                                block,
-                                src,
-                                elem_ty,
-                                &elems[elem_ptr.index],
-                                ptr_elem_ty,
-                                parent.decl_ref_mut,
-                            );
-                        },
-                        .str_lit => {
-                            // An array is memory-optimized to store a slice of bytes, but we are about
-                            // to modify an individual field and the representation has to change.
-                            // If we wanted to avoid this, there would need to be special detection
-                            // elsewhere to identify when writing a value to an array element that is stored
-                            // using the `str_lit` tag, and handle it without making a call to this function.
-                            const arena = parent.beginArena(sema.mod);
-                            defer parent.finishArena(sema.mod);
+                                val_ptr.* = try Value.Tag.aggregate.create(arena, elems);

-                            const str_lit = val_ptr.castTag(.str_lit).?.data;
-                            const dest_len = parent.ty.arrayLenIncludingSentinel(mod);
-                            const bytes = sema.mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len];
-                            const elems = try arena.alloc(Value, @intCast(usize, dest_len));
-                            for (bytes, 0..) |byte, i| {
-                                elems[i] = try Value.Tag.int_u64.create(arena, byte);
-                            }
-                            if (parent.ty.sentinel(mod)) |sent_val| {
-                                assert(elems.len == bytes.len + 1);
-                                elems[bytes.len] = sent_val;
-                            }
+                                return beginComptimePtrMutationInner(
+                                    sema,
+                                    block,
+                                    src,
+                                    elem_ty,
+                                    &elems[elem_ptr.index],
+                                    ptr_elem_ty,
+                                    parent.decl_ref_mut,
+                                );
+                            },
+                            .str_lit => {
+                                // An array is memory-optimized to store a slice of bytes, but we are about
+                                // to modify an individual field and the representation has to change.
+                                // If we wanted to avoid this, there would need to be special detection
+                                // elsewhere to identify when writing a value to an array element that is stored
+                                // using the `str_lit` tag, and handle it without making a call to this function.
+                                const arena = parent.beginArena(sema.mod);
+                                defer parent.finishArena(sema.mod);
+
+                                const str_lit = val_ptr.castTag(.str_lit).?.data;
+                                const dest_len = parent.ty.arrayLenIncludingSentinel(mod);
+                                const bytes = sema.mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len];
+                                const elems = try arena.alloc(Value, @intCast(usize, dest_len));
+                                for (bytes, 0..) |byte, i| {
+                                    elems[i] = try Value.Tag.int_u64.create(arena, byte);
+                                }
+                                if (parent.ty.sentinel(mod)) |sent_val| {
+                                    assert(elems.len == bytes.len + 1);
+                                    elems[bytes.len] = sent_val;
+                                }

-                            val_ptr.* = try Value.Tag.aggregate.create(arena, elems);
+                                val_ptr.* = try Value.Tag.aggregate.create(arena, elems);

-                            return beginComptimePtrMutationInner(
-                                sema,
-                                block,
-                                src,
-                                elem_ty,
-                                &elems[elem_ptr.index],
-                                ptr_elem_ty,
-                                parent.decl_ref_mut,
-                            );
-                        },
-                        .repeated => {
-                            // An array is memory-optimized to store only a single element value, and
-                            // that value is understood to be the same for the entire length of the array.
-                            // However, now we want to modify an individual field and so the
-                            // representation has to change. If we wanted to avoid this, there would
-                            // need to be special detection elsewhere to identify when writing a value to an
-                            // array element that is stored using the `repeated` tag, and handle it
-                            // without making a call to this function.
-                            const arena = parent.beginArena(sema.mod);
-                            defer parent.finishArena(sema.mod);
+                                return beginComptimePtrMutationInner(
+                                    sema,
+                                    block,
+                                    src,
+                                    elem_ty,
+                                    &elems[elem_ptr.index],
+                                    ptr_elem_ty,
+                                    parent.decl_ref_mut,
+                                );
+                            },
+                            .repeated => {
+                                // An array is memory-optimized to store only a single element value, and
+                                // that value is understood to be the same for the entire length of the array.
+                                // However, now we want to modify an individual field and so the
+                                // representation has to change. If we wanted to avoid this, there would
+                                // need to be special detection elsewhere to identify when writing a value to an
+                                // array element that is stored using the `repeated` tag, and handle it
+                                // without making a call to this function.
+                                const arena = parent.beginArena(sema.mod);
+                                defer parent.finishArena(sema.mod);
+
+                                const repeated_val = try val_ptr.castTag(.repeated).?.data.copy(arena);
+                                const array_len_including_sentinel =
+                                    try sema.usizeCast(block, src, parent.ty.arrayLenIncludingSentinel(mod));
+                                const elems = try arena.alloc(Value, array_len_including_sentinel);
+                                if (elems.len > 0) elems[0] = repeated_val;
+                                for (elems[1..]) |*elem| {
+                                    elem.* = try repeated_val.copy(arena);
+                                }

-                            const repeated_val = try val_ptr.castTag(.repeated).?.data.copy(arena);
-                            const array_len_including_sentinel =
-                                try sema.usizeCast(block, src, parent.ty.arrayLenIncludingSentinel(mod));
-                            const elems = try arena.alloc(Value, array_len_including_sentinel);
-                            if (elems.len > 0) elems[0] = repeated_val;
-                            for (elems[1..]) |*elem| {
-                                elem.* = try repeated_val.copy(arena);
-                            }
+                                val_ptr.* = try Value.Tag.aggregate.create(arena, elems);

-                            val_ptr.* = try Value.Tag.aggregate.create(arena, elems);
+                                return beginComptimePtrMutationInner(
                                     sema,
                                     block,
                                     src,
                                     elem_ty,
-                                &elems[elem_ptr.index],
+                                    &elems[elem_ptr.index],
                                     ptr_elem_ty,
                                     parent.decl_ref_mut,
-                            );
-                        },
+                                );
+                            },

-                        .aggregate => return beginComptimePtrMutationInner(
-                            sema,
-                            block,
-                            src,
-                            elem_ty,
-                            &val_ptr.castTag(.aggregate).?.data[elem_ptr.index],
-                            ptr_elem_ty,
-                            parent.decl_ref_mut,
-                        ),
+                            .aggregate => return beginComptimePtrMutationInner(
+                                sema,
+                                block,
+                                src,
+                                elem_ty,
+                                &val_ptr.castTag(.aggregate).?.data[elem_ptr.index],
+                                ptr_elem_ty,
+                                parent.decl_ref_mut,
+                            ),

+                            .the_only_possible_value => {
+                                const duped = try sema.arena.create(Value);
+                                duped.* = Value.initTag(.the_only_possible_value);
+                                return beginComptimePtrMutationInner(
+                                    sema,
+                                    block,
+                                    src,
+                                    elem_ty,
+                                    duped,
+                                    ptr_elem_ty,
+                                    parent.decl_ref_mut,
+                                );
+                            },
-                        .the_only_possible_value => {
-                            const duped = try sema.arena.create(Value);
-                            duped.* = Value.initTag(.the_only_possible_value);
-                            return beginComptimePtrMutationInner(
-                                sema,
-                                block,
-                                src,
-                                elem_ty,
-                                duped,
-                                ptr_elem_ty,
-                                parent.decl_ref_mut,
-                            );
+                            else => unreachable,
                         },
-                        else => unreachable,
                     }
                 },
@@ -27738,7 +27743,7 @@ fn beginComptimePtrMutation(
             var parent = try sema.beginComptimePtrMutation(block, src, field_ptr.container_ptr, field_ptr.container_ty);

             switch (parent.pointee) {
-                .direct => |val_ptr| switch (val_ptr.tag()) {
+                .direct => |val_ptr| switch (val_ptr.ip_index) {
                     .undef => {
                         // A struct or union has been initialized to undefined at comptime and now we
                         // are for the first time setting a field. We must change the representation
@@ -27815,72 +27820,75 @@ fn beginComptimePtrMutation(
                             else => unreachable,
                         }
                     },
-                    .aggregate => return beginComptimePtrMutationInner(
-                        sema,
-                        block,
-                        src,
-                        parent.ty.structFieldType(field_index),
-                        &val_ptr.castTag(.aggregate).?.data[field_index],
-                        ptr_elem_ty,
-                        parent.decl_ref_mut,
-                    ),
-
-                    .@"union" => {
-                        // We need to set the active field of the union.
-                        const arena = parent.beginArena(sema.mod);
-                        defer parent.finishArena(sema.mod);
-
-                        const payload = &val_ptr.castTag(.@"union").?.data;
-                        payload.tag = try Value.Tag.enum_field_index.create(arena, field_index);
-
-                        return beginComptimePtrMutationInner(
+                    .none => switch (val_ptr.tag()) {
+                        .aggregate => return beginComptimePtrMutationInner(
                             sema,
                             block,
                             src,
                             parent.ty.structFieldType(field_index),
-                            &payload.val,
+                            &val_ptr.castTag(.aggregate).?.data[field_index],
                             ptr_elem_ty,
                             parent.decl_ref_mut,
-                        );
-                    },
-                    .slice => switch (field_index) {
-                        Value.Payload.Slice.ptr_index => return beginComptimePtrMutationInner(
-                            sema,
-                            block,
-                            src,
-                            parent.ty.slicePtrFieldType(try sema.arena.create(Type.SlicePtrFieldTypeBuffer)),
-                            &val_ptr.castTag(.slice).?.data.ptr,
-                            ptr_elem_ty,
-                            parent.decl_ref_mut,
                         ),

-                        Value.Payload.Slice.len_index => return beginComptimePtrMutationInner(
-                            sema,
-                            block,
-                            src,
-                            Type.usize,
-                            &val_ptr.castTag(.slice).?.data.len,
-                            ptr_elem_ty,
-                            parent.decl_ref_mut,
-                        ),
+                        .@"union" => {
+                            // We need to set the active field of the union.
+                            const arena = parent.beginArena(sema.mod);
+                            defer parent.finishArena(sema.mod);

-                        else => unreachable,
-                    },
+                            const payload = &val_ptr.castTag(.@"union").?.data;
+                            payload.tag = try Value.Tag.enum_field_index.create(arena, field_index);

-                    .empty_struct_value => {
-                        const duped = try sema.arena.create(Value);
-                        duped.* = Value.initTag(.the_only_possible_value);
-                        return beginComptimePtrMutationInner(
-                            sema,
-                            block,
-                            src,
-                            parent.ty.structFieldType(field_index),
-                            duped,
-                            ptr_elem_ty,
-                            parent.decl_ref_mut,
-                        );
-                    },
+                            return beginComptimePtrMutationInner(
+                                sema,
+                                block,
+                                src,
+                                parent.ty.structFieldType(field_index),
+                                &payload.val,
+                                ptr_elem_ty,
+                                parent.decl_ref_mut,
+                            );
+                        },
+                        .slice => switch (field_index) {
+                            Value.Payload.Slice.ptr_index => return beginComptimePtrMutationInner(
+                                sema,
+                                block,
+                                src,
+                                parent.ty.slicePtrFieldType(try sema.arena.create(Type.SlicePtrFieldTypeBuffer)),
+                                &val_ptr.castTag(.slice).?.data.ptr,
+                                ptr_elem_ty,
+                                parent.decl_ref_mut,
+                            ),
+
+                            Value.Payload.Slice.len_index => return beginComptimePtrMutationInner(
+                                sema,
+                                block,
+                                src,
+                                Type.usize,
+                                &val_ptr.castTag(.slice).?.data.len,
+                                ptr_elem_ty,
+                                parent.decl_ref_mut,
+                            ),
+
+                            else => unreachable,
+                        },
+
+                        .empty_struct_value => {
+                            const duped = try sema.arena.create(Value);
+                            duped.* = Value.initTag(.the_only_possible_value);
+                            return beginComptimePtrMutationInner(
+                                sema,
+                                block,
+                                src,
+                                parent.ty.structFieldType(field_index),
+                                duped,
+                                ptr_elem_ty,
+                                parent.decl_ref_mut,
+                            );
+                        },
+                        else => unreachable,
+                    },
                     else => unreachable,
                 },
                 .reinterpret => |reinterpret| {
@@ -27951,7 +27959,7 @@ fn beginComptimePtrMutation(
             switch (parent.pointee) {
                 .direct => |val_ptr| {
                     const payload_ty = parent.ty.optionalChild(mod);
-                    switch (val_ptr.tag()) {
+                    switch (val_ptr.ip_index) {
                         .undef, .null_value => {
                             // An optional has been initialized to undefined at comptime and now we
                             // are for the first time setting the payload. We must change the
@@ -27973,12 +27981,19 @@ fn beginComptimePtrMutation(
                                 .ty = payload_ty,
                             };
                         },
-                        .opt_payload => return ComptimePtrMutationKit{
-                            .decl_ref_mut = parent.decl_ref_mut,
-                            .pointee = .{ .direct = &val_ptr.castTag(.opt_payload).?.data },
-                            .ty = payload_ty,
-                        },
+                        .none => switch (val_ptr.tag()) {
+                            .opt_payload => return ComptimePtrMutationKit{
+                                .decl_ref_mut = parent.decl_ref_mut,
+                                .pointee = .{ .direct = &val_ptr.castTag(.opt_payload).?.data },
+                                .ty = payload_ty,
+                            },
+                            else => return ComptimePtrMutationKit{
+                                .decl_ref_mut = parent.decl_ref_mut,
+                                .pointee = .{ .direct = val_ptr },
+                                .ty = payload_ty,
+                            },
+                        },
                         else => return ComptimePtrMutationKit{
                             .decl_ref_mut = parent.decl_ref_mut,
                             .pointee = .{ .direct = val_ptr },
@@ -28092,231 +28107,236 @@ fn beginComptimePtrLoad(
 ) ComptimePtrLoadError!ComptimePtrLoadKit {
     const mod = sema.mod;
     const target = sema.mod.getTarget();
-    var deref: ComptimePtrLoadKit = switch (ptr_val.tag()) {
-        .decl_ref,
-        .decl_ref_mut,
-        => blk: {
-            const decl_index = switch (ptr_val.tag()) {
-                .decl_ref => ptr_val.castTag(.decl_ref).?.data,
-                .decl_ref_mut => ptr_val.castTag(.decl_ref_mut).?.data.decl_index,
-                else => unreachable,
-            };
-            const is_mutable = ptr_val.tag() == .decl_ref_mut;
-            const decl = sema.mod.declPtr(decl_index);
-            const decl_tv = try decl.typedValue();
-            if (decl_tv.val.tag() == .variable) return error.RuntimeLoad;
-
-            const layout_defined = decl.ty.hasWellDefinedLayout(mod);
-            break :blk ComptimePtrLoadKit{
-                .parent = if (layout_defined) .{ .tv = decl_tv, .byte_offset = 0 } else null,
-                .pointee = decl_tv,
-                .is_mutable = is_mutable,
-                .ty_without_well_defined_layout = if (!layout_defined) decl.ty else null,
-            };
+
+    var deref: ComptimePtrLoadKit = switch (ptr_val.ip_index) {
+        .null_value => {
+            return sema.fail(block, src, "attempt to use null value", .{});
         },
-        .elem_ptr => blk: {
-            const elem_ptr = ptr_val.castTag(.elem_ptr).?.data;
-            const elem_ty = elem_ptr.elem_ty;
-            var deref = try sema.beginComptimePtrLoad(block, src, elem_ptr.array_ptr, null);
+        .none => switch (ptr_val.tag()) {
+            .decl_ref,
+            .decl_ref_mut,
+            => blk: {
+                const decl_index = switch (ptr_val.tag()) {
+                    .decl_ref => ptr_val.castTag(.decl_ref).?.data,
+                    .decl_ref_mut => ptr_val.castTag(.decl_ref_mut).?.data.decl_index,
+                    else => unreachable,
+                };
+                const is_mutable = ptr_val.tag() == .decl_ref_mut;
+                const decl = sema.mod.declPtr(decl_index);
+                const decl_tv = try decl.typedValue();
+                if (decl_tv.val.tag() == .variable) return error.RuntimeLoad;
+
+                const layout_defined = decl.ty.hasWellDefinedLayout(mod);
+                break :blk ComptimePtrLoadKit{
+                    .parent = if (layout_defined) .{ .tv = decl_tv, .byte_offset = 0 } else null,
+                    .pointee = decl_tv,
+                    .is_mutable = is_mutable,
+                    .ty_without_well_defined_layout = if (!layout_defined) decl.ty else null,
+                };
+            },

-            // This code assumes that elem_ptrs have been "flattened" in order for direct dereference
-            // to succeed, meaning that elem ptrs of the same elem_ty are coalesced.
-            // our parent is not an elem_ptr with the same elem_ty, since that would be "unflattened"
-            if (elem_ptr.array_ptr.castTag(.elem_ptr)) |parent_elem_ptr| {
-                assert(!(parent_elem_ptr.data.elem_ty.eql(elem_ty, sema.mod)));
-            }
+            .elem_ptr => blk: {
+                const elem_ptr = ptr_val.castTag(.elem_ptr).?.data;
+                const elem_ty = elem_ptr.elem_ty;
+                var deref = try sema.beginComptimePtrLoad(block, src, elem_ptr.array_ptr, null);
+
+                // This code assumes that elem_ptrs have been "flattened" in order for direct dereference
+                // to succeed, meaning that elem ptrs of the same elem_ty are coalesced. Here we check that
+                // our parent is not an elem_ptr with the same elem_ty, since that would be "unflattened"
+                if (elem_ptr.array_ptr.castTag(.elem_ptr)) |parent_elem_ptr| {
+                    assert(!(parent_elem_ptr.data.elem_ty.eql(elem_ty, sema.mod)));
+                }
+
+                if (elem_ptr.index != 0) {
+                    if (elem_ty.hasWellDefinedLayout(mod)) {
+                        if (deref.parent) |*parent| {
+                            // Update the byte offset (in-place)
+                            const elem_size = try sema.typeAbiSize(elem_ty);
+                            const offset = parent.byte_offset + elem_size * elem_ptr.index;
+                            parent.byte_offset = try sema.usizeCast(block, src, offset);
+                        }
+                    } else {
+                        deref.parent = null;
+                        deref.ty_without_well_defined_layout = elem_ty;
+                    }
+                }
+
+                // If we're loading an elem_ptr that was derived from a different type
+                // than the true type of the underlying decl, we cannot deref directly
+                const ty_matches = if (deref.pointee != null and deref.pointee.?.ty.isArrayOrVector(mod)) x: {
+                    const deref_elem_ty = deref.pointee.?.ty.childType(mod);
+                    break :x (try sema.coerceInMemoryAllowed(block, deref_elem_ty, elem_ty, false, target, src, src)) == .ok or
+                        (try sema.coerceInMemoryAllowed(block, elem_ty, deref_elem_ty, false, target, src, src)) == .ok;
+                } else false;
+                if (!ty_matches) {
+                    deref.pointee = null;
+                    break :blk deref;
+                }
+
+                var array_tv = deref.pointee.?;
+                const check_len = array_tv.ty.arrayLenIncludingSentinel(mod);
+                if (maybe_array_ty) |load_ty| {
+                    // It's possible that we're loading a [N]T, in which case we'd like to slice
+                    // the pointee array directly from our parent array.
+                    if (load_ty.isArrayOrVector(mod) and load_ty.childType(mod).eql(elem_ty, sema.mod)) {
+                        const N = try sema.usizeCast(block, src, load_ty.arrayLenIncludingSentinel(mod));
+                        deref.pointee = if (elem_ptr.index + N <= check_len) TypedValue{
+                            .ty = try Type.array(sema.arena, N, null, elem_ty, sema.mod),
+                            .val = try array_tv.val.sliceArray(sema.mod, sema.arena, elem_ptr.index, elem_ptr.index + N),
+                        } else null;
+                        break :blk deref;
+                    }
+                }
+
+                if (elem_ptr.index >= check_len) {
+                    deref.pointee = null;
+                    break :blk deref;
+                }
+                if (elem_ptr.index == check_len - 1) {
+                    if (array_tv.ty.sentinel(mod)) |sent| {
+                        deref.pointee = TypedValue{
+                            .ty = elem_ty,
+                            .val = sent,
+                        };
+                        break :blk deref;
+                    }
+                }
+                deref.pointee = TypedValue{
+                    .ty = elem_ty,
+                    .val = try array_tv.val.elemValue(sema.mod, sema.arena, elem_ptr.index),
+                };
+                break :blk deref;
+            },

-            if (elem_ptr.index != 0) {
-                if (elem_ty.hasWellDefinedLayout(mod)) {
-                    if (deref.parent) |*parent| {
+            .slice => blk: {
+                const slice = ptr_val.castTag(.slice).?.data;
+                break :blk try sema.beginComptimePtrLoad(block, src, slice.ptr, null);
+            },
+
+            .field_ptr => blk: {
+                const field_ptr = ptr_val.castTag(.field_ptr).?.data;
+                const field_index = @intCast(u32, field_ptr.field_index);
+                var deref = try sema.beginComptimePtrLoad(block, src, field_ptr.container_ptr, field_ptr.container_ty);
+
+                if (field_ptr.container_ty.hasWellDefinedLayout(mod)) {
+                    const struct_ty = field_ptr.container_ty.castTag(.@"struct");
+                    if (struct_ty != null and struct_ty.?.data.layout == .Packed) {
+                        // packed structs are not byte addressable
+                        deref.parent = null;
+                    } else if (deref.parent) |*parent| {
                         // Update the byte offset (in-place)
-                        const elem_size = try sema.typeAbiSize(elem_ty);
-                        const offset = parent.byte_offset + elem_size * elem_ptr.index;
-                        parent.byte_offset = try sema.usizeCast(block, src, offset);
+                        try sema.resolveTypeLayout(field_ptr.container_ty);
+                        const field_offset = field_ptr.container_ty.structFieldOffset(field_index, mod);
+                        parent.byte_offset = try sema.usizeCast(block, src, parent.byte_offset + field_offset);
                     }
                 } else {
                     deref.parent = null;
-                    deref.ty_without_well_defined_layout = elem_ty;
+                    deref.ty_without_well_defined_layout = field_ptr.container_ty;
                 }
-            }

-            // If we're loading an elem_ptr that was derived from a different type
-            // than the true type of the underlying decl, we cannot deref directly
-            const ty_matches = if (deref.pointee != null and deref.pointee.?.ty.isArrayOrVector(mod)) x: {
-                const deref_elem_ty = deref.pointee.?.ty.childType(mod);
-                break :x (try sema.coerceInMemoryAllowed(block, deref_elem_ty, elem_ty, false, target, src, src)) == .ok or
-                    (try sema.coerceInMemoryAllowed(block, elem_ty, deref_elem_ty, false, target, src, src)) == .ok;
-            } else false;
-            if (!ty_matches) {
-                deref.pointee = null;
-                break :blk deref;
-            }
-
-            var array_tv = deref.pointee.?;
-            const check_len = array_tv.ty.arrayLenIncludingSentinel(mod);
-            if (maybe_array_ty) |load_ty| {
-                // It's possible that we're loading a [N]T, in which case we'd like to slice
-                // the pointee array directly from our parent array.
-                if (load_ty.isArrayOrVector(mod) and load_ty.childType(mod).eql(elem_ty, sema.mod)) {
-                    const N = try sema.usizeCast(block, src, load_ty.arrayLenIncludingSentinel(mod));
-                    deref.pointee = if (elem_ptr.index + N <= check_len) TypedValue{
-                        .ty = try Type.array(sema.arena, N, null, elem_ty, sema.mod),
-                        .val = try array_tv.val.sliceArray(sema.mod, sema.arena, elem_ptr.index, elem_ptr.index + N),
-                    } else null;
+                const tv = deref.pointee orelse {
+                    deref.pointee = null;
+                    break :blk deref;
+                };
+                const coerce_in_mem_ok =
+                    (try sema.coerceInMemoryAllowed(block, field_ptr.container_ty, tv.ty, false, target, src, src)) == .ok or
+                    (try sema.coerceInMemoryAllowed(block, tv.ty, field_ptr.container_ty, false, target, src, src)) == .ok;
+                if (!coerce_in_mem_ok) {
+                    deref.pointee = null;
                     break :blk deref;
                 }
-            }

-            if (elem_ptr.index >= check_len) {
-                deref.pointee = null;
-                break :blk deref;
-            }
-            if (elem_ptr.index == check_len - 1) {
-                if (array_tv.ty.sentinel(mod)) |sent| {
+                if (field_ptr.container_ty.isSlice(mod)) {
+                    const slice_val = tv.val.castTag(.slice).?.data;
+                    deref.pointee = switch (field_index) {
+                        Value.Payload.Slice.ptr_index => TypedValue{
+                            .ty = field_ptr.container_ty.slicePtrFieldType(try sema.arena.create(Type.SlicePtrFieldTypeBuffer)),
+                            .val = slice_val.ptr,
+                        },
+                        Value.Payload.Slice.len_index => TypedValue{
+                            .ty = Type.usize,
+                            .val = slice_val.len,
+                        },
+                        else => unreachable,
+                    };
+                } else {
+                    const field_ty = field_ptr.container_ty.structFieldType(field_index);
                     deref.pointee = TypedValue{
-                        .ty = elem_ty,
-                        .val = sent,
+                        .ty = field_ty,
+                        .val = tv.val.fieldValue(tv.ty, mod, field_index),
                     };
-                    break :blk deref;
                 }
-            }
-            deref.pointee = TypedValue{
-                .ty = elem_ty,
-                .val = try array_tv.val.elemValue(sema.mod, sema.arena, elem_ptr.index),
-            };
-            break :blk deref;
-        },
+                break :blk deref;
+            },

-        .slice => blk: {
-            const slice = ptr_val.castTag(.slice).?.data;
-            break :blk try sema.beginComptimePtrLoad(block, src, slice.ptr, null);
-        },
+            .comptime_field_ptr => blk: {
+                const comptime_field_ptr = ptr_val.castTag(.comptime_field_ptr).?.data;
+                break :blk ComptimePtrLoadKit{
+                    .parent = null,
+                    .pointee = .{ .ty = comptime_field_ptr.field_ty, .val = comptime_field_ptr.field_val },
+                    .is_mutable = false,
+                    .ty_without_well_defined_layout = comptime_field_ptr.field_ty,
+                };
+            },

-        .field_ptr => blk: {
-            const field_ptr = ptr_val.castTag(.field_ptr).?.data;
-            const field_index = @intCast(u32, field_ptr.field_index);
-            var deref = try sema.beginComptimePtrLoad(block, src, field_ptr.container_ptr, field_ptr.container_ty);
+            .opt_payload_ptr,
+            .eu_payload_ptr,
+            => blk: {
+                const payload_ptr = ptr_val.cast(Value.Payload.PayloadPtr).?.data;
+                const payload_ty = switch (ptr_val.tag()) {
+                    .eu_payload_ptr => payload_ptr.container_ty.errorUnionPayload(),
+                    .opt_payload_ptr => payload_ptr.container_ty.optionalChild(mod),
+                    else => unreachable,
+                };
+                var deref = try sema.beginComptimePtrLoad(block, src, payload_ptr.container_ptr, payload_ptr.container_ty);

-            if (field_ptr.container_ty.hasWellDefinedLayout(mod)) {
-                const struct_ty = field_ptr.container_ty.castTag(.@"struct");
-                if (struct_ty != null and struct_ty.?.data.layout == .Packed) {
-                    // packed structs are not byte addressable
+                // eu_payload_ptr and opt_payload_ptr never have a well-defined layout
+                if (deref.parent != null) {
                     deref.parent = null;
-                } else if (deref.parent) |*parent| {
-                    // Update the byte offset (in-place)
-                    try sema.resolveTypeLayout(field_ptr.container_ty);
-                    const field_offset =
-                        field_ptr.container_ty.structFieldOffset(field_index, mod);
-                    parent.byte_offset = try sema.usizeCast(block, src, parent.byte_offset + field_offset);
+                    deref.ty_without_well_defined_layout = payload_ptr.container_ty;
                 }
-            } else {
-                deref.parent = null;
-                deref.ty_without_well_defined_layout = field_ptr.container_ty;
-            }

-            const tv = deref.pointee orelse {
-                deref.pointee = null;
-                break :blk deref;
-            };
-            const coerce_in_mem_ok =
-                (try sema.coerceInMemoryAllowed(block, field_ptr.container_ty, tv.ty, false, target, src, src)) == .ok or
-                (try sema.coerceInMemoryAllowed(block, tv.ty, field_ptr.container_ty, false, target, src, src)) == .ok;
-            if (!coerce_in_mem_ok) {
+                if (deref.pointee) |*tv| {
+                    const coerce_in_mem_ok =
+                        (try sema.coerceInMemoryAllowed(block, payload_ptr.container_ty, tv.ty, false, target, src, src)) == .ok or
+                        (try sema.coerceInMemoryAllowed(block, tv.ty, payload_ptr.container_ty, false, target, src, src)) == .ok;
+                    if (coerce_in_mem_ok) {
+                        const payload_val = switch (ptr_val.tag()) {
+                            .eu_payload_ptr => if (tv.val.castTag(.eu_payload)) |some| some.data else {
+                                return sema.fail(block, src, "attempt to unwrap error: {s}", .{tv.val.castTag(.@"error").?.data.name});
+                            },
+                            .opt_payload_ptr => if (tv.val.castTag(.opt_payload)) |some| some.data else opt: {
+                                if (tv.val.isNull(mod)) return sema.fail(block, src, "attempt to use null value", .{});
+                                break :opt tv.val;
+                            },
+                            else => unreachable,
+                        };
+                        tv.* = TypedValue{ .ty = payload_ty, .val = payload_val };
+                        break :blk deref;
+                    }
+                }
                 deref.pointee = null;
                 break :blk deref;
-            }
-
-            if (field_ptr.container_ty.isSlice(mod)) {
-                const slice_val = tv.val.castTag(.slice).?.data;
-                deref.pointee = switch (field_index) {
-                    Value.Payload.Slice.ptr_index => TypedValue{
-                        .ty = field_ptr.container_ty.slicePtrFieldType(try sema.arena.create(Type.SlicePtrFieldTypeBuffer)),
-                        .val = slice_val.ptr,
-                    },
-                    Value.Payload.Slice.len_index => TypedValue{
-                        .ty = Type.usize,
-                        .val = slice_val.len,
-                    },
-                    else => unreachable,
-                };
-            } else {
-                const field_ty = field_ptr.container_ty.structFieldType(field_index);
-                deref.pointee = TypedValue{
-                    .ty = field_ty,
-                    .val = tv.val.fieldValue(tv.ty, mod, field_index),
-                };
-            }
-            break :blk deref;
-        },
-
-        .comptime_field_ptr => blk: {
-            const comptime_field_ptr = ptr_val.castTag(.comptime_field_ptr).?.data;
-            break :blk ComptimePtrLoadKit{
-                .parent = null,
-                .pointee = .{ .ty = comptime_field_ptr.field_ty, .val = comptime_field_ptr.field_val },
-                .is_mutable = false,
-                .ty_without_well_defined_layout = comptime_field_ptr.field_ty,
-            };
-        },
-
-        .opt_payload_ptr,
-        .eu_payload_ptr,
-        => blk: {
-            const payload_ptr = ptr_val.cast(Value.Payload.PayloadPtr).?.data;
-            const payload_ty = switch (ptr_val.tag()) {
-                .eu_payload_ptr => payload_ptr.container_ty.errorUnionPayload(),
-                .opt_payload_ptr => payload_ptr.container_ty.optionalChild(mod),
-                else => unreachable,
-            };
-            var deref = try sema.beginComptimePtrLoad(block, src, payload_ptr.container_ptr, payload_ptr.container_ty);
+            },

-            // eu_payload_ptr and opt_payload_ptr never have a well-defined layout
-            if (deref.parent != null) {
-                deref.parent = null;
-                deref.ty_without_well_defined_layout = payload_ptr.container_ty;
-            }
-
-            if
-                (deref.pointee) |*tv| {
-                const coerce_in_mem_ok =
-                    (try sema.coerceInMemoryAllowed(block, payload_ptr.container_ty, tv.ty, false, target, src, src)) == .ok or
-                    (try sema.coerceInMemoryAllowed(block, tv.ty, payload_ptr.container_ty, false, target, src, src)) == .ok;
-                if (coerce_in_mem_ok) {
-                    const payload_val = switch (ptr_val.tag()) {
-                        .eu_payload_ptr => if (tv.val.castTag(.eu_payload)) |some| some.data else {
-                            return sema.fail(block, src, "attempt to unwrap error: {s}", .{tv.val.castTag(.@"error").?.data.name});
-                        },
-                        .opt_payload_ptr => if (tv.val.castTag(.opt_payload)) |some| some.data else opt: {
-                            if (tv.val.isNull(mod)) return sema.fail(block, src, "attempt to use null value", .{});
-                            break :opt tv.val;
-                        },
-                        else => unreachable,
-                    };
-                    tv.* = TypedValue{ .ty = payload_ty, .val = payload_val };
-                    break :blk deref;
-                }
-            }
-            deref.pointee = null;
-            break :blk deref;
-        },
-        .null_value => {
-            return sema.fail(block, src, "attempt to use null value", .{});
-        },
-        .opt_payload => blk: {
-            const opt_payload = ptr_val.castTag(.opt_payload).?.data;
-            break :blk try sema.beginComptimePtrLoad(block, src, opt_payload, null);
+            .opt_payload => blk: {
+                const opt_payload = ptr_val.castTag(.opt_payload).?.data;
+                break :blk try sema.beginComptimePtrLoad(block, src, opt_payload, null);
+            },
+
+            .zero,
+            .one,
+            .int_u64,
+            .int_i64,
+            .int_big_positive,
+            .int_big_negative,
+            .variable,
+            .extern_fn,
+            .function,
+            => return error.RuntimeLoad,
+
+            else => unreachable,
         },
-
-        .zero,
-        .one,
-        .int_u64,
-        .int_i64,
-        .int_big_positive,
-        .int_big_negative,
-        .variable,
-        .extern_fn,
-        .function,
-        => return error.RuntimeLoad,
-
         else => unreachable,
     };
@@ -28953,7 +28973,7 @@ fn coerceTupleToStruct(
         const field_name = fields.keys()[i];
         const field = fields.values()[i];
         const field_src = inst_src; // TODO better source location
-        if (field.default_val.tag() == .unreachable_value) {
+        if (field.default_val.ip_index == .unreachable_value) {
            const template = "missing struct field: {s}";
            const args = .{field_name};
            if (root_msg) |msg| {
@@ -29023,7 +29043,7 @@ fn coerceTupleToTuple(
         const elem_ref = try sema.tupleField(block, inst_src, inst, field_src, field_i);
         const coerced = try sema.coerce(block, field_ty, elem_ref, field_src);
         field_refs[field_index] = coerced;
-        if (default_val.tag() != .unreachable_value) {
+        if (default_val.ip_index != .unreachable_value) {
             const init_val = (try sema.resolveMaybeUndefVal(coerced)) orelse {
                 return sema.failWithNeededComptime(block, field_src, "value stored in comptime field must be comptime-known");
             };
@@ -29052,7 +29072,7 @@ fn coerceTupleToTuple(
         const field_ty = tuple_ty.structFieldType(i);
         const field_src = inst_src; // TODO better source location

-        if (default_val.tag() == .unreachable_value) {
+        if (default_val.ip_index == .unreachable_value) {
             if (tuple_ty.isTuple()) {
                 const template = "missing tuple field: {d}";
                 if (root_msg) |msg| {
@@ -31557,7 +31577,7 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
         .tuple, .anon_struct => {
             const tuple = ty.tupleFields();
             for (tuple.types, 0..) |field_ty, i| {
-                const have_comptime_val = tuple.values[i].tag() != .unreachable_value;
+                const have_comptime_val = tuple.values[i].ip_index != .unreachable_value;
                 if (!have_comptime_val and try sema.resolveTypeRequiresComptime(field_ty)) {
                     return true;
                 }
@@ -32141,7 +32161,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void
         gop.value_ptr.* = .{
             .ty = Type.noreturn,
             .abi_align = 0,
-            .default_val = Value.initTag(.unreachable_value),
+            .default_val = Value.@"unreachable",
             .is_comptime = is_comptime,
             .offset = undefined,
         };
@@ -32965,7 +32985,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
         => return null,

         .void => return Value.void,
-        .noreturn => return Value.initTag(.unreachable_value),
+        .noreturn => return Value.@"unreachable",
         .null => return Value.null,
         .undefined => return Value.undef,
@@ -33027,7 +33047,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
         .tuple, .anon_struct => {
             const tuple = ty.tupleFields();
             for (tuple.values, 0..) |val, i| {
-                const is_comptime = val.tag() != .unreachable_value;
+                const is_comptime = val.ip_index != .unreachable_value;
                 if (is_comptime) continue;
                 if ((try sema.typeHasOnePossibleValue(tuple.types[i])) != null) continue;
                 return null;
@@ -33059,7 +33079,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
                 return null;
             }
             switch (enum_obj.fields.count()) {
-                0 => return Value.initTag(.unreachable_value),
+                0 => return Value.@"unreachable",
                 1 => if (enum_obj.values.count() == 0) {
                     return Value.zero; // auto-numbered
                 } else {
@@ -33072,7 +33092,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
             const resolved_ty = try sema.resolveTypeFields(ty);
             const enum_simple = resolved_ty.castTag(.enum_simple).?.data;
             switch (enum_simple.fields.count()) {
-                0 => return Value.initTag(.unreachable_value),
+                0 => return Value.@"unreachable",
                 1 => return Value.zero,
                 else => return null,
             }
@@ -33091,7 +33111,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
             const tag_val = (try sema.typeHasOnePossibleValue(union_obj.tag_ty)) orelse
                 return null;
             const fields = union_obj.fields.values();
-            if (fields.len == 0) return Value.initTag(.unreachable_value);
+            if (fields.len == 0) return Value.@"unreachable";
             const only_field = fields[0];
             if (only_field.ty.eql(resolved_ty, sema.mod)) {
                 const msg = try Module.ErrorMsg.create(
@@ -33600,7 +33620,7 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
         .tuple, .anon_struct => {
             const tuple = ty.tupleFields();
             for (tuple.types, 0..) |field_ty, i| {
-                const have_comptime_val = tuple.values[i].tag() != .unreachable_value;
+                const have_comptime_val = tuple.values[i].ip_index != .unreachable_value;
                 if (!have_comptime_val and try sema.typeRequiresComptime(field_ty)) {
                     return true;
                 }
@@ -33814,7 +33834,7 @@ fn numberAddWrapScalar(
     rhs: Value,
     ty: Type,
 ) !Value {
-    if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef);
+    if (lhs.isUndef() or rhs.isUndef()) return Value.undef;
     const mod = sema.mod;

     if (ty.zigTypeTag(mod) == .ComptimeInt) {
@@ -33874,7 +33894,7 @@ fn numberSubWrapScalar(
     rhs: Value,
     ty: Type,
 ) !Value {
-    if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef);
+    if (lhs.isUndef() or rhs.isUndef()) return Value.undef;
     const mod = sema.mod;

     if (ty.zigTypeTag(mod) == .ComptimeInt) {
@@ -34156,12 +34176,16 @@ fn intFitsInType(
 ) CompileError!bool {
     const mod = sema.mod;
     const target = mod.getTarget();
-    switch (val.tag()) {
-        .zero,
+    switch (val.ip_index) {
         .undef,
+        .zero,
+        .zero_usize,
+        .zero_u8,
         => return true,

-        .one => switch (ty.zigTypeTag(mod)) {
+        .one,
+        .one_usize,
+        => switch (ty.zigTypeTag(mod)) {
             .Int => {
                 const info = ty.intInfo(mod);
                 return switch (info.signedness) {
@@ -34173,111 +34197,129 @@ fn intFitsInType(
             else => unreachable,
         },

-        .lazy_align => switch (ty.zigTypeTag(mod)) {
-            .Int => {
-                const info = ty.intInfo(mod);
-                const max_needed_bits = @as(u16, 16) + @boolToInt(info.signedness == .signed);
-                // If it is u16 or bigger we know the alignment fits without resolving it.
-                if (info.bits >= max_needed_bits) return true;
-                const x = try sema.typeAbiAlignment(val.castTag(.lazy_align).?.data);
-                if (x == 0) return true;
-                const actual_needed_bits = std.math.log2(x) + 1 + @boolToInt(info.signedness == .signed);
-                return info.bits >= actual_needed_bits;
+        .none => switch (val.tag()) {
+            .zero => return true,
+
+            .one => switch (ty.zigTypeTag(mod)) {
+                .Int => {
+                    const info = ty.intInfo(mod);
+                    return switch (info.signedness) {
+                        .signed => info.bits >= 2,
+                        .unsigned => info.bits >= 1,
+                    };
+                },
+                .ComptimeInt => return true,
+                else => unreachable,
             },
-            .ComptimeInt => return true,
-            else => unreachable,
-        },
-        .lazy_size => switch (ty.zigTypeTag(mod)) {
-            .Int => {
-                const info = ty.intInfo(mod);
-                const max_needed_bits = @as(u16, 64) + @boolToInt(info.signedness == .signed);
-                // If it is u64 or bigger we know the size fits without resolving it.
-                if (info.bits >= max_needed_bits) return true;
-                const x = try sema.typeAbiSize(val.castTag(.lazy_size).?.data);
-                if (x == 0) return true;
-                const actual_needed_bits = std.math.log2(x) + 1 + @boolToInt(info.signedness == .signed);
-                return info.bits >= actual_needed_bits;
+
+            .lazy_align => switch (ty.zigTypeTag(mod)) {
+                .Int => {
+                    const info = ty.intInfo(mod);
+                    const max_needed_bits = @as(u16, 16) + @boolToInt(info.signedness == .signed);
+                    // If it is u16 or bigger we know the alignment fits without resolving it.
+                    if (info.bits >= max_needed_bits) return true;
+                    const x = try sema.typeAbiAlignment(val.castTag(.lazy_align).?.data);
+                    if (x == 0) return true;
+                    const actual_needed_bits = std.math.log2(x) + 1 + @boolToInt(info.signedness == .signed);
+                    return info.bits >= actual_needed_bits;
+                },
+                .ComptimeInt => return true,
+                else => unreachable,
+            },
+            .lazy_size => switch (ty.zigTypeTag(mod)) {
+                .Int => {
+                    const info = ty.intInfo(mod);
+                    const max_needed_bits = @as(u16, 64) + @boolToInt(info.signedness == .signed);
+                    // If it is u64 or bigger we know the size fits without resolving it.
+                    if (info.bits >= max_needed_bits) return true;
+                    const x = try sema.typeAbiSize(val.castTag(.lazy_size).?.data);
+                    if (x == 0) return true;
+                    const actual_needed_bits = std.math.log2(x) + 1 + @boolToInt(info.signedness == .signed);
+                    return info.bits >= actual_needed_bits;
+                },
+                .ComptimeInt => return true,
+                else => unreachable,
             },
-            .ComptimeInt => return true,
-            else => unreachable,
-        },
-        .int_u64 => switch (ty.zigTypeTag(mod)) {
-            .Int => {
-                const x = val.castTag(.int_u64).?.data;
-                if (x == 0) return true;
-                const info = ty.intInfo(mod);
-                const needed_bits = std.math.log2(x) + 1 + @boolToInt(info.signedness == .signed);
-                return info.bits >= needed_bits;
+            .int_u64 => switch (ty.zigTypeTag(mod)) {
+                .Int => {
+                    const x = val.castTag(.int_u64).?.data;
+                    if (x == 0) return true;
+                    const info = ty.intInfo(mod);
+                    const needed_bits = std.math.log2(x) + 1 + @boolToInt(info.signedness == .signed);
+                    return info.bits >= needed_bits;
+                },
+                .ComptimeInt => return true,
+                else => unreachable,
             },
-            .ComptimeInt => return true,
-            else => unreachable,
-        },
-        .int_i64 => switch (ty.zigTypeTag(mod)) {
-            .Int => {
-                const x = val.castTag(.int_i64).?.data;
-                if (x == 0) return true;
-                const info = ty.intInfo(mod);
-                if (info.signedness == .unsigned and x < 0)
-                    return false;
-                var buffer: Value.BigIntSpace = undefined;
-                return (try val.toBigIntAdvanced(&buffer, mod, sema)).fitsInTwosComp(info.signedness, info.bits);
+            .int_i64 => switch (ty.zigTypeTag(mod)) {
+                .Int => {
+                    const x = val.castTag(.int_i64).?.data;
+                    if (x == 0) return true;
+                    const info = ty.intInfo(mod);
+                    if (info.signedness == .unsigned and x < 0)
+                        return false;
+                    var buffer: Value.BigIntSpace = undefined;
+                    return (try val.toBigIntAdvanced(&buffer, mod, sema)).fitsInTwosComp(info.signedness, info.bits);
+                },
+                .ComptimeInt => return true,
+                else => unreachable,
             },
-            .ComptimeInt => return true,
-            else => unreachable,
-        },
-        .int_big_positive => switch (ty.zigTypeTag(mod)) {
-            .Int => {
-                const info = ty.intInfo(mod);
-                return val.castTag(.int_big_positive).?.asBigInt().fitsInTwosComp(info.signedness, info.bits);
+            .int_big_positive => switch (ty.zigTypeTag(mod)) {
+                .Int => {
+                    const info = ty.intInfo(mod);
+                    return val.castTag(.int_big_positive).?.asBigInt().fitsInTwosComp(info.signedness, info.bits);
+                },
+                .ComptimeInt => return true,
+                else => unreachable,
             },
-            .ComptimeInt => return true,
-            else => unreachable,
-        },
-        .int_big_negative => switch (ty.zigTypeTag(mod)) {
-            .Int => {
-                const info = ty.intInfo(mod);
-                return val.castTag(.int_big_negative).?.asBigInt().fitsInTwosComp(info.signedness, info.bits);
+            .int_big_negative => switch (ty.zigTypeTag(mod)) {
+                .Int => {
+                    const info = ty.intInfo(mod);
+                    return val.castTag(.int_big_negative).?.asBigInt().fitsInTwosComp(info.signedness, info.bits);
+                },
+                .ComptimeInt => return true,
+                else => unreachable,
             },
-            .ComptimeInt => return true,
-            else => unreachable,
-        },

-        .the_only_possible_value => {
-            assert(ty.intInfo(mod).bits == 0);
-            return true;
-        },
+            .the_only_possible_value => {
+                assert(ty.intInfo(mod).bits == 0);
+                return true;
+            },

-        .decl_ref_mut,
-        .extern_fn,
-        .decl_ref,
-        .function,
-        .variable,
-        => switch (ty.zigTypeTag(mod)) {
-            .Int => {
-                const info = ty.intInfo(mod);
-                const ptr_bits = target.ptrBitWidth();
-                return switch (info.signedness) {
-                    .signed => info.bits > ptr_bits,
-                    .unsigned => info.bits >= ptr_bits,
-                };
+            .decl_ref_mut,
+            .extern_fn,
+            .decl_ref,
+            .function,
+            .variable,
+            => switch (ty.zigTypeTag(mod)) {
+                .Int => {
+                    const info =
ty.intInfo(mod); + const ptr_bits = target.ptrBitWidth(); + return switch (info.signedness) { + .signed => info.bits > ptr_bits, + .unsigned => info.bits >= ptr_bits, + }; + }, + .ComptimeInt => return true, + else => unreachable, }, - .ComptimeInt => return true, - else => unreachable, - }, - .aggregate => { - assert(ty.zigTypeTag(mod) == .Vector); - for (val.castTag(.aggregate).?.data, 0..) |elem, i| { - if (!(try sema.intFitsInType(elem, ty.scalarType(mod), null))) { - if (vector_index) |some| some.* = i; - return false; + .aggregate => { + assert(ty.zigTypeTag(mod) == .Vector); + for (val.castTag(.aggregate).?.data, 0..) |elem, i| { + if (!(try sema.intFitsInType(elem, ty.scalarType(mod), null))) { + if (vector_index) |some| some.* = i; + return false; + } } - } - return true; + return true; + }, + + else => unreachable, }, - else => unreachable, + else => @panic("TODO"), } } diff --git a/src/TypedValue.zig b/src/TypedValue.zig index 7f599caafb..0efd396373 100644 --- a/src/TypedValue.zig +++ b/src/TypedValue.zig @@ -76,34 +76,236 @@ pub fn print( if (val.isVariable(mod)) return writer.writeAll("(variable)"); - while (true) switch (val.tag()) { - .empty_struct_value, .aggregate => { - if (level == 0) { - return writer.writeAll(".{ ... }"); - } - if (ty.zigTypeTag(mod) == .Struct) { - try writer.writeAll(".{"); - const max_len = std.math.min(ty.structFieldCount(), max_aggregate_items); + while (true) switch (val.ip_index) { + .none => switch (val.tag()) { + .empty_struct_value, .aggregate => { + if (level == 0) { + return writer.writeAll(".{ ... }"); + } + if (ty.zigTypeTag(mod) == .Struct) { + try writer.writeAll(".{"); + const max_len = std.math.min(ty.structFieldCount(), max_aggregate_items); - var i: u32 = 0; - while (i < max_len) : (i += 1) { - if (i != 0) try writer.writeAll(", "); - switch (ty.tag()) { - .anon_struct, .@"struct" => try writer.print(".{s} = ", .{ty.structFieldName(i)}), - else => {}, + var i: u32 = 0; + while (i < max_len) : (i += 1) { + if (i != 0) try writer.writeAll(", "); + switch (ty.tag()) { + .anon_struct, .@"struct" => try writer.print(".{s} = ", .{ty.structFieldName(i)}), + else => {}, + } + try print(.{ + .ty = ty.structFieldType(i), + .val = val.fieldValue(ty, mod, i), + }, writer, level - 1, mod); } + if (ty.structFieldCount() > max_aggregate_items) { + try writer.writeAll(", ..."); + } + return writer.writeAll("}"); + } else { + const elem_ty = ty.elemType2(mod); + const len = ty.arrayLen(mod); + + if (elem_ty.eql(Type.u8, mod)) str: { + const max_len = @intCast(usize, std.math.min(len, max_string_len)); + var buf: [max_string_len]u8 = undefined; + + var i: u32 = 0; + while (i < max_len) : (i += 1) { + const elem = val.fieldValue(ty, mod, i); + if (elem.isUndef()) break :str; + buf[i] = std.math.cast(u8, elem.toUnsignedInt(mod)) orelse break :str; + } + + const truncated = if (len > max_string_len) " (truncated)" else ""; + return writer.print("\"{}{s}\"", .{ std.zig.fmtEscapes(buf[0..max_len]), truncated }); + } + + try writer.writeAll(".{ "); + + const max_len = std.math.min(len, max_aggregate_items); + var i: u32 = 0; + while (i < max_len) : (i += 1) { + if (i != 0) try writer.writeAll(", "); + try print(.{ + .ty = elem_ty, + .val = val.fieldValue(ty, mod, i), + }, writer, level - 1, mod); + } + if (len > max_aggregate_items) { + try writer.writeAll(", ..."); + } + return writer.writeAll(" }"); + } + }, + .@"union" => { + if (level == 0) { + return writer.writeAll(".{ ... 
}"); + } + const union_val = val.castTag(.@"union").?.data; + try writer.writeAll(".{ "); + + try print(.{ + .ty = ty.cast(Type.Payload.Union).?.data.tag_ty, + .val = union_val.tag, + }, writer, level - 1, mod); + try writer.writeAll(" = "); + try print(.{ + .ty = ty.unionFieldType(union_val.tag, mod), + .val = union_val.val, + }, writer, level - 1, mod); + + return writer.writeAll(" }"); + }, + .zero => return writer.writeAll("0"), + .one => return writer.writeAll("1"), + .the_only_possible_value => return writer.writeAll("0"), + .ty => return val.castTag(.ty).?.data.print(writer, mod), + .int_u64 => return std.fmt.formatIntValue(val.castTag(.int_u64).?.data, "", .{}, writer), + .int_i64 => return std.fmt.formatIntValue(val.castTag(.int_i64).?.data, "", .{}, writer), + .int_big_positive => return writer.print("{}", .{val.castTag(.int_big_positive).?.asBigInt()}), + .int_big_negative => return writer.print("{}", .{val.castTag(.int_big_negative).?.asBigInt()}), + .lazy_align => { + const sub_ty = val.castTag(.lazy_align).?.data; + const x = sub_ty.abiAlignment(mod); + return writer.print("{d}", .{x}); + }, + .lazy_size => { + const sub_ty = val.castTag(.lazy_size).?.data; + const x = sub_ty.abiSize(mod); + return writer.print("{d}", .{x}); + }, + .function => return writer.print("(function '{s}')", .{ + mod.declPtr(val.castTag(.function).?.data.owner_decl).name, + }), + .extern_fn => return writer.writeAll("(extern function)"), + .variable => unreachable, + .decl_ref_mut => { + const decl_index = val.castTag(.decl_ref_mut).?.data.decl_index; + const decl = mod.declPtr(decl_index); + if (level == 0) { + return writer.print("(decl ref mut '{s}')", .{decl.name}); + } + return print(.{ + .ty = decl.ty, + .val = decl.val, + }, writer, level - 1, mod); + }, + .decl_ref => { + const decl_index = val.castTag(.decl_ref).?.data; + const decl = mod.declPtr(decl_index); + if (level == 0) { + return writer.print("(decl ref '{s}')", .{decl.name}); + } + return print(.{ + .ty = decl.ty, + .val = decl.val, + }, writer, level - 1, mod); + }, + .comptime_field_ptr => { + const payload = val.castTag(.comptime_field_ptr).?.data; + if (level == 0) { + return writer.writeAll("(comptime field ptr)"); + } + return print(.{ + .ty = payload.field_ty, + .val = payload.field_val, + }, writer, level - 1, mod); + }, + .elem_ptr => { + const elem_ptr = val.castTag(.elem_ptr).?.data; + try writer.writeAll("&"); + if (level == 0) { + try writer.writeAll("(ptr)"); + } else { try print(.{ - .ty = ty.structFieldType(i), - .val = val.fieldValue(ty, mod, i), + .ty = elem_ptr.elem_ty, + .val = elem_ptr.array_ptr, }, writer, level - 1, mod); } - if (ty.structFieldCount() > max_aggregate_items) { + return writer.print("[{}]", .{elem_ptr.index}); + }, + .field_ptr => { + const field_ptr = val.castTag(.field_ptr).?.data; + try writer.writeAll("&"); + if (level == 0) { + try writer.writeAll("(ptr)"); + } else { + try print(.{ + .ty = field_ptr.container_ty, + .val = field_ptr.container_ptr, + }, writer, level - 1, mod); + } + + if (field_ptr.container_ty.zigTypeTag(mod) == .Struct) { + switch (field_ptr.container_ty.tag()) { + .tuple => return writer.print(".@\"{d}\"", .{field_ptr.field_index}), + else => { + const field_name = field_ptr.container_ty.structFieldName(field_ptr.field_index); + return writer.print(".{s}", .{field_name}); + }, + } + } else if (field_ptr.container_ty.zigTypeTag(mod) == .Union) { + const field_name = field_ptr.container_ty.unionFields().keys()[field_ptr.field_index]; + return writer.print(".{s}", 
.{field_name}); + } else if (field_ptr.container_ty.isSlice(mod)) { + switch (field_ptr.field_index) { + Value.Payload.Slice.ptr_index => return writer.writeAll(".ptr"), + Value.Payload.Slice.len_index => return writer.writeAll(".len"), + else => unreachable, + } + } + }, + .empty_array => return writer.writeAll(".{}"), + .enum_literal => return writer.print(".{}", .{std.zig.fmtId(val.castTag(.enum_literal).?.data)}), + .enum_field_index => { + return writer.print(".{s}", .{ty.enumFieldName(val.castTag(.enum_field_index).?.data)}); + }, + .bytes => return writer.print("\"{}\"", .{std.zig.fmtEscapes(val.castTag(.bytes).?.data)}), + .str_lit => { + const str_lit = val.castTag(.str_lit).?.data; + const bytes = mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; + return writer.print("\"{}\"", .{std.zig.fmtEscapes(bytes)}); + }, + .repeated => { + if (level == 0) { + return writer.writeAll(".{ ... }"); + } + var i: u32 = 0; + try writer.writeAll(".{ "); + const elem_tv = TypedValue{ + .ty = ty.elemType2(mod), + .val = val.castTag(.repeated).?.data, + }; + const len = ty.arrayLen(mod); + const max_len = std.math.min(len, max_aggregate_items); + while (i < max_len) : (i += 1) { + if (i != 0) try writer.writeAll(", "); + try print(elem_tv, writer, level - 1, mod); + } + if (len > max_aggregate_items) { try writer.writeAll(", ..."); } - return writer.writeAll("}"); - } else { + return writer.writeAll(" }"); + }, + .empty_array_sentinel => { + if (level == 0) { + return writer.writeAll(".{ (sentinel) }"); + } + try writer.writeAll(".{ "); + try print(.{ + .ty = ty.elemType2(mod), + .val = ty.sentinel(mod).?, + }, writer, level - 1, mod); + return writer.writeAll(" }"); + }, + .slice => { + if (level == 0) { + return writer.writeAll(".{ ... }"); + } + const payload = val.castTag(.slice).?.data; const elem_ty = ty.elemType2(mod); - const len = ty.arrayLen(mod); + const len = payload.len.toUnsignedInt(mod); if (elem_ty.eql(Type.u8, mod)) str: { const max_len = @intCast(usize, std.math.min(len, max_string_len)); @@ -111,11 +313,13 @@ pub fn print( var i: u32 = 0; while (i < max_len) : (i += 1) { - const elem = val.fieldValue(ty, mod, i); - if (elem.isUndef()) break :str; - buf[i] = std.math.cast(u8, elem.toUnsignedInt(mod)) orelse break :str; + var elem_buf: Value.ElemValueBuffer = undefined; + const elem_val = payload.ptr.elemValueBuffer(mod, i, &elem_buf); + if (elem_val.isUndef()) break :str; + buf[i] = std.math.cast(u8, elem_val.toUnsignedInt(mod)) orelse break :str; } + // TODO would be nice if this had a bit of unicode awareness. const truncated = if (len > max_string_len) " (truncated)" else ""; return writer.print("\"{}{s}\"", .{ std.zig.fmtEscapes(buf[0..max_len]), truncated }); } @@ -126,292 +330,91 @@ pub fn print( var i: u32 = 0; while (i < max_len) : (i += 1) { if (i != 0) try writer.writeAll(", "); + var buf: Value.ElemValueBuffer = undefined; try print(.{ .ty = elem_ty, - .val = val.fieldValue(ty, mod, i), + .val = payload.ptr.elemValueBuffer(mod, i, &buf), }, writer, level - 1, mod); } if (len > max_aggregate_items) { try writer.writeAll(", ..."); } return writer.writeAll(" }"); - } - }, - .@"union" => { - if (level == 0) { - return writer.writeAll(".{ ... 
}"); - } - const union_val = val.castTag(.@"union").?.data; - try writer.writeAll(".{ "); - - try print(.{ - .ty = ty.cast(Type.Payload.Union).?.data.tag_ty, - .val = union_val.tag, - }, writer, level - 1, mod); - try writer.writeAll(" = "); - try print(.{ - .ty = ty.unionFieldType(union_val.tag, mod), - .val = union_val.val, - }, writer, level - 1, mod); - - return writer.writeAll(" }"); - }, - .null_value => return writer.writeAll("null"), - .undef => return writer.writeAll("undefined"), - .zero => return writer.writeAll("0"), - .one => return writer.writeAll("1"), - .unreachable_value => return writer.writeAll("unreachable"), - .the_only_possible_value => return writer.writeAll("0"), - .ty => return val.castTag(.ty).?.data.print(writer, mod), - .int_u64 => return std.fmt.formatIntValue(val.castTag(.int_u64).?.data, "", .{}, writer), - .int_i64 => return std.fmt.formatIntValue(val.castTag(.int_i64).?.data, "", .{}, writer), - .int_big_positive => return writer.print("{}", .{val.castTag(.int_big_positive).?.asBigInt()}), - .int_big_negative => return writer.print("{}", .{val.castTag(.int_big_negative).?.asBigInt()}), - .lazy_align => { - const sub_ty = val.castTag(.lazy_align).?.data; - const x = sub_ty.abiAlignment(mod); - return writer.print("{d}", .{x}); - }, - .lazy_size => { - const sub_ty = val.castTag(.lazy_size).?.data; - const x = sub_ty.abiSize(mod); - return writer.print("{d}", .{x}); - }, - .function => return writer.print("(function '{s}')", .{ - mod.declPtr(val.castTag(.function).?.data.owner_decl).name, - }), - .extern_fn => return writer.writeAll("(extern function)"), - .variable => unreachable, - .decl_ref_mut => { - const decl_index = val.castTag(.decl_ref_mut).?.data.decl_index; - const decl = mod.declPtr(decl_index); - if (level == 0) { - return writer.print("(decl ref mut '{s}')", .{decl.name}); - } - return print(.{ - .ty = decl.ty, - .val = decl.val, - }, writer, level - 1, mod); - }, - .decl_ref => { - const decl_index = val.castTag(.decl_ref).?.data; - const decl = mod.declPtr(decl_index); - if (level == 0) { - return writer.print("(decl ref '{s}')", .{decl.name}); - } - return print(.{ - .ty = decl.ty, - .val = decl.val, - }, writer, level - 1, mod); - }, - .comptime_field_ptr => { - const payload = val.castTag(.comptime_field_ptr).?.data; - if (level == 0) { - return writer.writeAll("(comptime field ptr)"); - } - return print(.{ - .ty = payload.field_ty, - .val = payload.field_val, - }, writer, level - 1, mod); - }, - .elem_ptr => { - const elem_ptr = val.castTag(.elem_ptr).?.data; - try writer.writeAll("&"); - if (level == 0) { - try writer.writeAll("(ptr)"); - } else { + }, + .float_16 => return writer.print("{d}", .{val.castTag(.float_16).?.data}), + .float_32 => return writer.print("{d}", .{val.castTag(.float_32).?.data}), + .float_64 => return writer.print("{d}", .{val.castTag(.float_64).?.data}), + .float_80 => return writer.print("{d}", .{@floatCast(f64, val.castTag(.float_80).?.data)}), + .float_128 => return writer.print("{d}", .{@floatCast(f64, val.castTag(.float_128).?.data)}), + .@"error" => return writer.print("error.{s}", .{val.castTag(.@"error").?.data.name}), + .eu_payload => { + val = val.castTag(.eu_payload).?.data; + ty = ty.errorUnionPayload(); + }, + .opt_payload => { + val = val.castTag(.opt_payload).?.data; + ty = ty.optionalChild(mod); + return print(.{ .ty = ty, .val = val }, writer, level, mod); + }, + .eu_payload_ptr => { + try writer.writeAll("&"); + + const data = val.castTag(.eu_payload_ptr).?.data; + + var ty_val: Value.Payload.Ty 
= .{ + .base = .{ .tag = .ty }, + .data = ty, + }; + + try writer.writeAll("@as("); try print(.{ - .ty = elem_ptr.elem_ty, - .val = elem_ptr.array_ptr, + .ty = Type.type, + .val = Value.initPayload(&ty_val.base), }, writer, level - 1, mod); - } - return writer.print("[{}]", .{elem_ptr.index}); - }, - .field_ptr => { - const field_ptr = val.castTag(.field_ptr).?.data; - try writer.writeAll("&"); - if (level == 0) { - try writer.writeAll("(ptr)"); - } else { + + try writer.writeAll(", &(payload of "); + try print(.{ - .ty = field_ptr.container_ty, - .val = field_ptr.container_ptr, + .ty = mod.singleMutPtrType(data.container_ty) catch @panic("OOM"), + .val = data.container_ptr, }, writer, level - 1, mod); - } - - if (field_ptr.container_ty.zigTypeTag(mod) == .Struct) { - switch (field_ptr.container_ty.tag()) { - .tuple => return writer.print(".@\"{d}\"", .{field_ptr.field_index}), - else => { - const field_name = field_ptr.container_ty.structFieldName(field_ptr.field_index); - return writer.print(".{s}", .{field_name}); - }, - } - } else if (field_ptr.container_ty.zigTypeTag(mod) == .Union) { - const field_name = field_ptr.container_ty.unionFields().keys()[field_ptr.field_index]; - return writer.print(".{s}", .{field_name}); - } else if (field_ptr.container_ty.isSlice(mod)) { - switch (field_ptr.field_index) { - Value.Payload.Slice.ptr_index => return writer.writeAll(".ptr"), - Value.Payload.Slice.len_index => return writer.writeAll(".len"), - else => unreachable, - } - } - }, - .empty_array => return writer.writeAll(".{}"), - .enum_literal => return writer.print(".{}", .{std.zig.fmtId(val.castTag(.enum_literal).?.data)}), - .enum_field_index => { - return writer.print(".{s}", .{ty.enumFieldName(val.castTag(.enum_field_index).?.data)}); - }, - .bytes => return writer.print("\"{}\"", .{std.zig.fmtEscapes(val.castTag(.bytes).?.data)}), - .str_lit => { - const str_lit = val.castTag(.str_lit).?.data; - const bytes = mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; - return writer.print("\"{}\"", .{std.zig.fmtEscapes(bytes)}); - }, - .repeated => { - if (level == 0) { - return writer.writeAll(".{ ... }"); - } - var i: u32 = 0; - try writer.writeAll(".{ "); - const elem_tv = TypedValue{ - .ty = ty.elemType2(mod), - .val = val.castTag(.repeated).?.data, - }; - const len = ty.arrayLen(mod); - const max_len = std.math.min(len, max_aggregate_items); - while (i < max_len) : (i += 1) { - if (i != 0) try writer.writeAll(", "); - try print(elem_tv, writer, level - 1, mod); - } - if (len > max_aggregate_items) { - try writer.writeAll(", ..."); - } - return writer.writeAll(" }"); - }, - .empty_array_sentinel => { - if (level == 0) { - return writer.writeAll(".{ (sentinel) }"); - } - try writer.writeAll(".{ "); - try print(.{ - .ty = ty.elemType2(mod), - .val = ty.sentinel(mod).?, - }, writer, level - 1, mod); - return writer.writeAll(" }"); - }, - .slice => { - if (level == 0) { - return writer.writeAll(".{ ... 
}"); - } - const payload = val.castTag(.slice).?.data; - const elem_ty = ty.elemType2(mod); - const len = payload.len.toUnsignedInt(mod); - - if (elem_ty.eql(Type.u8, mod)) str: { - const max_len = @intCast(usize, std.math.min(len, max_string_len)); - var buf: [max_string_len]u8 = undefined; - var i: u32 = 0; - while (i < max_len) : (i += 1) { - var elem_buf: Value.ElemValueBuffer = undefined; - const elem_val = payload.ptr.elemValueBuffer(mod, i, &elem_buf); - if (elem_val.isUndef()) break :str; - buf[i] = std.math.cast(u8, elem_val.toUnsignedInt(mod)) orelse break :str; - } - - // TODO would be nice if this had a bit of unicode awareness. - const truncated = if (len > max_string_len) " (truncated)" else ""; - return writer.print("\"{}{s}\"", .{ std.zig.fmtEscapes(buf[0..max_len]), truncated }); - } + try writer.writeAll("))"); + return; + }, + .opt_payload_ptr => { + const data = val.castTag(.opt_payload_ptr).?.data; - try writer.writeAll(".{ "); + var ty_val: Value.Payload.Ty = .{ + .base = .{ .tag = .ty }, + .data = ty, + }; - const max_len = std.math.min(len, max_aggregate_items); - var i: u32 = 0; - while (i < max_len) : (i += 1) { - if (i != 0) try writer.writeAll(", "); - var buf: Value.ElemValueBuffer = undefined; + try writer.writeAll("@as("); try print(.{ - .ty = elem_ty, - .val = payload.ptr.elemValueBuffer(mod, i, &buf), + .ty = Type.type, + .val = Value.initPayload(&ty_val.base), }, writer, level - 1, mod); - } - if (len > max_aggregate_items) { - try writer.writeAll(", ..."); - } - return writer.writeAll(" }"); - }, - .float_16 => return writer.print("{d}", .{val.castTag(.float_16).?.data}), - .float_32 => return writer.print("{d}", .{val.castTag(.float_32).?.data}), - .float_64 => return writer.print("{d}", .{val.castTag(.float_64).?.data}), - .float_80 => return writer.print("{d}", .{@floatCast(f64, val.castTag(.float_80).?.data)}), - .float_128 => return writer.print("{d}", .{@floatCast(f64, val.castTag(.float_128).?.data)}), - .@"error" => return writer.print("error.{s}", .{val.castTag(.@"error").?.data.name}), - .eu_payload => { - val = val.castTag(.eu_payload).?.data; - ty = ty.errorUnionPayload(); - }, - .opt_payload => { - val = val.castTag(.opt_payload).?.data; - ty = ty.optionalChild(mod); - return print(.{ .ty = ty, .val = val }, writer, level, mod); - }, - .eu_payload_ptr => { - try writer.writeAll("&"); - const data = val.castTag(.eu_payload_ptr).?.data; + try writer.writeAll(", &(payload of "); - var ty_val: Value.Payload.Ty = .{ - .base = .{ .tag = .ty }, - .data = ty, - }; - - try writer.writeAll("@as("); - try print(.{ - .ty = Type.type, - .val = Value.initPayload(&ty_val.base), - }, writer, level - 1, mod); - - try writer.writeAll(", &(payload of "); + try print(.{ + .ty = mod.singleMutPtrType(data.container_ty) catch @panic("OOM"), + .val = data.container_ptr, + }, writer, level - 1, mod); - try print(.{ - .ty = mod.singleMutPtrType(data.container_ty) catch @panic("OOM"), - .val = data.container_ptr, - }, writer, level - 1, mod); + try writer.writeAll("))"); + return; + }, - try writer.writeAll("))"); - return; + // TODO these should not appear in this function + .inferred_alloc => return writer.writeAll("(inferred allocation value)"), + .inferred_alloc_comptime => return writer.writeAll("(inferred comptime allocation value)"), + .runtime_value => return writer.writeAll("[runtime value]"), }, - .opt_payload_ptr => { - const data = val.castTag(.opt_payload_ptr).?.data; - - var ty_val: Value.Payload.Ty = .{ - .base = .{ .tag = .ty }, - .data = ty, - }; - - 
try writer.writeAll("@as("); - try print(.{ - .ty = Type.type, - .val = Value.initPayload(&ty_val.base), - }, writer, level - 1, mod); - - try writer.writeAll(", &(payload of "); - - try print(.{ - .ty = mod.singleMutPtrType(data.container_ty) catch @panic("OOM"), - .val = data.container_ptr, - }, writer, level - 1, mod); - - try writer.writeAll("))"); + else => { + try writer.print("(interned: {})", .{val.ip_index}); return; }, - - // TODO these should not appear in this function - .inferred_alloc => return writer.writeAll("(inferred allocation value)"), - .inferred_alloc_comptime => return writer.writeAll("(inferred comptime allocation value)"), - .runtime_value => return writer.writeAll("[runtime value]"), }; } diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index 96304628e9..ea7134c603 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -3088,11 +3088,15 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue { 64 => return WValue{ .float64 = val.toFloat(f64) }, else => unreachable, }, - .Pointer => switch (val.tag()) { - .field_ptr, .elem_ptr, .opt_payload_ptr => return func.lowerParentPtr(val, 0), - .int_u64, .one => return WValue{ .imm32 = @intCast(u32, val.toUnsignedInt(mod)) }, - .zero, .null_value => return WValue{ .imm32 = 0 }, - else => return func.fail("Wasm TODO: lowerConstant for other const pointer tag {}", .{val.tag()}), + .Pointer => switch (val.ip_index) { + .null_value => return WValue{ .imm32 = 0 }, + .none => switch (val.tag()) { + .field_ptr, .elem_ptr, .opt_payload_ptr => return func.lowerParentPtr(val, 0), + .int_u64, .one => return WValue{ .imm32 = @intCast(u32, val.toUnsignedInt(mod)) }, + .zero => return WValue{ .imm32 = 0 }, + else => return func.fail("Wasm TODO: lowerConstant for other const pointer tag {}", .{val.tag()}), + }, + else => unreachable, }, .Enum => { if (val.castTag(.enum_field_index)) |field_index| { diff --git a/src/codegen.zig b/src/codegen.zig index a807400502..25e8d892d8 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -312,7 +312,7 @@ pub fn generateSymbol( ), }, }, - .Pointer => switch (typed_value.val.tag()) { + .Pointer => switch (typed_value.val.ip_index) { .null_value => { switch (target.ptrBitWidth()) { 32 => { @@ -327,76 +327,79 @@ pub fn generateSymbol( } return Result.ok; }, - .zero, .one, .int_u64, .int_big_positive => { - switch (target.ptrBitWidth()) { - 32 => { - const x = typed_value.val.toUnsignedInt(mod); - mem.writeInt(u32, try code.addManyAsArray(4), @intCast(u32, x), endian); - }, - 64 => { - const x = typed_value.val.toUnsignedInt(mod); - mem.writeInt(u64, try code.addManyAsArray(8), x, endian); - }, - else => unreachable, - } - return Result.ok; - }, - .variable, .decl_ref, .decl_ref_mut => |tag| return lowerDeclRef( - bin_file, - src_loc, - typed_value, - switch (tag) { - .variable => typed_value.val.castTag(.variable).?.data.owner_decl, - .decl_ref => typed_value.val.castTag(.decl_ref).?.data, - .decl_ref_mut => typed_value.val.castTag(.decl_ref_mut).?.data.decl_index, - else => unreachable, + .none => switch (typed_value.val.tag()) { + .zero, .one, .int_u64, .int_big_positive => { + switch (target.ptrBitWidth()) { + 32 => { + const x = typed_value.val.toUnsignedInt(mod); + mem.writeInt(u32, try code.addManyAsArray(4), @intCast(u32, x), endian); + }, + 64 => { + const x = typed_value.val.toUnsignedInt(mod); + mem.writeInt(u64, try code.addManyAsArray(8), x, endian); + }, + else => unreachable, + } + return Result.ok; }, - code, - debug_output, - 
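// ---- editor's aside: illustration only, not part of the patch ----
// Every backend hunk in this series has the same mechanical shape: values
// are migrating from the legacy Tag payload scheme into the InternPool, so
// a single `switch (val.tag())` becomes a two-level dispatch that matches
// interned keys first and only consults the tag for not-yet-interned
// (.none) values. Distilled with stand-in types, not the real compiler API:
const IpIndex = enum { none, null_value, undef };
const LegacyTag = enum { zero, int_u64 };
const MiniValue = struct { ip_index: IpIndex, tag: LegacyTag, int: u64 = 0 };

fn lowerConstPtr(val: MiniValue) u32 {
    return switch (val.ip_index) {
        .null_value => 0, // already interned: handled without touching the tag
        .undef => unreachable, // undefined pointers are rejected earlier
        .none => switch (val.tag) { // legacy representation: fall back to tag
            .zero => 0,
            .int_u64 => @intCast(u32, val.int),
        },
    };
}
// ---- end editor's aside ----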
reloc_info, - ), - .slice => { - const slice = typed_value.val.castTag(.slice).?.data; + .variable, .decl_ref, .decl_ref_mut => |tag| return lowerDeclRef( + bin_file, + src_loc, + typed_value, + switch (tag) { + .variable => typed_value.val.castTag(.variable).?.data.owner_decl, + .decl_ref => typed_value.val.castTag(.decl_ref).?.data, + .decl_ref_mut => typed_value.val.castTag(.decl_ref_mut).?.data.decl_index, + else => unreachable, + }, + code, + debug_output, + reloc_info, + ), + .slice => { + const slice = typed_value.val.castTag(.slice).?.data; - // generate ptr - var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const slice_ptr_field_type = typed_value.ty.slicePtrFieldType(&buf); - switch (try generateSymbol(bin_file, src_loc, .{ - .ty = slice_ptr_field_type, - .val = slice.ptr, - }, code, debug_output, reloc_info)) { - .ok => {}, - .fail => |em| return Result{ .fail = em }, - } + // generate ptr + var buf: Type.SlicePtrFieldTypeBuffer = undefined; + const slice_ptr_field_type = typed_value.ty.slicePtrFieldType(&buf); + switch (try generateSymbol(bin_file, src_loc, .{ + .ty = slice_ptr_field_type, + .val = slice.ptr, + }, code, debug_output, reloc_info)) { + .ok => {}, + .fail => |em| return Result{ .fail = em }, + } - // generate length - switch (try generateSymbol(bin_file, src_loc, .{ - .ty = Type.usize, - .val = slice.len, - }, code, debug_output, reloc_info)) { - .ok => {}, - .fail => |em| return Result{ .fail = em }, - } + // generate length + switch (try generateSymbol(bin_file, src_loc, .{ + .ty = Type.usize, + .val = slice.len, + }, code, debug_output, reloc_info)) { + .ok => {}, + .fail => |em| return Result{ .fail = em }, + } - return Result.ok; - }, - .field_ptr, .elem_ptr, .opt_payload_ptr => return lowerParentPtr( - bin_file, - src_loc, - typed_value, - typed_value.val, - code, - debug_output, - reloc_info, - ), - else => return Result{ - .fail = try ErrorMsg.create( - bin_file.allocator, + return Result.ok; + }, + .field_ptr, .elem_ptr, .opt_payload_ptr => return lowerParentPtr( + bin_file, src_loc, - "TODO implement generateSymbol for pointer type value: '{s}'", - .{@tagName(typed_value.val.tag())}, + typed_value, + typed_value.val, + code, + debug_output, + reloc_info, ), + else => return Result{ + .fail = try ErrorMsg.create( + bin_file.allocator, + src_loc, + "TODO implement generateSymbol for pointer type value: '{s}'", + .{@tagName(typed_value.val.tag())}, + ), + }, }, + else => unreachable, }, .Int => { const info = typed_value.ty.intInfo(mod); @@ -652,7 +655,7 @@ pub fn generateSymbol( } const padding = abi_size - (math.cast(usize, payload_type.abiSize(mod)) orelse return error.Overflow) - 1; - const value = if (typed_value.val.castTag(.opt_payload)) |payload| payload.data else Value.initTag(.undef); + const value = if (typed_value.val.castTag(.opt_payload)) |payload| payload.data else Value.undef; switch (try generateSymbol(bin_file, src_loc, .{ .ty = payload_type, .val = value, @@ -696,7 +699,7 @@ pub fn generateSymbol( // emit payload part of the error union { const begin = code.items.len; - const payload_val = if (typed_value.val.castTag(.eu_payload)) |val| val.data else Value.initTag(.undef); + const payload_val = if (typed_value.val.castTag(.eu_payload)) |val| val.data else Value.undef; switch (try generateSymbol(bin_file, src_loc, .{ .ty = payload_ty, .val = payload_val, @@ -1189,16 +1192,17 @@ pub fn genTypedValue( .Void => return GenResult.mcv(.none), .Pointer => switch (typed_value.ty.ptrSize(mod)) { .Slice => {}, - else => { - switch 
(typed_value.val.tag()) { - .null_value => { - return GenResult.mcv(.{ .immediate = 0 }); - }, + else => switch (typed_value.val.ip_index) { + .null_value => { + return GenResult.mcv(.{ .immediate = 0 }); + }, + .none => switch (typed_value.val.tag()) { .int_u64 => { return GenResult.mcv(.{ .immediate = typed_value.val.toUnsignedInt(mod) }); }, else => {}, - } + }, + else => {}, }, }, .Int => { @@ -1216,7 +1220,7 @@ pub fn genTypedValue( }, .Optional => { if (typed_value.ty.isPtrLikeOptional(mod)) { - if (typed_value.val.tag() == .null_value) return GenResult.mcv(.{ .immediate = 0 }); + if (typed_value.val.ip_index == .null_value) return GenResult.mcv(.{ .immediate = 0 }); return genTypedValue(bin_file, src_loc, .{ .ty = typed_value.ty.optionalChild(mod), diff --git a/src/codegen/c.zig b/src/codegen/c.zig index e6ec461e43..cd3974bc91 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -1045,8 +1045,8 @@ pub const DeclGen = struct { if (!empty) try writer.writeByte(')'); return; }, - .Pointer => switch (val.tag()) { - .null_value, .zero => if (ty.isSlice(mod)) { + .Pointer => switch (val.ip_index) { + .null_value => if (ty.isSlice(mod)) { var slice_pl = Value.Payload.Slice{ .base = .{ .tag = .slice }, .data = .{ .ptr = val, .len = Value.undef }, @@ -1059,46 +1059,63 @@ pub const DeclGen = struct { try dg.renderType(writer, ty); try writer.writeAll(")NULL)"); }, - .variable => { - const decl = val.castTag(.variable).?.data.owner_decl; - return dg.renderDeclValue(writer, ty, val, decl, location); - }, - .slice => { - if (!location.isInitializer()) { - try writer.writeByte('('); + .none => switch (val.tag()) { + .zero => if (ty.isSlice(mod)) { + var slice_pl = Value.Payload.Slice{ + .base = .{ .tag = .slice }, + .data = .{ .ptr = val, .len = Value.undef }, + }; + const slice_val = Value.initPayload(&slice_pl.base); + + return dg.renderValue(writer, ty, slice_val, location); + } else { + try writer.writeAll("(("); try dg.renderType(writer, ty); - try writer.writeByte(')'); - } + try writer.writeAll(")NULL)"); + }, + .variable => { + const decl = val.castTag(.variable).?.data.owner_decl; + return dg.renderDeclValue(writer, ty, val, decl, location); + }, + .slice => { + if (!location.isInitializer()) { + try writer.writeByte('('); + try dg.renderType(writer, ty); + try writer.writeByte(')'); + } - const slice = val.castTag(.slice).?.data; - var buf: Type.SlicePtrFieldTypeBuffer = undefined; + const slice = val.castTag(.slice).?.data; + var buf: Type.SlicePtrFieldTypeBuffer = undefined; - try writer.writeByte('{'); - try dg.renderValue(writer, ty.slicePtrFieldType(&buf), slice.ptr, initializer_type); - try writer.writeAll(", "); - try dg.renderValue(writer, Type.usize, slice.len, initializer_type); - try writer.writeByte('}'); - }, - .function => { - const func = val.castTag(.function).?.data; - try dg.renderDeclName(writer, func.owner_decl, 0); - }, - .extern_fn => { - const extern_fn = val.castTag(.extern_fn).?.data; - try dg.renderDeclName(writer, extern_fn.owner_decl, 0); - }, - .int_u64, .one, .int_big_positive, .lazy_align, .lazy_size => { - try writer.writeAll("(("); - try dg.renderType(writer, ty); - return writer.print("){x})", .{try dg.fmtIntLiteral(Type.usize, val, .Other)}); + try writer.writeByte('{'); + try dg.renderValue(writer, ty.slicePtrFieldType(&buf), slice.ptr, initializer_type); + try writer.writeAll(", "); + try dg.renderValue(writer, Type.usize, slice.len, initializer_type); + try writer.writeByte('}'); + }, + .function => { + const func = 
val.castTag(.function).?.data; + try dg.renderDeclName(writer, func.owner_decl, 0); + }, + .extern_fn => { + const extern_fn = val.castTag(.extern_fn).?.data; + try dg.renderDeclName(writer, extern_fn.owner_decl, 0); + }, + .int_u64, .one, .int_big_positive, .lazy_align, .lazy_size => { + try writer.writeAll("(("); + try dg.renderType(writer, ty); + return writer.print("){x})", .{try dg.fmtIntLiteral(Type.usize, val, .Other)}); + }, + .field_ptr, + .elem_ptr, + .opt_payload_ptr, + .eu_payload_ptr, + .decl_ref_mut, + .decl_ref, + => try dg.renderParentPtr(writer, val, ty, location), + + else => unreachable, }, - .field_ptr, - .elem_ptr, - .opt_payload_ptr, - .eu_payload_ptr, - .decl_ref_mut, - .decl_ref, - => try dg.renderParentPtr(writer, val, ty, location), else => unreachable, }, .Array, .Vector => { @@ -1109,8 +1126,8 @@ pub const DeclGen = struct { } // First try specific tag representations for more efficiency. - switch (val.tag()) { - .undef, .empty_struct_value, .empty_array => { + switch (val.ip_index) { + .undef => { const ai = ty.arrayInfo(mod); try writer.writeByte('{'); if (ai.sentinel) |s| { @@ -1119,76 +1136,91 @@ pub const DeclGen = struct { try writer.writeByte('0'); } try writer.writeByte('}'); + return; }, - .bytes, .str_lit => |t| { - const bytes = switch (t) { - .bytes => val.castTag(.bytes).?.data, - .str_lit => bytes: { - const str_lit = val.castTag(.str_lit).?.data; - break :bytes dg.module.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; - }, - else => unreachable, - }; - const sentinel = if (ty.sentinel(mod)) |sentinel| @intCast(u8, sentinel.toUnsignedInt(mod)) else null; - try writer.print("{s}", .{ - fmtStringLiteral(bytes[0..@intCast(usize, ty.arrayLen(mod))], sentinel), - }); - }, - else => { - // Fall back to generic implementation. 
- var arena = std.heap.ArenaAllocator.init(dg.gpa); - defer arena.deinit(); - const arena_allocator = arena.allocator(); - - // MSVC throws C2078 if an array of size 65536 or greater is initialized with a string literal - const max_string_initializer_len = 65535; - - const ai = ty.arrayInfo(mod); - if (ai.elem_type.eql(Type.u8, dg.module)) { - if (ai.len <= max_string_initializer_len) { - var literal = stringLiteral(writer); - try literal.start(); - var index: usize = 0; - while (index < ai.len) : (index += 1) { - const elem_val = try val.elemValue(dg.module, arena_allocator, index); - const elem_val_u8 = if (elem_val.isUndef()) undefPattern(u8) else @intCast(u8, elem_val.toUnsignedInt(mod)); - try literal.writeChar(elem_val_u8); - } - if (ai.sentinel) |s| { - const s_u8 = @intCast(u8, s.toUnsignedInt(mod)); - if (s_u8 != 0) try literal.writeChar(s_u8); - } - try literal.end(); - } else { - try writer.writeByte('{'); - var index: usize = 0; - while (index < ai.len) : (index += 1) { - if (index != 0) try writer.writeByte(','); - const elem_val = try val.elemValue(dg.module, arena_allocator, index); - const elem_val_u8 = if (elem_val.isUndef()) undefPattern(u8) else @intCast(u8, elem_val.toUnsignedInt(mod)); - try writer.print("'\\x{x}'", .{elem_val_u8}); - } - if (ai.sentinel) |s| { - if (index != 0) try writer.writeByte(','); - try dg.renderValue(writer, ai.elem_type, s, initializer_type); - } - try writer.writeByte('}'); - } - } else { + .none => switch (val.tag()) { + .empty_struct_value, .empty_array => { + const ai = ty.arrayInfo(mod); try writer.writeByte('{'); - var index: usize = 0; - while (index < ai.len) : (index += 1) { - if (index != 0) try writer.writeByte(','); - const elem_val = try val.elemValue(dg.module, arena_allocator, index); - try dg.renderValue(writer, ai.elem_type, elem_val, initializer_type); - } if (ai.sentinel) |s| { - if (index != 0) try writer.writeByte(','); try dg.renderValue(writer, ai.elem_type, s, initializer_type); + } else { + try writer.writeByte('0'); } try writer.writeByte('}'); - } + return; + }, + .bytes, .str_lit => |t| { + const bytes = switch (t) { + .bytes => val.castTag(.bytes).?.data, + .str_lit => bytes: { + const str_lit = val.castTag(.str_lit).?.data; + break :bytes dg.module.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; + }, + else => unreachable, + }; + const sentinel = if (ty.sentinel(mod)) |sentinel| @intCast(u8, sentinel.toUnsignedInt(mod)) else null; + try writer.print("{s}", .{ + fmtStringLiteral(bytes[0..@intCast(usize, ty.arrayLen(mod))], sentinel), + }); + return; + }, + else => {}, }, + else => {}, + } + // Fall back to generic implementation. 
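// ---- editor's aside: illustration only, not part of the patch ----
// The c.zig array hunk is a pure control-flow refactor: the specific
// representations (.undef, .empty_array, .bytes, .str_lit) now return early
// from the switch, and the generic element-by-element renderer is hoisted
// below it, so both the ip_index level and the legacy-tag level share one
// copy of the fallback loop. The shape, in miniature (illustrative names):
fn renderMini(kind: enum { bytes, other }, w: anytype) !void {
    switch (kind) {
        .bytes => return w.writeAll("\"...\""), // fast path returns early
        .other => {}, // fall through to the shared generic path
    }
    // Single fallback after the switch, instead of one copy per arm.
    try w.writeAll("{ ... }");
}
// ---- end editor's aside ----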
+ var arena = std.heap.ArenaAllocator.init(dg.gpa); + defer arena.deinit(); + const arena_allocator = arena.allocator(); + + // MSVC throws C2078 if an array of size 65536 or greater is initialized with a string literal + const max_string_initializer_len = 65535; + + const ai = ty.arrayInfo(mod); + if (ai.elem_type.eql(Type.u8, dg.module)) { + if (ai.len <= max_string_initializer_len) { + var literal = stringLiteral(writer); + try literal.start(); + var index: usize = 0; + while (index < ai.len) : (index += 1) { + const elem_val = try val.elemValue(dg.module, arena_allocator, index); + const elem_val_u8 = if (elem_val.isUndef()) undefPattern(u8) else @intCast(u8, elem_val.toUnsignedInt(mod)); + try literal.writeChar(elem_val_u8); + } + if (ai.sentinel) |s| { + const s_u8 = @intCast(u8, s.toUnsignedInt(mod)); + if (s_u8 != 0) try literal.writeChar(s_u8); + } + try literal.end(); + } else { + try writer.writeByte('{'); + var index: usize = 0; + while (index < ai.len) : (index += 1) { + if (index != 0) try writer.writeByte(','); + const elem_val = try val.elemValue(dg.module, arena_allocator, index); + const elem_val_u8 = if (elem_val.isUndef()) undefPattern(u8) else @intCast(u8, elem_val.toUnsignedInt(mod)); + try writer.print("'\\x{x}'", .{elem_val_u8}); + } + if (ai.sentinel) |s| { + if (index != 0) try writer.writeByte(','); + try dg.renderValue(writer, ai.elem_type, s, initializer_type); + } + try writer.writeByte('}'); + } + } else { + try writer.writeByte('{'); + var index: usize = 0; + while (index < ai.len) : (index += 1) { + if (index != 0) try writer.writeByte(','); + const elem_val = try val.elemValue(dg.module, arena_allocator, index); + try dg.renderValue(writer, ai.elem_type, elem_val, initializer_type); + } + if (ai.sentinel) |s| { + if (index != 0) try writer.writeByte(','); + try dg.renderValue(writer, ai.elem_type, s, initializer_type); + } + try writer.writeByte('}'); } }, .Bool => { @@ -1201,7 +1233,7 @@ pub const DeclGen = struct { .Optional => { const payload_ty = ty.optionalChild(mod); - const is_null_val = Value.makeBool(val.tag() == .null_value); + const is_null_val = Value.makeBool(val.ip_index == .null_value); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return dg.renderValue(writer, Type.bool, is_null_val, location); @@ -7765,7 +7797,7 @@ fn lowerFnRetTy(ret_ty: Type, buffer: *LowerFnRetTyBuffer, mod: *const Module) T if (lowersToArray(ret_ty, mod)) { buffer.names = [1][]const u8{"array"}; buffer.types = [1]Type{ret_ty}; - buffer.values = [1]Value{Value.initTag(.unreachable_value)}; + buffer.values = [1]Value{Value.@"unreachable"}; buffer.payload = .{ .data = .{ .names = &buffer.names, .types = &buffer.types, diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index f45a63df72..558534a651 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -2028,7 +2028,7 @@ pub const Object = struct { for (tuple.types, 0..) 
|field_ty, i| { const field_val = tuple.values[i]; - if (field_val.tag() != .unreachable_value or !field_ty.hasRuntimeBits(mod)) continue; + if (field_val.ip_index != .unreachable_value or !field_ty.hasRuntimeBits(mod)) continue; const field_size = field_ty.abiSize(mod); const field_align = field_ty.abiAlignment(mod); @@ -2498,7 +2498,7 @@ pub const DeclGen = struct { global.setGlobalConstant(.True); break :init_val decl.val; }; - if (init_val.tag() != .unreachable_value) { + if (init_val.ip_index != .unreachable_value) { const llvm_init = try dg.lowerValue(.{ .ty = decl.ty, .val = init_val }); if (global.globalGetValueType() == llvm_init.typeOf()) { global.setInitializer(llvm_init); @@ -2954,7 +2954,7 @@ pub const DeclGen = struct { for (tuple.types, 0..) |field_ty, i| { const field_val = tuple.values[i]; - if (field_val.tag() != .unreachable_value or !field_ty.hasRuntimeBits(mod)) continue; + if (field_val.ip_index != .unreachable_value or !field_ty.hasRuntimeBits(mod)) continue; const field_align = field_ty.abiAlignment(mod); big_align = @max(big_align, field_align); @@ -3359,58 +3359,65 @@ pub const DeclGen = struct { else => unreachable, } }, - .Pointer => switch (tv.val.tag()) { - .decl_ref_mut => return lowerDeclRefValue(dg, tv, tv.val.castTag(.decl_ref_mut).?.data.decl_index), - .decl_ref => return lowerDeclRefValue(dg, tv, tv.val.castTag(.decl_ref).?.data), - .variable => { - const decl_index = tv.val.castTag(.variable).?.data.owner_decl; - const decl = dg.module.declPtr(decl_index); - dg.module.markDeclAlive(decl); - - const llvm_wanted_addrspace = toLlvmAddressSpace(decl.@"addrspace", target); - const llvm_actual_addrspace = toLlvmGlobalAddressSpace(decl.@"addrspace", target); - - const val = try dg.resolveGlobalDecl(decl_index); - const addrspace_casted_ptr = if (llvm_actual_addrspace != llvm_wanted_addrspace) - val.constAddrSpaceCast(dg.context.pointerType(llvm_wanted_addrspace)) - else - val; - return addrspace_casted_ptr; - }, - .slice => { - const slice = tv.val.castTag(.slice).?.data; - var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const fields: [2]*llvm.Value = .{ - try dg.lowerValue(.{ - .ty = tv.ty.slicePtrFieldType(&buf), - .val = slice.ptr, - }), - try dg.lowerValue(.{ - .ty = Type.usize, - .val = slice.len, - }), - }; - return dg.context.constStruct(&fields, fields.len, .False); - }, - .int_u64, .one, .int_big_positive, .lazy_align, .lazy_size => { - const llvm_usize = try dg.lowerType(Type.usize); - const llvm_int = llvm_usize.constInt(tv.val.toUnsignedInt(mod), .False); - return llvm_int.constIntToPtr(try dg.lowerType(tv.ty)); - }, - .field_ptr, .opt_payload_ptr, .eu_payload_ptr, .elem_ptr => { - return dg.lowerParentPtr(tv.val, tv.ty.ptrInfo(mod).bit_offset % 8 == 0); - }, - .null_value, .zero => { + .Pointer => switch (tv.val.ip_index) { + .null_value => { const llvm_type = try dg.lowerType(tv.ty); return llvm_type.constNull(); }, - .opt_payload => { - const payload = tv.val.castTag(.opt_payload).?.data; - return dg.lowerParentPtr(payload, tv.ty.ptrInfo(mod).bit_offset % 8 == 0); + .none => switch (tv.val.tag()) { + .decl_ref_mut => return lowerDeclRefValue(dg, tv, tv.val.castTag(.decl_ref_mut).?.data.decl_index), + .decl_ref => return lowerDeclRefValue(dg, tv, tv.val.castTag(.decl_ref).?.data), + .variable => { + const decl_index = tv.val.castTag(.variable).?.data.owner_decl; + const decl = dg.module.declPtr(decl_index); + dg.module.markDeclAlive(decl); + + const llvm_wanted_addrspace = toLlvmAddressSpace(decl.@"addrspace", target); + const 
llvm_actual_addrspace = toLlvmGlobalAddressSpace(decl.@"addrspace", target); + + const val = try dg.resolveGlobalDecl(decl_index); + const addrspace_casted_ptr = if (llvm_actual_addrspace != llvm_wanted_addrspace) + val.constAddrSpaceCast(dg.context.pointerType(llvm_wanted_addrspace)) + else + val; + return addrspace_casted_ptr; + }, + .slice => { + const slice = tv.val.castTag(.slice).?.data; + var buf: Type.SlicePtrFieldTypeBuffer = undefined; + const fields: [2]*llvm.Value = .{ + try dg.lowerValue(.{ + .ty = tv.ty.slicePtrFieldType(&buf), + .val = slice.ptr, + }), + try dg.lowerValue(.{ + .ty = Type.usize, + .val = slice.len, + }), + }; + return dg.context.constStruct(&fields, fields.len, .False); + }, + .int_u64, .one, .int_big_positive, .lazy_align, .lazy_size => { + const llvm_usize = try dg.lowerType(Type.usize); + const llvm_int = llvm_usize.constInt(tv.val.toUnsignedInt(mod), .False); + return llvm_int.constIntToPtr(try dg.lowerType(tv.ty)); + }, + .field_ptr, .opt_payload_ptr, .eu_payload_ptr, .elem_ptr => { + return dg.lowerParentPtr(tv.val, tv.ty.ptrInfo(mod).bit_offset % 8 == 0); + }, + .zero => { + const llvm_type = try dg.lowerType(tv.ty); + return llvm_type.constNull(); + }, + .opt_payload => { + const payload = tv.val.castTag(.opt_payload).?.data; + return dg.lowerParentPtr(payload, tv.ty.ptrInfo(mod).bit_offset % 8 == 0); + }, + else => |tag| return dg.todo("implement const of pointer type '{}' ({})", .{ + tv.ty.fmtDebug(), tag, + }), }, - else => |tag| return dg.todo("implement const of pointer type '{}' ({})", .{ - tv.ty.fmtDebug(), tag, - }), + else => unreachable, }, .Array => switch (tv.val.tag()) { .bytes => { @@ -3555,7 +3562,7 @@ pub const DeclGen = struct { var fields_buf: [3]*llvm.Value = undefined; fields_buf[0] = try dg.lowerValue(.{ .ty = payload_ty, - .val = if (tv.val.castTag(.opt_payload)) |pl| pl.data else Value.initTag(.undef), + .val = if (tv.val.castTag(.opt_payload)) |pl| pl.data else Value.undef, }); fields_buf[1] = non_null_bit; if (llvm_field_count > 2) { @@ -3606,7 +3613,7 @@ pub const DeclGen = struct { }); const llvm_payload_value = try dg.lowerValue(.{ .ty = payload_type, - .val = if (tv.val.castTag(.eu_payload)) |pl| pl.data else Value.initTag(.undef), + .val = if (tv.val.castTag(.eu_payload)) |pl| pl.data else Value.undef, }); var fields_buf: [3]*llvm.Value = undefined; @@ -3645,7 +3652,7 @@ pub const DeclGen = struct { var need_unnamed = false; for (tuple.types, 0..) |field_ty, i| { - if (tuple.values[i].tag() != .unreachable_value) continue; + if (tuple.values[i].ip_index != .unreachable_value) continue; if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; const field_align = field_ty.abiAlignment(mod); @@ -10501,7 +10508,7 @@ fn llvmFieldIndex( const tuple = ty.tupleFields(); var llvm_field_index: c_uint = 0; for (tuple.types, 0..) |field_ty, i| { - if (tuple.values[i].tag() != .unreachable_value or !field_ty.hasRuntimeBits(mod)) continue; + if (tuple.values[i].ip_index != .unreachable_value or !field_ty.hasRuntimeBits(mod)) continue; const field_align = field_ty.abiAlignment(mod); big_align = @max(big_align, field_align); @@ -11117,7 +11124,7 @@ fn isByRef(ty: Type, mod: *const Module) bool { const tuple = ty.tupleFields(); var count: usize = 0; for (tuple.values, 0..) 
|field_val, i| { - if (field_val.tag() != .unreachable_value or !tuple.types[i].hasRuntimeBits(mod)) continue; + if (field_val.ip_index != .unreachable_value or !tuple.types[i].hasRuntimeBits(mod)) continue; count += 1; if (count > max_fields_byval) return true; diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 9de2c03142..5fa81d19ff 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -674,7 +674,7 @@ pub const DeclGen = struct { try self.lower(ptr_ty, slice.ptr); try self.addInt(Type.usize, slice.len); }, - .null_value, .zero => try self.addNullPtr(try dg.resolveType(ty, .indirect)), + .zero => try self.addNullPtr(try dg.resolveType(ty, .indirect)), .int_u64, .one, .int_big_positive, .lazy_align, .lazy_size => { try self.addInt(Type.usize, val); }, @@ -813,7 +813,8 @@ pub const DeclGen = struct { const error_size = Type.anyerror.abiAlignment(mod); const ty_size = ty.abiSize(mod); const padding = ty_size - payload_size - error_size; - const payload_val = if (val.castTag(.eu_payload)) |pl| pl.data else Value.initTag(.undef); + + const payload_val = if (val.castTag(.eu_payload)) |pl| pl.data else Value.undef; if (eu_layout.error_first) { try self.lower(Type.anyerror, error_val); @@ -1021,7 +1022,7 @@ pub const DeclGen = struct { return try self.constant(Type.anyerror, error_val, repr); } - const payload_val = if (val.castTag(.eu_payload)) |pl| pl.data else Value.initTag(.undef); + const payload_val = if (val.castTag(.eu_payload)) |pl| pl.data else Value.undef; var members: [2]IdRef = undefined; if (eu_layout.error_first) { @@ -1292,7 +1293,7 @@ pub const DeclGen = struct { var member_index: usize = 0; for (tuple.types, 0..) |field_ty, i| { const field_val = tuple.values[i]; - if (field_val.tag() != .unreachable_value or !field_ty.hasRuntimeBits(mod)) continue; + if (field_val.ip_index != .unreachable_value or !field_ty.hasRuntimeBits(mod)) continue; member_types[member_index] = try self.resolveType(field_ty, .indirect); member_index += 1; @@ -1596,7 +1597,7 @@ pub const DeclGen = struct { else decl.val; - if (init_val.tag() == .unreachable_value) { + if (init_val.ip_index == .unreachable_value) { return self.todo("importing extern variables", .{}); } diff --git a/src/type.zig b/src/type.zig index 1f970919c9..8cffddb31c 100644 --- a/src/type.zig +++ b/src/type.zig @@ -533,14 +533,14 @@ pub const Type = struct { for (a_tuple.values, 0..) |a_val, i| { const ty = a_tuple.types[i]; const b_val = b_tuple.values[i]; - if (a_val.tag() == .unreachable_value) { - if (b_val.tag() == .unreachable_value) { + if (a_val.ip_index == .unreachable_value) { + if (b_val.ip_index == .unreachable_value) { continue; } else { return false; } } else { - if (b_val.tag() == .unreachable_value) { + if (b_val.ip_index == .unreachable_value) { return false; } else { if (!Value.eql(a_val, b_val, ty, mod)) return false; @@ -569,14 +569,14 @@ pub const Type = struct { for (a_struct_obj.values, 0..) |a_val, i| { const ty = a_struct_obj.types[i]; const b_val = b_struct_obj.values[i]; - if (a_val.tag() == .unreachable_value) { - if (b_val.tag() == .unreachable_value) { + if (a_val.ip_index == .unreachable_value) { + if (b_val.ip_index == .unreachable_value) { continue; } else { return false; } } else { - if (b_val.tag() == .unreachable_value) { + if (b_val.ip_index == .unreachable_value) { return false; } else { if (!Value.eql(a_val, b_val, ty, mod)) return false; @@ -750,7 +750,7 @@ pub const Type = struct { for (tuple.types, 0..) 
|field_ty, i| { hashWithHasher(field_ty, hasher, mod); const field_val = tuple.values[i]; - if (field_val.tag() == .unreachable_value) continue; + if (field_val.ip_index == .unreachable_value) continue; field_val.hash(field_ty, hasher, mod); } }, @@ -764,7 +764,7 @@ pub const Type = struct { const field_val = struct_obj.values[i]; hasher.update(field_name); hashWithHasher(field_ty, hasher, mod); - if (field_val.tag() == .unreachable_value) continue; + if (field_val.ip_index == .unreachable_value) continue; field_val.hash(field_ty, hasher, mod); } }, @@ -1139,11 +1139,11 @@ pub const Type = struct { for (tuple.types, 0..) |field_ty, i| { if (i != 0) try writer.writeAll(", "); const val = tuple.values[i]; - if (val.tag() != .unreachable_value) { + if (val.ip_index != .unreachable_value) { try writer.writeAll("comptime "); } try field_ty.dump("", .{}, writer); - if (val.tag() != .unreachable_value) { + if (val.ip_index != .unreachable_value) { try writer.print(" = {}", .{val.fmtDebug()}); } } @@ -1156,13 +1156,13 @@ pub const Type = struct { for (anon_struct.types, 0..) |field_ty, i| { if (i != 0) try writer.writeAll(", "); const val = anon_struct.values[i]; - if (val.tag() != .unreachable_value) { + if (val.ip_index != .unreachable_value) { try writer.writeAll("comptime "); } try writer.writeAll(anon_struct.names[i]); try writer.writeAll(": "); try field_ty.dump("", .{}, writer); - if (val.tag() != .unreachable_value) { + if (val.ip_index != .unreachable_value) { try writer.print(" = {}", .{val.fmtDebug()}); } } @@ -1408,11 +1408,11 @@ pub const Type = struct { for (tuple.types, 0..) |field_ty, i| { if (i != 0) try writer.writeAll(", "); const val = tuple.values[i]; - if (val.tag() != .unreachable_value) { + if (val.ip_index != .unreachable_value) { try writer.writeAll("comptime "); } try print(field_ty, writer, mod); - if (val.tag() != .unreachable_value) { + if (val.ip_index != .unreachable_value) { try writer.print(" = {}", .{val.fmtValue(field_ty, mod)}); } } @@ -1425,7 +1425,7 @@ pub const Type = struct { for (anon_struct.types, 0..) |field_ty, i| { if (i != 0) try writer.writeAll(", "); const val = anon_struct.values[i]; - if (val.tag() != .unreachable_value) { + if (val.ip_index != .unreachable_value) { try writer.writeAll("comptime "); } try writer.writeAll(anon_struct.names[i]); @@ -1433,7 +1433,7 @@ pub const Type = struct { try print(field_ty, writer, mod); - if (val.tag() != .unreachable_value) { + if (val.ip_index != .unreachable_value) { try writer.print(" = {}", .{val.fmtValue(field_ty, mod)}); } } @@ -1770,7 +1770,7 @@ pub const Type = struct { const tuple = ty.tupleFields(); for (tuple.types, 0..) |field_ty, i| { const val = tuple.values[i]; - if (val.tag() != .unreachable_value) continue; // comptime field + if (val.ip_index != .unreachable_value) continue; // comptime field if (try field_ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat)) return true; } return false; @@ -2283,7 +2283,7 @@ pub const Type = struct { var big_align: u32 = 0; for (tuple.types, 0..) 
|field_ty, i| { const val = tuple.values[i]; - if (val.tag() != .unreachable_value) continue; // comptime field + if (val.ip_index != .unreachable_value) continue; // comptime field if (!(field_ty.hasRuntimeBits(mod))) continue; switch (try field_ty.abiAlignmentAdvanced(mod, strat)) { @@ -3845,7 +3845,7 @@ pub const Type = struct { => return null, .void => return Value.void, - .noreturn => return Value.initTag(.unreachable_value), + .noreturn => return Value.@"unreachable", .null => return Value.null, .undefined => return Value.undef, @@ -3896,7 +3896,7 @@ pub const Type = struct { .tuple, .anon_struct => { const tuple = ty.tupleFields(); for (tuple.values, 0..) |val, i| { - const is_comptime = val.tag() != .unreachable_value; + const is_comptime = val.ip_index != .unreachable_value; if (is_comptime) continue; if (tuple.types[i].onePossibleValue(mod) != null) continue; return null; @@ -3919,7 +3919,7 @@ pub const Type = struct { return null; } switch (enum_full.fields.count()) { - 0 => return Value.initTag(.unreachable_value), + 0 => return Value.@"unreachable", 1 => if (enum_full.values.count() == 0) { return Value.zero; // auto-numbered } else { @@ -3931,7 +3931,7 @@ pub const Type = struct { .enum_simple => { const enum_simple = ty.castTag(.enum_simple).?.data; switch (enum_simple.fields.count()) { - 0 => return Value.initTag(.unreachable_value), + 0 => return Value.@"unreachable", 1 => return Value.zero, else => return null, } @@ -3947,7 +3947,7 @@ pub const Type = struct { .@"union", .union_safety_tagged, .union_tagged => { const union_obj = ty.cast(Payload.Union).?.data; const tag_val = union_obj.tag_ty.onePossibleValue(mod) orelse return null; - if (union_obj.fields.count() == 0) return Value.initTag(.unreachable_value); + if (union_obj.fields.count() == 0) return Value.@"unreachable"; const only_field = union_obj.fields.values()[0]; const val_val = only_field.ty.onePossibleValue(mod) orelse return null; _ = tag_val; @@ -4075,7 +4075,7 @@ pub const Type = struct { .tuple, .anon_struct => { const tuple = ty.tupleFields(); for (tuple.types, 0..) |field_ty, i| { - const have_comptime_val = tuple.values[i].tag() != .unreachable_value; + const have_comptime_val = tuple.values[i].ip_index != .unreachable_value; if (!have_comptime_val and field_ty.comptimeOnly(mod)) return true; } return false; @@ -4514,7 +4514,7 @@ pub const Type = struct { .tuple => { const tuple = ty.castTag(.tuple).?.data; const val = tuple.values[index]; - if (val.tag() == .unreachable_value) { + if (val.ip_index == .unreachable_value) { return tuple.types[index].onePossibleValue(mod); } else { return val; @@ -4523,7 +4523,7 @@ pub const Type = struct { .anon_struct => { const anon_struct = ty.castTag(.anon_struct).?.data; const val = anon_struct.values[index]; - if (val.tag() == .unreachable_value) { + if (val.ip_index == .unreachable_value) { return anon_struct.types[index].onePossibleValue(mod); } else { return val; @@ -4544,12 +4544,12 @@ pub const Type = struct { .tuple => { const tuple = ty.castTag(.tuple).?.data; const val = tuple.values[index]; - return val.tag() != .unreachable_value; + return val.ip_index != .unreachable_value; }, .anon_struct => { const anon_struct = ty.castTag(.anon_struct).?.data; const val = anon_struct.values[index]; - return val.tag() != .unreachable_value; + return val.ip_index != .unreachable_value; }, else => unreachable, } @@ -4647,7 +4647,7 @@ pub const Type = struct { for (tuple.types, 0..) 
|field_ty, i| { const field_val = tuple.values[i]; - if (field_val.tag() != .unreachable_value or !field_ty.hasRuntimeBits(mod)) { + if (field_val.ip_index != .unreachable_value or !field_ty.hasRuntimeBits(mod)) { // comptime field if (i == index) return offset; continue; diff --git a/src/value.zig b/src/value.zig index 6f7210c884..f1d706aa09 100644 --- a/src/value.zig +++ b/src/value.zig @@ -33,13 +33,10 @@ pub const Value = struct { // Keep in sync with tools/stage2_pretty_printers_common.py pub const Tag = enum(usize) { // The first section of this enum are tags that require no payload. - undef, zero, one, - unreachable_value, /// The only possible value for a particular type, which is stored externally. the_only_possible_value, - null_value, empty_struct_value, empty_array, // See last_no_payload_tag below. @@ -132,14 +129,11 @@ pub const Value = struct { pub fn Type(comptime t: Tag) type { return switch (t) { - .undef, .zero, .one, - .unreachable_value, .the_only_possible_value, .empty_struct_value, .empty_array, - .null_value, => @compileError("Value Tag " ++ @tagName(t) ++ " has no payload"), .int_big_positive, @@ -287,13 +281,10 @@ pub const Value = struct { .legacy = .{ .tag_if_small_enough = self.legacy.tag_if_small_enough }, }; } else switch (self.legacy.ptr_otherwise.tag) { - .undef, .zero, .one, - .unreachable_value, .the_only_possible_value, .empty_array, - .null_value, .empty_struct_value, => unreachable, @@ -522,7 +513,7 @@ pub const Value = struct { ) !void { comptime assert(fmt.len == 0); if (start_val.ip_index != .none) { - try out_stream.print("(interned {d})", .{@enumToInt(start_val.ip_index)}); + try out_stream.print("(interned: {})", .{start_val.ip_index}); return; } var val = start_val; @@ -534,11 +525,8 @@ pub const Value = struct { .@"union" => { return out_stream.writeAll("(union value)"); }, - .null_value => return out_stream.writeAll("null"), - .undef => return out_stream.writeAll("undefined"), .zero => return out_stream.writeAll("0"), .one => return out_stream.writeAll("1"), - .unreachable_value => return out_stream.writeAll("unreachable"), .the_only_possible_value => return out_stream.writeAll("(the only possible value)"), .ty => return val.castTag(.ty).?.data.dump("", options, out_stream), .lazy_align => { @@ -811,8 +799,9 @@ pub const Value = struct { switch (val.ip_index) { .bool_false => return BigIntMutable.init(&space.limbs, 0).toConst(), .bool_true => return BigIntMutable.init(&space.limbs, 1).toConst(), + .undef => unreachable, + .null_value => return BigIntMutable.init(&space.limbs, 0).toConst(), .none => switch (val.tag()) { - .null_value, .zero, .the_only_possible_value, // i0, u0 => return BigIntMutable.init(&space.limbs, 0).toConst(), @@ -832,8 +821,6 @@ pub const Value = struct { .int_big_positive => return val.castTag(.int_big_positive).?.asBigInt(), .int_big_negative => return val.castTag(.int_big_negative).?.asBigInt(), - .undef => unreachable, - .lazy_align => { const ty = val.castTag(.lazy_align).?.data; if (opt_sema) |sema| { @@ -880,6 +867,7 @@ pub const Value = struct { switch (val.ip_index) { .bool_false => return 0, .bool_true => return 1, + .undef => unreachable, .none => switch (val.tag()) { .zero, .the_only_possible_value, // i0, u0 @@ -892,8 +880,6 @@ pub const Value = struct { .int_big_positive => return val.castTag(.int_big_positive).?.asBigInt().to(u64) catch null, .int_big_negative => return val.castTag(.int_big_negative).?.asBigInt().to(u64) catch null, - .undef => unreachable, - .lazy_align => { const ty = 
val.castTag(.lazy_align).?.data; if (opt_sema) |sema| { @@ -913,9 +899,9 @@ pub const Value = struct { else => return null, }, - else => switch (mod.intern_pool.indexToKey(val.ip_index)) { - .int => |int| return int.big_int.to(u64) catch null, - else => unreachable, + else => return switch (mod.intern_pool.indexToKey(val.ip_index)) { + .int => |int| int.big_int.to(u64) catch null, + else => null, }, } } @@ -930,6 +916,7 @@ pub const Value = struct { switch (val.ip_index) { .bool_false => return 0, .bool_true => return 1, + .undef => unreachable, .none => switch (val.tag()) { .zero, .the_only_possible_value, // i0, u0 @@ -951,7 +938,6 @@ pub const Value = struct { return @intCast(i64, ty.abiSize(mod)); }, - .undef => unreachable, else => unreachable, }, else => switch (mod.intern_pool.indexToKey(val.ip_index)) { @@ -2032,8 +2018,7 @@ pub const Value = struct { const a_tag = a.tag(); const b_tag = b.tag(); if (a_tag == b_tag) switch (a_tag) { - .undef => return true, - .null_value, .the_only_possible_value, .empty_struct_value => return true, + .the_only_possible_value, .empty_struct_value => return true, .enum_literal => { const a_name = a.castTag(.enum_literal).?.data; const b_name = b.castTag(.enum_literal).?.data; @@ -2162,9 +2147,7 @@ pub const Value = struct { return eqlAdvanced(a_union.val, active_field_ty, b_union.val, active_field_ty, mod, opt_sema); }, else => {}, - } else if (b_tag == .null_value or b_tag == .@"error") { - return false; - } else if (a_tag == .undef or b_tag == .undef) { + } else if (b_tag == .@"error") { return false; } @@ -2283,7 +2266,7 @@ pub const Value = struct { if (a_nan) return true; return a_float == b_float; }, - .Optional => if (a_tag != .null_value and b_tag == .opt_payload) { + .Optional => if (b_tag == .opt_payload) { var sub_pl: Payload.SubValue = .{ .base = .{ .tag = b.tag() }, .data = a, @@ -2301,7 +2284,7 @@ pub const Value = struct { }, else => {}, } - if (a_tag == .null_value or a_tag == .@"error") return false; + if (a_tag == .@"error") return false; return (try orderAdvanced(a, b, mod, opt_sema)).compare(.eq); } @@ -2642,7 +2625,6 @@ pub const Value = struct { .zero, .one, - .null_value, .int_u64, .int_i64, .int_big_positive, @@ -2717,102 +2699,108 @@ pub const Value = struct { arena: ?Allocator, buffer: *ElemValueBuffer, ) error{OutOfMemory}!Value { - switch (val.tag()) { - // This is the case of accessing an element of an undef array. + switch (val.ip_index) { .undef => return Value.undef, - .empty_array => unreachable, // out of bounds array index - .empty_struct_value => unreachable, // out of bounds array index + .none => switch (val.tag()) { + // This is the case of accessing an element of an undef array. + .empty_array => unreachable, // out of bounds array index + .empty_struct_value => unreachable, // out of bounds array index - .empty_array_sentinel => { - assert(index == 0); // The only valid index for an empty array with sentinel. - return val.castTag(.empty_array_sentinel).?.data; - }, + .empty_array_sentinel => { + assert(index == 0); // The only valid index for an empty array with sentinel. 
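// Editorial aside (sketch, not part of the patch): this elemValueAdvanced
// restructure keeps the function's caller-provided-buffer convention. When
// `arena` is null, the element Value is materialized in `buffer` instead of
// being heap-allocated, so single-element reads remain allocation free:
//
//     var buf: Value.ElemValueBuffer = undefined;
//     const first = try val.elemValueAdvanced(mod, 0, null, &buf);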
+ return val.castTag(.empty_array_sentinel).?.data; + }, - .bytes => { - const byte = val.castTag(.bytes).?.data[index]; - if (arena) |a| { - return Tag.int_u64.create(a, byte); - } else { - buffer.* = .{ - .base = .{ .tag = .int_u64 }, - .data = byte, - }; - return initPayload(&buffer.base); - } - }, - .str_lit => { - const str_lit = val.castTag(.str_lit).?.data; - const bytes = mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; - const byte = bytes[index]; - if (arena) |a| { - return Tag.int_u64.create(a, byte); - } else { - buffer.* = .{ - .base = .{ .tag = .int_u64 }, - .data = byte, - }; - return initPayload(&buffer.base); - } - }, + .bytes => { + const byte = val.castTag(.bytes).?.data[index]; + if (arena) |a| { + return Tag.int_u64.create(a, byte); + } else { + buffer.* = .{ + .base = .{ .tag = .int_u64 }, + .data = byte, + }; + return initPayload(&buffer.base); + } + }, + .str_lit => { + const str_lit = val.castTag(.str_lit).?.data; + const bytes = mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; + const byte = bytes[index]; + if (arena) |a| { + return Tag.int_u64.create(a, byte); + } else { + buffer.* = .{ + .base = .{ .tag = .int_u64 }, + .data = byte, + }; + return initPayload(&buffer.base); + } + }, - // No matter the index; all the elements are the same! - .repeated => return val.castTag(.repeated).?.data, + // No matter the index; all the elements are the same! + .repeated => return val.castTag(.repeated).?.data, - .aggregate => return val.castTag(.aggregate).?.data[index], - .slice => return val.castTag(.slice).?.data.ptr.elemValueAdvanced(mod, index, arena, buffer), + .aggregate => return val.castTag(.aggregate).?.data[index], + .slice => return val.castTag(.slice).?.data.ptr.elemValueAdvanced(mod, index, arena, buffer), - .decl_ref => return mod.declPtr(val.castTag(.decl_ref).?.data).val.elemValueAdvanced(mod, index, arena, buffer), - .decl_ref_mut => return mod.declPtr(val.castTag(.decl_ref_mut).?.data.decl_index).val.elemValueAdvanced(mod, index, arena, buffer), - .comptime_field_ptr => return val.castTag(.comptime_field_ptr).?.data.field_val.elemValueAdvanced(mod, index, arena, buffer), - .elem_ptr => { - const data = val.castTag(.elem_ptr).?.data; - return data.array_ptr.elemValueAdvanced(mod, index + data.index, arena, buffer); - }, - .field_ptr => { - const data = val.castTag(.field_ptr).?.data; - if (data.container_ptr.pointerDecl()) |decl_index| { - const container_decl = mod.declPtr(decl_index); - const field_type = data.container_ty.structFieldType(data.field_index); - const field_val = container_decl.val.fieldValue(field_type, mod, data.field_index); - return field_val.elemValueAdvanced(mod, index, arena, buffer); - } else unreachable; - }, + .decl_ref => return mod.declPtr(val.castTag(.decl_ref).?.data).val.elemValueAdvanced(mod, index, arena, buffer), + .decl_ref_mut => return mod.declPtr(val.castTag(.decl_ref_mut).?.data.decl_index).val.elemValueAdvanced(mod, index, arena, buffer), + .comptime_field_ptr => return val.castTag(.comptime_field_ptr).?.data.field_val.elemValueAdvanced(mod, index, arena, buffer), + .elem_ptr => { + const data = val.castTag(.elem_ptr).?.data; + return data.array_ptr.elemValueAdvanced(mod, index + data.index, arena, buffer); + }, + .field_ptr => { + const data = val.castTag(.field_ptr).?.data; + if (data.container_ptr.pointerDecl()) |decl_index| { + const container_decl = mod.declPtr(decl_index); + const field_type = data.container_ty.structFieldType(data.field_index); + const field_val = 
container_decl.val.fieldValue(field_type, mod, data.field_index); + return field_val.elemValueAdvanced(mod, index, arena, buffer); + } else unreachable; + }, - // The child type of arrays which have only one possible value need - // to have only one possible value itself. - .the_only_possible_value => return val, + // The child type of arrays which have only one possible value need + // to have only one possible value itself. + .the_only_possible_value => return val, - .opt_payload_ptr => return val.castTag(.opt_payload_ptr).?.data.container_ptr.elemValueAdvanced(mod, index, arena, buffer), - .eu_payload_ptr => return val.castTag(.eu_payload_ptr).?.data.container_ptr.elemValueAdvanced(mod, index, arena, buffer), + .opt_payload_ptr => return val.castTag(.opt_payload_ptr).?.data.container_ptr.elemValueAdvanced(mod, index, arena, buffer), + .eu_payload_ptr => return val.castTag(.eu_payload_ptr).?.data.container_ptr.elemValueAdvanced(mod, index, arena, buffer), - .opt_payload => return val.castTag(.opt_payload).?.data.elemValueAdvanced(mod, index, arena, buffer), - .eu_payload => return val.castTag(.eu_payload).?.data.elemValueAdvanced(mod, index, arena, buffer), + .opt_payload => return val.castTag(.opt_payload).?.data.elemValueAdvanced(mod, index, arena, buffer), + .eu_payload => return val.castTag(.eu_payload).?.data.elemValueAdvanced(mod, index, arena, buffer), + else => unreachable, + }, else => unreachable, } } /// Returns true if a Value is backed by a variable pub fn isVariable(val: Value, mod: *Module) bool { - return switch (val.tag()) { - .slice => val.castTag(.slice).?.data.ptr.isVariable(mod), - .comptime_field_ptr => val.castTag(.comptime_field_ptr).?.data.field_val.isVariable(mod), - .elem_ptr => val.castTag(.elem_ptr).?.data.array_ptr.isVariable(mod), - .field_ptr => val.castTag(.field_ptr).?.data.container_ptr.isVariable(mod), - .eu_payload_ptr => val.castTag(.eu_payload_ptr).?.data.container_ptr.isVariable(mod), - .opt_payload_ptr => val.castTag(.opt_payload_ptr).?.data.container_ptr.isVariable(mod), - .decl_ref => { - const decl = mod.declPtr(val.castTag(.decl_ref).?.data); - assert(decl.has_tv); - return decl.val.isVariable(mod); - }, - .decl_ref_mut => { - const decl = mod.declPtr(val.castTag(.decl_ref_mut).?.data.decl_index); - assert(decl.has_tv); - return decl.val.isVariable(mod); - }, + return switch (val.ip_index) { + .none => switch (val.tag()) { + .slice => val.castTag(.slice).?.data.ptr.isVariable(mod), + .comptime_field_ptr => val.castTag(.comptime_field_ptr).?.data.field_val.isVariable(mod), + .elem_ptr => val.castTag(.elem_ptr).?.data.array_ptr.isVariable(mod), + .field_ptr => val.castTag(.field_ptr).?.data.container_ptr.isVariable(mod), + .eu_payload_ptr => val.castTag(.eu_payload_ptr).?.data.container_ptr.isVariable(mod), + .opt_payload_ptr => val.castTag(.opt_payload_ptr).?.data.container_ptr.isVariable(mod), + .decl_ref => { + const decl = mod.declPtr(val.castTag(.decl_ref).?.data); + assert(decl.has_tv); + return decl.val.isVariable(mod); + }, + .decl_ref_mut => { + const decl = mod.declPtr(val.castTag(.decl_ref_mut).?.data.decl_index); + assert(decl.has_tv); + return decl.val.isVariable(mod); + }, - .variable => true, + .variable => true, + else => false, + }, else => false, }; } @@ -2878,39 +2866,46 @@ pub const Value = struct { } pub fn fieldValue(val: Value, ty: Type, mod: *const Module, index: usize) Value { - switch (val.tag()) { - .aggregate => { - const field_values = val.castTag(.aggregate).?.data; - return field_values[index]; - }, - .@"union" => 
{ - const payload = val.castTag(.@"union").?.data; - // TODO assert the tag is correct - return payload.val; - }, + switch (val.ip_index) { + .undef => return Value.undef, + .none => switch (val.tag()) { + .aggregate => { + const field_values = val.castTag(.aggregate).?.data; + return field_values[index]; + }, + .@"union" => { + const payload = val.castTag(.@"union").?.data; + // TODO assert the tag is correct + return payload.val; + }, - .the_only_possible_value => return ty.onePossibleValue(mod).?, + .the_only_possible_value => return ty.onePossibleValue(mod).?, - .empty_struct_value => { - if (ty.isSimpleTupleOrAnonStruct()) { - const tuple = ty.tupleFields(); - return tuple.values[index]; - } - if (ty.structFieldValueComptime(mod, index)) |some| { - return some; - } - unreachable; - }, - .undef => return Value.undef, + .empty_struct_value => { + if (ty.isSimpleTupleOrAnonStruct()) { + const tuple = ty.tupleFields(); + return tuple.values[index]; + } + if (ty.structFieldValueComptime(mod, index)) |some| { + return some; + } + unreachable; + }, + else => unreachable, + }, else => unreachable, } } pub fn unionTag(val: Value) Value { - switch (val.tag()) { - .undef, .enum_field_index => return val, - .@"union" => return val.castTag(.@"union").?.data.tag, + switch (val.ip_index) { + .undef => return val, + .none => switch (val.tag()) { + .enum_field_index => return val, + .@"union" => return val.castTag(.@"union").?.data.tag, + else => unreachable, + }, else => unreachable, } } @@ -2946,15 +2941,15 @@ pub const Value = struct { }); } - pub fn isUndef(self: Value) bool { - return self.tag() == .undef; + pub fn isUndef(val: Value) bool { + return val.ip_index == .undef; } /// TODO: check for cases such as array that is not marked undef but all the element /// values are marked undef, or struct that is not marked undef but all fields are marked /// undef, etc. - pub fn isUndefDeep(self: Value) bool { - return self.isUndef(); + pub fn isUndefDeep(val: Value) bool { + return val.isUndef(); } /// Returns true if any value contained in `self` is undefined. @@ -2962,27 +2957,29 @@ pub const Value = struct { /// values are marked undef, or struct that is not marked undef but all fields are marked /// undef, etc. pub fn anyUndef(self: Value, mod: *Module) bool { - switch (self.tag()) { - .slice => { - const payload = self.castTag(.slice).?; - const len = payload.data.len.toUnsignedInt(mod); - - var elem_value_buf: ElemValueBuffer = undefined; - var i: usize = 0; - while (i < len) : (i += 1) { - const elem_val = payload.data.ptr.elemValueBuffer(mod, i, &elem_value_buf); - if (elem_val.anyUndef(mod)) return true; - } - }, + switch (self.ip_index) { + .undef => return true, + .none => switch (self.tag()) { + .slice => { + const payload = self.castTag(.slice).?; + const len = payload.data.len.toUnsignedInt(mod); + + var elem_value_buf: ElemValueBuffer = undefined; + var i: usize = 0; + while (i < len) : (i += 1) { + const elem_val = payload.data.ptr.elemValueBuffer(mod, i, &elem_value_buf); + if (elem_val.anyUndef(mod)) return true; + } + }, - .aggregate => { - const payload = self.castTag(.aggregate).?; - for (payload.data) |val| { - if (val.anyUndef(mod)) return true; - } + .aggregate => { + const payload = self.castTag(.aggregate).?; + for (payload.data) |val| { + if (val.anyUndef(mod)) return true; + } + }, + else => {}, }, - - .undef => return true, else => {}, } @@ -2992,30 +2989,33 @@ pub const Value = struct { /// Asserts the value is not undefined and not unreachable. 
/// Integer value 0 is considered null because of C pointers. pub fn isNull(self: Value, mod: *const Module) bool { - return switch (self.tag()) { + return switch (self.ip_index) { + .undef => unreachable, + .unreachable_value => unreachable, .null_value => true, - .opt_payload => false, + .none => switch (self.tag()) { + .opt_payload => false, - // If it's not one of those two tags then it must be a C pointer value, - // in which case the value 0 is null and other values are non-null. + // If it's not one of those two tags then it must be a C pointer value, + // in which case the value 0 is null and other values are non-null. - .zero, - .the_only_possible_value, - => true, + .zero, + .the_only_possible_value, + => true, - .one => false, + .one => false, - .int_u64, - .int_i64, - .int_big_positive, - .int_big_negative, - => self.orderAgainstZero(mod).compare(.eq), + .int_u64, + .int_i64, + .int_big_positive, + .int_big_negative, + => self.orderAgainstZero(mod).compare(.eq), - .undef => unreachable, - .unreachable_value => unreachable, - .inferred_alloc => unreachable, - .inferred_alloc_comptime => unreachable, + .inferred_alloc => unreachable, + .inferred_alloc_comptime => unreachable, + else => false, + }, else => false, }; } @@ -3025,18 +3025,21 @@ pub const Value = struct { /// something is an error or not because it works without having to figure out the /// string. pub fn getError(self: Value) ?[]const u8 { - return switch (self.tag()) { - .@"error" => self.castTag(.@"error").?.data.name, - .int_u64 => @panic("TODO"), - .int_i64 => @panic("TODO"), - .int_big_positive => @panic("TODO"), - .int_big_negative => @panic("TODO"), - .one => @panic("TODO"), + return switch (self.ip_index) { .undef => unreachable, .unreachable_value => unreachable, - .inferred_alloc => unreachable, - .inferred_alloc_comptime => unreachable, + .none => switch (self.tag()) { + .@"error" => self.castTag(.@"error").?.data.name, + .int_u64 => @panic("TODO"), + .int_i64 => @panic("TODO"), + .int_big_positive => @panic("TODO"), + .int_big_negative => @panic("TODO"), + .one => @panic("TODO"), + .inferred_alloc => unreachable, + .inferred_alloc_comptime => unreachable, + else => null, + }, else => null, }; } @@ -3044,13 +3047,16 @@ pub const Value = struct { /// Assumes the type is an error union. Returns true if and only if the value is /// the error union payload, not an error. pub fn errorUnionIsPayload(val: Value) bool { - return switch (val.tag()) { - .eu_payload => true, - else => false, - + return switch (val.ip_index) { .undef => unreachable, - .inferred_alloc => unreachable, - .inferred_alloc_comptime => unreachable, + .none => switch (val.tag()) { + .eu_payload => true, + else => false, + + .inferred_alloc => unreachable, + .inferred_alloc_comptime => unreachable, + }, + else => false, }; } @@ -3065,17 +3071,20 @@ pub const Value = struct { /// Valid for all types. Asserts the value is not undefined. 
pub fn isFloat(self: Value) bool { - return switch (self.tag()) { + return switch (self.ip_index) { .undef => unreachable, - .inferred_alloc => unreachable, - .inferred_alloc_comptime => unreachable, + .none => switch (self.tag()) { + .inferred_alloc => unreachable, + .inferred_alloc_comptime => unreachable, - .float_16, - .float_32, - .float_64, - .float_80, - .float_128, - => true, + .float_16, + .float_32, + .float_64, + .float_80, + .float_128, + => true, + else => false, + }, else => false, }; } @@ -3102,40 +3111,44 @@ pub const Value = struct { pub fn intToFloatScalar(val: Value, arena: Allocator, float_ty: Type, mod: *Module, opt_sema: ?*Sema) !Value { const target = mod.getTarget(); - switch (val.tag()) { - .undef, .zero, .one => return val, - .the_only_possible_value => return Value.initTag(.zero), // for i0, u0 - .int_u64 => { - return intToFloatInner(val.castTag(.int_u64).?.data, arena, float_ty, target); - }, - .int_i64 => { - return intToFloatInner(val.castTag(.int_i64).?.data, arena, float_ty, target); - }, - .int_big_positive => { - const limbs = val.castTag(.int_big_positive).?.data; - const float = bigIntToFloat(limbs, true); - return floatToValue(float, arena, float_ty, target); - }, - .int_big_negative => { - const limbs = val.castTag(.int_big_negative).?.data; - const float = bigIntToFloat(limbs, false); - return floatToValue(float, arena, float_ty, target); - }, - .lazy_align => { - const ty = val.castTag(.lazy_align).?.data; - if (opt_sema) |sema| { - return intToFloatInner((try ty.abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar, arena, float_ty, target); - } else { - return intToFloatInner(ty.abiAlignment(mod), arena, float_ty, target); - } - }, - .lazy_size => { - const ty = val.castTag(.lazy_size).?.data; - if (opt_sema) |sema| { - return intToFloatInner((try ty.abiSizeAdvanced(mod, .{ .sema = sema })).scalar, arena, float_ty, target); - } else { - return intToFloatInner(ty.abiSize(mod), arena, float_ty, target); - } + switch (val.ip_index) { + .undef => return val, + .none => switch (val.tag()) { + .zero, .one => return val, + .the_only_possible_value => return Value.initTag(.zero), // for i0, u0 + .int_u64 => { + return intToFloatInner(val.castTag(.int_u64).?.data, arena, float_ty, target); + }, + .int_i64 => { + return intToFloatInner(val.castTag(.int_i64).?.data, arena, float_ty, target); + }, + .int_big_positive => { + const limbs = val.castTag(.int_big_positive).?.data; + const float = bigIntToFloat(limbs, true); + return floatToValue(float, arena, float_ty, target); + }, + .int_big_negative => { + const limbs = val.castTag(.int_big_negative).?.data; + const float = bigIntToFloat(limbs, false); + return floatToValue(float, arena, float_ty, target); + }, + .lazy_align => { + const ty = val.castTag(.lazy_align).?.data; + if (opt_sema) |sema| { + return intToFloatInner((try ty.abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar, arena, float_ty, target); + } else { + return intToFloatInner(ty.abiAlignment(mod), arena, float_ty, target); + } + }, + .lazy_size => { + const ty = val.castTag(.lazy_size).?.data; + if (opt_sema) |sema| { + return intToFloatInner((try ty.abiSizeAdvanced(mod, .{ .sema = sema })).scalar, arena, float_ty, target); + } else { + return intToFloatInner(ty.abiSize(mod), arena, float_ty, target); + } + }, + else => unreachable, }, else => unreachable, } @@ -3381,7 +3394,7 @@ pub const Value = struct { arena: Allocator, mod: *Module, ) !Value { - if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef); + if (lhs.isUndef() or 
rhs.isUndef()) return Value.undef; if (ty.zigTypeTag(mod) == .ComptimeInt) { return intMul(lhs, rhs, ty, arena, mod); @@ -3492,7 +3505,7 @@ pub const Value = struct { /// operands must be integers; handles undefined. pub fn bitwiseNotScalar(val: Value, ty: Type, arena: Allocator, mod: *Module) !Value { - if (val.isUndef()) return Value.initTag(.undef); + if (val.isUndef()) return Value.undef; const info = ty.intInfo(mod); @@ -3532,7 +3545,7 @@ pub const Value = struct { /// operands must be integers; handles undefined. pub fn bitwiseAndScalar(lhs: Value, rhs: Value, arena: Allocator, mod: *Module) !Value { - if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef); + if (lhs.isUndef() or rhs.isUndef()) return Value.undef; // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. @@ -3568,7 +3581,7 @@ pub const Value = struct { /// operands must be integers; handles undefined. pub fn bitwiseNandScalar(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value { - if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef); + if (lhs.isUndef() or rhs.isUndef()) return Value.undef; const anded = try bitwiseAnd(lhs, rhs, ty, arena, mod); @@ -3598,7 +3611,7 @@ pub const Value = struct { /// operands must be integers; handles undefined. pub fn bitwiseOrScalar(lhs: Value, rhs: Value, arena: Allocator, mod: *Module) !Value { - if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef); + if (lhs.isUndef() or rhs.isUndef()) return Value.undef; // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. @@ -3633,7 +3646,7 @@ pub const Value = struct { /// operands must be integers; handles undefined. pub fn bitwiseXorScalar(lhs: Value, rhs: Value, arena: Allocator, mod: *Module) !Value { - if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef); + if (lhs.isUndef() or rhs.isUndef()) return Value.undef; // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. 
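An editorial aside before the final hunk, sketching what the interned singletons buy (illustrative, not prescriptive): once `undef`, `null`, and the new `unreachable` constant are plain `ip_index` values rather than tagged payloads, identity checks reduce to integer comparisons with no pointer chasing, mirroring the updated `isUndef` earlier in this diff.

    // Hypothetical helper, assuming the constants defined in the next hunk.
    fn isUnreachableValue(val: Value) bool {
        return val.ip_index == .unreachable_value;
    }
    // e.g. Value.undef.isUndef() and
    // Value.@"unreachable".ip_index == .unreachable_value hold by construction.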
@@ -5393,11 +5406,12 @@ pub const Value = struct { .ip_index = .none, .legacy = .{ .ptr_otherwise = &negative_one_payload.base }, }; - pub const undef = initTag(.undef); + pub const undef: Value = .{ .ip_index = .undef, .legacy = undefined }; pub const @"void": Value = .{ .ip_index = .void_value, .legacy = undefined }; - pub const @"null" = initTag(.null_value); + pub const @"null": Value = .{ .ip_index = .null_value, .legacy = undefined }; pub const @"false": Value = .{ .ip_index = .bool_false, .legacy = undefined }; pub const @"true": Value = .{ .ip_index = .bool_true, .legacy = undefined }; + pub const @"unreachable": Value = .{ .ip_index = .unreachable_value, .legacy = undefined }; pub const generic_poison: Value = .{ .ip_index = .generic_poison, .legacy = undefined }; pub const generic_poison_type: Value = .{ .ip_index = .generic_poison_type, .legacy = undefined }; -- cgit v1.2.3 From 9ec0017f460854300004ab263bf585c2d376d1fb Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 5 May 2023 16:32:38 -0700 Subject: stage2: migrate many pointer types to the InternPool --- src/Air.zig | 14 +++++--- src/InternPool.zig | 41 ++++++++++++++------- src/Sema.zig | 42 +++++++++++----------- src/arch/aarch64/CodeGen.zig | 8 +++-- src/arch/arm/CodeGen.zig | 8 +++-- src/arch/riscv64/CodeGen.zig | 2 +- src/arch/sparc64/CodeGen.zig | 2 +- src/codegen/c.zig | 12 +++---- src/codegen/llvm.zig | 10 +++--- src/codegen/spirv.zig | 13 ++++--- src/type.zig | 85 ++++++++++++++++++++++++++++++++------------ 11 files changed, 152 insertions(+), 85 deletions(-) (limited to 'src/arch') diff --git a/src/Air.zig b/src/Air.zig index 3c04d17073..43fc55e811 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -1427,8 +1427,11 @@ pub fn getRefType(air: Air, ref: Air.Inst.Ref) Type { const inst_index = ref_int - ref_start_index; const air_tags = air.instructions.items(.tag); const air_datas = air.instructions.items(.data); - assert(air_tags[inst_index] == .const_ty); - return air_datas[inst_index].ty; + return switch (air_tags[inst_index]) { + .const_ty => air_datas[inst_index].ty, + .interned => air_datas[inst_index].interned.toType(), + else => unreachable, + }; } /// Returns the requested data, as well as the new index which is at the start of the @@ -1492,6 +1495,7 @@ pub fn value(air: Air, inst: Inst.Ref, mod: *const Module) ?Value { switch (air.instructions.items(.tag)[inst_index]) { .constant => return air.values[air_datas[inst_index].ty_pl.payload], .const_ty => unreachable, + .interned => return air_datas[inst_index].interned.toValue(), else => return air.typeOfIndex(inst_index, mod.intern_pool).onePossibleValue(mod), } } @@ -1717,8 +1721,8 @@ pub fn mustLower(air: Air, inst: Air.Inst.Index, ip: InternPool) bool { => false, .assembly => @truncate(u1, air.extraData(Air.Asm, data.ty_pl.payload).data.flags >> 31) != 0, - .load => air.typeOf(data.ty_op.operand, ip).isVolatilePtr(), - .slice_elem_val, .ptr_elem_val => air.typeOf(data.bin_op.lhs, ip).isVolatilePtr(), - .atomic_load => air.typeOf(data.atomic_load.ptr, ip).isVolatilePtr(), + .load => air.typeOf(data.ty_op.operand, ip).isVolatilePtrIp(ip), + .slice_elem_val, .ptr_elem_val => air.typeOf(data.bin_op.lhs, ip).isVolatilePtrIp(ip), + .atomic_load => air.typeOf(data.atomic_load.ptr, ip).isVolatilePtrIp(ip), }; } diff --git a/src/InternPool.zig b/src/InternPool.zig index 1da0572bd4..36afbadf3d 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -73,7 +73,7 @@ pub const Key = union(enum) { /// If zero use pointee_type.abiAlignment() /// When creating pointer 
types, if alignment is equal to pointee type /// abi alignment, this value should be set to 0 instead. - alignment: u16 = 0, + alignment: u64 = 0, /// If this is non-zero it means the pointer points to a sub-byte /// range of data, which is backed by a "host integer" with this /// number of bytes. @@ -90,9 +90,9 @@ pub const Key = union(enum) { /// an appropriate value for this field. address_space: std.builtin.AddressSpace = .generic, - pub const VectorIndex = enum(u32) { - none = std.math.maxInt(u32), - runtime = std.math.maxInt(u32) - 1, + pub const VectorIndex = enum(u16) { + none = std.math.maxInt(u16), + runtime = std.math.maxInt(u16) - 1, _, }; }; @@ -806,16 +806,33 @@ pub const Pointer = struct { sentinel: Index, flags: Flags, packed_offset: PackedOffset, - vector_index: VectorIndex, + + /// Stored as a power-of-two, with one special value to indicate none. + pub const Alignment = enum(u6) { + none = std.math.maxInt(u6), + _, + + pub fn toByteUnits(a: Alignment, default: u64) u64 { + return switch (a) { + .none => default, + _ => @as(u64, 1) << @enumToInt(a), + }; + } + + pub fn fromByteUnits(n: u64) Alignment { + if (n == 0) return .none; + return @intToEnum(Alignment, @ctz(n)); + } + }; pub const Flags = packed struct(u32) { - alignment: u16, + size: Size, + alignment: Alignment, is_const: bool, is_volatile: bool, is_allowzero: bool, - size: Size, address_space: AddressSpace, - _: u7 = undefined, + vector_index: VectorIndex, }; pub const PackedOffset = packed struct(u32) { @@ -928,13 +945,13 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { return .{ .ptr_type = .{ .elem_type = ptr_info.child, .sentinel = ptr_info.sentinel, - .alignment = ptr_info.flags.alignment, + .alignment = ptr_info.flags.alignment.toByteUnits(0), .size = ptr_info.flags.size, .is_const = ptr_info.flags.is_const, .is_volatile = ptr_info.flags.is_volatile, .is_allowzero = ptr_info.flags.is_allowzero, .address_space = ptr_info.flags.address_space, - .vector_index = ptr_info.vector_index, + .vector_index = ptr_info.flags.vector_index, .host_size = ptr_info.packed_offset.host_size, .bit_offset = ptr_info.packed_offset.bit_offset, } }; @@ -1003,18 +1020,18 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .child = ptr_type.elem_type, .sentinel = ptr_type.sentinel, .flags = .{ - .alignment = ptr_type.alignment, + .alignment = Pointer.Alignment.fromByteUnits(ptr_type.alignment), .is_const = ptr_type.is_const, .is_volatile = ptr_type.is_volatile, .is_allowzero = ptr_type.is_allowzero, .size = ptr_type.size, .address_space = ptr_type.address_space, + .vector_index = ptr_type.vector_index, }, .packed_offset = .{ .host_size = ptr_type.host_size, .bit_offset = ptr_type.bit_offset, }, - .vector_index = ptr_type.vector_index, }), }); }, diff --git a/src/Sema.zig b/src/Sema.zig index 8abe6484ee..39f39b43d9 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -8400,7 +8400,7 @@ fn analyzeOptionalPayloadPtr( const child_type = opt_type.optionalChild(mod); const child_pointer = try Type.ptr(sema.arena, sema.mod, .{ .pointee_type = child_type, - .mutable = !optional_ptr_ty.isConstPtr(), + .mutable = !optional_ptr_ty.isConstPtr(mod), .@"addrspace" = optional_ptr_ty.ptrAddressSpace(mod), }); @@ -8594,7 +8594,7 @@ fn analyzeErrUnionPayloadPtr( const payload_ty = err_union_ty.errorUnionPayload(); const operand_pointer_ty = try Type.ptr(sema.arena, sema.mod, .{ .pointee_type = payload_ty, - .mutable = !operand_ty.isConstPtr(), + .mutable = !operand_ty.isConstPtr(mod), .@"addrspace" = 
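// Editorial aside (illustrative only, not part of the patch): the new
// Pointer.Alignment introduced in the InternPool.zig hunk above packs the
// alignment into 6 bits as a log2 exponent and reserves the maximum encoding
// as a "none" sentinel meaning "use the pointee's ABI alignment". Round
// trips, assuming those definitions:
//
//     Alignment.fromByteUnits(16).toByteUnits(0) == 16  // @ctz(16) == 4; 1 << 4
//     Alignment.fromByteUnits(0) == .none               // 0 encodes the default
//     Alignment.none.toByteUnits(8) == 8                // sentinel yields the default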
operand_ty.ptrAddressSpace(mod), }); @@ -10147,7 +10147,7 @@ fn zirSwitchCapture( const ptr_field_ty = try Type.ptr(sema.arena, sema.mod, .{ .pointee_type = field_ty, .mutable = operand_ptr_ty.ptrIsMutable(mod), - .@"volatile" = operand_ptr_ty.isVolatilePtr(), + .@"volatile" = operand_ptr_ty.isVolatilePtr(mod), .@"addrspace" = operand_ptr_ty.ptrAddressSpace(mod), }); return sema.addConstant( @@ -10166,7 +10166,7 @@ fn zirSwitchCapture( const ptr_field_ty = try Type.ptr(sema.arena, sema.mod, .{ .pointee_type = field_ty, .mutable = operand_ptr_ty.ptrIsMutable(mod), - .@"volatile" = operand_ptr_ty.isVolatilePtr(), + .@"volatile" = operand_ptr_ty.isVolatilePtr(mod), .@"addrspace" = operand_ptr_ty.ptrAddressSpace(mod), }); return block.addStructFieldPtr(operand_ptr, field_index, ptr_field_ty); @@ -15292,10 +15292,10 @@ fn zirCmpEq( } // comparing null with optionals - if (lhs_ty_tag == .Null and (rhs_ty_tag == .Optional or rhs_ty.isCPtr())) { + if (lhs_ty_tag == .Null and (rhs_ty_tag == .Optional or rhs_ty.isCPtr(mod))) { return sema.analyzeIsNull(block, src, rhs, op == .neq); } - if (rhs_ty_tag == .Null and (lhs_ty_tag == .Optional or lhs_ty.isCPtr())) { + if (rhs_ty_tag == .Null and (lhs_ty_tag == .Optional or lhs_ty.isCPtr(mod))) { return sema.analyzeIsNull(block, src, lhs, op == .neq); } @@ -22254,7 +22254,7 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void const target = sema.mod.getTarget(); const mod = sema.mod; - if (dest_ty.isConstPtr()) { + if (dest_ty.isConstPtr(mod)) { return sema.fail(block, dest_src, "cannot memcpy to constant pointer", .{}); } @@ -22452,7 +22452,7 @@ fn zirMemset(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void const dest_ptr_ty = sema.typeOf(dest_ptr); try checkMemOperand(sema, block, dest_src, dest_ptr_ty); - if (dest_ptr_ty.isConstPtr()) { + if (dest_ptr_ty.isConstPtr(mod)) { return sema.fail(block, dest_src, "cannot memset constant pointer", .{}); } @@ -24206,7 +24206,7 @@ fn fieldPtr( const result_ty = try Type.ptr(sema.arena, sema.mod, .{ .pointee_type = slice_ptr_ty, .mutable = attr_ptr_ty.ptrIsMutable(mod), - .@"volatile" = attr_ptr_ty.isVolatilePtr(), + .@"volatile" = attr_ptr_ty.isVolatilePtr(mod), .@"addrspace" = attr_ptr_ty.ptrAddressSpace(mod), }); @@ -24227,7 +24227,7 @@ fn fieldPtr( const result_ty = try Type.ptr(sema.arena, sema.mod, .{ .pointee_type = Type.usize, .mutable = attr_ptr_ty.ptrIsMutable(mod), - .@"volatile" = attr_ptr_ty.isVolatilePtr(), + .@"volatile" = attr_ptr_ty.isVolatilePtr(mod), .@"addrspace" = attr_ptr_ty.ptrAddressSpace(mod), }); @@ -24897,7 +24897,7 @@ fn unionFieldPtr( const ptr_field_ty = try Type.ptr(arena, sema.mod, .{ .pointee_type = field.ty, .mutable = union_ptr_ty.ptrIsMutable(mod), - .@"volatile" = union_ptr_ty.isVolatilePtr(), + .@"volatile" = union_ptr_ty.isVolatilePtr(mod), .@"addrspace" = union_ptr_ty.ptrAddressSpace(mod), }); const enum_field_index = @intCast(u32, union_obj.tag_ty.enumFieldIndex(field_name).?); @@ -25239,7 +25239,7 @@ fn tupleFieldPtr( const ptr_field_ty = try Type.ptr(sema.arena, sema.mod, .{ .pointee_type = field_ty, .mutable = tuple_ptr_ty.ptrIsMutable(mod), - .@"volatile" = tuple_ptr_ty.isVolatilePtr(), + .@"volatile" = tuple_ptr_ty.isVolatilePtr(mod), .@"addrspace" = tuple_ptr_ty.ptrAddressSpace(mod), }); @@ -25767,7 +25767,7 @@ fn coerceExtra( } // coercion from C pointer - if (inst_ty.isCPtr()) src_c_ptr: { + if (inst_ty.isCPtr(mod)) src_c_ptr: { if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :src_c_ptr; // In 
this case we must add a safety check because the C pointer // could be null. @@ -27255,7 +27255,7 @@ fn storePtr2( ) CompileError!void { const mod = sema.mod; const ptr_ty = sema.typeOf(ptr); - if (ptr_ty.isConstPtr()) + if (ptr_ty.isConstPtr(mod)) return sema.fail(block, ptr_src, "cannot assign to constant", .{}); const elem_ty = ptr_ty.childType(mod); @@ -29843,7 +29843,7 @@ fn analyzeSlice( const result = try block.addBitCast(return_ty, new_ptr); if (block.wantSafety()) { // requirement: slicing C ptr is non-null - if (ptr_ptr_child_ty.isCPtr()) { + if (ptr_ptr_child_ty.isCPtr(mod)) { const is_non_null = try sema.analyzeIsNull(block, ptr_src, ptr, true); try sema.addSafetyCheck(block, is_non_null, .unwrap_null); } @@ -29902,7 +29902,7 @@ fn analyzeSlice( try sema.requireRuntimeBlock(block, src, runtime_src); if (block.wantSafety()) { // requirement: slicing C ptr is non-null - if (ptr_ptr_child_ty.isCPtr()) { + if (ptr_ptr_child_ty.isCPtr(mod)) { const is_non_null = try sema.analyzeIsNull(block, ptr_src, ptr, true); try sema.addSafetyCheck(block, is_non_null, .unwrap_null); } @@ -30720,7 +30720,7 @@ fn resolvePeerTypes( err_set_ty = try chosen_set_ty.errorSetMerge(sema.arena, candidate_set_ty); } } - seen_const = seen_const or chosen_ty.isConstPtr(); + seen_const = seen_const or chosen_ty.isConstPtr(mod); chosen = candidate; chosen_i = candidate_i + 1; continue; @@ -30876,12 +30876,12 @@ fn resolvePeerTypes( .Optional => { const opt_child_ty = candidate_ty.optionalChild(mod); if ((try sema.coerceInMemoryAllowed(block, chosen_ty, opt_child_ty, false, target, src, src)) == .ok) { - seen_const = seen_const or opt_child_ty.isConstPtr(); + seen_const = seen_const or opt_child_ty.isConstPtr(mod); any_are_null = true; continue; } - seen_const = seen_const or chosen_ty.isConstPtr(); + seen_const = seen_const or chosen_ty.isConstPtr(mod); any_are_null = false; chosen = candidate; chosen_i = candidate_i + 1; @@ -30924,7 +30924,7 @@ fn resolvePeerTypes( .Vector => continue, else => {}, }, - .Fn => if (chosen_ty.isSinglePointer(mod) and chosen_ty.isConstPtr() and chosen_ty.childType(mod).zigTypeTag(mod) == .Fn) { + .Fn => if (chosen_ty.isSinglePointer(mod) and chosen_ty.isConstPtr(mod) and chosen_ty.childType(mod).zigTypeTag(mod) == .Fn) { if (.ok == try sema.coerceInMemoryAllowedFns(block, chosen_ty.childType(mod), candidate_ty, target, src, src)) { continue; } @@ -31023,7 +31023,7 @@ fn resolvePeerTypes( var info = chosen_ty.ptrInfo(mod); info.sentinel = chosen_child_ty.sentinel(mod); info.size = .Slice; - info.mutable = !(seen_const or chosen_child_ty.isConstPtr()); + info.mutable = !(seen_const or chosen_child_ty.isConstPtr(mod)); info.pointee_type = chosen_child_ty.elemType2(mod); const new_ptr_ty = try Type.ptr(sema.arena, mod, info); diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index 81169750c1..4a10691e02 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -3430,9 +3430,10 @@ fn airPtrSlicePtrPtr(self: *Self, inst: Air.Inst.Index) !void { } fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const slice_ty = self.typeOf(bin_op.lhs); - const result: MCValue = if (!slice_ty.isVolatilePtr() and self.liveness.isUnused(inst)) .dead else result: { + const result: MCValue = if (!slice_ty.isVolatilePtr(mod) and self.liveness.isUnused(inst)) .dead else result: { var buf: Type.SlicePtrFieldTypeBuffer = undefined; const ptr_ty = 
slice_ty.slicePtrFieldType(&buf); @@ -3496,9 +3497,10 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void { } fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const ptr_ty = self.typeOf(bin_op.lhs); - const result: MCValue = if (!ptr_ty.isVolatilePtr() and self.liveness.isUnused(inst)) .dead else result: { + const result: MCValue = if (!ptr_ty.isVolatilePtr(mod) and self.liveness.isUnused(inst)) .dead else result: { const base_bind: ReadArg.Bind = .{ .inst = bin_op.lhs }; const index_bind: ReadArg.Bind = .{ .inst = bin_op.rhs }; @@ -3869,7 +3871,7 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void { break :result MCValue.none; const ptr = try self.resolveInst(ty_op.operand); - const is_volatile = self.typeOf(ty_op.operand).isVolatilePtr(); + const is_volatile = self.typeOf(ty_op.operand).isVolatilePtr(mod); if (self.liveness.isUnused(inst) and !is_volatile) break :result MCValue.dead; diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index c08cb58c48..3591ead53d 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -2428,9 +2428,10 @@ fn ptrElemVal( } fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const slice_ty = self.typeOf(bin_op.lhs); - const result: MCValue = if (!slice_ty.isVolatilePtr() and self.liveness.isUnused(inst)) .dead else result: { + const result: MCValue = if (!slice_ty.isVolatilePtr(mod) and self.liveness.isUnused(inst)) .dead else result: { var buf: Type.SlicePtrFieldTypeBuffer = undefined; const ptr_ty = slice_ty.slicePtrFieldType(&buf); @@ -2527,9 +2528,10 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void { } fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const ptr_ty = self.typeOf(bin_op.lhs); - const result: MCValue = if (!ptr_ty.isVolatilePtr() and self.liveness.isUnused(inst)) .dead else result: { + const result: MCValue = if (!ptr_ty.isVolatilePtr(mod) and self.liveness.isUnused(inst)) .dead else result: { const base_bind: ReadArg.Bind = .{ .inst = bin_op.lhs }; const index_bind: ReadArg.Bind = .{ .inst = bin_op.rhs }; @@ -2738,7 +2740,7 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void { break :result MCValue.none; const ptr = try self.resolveInst(ty_op.operand); - const is_volatile = self.typeOf(ty_op.operand).isVolatilePtr(); + const is_volatile = self.typeOf(ty_op.operand).isVolatilePtr(mod); if (self.liveness.isUnused(inst) and !is_volatile) break :result MCValue.dead; diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index 1e5858a948..1008d527f6 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -1536,7 +1536,7 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void { break :result MCValue.none; const ptr = try self.resolveInst(ty_op.operand); - const is_volatile = self.typeOf(ty_op.operand).isVolatilePtr(); + const is_volatile = self.typeOf(ty_op.operand).isVolatilePtr(mod); if (self.liveness.isUnused(inst) and !is_volatile) break :result MCValue.dead; diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index f8a62f9798..83e4b4f93d 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -1827,7 +1827,7 @@ fn airLoad(self: *Self, inst: 
Air.Inst.Index) !void { break :result MCValue.none; const ptr = try self.resolveInst(ty_op.operand); - const is_volatile = self.typeOf(ty_op.operand).isVolatilePtr(); + const is_volatile = self.typeOf(ty_op.operand).isVolatilePtr(mod); if (self.liveness.isUnused(inst) and !is_volatile) break :result MCValue.dead; diff --git a/src/codegen/c.zig b/src/codegen/c.zig index cd3974bc91..b0fb9fa480 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -6117,7 +6117,7 @@ fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue try writer.print("zig_cmpxchg_{s}((zig_atomic(", .{flavor}); try f.renderType(writer, ty); try writer.writeByte(')'); - if (ptr_ty.isVolatilePtr()) try writer.writeAll(" volatile"); + if (ptr_ty.isVolatilePtr(mod)) try writer.writeAll(" volatile"); try writer.writeAll(" *)"); try f.writeCValue(writer, ptr, .Other); try writer.writeAll(", "); @@ -6159,7 +6159,7 @@ fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue try writer.print("zig_cmpxchg_{s}((zig_atomic(", .{flavor}); try f.renderType(writer, ty); try writer.writeByte(')'); - if (ptr_ty.isVolatilePtr()) try writer.writeAll(" volatile"); + if (ptr_ty.isVolatilePtr(mod)) try writer.writeAll(" volatile"); try writer.writeAll(" *)"); try f.writeCValue(writer, ptr, .Other); try writer.writeAll(", "); @@ -6221,7 +6221,7 @@ fn airAtomicRmw(f: *Function, inst: Air.Inst.Index) !CValue { if (use_atomic) try writer.writeAll("zig_atomic("); try f.renderType(writer, ty); if (use_atomic) try writer.writeByte(')'); - if (ptr_ty.isVolatilePtr()) try writer.writeAll(" volatile"); + if (ptr_ty.isVolatilePtr(mod)) try writer.writeAll(" volatile"); try writer.writeAll(" *)"); try f.writeCValue(writer, ptr, .Other); try writer.writeAll(", "); @@ -6265,7 +6265,7 @@ fn airAtomicLoad(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeAll(", (zig_atomic("); try f.renderType(writer, ty); try writer.writeByte(')'); - if (ptr_ty.isVolatilePtr()) try writer.writeAll(" volatile"); + if (ptr_ty.isVolatilePtr(mod)) try writer.writeAll(" volatile"); try writer.writeAll(" *)"); try f.writeCValue(writer, ptr, .Other); try writer.writeAll(", "); @@ -6299,7 +6299,7 @@ fn airAtomicStore(f: *Function, inst: Air.Inst.Index, order: [*:0]const u8) !CVa try writer.writeAll("zig_atomic_store((zig_atomic("); try f.renderType(writer, ty); try writer.writeByte(')'); - if (ptr_ty.isVolatilePtr()) try writer.writeAll(" volatile"); + if (ptr_ty.isVolatilePtr(mod)) try writer.writeAll(" volatile"); try writer.writeAll(" *)"); try f.writeCValue(writer, ptr, .Other); try writer.writeAll(", "); @@ -6365,7 +6365,7 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { return .none; } - if (elem_abi_size > 1 or dest_ty.isVolatilePtr()) { + if (elem_abi_size > 1 or dest_ty.isVolatilePtr(mod)) { // For the assignment in this loop, the array pointer needs to get // casted to a regular pointer, otherwise an error like this occurs: // error: array type 'uint32_t[20]' (aka 'unsigned int[20]') is not assignable diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 558534a651..7fa9b74334 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -7046,7 +7046,7 @@ pub const FuncGen = struct { const elem_llvm_ty = try self.dg.lowerType(vector_ptr_ty.childType(mod)); const load_inst = self.builder.buildLoad(elem_llvm_ty, vector_ptr, ""); load_inst.setAlignment(vector_ptr_ty.ptrAlignment(mod)); - load_inst.setVolatile(llvm.Bool.fromBool(vector_ptr_ty.isVolatilePtr())); + 
load_inst.setVolatile(llvm.Bool.fromBool(vector_ptr_ty.isVolatilePtr(mod))); break :blk load_inst; }; const modified_vector = self.builder.buildInsertElement(loaded_vector, operand, index, ""); @@ -8221,7 +8221,7 @@ pub const FuncGen = struct { const usize_llvm_ty = try self.dg.lowerType(Type.usize); const len = usize_llvm_ty.constInt(operand_size, .False); const dest_ptr_align = ptr_ty.ptrAlignment(mod); - _ = self.builder.buildMemSet(dest_ptr, fill_byte, len, dest_ptr_align, ptr_ty.isVolatilePtr()); + _ = self.builder.buildMemSet(dest_ptr, fill_byte, len, dest_ptr_align, ptr_ty.isVolatilePtr(mod)); if (safety and mod.comp.bin_file.options.valgrind) { self.valgrindMarkUndef(dest_ptr, len); } @@ -8497,7 +8497,7 @@ pub const FuncGen = struct { const dest_ptr_align = ptr_ty.ptrAlignment(mod); const u8_llvm_ty = self.context.intType(8); const dest_ptr = self.sliceOrArrayPtr(dest_slice, ptr_ty); - const is_volatile = ptr_ty.isVolatilePtr(); + const is_volatile = ptr_ty.isVolatilePtr(mod); if (self.air.value(bin_op.rhs, mod)) |elem_val| { if (elem_val.isUndefDeep()) { @@ -8621,7 +8621,7 @@ pub const FuncGen = struct { const len = self.sliceOrArrayLenInBytes(dest_slice, dest_ptr_ty); const dest_ptr = self.sliceOrArrayPtr(dest_slice, dest_ptr_ty); const mod = self.dg.module; - const is_volatile = src_ptr_ty.isVolatilePtr() or dest_ptr_ty.isVolatilePtr(); + const is_volatile = src_ptr_ty.isVolatilePtr(mod) or dest_ptr_ty.isVolatilePtr(mod); _ = self.builder.buildMemCpy( dest_ptr, dest_ptr_ty.ptrAlignment(mod), @@ -9894,7 +9894,7 @@ pub const FuncGen = struct { if (!info.pointee_type.hasRuntimeBitsIgnoreComptime(mod)) return null; const ptr_alignment = info.alignment(mod); - const ptr_volatile = llvm.Bool.fromBool(ptr_ty.isVolatilePtr()); + const ptr_volatile = llvm.Bool.fromBool(ptr_ty.isVolatilePtr(mod)); assert(info.vector_index != .runtime); if (info.vector_index != .none) { diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 5fa81d19ff..27a79c1c45 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -1689,7 +1689,7 @@ pub const DeclGen = struct { const indirect_value_ty_ref = try self.resolveType(value_ty, .indirect); const result_id = self.spv.allocId(); const access = spec.MemoryAccess.Extended{ - .Volatile = ptr_ty.isVolatilePtr(), + .Volatile = ptr_ty.isVolatilePtr(mod), }; try self.func.body.emit(self.spv.gpa, .OpLoad, .{ .id_result_type = self.typeId(indirect_value_ty_ref), @@ -1705,7 +1705,7 @@ pub const DeclGen = struct { const value_ty = ptr_ty.childType(mod); const indirect_value_id = try self.convertToIndirect(value_ty, value_id); const access = spec.MemoryAccess.Extended{ - .Volatile = ptr_ty.isVolatilePtr(), + .Volatile = ptr_ty.isVolatilePtr(mod), }; try self.func.body.emit(self.spv.gpa, .OpStore, .{ .pointer = ptr_id, @@ -2464,9 +2464,10 @@ pub const DeclGen = struct { } fn airSliceElemPtr(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { + const mod = self.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const slice_ty = self.typeOf(bin_op.lhs); - if (!slice_ty.isVolatilePtr() and self.liveness.isUnused(inst)) return null; + if (!slice_ty.isVolatilePtr(mod) and self.liveness.isUnused(inst)) return null; const slice_id = try self.resolve(bin_op.lhs); const index_id = try self.resolve(bin_op.rhs); @@ -2479,9 +2480,10 @@ pub const DeclGen = struct { } fn airSliceElemVal(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { + const mod = self.module; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const slice_ty = 
self.typeOf(bin_op.lhs); - if (!slice_ty.isVolatilePtr() and self.liveness.isUnused(inst)) return null; + if (!slice_ty.isVolatilePtr(mod) and self.liveness.isUnused(inst)) return null; const slice_id = try self.resolve(bin_op.lhs); const index_id = try self.resolve(bin_op.rhs); @@ -2781,10 +2783,11 @@ pub const DeclGen = struct { } fn airLoad(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { + const mod = self.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const ptr_ty = self.typeOf(ty_op.operand); const operand = try self.resolve(ty_op.operand); - if (!ptr_ty.isVolatilePtr() and self.liveness.isUnused(inst)) return null; + if (!ptr_ty.isVolatilePtr(mod) and self.liveness.isUnused(inst)) return null; return try self.load(ptr_ty, operand); } diff --git a/src/type.zig b/src/type.zig index 4840bca6e7..db8c116f70 100644 --- a/src/type.zig +++ b/src/type.zig @@ -193,7 +193,7 @@ pub const Type = struct { .Frame, => false, - .Pointer => !ty.isSlice(mod) and (is_equality_cmp or ty.isCPtr()), + .Pointer => !ty.isSlice(mod) and (is_equality_cmp or ty.isCPtr(mod)), .Optional => { if (!is_equality_cmp) return false; return ty.optionalChild(mod).isSelfComparable(mod, is_equality_cmp); @@ -3012,38 +3012,59 @@ pub const Type = struct { } } - pub fn isConstPtr(self: Type) bool { - return switch (self.tag()) { - .pointer => !self.castTag(.pointer).?.data.mutable, - else => false, + pub fn isConstPtr(ty: Type, mod: *const Module) bool { + return switch (ty.ip_index) { + .none => switch (ty.tag()) { + .pointer => !ty.castTag(.pointer).?.data.mutable, + else => false, + }, + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .ptr_type => |ptr_type| ptr_type.is_const, + else => false, + }, }; } - pub fn isVolatilePtr(self: Type) bool { - return switch (self.tag()) { - .pointer => { - const payload = self.castTag(.pointer).?.data; - return payload.@"volatile"; + pub fn isVolatilePtr(ty: Type, mod: *const Module) bool { + return isVolatilePtrIp(ty, mod.intern_pool); + } + + pub fn isVolatilePtrIp(ty: Type, ip: InternPool) bool { + return switch (ty.ip_index) { + .none => switch (ty.tag()) { + .pointer => ty.castTag(.pointer).?.data.@"volatile", + else => false, + }, + else => switch (ip.indexToKey(ty.ip_index)) { + .ptr_type => |ptr_type| ptr_type.is_volatile, + else => false, }, - else => false, }; } - pub fn isAllowzeroPtr(self: Type, mod: *const Module) bool { - return switch (self.tag()) { - .pointer => { - const payload = self.castTag(.pointer).?.data; - return payload.@"allowzero"; + pub fn isAllowzeroPtr(ty: Type, mod: *const Module) bool { + return switch (ty.ip_index) { + .none => switch (ty.tag()) { + .pointer => ty.castTag(.pointer).?.data.@"allowzero", + else => ty.zigTypeTag(mod) == .Optional, + }, + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .ptr_type => |ptr_type| ptr_type.is_allowzero, + else => false, }, - else => return self.zigTypeTag(mod) == .Optional, }; } - pub fn isCPtr(self: Type) bool { - return switch (self.tag()) { - .pointer => self.castTag(.pointer).?.data.size == .C, - - else => return false, + pub fn isCPtr(ty: Type, mod: *const Module) bool { + return switch (ty.ip_index) { + .none => switch (ty.tag()) { + .pointer => ty.castTag(.pointer).?.data.size == .C, + else => false, + }, + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .ptr_type => |ptr_type| ptr_type.size == .C, + else => false, + }, }; } @@ -5063,7 +5084,7 @@ pub const Type = struct { return .{ .pointee_type = p.elem_type.toType(), .sentinel = if (p.sentinel 
!= .none) p.sentinel.toValue() else null, - .@"align" = p.alignment, + .@"align" = @intCast(u32, p.alignment), .@"addrspace" = p.address_space, .bit_offset = p.bit_offset, .host_size = p.host_size, @@ -5248,6 +5269,24 @@ pub const Type = struct { } } + if (d.pointee_type.ip_index != .none and + (d.sentinel == null or d.sentinel.?.ip_index != .none)) + { + return mod.ptrType(.{ + .elem_type = d.pointee_type.ip_index, + .sentinel = if (d.sentinel) |s| s.ip_index else .none, + .alignment = d.@"align", + .host_size = d.host_size, + .bit_offset = d.bit_offset, + .vector_index = d.vector_index, + .size = d.size, + .is_const = !d.mutable, + .is_volatile = d.@"volatile", + .is_allowzero = d.@"allowzero", + .address_space = d.@"addrspace", + }); + } + return Type.Tag.pointer.create(arena, d); } -- cgit v1.2.3 From 31aee50c1a96b7e72b42ee885636b27fbcac8eb4 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 5 May 2023 19:13:43 -0700 Subject: InternPool: add a slice encoding This uses the data field to reference its pointer field type, which allows for efficient and infallible access of a slice type's pointer type. --- src/InternPool.zig | 38 +++++++++++++++++++++++++ src/Module.zig | 2 +- src/Sema.zig | 11 ++++---- src/arch/aarch64/CodeGen.zig | 2 +- src/arch/arm/CodeGen.zig | 2 +- src/arch/sparc64/CodeGen.zig | 2 +- src/arch/x86_64/CodeGen.zig | 11 ++++---- src/codegen.zig | 6 ++-- src/codegen/c.zig | 13 +++++---- src/codegen/c/type.zig | 2 +- src/codegen/llvm.zig | 14 +++++----- src/codegen/spirv.zig | 6 ++-- src/link/Dwarf.zig | 2 +- src/type.zig | 66 +++++++++++++++++++++++++++----------------- src/value.zig | 8 +++--- 15 files changed, 120 insertions(+), 65 deletions(-) (limited to 'src/arch') diff --git a/src/InternPool.zig b/src/InternPool.zig index de69b19dbe..15b6e318ed 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -668,6 +668,9 @@ pub const Tag = enum(u8) { /// A fully explicitly specified pointer type. /// data is payload to Pointer. type_pointer, + /// A slice type. + /// data is Index of underlying pointer type. + type_slice, /// An optional type. /// data is the child type. type_optional, @@ -984,6 +987,13 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { } }; }, + .type_slice => { + const ptr_ty_index = @intToEnum(Index, data); + var result = indexToKey(ip, ptr_ty_index); + result.ptr_type.size = .Slice; + return result; + }, + .type_optional => .{ .opt_type = @intToEnum(Index, data) }, .type_error_union => @panic("TODO"), @@ -1041,6 +1051,19 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { }, .ptr_type => |ptr_type| { assert(ptr_type.elem_type != .none); + + if (ptr_type.size == .Slice) { + var new_key = key; + new_key.ptr_type.size = .Many; + const ptr_ty_index = try get(ip, gpa, new_key); + try ip.items.ensureUnusedCapacity(gpa, 1); + ip.items.appendAssumeCapacity(.{ + .tag = .type_slice, + .data = @enumToInt(ptr_ty_index), + }); + return @intToEnum(Index, ip.items.len - 1); + } + // TODO introduce more pointer encodings ip.items.appendAssumeCapacity(.{ .tag = .type_pointer, @@ -1401,6 +1424,20 @@ pub fn childType(ip: InternPool, i: Index) Index { }; } +/// Given a slice type, returns the type of the pointer field. 
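An editorial sketch of the new encoding in action (hypothetical usage, assuming the InternPool API shown in this commit): interning `[]const u8` first interns `[*]const u8`, then appends a `type_slice` item whose `data` is that pointer type's Index, so recovering a slice's pointer type is a single table lookup.

    const slice_ty = try ip.get(gpa, .{ .ptr_type = .{
        .elem_type = .u8_type,
        .size = .Slice,
        .is_const = true,
    } });
    // No allocation and no failure path: type_slice stores the pointer type
    // directly in its data field.
    const many_ptr_ty = ip.slicePtrType(slice_ty);
    // indexToKey(slice_ty) rebuilds the same key with .size = .Slice.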
+pub fn slicePtrType(ip: InternPool, i: Index) Index { + switch (i) { + .const_slice_u8_type => return .manyptr_const_u8_type, + .const_slice_u8_sentinel_0_type => return .manyptr_const_u8_sentinel_0_type, + else => {}, + } + const item = ip.items.get(@enumToInt(i)); + switch (item.tag) { + .type_slice => return @intToEnum(Index, item.data), + else => unreachable, // not a slice type + } +} + pub fn dump(ip: InternPool) void { dumpFallible(ip, std.heap.page_allocator) catch return; } @@ -1438,6 +1475,7 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void { .type_array => @sizeOf(Vector), .type_vector => @sizeOf(Vector), .type_pointer => @sizeOf(Pointer), + .type_slice => 0, .type_optional => 0, .type_error_union => @sizeOf(ErrorUnion), .type_enum_simple => @sizeOf(EnumSimple), diff --git a/src/Module.zig b/src/Module.zig index e9658ad89f..01e2403377 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -6553,7 +6553,7 @@ pub fn populateTestFunctions( } const decl = mod.declPtr(decl_index); var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const tmp_test_fn_ty = decl.ty.slicePtrFieldType(&buf).childType(mod); + const tmp_test_fn_ty = decl.ty.slicePtrFieldType(&buf, mod).childType(mod); const array_decl_index = d: { // Add mod.test_functions to an array decl then make the test_functions diff --git a/src/Sema.zig b/src/Sema.zig index 088d830280..ced5eb247c 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -24201,7 +24201,7 @@ fn fieldPtr( if (mem.eql(u8, field_name, "ptr")) { const buf = try sema.arena.create(Type.SlicePtrFieldTypeBuffer); - const slice_ptr_ty = inner_ty.slicePtrFieldType(buf); + const slice_ptr_ty = inner_ty.slicePtrFieldType(buf, mod); const result_ty = try Type.ptr(sema.arena, sema.mod, .{ .pointee_type = slice_ptr_ty, @@ -27804,7 +27804,7 @@ fn beginComptimePtrMutation( sema, block, src, - parent.ty.slicePtrFieldType(try sema.arena.create(Type.SlicePtrFieldTypeBuffer)), + parent.ty.slicePtrFieldType(try sema.arena.create(Type.SlicePtrFieldTypeBuffer), mod), &val_ptr.castTag(.slice).?.data.ptr, ptr_elem_ty, parent.decl_ref_mut, @@ -27859,7 +27859,7 @@ fn beginComptimePtrMutation( sema, block, src, - parent.ty.slicePtrFieldType(try sema.arena.create(Type.SlicePtrFieldTypeBuffer)), + parent.ty.slicePtrFieldType(try sema.arena.create(Type.SlicePtrFieldTypeBuffer), mod), &val_ptr.castTag(.slice).?.data.ptr, ptr_elem_ty, parent.decl_ref_mut, @@ -28256,7 +28256,7 @@ fn beginComptimePtrLoad( const slice_val = tv.val.castTag(.slice).?.data; deref.pointee = switch (field_index) { Value.Payload.Slice.ptr_index => TypedValue{ - .ty = field_ptr.container_ty.slicePtrFieldType(try sema.arena.create(Type.SlicePtrFieldTypeBuffer)), + .ty = field_ptr.container_ty.slicePtrFieldType(try sema.arena.create(Type.SlicePtrFieldTypeBuffer), mod), .val = slice_val.ptr, }, Value.Payload.Slice.len_index => TypedValue{ @@ -29339,8 +29339,9 @@ fn analyzeSlicePtr( slice: Air.Inst.Ref, slice_ty: Type, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const buf = try sema.arena.create(Type.SlicePtrFieldTypeBuffer); - const result_ty = slice_ty.slicePtrFieldType(buf); + const result_ty = slice_ty.slicePtrFieldType(buf, mod); if (try sema.resolveMaybeUndefVal(slice)) |val| { if (val.isUndef()) return sema.addConstUndef(result_ty); return sema.addConstant(result_ty, val.slicePtr()); diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index 4a10691e02..95a8350c7d 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -3435,7 +3435,7 @@ fn 
airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void { const slice_ty = self.typeOf(bin_op.lhs); const result: MCValue = if (!slice_ty.isVolatilePtr(mod) and self.liveness.isUnused(inst)) .dead else result: { var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const ptr_ty = slice_ty.slicePtrFieldType(&buf); + const ptr_ty = slice_ty.slicePtrFieldType(&buf, mod); const slice_mcv = try self.resolveInst(bin_op.lhs); const base_mcv = slicePtr(slice_mcv); diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index 3591ead53d..cc2bc3a613 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -2433,7 +2433,7 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void { const slice_ty = self.typeOf(bin_op.lhs); const result: MCValue = if (!slice_ty.isVolatilePtr(mod) and self.liveness.isUnused(inst)) .dead else result: { var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const ptr_ty = slice_ty.slicePtrFieldType(&buf); + const ptr_ty = slice_ty.slicePtrFieldType(&buf, mod); const slice_mcv = try self.resolveInst(bin_op.lhs); const base_mcv = slicePtr(slice_mcv); diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index 83e4b4f93d..4231222d4b 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -2462,7 +2462,7 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void { const elem_size = elem_ty.abiSize(mod); var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const slice_ptr_field_type = slice_ty.slicePtrFieldType(&buf); + const slice_ptr_field_type = slice_ty.slicePtrFieldType(&buf, mod); const index_lock: ?RegisterLock = if (index_mcv == .register) self.register_manager.lockRegAssumeUnused(index_mcv.register) diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index f6304a0ff3..ee604afd0f 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -4056,7 +4056,7 @@ fn genSliceElemPtr(self: *Self, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref) !MCValue { const elem_ty = slice_ty.childType(mod); const elem_size = elem_ty.abiSize(mod); var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const slice_ptr_field_type = slice_ty.slicePtrFieldType(&buf); + const slice_ptr_field_type = slice_ty.slicePtrFieldType(&buf, mod); const index_ty = self.typeOf(rhs); const index_mcv = try self.resolveInst(rhs); @@ -4081,11 +4081,12 @@ fn genSliceElemPtr(self: *Self, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref) !MCValue { } fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const slice_ty = self.typeOf(bin_op.lhs); var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const slice_ptr_field_type = slice_ty.slicePtrFieldType(&buf); + const slice_ptr_field_type = slice_ty.slicePtrFieldType(&buf, mod); const elem_ptr = try self.genSliceElemPtr(bin_op.lhs, bin_op.rhs); const dst_mcv = try self.allocRegOrMem(inst, false); try self.load(dst_mcv, slice_ptr_field_type, elem_ptr); @@ -8682,7 +8683,7 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined; const some_info: struct { off: i32, ty: Type } = if (opt_ty.optionalReprIsPayload(mod)) - .{ .off = 0, .ty = if (pl_ty.isSlice(mod)) pl_ty.slicePtrFieldType(&ptr_buf) else pl_ty } + .{ .off = 0, .ty = if (pl_ty.isSlice(mod)) pl_ty.slicePtrFieldType(&ptr_buf, mod) else pl_ty } else .{ .off = @intCast(i32, pl_ty.abiSize(mod)), .ty = Type.bool }; @@ -8774,7 +8775,7 
@@ fn isNullPtr(self: *Self, inst: Air.Inst.Index, ptr_ty: Type, ptr_mcv: MCValue) var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined; const some_info: struct { off: i32, ty: Type } = if (opt_ty.optionalReprIsPayload(mod)) - .{ .off = 0, .ty = if (pl_ty.isSlice(mod)) pl_ty.slicePtrFieldType(&ptr_buf) else pl_ty } + .{ .off = 0, .ty = if (pl_ty.isSlice(mod)) pl_ty.slicePtrFieldType(&ptr_buf, mod) else pl_ty } else .{ .off = @intCast(i32, pl_ty.abiSize(mod)), .ty = Type.bool }; @@ -10813,7 +10814,7 @@ fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void { switch (dst_ptr_ty.ptrSize(mod)) { .Slice => { var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const slice_ptr_ty = dst_ptr_ty.slicePtrFieldType(&buf); + const slice_ptr_ty = dst_ptr_ty.slicePtrFieldType(&buf, mod); // TODO: this only handles slices stored in the stack const ptr = dst_ptr; diff --git a/src/codegen.zig b/src/codegen.zig index 25e8d892d8..5f5a3f66be 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -361,7 +361,7 @@ pub fn generateSymbol( // generate ptr var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const slice_ptr_field_type = typed_value.ty.slicePtrFieldType(&buf); + const slice_ptr_field_type = typed_value.ty.slicePtrFieldType(&buf, mod); switch (try generateSymbol(bin_file, src_loc, .{ .ty = slice_ptr_field_type, .val = slice.ptr, @@ -851,7 +851,7 @@ fn lowerParentPtr( var buf: Type.SlicePtrFieldTypeBuffer = undefined; break :offset switch (field_ptr.field_index) { 0 => 0, - 1 => field_ptr.container_ty.slicePtrFieldType(&buf).abiSize(mod), + 1 => field_ptr.container_ty.slicePtrFieldType(&buf, mod).abiSize(mod), else => unreachable, }; }, @@ -951,7 +951,7 @@ fn lowerDeclRef( if (typed_value.ty.isSlice(mod)) { // generate ptr var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const slice_ptr_field_type = typed_value.ty.slicePtrFieldType(&buf); + const slice_ptr_field_type = typed_value.ty.slicePtrFieldType(&buf, mod); switch (try generateSymbol(bin_file, src_loc, .{ .ty = slice_ptr_field_type, .val = typed_value.val, diff --git a/src/codegen/c.zig b/src/codegen/c.zig index b0fb9fa480..039c75de67 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -566,7 +566,7 @@ pub const DeclGen = struct { } var buf: Type.SlicePtrFieldTypeBuffer = undefined; - try dg.renderValue(writer, ty.slicePtrFieldType(&buf), val.slicePtr(), .Initializer); + try dg.renderValue(writer, ty.slicePtrFieldType(&buf, mod), val.slicePtr(), .Initializer); var len_pl: Value.Payload.U64 = .{ .base = .{ .tag = .int_u64 }, @@ -787,7 +787,7 @@ pub const DeclGen = struct { try writer.writeAll("{("); var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const ptr_ty = ty.slicePtrFieldType(&buf); + const ptr_ty = ty.slicePtrFieldType(&buf, mod); try dg.renderType(writer, ptr_ty); return writer.print("){x}, {0x}}}", .{try dg.fmtIntLiteral(Type.usize, val, .Other)}); } else { @@ -1088,7 +1088,7 @@ pub const DeclGen = struct { var buf: Type.SlicePtrFieldTypeBuffer = undefined; try writer.writeByte('{'); - try dg.renderValue(writer, ty.slicePtrFieldType(&buf), slice.ptr, initializer_type); + try dg.renderValue(writer, ty.slicePtrFieldType(&buf, mod), slice.ptr, initializer_type); try writer.writeAll(", "); try dg.renderValue(writer, Type.usize, slice.len, initializer_type); try writer.writeByte('}'); @@ -4107,6 +4107,7 @@ fn airMinMax(f: *Function, inst: Air.Inst.Index, operator: u8, operation: []cons } fn airSlice(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const ty_pl = 
f.air.instructions.items(.data)[inst].ty_pl; const bin_op = f.air.extraData(Air.Bin, ty_pl.payload).data; @@ -4116,7 +4117,7 @@ fn airSlice(f: *Function, inst: Air.Inst.Index) !CValue { const inst_ty = f.typeOfIndex(inst); var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const ptr_ty = inst_ty.slicePtrFieldType(&buf); + const ptr_ty = inst_ty.slicePtrFieldType(&buf, mod); const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); @@ -5112,7 +5113,7 @@ fn airIsNull( TypedValue{ .ty = payload_ty, .val = Value.zero } else if (payload_ty.isSlice(mod) and optional_ty.optionalReprIsPayload(mod)) rhs: { try writer.writeAll(".ptr"); - const slice_ptr_ty = payload_ty.slicePtrFieldType(&slice_ptr_buf); + const slice_ptr_ty = payload_ty.slicePtrFieldType(&slice_ptr_buf, mod); break :rhs TypedValue{ .ty = slice_ptr_ty, .val = Value.null }; } else rhs: { try writer.writeAll(".is_null"); @@ -5845,7 +5846,7 @@ fn airArrayToSlice(f: *Function, inst: Air.Inst.Index) !CValue { // &(*(void *)p)[0], although LLVM does via GetElementPtr if (operand == .undef) { var buf: Type.SlicePtrFieldTypeBuffer = undefined; - try f.writeCValue(writer, .{ .undef = inst_ty.slicePtrFieldType(&buf) }, .Initializer); + try f.writeCValue(writer, .{ .undef = inst_ty.slicePtrFieldType(&buf, mod) }, .Initializer); } else if (array_ty.hasRuntimeBitsIgnoreComptime(mod)) { try writer.writeAll("&("); try f.writeCValueDeref(writer, operand); diff --git a/src/codegen/c/type.zig b/src/codegen/c/type.zig index 9e6de6cb21..84ddce6809 100644 --- a/src/codegen/c/type.zig +++ b/src/codegen/c/type.zig @@ -1432,7 +1432,7 @@ pub const CType = extern union { .payload => unreachable, }) |fwd_idx| { var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const ptr_ty = ty.slicePtrFieldType(&buf); + const ptr_ty = ty.slicePtrFieldType(&buf, mod); if (try lookup.typeToIndex(ptr_ty, kind)) |ptr_idx| { self.storage = .{ .anon = undefined }; self.storage.anon.fields[0] = .{ diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 7fa9b74334..5289becf1e 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -1638,7 +1638,7 @@ pub const Object = struct { if (ty.isSlice(mod)) { var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const ptr_ty = ty.slicePtrFieldType(&buf); + const ptr_ty = ty.slicePtrFieldType(&buf, mod); const len_ty = Type.usize; const name = try ty.nameAlloc(gpa, o.module); @@ -2822,7 +2822,7 @@ pub const DeclGen = struct { .Pointer => { if (t.isSlice(mod)) { var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const ptr_type = t.slicePtrFieldType(&buf); + const ptr_type = t.slicePtrFieldType(&buf, mod); const fields: [2]*llvm.Type = .{ try dg.lowerType(ptr_type), @@ -3182,9 +3182,9 @@ pub const DeclGen = struct { const param_ty = fn_info.param_types[it.zig_index - 1]; var buf: Type.SlicePtrFieldTypeBuffer = undefined; const ptr_ty = if (param_ty.zigTypeTag(mod) == .Optional) - param_ty.optionalChild(mod).slicePtrFieldType(&buf) + param_ty.optionalChild(mod).slicePtrFieldType(&buf, mod) else - param_ty.slicePtrFieldType(&buf); + param_ty.slicePtrFieldType(&buf, mod); const ptr_llvm_ty = try dg.lowerType(ptr_ty); const len_llvm_ty = try dg.lowerType(Type.usize); @@ -3387,7 +3387,7 @@ pub const DeclGen = struct { var buf: Type.SlicePtrFieldTypeBuffer = undefined; const fields: [2]*llvm.Value = .{ try dg.lowerValue(.{ - .ty = tv.ty.slicePtrFieldType(&buf), + .ty = tv.ty.slicePtrFieldType(&buf, mod), .val = slice.ptr, }), try dg.lowerValue(.{ @@ -4169,7 +4169,7 @@ pub const DeclGen = struct { const mod = 
self.module; if (tv.ty.isSlice(mod)) { var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const ptr_ty = tv.ty.slicePtrFieldType(&buf); + const ptr_ty = tv.ty.slicePtrFieldType(&buf, mod); var slice_len: Value.Payload.U64 = .{ .base = .{ .tag = .int_u64 }, .data = tv.val.sliceLen(mod), @@ -6654,7 +6654,7 @@ pub const FuncGen = struct { if (payload_ty.isSlice(mod)) { const slice_ptr = self.builder.buildExtractValue(loaded, 0, ""); var slice_buf: Type.SlicePtrFieldTypeBuffer = undefined; - const ptr_ty = try self.dg.lowerType(payload_ty.slicePtrFieldType(&slice_buf)); + const ptr_ty = try self.dg.lowerType(payload_ty.slicePtrFieldType(&slice_buf, mod)); return self.builder.buildICmp(pred, slice_ptr, ptr_ty.constNull(), ""); } return self.builder.buildICmp(pred, loaded, optional_llvm_ty.constNull(), ""); diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 27a79c1c45..e3b5d24ed9 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -669,7 +669,7 @@ pub const DeclGen = struct { const slice = val.castTag(.slice).?.data; var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const ptr_ty = ty.slicePtrFieldType(&buf); + const ptr_ty = ty.slicePtrFieldType(&buf, mod); try self.lower(ptr_ty, slice.ptr); try self.addInt(Type.usize, slice.len); @@ -2489,7 +2489,7 @@ pub const DeclGen = struct { const index_id = try self.resolve(bin_op.rhs); var slice_buf: Type.SlicePtrFieldTypeBuffer = undefined; - const ptr_ty = slice_ty.slicePtrFieldType(&slice_buf); + const ptr_ty = slice_ty.slicePtrFieldType(&slice_buf, mod); const ptr_ty_ref = try self.resolveType(ptr_ty, .direct); const slice_ptr = try self.extractField(ptr_ty, slice_id, 0); @@ -2987,7 +2987,7 @@ pub const DeclGen = struct { var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined; const ptr_ty = if (payload_ty.isSlice(mod)) - payload_ty.slicePtrFieldType(&ptr_buf) + payload_ty.slicePtrFieldType(&ptr_buf, mod) else payload_ty; diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig index 178f9fa64c..3e4e90951e 100644 --- a/src/link/Dwarf.zig +++ b/src/link/Dwarf.zig @@ -278,7 +278,7 @@ pub const DeclState = struct { var index = dbg_info_buffer.items.len; try dbg_info_buffer.resize(index + 4); var buf = try arena.create(Type.SlicePtrFieldTypeBuffer); - const ptr_ty = ty.slicePtrFieldType(buf); + const ptr_ty = ty.slicePtrFieldType(buf, mod); try self.addTypeRelocGlobal(atom_index, ptr_ty, @intCast(u32, index)); // DW.AT.data_member_location, DW.FORM.udata try dbg_info_buffer.ensureUnusedCapacity(6); diff --git a/src/type.zig b/src/type.zig index a51ae273c1..dac12aa74e 100644 --- a/src/type.zig +++ b/src/type.zig @@ -2042,7 +2042,18 @@ pub const Type = struct { else => unreachable, }, else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { - else => @panic("TODO"), + .ptr_type => |ptr_type| { + if (ptr_type.alignment != 0) { + return @intCast(u32, ptr_type.alignment); + } else if (opt_sema) |sema| { + const res = try ptr_type.elem_type.toType().abiAlignmentAdvanced(mod, .{ .sema = sema }); + return res.scalar; + } else { + return (ptr_type.elem_type.toType().abiAlignmentAdvanced(mod, .eager) catch unreachable).scalar; + } + }, + .opt_type => |child| return child.toType().ptrAlignmentAdvanced(mod, opt_sema), + else => unreachable, }, } } @@ -3060,33 +3071,36 @@ pub const Type = struct { pointer: Payload.Pointer, }; - pub fn slicePtrFieldType(self: Type, buffer: *SlicePtrFieldTypeBuffer) Type { - switch (self.tag()) { - .pointer => { - const payload = self.castTag(.pointer).?.data; - assert(payload.size == .Slice); - - buffer.* 
= .{ - .pointer = .{ - .data = .{ - .pointee_type = payload.pointee_type, - .sentinel = payload.sentinel, - .@"align" = payload.@"align", - .@"addrspace" = payload.@"addrspace", - .bit_offset = payload.bit_offset, - .host_size = payload.host_size, - .vector_index = payload.vector_index, - .@"allowzero" = payload.@"allowzero", - .mutable = payload.mutable, - .@"volatile" = payload.@"volatile", - .size = .Many, + pub fn slicePtrFieldType(ty: Type, buffer: *SlicePtrFieldTypeBuffer, mod: *const Module) Type { + switch (ty.ip_index) { + .none => switch (ty.tag()) { + .pointer => { + const payload = ty.castTag(.pointer).?.data; + assert(payload.size == .Slice); + + buffer.* = .{ + .pointer = .{ + .data = .{ + .pointee_type = payload.pointee_type, + .sentinel = payload.sentinel, + .@"align" = payload.@"align", + .@"addrspace" = payload.@"addrspace", + .bit_offset = payload.bit_offset, + .host_size = payload.host_size, + .vector_index = payload.vector_index, + .@"allowzero" = payload.@"allowzero", + .mutable = payload.mutable, + .@"volatile" = payload.@"volatile", + .size = .Many, + }, }, - }, - }; - return Type.initPayload(&buffer.pointer.base); - }, + }; + return Type.initPayload(&buffer.pointer.base); + }, - else => unreachable, + else => unreachable, + }, + else => return mod.intern_pool.slicePtrType(ty.ip_index).toType(), } } diff --git a/src/value.zig b/src/value.zig index a34a022dea..f8188c64ab 100644 --- a/src/value.zig +++ b/src/value.zig @@ -2078,7 +2078,7 @@ pub const Value = struct { } var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined; - const ptr_ty = ty.slicePtrFieldType(&ptr_buf); + const ptr_ty = ty.slicePtrFieldType(&ptr_buf, mod); return eqlAdvanced(a_payload.ptr, ptr_ty, b_payload.ptr, ptr_ty, mod, opt_sema); }, @@ -2237,7 +2237,7 @@ pub const Value = struct { } var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined; - const ptr_ty = ty.slicePtrFieldType(&ptr_buf); + const ptr_ty = ty.slicePtrFieldType(&ptr_buf, mod); const a_ptr = switch (a_ty.ptrSize(mod)) { .Slice => a.slicePtr(), .One => a, @@ -2376,7 +2376,7 @@ pub const Value = struct { .slice => { const slice = val.castTag(.slice).?.data; var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined; - const ptr_ty = ty.slicePtrFieldType(&ptr_buf); + const ptr_ty = ty.slicePtrFieldType(&ptr_buf, mod); hash(slice.ptr, ptr_ty, hasher, mod); hash(slice.len, Type.usize, hasher, mod); }, @@ -2499,7 +2499,7 @@ pub const Value = struct { .slice => { const slice = val.castTag(.slice).?.data; var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined; - const ptr_ty = ty.slicePtrFieldType(&ptr_buf); + const ptr_ty = ty.slicePtrFieldType(&ptr_buf, mod); slice.ptr.hashUncoerced(ptr_ty, hasher, mod); }, else => val.hashPtr(hasher, mod), -- cgit v1.2.3 From 75900ec1b5a250935a6abe050a006738fba99e66 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sat, 6 May 2023 19:20:52 -0700 Subject: stage2: move integer values to InternPool --- src/Air.zig | 1 + src/InternPool.zig | 18 +- src/Module.zig | 73 ++- src/RangeSet.zig | 6 +- src/Sema.zig | 738 +++++++++------------- src/TypedValue.zig | 21 +- src/Zir.zig | 1 + src/arch/wasm/CodeGen.zig | 36 +- src/arch/x86_64/CodeGen.zig | 22 +- src/codegen.zig | 65 +- src/codegen/c.zig | 240 +++----- src/codegen/llvm.zig | 105 ++-- src/codegen/spirv.zig | 46 +- src/link/Dwarf.zig | 3 +- src/type.zig | 89 ++- src/value.zig | 1431 +++++++++++++++++-------------------------- 16 files changed, 1168 insertions(+), 1727 deletions(-) (limited to 'src/arch') diff --git a/src/Air.zig b/src/Air.zig index 
43fc55e811..549583e697 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -913,6 +913,7 @@ pub const Inst = struct { zero_u8 = @enumToInt(InternPool.Index.zero_u8), one = @enumToInt(InternPool.Index.one), one_usize = @enumToInt(InternPool.Index.one_usize), + negative_one = @enumToInt(InternPool.Index.negative_one), calling_convention_c = @enumToInt(InternPool.Index.calling_convention_c), calling_convention_inline = @enumToInt(InternPool.Index.calling_convention_inline), void_value = @enumToInt(InternPool.Index.void_value), diff --git a/src/InternPool.zig b/src/InternPool.zig index fec5e721d0..d2f3bf81fe 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -390,6 +390,8 @@ pub const Index = enum(u32) { one, /// `1` (usize) one_usize, + /// `-1` (comptime_int) + negative_one, /// `std.builtin.CallingConvention.C` calling_convention_c, /// `std.builtin.CallingConvention.Inline` @@ -624,6 +626,11 @@ pub const static_keys = [_]Key{ .storage = .{ .u64 = 1 }, } }, + .{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .i64 = -1 }, + } }, + .{ .enum_tag = .{ .ty = .calling_convention_type, .tag = .{ @@ -999,23 +1006,23 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { .type_error_union => @panic("TODO"), .type_enum_simple => @panic("TODO"), .simple_internal => @panic("TODO"), - .int_u32 => return .{ .int = .{ + .int_u32 => .{ .int = .{ .ty = .u32_type, .storage = .{ .u64 = data }, } }, - .int_i32 => return .{ .int = .{ + .int_i32 => .{ .int = .{ .ty = .i32_type, .storage = .{ .i64 = @bitCast(i32, data) }, } }, - .int_usize => return .{ .int = .{ + .int_usize => .{ .int = .{ .ty = .usize_type, .storage = .{ .u64 = data }, } }, - .int_comptime_int_u32 => return .{ .int = .{ + .int_comptime_int_u32 => .{ .int = .{ .ty = .comptime_int_type, .storage = .{ .u64 = data }, } }, - .int_comptime_int_i32 => return .{ .int = .{ + .int_comptime_int_i32 => .{ .int = .{ .ty = .comptime_int_type, .storage = .{ .i64 = @bitCast(i32, data) }, } }, @@ -1137,6 +1144,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .int => |int| b: { switch (int.ty) { + .none => unreachable, .u32_type => switch (int.storage) { .big_int => |big_int| { if (big_int.to(u32)) |casted| { diff --git a/src/Module.zig b/src/Module.zig index 01e2403377..9315c9efa7 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -6597,7 +6597,7 @@ pub fn populateTestFunctions( field_vals.* = .{ try Value.Tag.slice.create(arena, .{ .ptr = try Value.Tag.decl_ref.create(arena, test_name_decl_index), - .len = try Value.Tag.int_u64.create(arena, test_name_slice.len), + .len = try mod.intValue(Type.usize, test_name_slice.len), }), // name try Value.Tag.decl_ref.create(arena, test_decl_index), // func Value.null, // async_frame_size @@ -6628,7 +6628,7 @@ pub fn populateTestFunctions( new_var.* = decl.val.castTag(.variable).?.data.*; new_var.init = try Value.Tag.slice.create(arena, .{ .ptr = try Value.Tag.decl_ref.create(arena, array_decl_index), - .len = try Value.Tag.int_u64.create(arena, mod.test_functions.count()), + .len = try mod.intValue(Type.usize, mod.test_functions.count()), }); const new_val = try Value.Tag.variable.create(arena, new_var); @@ -6875,6 +6875,38 @@ pub fn singleConstPtrType(mod: *Module, child_type: Type) Allocator.Error!Type { return ptrType(mod, .{ .elem_type = child_type.ip_index, .is_const = true }); } +pub fn intValue(mod: *Module, ty: Type, x: anytype) Allocator.Error!Value { + if (std.math.cast(u64, x)) |casted| return intValue_u64(mod, ty, casted); + if (std.math.cast(i64, x)) |casted| 
return intValue_i64(mod, ty, casted); + var limbs_buffer: [4]usize = undefined; + var big_int = BigIntMutable.init(&limbs_buffer, x); + return intValue_big(mod, ty, big_int.toConst()); +} + +pub fn intValue_big(mod: *Module, ty: Type, x: BigIntConst) Allocator.Error!Value { + const i = try intern(mod, .{ .int = .{ + .ty = ty.ip_index, + .storage = .{ .big_int = x }, + } }); + return i.toValue(); +} + +pub fn intValue_u64(mod: *Module, ty: Type, x: u64) Allocator.Error!Value { + const i = try intern(mod, .{ .int = .{ + .ty = ty.ip_index, + .storage = .{ .u64 = x }, + } }); + return i.toValue(); +} + +pub fn intValue_i64(mod: *Module, ty: Type, x: i64) Allocator.Error!Value { + const i = try intern(mod, .{ .int = .{ + .ty = ty.ip_index, + .storage = .{ .i64 = x }, + } }); + return i.toValue(); +} + pub fn smallestUnsignedInt(mod: *Module, max: u64) Allocator.Error!Type { return intType(mod, .unsigned, Type.smallestUnsignedBits(max)); } @@ -6907,32 +6939,27 @@ pub fn intFittingRange(mod: *Module, min: Value, max: Value) !Type { /// Asserts that `val` is not undef. If `val` is negative, asserts that `sign` is true. pub fn intBitsForValue(mod: *Module, val: Value, sign: bool) u16 { assert(!val.isUndef()); - switch (val.tag()) { - .int_big_positive => { - const limbs = val.castTag(.int_big_positive).?.data; - const big: std.math.big.int.Const = .{ .limbs = limbs, .positive = true }; - return @intCast(u16, big.bitCountAbs() + @boolToInt(sign)); - }, - .int_big_negative => { - const limbs = val.castTag(.int_big_negative).?.data; - // Zero is still a possibility, in which case unsigned is fine - for (limbs) |limb| { - if (limb != 0) break; - } else return 0; // val == 0 - assert(sign); - const big: std.math.big.int.Const = .{ .limbs = limbs, .positive = false }; - return @intCast(u16, big.bitCountTwosComp()); - }, - .int_i64 => { - const x = val.castTag(.int_i64).?.data; - if (x >= 0) return Type.smallestUnsignedBits(@intCast(u64, x)); + + const key = mod.intern_pool.indexToKey(val.ip_index); + switch (key.int.storage) { + .i64 => |x| { + if (std.math.cast(u64, x)) |casted| return Type.smallestUnsignedBits(casted); assert(sign); + // Protect against overflow in the following negation. + if (x == std.math.minInt(i64)) return 64; return Type.smallestUnsignedBits(@intCast(u64, -x - 1)) + 1; }, - else => { - const x = val.toUnsignedInt(mod); + .u64 => |x| { return Type.smallestUnsignedBits(x) + @boolToInt(sign); }, + .big_int => |big| { + if (big.positive) return @intCast(u16, big.bitCountAbs() + @boolToInt(sign)); + + // Zero is still a possibility, in which case unsigned is fine + if (big.eqZero()) return 0; + + return @intCast(u16, big.bitCountTwosComp()); + }, } } diff --git a/src/RangeSet.zig b/src/RangeSet.zig index 2e28a562c6..a015c7b568 100644 --- a/src/RangeSet.zig +++ b/src/RangeSet.zig @@ -35,8 +35,8 @@ pub fn add( src: SwitchProngSrc, ) !?SwitchProngSrc { for (self.ranges.items) |range| { - if (last.compareAll(.gte, range.first, ty, self.module) and - first.compareAll(.lte, range.last, ty, self.module)) + if (last.compareScalar(.gte, range.first, ty, self.module) and + first.compareScalar(.lte, range.last, ty, self.module)) { return range.src; // They overlap. 
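            // Two inclusive ranges [first, last] and [range.first, range.last]
            // intersect exactly when last >= range.first and first <= range.last,
            // which is what the two compareScalar calls above check; switch range
            // bounds are single scalar values, so the vector-aware compareAll is
            // not needed here.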
} @@ -53,7 +53,7 @@ const LessThanContext = struct { ty: Type, module: *Module }; /// Assumes a and b do not overlap fn lessThan(ctx: LessThanContext, a: Range, b: Range) bool { - return a.first.compareAll(.lt, b.first, ctx.ty, ctx.module); + return a.first.compareScalar(.lt, b.first, ctx.ty, ctx.module); } pub fn spans(self: *RangeSet, first: Value, last: Value, ty: Type) !bool { diff --git a/src/Sema.zig b/src/Sema.zig index 43aa7e056e..3aa845c10b 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -2995,7 +2995,6 @@ fn zirEnumDecl( var cur_bit_bag: u32 = undefined; var field_i: u32 = 0; var last_tag_val: ?Value = null; - var tag_val_buf: Value.Payload.U64 = undefined; while (field_i < fields_len) : (field_i += 1) { if (field_i % 32 == 0) { cur_bit_bag = sema.code.extra[bit_bag_index]; @@ -3084,11 +3083,7 @@ fn zirEnumDecl( return sema.failWithOwnedErrorMsg(msg); } } else { - tag_val_buf = .{ - .base = .{ .tag = .int_u64 }, - .data = field_i, - }; - last_tag_val = Value.initPayload(&tag_val_buf.base); + last_tag_val = try mod.intValue(enum_obj.tag_ty, field_i); } if (!(try sema.intFitsInType(last_tag_val.?, enum_obj.tag_ty, null))) { @@ -5180,16 +5175,23 @@ fn zirIntBig(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. const tracy = trace(@src()); defer tracy.end(); - const arena = sema.arena; + const mod = sema.mod; const int = sema.code.instructions.items(.data)[inst].str; const byte_count = int.len * @sizeOf(std.math.big.Limb); const limb_bytes = sema.code.string_bytes[int.start..][0..byte_count]; - const limbs = try arena.alloc(std.math.big.Limb, int.len); + + // TODO: this allocation and copy is only needed because the limbs may be unaligned. + // If ZIR is adjusted so that big int limbs are guaranteed to be aligned, these + // two lines can be removed. + const limbs = try sema.arena.alloc(std.math.big.Limb, int.len); @memcpy(mem.sliceAsBytes(limbs), limb_bytes); return sema.addConstant( Type.comptime_int, - try Value.Tag.int_big_positive.create(arena, limbs), + try mod.intValue_big(Type.comptime_int, .{ + .limbs = limbs, + .positive = true, + }), ); } @@ -8095,6 +8097,7 @@ fn zirErrorToInt(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const src = LazySrcLoc.nodeOffset(extra.node); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node }; @@ -8107,12 +8110,13 @@ fn zirErrorToInt(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat } switch (val.tag()) { .@"error" => { - const payload = try sema.arena.create(Value.Payload.U64); - payload.* = .{ - .base = .{ .tag = .int_u64 }, - .data = (try sema.mod.getErrorValue(val.castTag(.@"error").?.data.name)).value, - }; - return sema.addConstant(Type.err_int, Value.initPayload(&payload.base)); + return sema.addConstant( + Type.err_int, + try mod.intValue( + Type.err_int, + (try sema.mod.getErrorValue(val.castTag(.@"error").?.data.name)).value, + ), + ); }, // This is not a valid combination with the type `anyerror`. 
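The pattern in the hunk above repeats throughout this commit: arena-allocated
Value.Payload.U64 wrappers are replaced by the Module.intValue helpers added
in src/Module.zig earlier in this patch. A minimal sketch of the new API (not
from the patch itself; assumes a `mod: *Module` is in scope):

    // Values that fit in u64/i64 take the fast paths; wider integers
    // fall through to intValue_big, which interns the big-int limbs.
    const answer = try mod.intValue(Type.usize, 42);
    const neg_one = try mod.intValue(Type.comptime_int, -1);
    // The result is an interned, typed integer Value backed by the
    // InternPool rather than by a Sema arena allocation.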
@@ -8280,8 +8284,7 @@ fn zirEnumToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A } if (try sema.resolveMaybeUndefVal(enum_tag)) |enum_tag_val| { - var buffer: Value.Payload.U64 = undefined; - const val = enum_tag_val.enumToInt(enum_tag_ty, &buffer); + const val = try enum_tag_val.enumToInt(enum_tag_ty, mod); return sema.addConstant(int_tag_ty, try val.copy(sema.arena)); } @@ -9685,7 +9688,7 @@ fn intCast( // range shrinkage // requirement: int value fits into target type if (wanted_value_bits < actual_value_bits) { - const dest_max_val_scalar = try dest_scalar_ty.maxInt(sema.arena, mod); + const dest_max_val_scalar = try dest_scalar_ty.maxIntScalar(mod); const dest_max_val = if (is_vector) try Value.Tag.repeated.create(sema.arena, dest_max_val_scalar) else @@ -9946,7 +9949,7 @@ fn zirFloatCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A } if (try sema.resolveMaybeUndefVal(operand)) |operand_val| { - return sema.addConstant(dest_ty, try operand_val.floatCast(sema.arena, dest_ty, target)); + return sema.addConstant(dest_ty, try operand_val.floatCast(sema.arena, dest_ty, mod)); } if (dest_is_comptime_float) { return sema.fail(block, operand_src, "unable to cast runtime value to 'comptime_float'", .{}); @@ -10470,7 +10473,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError // Duplicate checking variables later also used for `inline else`. var seen_enum_fields: []?Module.SwitchProngSrc = &.{}; var seen_errors = SwitchErrorSet.init(gpa); - var range_set = RangeSet.init(gpa, sema.mod); + var range_set = RangeSet.init(gpa, mod); var true_count: u8 = 0; var false_count: u8 = 0; @@ -10596,11 +10599,11 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError .{field_name}, ); } - try sema.mod.errNoteNonLazy( - operand_ty.declSrcLoc(sema.mod), + try mod.errNoteNonLazy( + operand_ty.declSrcLoc(mod), msg, "enum '{}' declared here", - .{operand_ty.fmt(sema.mod)}, + .{operand_ty.fmt(mod)}, ); break :msg msg; }; @@ -10827,7 +10830,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError defer arena.deinit(); const min_int = try operand_ty.minInt(arena.allocator(), mod); - const max_int = try operand_ty.maxInt(arena.allocator(), mod); + const max_int = try operand_ty.maxIntScalar(mod); if (try range_set.spans(min_int, max_int, operand_ty)) { if (special_prong == .@"else") { return sema.fail( @@ -10926,13 +10929,13 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError block, src, "else prong required when switching on type '{}'", - .{operand_ty.fmt(sema.mod)}, + .{operand_ty.fmt(mod)}, ); } var seen_values = ValueSrcMap.initContext(gpa, .{ .ty = operand_ty, - .mod = sema.mod, + .mod = mod, }); defer seen_values.deinit(); @@ -10996,7 +10999,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError .ComptimeFloat, .Float, => return sema.fail(block, operand_src, "invalid switch operand type '{}'", .{ - operand_ty.fmt(sema.mod), + operand_ty.fmt(mod), }), } @@ -11054,7 +11057,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const item = try sema.resolveInst(item_ref); // Validation above ensured these will succeed. 
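                    // Hence the `catch unreachable` on the next line: resolving an
                    // already validated item to a constant cannot fail, so no source
                    // location or failure reason needs to be supplied (.unneeded, "").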
const item_val = sema.resolveConstValue(&child_block, .unneeded, item, "") catch unreachable; - if (operand_val.eql(item_val, operand_ty, sema.mod)) { + if (operand_val.eql(item_val, operand_ty, mod)) { if (is_inline) child_block.inline_case_capture = operand; if (err_set) try sema.maybeErrorUnwrapComptime(&child_block, body, operand); @@ -11080,7 +11083,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const item = try sema.resolveInst(item_ref); // Validation above ensured these will succeed. const item_val = sema.resolveConstValue(&child_block, .unneeded, item, "") catch unreachable; - if (operand_val.eql(item_val, operand_ty, sema.mod)) { + if (operand_val.eql(item_val, operand_ty, mod)) { if (is_inline) child_block.inline_case_capture = operand; if (err_set) try sema.maybeErrorUnwrapComptime(&child_block, body, operand); @@ -11128,7 +11131,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError if (err_set and try sema.maybeErrorUnwrap(block, special.body, operand)) { return Air.Inst.Ref.unreachable_value; } - if (sema.mod.backendSupportsFeature(.is_named_enum_value) and block.wantSafety() and operand_ty.zigTypeTag(mod) == .Enum and + if (mod.backendSupportsFeature(.is_named_enum_value) and block.wantSafety() and operand_ty.zigTypeTag(mod) == .Enum and (!operand_ty.isNonexhaustiveEnum() or union_originally)) { try sema.zirDbgStmt(block, cond_dbg_node_index); @@ -11182,7 +11185,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const analyze_body = if (union_originally) blk: { const item_val = sema.resolveConstValue(block, .unneeded, item, "") catch unreachable; - const field_ty = maybe_union_ty.unionFieldType(item_val, sema.mod); + const field_ty = maybe_union_ty.unionFieldType(item_val, mod); break :blk field_ty.zigTypeTag(mod) != .NoReturn; } else true; @@ -11245,9 +11248,9 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const item_last_ref = try sema.resolveInst(last_ref); const item_last = sema.resolveConstValue(block, .unneeded, item_last_ref, undefined) catch unreachable; - while (item.compareAll(.lte, item_last, operand_ty, sema.mod)) : ({ + while (item.compareScalar(.lte, item_last, operand_ty, mod)) : ({ // Previous validation has resolved any possible lazy values. 
- item = try sema.intAddScalar(item, Value.one); + item = try sema.intAddScalar(item, Value.one, operand_ty); }) { cases_len += 1; @@ -11260,7 +11263,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError if (emit_bb) sema.emitBackwardBranch(block, .unneeded) catch |err| switch (err) { error.NeededSourceLocation => { const case_src = Module.SwitchProngSrc{ .range = .{ .prong = multi_i, .item = range_i } }; - const decl = sema.mod.declPtr(case_block.src_decl); + const decl = mod.declPtr(case_block.src_decl); try sema.emitBackwardBranch(block, case_src.resolve(sema.gpa, decl, src_node_offset, .none)); unreachable; }, @@ -11289,14 +11292,14 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const analyze_body = if (union_originally) blk: { const item_val = sema.resolveConstValue(block, .unneeded, item, undefined) catch unreachable; - const field_ty = maybe_union_ty.unionFieldType(item_val, sema.mod); + const field_ty = maybe_union_ty.unionFieldType(item_val, mod); break :blk field_ty.zigTypeTag(mod) != .NoReturn; } else true; if (emit_bb) sema.emitBackwardBranch(block, .unneeded) catch |err| switch (err) { error.NeededSourceLocation => { const case_src = Module.SwitchProngSrc{ .multi = .{ .prong = multi_i, .item = @intCast(u32, item_i) } }; - const decl = sema.mod.declPtr(case_block.src_decl); + const decl = mod.declPtr(case_block.src_decl); try sema.emitBackwardBranch(block, case_src.resolve(sema.gpa, decl, src_node_offset, .none)); unreachable; }, @@ -11333,7 +11336,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError for (items) |item_ref| { const item = try sema.resolveInst(item_ref); const item_val = sema.resolveConstValue(block, .unneeded, item, "") catch unreachable; - const field_ty = maybe_union_ty.unionFieldType(item_val, sema.mod); + const field_ty = maybe_union_ty.unionFieldType(item_val, mod); if (field_ty.zigTypeTag(mod) != .NoReturn) break true; } else false else @@ -11461,7 +11464,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError .Enum => { if (operand_ty.isNonexhaustiveEnum() and !union_originally) { return sema.fail(block, special_prong_src, "cannot enumerate values of type '{}' for 'inline else'", .{ - operand_ty.fmt(sema.mod), + operand_ty.fmt(mod), }); } for (seen_enum_fields, 0..) 
|f, i| { @@ -11476,7 +11479,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError case_block.wip_capture_scope = child_block.wip_capture_scope; const analyze_body = if (union_originally) blk: { - const field_ty = maybe_union_ty.unionFieldType(item_val, sema.mod); + const field_ty = maybe_union_ty.unionFieldType(item_val, mod); break :blk field_ty.zigTypeTag(mod) != .NoReturn; } else true; @@ -11499,7 +11502,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError .ErrorSet => { if (operand_ty.isAnyError()) { return sema.fail(block, special_prong_src, "cannot enumerate values of type '{}' for 'inline else'", .{ - operand_ty.fmt(sema.mod), + operand_ty.fmt(mod), }); } for (operand_ty.errorSetNames()) |error_name| { @@ -11587,7 +11590,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError } }, else => return sema.fail(block, special_prong_src, "cannot enumerate values of type '{}' for 'inline else'", .{ - operand_ty.fmt(sema.mod), + operand_ty.fmt(mod), }), }; @@ -11598,7 +11601,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError case_block.wip_capture_scope = wip_captures.scope; case_block.inline_case_capture = .none; - if (sema.mod.backendSupportsFeature(.is_named_enum_value) and special.body.len != 0 and block.wantSafety() and + if (mod.backendSupportsFeature(.is_named_enum_value) and special.body.len != 0 and block.wantSafety() and operand_ty.zigTypeTag(mod) == .Enum and (!operand_ty.isNonexhaustiveEnum() or union_originally)) { try sema.zirDbgStmt(&case_block, cond_dbg_node_index); @@ -11679,7 +11682,7 @@ const RangeSetUnhandledIterator = struct { fn init(sema: *Sema, ty: Type, range_set: RangeSet) !RangeSetUnhandledIterator { const mod = sema.mod; const min = try ty.minInt(sema.arena, mod); - const max = try ty.maxInt(sema.arena, mod); + const max = try ty.maxIntScalar(mod); return RangeSetUnhandledIterator{ .sema = sema, @@ -11693,19 +11696,19 @@ const RangeSetUnhandledIterator = struct { fn next(it: *RangeSetUnhandledIterator) !?Value { while (it.range_i < it.ranges.len) : (it.range_i += 1) { if (!it.first) { - it.cur = try it.sema.intAdd(it.cur, Value.one, it.ty); + it.cur = try it.sema.intAddScalar(it.cur, Value.one, it.ty); } it.first = false; - if (it.cur.compareAll(.lt, it.ranges[it.range_i].first, it.ty, it.sema.mod)) { + if (it.cur.compareScalar(.lt, it.ranges[it.range_i].first, it.ty, it.sema.mod)) { return it.cur; } it.cur = it.ranges[it.range_i].last; } if (!it.first) { - it.cur = try it.sema.intAdd(it.cur, Value.one, it.ty); + it.cur = try it.sema.intAddScalar(it.cur, Value.one, it.ty); } it.first = false; - if (it.cur.compareAll(.lte, it.max, it.ty, it.sema.mod)) { + if (it.cur.compareScalar(.lte, it.max, it.ty, it.sema.mod)) { return it.cur; } return null; @@ -11750,7 +11753,7 @@ fn validateSwitchRange( ) CompileError!void { const first_val = (try sema.resolveSwitchItemVal(block, first_ref, src_node_offset, switch_prong_src, .first)).val; const last_val = (try sema.resolveSwitchItemVal(block, last_ref, src_node_offset, switch_prong_src, .last)).val; - if (first_val.compareAll(.gt, last_val, operand_ty, sema.mod)) { + if (first_val.compareScalar(.gt, last_val, operand_ty, sema.mod)) { const src = switch_prong_src.resolve(sema.gpa, sema.mod.declPtr(block.src_decl), src_node_offset, .first); return sema.fail(block, src, "range start value is greater than the end value", .{}); } @@ -12208,16 +12211,11 @@ fn zirShl( return lhs; } if 
(scalar_ty.zigTypeTag(mod) != .ComptimeInt and air_tag != .shl_sat) { - var bits_payload = Value.Payload.U64{ - .base = .{ .tag = .int_u64 }, - .data = scalar_ty.intInfo(mod).bits, - }; - const bit_value = Value.initPayload(&bits_payload.base); + const bit_value = try mod.intValue(Type.comptime_int, scalar_ty.intInfo(mod).bits); if (rhs_ty.zigTypeTag(mod) == .Vector) { var i: usize = 0; while (i < rhs_ty.vectorLen(mod)) : (i += 1) { - var elem_value_buf: Value.ElemValueBuffer = undefined; - const rhs_elem = rhs_val.elemValueBuffer(sema.mod, i, &elem_value_buf); + const rhs_elem = try rhs_val.elemValue(sema.mod, i); if (rhs_elem.compareHetero(.gte, bit_value, mod)) { return sema.fail(block, rhs_src, "shift amount '{}' at index '{d}' is too large for operand type '{}'", .{ rhs_elem.fmtValue(scalar_ty, sema.mod), @@ -12236,8 +12234,7 @@ fn zirShl( if (rhs_ty.zigTypeTag(mod) == .Vector) { var i: usize = 0; while (i < rhs_ty.vectorLen(mod)) : (i += 1) { - var elem_value_buf: Value.ElemValueBuffer = undefined; - const rhs_elem = rhs_val.elemValueBuffer(sema.mod, i, &elem_value_buf); + const rhs_elem = try rhs_val.elemValue(sema.mod, i); if (rhs_elem.compareHetero(.lt, Value.zero, mod)) { return sema.fail(block, rhs_src, "shift by negative amount '{}' at index '{d}'", .{ rhs_elem.fmtValue(scalar_ty, sema.mod), @@ -12309,7 +12306,7 @@ fn zirShl( if (block.wantSafety()) { const bit_count = scalar_ty.intInfo(mod).bits; if (!std.math.isPowerOfTwo(bit_count)) { - const bit_count_val = try Value.Tag.int_u64.create(sema.arena, bit_count); + const bit_count_val = try mod.intValue(scalar_ty, bit_count); const ok = if (rhs_ty.zigTypeTag(mod) == .Vector) ok: { const bit_count_inst = try sema.addConstant(rhs_ty, try Value.Tag.repeated.create(sema.arena, bit_count_val)); @@ -12396,16 +12393,11 @@ fn zirShr( return lhs; } if (scalar_ty.zigTypeTag(mod) != .ComptimeInt) { - var bits_payload = Value.Payload.U64{ - .base = .{ .tag = .int_u64 }, - .data = scalar_ty.intInfo(mod).bits, - }; - const bit_value = Value.initPayload(&bits_payload.base); + const bit_value = try mod.intValue(Type.comptime_int, scalar_ty.intInfo(mod).bits); if (rhs_ty.zigTypeTag(mod) == .Vector) { var i: usize = 0; while (i < rhs_ty.vectorLen(mod)) : (i += 1) { - var elem_value_buf: Value.ElemValueBuffer = undefined; - const rhs_elem = rhs_val.elemValueBuffer(sema.mod, i, &elem_value_buf); + const rhs_elem = try rhs_val.elemValue(sema.mod, i); if (rhs_elem.compareHetero(.gte, bit_value, mod)) { return sema.fail(block, rhs_src, "shift amount '{}' at index '{d}' is too large for operand type '{}'", .{ rhs_elem.fmtValue(scalar_ty, sema.mod), @@ -12424,8 +12416,7 @@ fn zirShr( if (rhs_ty.zigTypeTag(mod) == .Vector) { var i: usize = 0; while (i < rhs_ty.vectorLen(mod)) : (i += 1) { - var elem_value_buf: Value.ElemValueBuffer = undefined; - const rhs_elem = rhs_val.elemValueBuffer(sema.mod, i, &elem_value_buf); + const rhs_elem = try rhs_val.elemValue(sema.mod, i); if (rhs_elem.compareHetero(.lt, Value.zero, mod)) { return sema.fail(block, rhs_src, "shift by negative amount '{}' at index '{d}'", .{ rhs_elem.fmtValue(scalar_ty, sema.mod), @@ -12465,7 +12456,7 @@ fn zirShr( if (block.wantSafety()) { const bit_count = scalar_ty.intInfo(mod).bits; if (!std.math.isPowerOfTwo(bit_count)) { - const bit_count_val = try Value.Tag.int_u64.create(sema.arena, bit_count); + const bit_count_val = try mod.intValue(scalar_ty, bit_count); const ok = if (rhs_ty.zigTypeTag(mod) == .Vector) ok: { const bit_count_inst = try sema.addConstant(rhs_ty, try 
Value.Tag.repeated.create(sema.arena, bit_count_val)); @@ -12587,10 +12578,9 @@ fn zirBitNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. return sema.addConstUndef(operand_type); } else if (operand_type.zigTypeTag(mod) == .Vector) { const vec_len = try sema.usizeCast(block, operand_src, operand_type.vectorLen(mod)); - var elem_val_buf: Value.ElemValueBuffer = undefined; const elems = try sema.arena.alloc(Value, vec_len); for (elems, 0..) |*elem, i| { - const elem_val = val.elemValueBuffer(sema.mod, i, &elem_val_buf); + const elem_val = try val.elemValue(sema.mod, i); elem.* = try elem_val.bitwiseNot(scalar_type, sema.arena, sema.mod); } return sema.addConstant( @@ -12695,6 +12685,7 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const lhs = try sema.resolveInst(extra.lhs); @@ -12714,11 +12705,11 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const lhs_info = try sema.getArrayCatInfo(block, lhs_src, lhs, rhs_ty) orelse lhs_info: { if (lhs_is_tuple) break :lhs_info @as(Type.ArrayInfo, undefined); - return sema.fail(block, lhs_src, "expected indexable; found '{}'", .{lhs_ty.fmt(sema.mod)}); + return sema.fail(block, lhs_src, "expected indexable; found '{}'", .{lhs_ty.fmt(mod)}); }; const rhs_info = try sema.getArrayCatInfo(block, rhs_src, rhs, lhs_ty) orelse { assert(!rhs_is_tuple); - return sema.fail(block, rhs_src, "expected indexable; found '{}'", .{rhs_ty.fmt(sema.mod)}); + return sema.fail(block, rhs_src, "expected indexable; found '{}'", .{rhs_ty.fmt(mod)}); }; const resolved_elem_ty = t: { @@ -12780,8 +12771,7 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai ), }; - const result_ty = try Type.array(sema.arena, result_len, res_sent_val, resolved_elem_ty, sema.mod); - const mod = sema.mod; + const result_ty = try Type.array(sema.arena, result_len, res_sent_val, resolved_elem_ty, mod); const ptr_addrspace = p: { if (lhs_ty.zigTypeTag(mod) == .Pointer) break :p lhs_ty.ptrAddressSpace(mod); if (rhs_ty.zigTypeTag(mod) == .Pointer) break :p rhs_ty.ptrAddressSpace(mod); @@ -12815,7 +12805,7 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const lhs_elem_i = elem_i; const elem_ty = if (lhs_is_tuple) lhs_ty.structFieldType(lhs_elem_i) else lhs_info.elem_type; const elem_default_val = if (lhs_is_tuple) lhs_ty.structFieldDefaultValue(lhs_elem_i) else Value.@"unreachable"; - const elem_val = if (elem_default_val.ip_index == .unreachable_value) try lhs_sub_val.elemValue(sema.mod, sema.arena, lhs_elem_i) else elem_default_val; + const elem_val = if (elem_default_val.ip_index == .unreachable_value) try lhs_sub_val.elemValue(mod, lhs_elem_i) else elem_default_val; const elem_val_inst = try sema.addConstant(elem_ty, elem_val); const coerced_elem_val_inst = try sema.coerce(block, resolved_elem_ty, elem_val_inst, .unneeded); const coerced_elem_val = try sema.resolveConstMaybeUndefVal(block, .unneeded, coerced_elem_val_inst, ""); @@ -12825,7 +12815,7 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const rhs_elem_i = elem_i - lhs_len; const elem_ty = if (rhs_is_tuple) rhs_ty.structFieldType(rhs_elem_i) else rhs_info.elem_type; const elem_default_val = if (rhs_is_tuple) 
rhs_ty.structFieldDefaultValue(rhs_elem_i) else Value.@"unreachable"; - const elem_val = if (elem_default_val.ip_index == .unreachable_value) try rhs_sub_val.elemValue(sema.mod, sema.arena, rhs_elem_i) else elem_default_val; + const elem_val = if (elem_default_val.ip_index == .unreachable_value) try rhs_sub_val.elemValue(mod, rhs_elem_i) else elem_default_val; const elem_val_inst = try sema.addConstant(elem_ty, elem_val); const coerced_elem_val_inst = try sema.coerce(block, resolved_elem_ty, elem_val_inst, .unneeded); const coerced_elem_val = try sema.resolveConstMaybeUndefVal(block, .unneeded, coerced_elem_val_inst, ""); @@ -12842,12 +12832,12 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai try sema.requireRuntimeBlock(block, src, runtime_src); if (ptr_addrspace) |ptr_as| { - const alloc_ty = try Type.ptr(sema.arena, sema.mod, .{ + const alloc_ty = try Type.ptr(sema.arena, mod, .{ .pointee_type = result_ty, .@"addrspace" = ptr_as, }); const alloc = try block.addTy(.alloc, alloc_ty); - const elem_ptr_ty = try Type.ptr(sema.arena, sema.mod, .{ + const elem_ptr_ty = try Type.ptr(sema.arena, mod, .{ .pointee_type = resolved_elem_ty, .@"addrspace" = ptr_as, }); @@ -13009,6 +12999,7 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const lhs = try sema.resolveInst(extra.lhs); @@ -13025,10 +13016,9 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } // Analyze the lhs first, to catch the case that someone tried to do exponentiation - const mod = sema.mod; const lhs_info = try sema.getArrayCatInfo(block, lhs_src, lhs, lhs_ty) orelse { const msg = msg: { - const msg = try sema.errMsg(block, lhs_src, "expected indexable; found '{}'", .{lhs_ty.fmt(sema.mod)}); + const msg = try sema.errMsg(block, lhs_src, "expected indexable; found '{}'", .{lhs_ty.fmt(mod)}); errdefer msg.destroy(sema.gpa); switch (lhs_ty.zigTypeTag(mod)) { .Int, .Float, .ComptimeFloat, .ComptimeInt, .Vector => { @@ -13048,7 +13038,7 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai return sema.fail(block, rhs_src, "operation results in overflow", .{}); const result_len = try sema.usizeCast(block, src, result_len_u64); - const result_ty = try Type.array(sema.arena, result_len, lhs_info.sentinel, lhs_info.elem_type, sema.mod); + const result_ty = try Type.array(sema.arena, result_len, lhs_info.sentinel, lhs_info.elem_type, mod); const ptr_addrspace = if (lhs_ty.zigTypeTag(mod) == .Pointer) lhs_ty.ptrAddressSpace(mod) else null; const lhs_len = try sema.usizeCast(block, lhs_src, lhs_info.len); @@ -13065,7 +13055,7 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // Optimization for the common pattern of a single element repeated N times, such // as zero-filling a byte array. 
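            // A single repeated element is stored once via Value.Tag.repeated
            // rather than copied result_len times, so repeating one element
            // (e.g. zeroing a byte array at comptime) takes O(1) storage.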
if (lhs_len == 1) { - const elem_val = try lhs_sub_val.elemValue(sema.mod, sema.arena, 0); + const elem_val = try lhs_sub_val.elemValue(mod, 0); break :v try Value.Tag.repeated.create(sema.arena, elem_val); } @@ -13074,7 +13064,7 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai while (elem_i < result_len) { var lhs_i: usize = 0; while (lhs_i < lhs_len) : (lhs_i += 1) { - const elem_val = try lhs_sub_val.elemValue(sema.mod, sema.arena, lhs_i); + const elem_val = try lhs_sub_val.elemValue(mod, lhs_i); element_vals[elem_i] = elem_val; elem_i += 1; } @@ -13090,12 +13080,12 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai try sema.requireRuntimeBlock(block, src, lhs_src); if (ptr_addrspace) |ptr_as| { - const alloc_ty = try Type.ptr(sema.arena, sema.mod, .{ + const alloc_ty = try Type.ptr(sema.arena, mod, .{ .pointee_type = result_ty, .@"addrspace" = ptr_as, }); const alloc = try block.addTy(.alloc, alloc_ty); - const elem_ptr_ty = try Type.ptr(sema.arena, sema.mod, .{ + const elem_ptr_ty = try Type.ptr(sema.arena, mod, .{ .pointee_type = lhs_info.elem_type, .@"addrspace" = ptr_as, }); @@ -13797,7 +13787,7 @@ fn addDivIntOverflowSafety( } const min_int = try resolved_type.minInt(sema.arena, mod); - const neg_one_scalar = try Value.Tag.int_i64.create(sema.arena, -1); + const neg_one_scalar = try mod.intValue(lhs_scalar_ty, -1); const neg_one = if (resolved_type.zigTypeTag(mod) == .Vector) try Value.Tag.repeated.create(sema.arena, neg_one_scalar) else @@ -13806,12 +13796,12 @@ fn addDivIntOverflowSafety( // If the LHS is comptime-known to be not equal to the min int, // no overflow is possible. if (maybe_lhs_val) |lhs_val| { - if (lhs_val.compareAll(.neq, min_int, resolved_type, mod)) return; + if (try lhs_val.compareAll(.neq, min_int, resolved_type, mod)) return; } // If the RHS is comptime-known to not be equal to -1, no overflow is possible. if (maybe_rhs_val) |rhs_val| { - if (rhs_val.compareAll(.neq, neg_one, resolved_type, mod)) return; + if (try rhs_val.compareAll(.neq, neg_one, resolved_type, mod)) return; } var ok: Air.Inst.Ref = .none; @@ -14038,23 +14028,18 @@ fn intRem( const mod = sema.mod; if (ty.zigTypeTag(mod) == .Vector) { const result_data = try sema.arena.alloc(Value, ty.vectorLen(mod)); + const scalar_ty = ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf); - scalar.* = try sema.intRemScalar(lhs_elem, rhs_elem); + const lhs_elem = try lhs.elemValue(sema.mod, i); + const rhs_elem = try rhs.elemValue(sema.mod, i); + scalar.* = try sema.intRemScalar(lhs_elem, rhs_elem, scalar_ty); } return Value.Tag.aggregate.create(sema.arena, result_data); } - return sema.intRemScalar(lhs, rhs); + return sema.intRemScalar(lhs, rhs, ty); } -fn intRemScalar( - sema: *Sema, - lhs: Value, - rhs: Value, -) CompileError!Value { +fn intRemScalar(sema: *Sema, lhs: Value, rhs: Value, scalar_ty: Type) CompileError!Value { const mod = sema.mod; // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. 
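The hunk below shows why intRem/intRemScalar now thread a scalar_ty parameter
through: interned integers are typed, so mod.intValue_big must be told the
result type, whereas the old Value.fromBigInt produced a bare, untyped
big-int payload in the Sema arena.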
@@ -14079,7 +14064,7 @@ fn intRemScalar( var result_q = math.big.int.Mutable{ .limbs = limbs_q, .positive = undefined, .len = undefined }; var result_r = math.big.int.Mutable{ .limbs = limbs_r, .positive = undefined, .len = undefined }; result_q.divTrunc(&result_r, lhs_bigint, rhs_bigint, limbs_buffer); - return Value.fromBigInt(sema.arena, result_r.toConst()); + return mod.intValue_big(scalar_ty, result_r.toConst()); } fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -15063,7 +15048,7 @@ fn analyzePtrArithmetic( .ptr_sub => addr - elem_size * offset_int, else => unreachable, }; - const new_ptr_val = try Value.Tag.int_u64.create(sema.arena, new_addr); + const new_ptr_val = try mod.intValue(new_ptr_ty, new_addr); return sema.addConstant(new_ptr_ty, new_ptr_val); } if (air_tag == .ptr_sub) { @@ -15826,9 +15811,9 @@ fn zirBuiltinSrc( // fn_name: [:0]const u8, field_values[1] = func_name_val; // line: u32 - field_values[2] = try Value.Tag.runtime_value.create(sema.arena, try Value.Tag.int_u64.create(sema.arena, extra.line + 1)); + field_values[2] = try Value.Tag.runtime_value.create(sema.arena, try mod.intValue(Type.u32, extra.line + 1)); // column: u32, - field_values[3] = try Value.Tag.int_u64.create(sema.arena, extra.column + 1); + field_values[3] = try mod.intValue(Type.u32, extra.column + 1); return sema.addConstant( try sema.getBuiltinType("SourceLocation"), @@ -15977,7 +15962,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai ); break :v try Value.Tag.slice.create(sema.arena, .{ .ptr = try Value.Tag.decl_ref.create(sema.arena, new_decl), - .len = try Value.Tag.int_u64.create(sema.arena, param_vals.len), + .len = try mod.intValue(Type.usize, param_vals.len), }); }; @@ -15994,7 +15979,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // calling_convention: CallingConvention, try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(info.cc)), // alignment: comptime_int, - try Value.Tag.int_u64.create(sema.arena, ty.abiAlignment(mod)), + try mod.intValue(Type.comptime_int, ty.abiAlignment(mod)), // is_generic: bool, Value.makeBool(info.is_generic), // is_var_args: bool, @@ -16022,7 +16007,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai @enumToInt(info.signedness), ); // bits: comptime_int, - field_values[1] = try Value.Tag.int_u64.create(sema.arena, info.bits); + field_values[1] = try mod.intValue(Type.comptime_int, info.bits); return sema.addConstant( type_info_ty, @@ -16035,7 +16020,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai .Float => { const field_values = try sema.arena.alloc(Value, 1); // bits: comptime_int, - field_values[0] = try Value.Tag.int_u64.create(sema.arena, ty.bitSize(mod)); + field_values[0] = try mod.intValue(Type.comptime_int, ty.bitSize(mod)); return sema.addConstant( type_info_ty, @@ -16048,7 +16033,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai .Pointer => { const info = ty.ptrInfo(mod); const alignment = if (info.@"align" != 0) - try Value.Tag.int_u64.create(sema.arena, info.@"align") + try mod.intValue(Type.comptime_int, info.@"align") else try info.pointee_type.lazyAbiAlignment(mod, sema.arena); @@ -16084,7 +16069,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const info = ty.arrayInfo(mod); const field_values = try sema.arena.alloc(Value, 3); // len: comptime_int, - field_values[0] = try 
Value.Tag.int_u64.create(sema.arena, info.len);
+            field_values[0] = try mod.intValue(Type.comptime_int, info.len);
             // child: type,
             field_values[1] = try Value.Tag.ty.create(sema.arena, info.elem_type);
             // sentinel: ?*const anyopaque,
@@ -16102,7 +16087,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
             const info = ty.arrayInfo(mod);
             const field_values = try sema.arena.alloc(Value, 2);
             // len: comptime_int,
-            field_values[0] = try Value.Tag.int_u64.create(sema.arena, info.len);
+            field_values[0] = try mod.intValue(Type.comptime_int, info.len);
             // child: type,
             field_values[1] = try Value.Tag.ty.create(sema.arena, info.elem_type);
@@ -16202,7 +16187,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                     const new_decl_val = try Value.Tag.decl_ref.create(sema.arena, new_decl);
                     const slice_val = try Value.Tag.slice.create(sema.arena, .{
                         .ptr = new_decl_val,
-                        .len = try Value.Tag.int_u64.create(sema.arena, vals.len),
+                        .len = try mod.intValue(Type.usize, vals.len),
                     });
                     break :v try Value.Tag.opt_payload.create(sema.arena, slice_val);
                 } else Value.null;
@@ -16263,8 +16248,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 };
                 const tag_val = Value.initPayload(&tag_val_payload.base);

-                var buffer: Value.Payload.U64 = undefined;
-                const int_val = try tag_val.enumToInt(ty, &buffer).copy(fields_anon_decl.arena());
+                const int_val = try tag_val.enumToInt(ty, mod);

                 const name = enum_fields.keys()[i];
                 const name_val = v: {
@@ -16379,7 +16363,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                     // type: type,
                     try Value.Tag.ty.create(fields_anon_decl.arena(), field.ty),
                     // alignment: comptime_int,
-                    try Value.Tag.int_u64.create(fields_anon_decl.arena(), alignment),
+                    try mod.intValue(Type.comptime_int, alignment),
                 };
                 field_val.* = try Value.Tag.aggregate.create(fields_anon_decl.arena(), union_field_fields);
             }
@@ -16398,7 +16382,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 );
                 break :v try Value.Tag.slice.create(sema.arena, .{
                     .ptr = try Value.Tag.decl_ref.create(sema.arena, new_decl),
-                    .len = try Value.Tag.int_u64.create(sema.arena, union_field_vals.len),
+                    .len = try mod.intValue(Type.usize, union_field_vals.len),
                 });
             };
@@ -16476,7 +16460,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 );
                 break :v try Value.Tag.slice.create(fields_anon_decl.arena(), .{
                     .ptr = try Value.Tag.decl_ref.create(fields_anon_decl.arena(), new_decl),
-                    .len = try Value.Tag.int_u64.create(fields_anon_decl.arena(), bytes.len),
+                    .len = try mod.intValue(Type.usize, bytes.len),
                 });
             };
@@ -16518,7 +16502,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 );
                 break :v try Value.Tag.slice.create(fields_anon_decl.arena(), .{
                     .ptr = try Value.Tag.decl_ref.create(fields_anon_decl.arena(), new_decl),
-                    .len = try Value.Tag.int_u64.create(fields_anon_decl.arena(), bytes.len),
+                    .len = try mod.intValue(Type.usize, bytes.len),
                 });
             };
@@ -16540,7 +16524,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                     // is_comptime: bool,
                     Value.makeBool(field.is_comptime),
                     // alignment: comptime_int,
-                    try Value.Tag.int_u64.create(fields_anon_decl.arena(), alignment),
+                    try mod.intValue(Type.comptime_int, alignment),
                 };
                 field_val.* = try Value.Tag.aggregate.create(fields_anon_decl.arena(), struct_field_fields);
             }
@@ -16561,7 +16545,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 );
                 break :v try Value.Tag.slice.create(sema.arena, .{
                     .ptr = try Value.Tag.decl_ref.create(sema.arena, new_decl),
-                    .len = try Value.Tag.int_u64.create(sema.arena, struct_field_vals.len),
+                    .len = try mod.intValue(Type.usize, struct_field_vals.len),
                 });
             };
@@ -16636,6 +16620,7 @@ fn typeInfoDecls(
     type_info_ty: Type,
     opt_namespace: ?*Module.Namespace,
 ) CompileError!Value {
+    const mod = sema.mod;
     var decls_anon_decl = try block.startAnonDecl();
     defer decls_anon_decl.deinit();
@@ -16646,9 +16631,9 @@ fn typeInfoDecls(
             type_info_ty.getNamespace().?,
             "Declaration",
         )).?;
-        try sema.mod.declareDeclDependency(sema.owner_decl_index, declaration_ty_decl_index);
+        try mod.declareDeclDependency(sema.owner_decl_index, declaration_ty_decl_index);
         try sema.ensureDeclAnalyzed(declaration_ty_decl_index);
-        const declaration_ty_decl = sema.mod.declPtr(declaration_ty_decl_index);
+        const declaration_ty_decl = mod.declPtr(declaration_ty_decl_index);
         break :t try declaration_ty_decl.val.toType().copy(decls_anon_decl.arena());
     };
     try sema.queueFullTypeResolution(try declaration_ty.copy(sema.arena));
@@ -16676,7 +16661,7 @@ fn typeInfoDecls(
     );
     return try Value.Tag.slice.create(sema.arena, .{
         .ptr = try Value.Tag.decl_ref.create(sema.arena, new_decl),
-        .len = try Value.Tag.int_u64.create(sema.arena, decl_vals.items.len),
+        .len = try mod.intValue(Type.usize, decl_vals.items.len),
     });
 }
@@ -16713,7 +16698,7 @@ fn typeInfoNamespaceDecls(
         );
         break :v try Value.Tag.slice.create(decls_anon_decl, .{
             .ptr = try Value.Tag.decl_ref.create(decls_anon_decl, new_decl),
-            .len = try Value.Tag.int_u64.create(decls_anon_decl, bytes.len),
+            .len = try mod.intValue(Type.usize, bytes.len),
         });
     };
@@ -18620,10 +18605,9 @@ fn zirUnaryMath(
         if (val.isUndef())
             return sema.addConstUndef(result_ty);

-        var elem_buf: Value.ElemValueBuffer = undefined;
         const elems = try sema.arena.alloc(Value, vec_len);
         for (elems, 0..) |*elem, i| {
-            const elem_val = val.elemValueBuffer(sema.mod, i, &elem_buf);
+            const elem_val = try val.elemValue(sema.mod, i);
             elem.* = try eval(elem_val, scalar_ty, sema.arena, sema.mod);
         }
         return sema.addConstant(
@@ -18717,7 +18701,12 @@ fn zirTagName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
     return block.addUnOp(.tag_name, casted_operand);
 }

-fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+fn zirReify(
+    sema: *Sema,
+    block: *Block,
+    extended: Zir.Inst.Extended.InstData,
+    inst: Zir.Inst.Index,
+) CompileError!Air.Inst.Ref {
     const mod = sema.mod;
     const name_strategy = @intToEnum(Zir.Inst.NameStrategy, extended.small);
     const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
@@ -18730,7 +18719,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in
     const union_val = val.cast(Value.Payload.Union).?.data;
     const target = mod.getTarget();
     const tag_index = type_info_ty.unionTagFieldIndex(union_val.tag, mod).?;
-    if (union_val.val.anyUndef(mod)) return sema.failWithUseOfUndef(block, src);
+    if (try union_val.val.anyUndef(mod)) return sema.failWithUseOfUndef(block, src);
     switch (@intToEnum(std.builtin.TypeId, tag_index)) {
         .Type => return Air.Inst.Ref.type_type,
         .Void => return Air.Inst.Ref.void_type,
@@ -18845,10 +18834,10 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in
             } else if (ptr_size == .C) {
                 if (!try sema.validateExternType(elem_ty, .other)) {
                     const msg = msg: {
-                        const msg = try sema.errMsg(block, src, "C pointers cannot point to non-C-ABI-compatible type '{}'", .{elem_ty.fmt(sema.mod)});
+                        const msg = try sema.errMsg(block, src, "C pointers cannot point to non-C-ABI-compatible type '{}'", .{elem_ty.fmt(mod)});
                         errdefer msg.destroy(sema.gpa);

-                        const src_decl = sema.mod.declPtr(block.src_decl);
+                        const src_decl = mod.declPtr(block.src_decl);
                         try sema.explainWhyTypeIsNotExtern(msg, src.toSrcLoc(src_decl), elem_ty, .other);

                         try sema.addDeclaredHereNote(msg, elem_ty);
@@ -18893,7 +18882,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in
                 break :blk (try sema.pointerDeref(block, src, p.data, ptr_ty)).?;
             } else null;

-            const ty = try Type.array(sema.arena, len, sentinel, child_ty, sema.mod);
+            const ty = try Type.array(sema.arena, len, sentinel, child_ty, mod);
             return sema.addType(ty);
         },
         .Optional => {
@@ -18938,13 +18927,12 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in
             try names.ensureUnusedCapacity(sema.arena, len);
             var i: usize = 0;
             while (i < len) : (i += 1) {
-                var buf: Value.ElemValueBuffer = undefined;
-                const elem_val = slice_val.ptr.elemValueBuffer(mod, i, &buf);
+                const elem_val = try slice_val.ptr.elemValue(mod, i);
                 const struct_val = elem_val.castTag(.aggregate).?.data;
                 // TODO use reflection instead of magic numbers here
                 // error_set: type,
                 const name_val = struct_val[0];
-                const name_str = try name_val.toAllocatedBytes(Type.const_slice_u8, sema.arena, sema.mod);
+                const name_str = try name_val.toAllocatedBytes(Type.const_slice_u8, sema.arena, mod);

                 const kv = try mod.getErrorValue(name_str);
                 const gop = names.getOrPutAssumeCapacity(kv.key);
@@ -19061,7 +19049,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in

             var field_i: usize = 0;
             while (field_i < fields_len) : (field_i += 1) {
-                const elem_val = try fields_val.elemValue(sema.mod, sema.arena, field_i);
+                const elem_val = try fields_val.elemValue(mod, field_i);
                 const field_struct_val: []const Value = elem_val.castTag(.aggregate).?.data;
                 // TODO use reflection instead of magic numbers here
                 // name: []const u8
@@ -19072,7 +19060,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in
                 const field_name = try name_val.toAllocatedBytes(
                     Type.const_slice_u8,
                     new_decl_arena_allocator,
-                    sema.mod,
+                    mod,
                 );

                 if (!try sema.intFitsInType(value_val, enum_obj.tag_ty, null)) {
@@ -19183,7 +19171,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in
                 Type.Tag.union_tagged
             else if (layout != .Auto)
                 Type.Tag.@"union"
-            else switch (block.sema.mod.optimizeMode()) {
+            else switch (mod.optimizeMode()) {
                 .Debug, .ReleaseSafe => Type.Tag.union_safety_tagged,
                 .ReleaseFast, .ReleaseSmall => Type.Tag.@"union",
             };
@@ -19236,7 +19224,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in

             var i: usize = 0;
             while (i < fields_len) : (i += 1) {
-                const elem_val = try fields_val.elemValue(sema.mod, sema.arena, i);
+                const elem_val = try fields_val.elemValue(mod, i);
                 const field_struct_val = elem_val.castTag(.aggregate).?.data;
                 // TODO use reflection instead of magic numbers here
                 // name: []const u8
@@ -19249,7 +19237,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in
                 const field_name = try name_val.toAllocatedBytes(
                     Type.const_slice_u8,
                     new_decl_arena_allocator,
-                    sema.mod,
+                    mod,
                 );

                 if (enum_field_names) |set| {
@@ -19260,7 +19248,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in
                     const enum_has_field = names.orderedRemove(field_name);
                     if (!enum_has_field) {
                         const msg = msg: {
-                            const msg = try sema.errMsg(block, src, "no field named '{s}' in enum '{}'", .{ field_name, union_obj.tag_ty.fmt(sema.mod) });
+                            const msg = try sema.errMsg(block, src, "no field named '{s}' in enum '{}'", .{ field_name, union_obj.tag_ty.fmt(mod) });
                             errdefer msg.destroy(sema.gpa);
                             try sema.addDeclaredHereNote(msg, union_obj.tag_ty);
                             break :msg msg;
@@ -19293,10 +19281,10 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in
                 }
                 if (union_obj.layout == .Extern and !try sema.validateExternType(field_ty, .union_field)) {
                     const msg = msg: {
-                        const msg = try sema.errMsg(block, src, "extern unions cannot contain fields of type '{}'", .{field_ty.fmt(sema.mod)});
+                        const msg = try sema.errMsg(block, src, "extern unions cannot contain fields of type '{}'", .{field_ty.fmt(mod)});
                         errdefer msg.destroy(sema.gpa);

-                        const src_decl = sema.mod.declPtr(block.src_decl);
+                        const src_decl = mod.declPtr(block.src_decl);
                         try sema.explainWhyTypeIsNotExtern(msg, src.toSrcLoc(src_decl), field_ty, .union_field);

                         try sema.addDeclaredHereNote(msg, field_ty);
@@ -19305,10 +19293,10 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in
                     return sema.failWithOwnedErrorMsg(msg);
                 } else if (union_obj.layout == .Packed and !(validatePackedType(field_ty, mod))) {
                     const msg = msg: {
-                        const msg = try sema.errMsg(block, src, "packed unions cannot contain fields of type '{}'", .{field_ty.fmt(sema.mod)});
+                        const msg = try sema.errMsg(block, src, "packed unions cannot contain fields of type '{}'", .{field_ty.fmt(mod)});
                         errdefer msg.destroy(sema.gpa);

-                        const src_decl = sema.mod.declPtr(block.src_decl);
+                        const src_decl = mod.declPtr(block.src_decl);
                         try sema.explainWhyTypeIsNotPacked(msg, src.toSrcLoc(src_decl), field_ty);

                         try sema.addDeclaredHereNote(msg, field_ty);
@@ -19386,8 +19374,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in
             var noalias_bits: u32 = 0;
             var i: usize = 0;
             while (i < args_len) : (i += 1) {
-                var arg_buf: Value.ElemValueBuffer = undefined;
-                const arg = args_slice_val.ptr.elemValueBuffer(mod, i, &arg_buf);
+                const arg = try args_slice_val.ptr.elemValue(mod, i);
                 const arg_val = arg.castTag(.aggregate).?.data;
                 // TODO use reflection instead of magic numbers here
                 // is_generic: bool,
@@ -19486,7 +19473,7 @@ fn reifyStruct(
     try struct_obj.fields.ensureTotalCapacity(new_decl_arena_allocator, fields_len);
     var i: usize = 0;
     while (i < fields_len) : (i += 1) {
-        const elem_val = try fields_val.elemValue(sema.mod, sema.arena, i);
+        const elem_val = try fields_val.elemValue(sema.mod, i);
         const field_struct_val = elem_val.castTag(.aggregate).?.data;
         // TODO use reflection instead of magic numbers here
         // name: []const u8
@@ -19892,12 +19879,7 @@ fn zirIntToPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
         if (addr != 0 and ptr_align != 0 and addr % ptr_align != 0)
             return sema.fail(block, operand_src, "pointer type '{}' requires aligned address", .{ptr_ty.fmt(sema.mod)});

-        const val_payload = try sema.arena.create(Value.Payload.U64);
-        val_payload.* = .{
-            .base = .{ .tag = .int_u64 },
-            .data = addr,
-        };
-        return sema.addConstant(ptr_ty, Value.initPayload(&val_payload.base));
+        return sema.addConstant(ptr_ty, try mod.intValue(ptr_ty, addr));
     }

     try sema.requireRuntimeBlock(block, src, operand_src);
@@ -19908,14 +19890,9 @@ fn zirIntToPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     }

     if (ptr_align > 1) {
-        const val_payload = try sema.arena.create(Value.Payload.U64);
-        val_payload.* = .{
-            .base = .{ .tag = .int_u64 },
-            .data = ptr_align - 1,
-        };
         const align_minus_1 = try sema.addConstant(
             Type.usize,
-            Value.initPayload(&val_payload.base),
+            try mod.intValue(Type.usize, ptr_align - 1),
         );
         const remainder = try block.addBinOp(.bit_and, operand_coerced, align_minus_1);
         const is_aligned = try block.addBinOp(.cmp_eq, remainder, .zero_usize);
@@ -20254,10 +20231,9 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 try val.intTrunc(operand_ty, sema.arena, dest_info.signedness, dest_info.bits, sema.mod),
             );
         }
-        var elem_buf: Value.ElemValueBuffer = undefined;
         const elems = try sema.arena.alloc(Value, operand_ty.vectorLen(mod));
         for (elems, 0..) |*elem, i| {
-            const elem_val = val.elemValueBuffer(sema.mod, i, &elem_buf);
+            const elem_val = try val.elemValue(sema.mod, i);
             elem.* = try elem_val.intTrunc(operand_scalar_ty, sema.arena, dest_info.signedness, dest_info.bits, sema.mod);
         }
         return sema.addConstant(
@@ -20302,14 +20278,9 @@ fn zirAlignCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
     if (block.wantSafety() and dest_align > 1 and
         try sema.typeHasRuntimeBits(ptr_info.pointee_type)) {
-        const val_payload = try sema.arena.create(Value.Payload.U64);
-        val_payload.* = .{
-            .base = .{ .tag = .int_u64 },
-            .data = dest_align - 1,
-        };
         const align_minus_1 = try sema.addConstant(
             Type.usize,
-            Value.initPayload(&val_payload.base),
+            try mod.intValue(Type.usize, dest_align - 1),
         );
         const actual_ptr = if (ptr_ty.isSlice(mod))
             try sema.analyzeSlicePtr(block, ptr_src, ptr, ptr_ty)
         else
@@ -20359,13 +20330,12 @@ fn zirBitCount(
     if (try sema.resolveMaybeUndefVal(operand)) |val| {
         if (val.isUndef()) return sema.addConstUndef(result_ty);

-        var elem_buf: Value.ElemValueBuffer = undefined;
         const elems = try sema.arena.alloc(Value, vec_len);
         const scalar_ty = operand_ty.scalarType(mod);
         for (elems, 0..) |*elem, i| {
-            const elem_val = val.elemValueBuffer(sema.mod, i, &elem_buf);
+            const elem_val = try val.elemValue(sema.mod, i);
             const count = comptimeOp(elem_val, scalar_ty, mod);
-            elem.* = try Value.Tag.int_u64.create(sema.arena, count);
+            elem.* = try mod.intValue(scalar_ty, count);
         }
         return sema.addConstant(
             result_ty,
@@ -20429,10 +20399,9 @@ fn zirByteSwap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
             return sema.addConstUndef(operand_ty);

         const vec_len = operand_ty.vectorLen(mod);
-        var elem_buf: Value.ElemValueBuffer = undefined;
         const elems = try sema.arena.alloc(Value, vec_len);
         for (elems, 0..) |*elem, i| {
-            const elem_val = val.elemValueBuffer(sema.mod, i, &elem_buf);
+            const elem_val = try val.elemValue(sema.mod, i);
             elem.* = try elem_val.byteSwap(operand_ty, mod, sema.arena);
         }
         return sema.addConstant(
@@ -20478,10 +20447,9 @@ fn zirBitReverse(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
             return sema.addConstUndef(operand_ty);

         const vec_len = operand_ty.vectorLen(mod);
-        var elem_buf: Value.ElemValueBuffer = undefined;
         const elems = try sema.arena.alloc(Value, vec_len);
         for (elems, 0..) |*elem, i| {
-            const elem_val = val.elemValueBuffer(sema.mod, i, &elem_buf);
+            const elem_val = try val.elemValue(sema.mod, i);
             elem.* = try elem_val.bitReverse(scalar_ty, mod, sema.arena);
         }
         return sema.addConstant(
@@ -21241,11 +21209,10 @@ fn zirReduce(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
     if (try sema.resolveMaybeUndefVal(operand)) |operand_val| {
         if (operand_val.isUndef()) return sema.addConstUndef(scalar_ty);

-        var accum: Value = try operand_val.elemValue(mod, sema.arena, 0);
-        var elem_buf: Value.ElemValueBuffer = undefined;
+        var accum: Value = try operand_val.elemValue(mod, 0);
         var i: u32 = 1;
         while (i < vec_len) : (i += 1) {
-            const elem_val = operand_val.elemValueBuffer(mod, i, &elem_buf);
+            const elem_val = try operand_val.elemValue(mod, i);
             switch (operation) {
                 .And => accum = try accum.bitwiseAnd(elem_val, scalar_ty, sema.arena, mod),
                 .Or => accum = try accum.bitwiseOr(elem_val, scalar_ty, sema.arena, mod),
@@ -21359,8 +21326,7 @@ fn analyzeShuffle(
     var i: usize = 0;
     while (i < mask_len) : (i += 1) {
-        var buf: Value.ElemValueBuffer = undefined;
-        const elem = mask.elemValueBuffer(sema.mod, i, &buf);
+        const elem = try mask.elemValue(sema.mod, i);
         if (elem.isUndef()) continue;
         const int = elem.toSignedInt(mod);
         var unsigned: u32 = undefined;
@@ -21398,8 +21364,7 @@ fn analyzeShuffle(

         i = 0;
         while (i < mask_len) : (i += 1) {
-            var buf: Value.ElemValueBuffer = undefined;
-            const mask_elem_val = mask.elemValueBuffer(sema.mod, i, &buf);
+            const mask_elem_val = try mask.elemValue(sema.mod, i);
             if (mask_elem_val.isUndef()) {
                 values[i] = Value.undef;
                 continue;
@@ -21407,9 +21372,9 @@ fn analyzeShuffle(
             const int = mask_elem_val.toSignedInt(mod);
             const unsigned = if (int >= 0) @intCast(u32, int) else @intCast(u32, ~int);
             if (int >= 0) {
-                values[i] = try a_val.elemValue(sema.mod, sema.arena, unsigned);
+                values[i] = try a_val.elemValue(sema.mod, unsigned);
             } else {
-                values[i] = try b_val.elemValue(sema.mod, sema.arena, unsigned);
+                values[i] = try b_val.elemValue(sema.mod, unsigned);
             }
         }
         const res_val = try Value.Tag.aggregate.create(sema.arena, values);
@@ -21430,7 +21395,7 @@ fn analyzeShuffle(
     const expand_mask_values = try sema.arena.alloc(Value, max_len);
     i = 0;
     while (i < min_len) : (i += 1) {
-        expand_mask_values[i] = try Value.Tag.int_u64.create(sema.arena, i);
+        expand_mask_values[i] = try mod.intValue(Type.comptime_int, i);
     }
     while (i < max_len) : (i += 1) {
         expand_mask_values[i] = Value.negative_one;
@@ -21509,15 +21474,14 @@ fn zirSelect(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) C
         if (maybe_b) |b_val| {
             if (b_val.isUndef()) return sema.addConstUndef(vec_ty);

-            var buf: Value.ElemValueBuffer = undefined;
             const elems = try sema.gpa.alloc(Value, vec_len);
             for (elems, 0..) |*elem, i| {
-                const pred_elem_val = pred_val.elemValueBuffer(sema.mod, i, &buf);
+                const pred_elem_val = try pred_val.elemValue(sema.mod, i);
                 const should_choose_a = pred_elem_val.toBool(mod);
                 if (should_choose_a) {
-                    elem.* = a_val.elemValueBuffer(sema.mod, i, &buf);
+                    elem.* = try a_val.elemValue(sema.mod, i);
                 } else {
-                    elem.* = b_val.elemValueBuffer(sema.mod, i, &buf);
+                    elem.* = try b_val.elemValue(sema.mod, i);
                 }
             }
@@ -22067,12 +22031,10 @@ fn analyzeMinMax(
                 cur_minmax = try sema.addConstant(simd_op.result_ty, result_val);
                 continue;
             };
-            var lhs_buf: Value.ElemValueBuffer = undefined;
-            var rhs_buf: Value.ElemValueBuffer = undefined;
             const elems = try sema.arena.alloc(Value, vec_len);
             for (elems, 0..) |*elem, i| {
-                const lhs_elem_val = cur_val.elemValueBuffer(mod, i, &lhs_buf);
-                const rhs_elem_val = operand_val.elemValueBuffer(mod, i, &rhs_buf);
+                const lhs_elem_val = try cur_val.elemValue(mod, i);
+                const rhs_elem_val = try operand_val.elemValue(mod, i);
                 elem.* = opFunc(lhs_elem_val, rhs_elem_val, mod);
             }
             cur_minmax = try sema.addConstant(
@@ -22105,10 +22067,10 @@ fn analyzeMinMax(
         if (len == 0) break :blk orig_ty;
         if (elem_ty.isAnyFloat()) break :blk orig_ty; // can't refine floats

-        var cur_min: Value = try val.elemValue(mod, sema.arena, 0);
+        var cur_min: Value = try val.elemValue(mod, 0);
         var cur_max: Value = cur_min;
         for (1..len) |idx| {
-            const elem_val = try val.elemValue(mod, sema.arena, idx);
+            const elem_val = try val.elemValue(mod, idx);
             if (elem_val.isUndef()) break :blk orig_ty; // can't refine undef
             if (Value.order(elem_val, cur_min, mod).compare(.lt)) cur_min = elem_val;
             if (Value.order(elem_val, cur_max, mod).compare(.gt)) cur_max = elem_val;
@@ -23987,7 +23949,7 @@ fn fieldVal(
             if (mem.eql(u8, field_name, "len")) {
                 return sema.addConstant(
                     Type.usize,
-                    try Value.Tag.int_u64.create(arena, inner_ty.arrayLen(mod)),
+                    try mod.intValue(Type.usize, inner_ty.arrayLen(mod)),
                 );
             } else if (mem.eql(u8, field_name, "ptr") and is_pointer_to) {
                 const ptr_info = object_ty.ptrInfo(mod);
@@ -24179,7 +24141,7 @@ fn fieldPtr(
                 defer anon_decl.deinit();
                 return sema.analyzeDeclRef(try anon_decl.finish(
                     Type.usize,
-                    try Value.Tag.int_u64.create(anon_decl.arena(), inner_ty.arrayLen(mod)),
+                    try mod.intValue(Type.usize, inner_ty.arrayLen(mod)),
                     0, // default alignment
                 ));
             } else {
@@ -25352,7 +25314,7 @@ fn elemValArray(
     }
     if (maybe_index_val) |index_val| {
         const index = @intCast(usize, index_val.toUnsignedInt(mod));
-        const elem_val = try array_val.elemValue(sema.mod, sema.arena, index);
+        const elem_val = try array_val.elemValue(mod, index);
         return sema.addConstant(elem_ty, elem_val);
     }
 }
@@ -25914,7 +25876,7 @@ fn coerceExtra(
                     // we use a dummy pointer value with the required alignment.
                     const slice_val = try Value.Tag.slice.create(sema.arena, .{
                         .ptr = if (dest_info.@"align" != 0)
-                            try Value.Tag.int_u64.create(sema.arena, dest_info.@"align")
+                            try mod.intValue(Type.usize, dest_info.@"align")
                         else
                             try dest_info.pointee_type.lazyAbiAlignment(mod, sema.arena),
                         .len = Value.zero,
@@ -26022,7 +25984,7 @@ fn coerceExtra(
         .Float, .ComptimeFloat => switch (inst_ty.zigTypeTag(mod)) {
             .ComptimeFloat => {
                 const val = try sema.resolveConstValue(block, .unneeded, inst, "");
-                const result_val = try val.floatCast(sema.arena, dest_ty, target);
+                const result_val = try val.floatCast(sema.arena, dest_ty, mod);
                 return try sema.addConstant(dest_ty, result_val);
             },
             .Float => {
@@ -26030,7 +25992,7 @@ fn coerceExtra(
                     return sema.addConstUndef(dest_ty);
                 }
                 if (try sema.resolveMaybeUndefVal(inst)) |val| {
-                    const result_val = try val.floatCast(sema.arena, dest_ty, target);
+                    const result_val = try val.floatCast(sema.arena, dest_ty, mod);
                     if (!val.eql(result_val, inst_ty, sema.mod)) {
                         return sema.fail(
                             block,
@@ -27431,11 +27393,13 @@ fn storePtrVal(
             const buffer = try sema.gpa.alloc(u8, abi_size);
             defer sema.gpa.free(buffer);
             reinterpret.val_ptr.*.writeToMemory(mut_kit.ty, sema.mod, buffer) catch |err| switch (err) {
+                error.OutOfMemory => return error.OutOfMemory,
                 error.ReinterpretDeclRef => unreachable,
                 error.IllDefinedMemoryLayout => unreachable, // Sema was supposed to emit a compile error already
                 error.Unimplemented => return sema.fail(block, src, "TODO: implement writeToMemory for type '{}'", .{mut_kit.ty.fmt(sema.mod)}),
             };
             operand_val.writeToMemory(operand_ty, sema.mod, buffer[reinterpret.byte_offset..]) catch |err| switch (err) {
+                error.OutOfMemory => return error.OutOfMemory,
                 error.ReinterpretDeclRef => unreachable,
                 error.IllDefinedMemoryLayout => unreachable, // Sema was supposed to emit a compile error already
                 error.Unimplemented => return sema.fail(block, src, "TODO: implement writeToMemory for type '{}'", .{mut_kit.ty.fmt(sema.mod)}),
             };
@@ -27589,7 +27553,7 @@ fn beginComptimePtrMutation(
                         assert(bytes.len >= dest_len);
                         const elems = try arena.alloc(Value, @intCast(usize, dest_len));
                         for (elems, 0..) |*elem, i| {
-                            elem.* = try Value.Tag.int_u64.create(arena, bytes[i]);
+                            elem.* = try mod.intValue(elem_ty, bytes[i]);
                         }

                         val_ptr.* = try Value.Tag.aggregate.create(arena, elems);
@@ -27618,7 +27582,7 @@ fn beginComptimePtrMutation(
                         const bytes = sema.mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len];
                         const elems = try arena.alloc(Value, @intCast(usize, dest_len));
                         for (bytes, 0..) |byte, i| {
-                            elems[i] = try Value.Tag.int_u64.create(arena, byte);
+                            elems[i] = try mod.intValue(elem_ty, byte);
                         }
                         if (parent.ty.sentinel(mod)) |sent_val| {
                             assert(elems.len == bytes.len + 1);
@@ -28111,7 +28075,7 @@ fn beginComptimePtrLoad(
     maybe_array_ty: ?Type,
 ) ComptimePtrLoadError!ComptimePtrLoadKit {
     const mod = sema.mod;
-    const target = sema.mod.getTarget();
+    const target = mod.getTarget();

     var deref: ComptimePtrLoadKit = switch (ptr_val.ip_index) {
         .null_value => {
@@ -28128,7 +28092,7 @@ fn beginComptimePtrLoad(
                 else => unreachable,
             };
             const is_mutable = ptr_val.tag() == .decl_ref_mut;
-            const decl = sema.mod.declPtr(decl_index);
+            const decl = mod.declPtr(decl_index);
             const decl_tv = try decl.typedValue();
             if (decl_tv.val.tagIsVariable()) return error.RuntimeLoad;
@@ -28150,7 +28114,7 @@ fn beginComptimePtrLoad(
             // to succeed, meaning that elem ptrs of the same elem_ty are coalesced. Here we check that
             // our parent is not an elem_ptr with the same elem_ty, since that would be "unflattened"
             if (elem_ptr.array_ptr.castTag(.elem_ptr)) |parent_elem_ptr| {
-                assert(!(parent_elem_ptr.data.elem_ty.eql(elem_ty, sema.mod)));
+                assert(!(parent_elem_ptr.data.elem_ty.eql(elem_ty, mod)));
             }

             if (elem_ptr.index != 0) {
@@ -28184,11 +28148,11 @@ fn beginComptimePtrLoad(
                 if (maybe_array_ty) |load_ty| {
                     // It's possible that we're loading a [N]T, in which case we'd like to slice
                     // the pointee array directly from our parent array.
-                    if (load_ty.isArrayOrVector(mod) and load_ty.childType(mod).eql(elem_ty, sema.mod)) {
+                    if (load_ty.isArrayOrVector(mod) and load_ty.childType(mod).eql(elem_ty, mod)) {
                         const N = try sema.usizeCast(block, src, load_ty.arrayLenIncludingSentinel(mod));
                         deref.pointee = if (elem_ptr.index + N <= check_len) TypedValue{
-                            .ty = try Type.array(sema.arena, N, null, elem_ty, sema.mod),
-                            .val = try array_tv.val.sliceArray(sema.mod, sema.arena, elem_ptr.index, elem_ptr.index + N),
+                            .ty = try Type.array(sema.arena, N, null, elem_ty, mod),
+                            .val = try array_tv.val.sliceArray(mod, sema.arena, elem_ptr.index, elem_ptr.index + N),
                         } else null;
                         break :blk deref;
                     }
@@ -28209,7 +28173,7 @@ fn beginComptimePtrLoad(
                 }
                 deref.pointee = TypedValue{
                     .ty = elem_ty,
-                    .val = try array_tv.val.elemValue(sema.mod, sema.arena, elem_ptr.index),
+                    .val = try array_tv.val.elemValue(mod, elem_ptr.index),
                 };
                 break :blk deref;
             },
@@ -28329,12 +28293,6 @@ fn beginComptimePtrLoad(
                 break :blk try sema.beginComptimePtrLoad(block, src, opt_payload, null);
            },

-            .zero,
-            .one,
-            .int_u64,
-            .int_i64,
-            .int_big_positive,
-            .int_big_negative,
             .variable,
             .extern_fn,
             .function,
@@ -28342,7 +28300,10 @@ fn beginComptimePtrLoad(
             else => unreachable,
         },

-        else => unreachable,
+        else => switch (mod.intern_pool.indexToKey(ptr_val.ip_index)) {
+            .int => return error.RuntimeLoad,
+            else => unreachable,
+        },
     };

     if (deref.pointee) |tv| {
@@ -28373,9 +28334,9 @@ fn bitCast(
     if (old_bits != dest_bits) {
         return sema.fail(block, inst_src, "@bitCast size mismatch: destination type '{}' has {d} bits but source type '{}' has {d} bits", .{
-            dest_ty.fmt(sema.mod),
+            dest_ty.fmt(mod),
             dest_bits,
-            old_ty.fmt(sema.mod),
+            old_ty.fmt(mod),
             old_bits,
         });
     }
@@ -28407,6 +28368,7 @@ fn bitCastVal(
     const buffer = try sema.gpa.alloc(u8, abi_size);
     defer sema.gpa.free(buffer);
     val.writeToMemory(old_ty, mod, buffer) catch |err| switch (err) {
+        error.OutOfMemory => return error.OutOfMemory,
         error.ReinterpretDeclRef => return null,
         error.IllDefinedMemoryLayout => unreachable, // Sema was supposed to emit a compile error already
         error.Unimplemented => return sema.fail(block, src, "TODO: implement writeToMemory for type '{}'", .{old_ty.fmt(mod)}),
@@ -28427,7 +28389,7 @@ fn coerceArrayPtrToSlice(
         const array_ty = ptr_array_ty.childType(mod);
         const slice_val = try Value.Tag.slice.create(sema.arena, .{
             .ptr = val,
-            .len = try Value.Tag.int_u64.create(sema.arena, array_ty.arrayLen(mod)),
+            .len = try mod.intValue(Type.usize, array_ty.arrayLen(mod)),
         });
         return sema.addConstant(dest_ty, slice_val);
     }
@@ -28781,7 +28743,7 @@ fn coerceArrayLike(
     for (element_vals, 0..) |*elem, i| {
         const index_ref = try sema.addConstant(
             Type.usize,
-            try Value.Tag.int_u64.create(sema.arena, i),
+            try mod.intValue(Type.usize, i),
         );
         const src = inst_src; // TODO better source location
         const elem_src = inst_src; // TODO better source location
@@ -29634,7 +29596,7 @@ fn analyzeSlice(
     var end_is_len = uncasted_end_opt == .none;
     const end = e: {
         if (array_ty.zigTypeTag(mod) == .Array) {
-            const len_val = try Value.Tag.int_u64.create(sema.arena, array_ty.arrayLen(mod));
+            const len_val = try mod.intValue(Type.usize, array_ty.arrayLen(mod));

             if (!end_is_len) {
                 const end = if (by_length) end: {
@@ -29643,8 +29605,8 @@ fn analyzeSlice(
                     break :end try sema.coerce(block, Type.usize, uncasted_end, end_src);
                 } else try sema.coerce(block, Type.usize, uncasted_end_opt, end_src);
                 if (try sema.resolveMaybeUndefVal(end)) |end_val| {
-                    const len_s_val = try Value.Tag.int_u64.create(
-                        sema.arena,
+                    const len_s_val = try mod.intValue(
+                        Type.usize,
                         array_ty.arrayLenIncludingSentinel(mod),
                     );
                     if (!(try sema.compareAll(end_val, .lte, len_s_val, Type.usize))) {
@@ -29689,12 +29651,10 @@ fn analyzeSlice(
                     return sema.fail(block, src, "slice of undefined", .{});
                 }
                 const has_sentinel = slice_ty.sentinel(mod) != null;
-                var int_payload: Value.Payload.U64 = .{
-                    .base = .{ .tag = .int_u64 },
-                    .data = slice_val.sliceLen(mod) + @boolToInt(has_sentinel),
-                };
-                const slice_len_val = Value.initPayload(&int_payload.base);
-                if (!(try sema.compareAll(end_val, .lte, slice_len_val, Type.usize))) {
+                const slice_len = slice_val.sliceLen(mod);
+                const len_plus_sent = slice_len + @boolToInt(has_sentinel);
+                const slice_len_val_with_sentinel = try mod.intValue(Type.usize, len_plus_sent);
+                if (!(try sema.compareAll(end_val, .lte, slice_len_val_with_sentinel, Type.usize))) {
                     const sentinel_label: []const u8 = if (has_sentinel)
                         " +1 (sentinel)"
                     else
@@ -29712,13 +29672,10 @@ fn analyzeSlice(
                     );
                 }

-                // If the slice has a sentinel, we subtract one so that
-                // end_is_len is only true if it equals the length WITHOUT
-                // the sentinel, so we don't add a sentinel type.
-                if (has_sentinel) {
-                    int_payload.data -= 1;
-                }
-
+                // If the slice has a sentinel, we consider end_is_len
+                // is only true if it equals the length WITHOUT the
+                // sentinel, so we don't add a sentinel type.
+                const slice_len_val = try mod.intValue(Type.usize, slice_len);
                 if (end_val.eql(slice_len_val, Type.usize, mod)) {
                     end_is_len = true;
                 }
@@ -30134,7 +30091,7 @@ fn cmpNumeric(
             }
         }

-        var bigint = try float128IntPartToBigInt(sema.gpa, lhs_val.toFloat(f128));
+        var bigint = try float128IntPartToBigInt(sema.gpa, lhs_val.toFloat(f128, mod));
         defer bigint.deinit();
         if (lhs_val.floatHasFraction()) {
             if (lhs_is_signed) {
@@ -30193,7 +30150,7 @@ fn cmpNumeric(
             }
         }

-        var bigint = try float128IntPartToBigInt(sema.gpa, rhs_val.toFloat(f128));
+        var bigint = try float128IntPartToBigInt(sema.gpa, rhs_val.toFloat(f128, mod));
         defer bigint.deinit();
         if (rhs_val.floatHasFraction()) {
             if (rhs_is_signed) {
@@ -31835,6 +31792,7 @@ pub fn resolveTypeFields(sema: *Sema, ty: Type) CompileError!Type {
         .zero_u8 => unreachable,
         .one => unreachable,
         .one_usize => unreachable,
+        .negative_one => unreachable,
         .calling_convention_c => unreachable,
         .calling_convention_inline => unreachable,
         .void_value => unreachable,
@@ -32462,11 +32420,8 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void {
     }

     if (fields_len > 0) {
-        var field_count_val: Value.Payload.U64 = .{
-            .base = .{ .tag = .int_u64 },
-            .data = fields_len - 1,
-        };
-        if (!(try sema.intFitsInType(Value.initPayload(&field_count_val.base), int_tag_ty, null))) {
+        const field_count_val = try mod.intValue(int_tag_ty, fields_len - 1);
+        if (!(try sema.intFitsInType(field_count_val, int_tag_ty, null))) {
             const msg = msg: {
                 const msg = try sema.errMsg(&block_scope, tag_ty_src, "specified integer tag type cannot represent every field", .{});
                 errdefer msg.destroy(sema.gpa);
@@ -33207,7 +33162,8 @@ pub fn addType(sema: *Sema, ty: Type) !Air.Inst.Ref {
 }

 fn addIntUnsigned(sema: *Sema, ty: Type, int: u64) CompileError!Air.Inst.Ref {
-    return sema.addConstant(ty, try Value.Tag.int_u64.create(sema.arena, int));
+    const mod = sema.mod;
+    return sema.addConstant(ty, try mod.intValue(ty, int));
 }

 fn addConstUndef(sema: *Sema, ty: Type) CompileError!Air.Inst.Ref {
@@ -33223,7 +33179,11 @@ pub fn addConstant(sema: *Sema, ty: Type, val: Value) SemaError!Air.Inst.Ref {
             .tag = .interned,
             .data = .{ .interned = val.ip_index },
         });
-        return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1));
+        const result = Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1));
+        // This assertion can be removed when the `ty` parameter is removed from
+        // this function thanks to the InternPool transition being complete.
+        assert(Type.eql(sema.typeOf(result), ty, sema.mod));
+        return result;
     }
     const ty_inst = try sema.addType(ty);
     try sema.air_values.append(gpa, val);
@@ -33833,19 +33793,18 @@ fn intAdd(sema: *Sema, lhs: Value, rhs: Value, ty: Type) !Value {
     const mod = sema.mod;
     if (ty.zigTypeTag(mod) == .Vector) {
         const result_data = try sema.arena.alloc(Value, ty.vectorLen(mod));
+        const scalar_ty = ty.scalarType(mod);
         for (result_data, 0..) |*scalar, i| {
-            var lhs_buf: Value.ElemValueBuffer = undefined;
-            var rhs_buf: Value.ElemValueBuffer = undefined;
-            const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
-            const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
-            scalar.* = try sema.intAddScalar(lhs_elem, rhs_elem);
+            const lhs_elem = try lhs.elemValue(mod, i);
+            const rhs_elem = try rhs.elemValue(mod, i);
+            scalar.* = try sema.intAddScalar(lhs_elem, rhs_elem, scalar_ty);
         }
         return Value.Tag.aggregate.create(sema.arena, result_data);
     }
-    return sema.intAddScalar(lhs, rhs);
+    return sema.intAddScalar(lhs, rhs, ty);
 }

-fn intAddScalar(sema: *Sema, lhs: Value, rhs: Value) !Value {
+fn intAddScalar(sema: *Sema, lhs: Value, rhs: Value, scalar_ty: Type) !Value {
     const mod = sema.mod;
     // TODO is this a performance issue? maybe we should try the operation without
     // resorting to BigInt first.
@@ -33859,7 +33818,7 @@ fn intAddScalar(sema: *Sema, lhs: Value, rhs: Value) !Value {
     );
     var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined };
     result_bigint.add(lhs_bigint, rhs_bigint);
-    return Value.fromBigInt(sema.arena, result_bigint.toConst());
+    return mod.intValue_big(scalar_ty, result_bigint.toConst());
 }

 /// Supports both floats and ints; handles undefined.
@@ -33884,28 +33843,22 @@ fn numberAddWrapScalar(
     return overflow_result.wrapped_result;
 }

-fn intSub(
-    sema: *Sema,
-    lhs: Value,
-    rhs: Value,
-    ty: Type,
-) !Value {
+fn intSub(sema: *Sema, lhs: Value, rhs: Value, ty: Type) !Value {
     const mod = sema.mod;
     if (ty.zigTypeTag(mod) == .Vector) {
         const result_data = try sema.arena.alloc(Value, ty.vectorLen(mod));
+        const scalar_ty = ty.scalarType(mod);
         for (result_data, 0..) |*scalar, i| {
-            var lhs_buf: Value.ElemValueBuffer = undefined;
-            var rhs_buf: Value.ElemValueBuffer = undefined;
-            const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf);
-            const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf);
-            scalar.* = try sema.intSubScalar(lhs_elem, rhs_elem);
+            const lhs_elem = try lhs.elemValue(sema.mod, i);
+            const rhs_elem = try rhs.elemValue(sema.mod, i);
+            scalar.* = try sema.intSubScalar(lhs_elem, rhs_elem, scalar_ty);
         }
         return Value.Tag.aggregate.create(sema.arena, result_data);
     }
-    return sema.intSubScalar(lhs, rhs);
+    return sema.intSubScalar(lhs, rhs, ty);
 }

-fn intSubScalar(sema: *Sema, lhs: Value, rhs: Value) !Value {
+fn intSubScalar(sema: *Sema, lhs: Value, rhs: Value, scalar_ty: Type) !Value {
     const mod = sema.mod;
     // TODO is this a performance issue? maybe we should try the operation without
     // resorting to BigInt first.
@@ -33919,7 +33872,7 @@ fn intSubScalar(sema: *Sema, lhs: Value, rhs: Value) !Value {
     );
     var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined };
     result_bigint.sub(lhs_bigint, rhs_bigint);
-    return Value.fromBigInt(sema.arena, result_bigint.toConst());
+    return mod.intValue_big(scalar_ty, result_bigint.toConst());
 }

 /// Supports both floats and ints; handles undefined.
@@ -33954,10 +33907,8 @@ fn floatAdd(
     if (float_type.zigTypeTag(mod) == .Vector) {
         const result_data = try sema.arena.alloc(Value, float_type.vectorLen(mod));
         for (result_data, 0..) |*scalar, i| {
-            var lhs_buf: Value.ElemValueBuffer = undefined;
-            var rhs_buf: Value.ElemValueBuffer = undefined;
-            const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf);
-            const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf);
+            const lhs_elem = try lhs.elemValue(sema.mod, i);
+            const rhs_elem = try rhs.elemValue(sema.mod, i);
             scalar.* = try sema.floatAddScalar(lhs_elem, rhs_elem, float_type.scalarType(mod));
         }
         return Value.Tag.aggregate.create(sema.arena, result_data);
@@ -33971,31 +33922,32 @@ fn floatAddScalar(
     rhs: Value,
     float_type: Type,
 ) !Value {
+    const mod = sema.mod;
     const target = sema.mod.getTarget();
     switch (float_type.floatBits(target)) {
         16 => {
-            const lhs_val = lhs.toFloat(f16);
-            const rhs_val = rhs.toFloat(f16);
+            const lhs_val = lhs.toFloat(f16, mod);
+            const rhs_val = rhs.toFloat(f16, mod);
             return Value.Tag.float_16.create(sema.arena, lhs_val + rhs_val);
         },
         32 => {
-            const lhs_val = lhs.toFloat(f32);
-            const rhs_val = rhs.toFloat(f32);
+            const lhs_val = lhs.toFloat(f32, mod);
+            const rhs_val = rhs.toFloat(f32, mod);
             return Value.Tag.float_32.create(sema.arena, lhs_val + rhs_val);
         },
         64 => {
-            const lhs_val = lhs.toFloat(f64);
-            const rhs_val = rhs.toFloat(f64);
+            const lhs_val = lhs.toFloat(f64, mod);
+            const rhs_val = rhs.toFloat(f64, mod);
             return Value.Tag.float_64.create(sema.arena, lhs_val + rhs_val);
         },
         80 => {
-            const lhs_val = lhs.toFloat(f80);
-            const rhs_val = rhs.toFloat(f80);
+            const lhs_val = lhs.toFloat(f80, mod);
+            const rhs_val = rhs.toFloat(f80, mod);
             return Value.Tag.float_80.create(sema.arena, lhs_val + rhs_val);
         },
         128 => {
-            const lhs_val = lhs.toFloat(f128);
-            const rhs_val = rhs.toFloat(f128);
+            const lhs_val = lhs.toFloat(f128, mod);
+            const rhs_val = rhs.toFloat(f128, mod);
             return Value.Tag.float_128.create(sema.arena, lhs_val + rhs_val);
         },
         else => unreachable,
@@ -34012,10 +33964,8 @@ fn floatSub(
     if (float_type.zigTypeTag(mod) == .Vector) {
         const result_data = try sema.arena.alloc(Value, float_type.vectorLen(mod));
         for (result_data, 0..) |*scalar, i| {
-            var lhs_buf: Value.ElemValueBuffer = undefined;
-            var rhs_buf: Value.ElemValueBuffer = undefined;
-            const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf);
-            const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf);
+            const lhs_elem = try lhs.elemValue(sema.mod, i);
+            const rhs_elem = try rhs.elemValue(sema.mod, i);
             scalar.* = try sema.floatSubScalar(lhs_elem, rhs_elem, float_type.scalarType(mod));
         }
         return Value.Tag.aggregate.create(sema.arena, result_data);
@@ -34029,31 +33979,32 @@ fn floatSubScalar(
     rhs: Value,
     float_type: Type,
 ) !Value {
+    const mod = sema.mod;
     const target = sema.mod.getTarget();
     switch (float_type.floatBits(target)) {
         16 => {
-            const lhs_val = lhs.toFloat(f16);
-            const rhs_val = rhs.toFloat(f16);
+            const lhs_val = lhs.toFloat(f16, mod);
+            const rhs_val = rhs.toFloat(f16, mod);
             return Value.Tag.float_16.create(sema.arena, lhs_val - rhs_val);
         },
         32 => {
-            const lhs_val = lhs.toFloat(f32);
-            const rhs_val = rhs.toFloat(f32);
+            const lhs_val = lhs.toFloat(f32, mod);
+            const rhs_val = rhs.toFloat(f32, mod);
             return Value.Tag.float_32.create(sema.arena, lhs_val - rhs_val);
         },
         64 => {
-            const lhs_val = lhs.toFloat(f64);
-            const rhs_val = rhs.toFloat(f64);
+            const lhs_val = lhs.toFloat(f64, mod);
+            const rhs_val = rhs.toFloat(f64, mod);
             return Value.Tag.float_64.create(sema.arena, lhs_val - rhs_val);
         },
         80 => {
-            const lhs_val = lhs.toFloat(f80);
-            const rhs_val = rhs.toFloat(f80);
+            const lhs_val = lhs.toFloat(f80, mod);
+            const rhs_val = rhs.toFloat(f80, mod);
             return Value.Tag.float_80.create(sema.arena, lhs_val - rhs_val);
         },
         128 => {
-            const lhs_val = lhs.toFloat(f128);
-            const rhs_val = rhs.toFloat(f128);
+            const lhs_val = lhs.toFloat(f128, mod);
+            const rhs_val = rhs.toFloat(f128, mod);
             return Value.Tag.float_128.create(sema.arena, lhs_val - rhs_val);
         },
         else => unreachable,
@@ -34071,10 +34022,8 @@ fn intSubWithOverflow(
         const overflowed_data = try sema.arena.alloc(Value, ty.vectorLen(mod));
         const result_data = try sema.arena.alloc(Value, ty.vectorLen(mod));
         for (result_data, 0..) |*scalar, i| {
-            var lhs_buf: Value.ElemValueBuffer = undefined;
-            var rhs_buf: Value.ElemValueBuffer = undefined;
-            const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf);
-            const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf);
+            const lhs_elem = try lhs.elemValue(sema.mod, i);
+            const rhs_elem = try rhs.elemValue(sema.mod, i);
             const of_math_result = try sema.intSubWithOverflowScalar(lhs_elem, rhs_elem, ty.scalarType(mod));
             overflowed_data[i] = of_math_result.overflow_bit;
             scalar.* = of_math_result.wrapped_result;
@@ -34106,7 +34055,7 @@ fn intSubWithOverflowScalar(
     );
     var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined };
     const overflowed = result_bigint.subWrap(lhs_bigint, rhs_bigint, info.signedness, info.bits);
-    const wrapped_result = try Value.fromBigInt(sema.arena, result_bigint.toConst());
+    const wrapped_result = try mod.intValue_big(ty, result_bigint.toConst());
     return Value.OverflowArithmeticResult{
         .overflow_bit = Value.boolToInt(overflowed),
         .wrapped_result = wrapped_result,
@@ -34126,8 +34075,7 @@ fn floatToInt(
         const elem_ty = float_ty.childType(mod);
         const result_data = try sema.arena.alloc(Value, float_ty.vectorLen(mod));
         for (result_data, 0..) |*scalar, i| {
-            var buf: Value.ElemValueBuffer = undefined;
-            const elem_val = val.elemValueBuffer(sema.mod, i, &buf);
+            const elem_val = try val.elemValue(sema.mod, i);
             scalar.* = try sema.floatToIntScalar(block, src, elem_val, elem_ty, int_ty.scalarType(mod));
         }
         return Value.Tag.aggregate.create(sema.arena, result_data);
@@ -34168,9 +34116,9 @@ fn floatToIntScalar(
     float_ty: Type,
     int_ty: Type,
 ) CompileError!Value {
-    const Limb = std.math.big.Limb;
+    const mod = sema.mod;

-    const float = val.toFloat(f128);
+    const float = val.toFloat(f128, mod);
     if (std.math.isNan(float)) {
         return sema.fail(block, src, "float value NaN cannot be stored in integer type '{}'", .{
             int_ty.fmt(sema.mod),
@@ -34185,11 +34133,7 @@ fn floatToIntScalar(
     var big_int = try float128IntPartToBigInt(sema.arena, float);
     defer big_int.deinit();

-    const result_limbs = try sema.arena.dupe(Limb, big_int.toConst().limbs);
-    const result = if (!big_int.isPositive())
-        try Value.Tag.int_big_negative.create(sema.arena, result_limbs)
-    else
-        try Value.Tag.int_big_positive.create(sema.arena, result_limbs);
+    const result = try mod.intValue_big(int_ty, big_int.toConst());

     if (!(try sema.intFitsInType(result, int_ty, null))) {
         return sema.fail(block, src, "float value '{}' cannot be stored in integer type '{}'", .{
@@ -34209,8 +34153,8 @@ fn intFitsInType(
     ty: Type,
     vector_index: ?*usize,
 ) CompileError!bool {
+    if (ty.ip_index == .comptime_int_type) return true;
     const mod = sema.mod;
-    const target = mod.getTarget();
     switch (val.ip_index) {
         .undef,
         .zero,
@@ -34218,103 +34162,26 @@ fn intFitsInType(
         .zero_usize,
         .zero_u8,
         => return true,

-        .one,
-        .one_usize,
-        => switch (ty.zigTypeTag(mod)) {
-            .Int => {
-                const info = ty.intInfo(mod);
-                return switch (info.signedness) {
-                    .signed => info.bits >= 2,
-                    .unsigned => info.bits >= 1,
-                };
-            },
-            .ComptimeInt => return true,
-            else => unreachable,
-        },
         .none => switch (val.tag()) {
-            .zero => return true,
-
-            .one => switch (ty.zigTypeTag(mod)) {
-                .Int => {
-                    const info = ty.intInfo(mod);
-                    return switch (info.signedness) {
-                        .signed => info.bits >= 2,
-                        .unsigned => info.bits >= 1,
-                    };
-                },
-                .ComptimeInt => return true,
-                else => unreachable,
-            },
-
-            .lazy_align => switch (ty.zigTypeTag(mod)) {
-                .Int => {
-                    const info = ty.intInfo(mod);
-                    const max_needed_bits = @as(u16, 16) + @boolToInt(info.signedness == .signed);
-                    // If it is u16 or bigger we know the alignment fits without resolving it.
-                    if (info.bits >= max_needed_bits) return true;
-                    const x = try sema.typeAbiAlignment(val.castTag(.lazy_align).?.data);
-                    if (x == 0) return true;
-                    const actual_needed_bits = std.math.log2(x) + 1 + @boolToInt(info.signedness == .signed);
-                    return info.bits >= actual_needed_bits;
-                },
-                .ComptimeInt => return true,
-                else => unreachable,
-            },
-            .lazy_size => switch (ty.zigTypeTag(mod)) {
-                .Int => {
-                    const info = ty.intInfo(mod);
-                    const max_needed_bits = @as(u16, 64) + @boolToInt(info.signedness == .signed);
-                    // If it is u64 or bigger we know the size fits without resolving it.
-                    if (info.bits >= max_needed_bits) return true;
-                    const x = try sema.typeAbiSize(val.castTag(.lazy_size).?.data);
-                    if (x == 0) return true;
-                    const actual_needed_bits = std.math.log2(x) + 1 + @boolToInt(info.signedness == .signed);
-                    return info.bits >= actual_needed_bits;
-                },
-                .ComptimeInt => return true,
-                else => unreachable,
-            },
-
-            .int_u64 => switch (ty.zigTypeTag(mod)) {
-                .Int => {
-                    const x = val.castTag(.int_u64).?.data;
-                    if (x == 0) return true;
-                    const info = ty.intInfo(mod);
-                    const needed_bits = std.math.log2(x) + 1 + @boolToInt(info.signedness == .signed);
-                    return info.bits >= needed_bits;
-                },
-                .ComptimeInt => return true,
-                else => unreachable,
-            },
-            .int_i64 => switch (ty.zigTypeTag(mod)) {
-                .Int => {
-                    const x = val.castTag(.int_i64).?.data;
-                    if (x == 0) return true;
-                    const info = ty.intInfo(mod);
-                    if (info.signedness == .unsigned and x < 0)
-                        return false;
-                    var buffer: Value.BigIntSpace = undefined;
-                    return (try val.toBigIntAdvanced(&buffer, mod, sema)).fitsInTwosComp(info.signedness, info.bits);
-                },
-                .ComptimeInt => return true,
-                else => unreachable,
-            },
-            .int_big_positive => switch (ty.zigTypeTag(mod)) {
-                .Int => {
-                    const info = ty.intInfo(mod);
-                    return val.castTag(.int_big_positive).?.asBigInt().fitsInTwosComp(info.signedness, info.bits);
-                },
-                .ComptimeInt => return true,
-                else => unreachable,
+            .lazy_align => {
+                const info = ty.intInfo(mod);
+                const max_needed_bits = @as(u16, 16) + @boolToInt(info.signedness == .signed);
+                // If it is u16 or bigger we know the alignment fits without resolving it.
+                if (info.bits >= max_needed_bits) return true;
+                const x = try sema.typeAbiAlignment(val.castTag(.lazy_align).?.data);
+                if (x == 0) return true;
+                const actual_needed_bits = std.math.log2(x) + 1 + @boolToInt(info.signedness == .signed);
+                return info.bits >= actual_needed_bits;
             },
-            .int_big_negative => switch (ty.zigTypeTag(mod)) {
-                .Int => {
-                    const info = ty.intInfo(mod);
-                    return val.castTag(.int_big_negative).?.asBigInt().fitsInTwosComp(info.signedness, info.bits);
-                },
-                .ComptimeInt => return true,
-                else => unreachable,
+            .lazy_size => {
+                const info = ty.intInfo(mod);
+                const max_needed_bits = @as(u16, 64) + @boolToInt(info.signedness == .signed);
+                // If it is u64 or bigger we know the size fits without resolving it.
+                if (info.bits >= max_needed_bits) return true;
+                const x = try sema.typeAbiSize(val.castTag(.lazy_size).?.data);
+                if (x == 0) return true;
+                const actual_needed_bits = std.math.log2(x) + 1 + @boolToInt(info.signedness == .signed);
+                return info.bits >= actual_needed_bits;
             },

             .the_only_possible_value => {
@@ -34327,17 +34194,14 @@ fn intFitsInType(
             .decl_ref,
             .function,
             .variable,
-            => switch (ty.zigTypeTag(mod)) {
-                .Int => {
-                    const info = ty.intInfo(mod);
-                    const ptr_bits = target.ptrBitWidth();
-                    return switch (info.signedness) {
-                        .signed => info.bits > ptr_bits,
-                        .unsigned => info.bits >= ptr_bits,
-                    };
-                },
-                .ComptimeInt => return true,
-                else => unreachable,
+            => {
+                const info = ty.intInfo(mod);
+                const target = mod.getTarget();
+                const ptr_bits = target.ptrBitWidth();
+                return switch (info.signedness) {
+                    .signed => info.bits > ptr_bits,
+                    .unsigned => info.bits >= ptr_bits,
+                };
             },

             .aggregate => {
@@ -34354,22 +34218,22 @@ fn intFitsInType(
                 else => unreachable,
             },

-        else => @panic("TODO"),
+        else => switch (mod.intern_pool.indexToKey(val.ip_index)) {
+            .int => |int| {
+                const info = ty.intInfo(mod);
+                var buffer: InternPool.Key.Int.Storage.BigIntSpace = undefined;
+                const big_int = int.storage.toBigInt(&buffer);
+                return big_int.fitsInTwosComp(info.signedness, info.bits);
+            },
+            else => unreachable,
+        },
     }
 }

-fn intInRange(
-    sema: *Sema,
-    tag_ty: Type,
-    int_val: Value,
-    end: usize,
-) !bool {
+fn intInRange(sema: *Sema, tag_ty: Type, int_val: Value, end: usize) !bool {
+    const mod = sema.mod;
     if (!(try int_val.compareAllWithZeroAdvanced(.gte, sema))) return false;
-    var end_payload: Value.Payload.U64 = .{
-        .base = .{ .tag = .int_u64 },
-        .data = end,
-    };
-    const end_val = Value.initPayload(&end_payload.base);
+    const end_val = try mod.intValue(tag_ty, end);
     if (!(try sema.compareAll(int_val, .lt, end_val, tag_ty))) return false;
     return true;
 }
@@ -34426,10 +34290,8 @@ fn intAddWithOverflow(
         const overflowed_data = try sema.arena.alloc(Value, ty.vectorLen(mod));
         const result_data = try sema.arena.alloc(Value, ty.vectorLen(mod));
         for (result_data, 0..) |*scalar, i| {
-            var lhs_buf: Value.ElemValueBuffer = undefined;
-            var rhs_buf: Value.ElemValueBuffer = undefined;
-            const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf);
-            const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf);
+            const lhs_elem = try lhs.elemValue(sema.mod, i);
+            const rhs_elem = try rhs.elemValue(sema.mod, i);
             const of_math_result = try sema.intAddWithOverflowScalar(lhs_elem, rhs_elem, ty.scalarType(mod));
             overflowed_data[i] = of_math_result.overflow_bit;
             scalar.* = of_math_result.wrapped_result;
@@ -34461,7 +34323,7 @@ fn intAddWithOverflowScalar(
     );
     var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined };
     const overflowed = result_bigint.addWrap(lhs_bigint, rhs_bigint, info.signedness, info.bits);
-    const result = try Value.fromBigInt(sema.arena, result_bigint.toConst());
+    const result = try mod.intValue_big(ty, result_bigint.toConst());
     return Value.OverflowArithmeticResult{
         .overflow_bit = Value.boolToInt(overflowed),
         .wrapped_result = result,
@@ -34483,10 +34345,8 @@ fn compareAll(
     if (ty.zigTypeTag(mod) == .Vector) {
         var i: usize = 0;
         while (i < ty.vectorLen(mod)) : (i += 1) {
-            var lhs_buf: Value.ElemValueBuffer = undefined;
-            var rhs_buf: Value.ElemValueBuffer = undefined;
-            const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf);
-            const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf);
+            const lhs_elem = try lhs.elemValue(sema.mod, i);
+            const rhs_elem = try rhs.elemValue(sema.mod, i);
             if (!(try sema.compareScalar(lhs_elem, op, rhs_elem, ty.scalarType(mod)))) {
                 return false;
             }
@@ -34532,10 +34392,8 @@ fn compareVector(
     assert(ty.zigTypeTag(mod) == .Vector);
     const result_data = try sema.arena.alloc(Value, ty.vectorLen(mod));
     for (result_data, 0..) |*scalar, i| {
-        var lhs_buf: Value.ElemValueBuffer = undefined;
-        var rhs_buf: Value.ElemValueBuffer = undefined;
-        const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf);
-        const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf);
+        const lhs_elem = try lhs.elemValue(sema.mod, i);
+        const rhs_elem = try rhs.elemValue(sema.mod, i);
         const res_bool = try sema.compareScalar(lhs_elem, op, rhs_elem, ty.scalarType(mod));
         scalar.* = Value.makeBool(res_bool);
     }
diff --git a/src/TypedValue.zig b/src/TypedValue.zig
index ee9a8abf0f..28212a164c 100644
--- a/src/TypedValue.zig
+++ b/src/TypedValue.zig
@@ -41,8 +41,8 @@ pub fn hash(tv: TypedValue, hasher: *std.hash.Wyhash, mod: *Module) void {
     return tv.val.hash(tv.ty, hasher, mod);
 }

-pub fn enumToInt(tv: TypedValue, buffer: *Value.Payload.U64) Value {
-    return tv.val.enumToInt(tv.ty, buffer);
+pub fn enumToInt(tv: TypedValue, mod: *Module) Allocator.Error!Value {
+    return tv.val.enumToInt(tv.ty, mod);
 }

 const max_aggregate_items = 100;
@@ -157,14 +157,8 @@ pub fn print(
             return writer.writeAll(" }");
         },
-        .zero => return writer.writeAll("0"),
-        .one => return writer.writeAll("1"),
         .the_only_possible_value => return writer.writeAll("0"),
         .ty => return val.castTag(.ty).?.data.print(writer, mod),
-        .int_u64 => return std.fmt.formatIntValue(val.castTag(.int_u64).?.data, "", .{}, writer),
-        .int_i64 => return std.fmt.formatIntValue(val.castTag(.int_i64).?.data, "", .{}, writer),
-        .int_big_positive => return writer.print("{}", .{val.castTag(.int_big_positive).?.asBigInt()}),
-        .int_big_negative => return writer.print("{}", .{val.castTag(.int_big_negative).?.asBigInt()}),
         .lazy_align => {
             const sub_ty = val.castTag(.lazy_align).?.data;
             const x = sub_ty.abiAlignment(mod);
@@ -313,8 +307,9 @@ pub fn print(
                     var i: u32 = 0;
                     while (i < max_len) : (i += 1) {
-                        var elem_buf: Value.ElemValueBuffer = undefined;
-                        const elem_val = payload.ptr.elemValueBuffer(mod, i, &elem_buf);
+                        const elem_val = payload.ptr.elemValue(mod, i) catch |err| switch (err) {
+                            error.OutOfMemory => @panic("OOM"), // TODO: eliminate this panic
+                        };
                         if (elem_val.isUndef()) break :str;
                         buf[i] = std.math.cast(u8, elem_val.toUnsignedInt(mod)) orelse break :str;
                     }
@@ -330,10 +325,12 @@ pub fn print(
                 var i: u32 = 0;
                 while (i < max_len) : (i += 1) {
                     if (i != 0) try writer.writeAll(", ");
-                    var buf: Value.ElemValueBuffer = undefined;
+                    const elem_val = payload.ptr.elemValue(mod, i) catch |err| switch (err) {
+                        error.OutOfMemory => @panic("OOM"), // TODO: eliminate this panic
+                    };
                     try print(.{
                         .ty = elem_ty,
-                        .val = payload.ptr.elemValueBuffer(mod, i, &buf),
+                        .val = elem_val,
                     }, writer, level - 1, mod);
                 }
                 if (len > max_aggregate_items) {
diff --git a/src/Zir.zig b/src/Zir.zig
index 8c03dfd060..34479cce5e 100644
--- a/src/Zir.zig
+++ b/src/Zir.zig
@@ -2120,6 +2120,7 @@ pub const Inst = struct {
         zero_u8 = @enumToInt(InternPool.Index.zero_u8),
         one = @enumToInt(InternPool.Index.one),
         one_usize = @enumToInt(InternPool.Index.one_usize),
+        negative_one = @enumToInt(InternPool.Index.negative_one),
         calling_convention_c = @enumToInt(InternPool.Index.calling_convention_c),
         calling_convention_inline = @enumToInt(InternPool.Index.calling_convention_inline),
         void_value = @enumToInt(InternPool.Index.void_value),
diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig
index ea7134c603..327e2c13e0 100644
--- a/src/arch/wasm/CodeGen.zig
+++ b/src/arch/wasm/CodeGen.zig
@@ -3083,20 +3083,21 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue {
         },
         .Bool => return WValue{ .imm32 = @intCast(u32, val.toUnsignedInt(mod)) },
         .Float => switch (ty.floatBits(func.target)) {
-            16 => return WValue{ .imm32 = @bitCast(u16, val.toFloat(f16)) },
-            32 => return WValue{ .float32 = val.toFloat(f32) },
-            64 => return WValue{ .float64 = val.toFloat(f64) },
+            16 => return WValue{ .imm32 = @bitCast(u16, val.toFloat(f16, mod)) },
+            32 => return WValue{ .float32 = val.toFloat(f32, mod) },
+            64 => return WValue{ .float64 = val.toFloat(f64, mod) },
             else => unreachable,
         },
-        .Pointer => switch (val.ip_index) {
-            .null_value => return WValue{ .imm32 = 0 },
+        .Pointer => return switch (val.ip_index) {
+            .null_value => WValue{ .imm32 = 0 },
             .none => switch (val.tag()) {
-                .field_ptr, .elem_ptr, .opt_payload_ptr => return func.lowerParentPtr(val, 0),
-                .int_u64, .one => return WValue{ .imm32 = @intCast(u32, val.toUnsignedInt(mod)) },
-                .zero => return WValue{ .imm32 = 0 },
+                .field_ptr, .elem_ptr, .opt_payload_ptr => func.lowerParentPtr(val, 0),
                 else => return func.fail("Wasm TODO: lowerConstant for other const pointer tag {}", .{val.tag()}),
             },
-            else => unreachable,
+            else => switch (mod.intern_pool.indexToKey(val.ip_index)) {
+                .int => |int| WValue{ .imm32 = @intCast(u32, int.storage.u64) },
+                else => unreachable,
+            },
         },
         .Enum => {
             if (val.castTag(.enum_field_index)) |field_index| {
@@ -3137,7 +3138,7 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue {
             if (!payload_type.hasRuntimeBitsIgnoreComptime(mod)) {
                 // We use the error type directly as the type.
                 const is_pl = val.errorUnionIsPayload();
-                const err_val = if (!is_pl) val else Value.initTag(.zero);
+                const err_val = if (!is_pl) val else Value.zero;
                 return func.lowerConstant(err_val, error_type);
             }
             return func.fail("Wasm TODO: lowerConstant error union with non-zero-bit payload type", .{});
@@ -3160,11 +3161,10 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue {
             assert(struct_obj.layout == .Packed);
             var buf: [8]u8 = .{0} ** 8; // zero the buffer so we do not read 0xaa as integer
             val.writeToPackedMemory(ty, func.bin_file.base.options.module.?, &buf, 0) catch unreachable;
-            var payload: Value.Payload.U64 = .{
-                .base = .{ .tag = .int_u64 },
-                .data = std.mem.readIntLittle(u64, &buf),
-            };
-            const int_val = Value.initPayload(&payload.base);
+            const int_val = try mod.intValue(
+                struct_obj.backing_int_ty,
+                std.mem.readIntLittle(u64, &buf),
+            );
             return func.lowerConstant(int_val, struct_obj.backing_int_ty);
         },
         .Vector => {
@@ -4899,8 +4899,7 @@ fn airShuffle(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
         const result = try func.allocStack(inst_ty);

         for (0..mask_len) |index| {
-            var buf: Value.ElemValueBuffer = undefined;
-            const value = mask.elemValueBuffer(mod, index, &buf).toSignedInt(mod);
+            const value = (try mask.elemValue(mod, index)).toSignedInt(mod);

             try func.emitWValue(result);
@@ -4920,8 +4919,7 @@ fn airShuffle(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     var lanes = std.mem.asBytes(operands[1..]);
     for (0..@intCast(usize, mask_len)) |index| {
-        var buf: Value.ElemValueBuffer = undefined;
-        const mask_elem = mask.elemValueBuffer(mod, index, &buf).toSignedInt(mod);
+        const mask_elem = (try mask.elemValue(mod, index)).toSignedInt(mod);
         const base_index = if (mask_elem >= 0)
             @intCast(u8, @intCast(i64, elem_size) * mask_elem)
         else
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index ee604afd0f..51c6bc79e6 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -2757,11 +2757,8 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void {
         dst_ty.fmt(self.bin_file.options.module.?),
     });

-    var mask_pl = Value.Payload.U64{
-        .base = .{ .tag = .int_u64 },
-        .data = @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - dst_info.bits),
-    };
-    const mask_val = Value.initPayload(&mask_pl.base);
+    const elem_ty = src_ty.childType(mod);
+    const mask_val = try mod.intValue(elem_ty, @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - dst_info.bits));

     var splat_pl = Value.Payload.SubValue{
         .base = .{ .tag = .repeated },
@@ -4906,18 +4903,6 @@ fn airFloatSign(self: *Self, inst: Air.Inst.Index) !void {
     defer arena.deinit();

     const ExpectedContents = struct {
-        scalar: union {
-            i64: Value.Payload.I64,
-            big: struct {
-                limbs: [
-                    @max(
-                        std.math.big.int.Managed.default_capacity,
-                        std.math.big.int.calcTwosCompLimbCount(128),
-                    )
-                ]std.math.big.Limb,
-                pl: Value.Payload.BigInt,
-            },
-        },
         repeated: Value.Payload.SubValue,
     };
     var stack align(@alignOf(ExpectedContents)) =
@@ -11429,8 +11414,7 @@ fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void {
     const field_index = @intCast(u32, tag_ty.enumFieldIndex(field_name).?);
     var tag_pl = Value.Payload.U32{ .base = .{ .tag = .enum_field_index }, .data = field_index };
     const tag_val = Value.initPayload(&tag_pl.base);
-    var tag_int_pl: Value.Payload.U64 = undefined;
-    const tag_int_val = tag_val.enumToInt(tag_ty, &tag_int_pl);
+    const tag_int_val = try tag_val.enumToInt(tag_ty, mod);
     const tag_int = tag_int_val.toUnsignedInt(mod);
     const tag_off = if (layout.tag_align < layout.payload_align)
         @intCast(i32, layout.payload_size)
diff --git a/src/codegen.zig b/src/codegen.zig
index 5f5a3f66be..9c9868892f 100644
--- a/src/codegen.zig
+++ b/src/codegen.zig
@@ -214,15 +214,15 @@ pub fn generateSymbol(
         },
         .Float => {
             switch (typed_value.ty.floatBits(target)) {
-                16 => writeFloat(f16, typed_value.val.toFloat(f16), target, endian, try code.addManyAsArray(2)),
-                32 => writeFloat(f32, typed_value.val.toFloat(f32), target, endian, try code.addManyAsArray(4)),
-                64 => writeFloat(f64, typed_value.val.toFloat(f64), target, endian, try code.addManyAsArray(8)),
+                16 => writeFloat(f16, typed_value.val.toFloat(f16, mod), target, endian, try code.addManyAsArray(2)),
+                32 => writeFloat(f32, typed_value.val.toFloat(f32, mod), target, endian, try code.addManyAsArray(4)),
+                64 => writeFloat(f64, typed_value.val.toFloat(f64, mod), target, endian, try code.addManyAsArray(8)),
                 80 => {
-                    writeFloat(f80, typed_value.val.toFloat(f80), target, endian, try code.addManyAsArray(10));
+                    writeFloat(f80, typed_value.val.toFloat(f80, mod), target, endian, try code.addManyAsArray(10));
                     const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse return error.Overflow;
                     try code.appendNTimes(0, abi_size - 10);
                 },
-                128 => writeFloat(f128, typed_value.val.toFloat(f128), target, endian, try code.addManyAsArray(16)),
+                128 => writeFloat(f128, typed_value.val.toFloat(f128, mod), target, endian, try code.addManyAsArray(16)),
                 else => unreachable,
             }
             return Result.ok;
@@ -328,20 +328,6 @@ pub fn generateSymbol(
                 return Result.ok;
             },
             .none => switch (typed_value.val.tag()) {
-                .zero, .one, .int_u64, .int_big_positive => {
-                    switch (target.ptrBitWidth()) {
-                        32 => {
-                            const x = typed_value.val.toUnsignedInt(mod);
-                            mem.writeInt(u32, try code.addManyAsArray(4), @intCast(u32, x), endian);
-                        },
-                        64 => {
-                            const x = typed_value.val.toUnsignedInt(mod);
-                            mem.writeInt(u64, try code.addManyAsArray(8), x, endian);
-                        },
-                        else => unreachable,
-                    }
-                    return Result.ok;
-                },
                 .variable, .decl_ref, .decl_ref_mut => |tag| return lowerDeclRef(
                     bin_file,
                     src_loc,
@@ -399,7 +385,23 @@ pub fn generateSymbol(
                     ),
                 },
             },
-            else => unreachable,
+            else => switch (mod.intern_pool.indexToKey(typed_value.val.ip_index)) {
+                .int => {
+                    switch (target.ptrBitWidth()) {
+                        32 => {
+                            const x = typed_value.val.toUnsignedInt(mod);
+                            mem.writeInt(u32, try code.addManyAsArray(4), @intCast(u32, x), endian);
+                        },
+                        64 => {
+                            const x = typed_value.val.toUnsignedInt(mod);
+                            mem.writeInt(u64, try code.addManyAsArray(8), x, endian);
+                        },
+                        else => unreachable,
+                    }
+                    return Result.ok;
+                },
+                else => unreachable,
+            },
         },
         .Int => {
             const info = typed_value.ty.intInfo(mod);
@@ -449,8 +451,7 @@ pub fn generateSymbol(
             return Result.ok;
         },
         .Enum => {
-            var int_buffer: Value.Payload.U64 = undefined;
-            const int_val = typed_value.enumToInt(&int_buffer);
+            const int_val = try typed_value.enumToInt(mod);

             const info = typed_value.ty.intInfo(mod);
             if (info.bits <= 8) {
@@ -674,7 +675,7 @@ pub fn generateSymbol(
             const is_payload = typed_value.val.errorUnionIsPayload();

             if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
-                const err_val = if (is_payload) Value.initTag(.zero) else typed_value.val;
+                const err_val = if (is_payload) Value.zero else typed_value.val;
                 return generateSymbol(bin_file, src_loc, .{
                     .ty = error_ty,
                     .val = err_val,
@@ -689,7 +690,7 @@ pub fn generateSymbol(
             if (error_align > payload_align) {
                 switch (try generateSymbol(bin_file, src_loc, .{
                     .ty = error_ty,
-                    .val = if (is_payload) Value.initTag(.zero) else typed_value.val,
+                    .val = if
(is_payload) Value.zero else typed_value.val, }, code, debug_output, reloc_info)) { .ok => {}, .fail => |em| return Result{ .fail = em }, @@ -721,7 +722,7 @@ pub fn generateSymbol( const begin = code.items.len; switch (try generateSymbol(bin_file, src_loc, .{ .ty = error_ty, - .val = if (is_payload) Value.initTag(.zero) else typed_value.val, + .val = if (is_payload) Value.zero else typed_value.val, }, code, debug_output, reloc_info)) { .ok => {}, .fail => |em| return Result{ .fail = em }, @@ -961,13 +962,9 @@ fn lowerDeclRef( } // generate length - var slice_len: Value.Payload.U64 = .{ - .base = .{ .tag = .int_u64 }, - .data = typed_value.val.sliceLen(mod), - }; switch (try generateSymbol(bin_file, src_loc, .{ .ty = Type.usize, - .val = Value.initPayload(&slice_len.base), + .val = try mod.intValue(Type.usize, typed_value.val.sliceLen(mod)), }, code, debug_output, reloc_info)) { .ok => {}, .fail => |em| return Result{ .fail = em }, @@ -1196,13 +1193,13 @@ pub fn genTypedValue( .null_value => { return GenResult.mcv(.{ .immediate = 0 }); }, - .none => switch (typed_value.val.tag()) { - .int_u64 => { + .none => {}, + else => switch (mod.intern_pool.indexToKey(typed_value.val.ip_index)) { + .int => { return GenResult.mcv(.{ .immediate = typed_value.val.toUnsignedInt(mod) }); }, else => {}, }, - else => {}, }, }, .Int => { @@ -1283,7 +1280,7 @@ pub fn genTypedValue( if (!payload_type.hasRuntimeBitsIgnoreComptime(mod)) { // We use the error type directly as the type. - const err_val = if (!is_pl) typed_value.val else Value.initTag(.zero); + const err_val = if (!is_pl) typed_value.val else Value.zero; return genTypedValue(bin_file, src_loc, .{ .ty = error_type, .val = err_val, diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 039c75de67..9443c2298a 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -568,11 +568,7 @@ pub const DeclGen = struct { var buf: Type.SlicePtrFieldTypeBuffer = undefined; try dg.renderValue(writer, ty.slicePtrFieldType(&buf, mod), val.slicePtr(), .Initializer); - var len_pl: Value.Payload.U64 = .{ - .base = .{ .tag = .int_u64 }, - .data = val.sliceLen(mod), - }; - const len_val = Value.initPayload(&len_pl.base); + const len_val = try mod.intValue(Type.usize, val.sliceLen(mod)); if (location == .StaticInitializer) { return writer.print(", {} }}", .{try dg.fmtIntLiteral(Type.usize, len_val, .Other)}); @@ -596,11 +592,17 @@ pub const DeclGen = struct { if (need_typecast) try writer.writeByte(')'); } - // Renders a "parent" pointer by recursing to the root decl/variable - // that its contents are defined with respect to. - // - // Used for .elem_ptr, .field_ptr, .opt_payload_ptr, .eu_payload_ptr - fn renderParentPtr(dg: *DeclGen, writer: anytype, ptr_val: Value, ptr_ty: Type, location: ValueRenderLocation) error{ OutOfMemory, AnalysisFail }!void { + /// Renders a "parent" pointer by recursing to the root decl/variable + /// that its contents are defined with respect to. 
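+    /// For example, a comptime-known pointer such as `&some_global.field[2]` is
+    /// rendered by first rendering a pointer to `some_global`, then applying the
+    /// field and element offsets on top of it.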
+    ///
+    /// Used for .elem_ptr, .field_ptr, .opt_payload_ptr, .eu_payload_ptr
+    fn renderParentPtr(
+        dg: *DeclGen,
+        writer: anytype,
+        ptr_val: Value,
+        ptr_ty: Type,
+        location: ValueRenderLocation,
+    ) error{ OutOfMemory, AnalysisFail }!void {
         const mod = dg.module;
 
         if (!ptr_ty.isSlice(mod)) {
@@ -608,8 +610,11 @@ pub const DeclGen = struct {
             try dg.renderType(writer, ptr_ty);
             try writer.writeByte(')');
         }
+        if (ptr_val.ip_index != .none) switch (mod.intern_pool.indexToKey(ptr_val.ip_index)) {
+            .int => return writer.print("{x}", .{try dg.fmtIntLiteral(Type.usize, ptr_val, .Other)}),
+            else => unreachable,
+        };
         switch (ptr_val.tag()) {
-            .int_u64, .one => try writer.print("{x}", .{try dg.fmtIntLiteral(Type.usize, ptr_val, .Other)}),
             .decl_ref_mut, .decl_ref, .variable => {
                 const decl_index = switch (ptr_val.tag()) {
                     .decl_ref => ptr_val.castTag(.decl_ref).?.data,
@@ -661,11 +666,7 @@ pub const DeclGen = struct {
             u8_ptr_pl.data.pointee_type = Type.u8;
             const u8_ptr_ty = Type.initPayload(&u8_ptr_pl.base);
 
-            var byte_offset_pl = Value.Payload.U64{
-                .base = .{ .tag = .int_u64 },
-                .data = byte_offset,
-            };
-            const byte_offset_val = Value.initPayload(&byte_offset_pl.base);
+            const byte_offset_val = try mod.intValue(Type.usize, byte_offset);
 
             try writer.writeAll("((");
             try dg.renderType(writer, u8_ptr_ty);
@@ -891,7 +892,7 @@ pub const DeclGen = struct {
             },
             .Array, .Vector => {
                 const ai = ty.arrayInfo(mod);
-                if (ai.elem_type.eql(Type.u8, dg.module)) {
+                if (ai.elem_type.eql(Type.u8, mod)) {
                     var literal = stringLiteral(writer);
                     try literal.start();
                     const c_len = ty.arrayLenIncludingSentinel(mod);
@@ -949,7 +950,7 @@ pub const DeclGen = struct {
             },
             .Float => {
                 const bits = ty.floatBits(target);
-                const f128_val = val.toFloat(f128);
+                const f128_val = val.toFloat(f128, mod);
                 // All unsigned ints matching float types are pre-allocated.
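                // The float is rendered by interning its IEEE bit pattern as a
                // same-width unsigned integer, then emitting a zig_make_/zig_cast_
                // call that reinterprets those bits as the float type.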
const repr_ty = mod.intType(.unsigned, bits) catch unreachable; @@ -963,21 +964,15 @@ pub const DeclGen = struct { }; switch (bits) { - 16 => repr_val_big.set(@bitCast(u16, val.toFloat(f16))), - 32 => repr_val_big.set(@bitCast(u32, val.toFloat(f32))), - 64 => repr_val_big.set(@bitCast(u64, val.toFloat(f64))), - 80 => repr_val_big.set(@bitCast(u80, val.toFloat(f80))), + 16 => repr_val_big.set(@bitCast(u16, val.toFloat(f16, mod))), + 32 => repr_val_big.set(@bitCast(u32, val.toFloat(f32, mod))), + 64 => repr_val_big.set(@bitCast(u64, val.toFloat(f64, mod))), + 80 => repr_val_big.set(@bitCast(u80, val.toFloat(f80, mod))), 128 => repr_val_big.set(@bitCast(u128, f128_val)), else => unreachable, } - var repr_val_pl = Value.Payload.BigInt{ - .base = .{ - .tag = if (repr_val_big.positive) .int_big_positive else .int_big_negative, - }, - .data = repr_val_big.limbs[0..repr_val_big.len], - }; - const repr_val = Value.initPayload(&repr_val_pl.base); + const repr_val = try mod.intValue_big(repr_ty, repr_val_big.toConst()); try writer.writeAll("zig_cast_"); try dg.renderTypeForBuiltinFnName(writer, ty); @@ -988,10 +983,10 @@ pub const DeclGen = struct { try dg.renderTypeForBuiltinFnName(writer, ty); try writer.writeByte('('); switch (bits) { - 16 => try writer.print("{x}", .{val.toFloat(f16)}), - 32 => try writer.print("{x}", .{val.toFloat(f32)}), - 64 => try writer.print("{x}", .{val.toFloat(f64)}), - 80 => try writer.print("{x}", .{val.toFloat(f80)}), + 16 => try writer.print("{x}", .{val.toFloat(f16, mod)}), + 32 => try writer.print("{x}", .{val.toFloat(f32, mod)}), + 64 => try writer.print("{x}", .{val.toFloat(f64, mod)}), + 80 => try writer.print("{x}", .{val.toFloat(f80, mod)}), 128 => try writer.print("{x}", .{f128_val}), else => unreachable, } @@ -1031,10 +1026,10 @@ pub const DeclGen = struct { if (std.math.isNan(f128_val)) switch (bits) { // We only actually need to pass the significand, but it will get // properly masked anyway, so just pass the whole value. 
- 16 => try writer.print("\"0x{x}\"", .{@bitCast(u16, val.toFloat(f16))}), - 32 => try writer.print("\"0x{x}\"", .{@bitCast(u32, val.toFloat(f32))}), - 64 => try writer.print("\"0x{x}\"", .{@bitCast(u64, val.toFloat(f64))}), - 80 => try writer.print("\"0x{x}\"", .{@bitCast(u80, val.toFloat(f80))}), + 16 => try writer.print("\"0x{x}\"", .{@bitCast(u16, val.toFloat(f16, mod))}), + 32 => try writer.print("\"0x{x}\"", .{@bitCast(u32, val.toFloat(f32, mod))}), + 64 => try writer.print("\"0x{x}\"", .{@bitCast(u64, val.toFloat(f64, mod))}), + 80 => try writer.print("\"0x{x}\"", .{@bitCast(u80, val.toFloat(f80, mod))}), 128 => try writer.print("\"0x{x}\"", .{@bitCast(u128, f128_val)}), else => unreachable, }; @@ -1060,19 +1055,6 @@ pub const DeclGen = struct { try writer.writeAll(")NULL)"); }, .none => switch (val.tag()) { - .zero => if (ty.isSlice(mod)) { - var slice_pl = Value.Payload.Slice{ - .base = .{ .tag = .slice }, - .data = .{ .ptr = val, .len = Value.undef }, - }; - const slice_val = Value.initPayload(&slice_pl.base); - - return dg.renderValue(writer, ty, slice_val, location); - } else { - try writer.writeAll("(("); - try dg.renderType(writer, ty); - try writer.writeAll(")NULL)"); - }, .variable => { const decl = val.castTag(.variable).?.data.owner_decl; return dg.renderDeclValue(writer, ty, val, decl, location); @@ -1101,7 +1083,7 @@ pub const DeclGen = struct { const extern_fn = val.castTag(.extern_fn).?.data; try dg.renderDeclName(writer, extern_fn.owner_decl, 0); }, - .int_u64, .one, .int_big_positive, .lazy_align, .lazy_size => { + .lazy_align, .lazy_size => { try writer.writeAll("(("); try dg.renderType(writer, ty); return writer.print("){x})", .{try dg.fmtIntLiteral(Type.usize, val, .Other)}); @@ -1116,7 +1098,14 @@ pub const DeclGen = struct { else => unreachable, }, - else => unreachable, + else => switch (mod.intern_pool.indexToKey(val.ip_index)) { + .int => { + try writer.writeAll("(("); + try dg.renderType(writer, ty); + return writer.print("){x})", .{try dg.fmtIntLiteral(Type.usize, val, .Other)}); + }, + else => unreachable, + }, }, .Array, .Vector => { if (location == .FunctionArgument) { @@ -1155,7 +1144,7 @@ pub const DeclGen = struct { .bytes => val.castTag(.bytes).?.data, .str_lit => bytes: { const str_lit = val.castTag(.str_lit).?.data; - break :bytes dg.module.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; + break :bytes mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; }, else => unreachable, }; @@ -1170,21 +1159,18 @@ pub const DeclGen = struct { else => {}, } // Fall back to generic implementation. 
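+            // Element values are produced through the InternPool by elemValue(),
+            // so no scratch arena is needed for them here.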
- var arena = std.heap.ArenaAllocator.init(dg.gpa); - defer arena.deinit(); - const arena_allocator = arena.allocator(); // MSVC throws C2078 if an array of size 65536 or greater is initialized with a string literal const max_string_initializer_len = 65535; const ai = ty.arrayInfo(mod); - if (ai.elem_type.eql(Type.u8, dg.module)) { + if (ai.elem_type.eql(Type.u8, mod)) { if (ai.len <= max_string_initializer_len) { var literal = stringLiteral(writer); try literal.start(); var index: usize = 0; while (index < ai.len) : (index += 1) { - const elem_val = try val.elemValue(dg.module, arena_allocator, index); + const elem_val = try val.elemValue(mod, index); const elem_val_u8 = if (elem_val.isUndef()) undefPattern(u8) else @intCast(u8, elem_val.toUnsignedInt(mod)); try literal.writeChar(elem_val_u8); } @@ -1198,7 +1184,7 @@ pub const DeclGen = struct { var index: usize = 0; while (index < ai.len) : (index += 1) { if (index != 0) try writer.writeByte(','); - const elem_val = try val.elemValue(dg.module, arena_allocator, index); + const elem_val = try val.elemValue(mod, index); const elem_val_u8 = if (elem_val.isUndef()) undefPattern(u8) else @intCast(u8, elem_val.toUnsignedInt(mod)); try writer.print("'\\x{x}'", .{elem_val_u8}); } @@ -1213,7 +1199,7 @@ pub const DeclGen = struct { var index: usize = 0; while (index < ai.len) : (index += 1) { if (index != 0) try writer.writeByte(','); - const elem_val = try val.elemValue(dg.module, arena_allocator, index); + const elem_val = try val.elemValue(mod, index); try dg.renderValue(writer, ai.elem_type, elem_val, initializer_type); } if (ai.sentinel) |s| { @@ -1361,8 +1347,7 @@ pub const DeclGen = struct { const bits = Type.smallestUnsignedBits(int_info.bits - 1); const bit_offset_ty = try mod.intType(.unsigned, bits); - var bit_offset_val_pl: Value.Payload.U64 = .{ .base = .{ .tag = .int_u64 }, .data = 0 }; - const bit_offset_val = Value.initPayload(&bit_offset_val_pl.base); + var bit_offset: u64 = 0; var eff_num_fields: usize = 0; for (0..field_vals.len) |field_i| { @@ -1394,12 +1379,13 @@ pub const DeclGen = struct { if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; const cast_context = IntCastContext{ .value = .{ .value = field_val } }; - if (bit_offset_val_pl.data != 0) { + if (bit_offset != 0) { try writer.writeAll("zig_shl_"); try dg.renderTypeForBuiltinFnName(writer, ty); try writer.writeByte('('); try dg.renderIntCast(writer, ty, cast_context, field_ty, .FunctionArgument); try writer.writeAll(", "); + const bit_offset_val = try mod.intValue(bit_offset_ty, bit_offset); try dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument); try writer.writeByte(')'); } else { @@ -1409,7 +1395,7 @@ pub const DeclGen = struct { if (needs_closing_paren) try writer.writeByte(')'); if (eff_index != eff_num_fields - 1) try writer.writeAll(", "); - bit_offset_val_pl.data += field_ty.bitSize(mod); + bit_offset += field_ty.bitSize(mod); needs_closing_paren = true; eff_index += 1; } @@ -1427,15 +1413,16 @@ pub const DeclGen = struct { try dg.renderType(writer, ty); try writer.writeByte(')'); - if (bit_offset_val_pl.data != 0) { + if (bit_offset != 0) { try dg.renderValue(writer, field_ty, field_val, .Other); try writer.writeAll(" << "); + const bit_offset_val = try mod.intValue(bit_offset_ty, bit_offset); try dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument); } else { try dg.renderValue(writer, field_ty, field_val, .Other); } - bit_offset_val_pl.data += field_ty.bitSize(mod); + bit_offset += field_ty.bitSize(mod); empty 
= false; } try writer.writeByte(')'); @@ -1451,7 +1438,7 @@ pub const DeclGen = struct { try writer.writeByte(')'); } - const field_i = ty.unionTagFieldIndex(union_obj.tag, dg.module).?; + const field_i = ty.unionTagFieldIndex(union_obj.tag, mod).?; const field_ty = ty.unionFields().values()[field_i].ty; const field_name = ty.unionFields().keys()[field_i]; if (ty.containerLayout() == .Packed) { @@ -1951,10 +1938,10 @@ pub const DeclGen = struct { if (is_big) try writer.print(", {}", .{int_info.signedness == .signed}); - var bits_pl = Value.Payload.U64{ .base = .{ .tag = .int_u64 }, .data = int_info.bits }; + const bits_ty = if (is_big) Type.u16 else Type.u8; try writer.print(", {}", .{try dg.fmtIntLiteral( - if (is_big) Type.u16 else Type.u8, - Value.initPayload(&bits_pl.base), + bits_ty, + try mod.intValue(bits_ty, int_info.bits), .FunctionArgument, )}); } @@ -2495,8 +2482,7 @@ pub fn genErrDecls(o: *Object) !void { for (mod.error_name_list.items, 0..) |name, value| { if (value != 0) try writer.writeByte(','); - var len_pl = Value.Payload.U64{ .base = .{ .tag = .int_u64 }, .data = name.len }; - const len_val = Value.initPayload(&len_pl.base); + const len_val = try mod.intValue(Type.usize, name.len); try writer.print("{{" ++ name_prefix ++ "{}, {}}}", .{ fmtIdent(name), try o.dg.fmtIntLiteral(Type.usize, len_val, .Other), @@ -2548,8 +2534,7 @@ pub fn genLazyFn(o: *Object, lazy_fn: LazyFnMap.Entry) !void { }; const tag_val = Value.initPayload(&tag_pl.base); - var int_pl: Value.Payload.U64 = undefined; - const int_val = tag_val.enumToInt(enum_ty, &int_pl); + const int_val = try tag_val.enumToInt(enum_ty, mod); const name_ty = try mod.arrayType(.{ .len = name.len, @@ -2560,8 +2545,7 @@ pub fn genLazyFn(o: *Object, lazy_fn: LazyFnMap.Entry) !void { var name_pl = Value.Payload.Bytes{ .base = .{ .tag = .bytes }, .data = name }; const name_val = Value.initPayload(&name_pl.base); - var len_pl = Value.Payload.U64{ .base = .{ .tag = .int_u64 }, .data = name.len }; - const len_val = Value.initPayload(&len_pl.base); + const len_val = try mod.intValue(Type.usize, name.len); try w.print(" case {}: {{\n static ", .{ try o.dg.fmtIntLiteral(enum_ty, int_val, .Other), @@ -3396,12 +3380,7 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue { const host_ty = try mod.intType(.unsigned, host_bits); const bit_offset_ty = try mod.intType(.unsigned, Type.smallestUnsignedBits(host_bits - 1)); - - var bit_offset_val_pl: Value.Payload.U64 = .{ - .base = .{ .tag = .int_u64 }, - .data = ptr_info.bit_offset, - }; - const bit_offset_val = Value.initPayload(&bit_offset_val_pl.base); + const bit_offset_val = try mod.intValue(bit_offset_ty, ptr_info.bit_offset); const field_ty = try mod.intType(.unsigned, @intCast(u16, src_ty.bitSize(mod))); @@ -3563,14 +3542,7 @@ fn airTrunc(f: *Function, inst: Air.Inst.Index) !CValue { try v.elem(f, writer); } else switch (dest_int_info.signedness) { .unsigned => { - var arena = std.heap.ArenaAllocator.init(f.object.dg.gpa); - defer arena.deinit(); - - const ExpectedContents = union { u: Value.Payload.U64, i: Value.Payload.I64 }; - var stack align(@alignOf(ExpectedContents)) = - std.heap.stackFallback(@sizeOf(ExpectedContents), arena.allocator()); - - const mask_val = try inst_scalar_ty.maxInt(stack.get(), mod); + const mask_val = try inst_scalar_ty.maxIntScalar(mod); try writer.writeAll("zig_and_"); try f.object.dg.renderTypeForBuiltinFnName(writer, scalar_ty); try writer.writeByte('('); @@ -3581,11 +3553,7 @@ fn airTrunc(f: *Function, inst: Air.Inst.Index) !CValue { .signed => { 
const c_bits = toCIntBits(scalar_int_info.bits) orelse return f.fail("TODO: C backend: implement integer types larger than 128 bits", .{}); - var shift_pl = Value.Payload.U64{ - .base = .{ .tag = .int_u64 }, - .data = c_bits - dest_bits, - }; - const shift_val = Value.initPayload(&shift_pl.base); + const shift_val = try mod.intValue(Type.u8, c_bits - dest_bits); try writer.writeAll("zig_shr_"); try f.object.dg.renderTypeForBuiltinFnName(writer, scalar_ty); @@ -3705,12 +3673,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { const host_ty = try mod.intType(.unsigned, host_bits); const bit_offset_ty = try mod.intType(.unsigned, Type.smallestUnsignedBits(host_bits - 1)); - - var bit_offset_val_pl: Value.Payload.U64 = .{ - .base = .{ .tag = .int_u64 }, - .data = ptr_info.bit_offset, - }; - const bit_offset_val = Value.initPayload(&bit_offset_val_pl.base); + const bit_offset_val = try mod.intValue(bit_offset_ty, ptr_info.bit_offset); const src_bits = src_ty.bitSize(mod); @@ -3725,11 +3688,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { try mask.shiftLeft(&mask, ptr_info.bit_offset); try mask.bitNotWrap(&mask, .unsigned, host_bits); - var mask_pl = Value.Payload.BigInt{ - .base = .{ .tag = .int_big_positive }, - .data = mask.limbs[0..mask.len()], - }; - const mask_val = Value.initPayload(&mask_pl.base); + const mask_val = try mod.intValue_big(host_ty, mask.toConst()); try f.writeCValueDeref(writer, ptr_val); try v.elem(f, writer); @@ -5356,11 +5315,7 @@ fn airFieldParentPtr(f: *Function, inst: Air.Inst.Index) !CValue { u8_ptr_pl.data.pointee_type = Type.u8; const u8_ptr_ty = Type.initPayload(&u8_ptr_pl.base); - var byte_offset_pl = Value.Payload.U64{ - .base = .{ .tag = .int_u64 }, - .data = byte_offset, - }; - const byte_offset_val = Value.initPayload(&byte_offset_pl.base); + const byte_offset_val = try mod.intValue(Type.usize, byte_offset); try writer.writeAll("(("); try f.renderType(writer, u8_ptr_ty); @@ -5412,11 +5367,7 @@ fn fieldPtr( u8_ptr_pl.data.pointee_type = Type.u8; const u8_ptr_ty = Type.initPayload(&u8_ptr_pl.base); - var byte_offset_pl = Value.Payload.U64{ - .base = .{ .tag = .int_u64 }, - .data = byte_offset, - }; - const byte_offset_val = Value.initPayload(&byte_offset_pl.base); + const byte_offset_val = try mod.intValue(Type.usize, byte_offset); try writer.writeAll("(("); try f.renderType(writer, u8_ptr_ty); @@ -5466,11 +5417,8 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue { const bit_offset_ty = try mod.intType(.unsigned, Type.smallestUnsignedBits(int_info.bits - 1)); - var bit_offset_val_pl: Value.Payload.U64 = .{ - .base = .{ .tag = .int_u64 }, - .data = struct_obj.packedFieldBitOffset(mod, extra.field_index), - }; - const bit_offset_val = Value.initPayload(&bit_offset_val_pl.base); + const bit_offset = struct_obj.packedFieldBitOffset(mod, extra.field_index); + const bit_offset_val = try mod.intValue(bit_offset_ty, bit_offset); const field_int_signedness = if (inst_ty.isAbiInt(mod)) inst_ty.intInfo(mod).signedness @@ -5492,13 +5440,13 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue { try f.object.dg.renderTypeForBuiltinFnName(writer, struct_ty); try writer.writeByte('('); } - if (bit_offset_val_pl.data > 0) { + if (bit_offset > 0) { try writer.writeAll("zig_shr_"); try f.object.dg.renderTypeForBuiltinFnName(writer, struct_ty); try writer.writeByte('('); } try f.writeCValue(writer, struct_byval, .Other); - if (bit_offset_val_pl.data > 0) { + if (bit_offset > 0) { try 
writer.writeAll(", "); try f.object.dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument); try writer.writeByte(')'); @@ -5854,9 +5802,7 @@ fn airArrayToSlice(f: *Function, inst: Air.Inst.Index) !CValue { } else try f.writeCValue(writer, operand, .Initializer); try writer.writeAll("; "); - const array_len = array_ty.arrayLen(mod); - var len_pl: Value.Payload.U64 = .{ .base = .{ .tag = .int_u64 }, .data = array_len }; - const len_val = Value.initPayload(&len_pl.base); + const len_val = try mod.intValue(Type.usize, array_ty.arrayLen(mod)); try f.writeCValueMember(writer, local, .{ .identifier = "len" }); try writer.print(" = {};\n", .{try f.fmtIntLiteral(Type.usize, len_val)}); @@ -6632,26 +6578,17 @@ fn airShuffle(f: *Function, inst: Air.Inst.Index) !CValue { const local = try f.allocLocal(inst, inst_ty); try reap(f, inst, &.{ extra.a, extra.b }); // local cannot alias operands for (0..extra.mask_len) |index| { - var dst_pl = Value.Payload.U64{ - .base = .{ .tag = .int_u64 }, - .data = @intCast(u64, index), - }; - try f.writeCValue(writer, local, .Other); try writer.writeByte('['); - try f.object.dg.renderValue(writer, Type.usize, Value.initPayload(&dst_pl.base), .Other); + try f.object.dg.renderValue(writer, Type.usize, try mod.intValue(Type.usize, index), .Other); try writer.writeAll("] = "); - var buf: Value.ElemValueBuffer = undefined; - const mask_elem = mask.elemValueBuffer(mod, index, &buf).toSignedInt(mod); - var src_pl = Value.Payload.U64{ - .base = .{ .tag = .int_u64 }, - .data = @intCast(u64, mask_elem ^ mask_elem >> 63), - }; + const mask_elem = (try mask.elemValue(mod, index)).toSignedInt(mod); + const src_val = try mod.intValue(Type.usize, @intCast(u64, mask_elem ^ mask_elem >> 63)); try f.writeCValue(writer, if (mask_elem >= 0) lhs else rhs, .Other); try writer.writeByte('['); - try f.object.dg.renderValue(writer, Type.usize, Value.initPayload(&src_pl.base), .Other); + try f.object.dg.renderValue(writer, Type.usize, src_val, .Other); try writer.writeAll("];\n"); } @@ -6730,8 +6667,6 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue { defer arena.deinit(); const ExpectedContents = union { - u: Value.Payload.U64, - i: Value.Payload.I64, f16: Value.Payload.Float_16, f32: Value.Payload.Float_32, f64: Value.Payload.Float_64, @@ -6746,13 +6681,13 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue { .And => switch (scalar_ty.zigTypeTag(mod)) { .Bool => Value.one, else => switch (scalar_ty.intInfo(mod).signedness) { - .unsigned => try scalar_ty.maxInt(stack.get(), mod), + .unsigned => try scalar_ty.maxIntScalar(mod), .signed => Value.negative_one, }, }, .Min => switch (scalar_ty.zigTypeTag(mod)) { .Bool => Value.one, - .Int => try scalar_ty.maxInt(stack.get(), mod), + .Int => try scalar_ty.maxIntScalar(mod), .Float => try Value.floatToValue(std.math.nan(f128), stack.get(), scalar_ty, target), else => unreachable, }, @@ -6879,8 +6814,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { const bit_offset_ty = try mod.intType(.unsigned, Type.smallestUnsignedBits(int_info.bits - 1)); - var bit_offset_val_pl: Value.Payload.U64 = .{ .base = .{ .tag = .int_u64 }, .data = 0 }; - const bit_offset_val = Value.initPayload(&bit_offset_val_pl.base); + var bit_offset: u64 = 0; var empty = true; for (0..elements.len) |field_i| { @@ -6925,12 +6859,13 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { } try writer.writeAll(", "); + const bit_offset_val = try mod.intValue(bit_offset_ty, bit_offset); try 
f.object.dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument); try f.object.dg.renderBuiltinInfo(writer, inst_ty, .bits); try writer.writeByte(')'); if (!empty) try writer.writeByte(')'); - bit_offset_val_pl.data += field_ty.bitSize(mod); + bit_offset += field_ty.bitSize(mod); empty = false; } @@ -6976,8 +6911,7 @@ fn airUnionInit(f: *Function, inst: Air.Inst.Index) !CValue { }; const tag_val = Value.initPayload(&tag_pl.base); - var int_pl: Value.Payload.U64 = undefined; - const int_val = tag_val.enumToInt(tag_ty, &int_pl); + const int_val = try tag_val.enumToInt(tag_ty, mod); const a = try Assignment.start(f, writer, tag_ty); try f.writeCValueMember(writer, local, .{ .identifier = "tag" }); @@ -7640,10 +7574,6 @@ fn formatIntLiteral( c_limb_int_info.signedness = .unsigned; c_limb_cty = c_limb_info.cty; } - var c_limb_val_pl = Value.Payload.BigInt{ - .base = .{ .tag = if (c_limb_mut.positive) .int_big_positive else .int_big_negative }, - .data = c_limb_mut.limbs[0..c_limb_mut.len], - }; if (limb_offset > 0) try writer.writeAll(", "); try formatIntLiteral(.{ @@ -7651,7 +7581,7 @@ fn formatIntLiteral( .int_info = c_limb_int_info, .kind = data.kind, .cty = c_limb_cty, - .val = Value.initPayload(&c_limb_val_pl.base), + .val = try mod.intValue_big(Type.comptime_int, c_limb_mut.toConst()), }, fmt, options, writer); } } @@ -7750,7 +7680,7 @@ const Vectorize = struct { pub fn start(f: *Function, inst: Air.Inst.Index, writer: anytype, ty: Type) !Vectorize { const mod = f.object.dg.module; return if (ty.zigTypeTag(mod) == .Vector) index: { - var len_pl = Value.Payload.U64{ .base = .{ .tag = .int_u64 }, .data = ty.vectorLen(mod) }; + const len_val = try mod.intValue(Type.usize, ty.vectorLen(mod)); const local = try f.allocLocal(inst, Type.usize); @@ -7759,7 +7689,7 @@ const Vectorize = struct { try writer.print(" = {d}; ", .{try f.fmtIntLiteral(Type.usize, Value.zero)}); try f.writeCValue(writer, local, .Other); try writer.print(" < {d}; ", .{ - try f.fmtIntLiteral(Type.usize, Value.initPayload(&len_pl.base)), + try f.fmtIntLiteral(Type.usize, len_val), }); try f.writeCValue(writer, local, .Other); try writer.print(" += {d}) {{\n", .{try f.fmtIntLiteral(Type.usize, Value.one)}); diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 5ad3c2b8c5..9b62c5448d 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -12,6 +12,7 @@ const link = @import("../link.zig"); const Compilation = @import("../Compilation.zig"); const build_options = @import("build_options"); const Module = @import("../Module.zig"); +const InternPool = @import("../InternPool.zig"); const Package = @import("../Package.zig"); const TypedValue = @import("../TypedValue.zig"); const Air = @import("../Air.zig"); @@ -1535,8 +1536,7 @@ pub const Object = struct { defer gpa.free(field_name_z); buf_field_index.data = @intCast(u32, i); - var buf_u64: Value.Payload.U64 = undefined; - const field_int_val = field_index_val.enumToInt(ty, &buf_u64); + const field_int_val = try field_index_val.enumToInt(ty, mod); var bigint_space: Value.BigIntSpace = undefined; const bigint = field_int_val.toBigInt(&bigint_space, mod); @@ -3255,8 +3255,6 @@ pub const DeclGen = struct { const llvm_type = try dg.lowerType(tv.ty); return if (tv.val.toBool(mod)) llvm_type.constAllOnes() else llvm_type.constNull(); }, - // TODO this duplicates code with Pointer but they should share the handling - // of the tv.val.tag() and then Int should do extra constPtrToInt on top .Int => switch (tv.val.ip_index) { .none => switch (tv.val.tag()) { 
.decl_ref_mut => return lowerDeclRefValue(dg, tv, tv.val.castTag(.decl_ref_mut).?.data.decl_index), @@ -3277,8 +3275,7 @@ pub const DeclGen = struct { }, }, .Enum => { - var int_buffer: Value.Payload.U64 = undefined; - const int_val = tv.enumToInt(&int_buffer); + const int_val = try tv.enumToInt(mod); var bigint_space: Value.BigIntSpace = undefined; const bigint = int_val.toBigInt(&bigint_space, mod); @@ -3307,25 +3304,25 @@ pub const DeclGen = struct { const llvm_ty = try dg.lowerType(tv.ty); switch (tv.ty.floatBits(target)) { 16 => { - const repr = @bitCast(u16, tv.val.toFloat(f16)); + const repr = @bitCast(u16, tv.val.toFloat(f16, mod)); const llvm_i16 = dg.context.intType(16); const int = llvm_i16.constInt(repr, .False); return int.constBitCast(llvm_ty); }, 32 => { - const repr = @bitCast(u32, tv.val.toFloat(f32)); + const repr = @bitCast(u32, tv.val.toFloat(f32, mod)); const llvm_i32 = dg.context.intType(32); const int = llvm_i32.constInt(repr, .False); return int.constBitCast(llvm_ty); }, 64 => { - const repr = @bitCast(u64, tv.val.toFloat(f64)); + const repr = @bitCast(u64, tv.val.toFloat(f64, mod)); const llvm_i64 = dg.context.intType(64); const int = llvm_i64.constInt(repr, .False); return int.constBitCast(llvm_ty); }, 80 => { - const float = tv.val.toFloat(f80); + const float = tv.val.toFloat(f80, mod); const repr = std.math.break_f80(float); const llvm_i80 = dg.context.intType(80); var x = llvm_i80.constInt(repr.exp, .False); @@ -3338,7 +3335,7 @@ pub const DeclGen = struct { } }, 128 => { - var buf: [2]u64 = @bitCast([2]u64, tv.val.toFloat(f128)); + var buf: [2]u64 = @bitCast([2]u64, tv.val.toFloat(f128, mod)); // LLVM seems to require that the lower half of the f128 be placed first // in the buffer. if (native_endian == .Big) { @@ -3388,7 +3385,7 @@ pub const DeclGen = struct { }; return dg.context.constStruct(&fields, fields.len, .False); }, - .int_u64, .one, .int_big_positive, .lazy_align, .lazy_size => { + .lazy_align, .lazy_size => { const llvm_usize = try dg.lowerType(Type.usize); const llvm_int = llvm_usize.constInt(tv.val.toUnsignedInt(mod), .False); return llvm_int.constIntToPtr(try dg.lowerType(tv.ty)); @@ -3396,10 +3393,6 @@ pub const DeclGen = struct { .field_ptr, .opt_payload_ptr, .eu_payload_ptr, .elem_ptr => { return dg.lowerParentPtr(tv.val, tv.ty.ptrInfo(mod).bit_offset % 8 == 0); }, - .zero => { - const llvm_type = try dg.lowerType(tv.ty); - return llvm_type.constNull(); - }, .opt_payload => { const payload = tv.val.castTag(.opt_payload).?.data; return dg.lowerParentPtr(payload, tv.ty.ptrInfo(mod).bit_offset % 8 == 0); @@ -3408,7 +3401,10 @@ pub const DeclGen = struct { tv.ty.fmtDebug(), tag, }), }, - else => unreachable, + else => switch (mod.intern_pool.indexToKey(tv.val.ip_index)) { + .int => |int| return lowerIntAsPtr(dg, int), + else => unreachable, + }, }, .Array => switch (tv.val.tag()) { .bytes => { @@ -3592,7 +3588,7 @@ pub const DeclGen = struct { if (!payload_type.hasRuntimeBitsIgnoreComptime(mod)) { // We use the error type directly as the type. 
- const err_val = if (!is_pl) tv.val else Value.initTag(.zero); + const err_val = if (!is_pl) tv.val else Value.zero; return dg.lowerValue(.{ .ty = Type.anyerror, .val = err_val }); } @@ -3600,7 +3596,7 @@ pub const DeclGen = struct { const error_align = Type.anyerror.abiAlignment(mod); const llvm_error_value = try dg.lowerValue(.{ .ty = Type.anyerror, - .val = if (is_pl) Value.initTag(.zero) else tv.val, + .val = if (is_pl) Value.zero else tv.val, }); const llvm_payload_value = try dg.lowerValue(.{ .ty = payload_type, @@ -3882,14 +3878,9 @@ pub const DeclGen = struct { const llvm_elems = try dg.gpa.alloc(*llvm.Value, vector_len); defer dg.gpa.free(llvm_elems); for (llvm_elems, 0..) |*elem, i| { - var byte_payload: Value.Payload.U64 = .{ - .base = .{ .tag = .int_u64 }, - .data = bytes[i], - }; - elem.* = try dg.lowerValue(.{ .ty = elem_ty, - .val = Value.initPayload(&byte_payload.base), + .val = try mod.intValue(elem_ty, bytes[i]), }); } return llvm.constVector( @@ -3940,14 +3931,9 @@ pub const DeclGen = struct { const llvm_elems = try dg.gpa.alloc(*llvm.Value, vector_len); defer dg.gpa.free(llvm_elems); for (llvm_elems, 0..) |*elem, i| { - var byte_payload: Value.Payload.U64 = .{ - .base = .{ .tag = .int_u64 }, - .data = bytes[i], - }; - elem.* = try dg.lowerValue(.{ .ty = elem_ty, - .val = Value.initPayload(&byte_payload.base), + .val = try mod.intValue(elem_ty, bytes[i]), }); } return llvm.constVector( @@ -3974,6 +3960,13 @@ pub const DeclGen = struct { } } + fn lowerIntAsPtr(dg: *DeclGen, int: InternPool.Key.Int) *llvm.Value { + var bigint_space: Value.BigIntSpace = undefined; + const bigint = int.storage.toBigInt(&bigint_space); + const llvm_int = lowerBigInt(dg, Type.usize, bigint); + return llvm_int.constIntToPtr(dg.context.pointerType(0)); + } + fn lowerBigInt(dg: *DeclGen, ty: Type, bigint: std.math.big.int.Const) *llvm.Value { const mod = dg.module; const int_info = ty.intInfo(mod); @@ -4018,6 +4011,10 @@ pub const DeclGen = struct { fn lowerParentPtr(dg: *DeclGen, ptr_val: Value, byte_aligned: bool) Error!*llvm.Value { const mod = dg.module; const target = mod.getTarget(); + if (ptr_val.ip_index != .none) switch (mod.intern_pool.indexToKey(ptr_val.ip_index)) { + .int => |int| return lowerIntAsPtr(dg, int), + else => unreachable, + }; switch (ptr_val.tag()) { .decl_ref_mut => { const decl = ptr_val.castTag(.decl_ref_mut).?.data.decl_index; @@ -4031,18 +4028,6 @@ pub const DeclGen = struct { const decl = ptr_val.castTag(.variable).?.data.owner_decl; return dg.lowerParentPtrDecl(ptr_val, decl); }, - .int_i64 => { - const int = ptr_val.castTag(.int_i64).?.data; - const llvm_usize = try dg.lowerType(Type.usize); - const llvm_int = llvm_usize.constInt(@bitCast(u64, int), .False); - return llvm_int.constIntToPtr(dg.context.pointerType(0)); - }, - .int_u64 => { - const int = ptr_val.castTag(.int_u64).?.data; - const llvm_usize = try dg.lowerType(Type.usize); - const llvm_int = llvm_usize.constInt(int, .False); - return llvm_int.constIntToPtr(dg.context.pointerType(0)); - }, .field_ptr => { const field_ptr = ptr_val.castTag(.field_ptr).?.data; const parent_llvm_ptr = try dg.lowerParentPtr(field_ptr.container_ptr, byte_aligned); @@ -4185,10 +4170,6 @@ pub const DeclGen = struct { if (tv.ty.isSlice(mod)) { var buf: Type.SlicePtrFieldTypeBuffer = undefined; const ptr_ty = tv.ty.slicePtrFieldType(&buf, mod); - var slice_len: Value.Payload.U64 = .{ - .base = .{ .tag = .int_u64 }, - .data = tv.val.sliceLen(mod), - }; const fields: [2]*llvm.Value = .{ try self.lowerValue(.{ .ty = ptr_ty, @@ 
-4196,7 +4177,7 @@ pub const DeclGen = struct {
                 }),
                 try self.lowerValue(.{
                     .ty = Type.usize,
-                    .val = Value.initPayload(&slice_len.base),
+                    .val = try mod.intValue(Type.usize, tv.val.sliceLen(mod)),
                 }),
             };
             return self.context.constStruct(&fields, fields.len, .False);
@@ -8507,8 +8488,7 @@ pub const FuncGen = struct {
         const dest_slice = try self.resolveInst(bin_op.lhs);
         const ptr_ty = self.typeOf(bin_op.lhs);
         const elem_ty = self.typeOf(bin_op.rhs);
-        const module = self.dg.module;
-        const target = module.getTarget();
+        const target = mod.getTarget();
         const dest_ptr_align = ptr_ty.ptrAlignment(mod);
         const u8_llvm_ty = self.context.intType(8);
         const dest_ptr = self.sliceOrArrayPtr(dest_slice, ptr_ty);
@@ -8526,7 +8506,7 @@ pub const FuncGen = struct {
             const len = self.sliceOrArrayLenInBytes(dest_slice, ptr_ty);
             _ = self.builder.buildMemSet(dest_ptr, fill_byte, len, dest_ptr_align, is_volatile);
 
-            if (safety and module.comp.bin_file.options.valgrind) {
+            if (safety and mod.comp.bin_file.options.valgrind) {
                 self.valgrindMarkUndef(dest_ptr, len);
             }
             return null;
@@ -8536,8 +8516,7 @@ pub const FuncGen = struct {
        // repeating byte pattern, for example, `@as(u64, 0)` has a
        // repeating byte pattern of 0 bytes. In such case, the memset
        // intrinsic can be used.
-        var value_buffer: Value.Payload.U64 = undefined;
-        if (try elem_val.hasRepeatedByteRepr(elem_ty, module, &value_buffer)) |byte_val| {
+        if (try elem_val.hasRepeatedByteRepr(elem_ty, mod)) |byte_val| {
            const fill_byte = try self.resolveValue(.{
                .ty = Type.u8,
                .val = byte_val,
@@ -8829,16 +8808,10 @@ pub const FuncGen = struct {
 
        for (names) |name| {
            const err_int = mod.global_error_set.get(name).?;
-            const this_tag_int_value = int: {
-                var tag_val_payload: Value.Payload.U64 = .{
-                    .base = .{ .tag = .int_u64 },
-                    .data = err_int,
-                };
-                break :int try self.dg.lowerValue(.{
-                    .ty = Type.err_int,
-                    .val = Value.initPayload(&tag_val_payload.base),
-                });
-            };
+            const this_tag_int_value = try self.dg.lowerValue(.{
+                .ty = Type.err_int,
+                .val = try mod.intValue(Type.err_int, err_int),
+            });
            switch_instr.addCase(this_tag_int_value, valid_block);
        }
        self.builder.positionBuilderAtEnd(valid_block);
@@ -9122,8 +9095,7 @@ pub const FuncGen = struct {
        const llvm_i32 = self.context.intType(32);
 
        for (values, 0..) |*val, i| {
-            var buf: Value.ElemValueBuffer = undefined;
-            const elem = mask.elemValueBuffer(mod, i, &buf);
+            const elem = try mask.elemValue(mod, i);
            if (elem.isUndef()) {
                val.* = llvm_i32.getUndef();
            } else {
@@ -9457,8 +9429,7 @@ pub const FuncGen = struct {
                .data = @intCast(u32, enum_field_index),
            };
            const tag_val = Value.initPayload(&tag_val_payload.base);
-            var int_payload: Value.Payload.U64 = undefined;
-            const tag_int_val = tag_val.enumToInt(tag_ty, &int_payload);
+            const tag_int_val = try tag_val.enumToInt(tag_ty, mod);
            break :blk tag_int_val.toUnsignedInt(mod);
        };
        if (layout.payload_size == 0) {
diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig
index e3b5d24ed9..32e0c13c37 100644
--- a/src/codegen/spirv.zig
+++ b/src/codegen/spirv.zig
@@ -555,15 +555,15 @@ pub const DeclGen = struct {
                // TODO: Swap endianness if the compiler is big endian.
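                // Floats are serialized by appending the raw bytes of their bit
                // pattern (via std.mem.asBytes) in host byte order.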
switch (ty.floatBits(target)) { 16 => { - const float_bits = val.toFloat(f16); + const float_bits = val.toFloat(f16, mod); try self.addBytes(std.mem.asBytes(&float_bits)[0..@intCast(usize, len)]); }, 32 => { - const float_bits = val.toFloat(f32); + const float_bits = val.toFloat(f32, mod); try self.addBytes(std.mem.asBytes(&float_bits)[0..@intCast(usize, len)]); }, 64 => { - const float_bits = val.toFloat(f64); + const float_bits = val.toFloat(f64, mod); try self.addBytes(std.mem.asBytes(&float_bits)[0..@intCast(usize, len)]); }, else => unreachable, @@ -584,7 +584,7 @@ pub const DeclGen = struct { // TODO: Properly lower function pointers. For now we are going to hack around it and // just generate an empty pointer. Function pointers are represented by usize for now, // though. - try self.addInt(Type.usize, Value.initTag(.zero)); + try self.addInt(Type.usize, Value.zero); // TODO: Add dependency return; }, @@ -743,8 +743,7 @@ pub const DeclGen = struct { try self.addUndef(padding); }, .Enum => { - var int_val_buffer: Value.Payload.U64 = undefined; - const int_val = val.enumToInt(ty, &int_val_buffer); + const int_val = try val.enumToInt(ty, mod); const int_ty = ty.intTagType(); @@ -787,22 +786,24 @@ pub const DeclGen = struct { try self.addUndef(layout.padding); }, - .ErrorSet => switch (val.tag()) { - .@"error" => { - const err_name = val.castTag(.@"error").?.data.name; - const kv = try dg.module.getErrorValue(err_name); - try self.addConstInt(u16, @intCast(u16, kv.value)); + .ErrorSet => switch (val.ip_index) { + .none => switch (val.tag()) { + .@"error" => { + const err_name = val.castTag(.@"error").?.data.name; + const kv = try dg.module.getErrorValue(err_name); + try self.addConstInt(u16, @intCast(u16, kv.value)); + }, + else => unreachable, }, - .zero => { - // Unactivated error set. 
-                try self.addConstInt(u16, 0);
+            else => switch (mod.intern_pool.indexToKey(val.ip_index)) {
+                .int => |int| try self.addConstInt(u16, @intCast(u16, int.storage.u64)),
+                else => unreachable,
             },
-            else => unreachable,
         },
         .ErrorUnion => {
             const payload_ty = ty.errorUnionPayload();
             const is_pl = val.errorUnionIsPayload();
-            const error_val = if (!is_pl) val else Value.initTag(.zero);
+            const error_val = if (!is_pl) val else Value.zero;
             const eu_layout = dg.errorUnionLayout(payload_ty);
             if (!eu_layout.payload_has_bits) {
@@ -993,9 +994,9 @@ pub const DeclGen = struct {
                .indirect => return try self.spv.constInt(result_ty_ref, @boolToInt(val.toBool(mod))),
            },
            .Float => return switch (ty.floatBits(target)) {
-                16 => try self.spv.resolveId(.{ .float = .{ .ty = result_ty_ref, .value = .{ .float16 = val.toFloat(f16) } } }),
-                32 => try self.spv.resolveId(.{ .float = .{ .ty = result_ty_ref, .value = .{ .float32 = val.toFloat(f32) } } }),
-                64 => try self.spv.resolveId(.{ .float = .{ .ty = result_ty_ref, .value = .{ .float64 = val.toFloat(f64) } } }),
+                16 => try self.spv.resolveId(.{ .float = .{ .ty = result_ty_ref, .value = .{ .float16 = val.toFloat(f16, mod) } } }),
+                32 => try self.spv.resolveId(.{ .float = .{ .ty = result_ty_ref, .value = .{ .float32 = val.toFloat(f32, mod) } } }),
+                64 => try self.spv.resolveId(.{ .float = .{ .ty = result_ty_ref, .value = .{ .float64 = val.toFloat(f64, mod) } } }),
                80, 128 => unreachable, // TODO
                else => unreachable,
            },
@@ -1531,6 +1532,7 @@ pub const DeclGen = struct {
     }
 
     fn genDecl(self: *DeclGen) !void {
+        if (true) @panic("TODO: update SPIR-V backend for InternPool changes");
         const mod = self.module;
         const decl = mod.declPtr(self.decl_index);
         const spv_decl_index = try self.resolveDecl(self.decl_index);
@@ -2087,8 +2089,7 @@ pub const DeclGen = struct {
 
            var i: usize = 0;
            while (i < mask_len) : (i += 1) {
-                var buf: Value.ElemValueBuffer = undefined;
-                const elem = mask.elemValueBuffer(self.module, i, &buf);
+                const elem = try mask.elemValue(self.module, i);
                if (elem.isUndef()) {
                    self.func.body.writeOperand(spec.LiteralInteger, 0xFFFF_FFFF);
                } else {
@@ -3146,9 +3147,8 @@ pub const DeclGen = struct {
        const int_val = switch (cond_ty.zigTypeTag(mod)) {
            .Int => if (cond_ty.isSignedInt(mod)) @bitCast(u64, value.toSignedInt(mod)) else value.toUnsignedInt(mod),
            .Enum => blk: {
-                var int_buffer: Value.Payload.U64 = undefined;
                // TODO: figure out if cond_ty is correct (something with enum literals)
-                break :blk value.enumToInt(cond_ty, &int_buffer).toUnsignedInt(mod); // TODO: composite integer constants
+                break :blk (try value.enumToInt(cond_ty, mod)).toUnsignedInt(mod); // TODO: composite integer constants
            },
            else => unreachable,
        };
diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig
index 3e4e90951e..c971b5b26f 100644
--- a/src/link/Dwarf.zig
+++ b/src/link/Dwarf.zig
@@ -421,8 +421,7 @@ pub const DeclState = struct {
                    const value = vals.keys()[field_i];
                    // TODO do not assume a 64bit enum value - could be bigger.
// See https://github.com/ziglang/zig/issues/645 - var int_buffer: Value.Payload.U64 = undefined; - const field_int_val = value.enumToInt(ty, &int_buffer); + const field_int_val = try value.enumToInt(ty, mod); break :value @bitCast(u64, field_int_val.toSignedInt(mod)); } else @intCast(u64, field_i); mem.writeInt(u64, dbg_info_buffer.addManyAsArrayAssumeCapacity(8), value, target_endian); diff --git a/src/type.zig b/src/type.zig index 592eb9a21e..5b18245323 100644 --- a/src/type.zig +++ b/src/type.zig @@ -2077,10 +2077,10 @@ pub const Type = struct { } /// May capture a reference to `ty`. - pub fn lazyAbiAlignment(ty: Type, mod: *const Module, arena: Allocator) !Value { + pub fn lazyAbiAlignment(ty: Type, mod: *Module, arena: Allocator) !Value { switch (try ty.abiAlignmentAdvanced(mod, .{ .lazy = arena })) { .val => |val| return val, - .scalar => |x| return Value.Tag.int_u64.create(arena, x), + .scalar => |x| return mod.intValue(ty, x), } } @@ -2468,10 +2468,10 @@ pub const Type = struct { } /// May capture a reference to `ty`. - pub fn lazyAbiSize(ty: Type, mod: *const Module, arena: Allocator) !Value { + pub fn lazyAbiSize(ty: Type, mod: *Module, arena: Allocator) !Value { switch (try ty.abiSizeAdvanced(mod, .{ .lazy = arena })) { .val => |val| return val, - .scalar => |x| return Value.Tag.int_u64.create(arena, x), + .scalar => |x| return mod.intValue(ty, x), } } @@ -4310,8 +4310,8 @@ pub const Type = struct { } // Works for vectors and vectors of integers. - pub fn minInt(ty: Type, arena: Allocator, mod: *const Module) !Value { - const scalar = try minIntScalar(ty.scalarType(mod), arena, mod); + pub fn minInt(ty: Type, arena: Allocator, mod: *Module) !Value { + const scalar = try minIntScalar(ty.scalarType(mod), mod); if (ty.zigTypeTag(mod) == .Vector and scalar.tag() != .the_only_possible_value) { return Value.Tag.repeated.create(arena, scalar); } else { @@ -4319,38 +4319,28 @@ pub const Type = struct { } } - /// Asserts that self.zigTypeTag(mod) == .Int. - pub fn minIntScalar(ty: Type, arena: Allocator, mod: *const Module) !Value { - assert(ty.zigTypeTag(mod) == .Int); + /// Asserts that the type is an integer. + pub fn minIntScalar(ty: Type, mod: *Module) !Value { const info = ty.intInfo(mod); - - if (info.bits == 0) { - return Value.initTag(.the_only_possible_value); - } - - if (info.signedness == .unsigned) { - return Value.zero; - } + if (info.signedness == .unsigned) return Value.zero; + if (info.bits == 0) return Value.negative_one; if (std.math.cast(u6, info.bits - 1)) |shift| { const n = @as(i64, std.math.minInt(i64)) >> (63 - shift); - return Value.Tag.int_i64.create(arena, n); + return mod.intValue(Type.comptime_int, n); } - var res = try std.math.big.int.Managed.init(arena); + var res = try std.math.big.int.Managed.init(mod.gpa); + defer res.deinit(); + try res.setTwosCompIntLimit(.min, info.signedness, info.bits); - const res_const = res.toConst(); - if (res_const.positive) { - return Value.Tag.int_big_positive.create(arena, res_const.limbs); - } else { - return Value.Tag.int_big_negative.create(arena, res_const.limbs); - } + return mod.intValue_big(Type.comptime_int, res.toConst()); } // Works for vectors and vectors of integers. 
- pub fn maxInt(ty: Type, arena: Allocator, mod: *const Module) !Value { - const scalar = try maxIntScalar(ty.scalarType(mod), arena, mod); + pub fn maxInt(ty: Type, arena: Allocator, mod: *Module) !Value { + const scalar = try maxIntScalar(ty.scalarType(mod), mod); if (ty.zigTypeTag(mod) == .Vector and scalar.tag() != .the_only_possible_value) { return Value.Tag.repeated.create(arena, scalar); } else { @@ -4358,41 +4348,39 @@ pub const Type = struct { } } - /// Asserts that self.zigTypeTag() == .Int. - pub fn maxIntScalar(self: Type, arena: Allocator, mod: *const Module) !Value { - assert(self.zigTypeTag(mod) == .Int); + /// Asserts that the type is an integer. + pub fn maxIntScalar(self: Type, mod: *Module) !Value { const info = self.intInfo(mod); - if (info.bits == 0) { - return Value.initTag(.the_only_possible_value); - } - - switch (info.bits - @boolToInt(info.signedness == .signed)) { - 0 => return Value.zero, - 1 => return Value.one, + switch (info.bits) { + 0 => return switch (info.signedness) { + .signed => Value.negative_one, + .unsigned => Value.zero, + }, + 1 => return switch (info.signedness) { + .signed => Value.zero, + .unsigned => Value.one, + }, else => {}, } if (std.math.cast(u6, info.bits - 1)) |shift| switch (info.signedness) { .signed => { const n = @as(i64, std.math.maxInt(i64)) >> (63 - shift); - return Value.Tag.int_i64.create(arena, n); + return mod.intValue(Type.comptime_int, n); }, .unsigned => { const n = @as(u64, std.math.maxInt(u64)) >> (63 - shift); - return Value.Tag.int_u64.create(arena, n); + return mod.intValue(Type.comptime_int, n); }, }; - var res = try std.math.big.int.Managed.init(arena); + var res = try std.math.big.int.Managed.init(mod.gpa); + defer res.deinit(); + try res.setTwosCompIntLimit(.max, info.signedness, info.bits); - const res_const = res.toConst(); - if (res_const.positive) { - return Value.Tag.int_big_positive.create(arena, res_const.limbs); - } else { - return Value.Tag.int_big_negative.create(arena, res_const.limbs); - } + return mod.intValue_big(Type.comptime_int, res.toConst()); } /// Asserts the type is an enum or a union. @@ -4497,12 +4485,11 @@ pub const Type = struct { const S = struct { fn fieldWithRange(int_ty: Type, int_val: Value, end: usize, m: *Module) ?usize { if (int_val.compareAllWithZero(.lt, m)) return null; - var end_payload: Value.Payload.U64 = .{ - .base = .{ .tag = .int_u64 }, - .data = end, + const end_val = m.intValue(int_ty, end) catch |err| switch (err) { + // TODO: eliminate this failure condition + error.OutOfMemory => @panic("OOM"), }; - const end_val = Value.initPayload(&end_payload.base); - if (int_val.compareAll(.gte, end_val, int_ty, m)) return null; + if (int_val.compareScalar(.gte, end_val, int_ty, m)) return null; return @intCast(usize, int_val.toUnsignedInt(m)); } }; diff --git a/src/value.zig b/src/value.zig index f8188c64ab..c0ea9e149f 100644 --- a/src/value.zig +++ b/src/value.zig @@ -33,8 +33,6 @@ pub const Value = struct { // Keep in sync with tools/stage2_pretty_printers_common.py pub const Tag = enum(usize) { // The first section of this enum are tags that require no payload. - zero, - one, /// The only possible value for a particular type, which is stored externally. the_only_possible_value, @@ -43,10 +41,6 @@ pub const Value = struct { // After this, the tag requires a payload. 
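+        // Note: the integer tags (int_u64, int_i64, int_big_*) no longer exist;
+        // integer values are now represented in the InternPool instead.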
         ty,
-        int_u64,
-        int_i64,
-        int_big_positive,
-        int_big_negative,
         function,
         extern_fn,
         /// A comptime-known pointer can point to the address of a global
@@ -129,17 +123,11 @@ pub const Value = struct {
         pub fn Type(comptime t: Tag) type {
             return switch (t) {
-                .zero,
-                .one,
                 .the_only_possible_value,
                 .empty_struct_value,
                 .empty_array,
                 => @compileError("Value Tag " ++ @tagName(t) ++ " has no payload"),

-                .int_big_positive,
-                .int_big_negative,
-                => Payload.BigInt,
-
                 .extern_fn => Payload.ExternFn,

                 .decl_ref => Payload.Decl,
@@ -169,8 +157,6 @@ pub const Value = struct {
                 .lazy_size,
                 => Payload.Ty,

-                .int_u64 => Payload.U64,
-                .int_i64 => Payload.I64,
                 .function => Payload.Function,
                 .variable => Payload.Variable,
                 .decl_ref_mut => Payload.DeclRefMut,
@@ -281,8 +267,6 @@ pub const Value = struct {
                 .legacy = .{ .tag_if_small_enough = self.legacy.tag_if_small_enough },
             };
         } else switch (self.legacy.ptr_otherwise.tag) {
-            .zero,
-            .one,
             .the_only_possible_value,
             .empty_array,
             .empty_struct_value,
@@ -300,20 +284,6 @@ pub const Value = struct {
                     .legacy = .{ .ptr_otherwise = &new_payload.base },
                 };
             },
-            .int_u64 => return self.copyPayloadShallow(arena, Payload.U64),
-            .int_i64 => return self.copyPayloadShallow(arena, Payload.I64),
-            .int_big_positive, .int_big_negative => {
-                const old_payload = self.cast(Payload.BigInt).?;
-                const new_payload = try arena.create(Payload.BigInt);
-                new_payload.* = .{
-                    .base = .{ .tag = self.legacy.ptr_otherwise.tag },
-                    .data = try arena.dupe(std.math.big.Limb, old_payload.data),
-                };
-                return Value{
-                    .ip_index = .none,
-                    .legacy = .{ .ptr_otherwise = &new_payload.base },
-                };
-            },
             .function => return self.copyPayloadShallow(arena, Payload.Function),
             .extern_fn => return self.copyPayloadShallow(arena, Payload.ExternFn),
             .variable => return self.copyPayloadShallow(arena, Payload.Variable),
@@ -525,8 +495,6 @@ pub const Value = struct {
             .@"union" => {
                 return out_stream.writeAll("(union value)");
             },
-            .zero => return out_stream.writeAll("0"),
-            .one => return out_stream.writeAll("1"),
             .the_only_possible_value => return out_stream.writeAll("(the only possible value)"),
             .ty => return val.castTag(.ty).?.data.dump("", options, out_stream),
             .lazy_align => {
@@ -539,10 +507,6 @@ pub const Value = struct {
                 try val.castTag(.lazy_size).?.data.dump("", options, out_stream);
                 return try out_stream.writeAll(")");
             },
-            .int_u64 => return std.fmt.formatIntValue(val.castTag(.int_u64).?.data, "", options, out_stream),
-            .int_i64 => return std.fmt.formatIntValue(val.castTag(.int_i64).?.data, "", options, out_stream),
-            .int_big_positive => return out_stream.print("{}", .{val.castTag(.int_big_positive).?.asBigInt()}),
-            .int_big_negative => return out_stream.print("{}", .{val.castTag(.int_big_negative).?.asBigInt()}),
             .runtime_value => return out_stream.writeAll("[runtime value]"),
             .function => return out_stream.print("(function decl={d})", .{val.castTag(.function).?.data.owner_decl}),
             .extern_fn => return out_stream.writeAll("(extern function)"),
@@ -661,9 +625,8 @@ pub const Value = struct {
     fn arrayToAllocatedBytes(val: Value, len: u64, allocator: Allocator, mod: *Module) ![]u8 {
         const result = try allocator.alloc(u8, @intCast(usize, len));
-        var elem_value_buf: ElemValueBuffer = undefined;
         for (result, 0..) |*elem, i| {
-            const elem_val = val.elemValueBuffer(mod, i, &elem_value_buf);
+            const elem_val = try val.elemValue(mod, i);
             elem.* = @intCast(u8, elem_val.toUnsignedInt(mod));
         }
         return result;
@@ -695,7 +658,7 @@ pub const Value = struct {
         }
     }

-    pub fn enumToInt(val: Value, ty: Type, buffer: *Payload.U64) Value {
+    pub fn enumToInt(val: Value, ty: Type, mod: *Module) Allocator.Error!Value {
         const field_index = switch (val.tag()) {
             .enum_field_index => val.castTag(.enum_field_index).?.data,
             .the_only_possible_value => blk: {
@@ -717,11 +680,7 @@ pub const Value = struct {
                     return enum_full.values.keys()[field_index];
                 } else {
                     // Field index and integer values are the same.
-                    buffer.* = .{
-                        .base = .{ .tag = .int_u64 },
-                        .data = field_index,
-                    };
-                    return Value.initPayload(&buffer.base);
+                    return mod.intValue(enum_full.tag_ty, field_index);
                 }
             },
             .enum_numbered => {
                 const enum_obj = ty.castTag(.enum_numbered).?.data;
                 if (enum_obj.values.count() != 0) {
                     return enum_obj.values.keys()[field_index];
                 } else {
                     // Field index and integer values are the same.
-                    buffer.* = .{
-                        .base = .{ .tag = .int_u64 },
-                        .data = field_index,
-                    };
-                    return Value.initPayload(&buffer.base);
+                    return mod.intValue(enum_obj.tag_ty, field_index);
                 }
             },
             .enum_simple => {
                 // Field index and integer values are the same.
-                buffer.* = .{
-                    .base = .{ .tag = .int_u64 },
-                    .data = field_index,
-                };
-                return Value.initPayload(&buffer.base);
+                const tag_ty = ty.intTagType();
+                return mod.intValue(tag_ty, field_index);
             },
             else => unreachable,
         }
@@ -802,12 +754,9 @@ pub const Value = struct {
             .undef => unreachable,
             .null_value => BigIntMutable.init(&space.limbs, 0).toConst(),
             .none => switch (val.tag()) {
-                .zero,
                 .the_only_possible_value, // i0, u0
                 => BigIntMutable.init(&space.limbs, 0).toConst(),

-                .one => BigIntMutable.init(&space.limbs, 1).toConst(),
-
                 .enum_field_index => {
                     const index = val.castTag(.enum_field_index).?.data;
                     return BigIntMutable.init(&space.limbs, index).toConst();
                 },
                 .runtime_value => {
                     const sub_val = val.castTag(.runtime_value).?.data;
                     return sub_val.toBigIntAdvanced(space, mod, opt_sema);
                 },
-                .int_u64 => BigIntMutable.init(&space.limbs, val.castTag(.int_u64).?.data).toConst(),
-                .int_i64 => BigIntMutable.init(&space.limbs, val.castTag(.int_i64).?.data).toConst(),
-                .int_big_positive => val.castTag(.int_big_positive).?.asBigInt(),
-                .int_big_negative => val.castTag(.int_big_negative).?.asBigInt(),
-
                 .lazy_align => {
                     const ty = val.castTag(.lazy_align).?.data;
                     if (opt_sema) |sema| {
@@ -869,17 +813,9 @@ pub const Value = struct {
             .bool_true => return 1,
             .undef => unreachable,
            .none => switch (val.tag()) {
-                .zero,
                 .the_only_possible_value, // i0, u0
                 => return 0,

-                .one => return 1,
-
-                .int_u64 => return val.castTag(.int_u64).?.data,
-                .int_i64 => return @intCast(u64, val.castTag(.int_i64).?.data),
-                .int_big_positive => return val.castTag(.int_big_positive).?.asBigInt().to(u64) catch null,
-                .int_big_negative => return val.castTag(.int_big_negative).?.asBigInt().to(u64) catch null,
-
                 .lazy_align => {
                     const ty = val.castTag(.lazy_align).?.data;
                     if (opt_sema) |sema| {
@@ -922,17 +858,9 @@ pub const Value = struct {
             .bool_true => return 1,
             .undef => unreachable,
             .none => switch (val.tag()) {
-                .zero,
                 .the_only_possible_value, // i0, u0
                 => return 0,

-                .one => return 1,
-
-                .int_u64 => return @intCast(i64, val.castTag(.int_u64).?.data),
-                .int_i64 => return val.castTag(.int_i64).?.data,
-                .int_big_positive => return val.castTag(.int_big_positive).?.asBigInt().to(i64) catch unreachable,
-                .int_big_negative => return val.castTag(.int_big_negative).?.asBigInt().to(i64) catch unreachable,
-
                 .lazy_align => {
                     const ty = val.castTag(.lazy_align).?.data;
                     return @intCast(i64, ty.abiAlignment(mod));
                 },
@@ -959,22 +887,7 @@ pub const Value = struct {
         return switch (val.ip_index) {
             .bool_true => true,
             .bool_false => false,
-            .none => switch (val.tag()) {
-                .one => true,
-                .zero => false,
-
-                .int_u64 => switch (val.castTag(.int_u64).?.data) {
-                    0 => false,
-                    1 => true,
-                    else => unreachable,
-                },
-                .int_i64 => switch (val.castTag(.int_i64).?.data) {
-                    0 => false,
-                    1 => true,
-                    else => unreachable,
-                },
-                else => unreachable,
-            },
+            .none => unreachable,
             else => switch (mod.intern_pool.indexToKey(val.ip_index)) {
                 .int => |int| switch (int.storage) {
                     .big_int => |big_int| !big_int.eqZero(),
@@ -1004,6 +917,7 @@ pub const Value = struct {
         ReinterpretDeclRef,
         IllDefinedMemoryLayout,
         Unimplemented,
+        OutOfMemory,
     }!void {
         const target = mod.getTarget();
         const endian = target.cpu.arch.endian();
@@ -1022,16 +936,14 @@ pub const Value = struct {
                 const bits = int_info.bits;
                 const byte_count = (bits + 7) / 8;

-                var enum_buffer: Payload.U64 = undefined;
-                const int_val = val.enumToInt(ty, &enum_buffer);
+                const int_val = try val.enumToInt(ty, mod);

                 if (byte_count <= @sizeOf(u64)) {
-                    const int: u64 = switch (int_val.tag()) {
-                        .zero => 0,
-                        .one => 1,
-                        .int_u64 => int_val.castTag(.int_u64).?.data,
-                        .int_i64 => @bitCast(u64, int_val.castTag(.int_i64).?.data),
-                        else => unreachable,
+                    const ip_key = mod.intern_pool.indexToKey(int_val.ip_index);
+                    const int: u64 = switch (ip_key.int.storage) {
+                        .u64 => |x| x,
+                        .i64 => |x| @bitCast(u64, x),
+                        .big_int => unreachable,
                     };
                     for (buffer[0..byte_count], 0..) |_, i| switch (endian) {
                         .Little => buffer[i] = @truncate(u8, (int >> @intCast(u6, (8 * i)))),
@@ -1044,11 +956,11 @@ pub const Value = struct {
                 }
             },
             .Float => switch (ty.floatBits(target)) {
-                16 => std.mem.writeInt(u16, buffer[0..2], @bitCast(u16, val.toFloat(f16)), endian),
-                32 => std.mem.writeInt(u32, buffer[0..4], @bitCast(u32, val.toFloat(f32)), endian),
-                64 => std.mem.writeInt(u64, buffer[0..8], @bitCast(u64, val.toFloat(f64)), endian),
-                80 => std.mem.writeInt(u80, buffer[0..10], @bitCast(u80, val.toFloat(f80)), endian),
-                128 => std.mem.writeInt(u128, buffer[0..16], @bitCast(u128, val.toFloat(f128)), endian),
+                16 => std.mem.writeInt(u16, buffer[0..2], @bitCast(u16, val.toFloat(f16, mod)), endian),
+                32 => std.mem.writeInt(u32, buffer[0..4], @bitCast(u32, val.toFloat(f32, mod)), endian),
+                64 => std.mem.writeInt(u64, buffer[0..8], @bitCast(u64, val.toFloat(f64, mod)), endian),
+                80 => std.mem.writeInt(u80, buffer[0..10], @bitCast(u80, val.toFloat(f80, mod)), endian),
+                128 => std.mem.writeInt(u128, buffer[0..16], @bitCast(u128, val.toFloat(f128, mod)), endian),
                 else => unreachable,
             },
             .Array => {
                 const len = ty.arrayLen(mod);
                 const elem_ty = ty.childType(mod);
                 const elem_size = @intCast(usize, elem_ty.abiSize(mod));
                 var elem_i: usize = 0;
-                var elem_value_buf: ElemValueBuffer = undefined;
                 var buf_off: usize = 0;
                 while (elem_i < len) : (elem_i += 1) {
-                    const elem_val = val.elemValueBuffer(mod, elem_i, &elem_value_buf);
+                    const elem_val = try val.elemValue(mod, elem_i);
                     try elem_val.writeToMemory(elem_ty, mod, buffer[buf_off..]);
                     buf_off += elem_size;
                 }
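
For reference, the call-site migration this implies looks roughly like the sketch below. `enumTagAsU64` is a hypothetical caller, not part of the patch; it only assumes a `mod: *Module` in scope and the `enumToInt`/`toUnsignedInt` signatures shown above.

    // Hypothetical caller, for illustration only.
    fn enumTagAsU64(val: Value, ty: Type, mod: *Module) !u64 {
        // Before this change, callers kept the result alive in a stack buffer:
        //   var buf: Value.Payload.U64 = undefined;
        //   const int_val = val.enumToInt(ty, &buf);
        // Now the integer is interned in the Module, so the call may allocate
        // and can fail with error.OutOfMemory.
        const int_val = try val.enumToInt(ty, mod);
        return int_val.toUnsignedInt(mod);
    }
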
@@ -1122,7 +1033,13 @@ pub const Value = struct {
     ///
     /// Both the start and the end of the provided buffer must be tight, since
     /// big-endian packed memory layouts start at the end of the buffer.
-    pub fn writeToPackedMemory(val: Value, ty: Type, mod: *Module, buffer: []u8, bit_offset: usize) error{ReinterpretDeclRef}!void {
+    pub fn writeToPackedMemory(
+        val: Value,
+        ty: Type,
+        mod: *Module,
+        buffer: []u8,
+        bit_offset: usize,
+    ) error{ ReinterpretDeclRef, OutOfMemory }!void {
         const target = mod.getTarget();
         const endian = target.cpu.arch.endian();
         if (val.isUndef()) {
@@ -1147,16 +1064,14 @@ pub const Value = struct {
                 const bits = ty.intInfo(mod).bits;
                 const abi_size = @intCast(usize, ty.abiSize(mod));

-                var enum_buffer: Payload.U64 = undefined;
-                const int_val = val.enumToInt(ty, &enum_buffer);
+                const int_val = try val.enumToInt(ty, mod);

                 if (abi_size == 0) return;
                 if (abi_size <= @sizeOf(u64)) {
-                    const int: u64 = switch (int_val.tag()) {
-                        .zero => 0,
-                        .one => 1,
-                        .int_u64 => int_val.castTag(.int_u64).?.data,
-                        .int_i64 => @bitCast(u64, int_val.castTag(.int_i64).?.data),
+                    const ip_key = mod.intern_pool.indexToKey(int_val.ip_index);
+                    const int: u64 = switch (ip_key.int.storage) {
+                        .u64 => |x| x,
+                        .i64 => |x| @bitCast(u64, x),
                         else => unreachable,
                     };
                     std.mem.writeVarPackedInt(buffer, bit_offset, bits, int, endian);
@@ -1167,11 +1082,11 @@ pub const Value = struct {
                 }
             },
             .Float => switch (ty.floatBits(target)) {
-                16 => std.mem.writePackedInt(u16, buffer, bit_offset, @bitCast(u16, val.toFloat(f16)), endian),
-                32 => std.mem.writePackedInt(u32, buffer, bit_offset, @bitCast(u32, val.toFloat(f32)), endian),
-                64 => std.mem.writePackedInt(u64, buffer, bit_offset, @bitCast(u64, val.toFloat(f64)), endian),
-                80 => std.mem.writePackedInt(u80, buffer, bit_offset, @bitCast(u80, val.toFloat(f80)), endian),
-                128 => std.mem.writePackedInt(u128, buffer, bit_offset, @bitCast(u128, val.toFloat(f128)), endian),
+                16 => std.mem.writePackedInt(u16, buffer, bit_offset, @bitCast(u16, val.toFloat(f16, mod)), endian),
+                32 => std.mem.writePackedInt(u32, buffer, bit_offset, @bitCast(u32, val.toFloat(f32, mod)), endian),
+                64 => std.mem.writePackedInt(u64, buffer, bit_offset, @bitCast(u64, val.toFloat(f64, mod)), endian),
+                80 => std.mem.writePackedInt(u80, buffer, bit_offset, @bitCast(u80, val.toFloat(f80, mod)), endian),
+                128 => std.mem.writePackedInt(u128, buffer, bit_offset, @bitCast(u128, val.toFloat(f128, mod)), endian),
                 else => unreachable,
             },
             .Vector => {
@@ -1181,11 +1096,10 @@ pub const Value = struct {

                 var bits: u16 = 0;
                 var elem_i: usize = 0;
-                var elem_value_buf: ElemValueBuffer = undefined;
                 while (elem_i < len) : (elem_i += 1) {
                     // On big-endian systems, LLVM reverses the element order of vectors by default
                     const tgt_elem_i = if (endian == .Big) len - elem_i - 1 else elem_i;
-                    const elem_val = val.elemValueBuffer(mod, tgt_elem_i, &elem_value_buf);
+                    const elem_val = try val.elemValue(mod, tgt_elem_i);
                     try elem_val.writeToPackedMemory(elem_ty, mod, buffer, bit_offset + bits);
                     bits += elem_bit_size;
                 }
@@ -1264,11 +1178,13 @@ pub const Value = struct {
             if (bits <= 64) switch (int_info.signedness) { // Fast path for integers <= u64
                 .signed => {
                     const val = std.mem.readVarInt(i64, buffer[0..byte_count], endian);
-                    return Value.Tag.int_i64.create(arena, (val << @intCast(u6, 64 - bits)) >> @intCast(u6, 64 - bits));
+                    const result = (val << @intCast(u6, 64 - bits)) >> @intCast(u6, 64 - bits);
+                    return mod.intValue(ty, result);
                 },
                 .unsigned => {
                     const val = std.mem.readVarInt(u64, buffer[0..byte_count], endian);
-                    return Value.Tag.int_u64.create(arena, (val << @intCast(u6, 64 - bits)) >> @intCast(u6, 64 - bits));
+                    const result = (val << @intCast(u6, 64 - bits)) >> @intCast(u6, 64 - bits);
+                    return mod.intValue(ty, result);
                 },
             } else { // Slow path, we have to construct a big-int
                 const Limb = std.math.big.Limb;
@@ -1277,7 +1193,7 @@ pub const Value = struct {
                 var bigint = BigIntMutable.init(limbs_buffer, 0);

                 bigint.readTwosComplement(buffer[0..byte_count], bits, endian, int_info.signedness);
-                return fromBigInt(arena, bigint.toConst());
+                return mod.intValue_big(ty, bigint.toConst());
             }
         },
         .Float => switch (ty.floatBits(target)) {
@@ -1381,8 +1297,8 @@ pub const Value = struct {
             const bits = int_info.bits;
             if (bits == 0) return Value.zero;
             if (bits <= 64) switch (int_info.signedness) { // Fast path for integers <= u64
-                .signed => return Value.Tag.int_i64.create(arena, std.mem.readVarPackedInt(i64, buffer, bit_offset, bits, endian, .signed)),
-                .unsigned => return Value.Tag.int_u64.create(arena, std.mem.readVarPackedInt(u64, buffer, bit_offset, bits, endian, .unsigned)),
+                .signed => return mod.intValue(ty, std.mem.readVarPackedInt(i64, buffer, bit_offset, bits, endian, .signed)),
+                .unsigned => return mod.intValue(ty, std.mem.readVarPackedInt(u64, buffer, bit_offset, bits, endian, .unsigned)),
             } else { // Slow path, we have to construct a big-int
                 const Limb = std.math.big.Limb;
                 const limb_count = (abi_size + @sizeOf(Limb) - 1) / @sizeOf(Limb);
@@ -1390,7 +1306,7 @@ pub const Value = struct {
                 var bigint = BigIntMutable.init(limbs_buffer, 0);

                 bigint.readPackedTwosComplement(buffer, bit_offset, bits, endian, int_info.signedness);
-                return fromBigInt(arena, bigint.toConst());
+                return mod.intValue_big(ty, bigint.toConst());
             }
         },
         .Float => switch (ty.floatBits(target)) {
@@ -1444,32 +1360,29 @@ pub const Value = struct {
     }

     /// Asserts that the value is a float or an integer.
-    pub fn toFloat(val: Value, comptime T: type) T {
-        return switch (val.tag()) {
-            .float_16 => @floatCast(T, val.castTag(.float_16).?.data),
-            .float_32 => @floatCast(T, val.castTag(.float_32).?.data),
-            .float_64 => @floatCast(T, val.castTag(.float_64).?.data),
-            .float_80 => @floatCast(T, val.castTag(.float_80).?.data),
-            .float_128 => @floatCast(T, val.castTag(.float_128).?.data),
-
-            .zero => 0,
-            .one => 1,
-            .int_u64 => {
-                if (T == f80) {
-                    @panic("TODO we can't lower this properly on non-x86 llvm backend yet");
-                }
-                return @intToFloat(T, val.castTag(.int_u64).?.data);
+    pub fn toFloat(val: Value, comptime T: type, mod: *const Module) T {
+        return switch (val.ip_index) {
+            .none => switch (val.tag()) {
+                .float_16 => @floatCast(T, val.castTag(.float_16).?.data),
+                .float_32 => @floatCast(T, val.castTag(.float_32).?.data),
+                .float_64 => @floatCast(T, val.castTag(.float_64).?.data),
+                .float_80 => @floatCast(T, val.castTag(.float_80).?.data),
+                .float_128 => @floatCast(T, val.castTag(.float_128).?.data),
+
+                else => unreachable,
             },
-            .int_i64 => {
-                if (T == f80) {
-                    @panic("TODO we can't lower this properly on non-x86 llvm backend yet");
-                }
-                return @intToFloat(T, val.castTag(.int_i64).?.data);
+            else => switch (mod.intern_pool.indexToKey(val.ip_index)) {
+                .int => |int| switch (int.storage) {
+                    .big_int => |big_int| @floatCast(T, bigIntToFloat(big_int.limbs, big_int.positive)),
+                    inline .u64, .i64 => |x| {
+                        if (T == f80) {
+                            @panic("TODO we can't lower this properly on non-x86 llvm backend yet");
+                        }
+                        return @intToFloat(T, x);
+                    },
+                },
+                else => unreachable,
             },
-
-            .int_big_positive => @floatCast(T, bigIntToFloat(val.castTag(.int_big_positive).?.data, true)),
-            .int_big_negative => @floatCast(T, bigIntToFloat(val.castTag(.int_big_negative).?.data, false)),
-            else => unreachable,
         };
     }

@@ -1498,24 +1411,6 @@ pub const Value = struct {
             .bool_false => ty_bits,
             .bool_true => ty_bits - 1,
             .none => switch (val.tag()) {
-                .zero => ty_bits,
-                .one => ty_bits - 1,
-
-                .int_u64 => {
-                    const big = @clz(val.castTag(.int_u64).?.data);
-                    return big + ty_bits - 64;
-                },
-                .int_i64 => {
-                    @panic("TODO implement i64 Value clz");
-                },
-                .int_big_positive => {
-                    const bigint = val.castTag(.int_big_positive).?.asBigInt();
-                    return bigint.clz(ty_bits);
-                },
-                .int_big_negative => {
-                    @panic("TODO implement int_big_negative Value clz");
-                },
-
                 .the_only_possible_value => {
                     assert(ty_bits == 0);
                     return ty_bits;
@@ -1546,24 +1441,6 @@ pub const Value = struct {
             .bool_false => ty_bits,
             .bool_true => 0,
             .none => switch (val.tag()) {
-                .zero => ty_bits,
-                .one => 0,
-
-                .int_u64 => {
-                    const big = @ctz(val.castTag(.int_u64).?.data);
-                    return if (big == 64) ty_bits else big;
-                },
-                .int_i64 => {
-                    @panic("TODO implement i64 Value ctz");
-                },
-                .int_big_positive => {
-                    const bigint = val.castTag(.int_big_positive).?.asBigInt();
-                    return bigint.ctz();
-                },
-                .int_big_negative => {
-                    @panic("TODO implement int_big_negative Value ctz");
-                },
-
                 .the_only_possible_value => {
                     assert(ty_bits == 0);
                     return ty_bits;
@@ -1596,20 +1473,7 @@ pub const Value = struct {
         switch (val.ip_index) {
             .bool_false => return 0,
             .bool_true => return 1,
-            .none => switch (val.tag()) {
-                .zero => return 0,
-                .one => return 1,
-
-                .int_u64 => return @popCount(val.castTag(.int_u64).?.data),
-
-                else => {
-                    const info = ty.intInfo(mod);
-
-                    var buffer: Value.BigIntSpace = undefined;
-                    const int = val.toBigInt(&buffer, mod);
-                    return @intCast(u64, int.popCount(info.bits));
-                },
-            },
+            .none => unreachable,
             else => switch (mod.intern_pool.indexToKey(val.ip_index)) {
                 .int => |int| {
                     const info = ty.intInfo(mod);
@@ -1622,7 +1486,7 @@ pub const Value = struct {
         }
     }

-    pub fn bitReverse(val: Value, ty: Type, mod: *const Module, arena: Allocator) !Value {
+    pub fn bitReverse(val: Value, ty: Type, mod: *Module, arena: Allocator) !Value {
         assert(!val.isUndef());

         const info = ty.intInfo(mod);
@@ -1637,10 +1501,10 @@ pub const Value = struct {
         var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
         result_bigint.bitReverse(operand_bigint, info.signedness, info.bits);

-        return fromBigInt(arena, result_bigint.toConst());
+        return mod.intValue_big(ty, result_bigint.toConst());
     }

-    pub fn byteSwap(val: Value, ty: Type, mod: *const Module, arena: Allocator) !Value {
+    pub fn byteSwap(val: Value, ty: Type, mod: *Module, arena: Allocator) !Value {
         assert(!val.isUndef());

         const info = ty.intInfo(mod);
@@ -1658,7 +1522,7 @@ pub const Value = struct {
         var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
         result_bigint.byteSwap(operand_bigint, info.signedness, info.bits / 8);

-        return fromBigInt(arena, result_bigint.toConst());
+        return mod.intValue_big(ty, result_bigint.toConst());
     }

     /// Asserts the value is an integer and not undefined.
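
The recurring substitution here is `fromBigInt(arena, ...)` (removed further down in this diff) becoming `mod.intValue(ty, x)` / `mod.intValue_big(ty, big)`: results are interned against their type instead of allocated in an arena. A minimal sketch of the new pattern, under the assumption that `ty` is the integer result type; the helper name is illustrative, not part of the patch:

    // Illustrative helper only; the patch calls these entry points directly.
    fn internIntResult(mod: *Module, ty: Type, big: std.math.big.int.Const) !Value {
        // intValue covers results that fit in a machine integer...
        if (big.to(i64)) |small| {
            return mod.intValue(ty, small);
        } else |_| {
            // ...while intValue_big interns arbitrary-precision results.
            return mod.intValue_big(ty, big);
        }
    }
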
@@ -1669,19 +1533,7 @@ pub const Value = struct {
             .bool_false => 0,
             .bool_true => 1,
             .none => switch (self.tag()) {
-                .zero,
-                .the_only_possible_value,
-                => 0,
-
-                .one => 1,
-
-                .int_u64 => {
-                    const x = self.castTag(.int_u64).?.data;
-                    if (x == 0) return 0;
-                    return @intCast(usize, std.math.log2(x) + 1);
-                },
-                .int_big_positive => self.castTag(.int_big_positive).?.asBigInt().bitCountTwosComp(),
-                .int_big_negative => self.castTag(.int_big_negative).?.asBigInt().bitCountTwosComp(),
+                .the_only_possible_value => 0,

                 .decl_ref_mut,
                 .comptime_field_ptr,
@@ -1715,13 +1567,14 @@ pub const Value = struct {
     /// Converts an integer or a float to a float. May result in a loss of information.
     /// Caller can find out by equality checking the result against the operand.
-    pub fn floatCast(self: Value, arena: Allocator, dest_ty: Type, target: Target) !Value {
+    pub fn floatCast(self: Value, arena: Allocator, dest_ty: Type, mod: *const Module) !Value {
+        const target = mod.getTarget();
         switch (dest_ty.floatBits(target)) {
-            16 => return Value.Tag.float_16.create(arena, self.toFloat(f16)),
-            32 => return Value.Tag.float_32.create(arena, self.toFloat(f32)),
-            64 => return Value.Tag.float_64.create(arena, self.toFloat(f64)),
-            80 => return Value.Tag.float_80.create(arena, self.toFloat(f80)),
-            128 => return Value.Tag.float_128.create(arena, self.toFloat(f128)),
+            16 => return Value.Tag.float_16.create(arena, self.toFloat(f16, mod)),
+            32 => return Value.Tag.float_32.create(arena, self.toFloat(f32, mod)),
+            64 => return Value.Tag.float_64.create(arena, self.toFloat(f64, mod)),
+            80 => return Value.Tag.float_80.create(arena, self.toFloat(f80, mod)),
+            128 => return Value.Tag.float_128.create(arena, self.toFloat(f128, mod)),
             else => unreachable,
         }
     }
@@ -1729,10 +1582,6 @@ pub const Value = struct {
     /// Asserts the value is a float
     pub fn floatHasFraction(self: Value) bool {
         return switch (self.tag()) {
-            .zero,
-            .one,
-            => false,
-
             .float_16 => @rem(self.castTag(.float_16).?.data, 1) != 0,
             .float_32 => @rem(self.castTag(.float_32).?.data, 1) != 0,
             .float_64 => @rem(self.castTag(.float_64).?.data, 1) != 0,
@@ -1757,11 +1606,8 @@ pub const Value = struct {
             .bool_false => return .eq,
             .bool_true => return .gt,
             .none => return switch (lhs.tag()) {
-                .zero,
-                .the_only_possible_value,
-                => .eq,
+                .the_only_possible_value => .eq,

-                .one,
                 .decl_ref,
                 .decl_ref_mut,
                 .comptime_field_ptr,
@@ -1777,10 +1623,6 @@ pub const Value = struct {
                     const val = lhs.castTag(.runtime_value).?.data;
                     return val.orderAgainstZeroAdvanced(mod, opt_sema);
                 },
-                .int_u64 => std.math.order(lhs.castTag(.int_u64).?.data, 0),
-                .int_i64 => std.math.order(lhs.castTag(.int_i64).?.data, 0),
-                .int_big_positive => lhs.castTag(.int_big_positive).?.asBigInt().orderAgainstScalar(0),
-                .int_big_negative => lhs.castTag(.int_big_negative).?.asBigInt().orderAgainstScalar(0),

                 .lazy_align => {
                     const ty = lhs.castTag(.lazy_align).?.data;
@@ -1878,8 +1720,8 @@ pub const Value = struct {
             }
         }
         if (lhs_float or rhs_float) {
-            const lhs_f128 = lhs.toFloat(f128);
-            const rhs_f128 = rhs.toFloat(f128);
+            const lhs_f128 = lhs.toFloat(f128, mod);
+            const rhs_f128 = rhs.toFloat(f128, mod);
             return std.math.order(lhs_f128, rhs_f128);
         }
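
Since `toFloat` now consults the intern pool, every float conversion threads `mod` through, as in the comparison just above. A sketch of the resulting call shape; `floatOrder` is a hypothetical helper, not part of the patch:

    // Hypothetical helper showing the new toFloat signature in use.
    fn floatOrder(a: Value, b: Value, mod: *const Module) std.math.Order {
        const lhs = a.toFloat(f128, mod);
        const rhs = b.toFloat(f128, mod);
        return std.math.order(lhs, rhs);
    }
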
@@ -1929,15 +1771,13 @@ pub const Value = struct {
     /// Asserts the values are comparable. Both operands have type `ty`.
     /// For vectors, returns true if comparison is true for ALL elements.
-    pub fn compareAll(lhs: Value, op: std.math.CompareOperator, rhs: Value, ty: Type, mod: *Module) bool {
+    pub fn compareAll(lhs: Value, op: std.math.CompareOperator, rhs: Value, ty: Type, mod: *Module) !bool {
         if (ty.zigTypeTag(mod) == .Vector) {
-            var i: usize = 0;
-            while (i < ty.vectorLen(mod)) : (i += 1) {
-                var lhs_buf: Value.ElemValueBuffer = undefined;
-                var rhs_buf: Value.ElemValueBuffer = undefined;
-                const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
-                const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
-                if (!compareScalar(lhs_elem, op, rhs_elem, ty.scalarType(mod), mod)) {
+            const scalar_ty = ty.scalarType(mod);
+            for (0..ty.vectorLen(mod)) |i| {
+                const lhs_elem = try lhs.elemValue(mod, i);
+                const rhs_elem = try rhs.elemValue(mod, i);
+                if (!compareScalar(lhs_elem, op, rhs_elem, scalar_ty, mod)) {
                     return false;
                 }
             }
@@ -2203,10 +2043,8 @@ pub const Value = struct {
             return a_type.eql(b_type, mod);
         },
         .Enum => {
-            var buf_a: Payload.U64 = undefined;
-            var buf_b: Payload.U64 = undefined;
-            const a_val = a.enumToInt(ty, &buf_a);
-            const b_val = b.enumToInt(ty, &buf_b);
+            const a_val = try a.enumToInt(ty, mod);
+            const b_val = try b.enumToInt(ty, mod);
             const int_ty = ty.intTagType();
             return eqlAdvanced(a_val, int_ty, b_val, int_ty, mod, opt_sema);
         },
@@ -2214,11 +2052,9 @@ pub const Value = struct {
             const len = ty.arrayLen(mod);
             const elem_ty = ty.childType(mod);
             var i: usize = 0;
-            var a_buf: ElemValueBuffer = undefined;
-            var b_buf: ElemValueBuffer = undefined;
             while (i < len) : (i += 1) {
-                const a_elem = elemValueBuffer(a, mod, i, &a_buf);
-                const b_elem = elemValueBuffer(b, mod, i, &b_buf);
+                const a_elem = try elemValue(a, mod, i);
+                const b_elem = try elemValue(b, mod, i);
                 if (!(try eqlAdvanced(a_elem, elem_ty, b_elem, elem_ty, mod, opt_sema))) {
                     return false;
                 }
@@ -2282,17 +2118,17 @@ pub const Value = struct {
         },
         .Float => {
             switch (ty.floatBits(target)) {
-                16 => return @bitCast(u16, a.toFloat(f16)) == @bitCast(u16, b.toFloat(f16)),
-                32 => return @bitCast(u32, a.toFloat(f32)) == @bitCast(u32, b.toFloat(f32)),
-                64 => return @bitCast(u64, a.toFloat(f64)) == @bitCast(u64, b.toFloat(f64)),
-                80 => return @bitCast(u80, a.toFloat(f80)) == @bitCast(u80, b.toFloat(f80)),
-                128 => return @bitCast(u128, a.toFloat(f128)) == @bitCast(u128, b.toFloat(f128)),
+                16 => return @bitCast(u16, a.toFloat(f16, mod)) == @bitCast(u16, b.toFloat(f16, mod)),
+                32 => return @bitCast(u32, a.toFloat(f32, mod)) == @bitCast(u32, b.toFloat(f32, mod)),
+                64 => return @bitCast(u64, a.toFloat(f64, mod)) == @bitCast(u64, b.toFloat(f64, mod)),
+                80 => return @bitCast(u80, a.toFloat(f80, mod)) == @bitCast(u80, b.toFloat(f80, mod)),
+                128 => return @bitCast(u128, a.toFloat(f128, mod)) == @bitCast(u128, b.toFloat(f128, mod)),
                 else => unreachable,
             }
         },
         .ComptimeFloat => {
-            const a_float = a.toFloat(f128);
-            const b_float = b.toFloat(f128);
+            const a_float = a.toFloat(f128, mod);
+            const b_float = b.toFloat(f128, mod);

             const a_nan = std.math.isNan(a_float);
             const b_nan = std.math.isNan(b_float);
@@ -2354,16 +2190,16 @@ pub const Value = struct {
         .Float => {
             // For hash/eql purposes, we treat floats as their IEEE integer representation.
            switch (ty.floatBits(mod.getTarget())) {
-                16 => std.hash.autoHash(hasher, @bitCast(u16, val.toFloat(f16))),
-                32 => std.hash.autoHash(hasher, @bitCast(u32, val.toFloat(f32))),
-                64 => std.hash.autoHash(hasher, @bitCast(u64, val.toFloat(f64))),
-                80 => std.hash.autoHash(hasher, @bitCast(u80, val.toFloat(f80))),
-                128 => std.hash.autoHash(hasher, @bitCast(u128, val.toFloat(f128))),
+                16 => std.hash.autoHash(hasher, @bitCast(u16, val.toFloat(f16, mod))),
+                32 => std.hash.autoHash(hasher, @bitCast(u32, val.toFloat(f32, mod))),
+                64 => std.hash.autoHash(hasher, @bitCast(u64, val.toFloat(f64, mod))),
+                80 => std.hash.autoHash(hasher, @bitCast(u80, val.toFloat(f80, mod))),
+                128 => std.hash.autoHash(hasher, @bitCast(u128, val.toFloat(f128, mod))),
                 else => unreachable,
             }
         },
         .ComptimeFloat => {
-            const float = val.toFloat(f128);
+            const float = val.toFloat(f128, mod);
             const is_nan = std.math.isNan(float);
             std.hash.autoHash(hasher, is_nan);
             if (!is_nan) {
@@ -2387,9 +2223,11 @@ pub const Value = struct {
             const len = ty.arrayLen(mod);
             const elem_ty = ty.childType(mod);
             var index: usize = 0;
-            var elem_value_buf: ElemValueBuffer = undefined;
             while (index < len) : (index += 1) {
-                const elem_val = val.elemValueBuffer(mod, index, &elem_value_buf);
+                const elem_val = val.elemValue(mod, index) catch |err| switch (err) {
+                    // Will be solved when arrays and vectors get migrated to the intern pool.
+                    error.OutOfMemory => @panic("OOM"),
+                };
                 elem_val.hash(elem_ty, hasher, mod);
             }
         },
@@ -2438,8 +2276,8 @@ pub const Value = struct {
             hasher.update(val.getError().?);
         },
         .Enum => {
-            var enum_space: Payload.U64 = undefined;
-            const int_val = val.enumToInt(ty, &enum_space);
+            // This panic will go away when enum values move to be stored in the intern pool.
+            const int_val = val.enumToInt(ty, mod) catch @panic("OOM");
             hashInt(int_val, hasher, mod);
         },
         .Union => {
@@ -2494,7 +2332,7 @@ pub const Value = struct {
         .Type => {
            val.toType().hashWithHasher(hasher, mod);
         },
-        .Float, .ComptimeFloat => std.hash.autoHash(hasher, @bitCast(u128, val.toFloat(f128))),
+        .Float, .ComptimeFloat => std.hash.autoHash(hasher, @bitCast(u128, val.toFloat(f128, mod))),
         .Bool, .Int, .ComptimeInt, .Pointer, .Fn => switch (val.tag()) {
             .slice => {
                 const slice = val.castTag(.slice).?.data;
@@ -2508,9 +2346,11 @@ pub const Value = struct {
             const len = ty.arrayLen(mod);
             const elem_ty = ty.childType(mod);
             var index: usize = 0;
-            var elem_value_buf: ElemValueBuffer = undefined;
             while (index < len) : (index += 1) {
-                const elem_val = val.elemValueBuffer(mod, index, &elem_value_buf);
+                const elem_val = val.elemValue(mod, index) catch |err| switch (err) {
+                    // Will be solved when arrays and vectors get migrated to the intern pool.
+                    error.OutOfMemory => @panic("OOM"),
+                };
                 elem_val.hashUncoerced(elem_ty, hasher, mod);
             }
         },
@@ -2661,12 +2501,6 @@ pub const Value = struct {
             hashPtr(opt_ptr.container_ptr, hasher, mod);
         },

-        .zero,
-        .one,
-        .int_u64,
-        .int_i64,
-        .int_big_positive,
-        .int_big_negative,
         .the_only_possible_value,
         .lazy_align,
         .lazy_size,
@@ -2720,23 +2554,7 @@ pub const Value = struct {
     /// Asserts the value is a single-item pointer to an array, or an array,
     /// or an unknown-length pointer, and returns the element value at the index.
-    pub fn elemValue(val: Value, mod: *Module, arena: Allocator, index: usize) !Value {
-        return elemValueAdvanced(val, mod, index, arena, undefined);
-    }
-
-    pub const ElemValueBuffer = Payload.U64;
-
-    pub fn elemValueBuffer(val: Value, mod: *Module, index: usize, buffer: *ElemValueBuffer) Value {
-        return elemValueAdvanced(val, mod, index, null, buffer) catch unreachable;
-    }
-
-    pub fn elemValueAdvanced(
-        val: Value,
-        mod: *Module,
-        index: usize,
-        arena: ?Allocator,
-        buffer: *ElemValueBuffer,
-    ) error{OutOfMemory}!Value {
+    pub fn elemValue(val: Value, mod: *Module, index: usize) Allocator.Error!Value {
         switch (val.ip_index) {
             .undef => return Value.undef,
             .none => switch (val.tag()) {
@@ -2751,43 +2569,27 @@ pub const Value = struct {

                 .bytes => {
                     const byte = val.castTag(.bytes).?.data[index];
-                    if (arena) |a| {
-                        return Tag.int_u64.create(a, byte);
-                    } else {
-                        buffer.* = .{
-                            .base = .{ .tag = .int_u64 },
-                            .data = byte,
-                        };
-                        return initPayload(&buffer.base);
-                    }
+                    return mod.intValue(Type.u8, byte);
                 },
                 .str_lit => {
                     const str_lit = val.castTag(.str_lit).?.data;
                     const bytes = mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len];
                     const byte = bytes[index];
-                    if (arena) |a| {
-                        return Tag.int_u64.create(a, byte);
-                    } else {
-                        buffer.* = .{
-                            .base = .{ .tag = .int_u64 },
-                            .data = byte,
-                        };
-                        return initPayload(&buffer.base);
-                    }
+                    return mod.intValue(Type.u8, byte);
                 },

                 // No matter the index; all the elements are the same!
                 .repeated => return val.castTag(.repeated).?.data,
                 .aggregate => return val.castTag(.aggregate).?.data[index],
-                .slice => return val.castTag(.slice).?.data.ptr.elemValueAdvanced(mod, index, arena, buffer),
+                .slice => return val.castTag(.slice).?.data.ptr.elemValue(mod, index),

-                .decl_ref => return mod.declPtr(val.castTag(.decl_ref).?.data).val.elemValueAdvanced(mod, index, arena, buffer),
-                .decl_ref_mut => return mod.declPtr(val.castTag(.decl_ref_mut).?.data.decl_index).val.elemValueAdvanced(mod, index, arena, buffer),
-                .comptime_field_ptr => return val.castTag(.comptime_field_ptr).?.data.field_val.elemValueAdvanced(mod, index, arena, buffer),
+                .decl_ref => return mod.declPtr(val.castTag(.decl_ref).?.data).val.elemValue(mod, index),
+                .decl_ref_mut => return mod.declPtr(val.castTag(.decl_ref_mut).?.data.decl_index).val.elemValue(mod, index),
+                .comptime_field_ptr => return val.castTag(.comptime_field_ptr).?.data.field_val.elemValue(mod, index),
                 .elem_ptr => {
                     const data = val.castTag(.elem_ptr).?.data;
-                    return data.array_ptr.elemValueAdvanced(mod, index + data.index, arena, buffer);
+                    return data.array_ptr.elemValue(mod, index + data.index);
                 },
                 .field_ptr => {
                     const data = val.castTag(.field_ptr).?.data;
@@ -2795,7 +2597,7 @@ pub const Value = struct {
                         const container_decl = mod.declPtr(decl_index);
                         const field_type = data.container_ty.structFieldType(data.field_index);
                         const field_val = container_decl.val.fieldValue(field_type, mod, data.field_index);
-                        return field_val.elemValueAdvanced(mod, index, arena, buffer);
+                        return field_val.elemValue(mod, index);
                     } else unreachable;
                 },
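
All of the buffer-based accessors collapse into the single fallible `elemValue`, so element iteration becomes one `try` per element, roughly as in this sketch; `sumBytes` is a hypothetical caller, not part of the patch:

    // Hypothetical caller: sum the bytes of an array value.
    fn sumBytes(val: Value, len: usize, mod: *Module) !u64 {
        var total: u64 = 0;
        for (0..len) |i| {
            // elemValue may intern a new integer, hence the try.
            const elem = try val.elemValue(mod, i);
            total += elem.toUnsignedInt(mod);
        }
        return total;
    }
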
@@ -2803,11 +2605,11 @@ pub const Value = struct {
                 // to have only one possible value itself.
                 .the_only_possible_value => return val,

-                .opt_payload_ptr => return val.castTag(.opt_payload_ptr).?.data.container_ptr.elemValueAdvanced(mod, index, arena, buffer),
-                .eu_payload_ptr => return val.castTag(.eu_payload_ptr).?.data.container_ptr.elemValueAdvanced(mod, index, arena, buffer),
+                .opt_payload_ptr => return val.castTag(.opt_payload_ptr).?.data.container_ptr.elemValue(mod, index),
+                .eu_payload_ptr => return val.castTag(.eu_payload_ptr).?.data.container_ptr.elemValue(mod, index),

-                .opt_payload => return val.castTag(.opt_payload).?.data.elemValueAdvanced(mod, index, arena, buffer),
-                .eu_payload => return val.castTag(.eu_payload).?.data.elemValueAdvanced(mod, index, arena, buffer),
+                .opt_payload => return val.castTag(.opt_payload).?.data.elemValue(mod, index),
+                .eu_payload => return val.castTag(.eu_payload).?.data.elemValue(mod, index),

                 else => unreachable,
             },
@@ -3004,7 +2806,7 @@ pub const Value = struct {
     /// TODO: check for cases such as array that is not marked undef but all the element
     /// values are marked undef, or struct that is not marked undef but all fields are marked
     /// undef, etc.
-    pub fn anyUndef(self: Value, mod: *Module) bool {
+    pub fn anyUndef(self: Value, mod: *Module) !bool {
         switch (self.ip_index) {
             .undef => return true,
             .none => switch (self.tag()) {
@@ -3012,18 +2814,16 @@ pub const Value = struct {
                 .slice => {
                     const payload = self.castTag(.slice).?;
                     const len = payload.data.len.toUnsignedInt(mod);

-                    var elem_value_buf: ElemValueBuffer = undefined;
-                    var i: usize = 0;
-                    while (i < len) : (i += 1) {
-                        const elem_val = payload.data.ptr.elemValueBuffer(mod, i, &elem_value_buf);
-                        if (elem_val.anyUndef(mod)) return true;
+                    for (0..len) |i| {
+                        const elem_val = try payload.data.ptr.elemValue(mod, i);
+                        if (try elem_val.anyUndef(mod)) return true;
                     }
                 },
                 .aggregate => {
                     const payload = self.castTag(.aggregate).?;
                     for (payload.data) |val| {
-                        if (val.anyUndef(mod)) return true;
+                        if (try val.anyUndef(mod)) return true;
                     }
                 },
                 else => {},
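
Because element access can now allocate, `anyUndef` becomes fallible and its callers propagate the error. A sketch of the resulting call shape; `allDefined` is a hypothetical helper, not part of the patch:

    // Hypothetical caller: reject aggregates containing any undef element.
    fn allDefined(vals: []const Value, mod: *Module) !bool {
        for (vals) |v| {
            if (try v.anyUndef(mod)) return false;
        }
        return true;
    }
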
@@ -3036,35 +2836,37 @@ pub const Value = struct {
     /// Asserts the value is not undefined and not unreachable.
     /// Integer value 0 is considered null because of C pointers.
-    pub fn isNull(self: Value, mod: *const Module) bool {
-        return switch (self.ip_index) {
+    pub fn isNull(val: Value, mod: *const Module) bool {
+        return switch (val.ip_index) {
             .undef => unreachable,
             .unreachable_value => unreachable,
-            .null_value => true,
-            .none => switch (self.tag()) {
+
+            .null_value,
+            .zero,
+            .zero_usize,
+            .zero_u8,
+            => true,
+
+            .none => switch (val.tag()) {
                 .opt_payload => false,

                 // If it's not one of those two tags then it must be a C pointer value,
                 // in which case the value 0 is null and other values are non-null.
-                .zero,
-                .the_only_possible_value,
-                => true,
-
-                .one => false,
-
-                .int_u64,
-                .int_i64,
-                .int_big_positive,
-                .int_big_negative,
-                => self.orderAgainstZero(mod).compare(.eq),
+                .the_only_possible_value => true,

                 .inferred_alloc => unreachable,
                 .inferred_alloc_comptime => unreachable,

                 else => false,
             },
-            else => false,
+            else => return switch (mod.intern_pool.indexToKey(val.ip_index)) {
+                .int => |int| switch (int.storage) {
+                    .big_int => |big_int| big_int.eqZero(),
+                    inline .u64, .i64 => |x| x == 0,
+                },
+                else => unreachable,
+            },
         };
     }
@@ -3078,17 +2880,13 @@ pub const Value = struct {
         .unreachable_value => unreachable,
         .none => switch (self.tag()) {
             .@"error" => self.castTag(.@"error").?.data.name,
-            .int_u64 => @panic("TODO"),
-            .int_i64 => @panic("TODO"),
-            .int_big_positive => @panic("TODO"),
-            .int_big_negative => @panic("TODO"),
-            .one => @panic("TODO"),
+            .eu_payload => null,
+
             .inferred_alloc => unreachable,
             .inferred_alloc_comptime => unreachable,
-
-            else => null,
+            else => unreachable,
         },
-        else => null,
+        else => unreachable,
     };
 }
@@ -3147,10 +2945,10 @@ pub const Value = struct {
     pub fn intToFloatAdvanced(val: Value, arena: Allocator, int_ty: Type, float_ty: Type, mod: *Module, opt_sema: ?*Sema) !Value {
         if (int_ty.zigTypeTag(mod) == .Vector) {
             const result_data = try arena.alloc(Value, int_ty.vectorLen(mod));
+            const scalar_ty = float_ty.scalarType(mod);
             for (result_data, 0..) |*scalar, i| {
-                var buf: Value.ElemValueBuffer = undefined;
-                const elem_val = val.elemValueBuffer(mod, i, &buf);
-                scalar.* = try intToFloatScalar(elem_val, arena, float_ty.scalarType(mod), mod, opt_sema);
+                const elem_val = try val.elemValue(mod, i);
+                scalar.* = try intToFloatScalar(elem_val, arena, scalar_ty, mod, opt_sema);
             }
             return Value.Tag.aggregate.create(arena, result_data);
         }
@@ -3162,24 +2960,7 @@ pub const Value = struct {
         switch (val.ip_index) {
             .undef => return val,
             .none => switch (val.tag()) {
-                .zero, .one => return val,
-                .the_only_possible_value => return Value.initTag(.zero), // for i0, u0
-                .int_u64 => {
-                    return intToFloatInner(val.castTag(.int_u64).?.data, arena, float_ty, target);
-                },
-                .int_i64 => {
-                    return intToFloatInner(val.castTag(.int_i64).?.data, arena, float_ty, target);
-                },
-                .int_big_positive => {
-                    const limbs = val.castTag(.int_big_positive).?.data;
-                    const float = bigIntToFloat(limbs, true);
-                    return floatToValue(float, arena, float_ty, target);
-                },
-                .int_big_negative => {
-                    const limbs = val.castTag(.int_big_negative).?.data;
-                    const float = bigIntToFloat(limbs, false);
-                    return floatToValue(float, arena, float_ty, target);
-                },
+                .the_only_possible_value => return Value.zero, // for i0, u0
                 .lazy_align => {
                     const ty = val.castTag(.lazy_align).?.data;
                     if (opt_sema) |sema| {
@@ -3198,7 +2979,16 @@ pub const Value = struct {
                 },
                 else => unreachable,
             },
-            else => unreachable,
+            else => return switch (mod.intern_pool.indexToKey(val.ip_index)) {
+                .int => |int| switch (int.storage) {
+                    .big_int => |big_int| {
+                        const float = bigIntToFloat(big_int.limbs, big_int.positive);
+                        return floatToValue(float, arena, float_ty, target);
+                    },
+                    inline .u64, .i64 => |x| intToFloatInner(x, arena, float_ty, target),
+                },
+                else => unreachable,
+            },
         }
     }
@@ -3238,22 +3028,6 @@ pub const Value = struct {
         wrapped_result: Value,
     };

-    pub fn fromBigInt(arena: Allocator, big_int: BigIntConst) !Value {
-        if (big_int.positive) {
-            if (big_int.to(u64)) |x| {
-                return Value.Tag.int_u64.create(arena, x);
-            } else |_| {
-                return Value.Tag.int_big_positive.create(arena, big_int.limbs);
-            }
-        } else {
-            if (big_int.to(i64)) |x| {
-                return Value.Tag.int_i64.create(arena, x);
-            } else |_| {
-                return Value.Tag.int_big_negative.create(arena, big_int.limbs);
-            }
-        }
-    }
-
     /// Supports (vectors of) integers only; asserts neither operand is undefined.
     pub fn intAddSat(
         lhs: Value,
         rhs: Value,
         ty: Type,
         arena: Allocator,
         mod: *Module,
     ) !Value {
         if (ty.zigTypeTag(mod) == .Vector) {
             const result_data = try arena.alloc(Value, ty.vectorLen(mod));
+            const scalar_ty = ty.scalarType(mod);
             for (result_data, 0..) |*scalar, i| {
-                var lhs_buf: Value.ElemValueBuffer = undefined;
-                var rhs_buf: Value.ElemValueBuffer = undefined;
-                const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
-                const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
-                scalar.* = try intAddSatScalar(lhs_elem, rhs_elem, ty.scalarType(mod), arena, mod);
+                const lhs_elem = try lhs.elemValue(mod, i);
+                const rhs_elem = try rhs.elemValue(mod, i);
+                scalar.* = try intAddSatScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod);
             }
             return Value.Tag.aggregate.create(arena, result_data);
         }
@@ -3299,7 +3072,7 @@ pub const Value = struct {
         );
         var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
         result_bigint.addSat(lhs_bigint, rhs_bigint, info.signedness, info.bits);
-        return fromBigInt(arena, result_bigint.toConst());
+        return mod.intValue_big(ty, result_bigint.toConst());
     }

     /// Supports (vectors of) integers only; asserts neither operand is undefined.
@@ -3312,12 +3085,11 @@ pub const Value = struct {
     ) !Value {
         if (ty.zigTypeTag(mod) == .Vector) {
             const result_data = try arena.alloc(Value, ty.vectorLen(mod));
+            const scalar_ty = ty.scalarType(mod);
             for (result_data, 0..) |*scalar, i| {
-                var lhs_buf: Value.ElemValueBuffer = undefined;
-                var rhs_buf: Value.ElemValueBuffer = undefined;
-                const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
-                const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
-                scalar.* = try intSubSatScalar(lhs_elem, rhs_elem, ty.scalarType(mod), arena, mod);
+                const lhs_elem = try lhs.elemValue(mod, i);
+                const rhs_elem = try rhs.elemValue(mod, i);
+                scalar.* = try intSubSatScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod);
             }
             return Value.Tag.aggregate.create(arena, result_data);
         }
@@ -3347,7 +3119,7 @@ pub const Value = struct {
         );
         var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
         result_bigint.subSat(lhs_bigint, rhs_bigint, info.signedness, info.bits);
-        return fromBigInt(arena, result_bigint.toConst());
+        return mod.intValue_big(ty, result_bigint.toConst());
     }

     pub fn intMulWithOverflow(
@@ -3360,12 +3132,11 @@ pub const Value = struct {
         if (ty.zigTypeTag(mod) == .Vector) {
             const overflowed_data = try arena.alloc(Value, ty.vectorLen(mod));
             const result_data = try arena.alloc(Value, ty.vectorLen(mod));
+            const scalar_ty = ty.scalarType(mod);
             for (result_data, 0..) |*scalar, i| {
-                var lhs_buf: Value.ElemValueBuffer = undefined;
-                var rhs_buf: Value.ElemValueBuffer = undefined;
-                const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
-                const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
-                const of_math_result = try intMulWithOverflowScalar(lhs_elem, rhs_elem, ty.scalarType(mod), arena, mod);
+                const lhs_elem = try lhs.elemValue(mod, i);
+                const rhs_elem = try rhs.elemValue(mod, i);
+                const of_math_result = try intMulWithOverflowScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod);
                 overflowed_data[i] = of_math_result.overflow_bit;
                 scalar.* = of_math_result.wrapped_result;
             }
@@ -3408,7 +3179,7 @@ pub const Value = struct {

         return OverflowArithmeticResult{
             .overflow_bit = boolToInt(overflowed),
-            .wrapped_result = try fromBigInt(arena, result_bigint.toConst()),
+            .wrapped_result = try mod.intValue_big(ty, result_bigint.toConst()),
         };
     }

@@ -3423,10 +3194,8 @@ pub const Value = struct {
         if (ty.zigTypeTag(mod) == .Vector) {
             const result_data = try arena.alloc(Value, ty.vectorLen(mod));
             for (result_data, 0..) |*scalar, i| {
-                var lhs_buf: Value.ElemValueBuffer = undefined;
-                var rhs_buf: Value.ElemValueBuffer = undefined;
-                const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
-                const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
+                const lhs_elem = try lhs.elemValue(mod, i);
+                const rhs_elem = try rhs.elemValue(mod, i);
                 scalar.* = try numberMulWrapScalar(lhs_elem, rhs_elem, ty.scalarType(mod), arena, mod);
             }
             return Value.Tag.aggregate.create(arena, result_data);
@@ -3467,10 +3236,8 @@ pub const Value = struct {
         if (ty.zigTypeTag(mod) == .Vector) {
             const result_data = try arena.alloc(Value, ty.vectorLen(mod));
             for (result_data, 0..) |*scalar, i| {
-                var lhs_buf: Value.ElemValueBuffer = undefined;
-                var rhs_buf: Value.ElemValueBuffer = undefined;
-                const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
-                const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
+                const lhs_elem = try lhs.elemValue(mod, i);
+                const rhs_elem = try rhs.elemValue(mod, i);
                 scalar.* = try intMulSatScalar(lhs_elem, rhs_elem, ty.scalarType(mod), arena, mod);
             }
             return Value.Tag.aggregate.create(arena, result_data);
@@ -3510,7 +3277,7 @@ pub const Value = struct {
         );
         result_bigint.mul(lhs_bigint, rhs_bigint, limbs_buffer, arena);
         result_bigint.saturate(result_bigint.toConst(), info.signedness, info.bits);
-        return fromBigInt(arena, result_bigint.toConst());
+        return mod.intValue_big(ty, result_bigint.toConst());
     }

     /// Supports both floats and ints; handles undefined.
@@ -3542,8 +3309,7 @@ pub const Value = struct {
         if (ty.zigTypeTag(mod) == .Vector) {
             const result_data = try arena.alloc(Value, ty.vectorLen(mod));
             for (result_data, 0..) |*scalar, i| {
-                var buf: Value.ElemValueBuffer = undefined;
-                const elem_val = val.elemValueBuffer(mod, i, &buf);
+                const elem_val = try val.elemValue(mod, i);
                 scalar.* = try bitwiseNotScalar(elem_val, ty.scalarType(mod), arena, mod);
             }
             return Value.Tag.aggregate.create(arena, result_data);
@@ -3572,7 +3338,7 @@ pub const Value = struct {
         var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
         result_bigint.bitNotWrap(val_bigint, info.signedness, info.bits);

-        return fromBigInt(arena, result_bigint.toConst());
+        return mod.intValue_big(ty, result_bigint.toConst());
     }

     /// operands must be (vectors of) integers; handles undefined scalars.
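
The vector paths above all share one shape: hoist the scalar type out of the loop, fetch both elements fallibly, and delegate to the scalar helper, which now receives the type it should intern results against. Schematically, with `opScalar` standing in for any of the scalar helpers:

    // Schematic of the vectorized pattern used throughout this patch.
    fn vectorizedOp(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value {
        const result_data = try arena.alloc(Value, ty.vectorLen(mod));
        const scalar_ty = ty.scalarType(mod); // hoisted: computed once per vector
        for (result_data, 0..) |*scalar, i| {
            const lhs_elem = try lhs.elemValue(mod, i);
            const rhs_elem = try rhs.elemValue(mod, i);
            scalar.* = try opScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod);
        }
        return Value.Tag.aggregate.create(arena, result_data);
    }
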
@@ -3580,19 +3346,17 @@ pub const Value = struct {
         if (ty.zigTypeTag(mod) == .Vector) {
             const result_data = try allocator.alloc(Value, ty.vectorLen(mod));
             for (result_data, 0..) |*scalar, i| {
-                var lhs_buf: Value.ElemValueBuffer = undefined;
-                var rhs_buf: Value.ElemValueBuffer = undefined;
-                const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
-                const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
-                scalar.* = try bitwiseAndScalar(lhs_elem, rhs_elem, allocator, mod);
+                const lhs_elem = try lhs.elemValue(mod, i);
+                const rhs_elem = try rhs.elemValue(mod, i);
+                scalar.* = try bitwiseAndScalar(lhs_elem, rhs_elem, ty.scalarType(mod), allocator, mod);
             }
             return Value.Tag.aggregate.create(allocator, result_data);
         }
-        return bitwiseAndScalar(lhs, rhs, allocator, mod);
+        return bitwiseAndScalar(lhs, rhs, ty, allocator, mod);
     }

     /// operands must be integers; handles undefined.
-    pub fn bitwiseAndScalar(lhs: Value, rhs: Value, arena: Allocator, mod: *Module) !Value {
+    pub fn bitwiseAndScalar(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value {
         if (lhs.isUndef() or rhs.isUndef()) return Value.undef;

         // TODO is this a performance issue? maybe we should try the operation without
@@ -3608,7 +3372,7 @@ pub const Value = struct {
         );
         var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
         result_bigint.bitAnd(lhs_bigint, rhs_bigint);
-        return fromBigInt(arena, result_bigint.toConst());
+        return mod.intValue_big(ty, result_bigint.toConst());
     }

     /// operands must be (vectors of) integers; handles undefined scalars.
@@ -3616,10 +3380,8 @@ pub const Value = struct {
         if (ty.zigTypeTag(mod) == .Vector) {
             const result_data = try arena.alloc(Value, ty.vectorLen(mod));
             for (result_data, 0..) |*scalar, i| {
-                var lhs_buf: Value.ElemValueBuffer = undefined;
-                var rhs_buf: Value.ElemValueBuffer = undefined;
-                const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
-                const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
+                const lhs_elem = try lhs.elemValue(mod, i);
+                const rhs_elem = try rhs.elemValue(mod, i);
                 scalar.* = try bitwiseNandScalar(lhs_elem, rhs_elem, ty.scalarType(mod), arena, mod);
             }
             return Value.Tag.aggregate.create(arena, result_data);
@@ -3632,12 +3394,7 @@ pub const Value = struct {
         if (lhs.isUndef() or rhs.isUndef()) return Value.undef;

         const anded = try bitwiseAnd(lhs, rhs, ty, arena, mod);
-
-        const all_ones = if (ty.isSignedInt(mod))
-            try Value.Tag.int_i64.create(arena, -1)
-        else
-            try ty.maxInt(arena, mod);
-
+        const all_ones = if (ty.isSignedInt(mod)) Value.negative_one else try ty.maxIntScalar(mod);
         return bitwiseXor(anded, all_ones, ty, arena, mod);
     }

@@ -3646,19 +3403,17 @@ pub const Value = struct {
         if (ty.zigTypeTag(mod) == .Vector) {
             const result_data = try allocator.alloc(Value, ty.vectorLen(mod));
             for (result_data, 0..) |*scalar, i| {
-                var lhs_buf: Value.ElemValueBuffer = undefined;
-                var rhs_buf: Value.ElemValueBuffer = undefined;
-                const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
-                const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
-                scalar.* = try bitwiseOrScalar(lhs_elem, rhs_elem, allocator, mod);
+                const lhs_elem = try lhs.elemValue(mod, i);
+                const rhs_elem = try rhs.elemValue(mod, i);
+                scalar.* = try bitwiseOrScalar(lhs_elem, rhs_elem, ty.scalarType(mod), allocator, mod);
             }
             return Value.Tag.aggregate.create(allocator, result_data);
         }
-        return bitwiseOrScalar(lhs, rhs, allocator, mod);
+        return bitwiseOrScalar(lhs, rhs, ty, allocator, mod);
     }

     /// operands must be integers; handles undefined.
-    pub fn bitwiseOrScalar(lhs: Value, rhs: Value, arena: Allocator, mod: *Module) !Value {
+    pub fn bitwiseOrScalar(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value {
         if (lhs.isUndef() or rhs.isUndef()) return Value.undef;

         // TODO is this a performance issue? maybe we should try the operation without
@@ -3673,27 +3428,26 @@ pub const Value = struct {
         );
         var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
         result_bigint.bitOr(lhs_bigint, rhs_bigint);
-        return fromBigInt(arena, result_bigint.toConst());
+        return mod.intValue_big(ty, result_bigint.toConst());
     }

     /// operands must be (vectors of) integers; handles undefined scalars.
     pub fn bitwiseXor(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value {
         if (ty.zigTypeTag(mod) == .Vector) {
             const result_data = try allocator.alloc(Value, ty.vectorLen(mod));
+            const scalar_ty = ty.scalarType(mod);
             for (result_data, 0..) |*scalar, i| {
-                var lhs_buf: Value.ElemValueBuffer = undefined;
-                var rhs_buf: Value.ElemValueBuffer = undefined;
-                const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
-                const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
-                scalar.* = try bitwiseXorScalar(lhs_elem, rhs_elem, allocator, mod);
+                const lhs_elem = try lhs.elemValue(mod, i);
+                const rhs_elem = try rhs.elemValue(mod, i);
+                scalar.* = try bitwiseXorScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod);
             }
             return Value.Tag.aggregate.create(allocator, result_data);
         }
-        return bitwiseXorScalar(lhs, rhs, allocator, mod);
+        return bitwiseXorScalar(lhs, rhs, ty, allocator, mod);
     }

     /// operands must be integers; handles undefined.
-    pub fn bitwiseXorScalar(lhs: Value, rhs: Value, arena: Allocator, mod: *Module) !Value {
+    pub fn bitwiseXorScalar(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value {
         if (lhs.isUndef() or rhs.isUndef()) return Value.undef;

         // TODO is this a performance issue? maybe we should try the operation without
@@ -3709,25 +3463,24 @@ pub const Value = struct {
         );
         var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
         result_bigint.bitXor(lhs_bigint, rhs_bigint);
-        return fromBigInt(arena, result_bigint.toConst());
+        return mod.intValue_big(ty, result_bigint.toConst());
     }

     pub fn intDiv(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value {
         if (ty.zigTypeTag(mod) == .Vector) {
             const result_data = try allocator.alloc(Value, ty.vectorLen(mod));
+            const scalar_ty = ty.scalarType(mod);
             for (result_data, 0..) |*scalar, i| {
-                var lhs_buf: Value.ElemValueBuffer = undefined;
-                var rhs_buf: Value.ElemValueBuffer = undefined;
-                const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
-                const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
-                scalar.* = try intDivScalar(lhs_elem, rhs_elem, allocator, mod);
+                const lhs_elem = try lhs.elemValue(mod, i);
+                const rhs_elem = try rhs.elemValue(mod, i);
+                scalar.* = try intDivScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod);
             }
             return Value.Tag.aggregate.create(allocator, result_data);
         }
-        return intDivScalar(lhs, rhs, allocator, mod);
+        return intDivScalar(lhs, rhs, ty, allocator, mod);
     }

-    pub fn intDivScalar(lhs: Value, rhs: Value, allocator: Allocator, mod: *Module) !Value {
+    pub fn intDivScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value {
         // TODO is this a performance issue? maybe we should try the operation without
         // resorting to BigInt first.
         var lhs_space: Value.BigIntSpace = undefined;
@@ -3749,25 +3502,24 @@ pub const Value = struct {
         var result_q = BigIntMutable{ .limbs = limbs_q, .positive = undefined, .len = undefined };
         var result_r = BigIntMutable{ .limbs = limbs_r, .positive = undefined, .len = undefined };
         result_q.divTrunc(&result_r, lhs_bigint, rhs_bigint, limbs_buffer);
-        return fromBigInt(allocator, result_q.toConst());
+        return mod.intValue_big(ty, result_q.toConst());
     }

     pub fn intDivFloor(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value {
         if (ty.zigTypeTag(mod) == .Vector) {
             const result_data = try allocator.alloc(Value, ty.vectorLen(mod));
+            const scalar_ty = ty.scalarType(mod);
             for (result_data, 0..) |*scalar, i| {
-                var lhs_buf: Value.ElemValueBuffer = undefined;
-                var rhs_buf: Value.ElemValueBuffer = undefined;
-                const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
-                const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
-                scalar.* = try intDivFloorScalar(lhs_elem, rhs_elem, allocator, mod);
+                const lhs_elem = try lhs.elemValue(mod, i);
+                const rhs_elem = try rhs.elemValue(mod, i);
+                scalar.* = try intDivFloorScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod);
             }
             return Value.Tag.aggregate.create(allocator, result_data);
         }
-        return intDivFloorScalar(lhs, rhs, allocator, mod);
+        return intDivFloorScalar(lhs, rhs, ty, allocator, mod);
     }

-    pub fn intDivFloorScalar(lhs: Value, rhs: Value, allocator: Allocator, mod: *Module) !Value {
+    pub fn intDivFloorScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value {
         // TODO is this a performance issue? maybe we should try the operation without
         // resorting to BigInt first.
         var lhs_space: Value.BigIntSpace = undefined;
@@ -3789,25 +3541,24 @@ pub const Value = struct {
         var result_q = BigIntMutable{ .limbs = limbs_q, .positive = undefined, .len = undefined };
         var result_r = BigIntMutable{ .limbs = limbs_r, .positive = undefined, .len = undefined };
         result_q.divFloor(&result_r, lhs_bigint, rhs_bigint, limbs_buffer);
-        return fromBigInt(allocator, result_q.toConst());
+        return mod.intValue_big(ty, result_q.toConst());
     }

     pub fn intMod(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value {
         if (ty.zigTypeTag(mod) == .Vector) {
             const result_data = try allocator.alloc(Value, ty.vectorLen(mod));
+            const scalar_ty = ty.scalarType(mod);
             for (result_data, 0..) |*scalar, i| {
-                var lhs_buf: Value.ElemValueBuffer = undefined;
-                var rhs_buf: Value.ElemValueBuffer = undefined;
-                const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
-                const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
-                scalar.* = try intModScalar(lhs_elem, rhs_elem, allocator, mod);
+                const lhs_elem = try lhs.elemValue(mod, i);
+                const rhs_elem = try rhs.elemValue(mod, i);
+                scalar.* = try intModScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod);
             }
             return Value.Tag.aggregate.create(allocator, result_data);
         }
-        return intModScalar(lhs, rhs, allocator, mod);
+        return intModScalar(lhs, rhs, ty, allocator, mod);
     }

-    pub fn intModScalar(lhs: Value, rhs: Value, allocator: Allocator, mod: *Module) !Value {
+    pub fn intModScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value {
         // TODO is this a performance issue? maybe we should try the operation without
         // resorting to BigInt first.
         var lhs_space: Value.BigIntSpace = undefined;
@@ -3829,7 +3580,7 @@ pub const Value = struct {
         var result_q = BigIntMutable{ .limbs = limbs_q, .positive = undefined, .len = undefined };
         var result_r = BigIntMutable{ .limbs = limbs_r, .positive = undefined, .len = undefined };
         result_q.divFloor(&result_r, lhs_bigint, rhs_bigint, limbs_buffer);
-        return fromBigInt(allocator, result_r.toConst());
+        return mod.intValue_big(ty, result_r.toConst());
     }

     /// Returns true if the value is a floating point type and is NaN. Returns false otherwise.
@@ -3877,46 +3628,44 @@ pub const Value = struct {
     }

     pub fn floatRem(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
-        const target = mod.getTarget();
         if (float_type.zigTypeTag(mod) == .Vector) {
             const result_data = try arena.alloc(Value, float_type.vectorLen(mod));
             for (result_data, 0..) |*scalar, i| {
-                var lhs_buf: Value.ElemValueBuffer = undefined;
-                var rhs_buf: Value.ElemValueBuffer = undefined;
-                const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
-                const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
-                scalar.* = try floatRemScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), arena, target);
+                const lhs_elem = try lhs.elemValue(mod, i);
+                const rhs_elem = try rhs.elemValue(mod, i);
+                scalar.* = try floatRemScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), arena, mod);
             }
             return Value.Tag.aggregate.create(arena, result_data);
         }
-        return floatRemScalar(lhs, rhs, float_type, arena, target);
+        return floatRemScalar(lhs, rhs, float_type, arena, mod);
     }

-    pub fn floatRemScalar(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, target: Target) !Value {
+    pub fn floatRemScalar(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, mod: *const Module) !Value {
+        const target = mod.getTarget();
         switch (float_type.floatBits(target)) {
             16 => {
-                const lhs_val = lhs.toFloat(f16);
-                const rhs_val = rhs.toFloat(f16);
+                const lhs_val = lhs.toFloat(f16, mod);
+                const rhs_val = rhs.toFloat(f16, mod);
                 return Value.Tag.float_16.create(arena, @rem(lhs_val, rhs_val));
             },
             32 => {
-                const lhs_val = lhs.toFloat(f32);
-                const rhs_val = rhs.toFloat(f32);
+                const lhs_val = lhs.toFloat(f32, mod);
+                const rhs_val = rhs.toFloat(f32, mod);
                 return Value.Tag.float_32.create(arena, @rem(lhs_val, rhs_val));
             },
             64 => {
-                const lhs_val = lhs.toFloat(f64);
-                const rhs_val = rhs.toFloat(f64);
+                const lhs_val = lhs.toFloat(f64, mod);
+                const rhs_val = rhs.toFloat(f64, mod);
                 return Value.Tag.float_64.create(arena, @rem(lhs_val, rhs_val));
             },
             80 => {
-                const lhs_val = lhs.toFloat(f80);
-                const rhs_val = rhs.toFloat(f80);
+                const lhs_val = lhs.toFloat(f80, mod);
+                const rhs_val = rhs.toFloat(f80, mod);
                 return Value.Tag.float_80.create(arena, @rem(lhs_val, rhs_val));
             },
             128 => {
-                const lhs_val = lhs.toFloat(f128);
-                const rhs_val = rhs.toFloat(f128);
+                const lhs_val = lhs.toFloat(f128, mod);
+                const rhs_val = rhs.toFloat(f128, mod);
                 return Value.Tag.float_128.create(arena, @rem(lhs_val, rhs_val));
             },
             else => unreachable,
@@ -3924,46 +3673,44 @@ pub const Value = struct {
     }

     pub fn floatMod(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
-        const target = mod.getTarget();
         if (float_type.zigTypeTag(mod) == .Vector) {
             const result_data = try arena.alloc(Value, float_type.vectorLen(mod));
             for (result_data, 0..) |*scalar, i| {
-                var lhs_buf: Value.ElemValueBuffer = undefined;
-                var rhs_buf: Value.ElemValueBuffer = undefined;
-                const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
-                const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
-                scalar.* = try floatModScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), arena, target);
+                const lhs_elem = try lhs.elemValue(mod, i);
+                const rhs_elem = try rhs.elemValue(mod, i);
+                scalar.* = try floatModScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), arena, mod);
             }
             return Value.Tag.aggregate.create(arena, result_data);
         }
-        return floatModScalar(lhs, rhs, float_type, arena, target);
+        return floatModScalar(lhs, rhs, float_type, arena, mod);
     }

-    pub fn floatModScalar(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, target: Target) !Value {
+    pub fn floatModScalar(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, mod: *const Module) !Value {
+        const target = mod.getTarget();
         switch (float_type.floatBits(target)) {
             16 => {
-                const lhs_val = lhs.toFloat(f16);
-                const rhs_val = rhs.toFloat(f16);
+                const lhs_val = lhs.toFloat(f16, mod);
+                const rhs_val = rhs.toFloat(f16, mod);
                 return Value.Tag.float_16.create(arena, @mod(lhs_val, rhs_val));
             },
             32 => {
-                const lhs_val = lhs.toFloat(f32);
-                const rhs_val = rhs.toFloat(f32);
+                const lhs_val = lhs.toFloat(f32, mod);
+                const rhs_val = rhs.toFloat(f32, mod);
                 return Value.Tag.float_32.create(arena, @mod(lhs_val, rhs_val));
             },
             64 => {
-                const lhs_val = lhs.toFloat(f64);
-                const rhs_val = rhs.toFloat(f64);
+                const lhs_val = lhs.toFloat(f64, mod);
+                const rhs_val = rhs.toFloat(f64, mod);
                 return Value.Tag.float_64.create(arena, @mod(lhs_val, rhs_val));
             },
             80 => {
-                const lhs_val = lhs.toFloat(f80);
-                const rhs_val = rhs.toFloat(f80);
+                const lhs_val = lhs.toFloat(f80, mod);
                const rhs_val = rhs.toFloat(f80, mod);
                 return Value.Tag.float_80.create(arena, @mod(lhs_val, rhs_val));
             },
             128 => {
-                const lhs_val = lhs.toFloat(f128);
-                const rhs_val = rhs.toFloat(f128);
+                const lhs_val = lhs.toFloat(f128, mod);
+                const rhs_val = rhs.toFloat(f128, mod);
                 return Value.Tag.float_128.create(arena, @mod(lhs_val, rhs_val));
             },
             else => unreachable,
@@ -3973,19 +3720,18 @@ pub const Value = struct {
     pub fn intMul(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value {
         if (ty.zigTypeTag(mod) == .Vector) {
             const result_data = try allocator.alloc(Value, ty.vectorLen(mod));
+            const scalar_ty = ty.scalarType(mod);
             for (result_data, 0..) |*scalar, i| {
-                var lhs_buf: Value.ElemValueBuffer = undefined;
-                var rhs_buf: Value.ElemValueBuffer = undefined;
-                const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
-                const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
-                scalar.* = try intMulScalar(lhs_elem, rhs_elem, allocator, mod);
+                const lhs_elem = try lhs.elemValue(mod, i);
+                const rhs_elem = try rhs.elemValue(mod, i);
+                scalar.* = try intMulScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod);
             }
             return Value.Tag.aggregate.create(allocator, result_data);
         }
-        return intMulScalar(lhs, rhs, allocator, mod);
+        return intMulScalar(lhs, rhs, ty, allocator, mod);
     }

-    pub fn intMulScalar(lhs: Value, rhs: Value, allocator: Allocator, mod: *Module) !Value {
+    pub fn intMulScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value {
         // TODO is this a performance issue? maybe we should try the operation without
         // resorting to BigInt first.
         var lhs_space: Value.BigIntSpace = undefined;
@@ -4003,20 +3749,20 @@ pub const Value = struct {
         );
         defer allocator.free(limbs_buffer);
         result_bigint.mul(lhs_bigint, rhs_bigint, limbs_buffer, allocator);
-        return fromBigInt(allocator, result_bigint.toConst());
+        return mod.intValue_big(ty, result_bigint.toConst());
     }

     pub fn intTrunc(val: Value, ty: Type, allocator: Allocator, signedness: std.builtin.Signedness, bits: u16, mod: *Module) !Value {
         if (ty.zigTypeTag(mod) == .Vector) {
             const result_data = try allocator.alloc(Value, ty.vectorLen(mod));
+            const scalar_ty = ty.scalarType(mod);
             for (result_data, 0..) |*scalar, i| {
-                var buf: Value.ElemValueBuffer = undefined;
-                const elem_val = val.elemValueBuffer(mod, i, &buf);
-                scalar.* = try intTruncScalar(elem_val, allocator, signedness, bits, mod);
+                const elem_val = try val.elemValue(mod, i);
+                scalar.* = try intTruncScalar(elem_val, scalar_ty, allocator, signedness, bits, mod);
             }
             return Value.Tag.aggregate.create(allocator, result_data);
         }
-        return intTruncScalar(val, allocator, signedness, bits, mod);
+        return intTruncScalar(val, ty, allocator, signedness, bits, mod);
     }

     /// This variant may vectorize on `bits`. Asserts that `bits` is a (vector of) `u16`.
@@ -4030,19 +3776,25 @@ pub const Value = struct {
     ) !Value {
         if (ty.zigTypeTag(mod) == .Vector) {
             const result_data = try allocator.alloc(Value, ty.vectorLen(mod));
+            const scalar_ty = ty.scalarType(mod);
             for (result_data, 0..) |*scalar, i| {
-                var buf: Value.ElemValueBuffer = undefined;
-                const elem_val = val.elemValueBuffer(mod, i, &buf);
-                var bits_buf: Value.ElemValueBuffer = undefined;
-                const bits_elem = bits.elemValueBuffer(mod, i, &bits_buf);
-                scalar.* = try intTruncScalar(elem_val, allocator, signedness, @intCast(u16, bits_elem.toUnsignedInt(mod)), mod);
+                const elem_val = try val.elemValue(mod, i);
+                const bits_elem = try bits.elemValue(mod, i);
+                scalar.* = try intTruncScalar(elem_val, scalar_ty, allocator, signedness, @intCast(u16, bits_elem.toUnsignedInt(mod)), mod);
             }
             return Value.Tag.aggregate.create(allocator, result_data);
         }
-        return intTruncScalar(val, allocator, signedness, @intCast(u16, bits.toUnsignedInt(mod)), mod);
+        return intTruncScalar(val, ty, allocator, signedness, @intCast(u16, bits.toUnsignedInt(mod)), mod);
     }

-    pub fn intTruncScalar(val: Value, allocator: Allocator, signedness: std.builtin.Signedness, bits: u16, mod: *Module) !Value {
+    pub fn intTruncScalar(
+        val: Value,
+        ty: Type,
+        allocator: Allocator,
+        signedness: std.builtin.Signedness,
+        bits: u16,
+        mod: *Module,
+    ) !Value {
         if (bits == 0) return Value.zero;

         var val_space: Value.BigIntSpace = undefined;
@@ -4055,25 +3807,24 @@ pub const Value = struct {
         var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
         result_bigint.truncate(val_bigint, signedness, bits);

-        return fromBigInt(allocator, result_bigint.toConst());
+        return mod.intValue_big(ty, result_bigint.toConst());
     }

     pub fn shl(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value {
         if (ty.zigTypeTag(mod) == .Vector) {
             const result_data = try allocator.alloc(Value, ty.vectorLen(mod));
+            const scalar_ty = ty.scalarType(mod);
             for (result_data, 0..) |*scalar, i| {
-                var lhs_buf: Value.ElemValueBuffer = undefined;
-                var rhs_buf: Value.ElemValueBuffer = undefined;
-                const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
-                const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
-                scalar.* = try shlScalar(lhs_elem, rhs_elem, allocator, mod);
+                const lhs_elem = try lhs.elemValue(mod, i);
+                const rhs_elem = try rhs.elemValue(mod, i);
+                scalar.* = try shlScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod);
             }
             return Value.Tag.aggregate.create(allocator, result_data);
         }
-        return shlScalar(lhs, rhs, allocator, mod);
+        return shlScalar(lhs, rhs, ty, allocator, mod);
     }

-    pub fn shlScalar(lhs: Value, rhs: Value, allocator: Allocator, mod: *Module) !Value {
+    pub fn shlScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value {
         // TODO is this a performance issue? maybe we should try the operation without
         // resorting to BigInt first.
         var lhs_space: Value.BigIntSpace = undefined;
@@ -4089,7 +3840,7 @@ pub const Value = struct {
             .len = undefined,
         };
         result_bigint.shiftLeft(lhs_bigint, shift);
-        return fromBigInt(allocator, result_bigint.toConst());
+        return mod.intValue_big(ty, result_bigint.toConst());
     }

     pub fn shlWithOverflow(
@@ -4103,10 +3854,8 @@ pub const Value = struct {
             const overflowed_data = try allocator.alloc(Value, ty.vectorLen(mod));
             const result_data = try allocator.alloc(Value, ty.vectorLen(mod));
             for (result_data, 0..) |*scalar, i| {
-                var lhs_buf: Value.ElemValueBuffer = undefined;
-                var rhs_buf: Value.ElemValueBuffer = undefined;
-                const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
-                const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
+                const lhs_elem = try lhs.elemValue(mod, i);
+                const rhs_elem = try rhs.elemValue(mod, i);
                 const of_math_result = try shlWithOverflowScalar(lhs_elem, rhs_elem, ty.scalarType(mod), allocator, mod);
                 overflowed_data[i] = of_math_result.overflow_bit;
                 scalar.* = of_math_result.wrapped_result;
@@ -4146,7 +3895,7 @@ pub const Value = struct {
         }
         return OverflowArithmeticResult{
             .overflow_bit = boolToInt(overflowed),
-            .wrapped_result = try fromBigInt(allocator, result_bigint.toConst()),
+            .wrapped_result = try mod.intValue_big(ty, result_bigint.toConst()),
         };
     }

@@ -4160,10 +3909,8 @@ pub const Value = struct {
         if (ty.zigTypeTag(mod) == .Vector) {
             const result_data = try arena.alloc(Value, ty.vectorLen(mod));
             for (result_data, 0..) |*scalar, i| {
-                var lhs_buf: Value.ElemValueBuffer = undefined;
-                var rhs_buf: Value.ElemValueBuffer = undefined;
-                const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
-                const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
+                const lhs_elem = try lhs.elemValue(mod, i);
+                const rhs_elem = try rhs.elemValue(mod, i);
                 scalar.* = try shlSatScalar(lhs_elem, rhs_elem, ty.scalarType(mod), arena, mod);
             }
             return Value.Tag.aggregate.create(arena, result_data);
@@ -4195,7 +3942,7 @@ pub const Value = struct {
             .len = undefined,
         };
         result_bigint.shiftLeftSat(lhs_bigint, shift, info.signedness, info.bits);
-        return fromBigInt(arena, result_bigint.toConst());
+        return mod.intValue_big(ty, result_bigint.toConst());
     }

     pub fn shlTrunc(
@@ -4208,10 +3955,8 @@ pub const Value = struct {
         if (ty.zigTypeTag(mod) == .Vector) {
             const result_data = try arena.alloc(Value, ty.vectorLen(mod));
             for (result_data, 0..) |*scalar, i| {
-                var lhs_buf: Value.ElemValueBuffer = undefined;
-                var rhs_buf: Value.ElemValueBuffer = undefined;
-                const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
-                const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
+                const lhs_elem = try lhs.elemValue(mod, i);
+                const rhs_elem = try rhs.elemValue(mod, i);
                 scalar.* = try shlTruncScalar(lhs_elem, rhs_elem, ty.scalarType(mod), arena, mod);
             }
             return Value.Tag.aggregate.create(arena, result_data);
         }
@@ -4235,19 +3980,18 @@ pub const Value = struct {
     pub fn shr(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value {
         if (ty.zigTypeTag(mod) == .Vector) {
             const result_data = try allocator.alloc(Value, ty.vectorLen(mod));
+            const scalar_ty = ty.scalarType(mod);
             for (result_data, 0..) |*scalar, i| {
-                var lhs_buf: Value.ElemValueBuffer = undefined;
-                var rhs_buf: Value.ElemValueBuffer = undefined;
-                const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf);
-                const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf);
-                scalar.* = try shrScalar(lhs_elem, rhs_elem, allocator, mod);
+                const lhs_elem = try lhs.elemValue(mod, i);
+                const rhs_elem = try rhs.elemValue(mod, i);
+                scalar.* = try shrScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod);
             }
             return Value.Tag.aggregate.create(allocator, result_data);
         }
-        return shrScalar(lhs, rhs, allocator, mod);
+        return shrScalar(lhs, rhs, ty, allocator, mod);
     }

-    pub fn shrScalar(lhs: Value, rhs: Value, allocator: Allocator, mod: *Module) !Value {
+    pub fn shrScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value {
         // TODO is this a performance issue? maybe we should try the operation without
         // resorting to BigInt first.
         var lhs_space: Value.BigIntSpace = undefined;
@@ -4275,7 +4019,7 @@ pub const Value = struct {
             .len = undefined,
         };
         result_bigint.shiftRight(lhs_bigint, shift);
-        return fromBigInt(allocator, result_bigint.toConst());
+        return mod.intValue_big(ty, result_bigint.toConst());
     }

     pub fn floatNeg(
@@ -4284,31 +4028,30 @@ pub const Value = struct {
         arena: Allocator,
         mod: *Module,
     ) !Value {
-        const target = mod.getTarget();
         if (float_type.zigTypeTag(mod) == .Vector) {
             const result_data = try arena.alloc(Value, float_type.vectorLen(mod));
             for (result_data, 0..)
|*scalar, i| { - var buf: Value.ElemValueBuffer = undefined; - const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try floatNegScalar(elem_val, float_type.scalarType(mod), arena, target); + const elem_val = try val.elemValue(mod, i); + scalar.* = try floatNegScalar(elem_val, float_type.scalarType(mod), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); } - return floatNegScalar(val, float_type, arena, target); + return floatNegScalar(val, float_type, arena, mod); } pub fn floatNegScalar( val: Value, float_type: Type, arena: Allocator, - target: Target, + mod: *const Module, ) !Value { + const target = mod.getTarget(); switch (float_type.floatBits(target)) { - 16 => return Value.Tag.float_16.create(arena, -val.toFloat(f16)), - 32 => return Value.Tag.float_32.create(arena, -val.toFloat(f32)), - 64 => return Value.Tag.float_64.create(arena, -val.toFloat(f64)), - 80 => return Value.Tag.float_80.create(arena, -val.toFloat(f80)), - 128 => return Value.Tag.float_128.create(arena, -val.toFloat(f128)), + 16 => return Value.Tag.float_16.create(arena, -val.toFloat(f16, mod)), + 32 => return Value.Tag.float_32.create(arena, -val.toFloat(f32, mod)), + 64 => return Value.Tag.float_64.create(arena, -val.toFloat(f64, mod)), + 80 => return Value.Tag.float_80.create(arena, -val.toFloat(f80, mod)), + 128 => return Value.Tag.float_128.create(arena, -val.toFloat(f128, mod)), else => unreachable, } } @@ -4320,19 +4063,16 @@ pub const Value = struct { arena: Allocator, mod: *Module, ) !Value { - const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try floatDivScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), arena, target); + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + scalar.* = try floatDivScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); } - return floatDivScalar(lhs, rhs, float_type, arena, target); + return floatDivScalar(lhs, rhs, float_type, arena, mod); } pub fn floatDivScalar( @@ -4340,32 +4080,33 @@ pub const Value = struct { rhs: Value, float_type: Type, arena: Allocator, - target: Target, + mod: *const Module, ) !Value { + const target = mod.getTarget(); switch (float_type.floatBits(target)) { 16 => { - const lhs_val = lhs.toFloat(f16); - const rhs_val = rhs.toFloat(f16); + const lhs_val = lhs.toFloat(f16, mod); + const rhs_val = rhs.toFloat(f16, mod); return Value.Tag.float_16.create(arena, lhs_val / rhs_val); }, 32 => { - const lhs_val = lhs.toFloat(f32); - const rhs_val = rhs.toFloat(f32); + const lhs_val = lhs.toFloat(f32, mod); + const rhs_val = rhs.toFloat(f32, mod); return Value.Tag.float_32.create(arena, lhs_val / rhs_val); }, 64 => { - const lhs_val = lhs.toFloat(f64); - const rhs_val = rhs.toFloat(f64); + const lhs_val = lhs.toFloat(f64, mod); + const rhs_val = rhs.toFloat(f64, mod); return Value.Tag.float_64.create(arena, lhs_val / rhs_val); }, 80 => { - const lhs_val = lhs.toFloat(f80); - const rhs_val = rhs.toFloat(f80); + const lhs_val = lhs.toFloat(f80, mod); + const rhs_val = rhs.toFloat(f80, mod); return Value.Tag.float_80.create(arena, lhs_val / rhs_val); }, 128 => { - 
const lhs_val = lhs.toFloat(f128); - const rhs_val = rhs.toFloat(f128); + const lhs_val = lhs.toFloat(f128, mod); + const rhs_val = rhs.toFloat(f128, mod); return Value.Tag.float_128.create(arena, lhs_val / rhs_val); }, else => unreachable, @@ -4379,19 +4120,16 @@ pub const Value = struct { arena: Allocator, mod: *Module, ) !Value { - const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try floatDivFloorScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), arena, target); + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + scalar.* = try floatDivFloorScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); } - return floatDivFloorScalar(lhs, rhs, float_type, arena, target); + return floatDivFloorScalar(lhs, rhs, float_type, arena, mod); } pub fn floatDivFloorScalar( @@ -4399,32 +4137,33 @@ pub const Value = struct { rhs: Value, float_type: Type, arena: Allocator, - target: Target, + mod: *const Module, ) !Value { + const target = mod.getTarget(); switch (float_type.floatBits(target)) { 16 => { - const lhs_val = lhs.toFloat(f16); - const rhs_val = rhs.toFloat(f16); + const lhs_val = lhs.toFloat(f16, mod); + const rhs_val = rhs.toFloat(f16, mod); return Value.Tag.float_16.create(arena, @divFloor(lhs_val, rhs_val)); }, 32 => { - const lhs_val = lhs.toFloat(f32); - const rhs_val = rhs.toFloat(f32); + const lhs_val = lhs.toFloat(f32, mod); + const rhs_val = rhs.toFloat(f32, mod); return Value.Tag.float_32.create(arena, @divFloor(lhs_val, rhs_val)); }, 64 => { - const lhs_val = lhs.toFloat(f64); - const rhs_val = rhs.toFloat(f64); + const lhs_val = lhs.toFloat(f64, mod); + const rhs_val = rhs.toFloat(f64, mod); return Value.Tag.float_64.create(arena, @divFloor(lhs_val, rhs_val)); }, 80 => { - const lhs_val = lhs.toFloat(f80); - const rhs_val = rhs.toFloat(f80); + const lhs_val = lhs.toFloat(f80, mod); + const rhs_val = rhs.toFloat(f80, mod); return Value.Tag.float_80.create(arena, @divFloor(lhs_val, rhs_val)); }, 128 => { - const lhs_val = lhs.toFloat(f128); - const rhs_val = rhs.toFloat(f128); + const lhs_val = lhs.toFloat(f128, mod); + const rhs_val = rhs.toFloat(f128, mod); return Value.Tag.float_128.create(arena, @divFloor(lhs_val, rhs_val)); }, else => unreachable, @@ -4438,19 +4177,16 @@ pub const Value = struct { arena: Allocator, mod: *Module, ) !Value { - const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) 
|*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try floatDivTruncScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), arena, target); + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + scalar.* = try floatDivTruncScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); } - return floatDivTruncScalar(lhs, rhs, float_type, arena, target); + return floatDivTruncScalar(lhs, rhs, float_type, arena, mod); } pub fn floatDivTruncScalar( @@ -4458,32 +4194,33 @@ pub const Value = struct { rhs: Value, float_type: Type, arena: Allocator, - target: Target, + mod: *const Module, ) !Value { + const target = mod.getTarget(); switch (float_type.floatBits(target)) { 16 => { - const lhs_val = lhs.toFloat(f16); - const rhs_val = rhs.toFloat(f16); + const lhs_val = lhs.toFloat(f16, mod); + const rhs_val = rhs.toFloat(f16, mod); return Value.Tag.float_16.create(arena, @divTrunc(lhs_val, rhs_val)); }, 32 => { - const lhs_val = lhs.toFloat(f32); - const rhs_val = rhs.toFloat(f32); + const lhs_val = lhs.toFloat(f32, mod); + const rhs_val = rhs.toFloat(f32, mod); return Value.Tag.float_32.create(arena, @divTrunc(lhs_val, rhs_val)); }, 64 => { - const lhs_val = lhs.toFloat(f64); - const rhs_val = rhs.toFloat(f64); + const lhs_val = lhs.toFloat(f64, mod); + const rhs_val = rhs.toFloat(f64, mod); return Value.Tag.float_64.create(arena, @divTrunc(lhs_val, rhs_val)); }, 80 => { - const lhs_val = lhs.toFloat(f80); - const rhs_val = rhs.toFloat(f80); + const lhs_val = lhs.toFloat(f80, mod); + const rhs_val = rhs.toFloat(f80, mod); return Value.Tag.float_80.create(arena, @divTrunc(lhs_val, rhs_val)); }, 128 => { - const lhs_val = lhs.toFloat(f128); - const rhs_val = rhs.toFloat(f128); + const lhs_val = lhs.toFloat(f128, mod); + const rhs_val = rhs.toFloat(f128, mod); return Value.Tag.float_128.create(arena, @divTrunc(lhs_val, rhs_val)); }, else => unreachable, @@ -4497,19 +4234,16 @@ pub const Value = struct { arena: Allocator, mod: *Module, ) !Value { - const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) 
|*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(mod, i, &rhs_buf); - scalar.* = try floatMulScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), arena, target); + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + scalar.* = try floatMulScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); } - return floatMulScalar(lhs, rhs, float_type, arena, target); + return floatMulScalar(lhs, rhs, float_type, arena, mod); } pub fn floatMulScalar( @@ -4517,32 +4251,33 @@ pub const Value = struct { rhs: Value, float_type: Type, arena: Allocator, - target: Target, + mod: *const Module, ) !Value { + const target = mod.getTarget(); switch (float_type.floatBits(target)) { 16 => { - const lhs_val = lhs.toFloat(f16); - const rhs_val = rhs.toFloat(f16); + const lhs_val = lhs.toFloat(f16, mod); + const rhs_val = rhs.toFloat(f16, mod); return Value.Tag.float_16.create(arena, lhs_val * rhs_val); }, 32 => { - const lhs_val = lhs.toFloat(f32); - const rhs_val = rhs.toFloat(f32); + const lhs_val = lhs.toFloat(f32, mod); + const rhs_val = rhs.toFloat(f32, mod); return Value.Tag.float_32.create(arena, lhs_val * rhs_val); }, 64 => { - const lhs_val = lhs.toFloat(f64); - const rhs_val = rhs.toFloat(f64); + const lhs_val = lhs.toFloat(f64, mod); + const rhs_val = rhs.toFloat(f64, mod); return Value.Tag.float_64.create(arena, lhs_val * rhs_val); }, 80 => { - const lhs_val = lhs.toFloat(f80); - const rhs_val = rhs.toFloat(f80); + const lhs_val = lhs.toFloat(f80, mod); + const rhs_val = rhs.toFloat(f80, mod); return Value.Tag.float_80.create(arena, lhs_val * rhs_val); }, 128 => { - const lhs_val = lhs.toFloat(f128); - const rhs_val = rhs.toFloat(f128); + const lhs_val = lhs.toFloat(f128, mod); + const rhs_val = rhs.toFloat(f128, mod); return Value.Tag.float_128.create(arena, lhs_val * rhs_val); }, else => unreachable, @@ -4550,39 +4285,38 @@ pub const Value = struct { } pub fn sqrt(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { - const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) 
|*scalar, i| { - var buf: Value.ElemValueBuffer = undefined; - const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try sqrtScalar(elem_val, float_type.scalarType(mod), arena, target); + const elem_val = try val.elemValue(mod, i); + scalar.* = try sqrtScalar(elem_val, float_type.scalarType(mod), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); } - return sqrtScalar(val, float_type, arena, target); + return sqrtScalar(val, float_type, arena, mod); } - pub fn sqrtScalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value { + pub fn sqrtScalar(val: Value, float_type: Type, arena: Allocator, mod: *const Module) Allocator.Error!Value { + const target = mod.getTarget(); switch (float_type.floatBits(target)) { 16 => { - const f = val.toFloat(f16); + const f = val.toFloat(f16, mod); return Value.Tag.float_16.create(arena, @sqrt(f)); }, 32 => { - const f = val.toFloat(f32); + const f = val.toFloat(f32, mod); return Value.Tag.float_32.create(arena, @sqrt(f)); }, 64 => { - const f = val.toFloat(f64); + const f = val.toFloat(f64, mod); return Value.Tag.float_64.create(arena, @sqrt(f)); }, 80 => { - const f = val.toFloat(f80); + const f = val.toFloat(f80, mod); return Value.Tag.float_80.create(arena, @sqrt(f)); }, 128 => { - const f = val.toFloat(f128); + const f = val.toFloat(f128, mod); return Value.Tag.float_128.create(arena, @sqrt(f)); }, else => unreachable, @@ -4590,39 +4324,38 @@ pub const Value = struct { } pub fn sin(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { - const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { - var buf: Value.ElemValueBuffer = undefined; - const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try sinScalar(elem_val, float_type.scalarType(mod), arena, target); + const elem_val = try val.elemValue(mod, i); + scalar.* = try sinScalar(elem_val, float_type.scalarType(mod), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); } - return sinScalar(val, float_type, arena, target); + return sinScalar(val, float_type, arena, mod); } - pub fn sinScalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value { + pub fn sinScalar(val: Value, float_type: Type, arena: Allocator, mod: *const Module) Allocator.Error!Value { + const target = mod.getTarget(); switch (float_type.floatBits(target)) { 16 => { - const f = val.toFloat(f16); + const f = val.toFloat(f16, mod); return Value.Tag.float_16.create(arena, @sin(f)); }, 32 => { - const f = val.toFloat(f32); + const f = val.toFloat(f32, mod); return Value.Tag.float_32.create(arena, @sin(f)); }, 64 => { - const f = val.toFloat(f64); + const f = val.toFloat(f64, mod); return Value.Tag.float_64.create(arena, @sin(f)); }, 80 => { - const f = val.toFloat(f80); + const f = val.toFloat(f80, mod); return Value.Tag.float_80.create(arena, @sin(f)); }, 128 => { - const f = val.toFloat(f128); + const f = val.toFloat(f128, mod); return Value.Tag.float_128.create(arena, @sin(f)); }, else => unreachable, @@ -4630,39 +4363,38 @@ pub const Value = struct { } pub fn cos(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { - const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) 
|*scalar, i| { - var buf: Value.ElemValueBuffer = undefined; - const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try cosScalar(elem_val, float_type.scalarType(mod), arena, target); + const elem_val = try val.elemValue(mod, i); + scalar.* = try cosScalar(elem_val, float_type.scalarType(mod), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); } - return cosScalar(val, float_type, arena, target); + return cosScalar(val, float_type, arena, mod); } - pub fn cosScalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value { + pub fn cosScalar(val: Value, float_type: Type, arena: Allocator, mod: *const Module) Allocator.Error!Value { + const target = mod.getTarget(); switch (float_type.floatBits(target)) { 16 => { - const f = val.toFloat(f16); + const f = val.toFloat(f16, mod); return Value.Tag.float_16.create(arena, @cos(f)); }, 32 => { - const f = val.toFloat(f32); + const f = val.toFloat(f32, mod); return Value.Tag.float_32.create(arena, @cos(f)); }, 64 => { - const f = val.toFloat(f64); + const f = val.toFloat(f64, mod); return Value.Tag.float_64.create(arena, @cos(f)); }, 80 => { - const f = val.toFloat(f80); + const f = val.toFloat(f80, mod); return Value.Tag.float_80.create(arena, @cos(f)); }, 128 => { - const f = val.toFloat(f128); + const f = val.toFloat(f128, mod); return Value.Tag.float_128.create(arena, @cos(f)); }, else => unreachable, @@ -4670,39 +4402,38 @@ pub const Value = struct { } pub fn tan(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { - const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { - var buf: Value.ElemValueBuffer = undefined; - const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try tanScalar(elem_val, float_type.scalarType(mod), arena, target); + const elem_val = try val.elemValue(mod, i); + scalar.* = try tanScalar(elem_val, float_type.scalarType(mod), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); } - return tanScalar(val, float_type, arena, target); + return tanScalar(val, float_type, arena, mod); } - pub fn tanScalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value { + pub fn tanScalar(val: Value, float_type: Type, arena: Allocator, mod: *const Module) Allocator.Error!Value { + const target = mod.getTarget(); switch (float_type.floatBits(target)) { 16 => { - const f = val.toFloat(f16); + const f = val.toFloat(f16, mod); return Value.Tag.float_16.create(arena, @tan(f)); }, 32 => { - const f = val.toFloat(f32); + const f = val.toFloat(f32, mod); return Value.Tag.float_32.create(arena, @tan(f)); }, 64 => { - const f = val.toFloat(f64); + const f = val.toFloat(f64, mod); return Value.Tag.float_64.create(arena, @tan(f)); }, 80 => { - const f = val.toFloat(f80); + const f = val.toFloat(f80, mod); return Value.Tag.float_80.create(arena, @tan(f)); }, 128 => { - const f = val.toFloat(f128); + const f = val.toFloat(f128, mod); return Value.Tag.float_128.create(arena, @tan(f)); }, else => unreachable, @@ -4710,39 +4441,38 @@ pub const Value = struct { } pub fn exp(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { - const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) 
|*scalar, i| { - var buf: Value.ElemValueBuffer = undefined; - const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try expScalar(elem_val, float_type.scalarType(mod), arena, target); + const elem_val = try val.elemValue(mod, i); + scalar.* = try expScalar(elem_val, float_type.scalarType(mod), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); } - return expScalar(val, float_type, arena, target); + return expScalar(val, float_type, arena, mod); } - pub fn expScalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value { + pub fn expScalar(val: Value, float_type: Type, arena: Allocator, mod: *const Module) Allocator.Error!Value { + const target = mod.getTarget(); switch (float_type.floatBits(target)) { 16 => { - const f = val.toFloat(f16); + const f = val.toFloat(f16, mod); return Value.Tag.float_16.create(arena, @exp(f)); }, 32 => { - const f = val.toFloat(f32); + const f = val.toFloat(f32, mod); return Value.Tag.float_32.create(arena, @exp(f)); }, 64 => { - const f = val.toFloat(f64); + const f = val.toFloat(f64, mod); return Value.Tag.float_64.create(arena, @exp(f)); }, 80 => { - const f = val.toFloat(f80); + const f = val.toFloat(f80, mod); return Value.Tag.float_80.create(arena, @exp(f)); }, 128 => { - const f = val.toFloat(f128); + const f = val.toFloat(f128, mod); return Value.Tag.float_128.create(arena, @exp(f)); }, else => unreachable, @@ -4750,39 +4480,38 @@ pub const Value = struct { } pub fn exp2(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { - const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { - var buf: Value.ElemValueBuffer = undefined; - const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try exp2Scalar(elem_val, float_type.scalarType(mod), arena, target); + const elem_val = try val.elemValue(mod, i); + scalar.* = try exp2Scalar(elem_val, float_type.scalarType(mod), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); } - return exp2Scalar(val, float_type, arena, target); + return exp2Scalar(val, float_type, arena, mod); } - pub fn exp2Scalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value { + pub fn exp2Scalar(val: Value, float_type: Type, arena: Allocator, mod: *const Module) Allocator.Error!Value { + const target = mod.getTarget(); switch (float_type.floatBits(target)) { 16 => { - const f = val.toFloat(f16); + const f = val.toFloat(f16, mod); return Value.Tag.float_16.create(arena, @exp2(f)); }, 32 => { - const f = val.toFloat(f32); + const f = val.toFloat(f32, mod); return Value.Tag.float_32.create(arena, @exp2(f)); }, 64 => { - const f = val.toFloat(f64); + const f = val.toFloat(f64, mod); return Value.Tag.float_64.create(arena, @exp2(f)); }, 80 => { - const f = val.toFloat(f80); + const f = val.toFloat(f80, mod); return Value.Tag.float_80.create(arena, @exp2(f)); }, 128 => { - const f = val.toFloat(f128); + const f = val.toFloat(f128, mod); return Value.Tag.float_128.create(arena, @exp2(f)); }, else => unreachable, @@ -4790,39 +4519,38 @@ pub const Value = struct { } pub fn log(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { - const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) 
|*scalar, i| { - var buf: Value.ElemValueBuffer = undefined; - const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try logScalar(elem_val, float_type.scalarType(mod), arena, target); + const elem_val = try val.elemValue(mod, i); + scalar.* = try logScalar(elem_val, float_type.scalarType(mod), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); } - return logScalar(val, float_type, arena, target); + return logScalar(val, float_type, arena, mod); } - pub fn logScalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value { + pub fn logScalar(val: Value, float_type: Type, arena: Allocator, mod: *const Module) Allocator.Error!Value { + const target = mod.getTarget(); switch (float_type.floatBits(target)) { 16 => { - const f = val.toFloat(f16); + const f = val.toFloat(f16, mod); return Value.Tag.float_16.create(arena, @log(f)); }, 32 => { - const f = val.toFloat(f32); + const f = val.toFloat(f32, mod); return Value.Tag.float_32.create(arena, @log(f)); }, 64 => { - const f = val.toFloat(f64); + const f = val.toFloat(f64, mod); return Value.Tag.float_64.create(arena, @log(f)); }, 80 => { - const f = val.toFloat(f80); + const f = val.toFloat(f80, mod); return Value.Tag.float_80.create(arena, @log(f)); }, 128 => { - const f = val.toFloat(f128); + const f = val.toFloat(f128, mod); return Value.Tag.float_128.create(arena, @log(f)); }, else => unreachable, @@ -4830,39 +4558,38 @@ pub const Value = struct { } pub fn log2(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { - const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { - var buf: Value.ElemValueBuffer = undefined; - const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try log2Scalar(elem_val, float_type.scalarType(mod), arena, target); + const elem_val = try val.elemValue(mod, i); + scalar.* = try log2Scalar(elem_val, float_type.scalarType(mod), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); } - return log2Scalar(val, float_type, arena, target); + return log2Scalar(val, float_type, arena, mod); } - pub fn log2Scalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value { + pub fn log2Scalar(val: Value, float_type: Type, arena: Allocator, mod: *const Module) Allocator.Error!Value { + const target = mod.getTarget(); switch (float_type.floatBits(target)) { 16 => { - const f = val.toFloat(f16); + const f = val.toFloat(f16, mod); return Value.Tag.float_16.create(arena, @log2(f)); }, 32 => { - const f = val.toFloat(f32); + const f = val.toFloat(f32, mod); return Value.Tag.float_32.create(arena, @log2(f)); }, 64 => { - const f = val.toFloat(f64); + const f = val.toFloat(f64, mod); return Value.Tag.float_64.create(arena, @log2(f)); }, 80 => { - const f = val.toFloat(f80); + const f = val.toFloat(f80, mod); return Value.Tag.float_80.create(arena, @log2(f)); }, 128 => { - const f = val.toFloat(f128); + const f = val.toFloat(f128, mod); return Value.Tag.float_128.create(arena, @log2(f)); }, else => unreachable, @@ -4870,39 +4597,38 @@ pub const Value = struct { } pub fn log10(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { - const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) 
|*scalar, i| { - var buf: Value.ElemValueBuffer = undefined; - const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try log10Scalar(elem_val, float_type.scalarType(mod), arena, target); + const elem_val = try val.elemValue(mod, i); + scalar.* = try log10Scalar(elem_val, float_type.scalarType(mod), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); } - return log10Scalar(val, float_type, arena, target); + return log10Scalar(val, float_type, arena, mod); } - pub fn log10Scalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value { + pub fn log10Scalar(val: Value, float_type: Type, arena: Allocator, mod: *const Module) Allocator.Error!Value { + const target = mod.getTarget(); switch (float_type.floatBits(target)) { 16 => { - const f = val.toFloat(f16); + const f = val.toFloat(f16, mod); return Value.Tag.float_16.create(arena, @log10(f)); }, 32 => { - const f = val.toFloat(f32); + const f = val.toFloat(f32, mod); return Value.Tag.float_32.create(arena, @log10(f)); }, 64 => { - const f = val.toFloat(f64); + const f = val.toFloat(f64, mod); return Value.Tag.float_64.create(arena, @log10(f)); }, 80 => { - const f = val.toFloat(f80); + const f = val.toFloat(f80, mod); return Value.Tag.float_80.create(arena, @log10(f)); }, 128 => { - const f = val.toFloat(f128); + const f = val.toFloat(f128, mod); return Value.Tag.float_128.create(arena, @log10(f)); }, else => unreachable, @@ -4910,39 +4636,38 @@ pub const Value = struct { } pub fn fabs(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { - const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { - var buf: Value.ElemValueBuffer = undefined; - const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try fabsScalar(elem_val, float_type.scalarType(mod), arena, target); + const elem_val = try val.elemValue(mod, i); + scalar.* = try fabsScalar(elem_val, float_type.scalarType(mod), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); } - return fabsScalar(val, float_type, arena, target); + return fabsScalar(val, float_type, arena, mod); } - pub fn fabsScalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value { + pub fn fabsScalar(val: Value, float_type: Type, arena: Allocator, mod: *const Module) Allocator.Error!Value { + const target = mod.getTarget(); switch (float_type.floatBits(target)) { 16 => { - const f = val.toFloat(f16); + const f = val.toFloat(f16, mod); return Value.Tag.float_16.create(arena, @fabs(f)); }, 32 => { - const f = val.toFloat(f32); + const f = val.toFloat(f32, mod); return Value.Tag.float_32.create(arena, @fabs(f)); }, 64 => { - const f = val.toFloat(f64); + const f = val.toFloat(f64, mod); return Value.Tag.float_64.create(arena, @fabs(f)); }, 80 => { - const f = val.toFloat(f80); + const f = val.toFloat(f80, mod); return Value.Tag.float_80.create(arena, @fabs(f)); }, 128 => { - const f = val.toFloat(f128); + const f = val.toFloat(f128, mod); return Value.Tag.float_128.create(arena, @fabs(f)); }, else => unreachable, @@ -4950,39 +4675,38 @@ pub const Value = struct { } pub fn floor(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { - const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) 
|*scalar, i| { - var buf: Value.ElemValueBuffer = undefined; - const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try floorScalar(elem_val, float_type.scalarType(mod), arena, target); + const elem_val = try val.elemValue(mod, i); + scalar.* = try floorScalar(elem_val, float_type.scalarType(mod), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); } - return floorScalar(val, float_type, arena, target); + return floorScalar(val, float_type, arena, mod); } - pub fn floorScalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value { + pub fn floorScalar(val: Value, float_type: Type, arena: Allocator, mod: *const Module) Allocator.Error!Value { + const target = mod.getTarget(); switch (float_type.floatBits(target)) { 16 => { - const f = val.toFloat(f16); + const f = val.toFloat(f16, mod); return Value.Tag.float_16.create(arena, @floor(f)); }, 32 => { - const f = val.toFloat(f32); + const f = val.toFloat(f32, mod); return Value.Tag.float_32.create(arena, @floor(f)); }, 64 => { - const f = val.toFloat(f64); + const f = val.toFloat(f64, mod); return Value.Tag.float_64.create(arena, @floor(f)); }, 80 => { - const f = val.toFloat(f80); + const f = val.toFloat(f80, mod); return Value.Tag.float_80.create(arena, @floor(f)); }, 128 => { - const f = val.toFloat(f128); + const f = val.toFloat(f128, mod); return Value.Tag.float_128.create(arena, @floor(f)); }, else => unreachable, @@ -4990,39 +4714,38 @@ pub const Value = struct { } pub fn ceil(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { - const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { - var buf: Value.ElemValueBuffer = undefined; - const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try ceilScalar(elem_val, float_type.scalarType(mod), arena, target); + const elem_val = try val.elemValue(mod, i); + scalar.* = try ceilScalar(elem_val, float_type.scalarType(mod), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); } - return ceilScalar(val, float_type, arena, target); + return ceilScalar(val, float_type, arena, mod); } - pub fn ceilScalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value { + pub fn ceilScalar(val: Value, float_type: Type, arena: Allocator, mod: *const Module) Allocator.Error!Value { + const target = mod.getTarget(); switch (float_type.floatBits(target)) { 16 => { - const f = val.toFloat(f16); + const f = val.toFloat(f16, mod); return Value.Tag.float_16.create(arena, @ceil(f)); }, 32 => { - const f = val.toFloat(f32); + const f = val.toFloat(f32, mod); return Value.Tag.float_32.create(arena, @ceil(f)); }, 64 => { - const f = val.toFloat(f64); + const f = val.toFloat(f64, mod); return Value.Tag.float_64.create(arena, @ceil(f)); }, 80 => { - const f = val.toFloat(f80); + const f = val.toFloat(f80, mod); return Value.Tag.float_80.create(arena, @ceil(f)); }, 128 => { - const f = val.toFloat(f128); + const f = val.toFloat(f128, mod); return Value.Tag.float_128.create(arena, @ceil(f)); }, else => unreachable, @@ -5030,39 +4753,38 @@ pub const Value = struct { } pub fn round(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { - const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) 
|*scalar, i| { - var buf: Value.ElemValueBuffer = undefined; - const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try roundScalar(elem_val, float_type.scalarType(mod), arena, target); + const elem_val = try val.elemValue(mod, i); + scalar.* = try roundScalar(elem_val, float_type.scalarType(mod), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); } - return roundScalar(val, float_type, arena, target); + return roundScalar(val, float_type, arena, mod); } - pub fn roundScalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value { + pub fn roundScalar(val: Value, float_type: Type, arena: Allocator, mod: *const Module) Allocator.Error!Value { + const target = mod.getTarget(); switch (float_type.floatBits(target)) { 16 => { - const f = val.toFloat(f16); + const f = val.toFloat(f16, mod); return Value.Tag.float_16.create(arena, @round(f)); }, 32 => { - const f = val.toFloat(f32); + const f = val.toFloat(f32, mod); return Value.Tag.float_32.create(arena, @round(f)); }, 64 => { - const f = val.toFloat(f64); + const f = val.toFloat(f64, mod); return Value.Tag.float_64.create(arena, @round(f)); }, 80 => { - const f = val.toFloat(f80); + const f = val.toFloat(f80, mod); return Value.Tag.float_80.create(arena, @round(f)); }, 128 => { - const f = val.toFloat(f128); + const f = val.toFloat(f128, mod); return Value.Tag.float_128.create(arena, @round(f)); }, else => unreachable, @@ -5070,39 +4792,38 @@ pub const Value = struct { } pub fn trunc(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { - const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { - var buf: Value.ElemValueBuffer = undefined; - const elem_val = val.elemValueBuffer(mod, i, &buf); - scalar.* = try truncScalar(elem_val, float_type.scalarType(mod), arena, target); + const elem_val = try val.elemValue(mod, i); + scalar.* = try truncScalar(elem_val, float_type.scalarType(mod), arena, mod); } return Value.Tag.aggregate.create(arena, result_data); } - return truncScalar(val, float_type, arena, target); + return truncScalar(val, float_type, arena, mod); } - pub fn truncScalar(val: Value, float_type: Type, arena: Allocator, target: Target) Allocator.Error!Value { + pub fn truncScalar(val: Value, float_type: Type, arena: Allocator, mod: *const Module) Allocator.Error!Value { + const target = mod.getTarget(); switch (float_type.floatBits(target)) { 16 => { - const f = val.toFloat(f16); + const f = val.toFloat(f16, mod); return Value.Tag.float_16.create(arena, @trunc(f)); }, 32 => { - const f = val.toFloat(f32); + const f = val.toFloat(f32, mod); return Value.Tag.float_32.create(arena, @trunc(f)); }, 64 => { - const f = val.toFloat(f64); + const f = val.toFloat(f64, mod); return Value.Tag.float_64.create(arena, @trunc(f)); }, 80 => { - const f = val.toFloat(f80); + const f = val.toFloat(f80, mod); return Value.Tag.float_80.create(arena, @trunc(f)); }, 128 => { - const f = val.toFloat(f128); + const f = val.toFloat(f128, mod); return Value.Tag.float_128.create(arena, @trunc(f)); }, else => unreachable, @@ -5117,28 +4838,24 @@ pub const Value = struct { arena: Allocator, mod: *Module, ) !Value { - const target = mod.getTarget(); if (float_type.zigTypeTag(mod) == .Vector) { const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); for (result_data, 0..) 
|*scalar, i| { - var mulend1_buf: Value.ElemValueBuffer = undefined; - const mulend1_elem = mulend1.elemValueBuffer(mod, i, &mulend1_buf); - var mulend2_buf: Value.ElemValueBuffer = undefined; - const mulend2_elem = mulend2.elemValueBuffer(mod, i, &mulend2_buf); - var addend_buf: Value.ElemValueBuffer = undefined; - const addend_elem = addend.elemValueBuffer(mod, i, &addend_buf); + const mulend1_elem = try mulend1.elemValue(mod, i); + const mulend2_elem = try mulend2.elemValue(mod, i); + const addend_elem = try addend.elemValue(mod, i); scalar.* = try mulAddScalar( float_type.scalarType(mod), mulend1_elem, mulend2_elem, addend_elem, arena, - target, + mod, ); } return Value.Tag.aggregate.create(arena, result_data); } - return mulAddScalar(float_type, mulend1, mulend2, addend, arena, target); + return mulAddScalar(float_type, mulend1, mulend2, addend, arena, mod); } pub fn mulAddScalar( @@ -5147,37 +4864,38 @@ pub const Value = struct { mulend2: Value, addend: Value, arena: Allocator, - target: Target, + mod: *const Module, ) Allocator.Error!Value { + const target = mod.getTarget(); switch (float_type.floatBits(target)) { 16 => { - const m1 = mulend1.toFloat(f16); - const m2 = mulend2.toFloat(f16); - const a = addend.toFloat(f16); + const m1 = mulend1.toFloat(f16, mod); + const m2 = mulend2.toFloat(f16, mod); + const a = addend.toFloat(f16, mod); return Value.Tag.float_16.create(arena, @mulAdd(f16, m1, m2, a)); }, 32 => { - const m1 = mulend1.toFloat(f32); - const m2 = mulend2.toFloat(f32); - const a = addend.toFloat(f32); + const m1 = mulend1.toFloat(f32, mod); + const m2 = mulend2.toFloat(f32, mod); + const a = addend.toFloat(f32, mod); return Value.Tag.float_32.create(arena, @mulAdd(f32, m1, m2, a)); }, 64 => { - const m1 = mulend1.toFloat(f64); - const m2 = mulend2.toFloat(f64); - const a = addend.toFloat(f64); + const m1 = mulend1.toFloat(f64, mod); + const m2 = mulend2.toFloat(f64, mod); + const a = addend.toFloat(f64, mod); return Value.Tag.float_64.create(arena, @mulAdd(f64, m1, m2, a)); }, 80 => { - const m1 = mulend1.toFloat(f80); - const m2 = mulend2.toFloat(f80); - const a = addend.toFloat(f80); + const m1 = mulend1.toFloat(f80, mod); + const m2 = mulend2.toFloat(f80, mod); + const a = addend.toFloat(f80, mod); return Value.Tag.float_80.create(arena, @mulAdd(f80, m1, m2, a)); }, 128 => { - const m1 = mulend1.toFloat(f128); - const m2 = mulend2.toFloat(f128); - const a = addend.toFloat(f128); + const m1 = mulend1.toFloat(f128, mod); + const m2 = mulend2.toFloat(f128, mod); + const a = addend.toFloat(f128, mod); return Value.Tag.float_128.create(arena, @mulAdd(f128, m1, m2, a)); }, else => unreachable, @@ -5186,13 +4904,14 @@ pub const Value = struct { /// If the value is represented in-memory as a series of bytes that all /// have the same value, return that byte value, otherwise null. - pub fn hasRepeatedByteRepr(val: Value, ty: Type, mod: *Module, value_buffer: *Payload.U64) !?Value { + pub fn hasRepeatedByteRepr(val: Value, ty: Type, mod: *Module) !?Value { const abi_size = std.math.cast(usize, ty.abiSize(mod)) orelse return null; assert(abi_size >= 1); const byte_buffer = try mod.gpa.alloc(u8, abi_size); defer mod.gpa.free(byte_buffer); writeToMemory(val, ty, mod, byte_buffer) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, error.ReinterpretDeclRef => return null, // TODO: The writeToMemory function was originally created for the purpose // of comptime pointer casting. 
However, it is now additionally being used @@ -5206,11 +4925,7 @@ pub const Value = struct { for (byte_buffer[1..]) |byte| { if (byte != first_byte) return null; } - value_buffer.* = .{ - .base = .{ .tag = .int_u64 }, - .data = first_byte, - }; - return initPayload(&value_buffer.base); + return try mod.intValue(Type.u8, first_byte); } pub fn isGenericPoison(val: Value) bool { @@ -5226,30 +4941,6 @@ pub const Value = struct { data: u32, }; - pub const U64 = struct { - base: Payload, - data: u64, - }; - - pub const I64 = struct { - base: Payload, - data: i64, - }; - - pub const BigInt = struct { - base: Payload, - data: []const std.math.big.Limb, - - pub fn asBigInt(self: BigInt) BigIntConst { - const positive = switch (self.base.tag) { - .int_big_positive => true, - .int_big_negative => false, - else => unreachable, - }; - return BigIntConst{ .limbs = self.data, .positive = positive }; - } - }; - pub const Function = struct { base: Payload, data: *Module.Fn, @@ -5452,12 +5143,9 @@ pub const Value = struct { pub const BigIntSpace = InternPool.Key.Int.Storage.BigIntSpace; - pub const zero = initTag(.zero); - pub const one = initTag(.one); - pub const negative_one: Value = .{ - .ip_index = .none, - .legacy = .{ .ptr_otherwise = &negative_one_payload.base }, - }; + pub const zero: Value = .{ .ip_index = .zero, .legacy = undefined }; + pub const one: Value = .{ .ip_index = .one, .legacy = undefined }; + pub const negative_one: Value = .{ .ip_index = .negative_one, .legacy = undefined }; pub const undef: Value = .{ .ip_index = .undef, .legacy = undefined }; pub const @"void": Value = .{ .ip_index = .void_value, .legacy = undefined }; pub const @"null": Value = .{ .ip_index = .null_value, .legacy = undefined }; @@ -5515,8 +5203,3 @@ pub const Value = struct { } } }; - -var negative_one_payload: Value.Payload.I64 = .{ - .base = .{ .tag = .int_i64 }, - .data = -1, -}; -- cgit v1.2.3 From 2ffef605c75b62ba49e21bfb3256537a4a2c0a5e Mon Sep 17 00:00:00 2001 From: mlugg Date: Sun, 7 May 2023 22:12:04 +0100 Subject: Replace uses of Value.zero, Value.one, Value.negative_one This is a bit nasty, mainly because Type.onePossibleValue is now errorable, which is a quite viral change. --- src/Air.zig | 2 +- src/Module.zig | 12 +- src/Sema.zig | 257 ++++++++++++++++++++++++++----------------- src/TypedValue.zig | 13 ++- src/arch/aarch64/CodeGen.zig | 4 +- src/arch/arm/CodeGen.zig | 4 +- src/arch/riscv64/CodeGen.zig | 4 +- src/arch/sparc64/CodeGen.zig | 4 +- src/arch/wasm/CodeGen.zig | 10 +- src/arch/x86_64/CodeGen.zig | 12 +- src/codegen.zig | 8 +- src/codegen/c.zig | 64 +++++------ src/codegen/llvm.zig | 18 +-- src/codegen/spirv.zig | 10 +- src/type.zig | 44 ++++---- src/value.zig | 43 ++++---- 16 files changed, 286 insertions(+), 223 deletions(-) (limited to 'src/arch') diff --git a/src/Air.zig b/src/Air.zig index 549583e697..8059b9e57f 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -1485,7 +1485,7 @@ pub fn refToIndexAllowNone(inst: Inst.Ref) ?Inst.Index { } /// Returns `null` if runtime-known. 
-pub fn value(air: Air, inst: Inst.Ref, mod: *const Module) ?Value { +pub fn value(air: Air, inst: Inst.Ref, mod: *Module) !?Value { const ref_int = @enumToInt(inst); if (ref_int < ref_start_index) { const ip_index = @intToEnum(InternPool.Index, ref_int); diff --git a/src/Module.zig b/src/Module.zig index f56235c933..3f5dc8039e 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -5750,7 +5750,7 @@ pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air { const arg_val = if (!arg_tv.val.isGenericPoison()) arg_tv.val - else if (arg_tv.ty.onePossibleValue(mod)) |opv| + else if (try arg_tv.ty.onePossibleValue(mod)) |opv| opv else break :t arg_tv.ty; @@ -6887,6 +6887,16 @@ pub fn singleConstPtrType(mod: *Module, child_type: Type) Allocator.Error!Type { } pub fn intValue(mod: *Module, ty: Type, x: anytype) Allocator.Error!Value { + if (std.debug.runtime_safety) { + // TODO: decide if this also works for ABI int types like enums + const tag = ty.zigTypeTag(mod); + assert(tag == .Int or tag == .ComptimeInt); + } + if (@TypeOf(x) == comptime_int) { + if (comptime std.math.cast(u64, x)) |casted| return intValue_u64(mod, ty, casted); + if (comptime std.math.cast(i64, x)) |casted| return intValue_i64(mod, ty, casted); + @compileError("Out-of-range comptime_int passed to Module.intValue"); + } if (std.math.cast(u64, x)) |casted| return intValue_u64(mod, ty, casted); if (std.math.cast(i64, x)) |casted| return intValue_i64(mod, ty, casted); var limbs_buffer: [4]usize = undefined; diff --git a/src/Sema.zig b/src/Sema.zig index dc5bb1cdea..9b1da74982 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -3062,9 +3062,9 @@ fn zirEnumDecl( } } else if (any_values) { const tag_val = if (last_tag_val) |val| - try sema.intAdd(val, Value.one, enum_obj.tag_ty) + try sema.intAdd(val, try mod.intValue(enum_obj.tag_ty, 1), enum_obj.tag_ty) else - Value.zero; + try mod.intValue(enum_obj.tag_ty, 0); last_tag_val = tag_val; const copied_tag_val = try tag_val.copy(decl_arena_allocator); const gop_val = enum_obj.values.getOrPutAssumeCapacityContext(copied_tag_val, .{ @@ -4709,7 +4709,7 @@ fn zirValidateArrayInit( // Determine whether the value stored to this pointer is comptime-known. 
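            // A minimal illustration of the "viral" errorability the commit
            // message describes: helpers such as Type.onePossibleValue and
            // Type.structFieldValueComptime now return error unions, so each
            // call site in this hunk (and many below) simply gains a `try`:
            //     if (try array_ty.structFieldValueComptime(mod, i)) |opv| { ... }
            // Nothing else about the control flow changes beyond error
            // propagation.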
if (array_ty.isTuple()) { - if (array_ty.structFieldValueComptime(mod, i)) |opv| { + if (try array_ty.structFieldValueComptime(mod, i)) |opv| { element_vals[i] = opv; continue; } @@ -8132,7 +8132,7 @@ fn zirErrorToInt(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat if (!op_ty.isAnyError()) { const names = op_ty.errorSetNames(); switch (names.len) { - 0 => return sema.addConstant(Type.err_int, Value.zero), + 0 => return sema.addConstant(Type.err_int, try mod.intValue(Type.err_int, 0)), 1 => return sema.addIntUnsigned(Type.err_int, sema.mod.global_error_set.get(names[0]).?), else => {}, } @@ -8167,7 +8167,7 @@ fn zirIntToError(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat try sema.requireRuntimeBlock(block, src, operand_src); if (block.wantSafety()) { const is_lt_len = try block.addUnOp(.cmp_lt_errors_len, operand); - const zero_val = try sema.addConstant(Type.err_int, Value.zero); + const zero_val = try sema.addConstant(Type.err_int, try mod.intValue(Type.err_int, 0)); const is_non_zero = try block.addBinOp(.cmp_neq, operand, zero_val); const ok = try block.addBinOp(.bit_and, is_lt_len, is_non_zero); try sema.addSafetyCheck(block, ok, .invalid_error_code); @@ -9656,7 +9656,7 @@ fn intCast( if (wanted_bits == 0) { const ok = if (is_vector) ok: { - const zeros = try Value.Tag.repeated.create(sema.arena, Value.zero); + const zeros = try Value.Tag.repeated.create(sema.arena, try mod.intValue(operand_scalar_ty, 0)); const zero_inst = try sema.addConstant(sema.typeOf(operand), zeros); const is_in_range = try block.addCmpVector(operand, zero_inst, .eq); const all_in_range = try block.addInst(.{ @@ -9665,7 +9665,7 @@ fn intCast( }); break :ok all_in_range; } else ok: { - const zero_inst = try sema.addConstant(sema.typeOf(operand), Value.zero); + const zero_inst = try sema.addConstant(sema.typeOf(operand), try mod.intValue(operand_ty, 0)); const is_in_range = try block.addBinOp(.cmp_lte, operand, zero_inst); break :ok is_in_range; }; @@ -9705,8 +9705,9 @@ fn intCast( // If the destination type is signed, then we need to double its // range to account for negative values. 
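    // Worked example (illustrative): when casting to i8, dest_max_val is 127,
    // so the doubled range below computes (127 << 1) + 1 = 255, the widest
    // value an 8-bit pattern can carry once the sign bit is counted. Note the
    // shift amount and addend are now obtained via
    //     try mod.intValue(unsigned_operand_ty, 1)
    // where the old code used the untyped Value.one.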
const dest_range_val = if (wanted_info.signedness == .signed) range_val: { - const range_minus_one = try dest_max_val.shl(Value.one, unsigned_operand_ty, sema.arena, sema.mod); - break :range_val try sema.intAdd(range_minus_one, Value.one, unsigned_operand_ty); + const one = try mod.intValue(unsigned_operand_ty, 1); + const range_minus_one = try dest_max_val.shl(one, unsigned_operand_ty, sema.arena, sema.mod); + break :range_val try sema.intAdd(range_minus_one, one, unsigned_operand_ty); } else dest_max_val; const dest_range = try sema.addConstant(unsigned_operand_ty, dest_range_val); @@ -9747,7 +9748,7 @@ fn intCast( // no shrinkage, yes sign loss // requirement: signed to unsigned >= 0 const ok = if (is_vector) ok: { - const zero_val = try Value.Tag.repeated.create(sema.arena, Value.zero); + const zero_val = try Value.Tag.repeated.create(sema.arena, try mod.intValue(operand_scalar_ty, 0)); const zero_inst = try sema.addConstant(operand_ty, zero_val); const is_in_range = try block.addCmpVector(operand, zero_inst, .gte); const all_in_range = try block.addInst(.{ @@ -9759,7 +9760,7 @@ fn intCast( }); break :ok all_in_range; } else ok: { - const zero_inst = try sema.addConstant(operand_ty, Value.zero); + const zero_inst = try sema.addConstant(operand_ty, try mod.intValue(operand_ty, 0)); const is_in_range = try block.addBinOp(.cmp_gte, operand, zero_inst); break :ok is_in_range; }; @@ -11250,7 +11251,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError while (item.compareScalar(.lte, item_last, operand_ty, mod)) : ({ // Previous validation has resolved any possible lazy values. - item = try sema.intAddScalar(item, Value.one, operand_ty); + item = try sema.intAddScalar(item, try mod.intValue(operand_ty, 1), operand_ty); }) { cases_len += 1; @@ -11696,7 +11697,7 @@ const RangeSetUnhandledIterator = struct { fn next(it: *RangeSetUnhandledIterator) !?Value { while (it.range_i < it.ranges.len) : (it.range_i += 1) { if (!it.first) { - it.cur = try it.sema.intAddScalar(it.cur, Value.one, it.ty); + it.cur = try it.sema.intAddScalar(it.cur, try it.sema.mod.intValue(it.ty, 1), it.ty); } it.first = false; if (it.cur.compareScalar(.lt, it.ranges[it.range_i].first, it.ty, it.sema.mod)) { @@ -11705,7 +11706,7 @@ const RangeSetUnhandledIterator = struct { it.cur = it.ranges[it.range_i].last; } if (!it.first) { - it.cur = try it.sema.intAddScalar(it.cur, Value.one, it.ty); + it.cur = try it.sema.intAddScalar(it.cur, try it.sema.mod.intValue(it.ty, 1), it.ty); } it.first = false; if (it.cur.compareScalar(.lte, it.max, it.ty, it.sema.mod)) { @@ -12150,7 +12151,7 @@ fn zirEmbedFile(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A // into the final binary, and never loads the data into memory. // - When a Decl is destroyed, it can free the `*Module.EmbedFile`. 
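    // As elsewhere in this commit, the array sentinel below becomes an
    // interned, explicitly typed integer rather than the removed untyped tag
    // value; a hedged sketch of the replacement pattern:
    //     const sentinel_zero = try mod.intValue(Type.u8, 0); // was Value.zero
    // `sentinel_zero` is an illustrative local; the call site inlines it.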
embed_file.owner_decl = try anon_decl.finish( - try Type.array(anon_decl.arena(), embed_file.bytes.len, Value.zero, Type.u8, mod), + try Type.array(anon_decl.arena(), embed_file.bytes.len, try mod.intValue(Type.u8, 0), Type.u8, mod), try Value.Tag.bytes.create(anon_decl.arena(), bytes_including_null), 0, // default alignment ); @@ -12235,14 +12236,14 @@ fn zirShl( var i: usize = 0; while (i < rhs_ty.vectorLen(mod)) : (i += 1) { const rhs_elem = try rhs_val.elemValue(sema.mod, i); - if (rhs_elem.compareHetero(.lt, Value.zero, mod)) { + if (rhs_elem.compareHetero(.lt, try mod.intValue(scalar_rhs_ty, 0), mod)) { return sema.fail(block, rhs_src, "shift by negative amount '{}' at index '{d}'", .{ rhs_elem.fmtValue(scalar_ty, sema.mod), i, }); } } - } else if (rhs_val.compareHetero(.lt, Value.zero, mod)) { + } else if (rhs_val.compareHetero(.lt, try mod.intValue(rhs_ty, 0), mod)) { return sema.fail(block, rhs_src, "shift by negative amount '{}'", .{ rhs_val.fmtValue(scalar_ty, sema.mod), }); @@ -12348,7 +12349,7 @@ fn zirShl( }) else ov_bit; - const zero_ov = try sema.addConstant(Type.u1, Value.zero); + const zero_ov = try sema.addConstant(Type.u1, try mod.intValue(Type.u1, 0)); const no_ov = try block.addBinOp(.cmp_eq, any_ov_bit, zero_ov); try sema.addSafetyCheck(block, no_ov, .shl_overflow); @@ -12417,14 +12418,14 @@ fn zirShr( var i: usize = 0; while (i < rhs_ty.vectorLen(mod)) : (i += 1) { const rhs_elem = try rhs_val.elemValue(sema.mod, i); - if (rhs_elem.compareHetero(.lt, Value.zero, mod)) { + if (rhs_elem.compareHetero(.lt, try mod.intValue(rhs_ty.childType(mod), 0), mod)) { return sema.fail(block, rhs_src, "shift by negative amount '{}' at index '{d}'", .{ rhs_elem.fmtValue(scalar_ty, sema.mod), i, }); } } - } else if (rhs_val.compareHetero(.lt, Value.zero, mod)) { + } else if (rhs_val.compareHetero(.lt, try mod.intValue(rhs_ty, 0), mod)) { return sema.fail(block, rhs_src, "shift by negative amount '{}'", .{ rhs_val.fmtValue(scalar_ty, sema.mod), }); @@ -13156,9 +13157,9 @@ fn zirNegate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. } const lhs = if (rhs_ty.zigTypeTag(mod) == .Vector) - try sema.addConstant(rhs_ty, try Value.Tag.repeated.create(sema.arena, Value.zero)) + try sema.addConstant(rhs_ty, try Value.Tag.repeated.create(sema.arena, try mod.intValue(rhs_scalar_ty, 0))) else - try sema.resolveInst(.zero); + try sema.addConstant(rhs_ty, try mod.intValue(rhs_ty, 0)); return sema.analyzeArithmetic(block, .sub, lhs, rhs, src, lhs_src, rhs_src, true); } @@ -13180,9 +13181,9 @@ fn zirNegateWrap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! 
} const lhs = if (rhs_ty.zigTypeTag(mod) == .Vector) - try sema.addConstant(rhs_ty, try Value.Tag.repeated.create(sema.arena, Value.zero)) + try sema.addConstant(rhs_ty, try Value.Tag.repeated.create(sema.arena, try mod.intValue(rhs_scalar_ty, 0))) else - try sema.resolveInst(.zero); + try sema.addConstant(rhs_ty, try mod.intValue(rhs_ty, 0)); return sema.analyzeArithmetic(block, .subwrap, lhs, rhs, src, lhs_src, rhs_src, true); } @@ -13293,9 +13294,14 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef()) { if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { + const scalar_zero = switch (scalar_tag) { + .ComptimeFloat, .Float => Value.float_zero, // TODO migrate to internpool + .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), + else => unreachable, + }; const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, Value.zero); - } else Value.zero; + break :b try Value.Tag.repeated.create(sema.arena, scalar_zero); + } else scalar_zero; return sema.addConstant(resolved_type, zero_val); } } @@ -13318,7 +13324,7 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins if (lhs_val.isUndef()) { if (lhs_scalar_ty.isSignedInt(mod) and rhs_scalar_ty.isSignedInt(mod)) { if (maybe_rhs_val) |rhs_val| { - if (try sema.compareAll(rhs_val, .neq, Value.negative_one, resolved_type)) { + if (try sema.compareAll(rhs_val, .neq, try mod.intValue(resolved_type, -1), resolved_type)) { return sema.addConstUndef(resolved_type); } } @@ -13427,9 +13433,14 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai return sema.failWithUseOfUndef(block, rhs_src); } else { if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { + const scalar_zero = switch (scalar_tag) { + .ComptimeFloat, .Float => Value.float_zero, // TODO migrate to internpool + .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), + else => unreachable, + }; const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, Value.zero); - } else Value.zero; + break :b try Value.Tag.repeated.create(sema.arena, scalar_zero); + } else scalar_zero; return sema.addConstant(resolved_type, zero_val); } } @@ -13507,8 +13518,13 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } else ok: { const remainder = try block.addBinOp(.rem, casted_lhs, casted_rhs); + const scalar_zero = switch (scalar_tag) { + .ComptimeFloat, .Float => Value.float_zero, // TODO migrate to internpool + .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), + else => unreachable, + }; if (resolved_type.zigTypeTag(mod) == .Vector) { - const zero_val = try Value.Tag.repeated.create(sema.arena, Value.zero); + const zero_val = try Value.Tag.repeated.create(sema.arena, scalar_zero); const zero = try sema.addConstant(resolved_type, zero_val); const eql = try block.addCmpVector(remainder, zero, .eq); break :ok try block.addInst(.{ @@ -13519,7 +13535,7 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } }, }); } else { - const zero = try sema.addConstant(resolved_type, Value.zero); + const zero = try sema.addConstant(resolved_type, scalar_zero); const is_in_range = try block.addBinOp(.cmp_eq, remainder, zero); break :ok is_in_range; } @@ -13592,9 +13608,14 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai if (maybe_lhs_val) |lhs_val| { if 
(!lhs_val.isUndef()) { if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { + const scalar_zero = switch (scalar_tag) { + .ComptimeFloat, .Float => Value.float_zero, // TODO migrate to internpool + .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), + else => unreachable, + }; const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, Value.zero); - } else Value.zero; + break :b try Value.Tag.repeated.create(sema.arena, scalar_zero); + } else scalar_zero; return sema.addConstant(resolved_type, zero_val); } } @@ -13612,7 +13633,7 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai if (lhs_val.isUndef()) { if (lhs_scalar_ty.isSignedInt(mod) and rhs_scalar_ty.isSignedInt(mod)) { if (maybe_rhs_val) |rhs_val| { - if (try sema.compareAll(rhs_val, .neq, Value.negative_one, resolved_type)) { + if (try sema.compareAll(rhs_val, .neq, try mod.intValue(resolved_type, -1), resolved_type)) { return sema.addConstUndef(resolved_type); } } @@ -13708,9 +13729,14 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef()) { if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { + const scalar_zero = switch (scalar_tag) { + .ComptimeFloat, .Float => Value.float_zero, // TODO migrate to internpool + .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), + else => unreachable, + }; const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, Value.zero); - } else Value.zero; + break :b try Value.Tag.repeated.create(sema.arena, scalar_zero); + } else scalar_zero; return sema.addConstant(resolved_type, zero_val); } } @@ -13727,7 +13753,7 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai if (lhs_val.isUndef()) { if (lhs_scalar_ty.isSignedInt(mod) and rhs_scalar_ty.isSignedInt(mod)) { if (maybe_rhs_val) |rhs_val| { - if (try sema.compareAll(rhs_val, .neq, Value.negative_one, resolved_type)) { + if (try sema.compareAll(rhs_val, .neq, try mod.intValue(resolved_type, -1), resolved_type)) { return sema.addConstUndef(resolved_type); } } @@ -13862,8 +13888,9 @@ fn addDivByZeroSafety( if (maybe_rhs_val != null) return; const mod = sema.mod; + const scalar_zero = if (is_int) try mod.intValue(resolved_type.scalarType(mod), 0) else Value.float_zero; // TODO migrate to internpool const ok = if (resolved_type.zigTypeTag(mod) == .Vector) ok: { - const zero_val = try Value.Tag.repeated.create(sema.arena, Value.zero); + const zero_val = try Value.Tag.repeated.create(sema.arena, scalar_zero); const zero = try sema.addConstant(resolved_type, zero_val); const ok = try block.addCmpVector(casted_rhs, zero, .neq); break :ok try block.addInst(.{ @@ -13874,7 +13901,7 @@ fn addDivByZeroSafety( } }, }); } else ok: { - const zero = try sema.addConstant(resolved_type, Value.zero); + const zero = try sema.addConstant(resolved_type, scalar_zero); break :ok try block.addBinOp(if (is_int) .cmp_neq else .cmp_neq_optimized, casted_rhs, zero); }; try sema.addSafetyCheck(block, ok, .divide_by_zero); @@ -13946,9 +13973,14 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. 
return sema.failWithUseOfUndef(block, lhs_src); } if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { + const scalar_zero = switch (scalar_tag) { + .ComptimeFloat, .Float => Value.float_zero, // TODO migrate to internpool + .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), + else => unreachable, + }; const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, Value.zero); - } else Value.zero; + break :b try Value.Tag.repeated.create(sema.arena, scalar_zero); + } else scalar_zero; return sema.addConstant(resolved_type, zero_val); } } else if (lhs_scalar_ty.isSignedInt(mod)) { @@ -14325,6 +14357,7 @@ fn zirOverflowArithmetic( wrapped: Value = Value.@"unreachable", overflow_bit: Value, } = result: { + const zero = try mod.intValue(dest_ty.scalarType(mod), 0); switch (zir_tag) { .add_with_overflow => { // If either of the arguments is zero, `false` is returned and the other is stored @@ -14332,12 +14365,12 @@ fn zirOverflowArithmetic( // Otherwise, if either of the argument is undefined, undefined is returned. if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef() and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) { - break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, Value.zero), .inst = rhs }; + break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = rhs }; } } if (maybe_rhs_val) |rhs_val| { if (!rhs_val.isUndef() and (try rhs_val.compareAllWithZeroAdvanced(.eq, sema))) { - break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, Value.zero), .inst = lhs }; + break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = lhs }; } } if (maybe_lhs_val) |lhs_val| { @@ -14358,7 +14391,7 @@ fn zirOverflowArithmetic( if (rhs_val.isUndef()) { break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef }; } else if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { - break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, Value.zero), .inst = lhs }; + break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = lhs }; } else if (maybe_lhs_val) |lhs_val| { if (lhs_val.isUndef()) { break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef }; @@ -14373,12 +14406,13 @@ fn zirOverflowArithmetic( // If either of the arguments is zero, the result is zero and no overflow occured. // If either of the arguments is one, the result is the other and no overflow occured. // Otherwise, if either of the arguments is undefined, both results are undefined. 
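The zero/one shortcuts in these overflow hunks mirror identities of the overflow builtins themselves. A standalone sketch, assuming the tuple-returning form of @addWithOverflow/@mulWithOverflow:

    const std = @import("std");

    test "overflow-arithmetic identities used by the folding above" {
        const x: u8 = 200;
        const add = @addWithOverflow(x, 0); // x + 0: value x, overflow bit 0
        try std.testing.expectEqual(x, add[0]);
        try std.testing.expectEqual(@as(u1, 0), add[1]);
        const mul0 = @mulWithOverflow(x, 0); // x * 0: value 0, overflow bit 0
        try std.testing.expectEqual(@as(u8, 0), mul0[0]);
        try std.testing.expectEqual(@as(u1, 0), mul0[1]);
        const mul1 = @mulWithOverflow(x, 1); // x * 1: value x, overflow bit 0
        try std.testing.expectEqual(x, mul1[0]);
        try std.testing.expectEqual(@as(u1, 0), mul1[1]);
    }
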
+ const scalar_one = try mod.intValue(dest_ty.scalarType(mod), 1); if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef()) { if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { - break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, Value.zero), .inst = lhs }; - } else if (try sema.compareAll(lhs_val, .eq, try maybeRepeated(sema, dest_ty, Value.one), dest_ty)) { - break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, Value.zero), .inst = rhs }; + break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = lhs }; + } else if (try sema.compareAll(lhs_val, .eq, try maybeRepeated(sema, dest_ty, scalar_one), dest_ty)) { + break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = rhs }; } } } @@ -14386,9 +14420,9 @@ fn zirOverflowArithmetic( if (maybe_rhs_val) |rhs_val| { if (!rhs_val.isUndef()) { if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { - break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, Value.zero), .inst = rhs }; - } else if (try sema.compareAll(rhs_val, .eq, try maybeRepeated(sema, dest_ty, Value.one), dest_ty)) { - break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, Value.zero), .inst = lhs }; + break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = rhs }; + } else if (try sema.compareAll(rhs_val, .eq, try maybeRepeated(sema, dest_ty, scalar_one), dest_ty)) { + break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = lhs }; } } } @@ -14410,12 +14444,12 @@ fn zirOverflowArithmetic( // Oterhwise if either of the arguments is undefined, both results are undefined. if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef() and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) { - break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, Value.zero), .inst = lhs }; + break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = lhs }; } } if (maybe_rhs_val) |rhs_val| { if (!rhs_val.isUndef() and (try rhs_val.compareAllWithZeroAdvanced(.eq, sema))) { - break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, Value.zero), .inst = lhs }; + break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = lhs }; } } if (maybe_lhs_val) |lhs_val| { @@ -14766,6 +14800,11 @@ fn analyzeArithmetic( // If either of the operands are inf, and the other operand is zero, // the result is nan. // If either of the operands are nan, the result is nan. 
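The float path described above cannot reuse the integer `x * 0 => 0` fold, because IEEE arithmetic makes that rewrite unsound. A quick standalone check of the cases the comments list:

    const std = @import("std");

    test "float mul edge cases that block constant folding" {
        const inf = std.math.inf(f64);
        const nan = std.math.nan(f64);
        try std.testing.expect(std.math.isNan(inf * 0.0)); // inf * 0 => nan
        try std.testing.expect(std.math.isNan(nan * 1.0)); // nan propagates
        try std.testing.expect(std.math.isInf(inf * 2.0)); // inf otherwise survives
    }
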
+ const scalar_zero = switch (scalar_tag) { + .ComptimeFloat, .Float => Value.float_zero, // TODO migrate to internpool + .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), + else => unreachable, + }; if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef()) { if (lhs_val.isNan()) { @@ -14783,11 +14822,11 @@ fn analyzeArithmetic( break :lz; } const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, Value.zero); - } else Value.zero; + break :b try Value.Tag.repeated.create(sema.arena, scalar_zero); + } else scalar_zero; return sema.addConstant(resolved_type, zero_val); } - if (try sema.compareAll(lhs_val, .eq, Value.one, resolved_type)) { + if (try sema.compareAll(lhs_val, .eq, try mod.intValue(resolved_type, 1), resolved_type)) { return casted_rhs; } } @@ -14813,11 +14852,11 @@ fn analyzeArithmetic( break :rz; } const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, Value.zero); - } else Value.zero; + break :b try Value.Tag.repeated.create(sema.arena, scalar_zero); + } else scalar_zero; return sema.addConstant(resolved_type, zero_val); } - if (try sema.compareAll(rhs_val, .eq, Value.one, resolved_type)) { + if (try sema.compareAll(rhs_val, .eq, try mod.intValue(resolved_type, 1), resolved_type)) { return casted_lhs; } if (maybe_lhs_val) |lhs_val| { @@ -14849,15 +14888,20 @@ fn analyzeArithmetic( // If either of the operands are zero, result is zero. // If either of the operands are one, result is the other operand. // If either of the operands are undefined, result is undefined. + const scalar_zero = switch (scalar_tag) { + .ComptimeFloat, .Float => Value.float_zero, // TODO migrate to internpool + .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), + else => unreachable, + }; if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef()) { if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, Value.zero); - } else Value.zero; + break :b try Value.Tag.repeated.create(sema.arena, scalar_zero); + } else scalar_zero; return sema.addConstant(resolved_type, zero_val); } - if (try sema.compareAll(lhs_val, .eq, Value.one, resolved_type)) { + if (try sema.compareAll(lhs_val, .eq, try mod.intValue(resolved_type, 1), resolved_type)) { return casted_rhs; } } @@ -14869,11 +14913,11 @@ fn analyzeArithmetic( } if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, Value.zero); - } else Value.zero; + break :b try Value.Tag.repeated.create(sema.arena, scalar_zero); + } else scalar_zero; return sema.addConstant(resolved_type, zero_val); } - if (try sema.compareAll(rhs_val, .eq, Value.one, resolved_type)) { + if (try sema.compareAll(rhs_val, .eq, try mod.intValue(resolved_type, 1), resolved_type)) { return casted_lhs; } if (maybe_lhs_val) |lhs_val| { @@ -14892,15 +14936,20 @@ fn analyzeArithmetic( // If either of the operands are zero, result is zero. // If either of the operands are one, result is the other operand. // If either of the operands are undefined, result is undefined. 
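The same zero/one identities justify the folds for the wrapping and saturating multiply variants handled in this stretch; in user-level terms:

    const std = @import("std");

    test "wrapping/saturating mul identities" {
        const x: u8 = 250;
        try std.testing.expectEqual(@as(u8, 0), x *% 0); // zero annihilates
        try std.testing.expectEqual(x, x *% 1); // one is the identity
        try std.testing.expectEqual(@as(u8, 0), x *| 0);
        try std.testing.expectEqual(x, x *| 1);
    }
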
+ const scalar_zero = switch (scalar_tag) { + .ComptimeFloat, .Float => Value.float_zero, // TODO migrate to internpool + .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), + else => unreachable, + }; if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef()) { if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, Value.zero); - } else Value.zero; + break :b try Value.Tag.repeated.create(sema.arena, scalar_zero); + } else scalar_zero; return sema.addConstant(resolved_type, zero_val); } - if (try sema.compareAll(lhs_val, .eq, Value.one, resolved_type)) { + if (try sema.compareAll(lhs_val, .eq, try mod.intValue(resolved_type, 1), resolved_type)) { return casted_rhs; } } @@ -14911,11 +14960,11 @@ fn analyzeArithmetic( } if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, Value.zero); - } else Value.zero; + break :b try Value.Tag.repeated.create(sema.arena, scalar_zero); + } else scalar_zero; return sema.addConstant(resolved_type, zero_val); } - if (try sema.compareAll(rhs_val, .eq, Value.one, resolved_type)) { + if (try sema.compareAll(rhs_val, .eq, try mod.intValue(resolved_type, 1), resolved_type)) { return casted_lhs; } if (maybe_lhs_val) |lhs_val| { @@ -14968,7 +15017,7 @@ fn analyzeArithmetic( }) else ov_bit; - const zero_ov = try sema.addConstant(Type.u1, Value.zero); + const zero_ov = try sema.addConstant(Type.u1, try mod.intValue(Type.u1, 0)); const no_ov = try block.addBinOp(.cmp_eq, any_ov_bit, zero_ov); try sema.addSafetyCheck(block, no_ov, .integer_overflow); @@ -15785,7 +15834,7 @@ fn zirBuiltinSrc( const name = std.mem.span(fn_owner_decl.name); const bytes = try anon_decl.arena().dupe(u8, name[0 .. name.len + 1]); const new_decl = try anon_decl.finish( - try Type.array(anon_decl.arena(), bytes.len - 1, Value.zero, Type.u8, mod), + try Type.array(anon_decl.arena(), bytes.len - 1, try mod.intValue(Type.u8, 0), Type.u8, mod), try Value.Tag.bytes.create(anon_decl.arena(), bytes), 0, // default alignment ); @@ -15798,7 +15847,7 @@ fn zirBuiltinSrc( // The compiler must not call realpath anywhere. const name = try fn_owner_decl.getFileScope().fullPathZ(anon_decl.arena()); const new_decl = try anon_decl.finish( - try Type.array(anon_decl.arena(), name.len, Value.zero, Type.u8, mod), + try Type.array(anon_decl.arena(), name.len, try mod.intValue(Type.u8, 0), Type.u8, mod), try Value.Tag.bytes.create(anon_decl.arena(), name[0 .. name.len + 1]), 0, // default alignment ); @@ -16148,7 +16197,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai defer anon_decl.deinit(); const bytes = try anon_decl.arena().dupeZ(u8, name); const new_decl = try anon_decl.finish( - try Type.array(anon_decl.arena(), bytes.len, Value.zero, Type.u8, mod), + try Type.array(anon_decl.arena(), bytes.len, try mod.intValue(Type.u8, 0), Type.u8, mod), try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. 
bytes.len + 1]), 0, // default alignment ); @@ -16256,7 +16305,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai defer anon_decl.deinit(); const bytes = try anon_decl.arena().dupeZ(u8, name); const new_decl = try anon_decl.finish( - try Type.array(anon_decl.arena(), bytes.len, Value.zero, Type.u8, mod), + try Type.array(anon_decl.arena(), bytes.len, try mod.intValue(Type.u8, 0), Type.u8, mod), try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]), 0, // default alignment ); @@ -16344,7 +16393,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai defer anon_decl.deinit(); const bytes = try anon_decl.arena().dupeZ(u8, name); const new_decl = try anon_decl.finish( - try Type.array(anon_decl.arena(), bytes.len, Value.zero, Type.u8, mod), + try Type.array(anon_decl.arena(), bytes.len, try mod.intValue(Type.u8, 0), Type.u8, mod), try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]), 0, // default alignment ); @@ -16454,7 +16503,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai else try std.fmt.allocPrintZ(anon_decl.arena(), "{d}", .{i}); const new_decl = try anon_decl.finish( - try Type.array(anon_decl.arena(), bytes.len, Value.zero, Type.u8, mod), + try Type.array(anon_decl.arena(), bytes.len, try mod.intValue(Type.u8, 0), Type.u8, mod), try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]), 0, // default alignment ); @@ -16496,7 +16545,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai defer anon_decl.deinit(); const bytes = try anon_decl.arena().dupeZ(u8, name); const new_decl = try anon_decl.finish( - try Type.array(anon_decl.arena(), bytes.len, Value.zero, Type.u8, mod), + try Type.array(anon_decl.arena(), bytes.len, try mod.intValue(Type.u8, 0), Type.u8, mod), try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]), 0, // default alignment ); @@ -16692,7 +16741,7 @@ fn typeInfoNamespaceDecls( defer anon_decl.deinit(); const bytes = try anon_decl.arena().dupeZ(u8, mem.sliceTo(decl.name, 0)); const new_decl = try anon_decl.finish( - try Type.array(anon_decl.arena(), bytes.len, Value.zero, Type.u8, mod), + try Type.array(anon_decl.arena(), bytes.len, try mod.intValue(Type.u8, 0), Type.u8, mod), try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. 
bytes.len + 1]), 0, // default alignment ); @@ -17884,7 +17933,7 @@ fn zirStructInit( } found_fields[field_index] = item.data.field_type; field_inits[field_index] = try sema.resolveInst(item.data.init); - if (!is_packed) if (resolved_ty.structFieldValueComptime(mod, field_index)) |default_value| { + if (!is_packed) if (try resolved_ty.structFieldValueComptime(mod, field_index)) |default_value| { const init_val = (try sema.resolveMaybeUndefVal(field_inits[field_index])) orelse { return sema.failWithNeededComptime(block, field_src, "value stored in comptime field must be comptime-known"); }; @@ -18544,8 +18593,8 @@ fn zirBoolToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A const operand = try sema.resolveInst(inst_data.operand); if (try sema.resolveMaybeUndefVal(operand)) |val| { if (val.isUndef()) return sema.addConstUndef(Type.u1); - if (val.toBool(mod)) return sema.addConstant(Type.u1, Value.one); - return sema.addConstant(Type.u1, Value.zero); + if (val.toBool(mod)) return sema.addConstant(Type.u1, try mod.intValue(Type.u1, 1)); + return sema.addConstant(Type.u1, try mod.intValue(Type.u1, 0)); } return block.addUnOp(.bool_to_int, operand); } @@ -19761,7 +19810,7 @@ fn zirTypeName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const bytes = try ty.nameAllocArena(anon_decl.arena(), mod); const new_decl = try anon_decl.finish( - try Type.array(anon_decl.arena(), bytes.len, Value.zero, Type.u8, mod), + try Type.array(anon_decl.arena(), bytes.len, try mod.intValue(Type.u8, 0), Type.u8, mod), try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]), 0, // default alignment ); @@ -19804,17 +19853,17 @@ fn zirFloatToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! try sema.requireRuntimeBlock(block, inst_data.src(), operand_src); if (dest_ty.intInfo(mod).bits == 0) { if (block.wantSafety()) { - const ok = try block.addBinOp(if (block.float_mode == .Optimized) .cmp_eq_optimized else .cmp_eq, operand, try sema.addConstant(operand_ty, Value.zero)); + const ok = try block.addBinOp(if (block.float_mode == .Optimized) .cmp_eq_optimized else .cmp_eq, operand, try sema.addConstant(operand_ty, try mod.intValue(operand_ty, 0))); try sema.addSafetyCheck(block, ok, .integer_part_out_of_bounds); } - return sema.addConstant(dest_ty, Value.zero); + return sema.addConstant(dest_ty, try mod.intValue(dest_ty, 0)); } const result = try block.addTyOp(if (block.float_mode == .Optimized) .float_to_int_optimized else .float_to_int, dest_ty, operand); if (block.wantSafety()) { const back = try block.addTyOp(.int_to_float, operand_ty, result); const diff = try block.addBinOp(.sub, operand, back); - const ok_pos = try block.addBinOp(if (block.float_mode == .Optimized) .cmp_lt_optimized else .cmp_lt, diff, try sema.addConstant(operand_ty, Value.one)); - const ok_neg = try block.addBinOp(if (block.float_mode == .Optimized) .cmp_gt_optimized else .cmp_gt, diff, try sema.addConstant(operand_ty, Value.negative_one)); + const ok_pos = try block.addBinOp(if (block.float_mode == .Optimized) .cmp_lt_optimized else .cmp_lt, diff, try sema.addConstant(operand_ty, try mod.intValue(operand_ty, 1))); + const ok_neg = try block.addBinOp(if (block.float_mode == .Optimized) .cmp_gt_optimized else .cmp_gt, diff, try sema.addConstant(operand_ty, try mod.intValue(operand_ty, -1))); const ok = try block.addBinOp(.bool_and, ok_pos, ok_neg); try sema.addSafetyCheck(block, ok, .integer_part_out_of_bounds); } @@ -21398,7 +21447,7 @@ fn analyzeShuffle( 
expand_mask_values[i] = try mod.intValue(Type.comptime_int, i); } while (i < max_len) : (i += 1) { - expand_mask_values[i] = Value.negative_one; + expand_mask_values[i] = try mod.intValue(Type.comptime_int, -1); } const expand_mask = try Value.Tag.aggregate.create(sema.arena, expand_mask_values); @@ -24504,7 +24553,7 @@ fn finishFieldCallBind( const container_ty = ptr_ty.childType(mod); if (container_ty.zigTypeTag(mod) == .Struct) { - if (container_ty.structFieldValueComptime(mod, field_index)) |default_val| { + if (try container_ty.structFieldValueComptime(mod, field_index)) |default_val| { return .{ .direct = try sema.addConstant(field_ty, default_val) }; } } @@ -24815,7 +24864,7 @@ fn tupleFieldValByIndex( const mod = sema.mod; const field_ty = tuple_ty.structFieldType(field_index); - if (tuple_ty.structFieldValueComptime(mod, field_index)) |default_value| { + if (try tuple_ty.structFieldValueComptime(mod, field_index)) |default_value| { return sema.addConstant(field_ty, default_value); } @@ -24828,7 +24877,7 @@ fn tupleFieldValByIndex( return sema.addConstant(field_ty, field_values[field_index]); } - if (tuple_ty.structFieldValueComptime(mod, field_index)) |default_val| { + if (try tuple_ty.structFieldValueComptime(mod, field_index)) |default_val| { return sema.addConstant(field_ty, default_val); } @@ -25205,7 +25254,7 @@ fn tupleFieldPtr( .@"addrspace" = tuple_ptr_ty.ptrAddressSpace(mod), }); - if (tuple_ty.structFieldValueComptime(mod, field_index)) |default_val| { + if (try tuple_ty.structFieldValueComptime(mod, field_index)) |default_val| { const val = try Value.Tag.comptime_field_ptr.create(sema.arena, .{ .field_ty = field_ty, .field_val = default_val, @@ -25256,13 +25305,13 @@ fn tupleField( const field_ty = tuple_ty.structFieldType(field_index); - if (tuple_ty.structFieldValueComptime(mod, field_index)) |default_value| { + if (try tuple_ty.structFieldValueComptime(mod, field_index)) |default_value| { return sema.addConstant(field_ty, default_value); // comptime field } if (try sema.resolveMaybeUndefVal(tuple)) |tuple_val| { if (tuple_val.isUndef()) return sema.addConstUndef(field_ty); - return sema.addConstant(field_ty, tuple_val.fieldValue(tuple_ty, mod, field_index)); + return sema.addConstant(field_ty, try tuple_val.fieldValue(tuple_ty, mod, field_index)); } try sema.validateRuntimeElemAccess(block, field_index_src, field_ty, tuple_ty, tuple_src); @@ -25812,7 +25861,7 @@ fn coerceExtra( if (inst_info.size == .Slice) { assert(dest_info.sentinel == null); if (inst_info.sentinel == null or - !inst_info.sentinel.?.eql(Value.zero, dest_info.pointee_type, sema.mod)) + !inst_info.sentinel.?.eql(try mod.intValue(dest_info.pointee_type, 0), dest_info.pointee_type, sema.mod)) break :p; const slice_ptr = try sema.analyzeSlicePtr(block, inst_src, inst, inst_ty); @@ -25879,7 +25928,7 @@ fn coerceExtra( try mod.intValue(Type.usize, dest_info.@"align") else try dest_info.pointee_type.lazyAbiAlignment(mod, sema.arena), - .len = Value.zero, + .len = try mod.intValue(Type.usize, 0), }); return sema.addConstant(dest_ty, slice_val); } @@ -28234,7 +28283,7 @@ fn beginComptimePtrLoad( const field_ty = field_ptr.container_ty.structFieldType(field_index); deref.pointee = TypedValue{ .ty = field_ty, - .val = tv.val.fieldValue(tv.ty, mod, field_index), + .val = try tv.val.fieldValue(tv.ty, mod, field_index), }; } break :blk deref; @@ -32532,9 +32581,9 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { break :blk try val.copy(decl_arena_allocator); } else blk: { const val 
= if (last_tag_val) |val| - try sema.intAdd(val, Value.one, int_tag_ty) + try sema.intAdd(val, try mod.intValue(int_tag_ty, 1), int_tag_ty) else - Value.zero; + try mod.intValue(int_tag_ty, 0); last_tag_val = val; break :blk try val.copy(decl_arena_allocator); @@ -32903,7 +32952,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) { .int_type => |int_type| { if (int_type.bits == 0) { - return Value.zero; + return try mod.intValue(ty, 0); } else { return null; } @@ -33049,7 +33098,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { } if (enum_obj.fields.count() == 1) { if (enum_obj.values.count() == 0) { - return Value.zero; // auto-numbered + return try mod.intValue(ty, 0); // auto-numbered } else { return enum_obj.values.keys()[0]; } @@ -33066,7 +33115,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { switch (enum_obj.fields.count()) { 0 => return Value.@"unreachable", 1 => if (enum_obj.values.count() == 0) { - return Value.zero; // auto-numbered + return try mod.intValue(ty, 0); // auto-numbered } else { return enum_obj.values.keys()[0]; }, @@ -33078,14 +33127,14 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { const enum_simple = resolved_ty.castTag(.enum_simple).?.data; switch (enum_simple.fields.count()) { 0 => return Value.@"unreachable", - 1 => return Value.zero, + 1 => return try mod.intValue(ty, 0), else => return null, } }, .enum_nonexhaustive => { const tag_ty = ty.castTag(.enum_nonexhaustive).?.data.tag_ty; if (tag_ty.zigTypeTag(mod) != .ComptimeInt and !(try sema.typeHasRuntimeBits(tag_ty))) { - return Value.zero; + return try mod.intValue(ty, 0); } else { return null; } diff --git a/src/TypedValue.zig b/src/TypedValue.zig index 28212a164c..828fb610d4 100644 --- a/src/TypedValue.zig +++ b/src/TypedValue.zig @@ -61,7 +61,10 @@ pub fn format( ) !void { _ = options; comptime std.debug.assert(fmt.len == 0); - return ctx.tv.print(writer, 3, ctx.mod); + return ctx.tv.print(writer, 3, ctx.mod) catch |err| switch (err) { + error.OutOfMemory => @panic("OOM"), // We're not allowed to return this from a format function + else => |e| return e, + }; } /// Prints the Value according to the Type, not according to the Value Tag. 
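Context for the format() change above: std.fmt requires a format method to return only the writer's error set, so the Allocator.Error now produced by print() has to be handled at exactly this boundary. A minimal sketch of the same adapter pattern, using a hypothetical Wrapper type:

    const std = @import("std");

    const Wrapper = struct {
        byte: u8,

        pub fn format(
            self: Wrapper,
            comptime fmt: []const u8,
            options: std.fmt.FormatOptions,
            writer: anytype,
        ) !void {
            _ = fmt;
            _ = options;
            // The allocating helper may fail with OutOfMemory, which the
            // format interface cannot express; panic instead of propagating.
            self.printAlloc(writer) catch |err| switch (err) {
                error.OutOfMemory => @panic("OOM"),
                else => |e| return e,
            };
        }

        fn printAlloc(self: Wrapper, writer: anytype) (@TypeOf(writer).Error || std.mem.Allocator.Error)!void {
            try writer.print("0x{x:0>2}", .{self.byte});
        }
    };

    test "format adapter swallows OutOfMemory" {
        var buf: [8]u8 = undefined;
        const s = try std.fmt.bufPrint(&buf, "{}", .{Wrapper{ .byte = 0xab }});
        try std.testing.expectEqualStrings("0xab", s);
    }
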
@@ -70,7 +73,7 @@ pub fn print( writer: anytype, level: u8, mod: *Module, -) @TypeOf(writer).Error!void { +) (@TypeOf(writer).Error || Allocator.Error)!void { var val = tv.val; var ty = tv.ty; if (val.isVariable(mod)) @@ -95,7 +98,7 @@ pub fn print( } try print(.{ .ty = ty.structFieldType(i), - .val = val.fieldValue(ty, mod, i), + .val = try val.fieldValue(ty, mod, i), }, writer, level - 1, mod); } if (ty.structFieldCount() > max_aggregate_items) { @@ -112,7 +115,7 @@ pub fn print( var i: u32 = 0; while (i < max_len) : (i += 1) { - const elem = val.fieldValue(ty, mod, i); + const elem = try val.fieldValue(ty, mod, i); if (elem.isUndef()) break :str; buf[i] = std.math.cast(u8, elem.toUnsignedInt(mod)) orelse break :str; } @@ -129,7 +132,7 @@ pub fn print( if (i != 0) try writer.writeAll(", "); try print(.{ .ty = elem_ty, - .val = val.fieldValue(ty, mod, i), + .val = try val.fieldValue(ty, mod, i), }, writer, level - 1, mod); } if (len > max_aggregate_items) { diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index 95a8350c7d..ea3814a20e 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -4311,7 +4311,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier // Due to incremental compilation, how function calls are generated depends // on linking. - if (self.air.value(callee, mod)) |func_value| { + if (try self.air.value(callee, mod)) |func_value| { if (func_value.castTag(.function)) |func_payload| { const func = func_payload.data; @@ -6154,7 +6154,7 @@ fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue { const inst_index = Air.refToIndex(inst) orelse return self.genTypedValue(.{ .ty = inst_ty, - .val = self.air.value(inst, mod).?, + .val = (try self.air.value(inst, mod)).?, }); switch (self.air.instructions.items(.tag)[inst_index]) { diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index cc2bc3a613..967a6dd753 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -4291,7 +4291,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier // Due to incremental compilation, how function calls are generated depends // on linking. 
- if (self.air.value(callee, mod)) |func_value| { + if (try self.air.value(callee, mod)) |func_value| { if (func_value.castTag(.function)) |func_payload| { const func = func_payload.data; @@ -6101,7 +6101,7 @@ fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue { const inst_index = Air.refToIndex(inst) orelse return self.genTypedValue(.{ .ty = inst_ty, - .val = self.air.value(inst, mod).?, + .val = (try self.air.value(inst, mod)).?, }); switch (self.air.instructions.items(.tag)[inst_index]) { diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index 1008d527f6..5cf621488e 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -1743,7 +1743,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier } } - if (self.air.value(callee, mod)) |func_value| { + if (try self.air.value(callee, mod)) |func_value| { if (func_value.castTag(.function)) |func_payload| { const func = func_payload.data; const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl); @@ -2551,7 +2551,7 @@ fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue { const inst_index = Air.refToIndex(inst) orelse return self.genTypedValue(.{ .ty = inst_ty, - .val = self.air.value(inst, mod).?, + .val = (try self.air.value(inst, mod)).?, }); switch (self.air.instructions.items(.tag)[inst_index]) { diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index 4231222d4b..2cb35460c2 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -1343,7 +1343,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier // Due to incremental compilation, how function calls are generated depends // on linking. - if (self.air.value(callee, mod)) |func_value| { + if (try self.air.value(callee, mod)) |func_value| { if (self.bin_file.tag == link.File.Elf.base_tag) { if (func_value.castTag(.function)) |func_payload| { const func = func_payload.data; @@ -4575,7 +4575,7 @@ fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue { return self.genTypedValue(.{ .ty = ty, - .val = self.air.value(ref, mod).?, + .val = (try self.air.value(ref, mod)).?, }); } diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index 327e2c13e0..36b805cf94 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -789,7 +789,7 @@ fn resolveInst(func: *CodeGen, ref: Air.Inst.Ref) InnerError!WValue { assert(!gop.found_existing); const mod = func.bin_file.base.options.module.?; - const val = func.air.value(ref, mod).?; + const val = (try func.air.value(ref, mod)).?; const ty = func.typeOf(ref); if (!ty.hasRuntimeBitsIgnoreComptime(mod) and !ty.isInt(mod) and !ty.isError(mod)) { gop.value_ptr.* = WValue{ .none = {} }; @@ -2195,7 +2195,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif const first_param_sret = firstParamSRet(fn_info.cc, fn_info.return_type, mod); const callee: ?Decl.Index = blk: { - const func_val = func.air.value(pl_op.operand, mod) orelse break :blk null; + const func_val = (try func.air.value(pl_op.operand, mod)) orelse break :blk null; if (func_val.castTag(.function)) |function| { _ = try func.bin_file.getOrCreateAtomForDecl(function.data.owner_decl); @@ -3138,7 +3138,7 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue { if (!payload_type.hasRuntimeBitsIgnoreComptime(mod)) { // We use the error type directly as the type. 
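The backend hunks in this stretch are mechanical fallout from Air.value becoming fallible (it may now intern values, which can allocate), so every call site gains a `try` and the old `.?`/`orelse` unwrapping moves outside the parentheses. A tiny standalone sketch of the `(try f()).?` shape, with a hypothetical lookup function standing in for Air.value:

    const std = @import("std");

    fn lookup(i: u8) error{OutOfMemory}!?u32 {
        // Stand-in for Air.value: may fail, and may also have no comptime value.
        return if (i == 0) null else @as(u32, i) * 2;
    }

    test "(try f()).? unwraps error-then-optional" {
        const v = (try lookup(3)).?; // error checked by try, null asserted by .?
        try std.testing.expectEqual(@as(u32, 6), v);
        try std.testing.expectEqual(@as(?u32, null), try lookup(0));
    }
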
const is_pl = val.errorUnionIsPayload(); - const err_val = if (!is_pl) val else Value.zero; + const err_val = if (!is_pl) val else try mod.intValue(error_type, 0); return func.lowerConstant(err_val, error_type); } return func.fail("Wasm TODO: lowerConstant error union with non-zero-bit payload type", .{}); @@ -3792,7 +3792,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { errdefer func.gpa.free(values); for (items, 0..) |ref, i| { - const item_val = func.air.value(ref, mod).?; + const item_val = (try func.air.value(ref, mod)).?; const int_val = func.valueAsI32(item_val, target_ty); if (lowest_maybe == null or int_val < lowest_maybe.?) { lowest_maybe = int_val; @@ -5048,7 +5048,7 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const result = try func.allocStack(result_ty); const offset = try func.buildPointerOffset(result, 0, .new); // pointer to offset for (elements, 0..) |elem, elem_index| { - if (result_ty.structFieldValueComptime(mod, elem_index) != null) continue; + if ((try result_ty.structFieldValueComptime(mod, elem_index)) != null) continue; const elem_ty = result_ty.structFieldType(elem_index); const elem_size = @intCast(u32, elem_ty.abiSize(mod)); diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 51c6bc79e6..b208656a41 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -2768,7 +2768,7 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void { const full_ty = try mod.vectorType(.{ .len = @intCast(u32, @divExact(@as(u64, if (src_abi_size > 16) 256 else 128), src_info.bits)), - .child = src_ty.childType(mod).ip_index, + .child = elem_ty.ip_index, }); const full_abi_size = @intCast(u32, full_ty.abiSize(mod)); @@ -8107,7 +8107,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier // Due to incremental compilation, how function calls are generated depends // on linking. - if (self.air.value(callee, mod)) |func_value| { + if (try self.air.value(callee, mod)) |func_value| { if (if (func_value.castTag(.function)) |func_payload| func_payload.data.owner_decl else if (func_value.castTag(.decl_ref)) |decl_ref_payload| @@ -11265,7 +11265,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { .{ .immediate = result_ty.abiSize(mod) }, ); for (elements, 0..) |elem, elem_i| { - if (result_ty.structFieldValueComptime(mod, elem_i) != null) continue; + if ((try result_ty.structFieldValueComptime(mod, elem_i)) != null) continue; const elem_ty = result_ty.structFieldType(elem_i); const elem_bit_size = @intCast(u32, elem_ty.bitSize(mod)); @@ -11337,7 +11337,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { } } } else for (elements, 0..) 
|elem, elem_i| { - if (result_ty.structFieldValueComptime(mod, elem_i) != null) continue; + if ((try result_ty.structFieldValueComptime(mod, elem_i)) != null) continue; const elem_ty = result_ty.structFieldType(elem_i); const elem_off = @intCast(i32, result_ty.structFieldOffset(elem_i, mod)); @@ -11601,7 +11601,7 @@ fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue { const gop = try self.const_tracking.getOrPut(self.gpa, inst); if (!gop.found_existing) gop.value_ptr.* = InstTracking.init(try self.genTypedValue(.{ .ty = ty, - .val = self.air.value(ref, mod).?, + .val = (try self.air.value(ref, mod)).?, })); break :tracking gop.value_ptr; }, @@ -11614,7 +11614,7 @@ fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue { } } - return self.genTypedValue(.{ .ty = ty, .val = self.air.value(ref, mod).? }); + return self.genTypedValue(.{ .ty = ty, .val = (try self.air.value(ref, mod)).? }); } fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) *InstTracking { diff --git a/src/codegen.zig b/src/codegen.zig index 9c9868892f..8bd478bf7c 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -675,7 +675,7 @@ pub fn generateSymbol( const is_payload = typed_value.val.errorUnionIsPayload(); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { - const err_val = if (is_payload) Value.zero else typed_value.val; + const err_val = if (is_payload) try mod.intValue(error_ty, 0) else typed_value.val; return generateSymbol(bin_file, src_loc, .{ .ty = error_ty, .val = err_val, @@ -690,7 +690,7 @@ pub fn generateSymbol( if (error_align > payload_align) { switch (try generateSymbol(bin_file, src_loc, .{ .ty = error_ty, - .val = if (is_payload) Value.zero else typed_value.val, + .val = if (is_payload) try mod.intValue(error_ty, 0) else typed_value.val, }, code, debug_output, reloc_info)) { .ok => {}, .fail => |em| return Result{ .fail = em }, @@ -722,7 +722,7 @@ pub fn generateSymbol( const begin = code.items.len; switch (try generateSymbol(bin_file, src_loc, .{ .ty = error_ty, - .val = if (is_payload) Value.zero else typed_value.val, + .val = if (is_payload) try mod.intValue(error_ty, 0) else typed_value.val, }, code, debug_output, reloc_info)) { .ok => {}, .fail => |em| return Result{ .fail = em }, @@ -1280,7 +1280,7 @@ pub fn genTypedValue( if (!payload_type.hasRuntimeBitsIgnoreComptime(mod)) { // We use the error type directly as the type. 
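Several hunks here encode "no error" as the integer 0 of the error type, replacing the untyped Value.zero. That relies on named errors always mapping to nonzero integers; in user-level terms (using the pre-rename @errorToInt builtin, consistent with the @boolToInt/@intCast style elsewhere in this diff):

    const std = @import("std");

    test "error integer 0 is reserved for 'no error'" {
        const E = error{Boom};
        // Every named error gets a nonzero integer, so an error union with a
        // zero-bit payload can be lowered as just the error integer, where 0
        // means the payload side is active.
        try std.testing.expect(@errorToInt(E.Boom) != 0);
    }
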
- const err_val = if (!is_pl) typed_value.val else Value.zero; + const err_val = if (!is_pl) typed_value.val else try mod.intValue(error_type, 0); return genTypedValue(bin_file, src_loc, .{ .ty = error_type, .val = err_val, diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 9443c2298a..aaeec05562 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -287,7 +287,7 @@ pub const Function = struct { if (gop.found_existing) return gop.value_ptr.*; const mod = f.object.dg.module; - const val = f.air.value(ref, mod).?; + const val = (try f.air.value(ref, mod)).?; const ty = f.typeOf(ref); const result: CValue = if (lowersToArray(ty, mod)) result: { @@ -356,7 +356,7 @@ pub const Function = struct { .constant => |inst| { const mod = f.object.dg.module; const ty = f.typeOf(inst); - const val = f.air.value(inst, mod).?; + const val = (try f.air.value(inst, mod)).?; return f.object.dg.renderValue(w, ty, val, location); }, .undef => |ty| return f.object.dg.renderValue(w, ty, Value.undef, location), @@ -369,7 +369,7 @@ pub const Function = struct { .constant => |inst| { const mod = f.object.dg.module; const ty = f.typeOf(inst); - const val = f.air.value(inst, mod).?; + const val = (try f.air.value(inst, mod)).?; try w.writeAll("(*"); try f.object.dg.renderValue(w, ty, val, .Other); return w.writeByte(')'); @@ -383,7 +383,7 @@ pub const Function = struct { .constant => |inst| { const mod = f.object.dg.module; const ty = f.typeOf(inst); - const val = f.air.value(inst, mod).?; + const val = (try f.air.value(inst, mod)).?; try f.object.dg.renderValue(w, ty, val, .Other); try w.writeByte('.'); return f.writeCValue(w, member, .Other); @@ -397,7 +397,7 @@ pub const Function = struct { .constant => |inst| { const mod = f.object.dg.module; const ty = f.typeOf(inst); - const val = f.air.value(inst, mod).?; + const val = (try f.air.value(inst, mod)).?; try w.writeByte('('); try f.object.dg.renderValue(w, ty, val, .Other); try w.writeAll(")->"); @@ -690,7 +690,7 @@ pub const DeclGen = struct { location, ); try writer.print(") + {})", .{ - try dg.fmtIntLiteral(Type.usize, Value.one, .Other), + try dg.fmtIntLiteral(Type.usize, try mod.intValue(Type.usize, 1), .Other), }); }, } @@ -1253,7 +1253,7 @@ pub const DeclGen = struct { .ErrorUnion => { const payload_ty = ty.errorUnionPayload(); const error_ty = ty.errorUnionSet(); - const error_val = if (val.errorUnionIsPayload()) Value.zero else val; + const error_val = if (val.errorUnionIsPayload()) try mod.intValue(Type.anyerror, 0) else val; if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return dg.renderValue(writer, error_ty, error_val, location); @@ -3611,7 +3611,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { const ptr_val = try f.resolveInst(bin_op.lhs); const src_ty = f.typeOf(bin_op.rhs); - const val_is_undef = if (f.air.value(bin_op.rhs, mod)) |v| v.isUndefDeep() else false; + const val_is_undef = if (try f.air.value(bin_op.rhs, mod)) |v| v.isUndefDeep() else false; if (val_is_undef) { try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); @@ -4183,7 +4183,7 @@ fn airCall( callee: { known: { const fn_decl = fn_decl: { - const callee_val = f.air.value(pl_op.operand, mod) orelse break :known; + const callee_val = (try f.air.value(pl_op.operand, mod)) orelse break :known; break :fn_decl switch (callee_val.tag()) { .extern_fn => callee_val.castTag(.extern_fn).?.data.owner_decl, .function => callee_val.castTag(.function).?.data.owner_decl, @@ -4269,7 +4269,7 @@ fn airDbgVar(f: *Function, inst: Air.Inst.Index) !CValue { const mod = 
f.object.dg.module; const pl_op = f.air.instructions.items(.data)[inst].pl_op; const name = f.air.nullTerminatedString(pl_op.payload); - const operand_is_undef = if (f.air.value(pl_op.operand, mod)) |v| v.isUndefDeep() else false; + const operand_is_undef = if (try f.air.value(pl_op.operand, mod)) |v| v.isUndefDeep() else false; if (!operand_is_undef) _ = try f.resolveInst(pl_op.operand); try reap(f, inst, &.{pl_op.operand}); @@ -4735,7 +4735,7 @@ fn airSwitchBr(f: *Function, inst: Air.Inst.Index) !CValue { try f.renderType(writer, Type.usize); try writer.writeByte(')'); } - try f.object.dg.renderValue(writer, condition_ty, f.air.value(item, mod).?, .Other); + try f.object.dg.renderValue(writer, condition_ty, (try f.air.value(item, mod)).?, .Other); try writer.writeByte(':'); } try writer.writeByte(' '); @@ -5069,7 +5069,7 @@ fn airIsNull( // operand is a regular pointer, test `operand !=/== NULL` TypedValue{ .ty = optional_ty, .val = Value.null } else if (payload_ty.zigTypeTag(mod) == .ErrorSet) - TypedValue{ .ty = payload_ty, .val = Value.zero } + TypedValue{ .ty = payload_ty, .val = try mod.intValue(payload_ty, 0) } else if (payload_ty.isSlice(mod) and optional_ty.optionalReprIsPayload(mod)) rhs: { try writer.writeAll(".ptr"); const slice_ptr_ty = payload_ty.slicePtrFieldType(&slice_ptr_buf, mod); @@ -5325,7 +5325,7 @@ fn airFieldParentPtr(f: *Function, inst: Air.Inst.Index) !CValue { }, .end => { try f.writeCValue(writer, field_ptr_val, .Other); - try writer.print(" - {}", .{try f.fmtIntLiteral(Type.usize, Value.one)}); + try writer.print(" - {}", .{try f.fmtIntLiteral(Type.usize, try mod.intValue(Type.usize, 1))}); }, } @@ -5378,7 +5378,7 @@ fn fieldPtr( .end => { try writer.writeByte('('); try f.writeCValue(writer, container_ptr_val, .Other); - try writer.print(" + {})", .{try f.fmtIntLiteral(Type.usize, Value.one)}); + try writer.print(" + {})", .{try f.fmtIntLiteral(Type.usize, try mod.intValue(Type.usize, 1))}); }, } @@ -5546,7 +5546,7 @@ fn airUnwrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue { else try f.writeCValueMember(writer, operand, .{ .identifier = "error" }) else - try f.object.dg.renderValue(writer, error_ty, Value.zero, .Initializer); + try f.object.dg.renderValue(writer, error_ty, try mod.intValue(error_ty, 0), .Initializer); } try writer.writeAll(";\n"); return local; @@ -5673,7 +5673,7 @@ fn airErrUnionPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue { if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { try f.writeCValueDeref(writer, operand); try writer.writeAll(" = "); - try f.object.dg.renderValue(writer, error_ty, Value.zero, .Other); + try f.object.dg.renderValue(writer, error_ty, try mod.intValue(error_ty, 0), .Other); try writer.writeAll(";\n "); return operand; @@ -5681,7 +5681,7 @@ fn airErrUnionPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue { try reap(f, inst, &.{ty_op.operand}); try f.writeCValueDeref(writer, operand); try writer.writeAll(".error = "); - try f.object.dg.renderValue(writer, error_ty, Value.zero, .Other); + try f.object.dg.renderValue(writer, error_ty, try mod.intValue(error_ty, 0), .Other); try writer.writeAll(";\n"); // Then return the payload pointer (only if it is used) @@ -5737,7 +5737,7 @@ fn airWrapErrUnionPay(f: *Function, inst: Air.Inst.Index) !CValue { else try f.writeCValueMember(writer, local, .{ .identifier = "error" }); try a.assign(f, writer); - try f.object.dg.renderValue(writer, err_ty, Value.zero, .Other); + try f.object.dg.renderValue(writer, err_ty, try mod.intValue(err_ty, 0), .Other); 
try a.end(f, writer); } return local; @@ -5768,11 +5768,11 @@ fn airIsErr(f: *Function, inst: Air.Inst.Index, is_ptr: bool, operator: []const else try f.writeCValue(writer, operand, .Other) else - try f.object.dg.renderValue(writer, error_ty, Value.zero, .Other); + try f.object.dg.renderValue(writer, error_ty, try mod.intValue(error_ty, 0), .Other); try writer.writeByte(' '); try writer.writeAll(operator); try writer.writeByte(' '); - try f.object.dg.renderValue(writer, error_ty, Value.zero, .Other); + try f.object.dg.renderValue(writer, error_ty, try mod.intValue(error_ty, 0), .Other); try writer.writeAll(";\n"); return local; } @@ -5798,7 +5798,7 @@ fn airArrayToSlice(f: *Function, inst: Air.Inst.Index) !CValue { } else if (array_ty.hasRuntimeBitsIgnoreComptime(mod)) { try writer.writeAll("&("); try f.writeCValueDeref(writer, operand); - try writer.print(")[{}]", .{try f.fmtIntLiteral(Type.usize, Value.zero)}); + try writer.print(")[{}]", .{try f.fmtIntLiteral(Type.usize, try mod.intValue(Type.usize, 0))}); } else try f.writeCValue(writer, operand, .Initializer); try writer.writeAll("; "); @@ -6022,7 +6022,7 @@ fn airCmpBuiltinCall( try writer.writeByte(')'); if (!ref_ret) try writer.print(" {s} {}", .{ compareOperatorC(operator), - try f.fmtIntLiteral(Type.i32, Value.zero), + try f.fmtIntLiteral(Type.i32, try mod.intValue(Type.i32, 0)), }); try writer.writeAll(";\n"); try v.end(f, inst, writer); @@ -6278,7 +6278,7 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { const value = try f.resolveInst(bin_op.rhs); const elem_ty = f.typeOf(bin_op.rhs); const elem_abi_size = elem_ty.abiSize(mod); - const val_is_undef = if (f.air.value(bin_op.rhs, mod)) |val| val.isUndefDeep() else false; + const val_is_undef = if (try f.air.value(bin_op.rhs, mod)) |val| val.isUndefDeep() else false; const writer = f.object.writer(); if (val_is_undef) { @@ -6326,7 +6326,7 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { try writer.writeAll("for ("); try f.writeCValue(writer, index, .Other); try writer.writeAll(" = "); - try f.object.dg.renderValue(writer, Type.usize, Value.zero, .Initializer); + try f.object.dg.renderValue(writer, Type.usize, try mod.intValue(Type.usize, 0), .Initializer); try writer.writeAll("; "); try f.writeCValue(writer, index, .Other); try writer.writeAll(" != "); @@ -6677,27 +6677,27 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue { std.heap.stackFallback(@sizeOf(ExpectedContents), arena.allocator()); try f.object.dg.renderValue(writer, scalar_ty, switch (reduce.operation) { - .Or, .Xor, .Add => Value.zero, + .Or, .Xor, .Add => try mod.intValue(scalar_ty, 0), .And => switch (scalar_ty.zigTypeTag(mod)) { - .Bool => Value.one, + .Bool => try mod.intValue(Type.comptime_int, 1), else => switch (scalar_ty.intInfo(mod).signedness) { .unsigned => try scalar_ty.maxIntScalar(mod), - .signed => Value.negative_one, + .signed => try mod.intValue(scalar_ty, -1), }, }, .Min => switch (scalar_ty.zigTypeTag(mod)) { - .Bool => Value.one, + .Bool => try mod.intValue(Type.comptime_int, 1), .Int => try scalar_ty.maxIntScalar(mod), .Float => try Value.floatToValue(std.math.nan(f128), stack.get(), scalar_ty, target), else => unreachable, }, .Max => switch (scalar_ty.zigTypeTag(mod)) { - .Bool => Value.zero, + .Bool => try mod.intValue(scalar_ty, 0), .Int => try scalar_ty.minInt(stack.get(), mod), .Float => try Value.floatToValue(std.math.nan(f128), stack.get(), scalar_ty, target), else => unreachable, }, - .Mul => Value.one, + .Mul => try 
mod.intValue(Type.comptime_int, 1), }, .Initializer); try writer.writeAll(";\n"); @@ -7686,13 +7686,13 @@ const Vectorize = struct { try writer.writeAll("for ("); try f.writeCValue(writer, local, .Other); - try writer.print(" = {d}; ", .{try f.fmtIntLiteral(Type.usize, Value.zero)}); + try writer.print(" = {d}; ", .{try f.fmtIntLiteral(Type.usize, try mod.intValue(Type.usize, 0))}); try f.writeCValue(writer, local, .Other); try writer.print(" < {d}; ", .{ try f.fmtIntLiteral(Type.usize, len_val), }); try f.writeCValue(writer, local, .Other); - try writer.print(" += {d}) {{\n", .{try f.fmtIntLiteral(Type.usize, Value.one)}); + try writer.print(" += {d}) {{\n", .{try f.fmtIntLiteral(Type.usize, try mod.intValue(Type.usize, 1))}); f.object.indent_writer.pushIndent(); break :index .{ .index = local }; diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 9d8c3edaf5..c42719d07c 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -2854,7 +2854,7 @@ pub const DeclGen = struct { }, .Array => { const elem_ty = t.childType(mod); - assert(elem_ty.onePossibleValue(mod) == null); + if (std.debug.runtime_safety) assert((try elem_ty.onePossibleValue(mod)) == null); const elem_llvm_ty = try dg.lowerType(elem_ty); const total_len = t.arrayLen(mod) + @boolToInt(t.sentinel(mod) != null); return elem_llvm_ty.arrayType(@intCast(c_uint, total_len)); @@ -3588,7 +3588,7 @@ pub const DeclGen = struct { if (!payload_type.hasRuntimeBitsIgnoreComptime(mod)) { // We use the error type directly as the type. - const err_val = if (!is_pl) tv.val else Value.zero; + const err_val = if (!is_pl) tv.val else try mod.intValue(Type.anyerror, 0); return dg.lowerValue(.{ .ty = Type.anyerror, .val = err_val }); } @@ -3596,7 +3596,7 @@ pub const DeclGen = struct { const error_align = Type.anyerror.abiAlignment(mod); const llvm_error_value = try dg.lowerValue(.{ .ty = Type.anyerror, - .val = if (is_pl) Value.zero else tv.val, + .val = if (is_pl) try mod.intValue(Type.anyerror, 0) else tv.val, }); const llvm_payload_value = try dg.lowerValue(.{ .ty = payload_type, @@ -4476,7 +4476,7 @@ pub const FuncGen = struct { const mod = self.dg.module; const llvm_val = try self.resolveValue(.{ .ty = self.typeOf(inst), - .val = self.air.value(inst, mod).?, + .val = (try self.air.value(inst, mod)).?, }); gop.value_ptr.* = llvm_val; return llvm_val; @@ -6873,7 +6873,7 @@ pub const FuncGen = struct { const err_union_ty = self.typeOf(ty_op.operand).childType(mod); const payload_ty = err_union_ty.errorUnionPayload(); - const non_error_val = try self.dg.lowerValue(.{ .ty = Type.anyerror, .val = Value.zero }); + const non_error_val = try self.dg.lowerValue(.{ .ty = Type.anyerror, .val = try mod.intValue(Type.anyerror, 0) }); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { _ = self.builder.buildStore(non_error_val, operand); return operand; @@ -8203,7 +8203,7 @@ pub const FuncGen = struct { const ptr_ty = self.typeOf(bin_op.lhs); const operand_ty = ptr_ty.childType(mod); - const val_is_undef = if (self.air.value(bin_op.rhs, mod)) |val| val.isUndefDeep() else false; + const val_is_undef = if (try self.air.value(bin_op.rhs, mod)) |val| val.isUndefDeep() else false; if (val_is_undef) { // Even if safety is disabled, we still emit a memset to undefined since it conveys // extra information to LLVM. 
However, safety makes the difference between using @@ -8494,7 +8494,7 @@ pub const FuncGen = struct { const dest_ptr = self.sliceOrArrayPtr(dest_slice, ptr_ty); const is_volatile = ptr_ty.isVolatilePtr(mod); - if (self.air.value(bin_op.rhs, mod)) |elem_val| { + if (try self.air.value(bin_op.rhs, mod)) |elem_val| { if (elem_val.isUndefDeep()) { // Even if safety is disabled, we still emit a memset to undefined since it conveys // extra information to LLVM. However, safety makes the difference between using @@ -9323,7 +9323,7 @@ pub const FuncGen = struct { var indices: [2]*llvm.Value = .{ llvm_u32.constNull(), undefined }; for (elements, 0..) |elem, i| { - if (result_ty.structFieldValueComptime(mod, i) != null) continue; + if ((try result_ty.structFieldValueComptime(mod, i)) != null) continue; const llvm_elem = try self.resolveInst(elem); const llvm_i = llvmFieldIndex(result_ty, i, mod, &ptr_ty_buf).?; @@ -9344,7 +9344,7 @@ pub const FuncGen = struct { } else { var result = llvm_result_ty.getUndef(); for (elements, 0..) |elem, i| { - if (result_ty.structFieldValueComptime(mod, i) != null) continue; + if ((try result_ty.structFieldValueComptime(mod, i)) != null) continue; const llvm_elem = try self.resolveInst(elem); const llvm_i = llvmFieldIndex(result_ty, i, mod, &ptr_ty_buf).?; diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 32e0c13c37..3842da5f7b 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -232,7 +232,7 @@ pub const DeclGen = struct { /// Fetch the result-id for a previously generated instruction or constant. fn resolve(self: *DeclGen, inst: Air.Inst.Ref) !IdRef { const mod = self.module; - if (self.air.value(inst, mod)) |val| { + if (try self.air.value(inst, mod)) |val| { const ty = self.typeOf(inst); if (ty.zigTypeTag(mod) == .Fn) { const fn_decl_index = switch (val.tag()) { @@ -584,7 +584,7 @@ pub const DeclGen = struct { // TODO: Properly lower function pointers. For now we are going to hack around it and // just generate an empty pointer. Function pointers are represented by usize for now, // though. 
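The airAggregateInit hunks above skip any field whose value is comptime-known, since such fields never occupy runtime storage. A standalone illustration:

    const std = @import("std");

    test "comptime fields take no runtime storage" {
        const S = struct {
            a: u32,
            comptime b: u32 = 42,
        };
        const s = S{ .a = 1 };
        try std.testing.expectEqual(@as(u32, 42), s.b);
        // Only `a` is materialized; aggregate initialization never stores `b`.
        try std.testing.expectEqual(@sizeOf(u32), @sizeOf(S));
    }
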
-                    try self.addInt(Type.usize, Value.zero);
+                    try self.addInt(Type.usize, Value.zero_usize); // TODO: Add dependency
                     return;
                 },
@@ -803,7 +803,7 @@ pub const DeclGen = struct {
             .ErrorUnion => {
                 const payload_ty = ty.errorUnionPayload();
                 const is_pl = val.errorUnionIsPayload();
-                const error_val = if (!is_pl) val else Value.zero;
+                const error_val = if (!is_pl) val else try mod.intValue(Type.anyerror, 0);
 
                 const eu_layout = dg.errorUnionLayout(payload_ty);
                 if (!eu_layout.payload_has_bits) {
@@ -2801,7 +2801,7 @@ pub const DeclGen = struct {
         const value = try self.resolve(bin_op.rhs);
         const ptr_ty_ref = try self.resolveType(ptr_ty, .direct);
 
-        const val_is_undef = if (self.air.value(bin_op.rhs, mod)) |val| val.isUndefDeep() else false;
+        const val_is_undef = if (try self.air.value(bin_op.rhs, mod)) |val| val.isUndefDeep() else false;
         if (val_is_undef) {
             const undef = try self.spv.constUndef(ptr_ty_ref);
             try self.store(ptr_ty, ptr, undef);
@@ -3141,7 +3141,7 @@ pub const DeclGen = struct {
             const label = IdRef{ .id = first_case_label.id + case_i };
             for (items) |item| {
-                const value = self.air.value(item, mod) orelse {
+                const value = (try self.air.value(item, mod)) orelse {
                     return self.todo("switch on runtime value???", .{});
                 };
                 const int_val = switch (cond_ty.zigTypeTag(mod)) {
diff --git a/src/type.zig b/src/type.zig
index e60d216085..e6d0af9f46 100644
--- a/src/type.zig
+++ b/src/type.zig
@@ -3377,7 +3377,7 @@ pub const Type = struct {
     }
 
     /// For vectors, returns the element type. Otherwise returns self.
-    pub fn scalarType(ty: Type, mod: *const Module) Type {
+    pub fn scalarType(ty: Type, mod: *Module) Type {
         return switch (ty.zigTypeTag(mod)) {
             .Vector => ty.childType(mod),
             else => ty,
@@ -3941,13 +3941,13 @@ pub const Type = struct {
     /// During semantic analysis, instead call `Sema.typeHasOnePossibleValue` which
     /// resolves field types rather than asserting they are already resolved.
-    pub fn onePossibleValue(starting_type: Type, mod: *const Module) ?Value {
+    pub fn onePossibleValue(starting_type: Type, mod: *Module) !?Value {
         var ty = starting_type;
 
         if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) {
             .int_type => |int_type| {
                 if (int_type.bits == 0) {
-                    return Value.zero;
+                    return try mod.intValue(ty, 0);
                 } else {
                     return null;
                 }
@@ -3956,13 +3956,13 @@ pub const Type = struct {
             .array_type => |array_type| {
                 if (array_type.len == 0)
                     return Value.initTag(.empty_array);
-                if (array_type.child.toType().onePossibleValue(mod) != null)
+                if ((try array_type.child.toType().onePossibleValue(mod)) != null)
                     return Value.initTag(.the_only_possible_value);
                 return null;
             },
             .vector_type => |vector_type| {
                 if (vector_type.len == 0) return Value.initTag(.empty_array);
-                if (vector_type.child.toType().onePossibleValue(mod)) |v| return v;
+                if (try vector_type.child.toType().onePossibleValue(mod)) |v| return v;
                 return null;
             },
             .opt_type => |child| {
@@ -4055,7 +4055,7 @@ pub const Type = struct {
                 assert(s.haveFieldTypes());
                 for (s.fields.values()) |field| {
                     if (field.is_comptime) continue;
-                    if (field.ty.onePossibleValue(mod) != null) continue;
+                    if ((try field.ty.onePossibleValue(mod)) != null) continue;
                     return null;
                 }
                 return Value.initTag(.empty_struct_value);
@@ -4066,7 +4066,7 @@ pub const Type = struct {
                 for (tuple.values, 0..) |val, i| {
                     const is_comptime = val.ip_index != .unreachable_value;
                     if (is_comptime) continue;
-                    if (tuple.types[i].onePossibleValue(mod) != null) continue;
+                    if ((try tuple.types[i].onePossibleValue(mod)) != null) continue;
                     return null;
                 }
                 return Value.initTag(.empty_struct_value);
@@ -4089,7 +4089,7 @@ pub const Type = struct {
                 switch (enum_full.fields.count()) {
                     0 => return Value.@"unreachable",
                     1 => if (enum_full.values.count() == 0) {
-                        return Value.zero; // auto-numbered
+                        return try mod.intValue(ty, 0); // auto-numbered
                     } else {
                         return enum_full.values.keys()[0];
                     },
@@ -4100,24 +4100,24 @@ pub const Type = struct {
                 const enum_simple = ty.castTag(.enum_simple).?.data;
                 switch (enum_simple.fields.count()) {
                     0 => return Value.@"unreachable",
-                    1 => return Value.zero,
+                    1 => return try mod.intValue(ty, 0),
                     else => return null,
                 }
             },
             .enum_nonexhaustive => {
                 const tag_ty = ty.castTag(.enum_nonexhaustive).?.data.tag_ty;
                 if (!tag_ty.hasRuntimeBits(mod)) {
-                    return Value.zero;
+                    return try mod.intValue(ty, 0);
                 } else {
                     return null;
                 }
             },
             .@"union", .union_safety_tagged, .union_tagged => {
                 const union_obj = ty.cast(Payload.Union).?.data;
-                const tag_val = union_obj.tag_ty.onePossibleValue(mod) orelse return null;
+                const tag_val = (try union_obj.tag_ty.onePossibleValue(mod)) orelse return null;
                 if (union_obj.fields.count() == 0) return Value.@"unreachable";
                 const only_field = union_obj.fields.values()[0];
-                const val_val = only_field.ty.onePossibleValue(mod) orelse return null;
+                const val_val = (try only_field.ty.onePossibleValue(mod)) orelse return null;
                 _ = tag_val;
                 _ = val_val;
                 return Value.initTag(.empty_struct_value);
@@ -4128,7 +4128,7 @@ pub const Type = struct {
             .array => {
                 if (ty.arrayLen(mod) == 0)
                     return Value.initTag(.empty_array);
-                if (ty.childType(mod).onePossibleValue(mod) != null)
+                if ((try ty.childType(mod).onePossibleValue(mod)) != null)
                     return Value.initTag(.the_only_possible_value);
                 return null;
             },
@@ -4365,8 +4365,8 @@ pub const Type = struct {
     /// Asserts that the type is an integer.
     pub fn minIntScalar(ty: Type, mod: *Module) !Value {
         const info = ty.intInfo(mod);
-        if (info.signedness == .unsigned) return Value.zero;
-        if (info.bits == 0) return Value.negative_one;
+        if (info.signedness == .unsigned) return mod.intValue(ty, 0);
+        if (info.bits == 0) return mod.intValue(ty, -1);
 
         if (std.math.cast(u6, info.bits - 1)) |shift| {
             const n = @as(i64, std.math.minInt(i64)) >> (63 - shift);
@@ -4392,17 +4392,17 @@ pub const Type = struct {
     }
 
     /// Asserts that the type is an integer.
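Because `onePossibleValue` can now materialize an integer through `mod.intValue`, its result type changes from `?Value` to `!?Value`, an error union wrapped around an optional, and every caller gains a `try`. A minimal sketch of the two call-site shapes, taken from the hunks above (assumes a `mod: *Module` and a fallible enclosing function):

    // Null-checking: parenthesize the `try` before comparing against null.
    if ((try field.ty.onePossibleValue(mod)) != null) continue;

    // Unwrapping: apply `orelse` to the optional payload after the `try`.
    const opv = (try ty.onePossibleValue(mod)) orelse return null;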
-    pub fn maxIntScalar(self: Type, mod: *Module) !Value {
-        const info = self.intInfo(mod);
+    pub fn maxIntScalar(ty: Type, mod: *Module) !Value {
+        const info = ty.intInfo(mod);
         switch (info.bits) {
             0 => return switch (info.signedness) {
-                .signed => Value.negative_one,
-                .unsigned => Value.zero,
+                .signed => mod.intValue(ty, -1),
+                .unsigned => mod.intValue(ty, 0),
             },
             1 => return switch (info.signedness) {
-                .signed => Value.zero,
-                .unsigned => Value.one,
+                .signed => mod.intValue(ty, 0),
+                .unsigned => mod.intValue(ty, 0),
             },
             else => {},
         }
@@ -4662,7 +4662,7 @@ pub const Type = struct {
         }
     }
 
-    pub fn structFieldValueComptime(ty: Type, mod: *const Module, index: usize) ?Value {
+    pub fn structFieldValueComptime(ty: Type, mod: *Module, index: usize) !?Value {
         switch (ty.tag()) {
             .@"struct" => {
                 const struct_obj = ty.castTag(.@"struct").?.data;
diff --git a/src/value.zig b/src/value.zig
index eced9ba345..8268d1dde1 100644
--- a/src/value.zig
+++ b/src/value.zig
@@ -1022,7 +1022,7 @@ pub const Value = struct {
                 if (opt_val) |some| {
                     return some.writeToMemory(child, mod, buffer);
                 } else {
-                    return writeToMemory(Value.zero, Type.usize, mod, buffer);
+                    return writeToMemory(try mod.intValue(Type.usize, 0), Type.usize, mod, buffer);
                 }
             },
             else => return error.Unimplemented,
@@ -1124,7 +1124,7 @@ pub const Value = struct {
                     .Packed => {
                         const field_index = ty.unionTagFieldIndex(val.unionTag(), mod);
                         const field_type = ty.unionFields().values()[field_index.?].ty;
-                        const field_val = val.fieldValue(field_type, mod, field_index.?);
+                        const field_val = try val.fieldValue(field_type, mod, field_index.?);
                         return field_val.writeToPackedMemory(field_type, mod, buffer, bit_offset);
                     },
                 }
@@ -1141,7 +1141,7 @@ pub const Value = struct {
                 if (opt_val) |some| {
                     return some.writeToPackedMemory(child, mod, buffer, bit_offset);
                 } else {
-                    return writeToPackedMemory(Value.zero, Type.usize, mod, buffer, bit_offset);
+                    return writeToPackedMemory(try mod.intValue(Type.usize, 0), Type.usize, mod, buffer, bit_offset);
                 }
             },
             else => @panic("TODO implement writeToPackedMemory for more types"),
@@ -1173,7 +1173,7 @@ pub const Value = struct {
                 const int_info = ty.intInfo(mod);
                 const bits = int_info.bits;
                 const byte_count = (bits + 7) / 8;
-                if (bits == 0 or buffer.len == 0) return Value.zero;
+                if (bits == 0 or buffer.len == 0) return mod.intValue(ty, 0);
 
                 if (bits <= 64) switch (int_info.signedness) { // Fast path for integers <= u64
                     .signed => {
@@ -1290,12 +1290,12 @@ pub const Value = struct {
                 }
             },
             .Int, .Enum => {
-                if (buffer.len == 0) return Value.zero;
+                if (buffer.len == 0) return mod.intValue(ty, 0);
                 const int_info = ty.intInfo(mod);
                 const abi_size = @intCast(usize, ty.abiSize(mod));
 
                 const bits = int_info.bits;
-                if (bits == 0) return Value.zero;
+                if (bits == 0) return mod.intValue(ty, 0);
 
                 if (bits <= 64) switch (int_info.signedness) { // Fast path for integers <= u64
                     .signed => return mod.intValue(ty, std.mem.readVarPackedInt(i64, buffer, bit_offset, bits, endian, .signed)),
                     .unsigned => return mod.intValue(ty, std.mem.readVarPackedInt(u64, buffer, bit_offset, bits, endian, .unsigned)),
@@ -2091,11 +2091,11 @@ pub const Value = struct {
                 // .the_one_possible_value,
                 // .aggregate,
                 // Note that we already checked above for matching tags, e.g. both .aggregate.
-                return ty.onePossibleValue(mod) != null;
+                return (try ty.onePossibleValue(mod)) != null;
             },
             .Union => {
                 // Here we have to check for value equality, as-if `a` has been coerced to `ty`.
-                if (ty.onePossibleValue(mod) != null) {
+                if ((try ty.onePossibleValue(mod)) != null) {
                     return true;
                 }
                 if (a_ty.castTag(.anon_struct)) |payload| {
@@ -2604,7 +2604,7 @@ pub const Value = struct {
                 if (data.container_ptr.pointerDecl()) |decl_index| {
                     const container_decl = mod.declPtr(decl_index);
                     const field_type = data.container_ty.structFieldType(data.field_index);
-                    const field_val = container_decl.val.fieldValue(field_type, mod, data.field_index);
+                    const field_val = try container_decl.val.fieldValue(field_type, mod, data.field_index);
                     return field_val.elemValue(mod, index);
                 } else unreachable;
             },
@@ -2723,7 +2723,7 @@ pub const Value = struct {
         };
     }
 
-    pub fn fieldValue(val: Value, ty: Type, mod: *const Module, index: usize) Value {
+    pub fn fieldValue(val: Value, ty: Type, mod: *Module, index: usize) !Value {
         switch (val.ip_index) {
             .undef => return Value.undef,
             .none => switch (val.tag()) {
@@ -2737,14 +2737,14 @@ pub const Value = struct {
                     return payload.val;
                 },
 
-                .the_only_possible_value => return ty.onePossibleValue(mod).?,
+                .the_only_possible_value => return (try ty.onePossibleValue(mod)).?,
 
                 .empty_struct_value => {
                     if (ty.isSimpleTupleOrAnonStruct()) {
                         const tuple = ty.tupleFields();
                         return tuple.values[index];
                     }
-                    if (ty.structFieldValueComptime(mod, index)) |some| {
+                    if (try ty.structFieldValueComptime(mod, index)) |some| {
                         return some;
                     }
                     unreachable;
@@ -2968,7 +2968,7 @@ pub const Value = struct {
         switch (val.ip_index) {
             .undef => return val,
             .none => switch (val.tag()) {
-                .the_only_possible_value => return Value.zero, // for i0, u0
+                .the_only_possible_value => return Value.float_zero, // for i0, u0
                 .lazy_align => {
                     const ty = val.castTag(.lazy_align).?.data;
                     if (opt_sema) |sema| {
@@ -3402,7 +3402,7 @@ pub const Value = struct {
         if (lhs.isUndef() or rhs.isUndef()) return Value.undef;
 
         const anded = try bitwiseAnd(lhs, rhs, ty, arena, mod);
-        const all_ones = if (ty.isSignedInt(mod)) Value.negative_one else try ty.maxIntScalar(mod);
+        const all_ones = if (ty.isSignedInt(mod)) try mod.intValue(ty, -1) else try ty.maxIntScalar(mod);
         return bitwiseXor(anded, all_ones, ty, arena, mod);
     }
 
@@ -3803,7 +3803,7 @@ pub const Value = struct {
         bits: u16,
         mod: *Module,
     ) !Value {
-        if (bits == 0) return Value.zero;
+        if (bits == 0) return mod.intValue(ty, 0);
 
         var val_space: Value.BigIntSpace = undefined;
         const val_bigint = val.toBigInt(&val_space, mod);
@@ -4011,9 +4011,9 @@ pub const Value = struct {
             // The shift is enough to remove all the bits from the number, which means the
             // result is 0 or -1 depending on the sign.
             if (lhs_bigint.positive) {
-                return Value.zero;
+                return mod.intValue(ty, 0);
             } else {
-                return Value.negative_one;
+                return mod.intValue(ty, -1);
             }
         }
 
@@ -5151,10 +5151,9 @@ pub const Value = struct {
 
     pub const BigIntSpace = InternPool.Key.Int.Storage.BigIntSpace;
 
-    pub const zero: Value = .{ .ip_index = .zero, .legacy = undefined };
-    pub const one: Value = .{ .ip_index = .one, .legacy = undefined };
-    pub const negative_one: Value = .{ .ip_index = .negative_one, .legacy = undefined };
+    pub const zero_usize: Value = .{ .ip_index = .zero_usize, .legacy = undefined };
     pub const undef: Value = .{ .ip_index = .undef, .legacy = undefined };
+    pub const float_zero: Value = .{ .ip_index = .zero, .legacy = undefined }; // TODO: replace this!
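The deleted `Value.zero`, `Value.one`, and `Value.negative_one` constants were type-less: one interned value stood in for every integer type. The direction visible in these hunks is that an integer value is created together with its type through the Module, which is also why construction becomes fallible. A minimal sketch of the migration pattern (assuming a `mod: *Module` in scope):

    // Before: one shared, type-less constant.
    const old_style = Value.zero;

    // After: the value is interned with an explicit type; interning can
    // allocate, so the call site needs `try`.
    const new_style = try mod.intValue(Type.usize, 0);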
pub const @"void": Value = .{ .ip_index = .void_value, .legacy = undefined }; pub const @"null": Value = .{ .ip_index = .null_value, .legacy = undefined }; pub const @"false": Value = .{ .ip_index = .bool_false, .legacy = undefined }; @@ -5169,7 +5168,9 @@ pub const Value = struct { } pub fn boolToInt(x: bool) Value { - return if (x) Value.one else Value.zero; + const zero: Value = .{ .ip_index = .zero, .legacy = undefined }; + const one: Value = .{ .ip_index = .one, .legacy = undefined }; + return if (x) one else zero; } pub const RuntimeIndex = enum(u32) { -- cgit v1.2.3 From a5fb16959423005de999fb541d5d5e9aebb8e09e Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sun, 7 May 2023 15:07:28 -0700 Subject: stage2: bug fixes related to Type/Value/InternPool --- src/Sema.zig | 64 ++++++++++++++++++++++----------------------- src/arch/x86_64/CodeGen.zig | 2 +- src/codegen/c.zig | 8 +++--- src/codegen/llvm.zig | 28 ++++++++++++-------- src/type.zig | 48 ++++++++++++++++++++-------------- src/value.zig | 10 ++++++- 6 files changed, 92 insertions(+), 68 deletions(-) (limited to 'src/arch') diff --git a/src/Sema.zig b/src/Sema.zig index 9b1da74982..ca6f28017b 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -1746,8 +1746,9 @@ pub fn resolveInst(sema: *Sema, zir_ref: Zir.Inst.Ref) !Air.Inst.Ref { if (i < InternPool.static_len) return @intToEnum(Air.Inst.Ref, i); // The last section of indexes refers to the map of ZIR => AIR. const inst = sema.inst_map.get(i - InternPool.static_len).?; + if (inst == .generic_poison) return error.GenericPoison; const ty = sema.typeOf(inst); - if (ty.isGenericPoison()) return error.GenericPoison; + assert(!ty.isGenericPoison()); return inst; } @@ -2000,7 +2001,7 @@ fn resolveMaybeUndefValAllowVariablesMaybeRuntime( .constant => { const ty_pl = air_datas[i].ty_pl; const val = sema.air_values.items[ty_pl.payload]; - if (val.tag() == .runtime_value) make_runtime.* = true; + if (val.isRuntimeValue()) make_runtime.* = true; if (val.isPtrToThreadLocal(sema.mod)) make_runtime.* = true; return val; }, @@ -9688,7 +9689,7 @@ fn intCast( // range shrinkage // requirement: int value fits into target type if (wanted_value_bits < actual_value_bits) { - const dest_max_val_scalar = try dest_scalar_ty.maxIntScalar(mod); + const dest_max_val_scalar = try dest_scalar_ty.maxIntScalar(mod, operand_ty); const dest_max_val = if (is_vector) try Value.Tag.repeated.create(sema.arena, dest_max_val_scalar) else @@ -10831,7 +10832,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError defer arena.deinit(); const min_int = try operand_ty.minInt(arena.allocator(), mod); - const max_int = try operand_ty.maxIntScalar(mod); + const max_int = try operand_ty.maxIntScalar(mod, Type.comptime_int); if (try range_set.spans(min_int, max_int, operand_ty)) { if (special_prong == .@"else") { return sema.fail( @@ -11683,7 +11684,7 @@ const RangeSetUnhandledIterator = struct { fn init(sema: *Sema, ty: Type, range_set: RangeSet) !RangeSetUnhandledIterator { const mod = sema.mod; const min = try ty.minInt(sema.arena, mod); - const max = try ty.maxIntScalar(mod); + const max = try ty.maxIntScalar(mod, Type.comptime_int); return RangeSetUnhandledIterator{ .sema = sema, @@ -12294,7 +12295,7 @@ fn zirShl( { const max_int = try sema.addConstant( lhs_ty, - try lhs_ty.maxInt(sema.arena, mod), + try lhs_ty.maxInt(sema.arena, mod, lhs_ty), ); const rhs_limited = try sema.analyzeMinMax(block, rhs_src, .min, &.{ rhs, max_int }, &.{ rhs_src, rhs_src }); break :rhs try sema.intCast(block, src, 
lhs_ty, rhs_src, rhs_limited, rhs_src, false); @@ -16503,7 +16504,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai else try std.fmt.allocPrintZ(anon_decl.arena(), "{d}", .{i}); const new_decl = try anon_decl.finish( - try Type.array(anon_decl.arena(), bytes.len, try mod.intValue(Type.u8, 0), Type.u8, mod), + try Type.array(anon_decl.arena(), bytes.len, Value.zero_u8, Type.u8, mod), try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]), 0, // default alignment ); @@ -22202,8 +22203,8 @@ fn analyzeMinMax( else => unreachable, }; const max_val = switch (air_tag) { - .min => try comptime_elem_ty.maxInt(sema.arena, mod), // @min(ct, rt) <= ct - .max => try unrefined_elem_ty.maxInt(sema.arena, mod), + .min => try comptime_elem_ty.maxInt(sema.arena, mod, Type.comptime_int), // @min(ct, rt) <= ct + .max => try unrefined_elem_ty.maxInt(sema.arena, mod, Type.comptime_int), else => unreachable, }; @@ -27931,33 +27932,32 @@ fn beginComptimePtrMutation( switch (parent.pointee) { .direct => |val_ptr| { const payload_ty = parent.ty.errorUnionPayload(); - switch (val_ptr.tag()) { - else => { - // An error union has been initialized to undefined at comptime and now we - // are for the first time setting the payload. We must change the - // representation of the error union from `undef` to `opt_payload`. - const arena = parent.beginArena(sema.mod); - defer parent.finishArena(sema.mod); + if (val_ptr.ip_index == .none and val_ptr.tag() == .eu_payload) { + return ComptimePtrMutationKit{ + .decl_ref_mut = parent.decl_ref_mut, + .pointee = .{ .direct = &val_ptr.castTag(.eu_payload).?.data }, + .ty = payload_ty, + }; + } else { + // An error union has been initialized to undefined at comptime and now we + // are for the first time setting the payload. We must change the + // representation of the error union from `undef` to `opt_payload`. 
+                        const arena = parent.beginArena(sema.mod);
+                        defer parent.finishArena(sema.mod);
 
-                            const payload = try arena.create(Value.Payload.SubValue);
-                            payload.* = .{
-                                .base = .{ .tag = .eu_payload },
-                                .data = Value.undef,
-                            };
+                        const payload = try arena.create(Value.Payload.SubValue);
+                        payload.* = .{
+                            .base = .{ .tag = .eu_payload },
+                            .data = Value.undef,
+                        };
 
-                            val_ptr.* = Value.initPayload(&payload.base);
+                        val_ptr.* = Value.initPayload(&payload.base);
 
-                            return ComptimePtrMutationKit{
-                                .decl_ref_mut = parent.decl_ref_mut,
-                                .pointee = .{ .direct = &payload.data },
-                                .ty = payload_ty,
-                            };
-                        },
-                        .eu_payload => return ComptimePtrMutationKit{
+                        return ComptimePtrMutationKit{
                             .decl_ref_mut = parent.decl_ref_mut,
-                            .pointee = .{ .direct = &val_ptr.castTag(.eu_payload).?.data },
+                            .pointee = .{ .direct = &payload.data },
                             .ty = payload_ty,
-                        },
+                        };
                     }
                 },
                 .bad_decl_ty, .bad_ptr_ty => return parent,
@@ -33225,7 +33225,7 @@ fn addConstUndef(sema: *Sema, ty: Type) CompileError!Air.Inst.Ref {
 
 pub fn addConstant(sema: *Sema, ty: Type, val: Value) SemaError!Air.Inst.Ref {
     const gpa = sema.gpa;
-    if (val.ip_index != .none) {
+    if (val.ip_index != .none and val.ip_index != .null_value) {
         if (@enumToInt(val.ip_index) < Air.ref_start_index)
             return @intToEnum(Air.Inst.Ref, @enumToInt(val.ip_index));
         try sema.air_instructions.append(gpa, .{
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index b208656a41..c5e3410947 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -4915,7 +4915,7 @@ fn airFloatSign(self: *Self, inst: Air.Inst.Index) !void {
 
     const sign_val = switch (tag) {
         .neg => try vec_ty.minInt(stack.get(), mod),
-        .fabs => try vec_ty.maxInt(stack.get(), mod),
+        .fabs => try vec_ty.maxInt(stack.get(), mod, vec_ty),
         else => unreachable,
     };
 
diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index aaeec05562..b688aada34 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -3542,7 +3542,7 @@ fn airTrunc(f: *Function, inst: Air.Inst.Index) !CValue {
         try v.elem(f, writer);
     } else switch (dest_int_info.signedness) {
         .unsigned => {
-            const mask_val = try inst_scalar_ty.maxIntScalar(mod);
+            const mask_val = try inst_scalar_ty.maxIntScalar(mod, scalar_ty);
 
             try writer.writeAll("zig_and_");
             try f.object.dg.renderTypeForBuiltinFnName(writer, scalar_ty);
             try writer.writeByte('(');
@@ -6681,13 +6681,13 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue {
         .And => switch (scalar_ty.zigTypeTag(mod)) {
             .Bool => try mod.intValue(Type.comptime_int, 1),
             else => switch (scalar_ty.intInfo(mod).signedness) {
-                .unsigned => try scalar_ty.maxIntScalar(mod),
+                .unsigned => try scalar_ty.maxIntScalar(mod, scalar_ty),
                 .signed => try mod.intValue(scalar_ty, -1),
             },
         },
         .Min => switch (scalar_ty.zigTypeTag(mod)) {
-            .Bool => try mod.intValue(Type.comptime_int, 1),
-            .Int => try scalar_ty.maxIntScalar(mod),
+            .Bool => Value.one_comptime_int,
+            .Int => try scalar_ty.maxIntScalar(mod, scalar_ty),
             .Float => try Value.floatToValue(std.math.nan(f128), stack.get(), scalar_ty, target),
             else => unreachable,
         },
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index c42719d07c..23340b5d34 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -3570,15 +3570,21 @@ pub const DeclGen = struct {
             },
             .ErrorSet => {
                 const llvm_ty = try dg.lowerType(Type.anyerror);
-                switch (tv.val.tag()) {
-                    .@"error" => {
-                        const err_name = tv.val.castTag(.@"error").?.data.name;
-                        const kv = try dg.module.getErrorValue(err_name);
-                        return llvm_ty.constInt(kv.value, .False);
+                switch (tv.val.ip_index) {
+                    .none => switch (tv.val.tag()) {
+                        .@"error" => {
+                            const err_name = tv.val.castTag(.@"error").?.data.name;
+                            const kv = try dg.module.getErrorValue(err_name);
+                            return llvm_ty.constInt(kv.value, .False);
+                        },
+                        else => {
+                            // In this case we are rendering an error union which has a 0 bits payload.
+                            return llvm_ty.constNull();
+                        },
                     },
-                    else => {
-                        // In this case we are rendering an error union which has a 0 bits payload.
-                        return llvm_ty.constNull();
+                    else => switch (mod.intern_pool.indexToKey(tv.val.ip_index)) {
+                        .int => |int| return llvm_ty.constInt(int.storage.u64, .False),
+                        else => unreachable,
                     },
                 }
             },
@@ -3588,7 +3594,7 @@ pub const DeclGen = struct {
 
                 if (!payload_type.hasRuntimeBitsIgnoreComptime(mod)) {
                     // We use the error type directly as the type.
-                    const err_val = if (!is_pl) tv.val else try mod.intValue(Type.anyerror, 0);
+                    const err_val = if (!is_pl) tv.val else try mod.intValue(Type.err_int, 0);
                     return dg.lowerValue(.{ .ty = Type.anyerror, .val = err_val });
                 }
 
@@ -3596,7 +3602,7 @@ pub const DeclGen = struct {
                 const error_align = Type.anyerror.abiAlignment(mod);
                 const llvm_error_value = try dg.lowerValue(.{
                     .ty = Type.anyerror,
-                    .val = if (is_pl) try mod.intValue(Type.anyerror, 0) else tv.val,
+                    .val = if (is_pl) try mod.intValue(Type.err_int, 0) else tv.val,
                 });
                 const llvm_payload_value = try dg.lowerValue(.{
                     .ty = payload_type,
@@ -6873,7 +6879,7 @@ pub const FuncGen = struct {
         const err_union_ty = self.typeOf(ty_op.operand).childType(mod);
 
         const payload_ty = err_union_ty.errorUnionPayload();
-        const non_error_val = try self.dg.lowerValue(.{ .ty = Type.anyerror, .val = try mod.intValue(Type.anyerror, 0) });
+        const non_error_val = try self.dg.lowerValue(.{ .ty = Type.anyerror, .val = try mod.intValue(Type.err_int, 0) });
         if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
             _ = self.builder.buildStore(non_error_val, operand);
             return operand;
diff --git a/src/type.zig b/src/type.zig
index e6d0af9f46..68ac507037 100644
--- a/src/type.zig
+++ b/src/type.zig
@@ -4382,8 +4382,9 @@ pub const Type = struct {
     }
 
     // Works for vectors and vectors of integers.
-    pub fn maxInt(ty: Type, arena: Allocator, mod: *Module) !Value {
-        const scalar = try maxIntScalar(ty.scalarType(mod), mod);
+    /// The returned Value will have type dest_ty.
+    pub fn maxInt(ty: Type, arena: Allocator, mod: *Module, dest_ty: Type) !Value {
+        const scalar = try maxIntScalar(ty.scalarType(mod), mod, dest_ty);
         if (ty.zigTypeTag(mod) == .Vector and scalar.tag() != .the_only_possible_value) {
             return Value.Tag.repeated.create(arena, scalar);
         } else {
@@ -4391,18 +4392,18 @@ pub const Type = struct {
     }
 
-    /// Asserts that the type is an integer.
-    pub fn maxIntScalar(ty: Type, mod: *Module) !Value {
+    /// The returned Value will have type dest_ty.
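The new `dest_ty` parameter decouples the type whose range is being queried from the type of the resulting value; previously the result was sometimes the queried type and sometimes `comptime_int`, chosen inside the function. Two call shapes from this commit, as a sketch (assumes `mod`, `operand_ty`, and `scalar_ty` as in the surrounding hunks):

    // Range checks want an unconstrained comptime_int:
    const max_int = try operand_ty.maxIntScalar(mod, Type.comptime_int);

    // Mask generation wants the all-ones value in the operand's own type:
    const mask_val = try inst_scalar_ty.maxIntScalar(mod, scalar_ty);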
+    pub fn maxIntScalar(ty: Type, mod: *Module, dest_ty: Type) !Value {
         const info = ty.intInfo(mod);
 
         switch (info.bits) {
             0 => return switch (info.signedness) {
-                .signed => mod.intValue(ty, -1),
-                .unsigned => mod.intValue(ty, 0),
+                .signed => try mod.intValue(dest_ty, -1),
+                .unsigned => try mod.intValue(dest_ty, 0),
             },
             1 => return switch (info.signedness) {
-                .signed => mod.intValue(ty, 0),
-                .unsigned => mod.intValue(ty, 0),
+                .signed => try mod.intValue(dest_ty, 0),
+                .unsigned => try mod.intValue(dest_ty, 1),
             },
             else => {},
         }
@@ -4410,11 +4411,11 @@ pub const Type = struct {
         if (std.math.cast(u6, info.bits - 1)) |shift| switch (info.signedness) {
             .signed => {
                 const n = @as(i64, std.math.maxInt(i64)) >> (63 - shift);
-                return mod.intValue(Type.comptime_int, n);
+                return mod.intValue(dest_ty, n);
             },
             .unsigned => {
                 const n = @as(u64, std.math.maxInt(u64)) >> (63 - shift);
-                return mod.intValue(Type.comptime_int, n);
+                return mod.intValue(dest_ty, n);
             },
         };
 
@@ -4423,7 +4424,7 @@ pub const Type = struct {
 
         try res.setTwosCompIntLimit(.max, info.signedness, info.bits);
 
-        return mod.intValue_big(Type.comptime_int, res.toConst());
+        return mod.intValue_big(dest_ty, res.toConst());
     }
 
     /// Asserts the type is an enum or a union.
@@ -5068,6 +5069,7 @@ pub const Type = struct {
 
     pub fn isSimpleTuple(ty: Type) bool {
         return switch (ty.ip_index) {
+            .empty_struct => true,
             .none => switch (ty.tag()) {
                 .tuple, .empty_struct_literal => true,
                 else => false,
@@ -5077,21 +5079,29 @@ pub const Type = struct {
     }
 
     pub fn isSimpleTupleOrAnonStruct(ty: Type) bool {
-        return switch (ty.tag()) {
-            .tuple, .empty_struct_literal, .anon_struct => true,
+        return switch (ty.ip_index) {
+            .empty_struct => true,
+            .none => switch (ty.tag()) {
+                .tuple, .empty_struct_literal, .anon_struct => true,
+                else => false,
+            },
             else => false,
         };
     }
 
     // Only allowed for simple tuple types
     pub fn tupleFields(ty: Type) Payload.Tuple.Data {
-        return switch (ty.tag()) {
-            .tuple => ty.castTag(.tuple).?.data,
-            .anon_struct => .{
-                .types = ty.castTag(.anon_struct).?.data.types,
-                .values = ty.castTag(.anon_struct).?.data.values,
+        return switch (ty.ip_index) {
+            .empty_struct => .{ .types = &.{}, .values = &.{} },
+            .none => switch (ty.tag()) {
+                .tuple => ty.castTag(.tuple).?.data,
+                .anon_struct => .{
+                    .types = ty.castTag(.anon_struct).?.data.types,
+                    .values = ty.castTag(.anon_struct).?.data.values,
+                },
+                .empty_struct_literal => .{ .types = &.{}, .values = &.{} },
+                else => unreachable,
             },
-            .empty_struct_literal => .{ .types = &.{}, .values = &.{} },
             else => unreachable,
         };
     }
diff --git a/src/value.zig b/src/value.zig
index 8268d1dde1..49ca651a79 100644
--- a/src/value.zig
+++ b/src/value.zig
@@ -2625,6 +2625,10 @@ pub const Value = struct {
         }
     }
 
+    pub fn isRuntimeValue(val: Value) bool {
+        return val.ip_index == .none and val.tag() == .runtime_value;
+    }
+
     pub fn tagIsVariable(val: Value) bool {
         return val.ip_index == .none and val.tag() == .variable;
     }
@@ -3402,7 +3406,7 @@ pub const Value = struct {
         if (lhs.isUndef() or rhs.isUndef()) return Value.undef;
 
         const anded = try bitwiseAnd(lhs, rhs, ty, arena, mod);
-        const all_ones = if (ty.isSignedInt(mod)) try mod.intValue(ty, -1) else try ty.maxIntScalar(mod);
+        const all_ones = if (ty.isSignedInt(mod)) try mod.intValue(ty, -1) else try ty.maxIntScalar(mod, ty);
         return bitwiseXor(anded, all_ones, ty, arena, mod);
     }
 
@@ -5152,6 +5156,10 @@ pub const Value = struct {
 
     pub const BigIntSpace = InternPool.Key.Int.Storage.BigIntSpace;
 
     pub const zero_usize: Value = .{ .ip_index = .zero_usize, .legacy = undefined };
+    pub const zero_u8: Value = .{ .ip_index = .zero_u8, .legacy = undefined };
+    pub const zero_comptime_int: Value = .{ .ip_index = .zero, .legacy = undefined };
+    pub const one_comptime_int: Value = .{ .ip_index = .one, .legacy = undefined };
+    pub const negative_one_comptime_int: Value = .{ .ip_index = .negative_one, .legacy = undefined };
     pub const undef: Value = .{ .ip_index = .undef, .legacy = undefined };
     pub const float_zero: Value = .{ .ip_index = .zero, .legacy = undefined }; // TODO: replace this!
     pub const @"void": Value = .{ .ip_index = .void_value, .legacy = undefined };
-- 
cgit v1.2.3 


From 4d88f825bc5eb14aa00446f046ab4714a4fdce70 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sun, 7 May 2023 15:38:31 -0700
Subject: stage2: implement intTagType logic

This commit changes a lot of `*const Module` to `*Module` to make it
work, since accessing the integer tag type of an enum might need to
mutate the InternPool by adding a new integer type into it.

An alternate strategy would be to pre-heat the InternPool with the
integer tag type when creating an enum type, which would make it so
that intTagType could accept a const Module instead of a mutable one,
asserting that the InternPool already had the integer tag type.

---
 src/Module.zig               | 29 ++++++++--------
 src/Sema.zig                 | 14 ++++----
 src/arch/aarch64/CodeGen.zig |  2 +-
 src/arch/aarch64/abi.zig     |  6 ++--
 src/arch/arm/CodeGen.zig     |  2 +-
 src/arch/arm/abi.zig         |  4 +--
 src/arch/riscv64/abi.zig     |  2 +-
 src/arch/sparc64/CodeGen.zig |  2 +-
 src/arch/wasm/CodeGen.zig    | 16 ++++-----
 src/arch/wasm/abi.zig        |  4 +--
 src/arch/x86_64/CodeGen.zig  |  4 +--
 src/arch/x86_64/abi.zig      |  4 +--
 src/codegen.zig              |  8 ++---
 src/codegen/c.zig            |  8 ++---
 src/codegen/c/type.zig       |  8 ++---
 src/codegen/llvm.zig         | 30 +++++++++--------
 src/codegen/spirv.zig        |  6 ++--
 src/type.zig                 | 80 ++++++++++++++++++++------------------------
 src/value.zig                | 44 ++++++++++++------------
 19 files changed, 136 insertions(+), 137 deletions(-)

(limited to 'src/arch')

diff --git a/src/Module.zig b/src/Module.zig
index 3f5dc8039e..ef38e6ff06 100644
--- a/src/Module.zig
+++ b/src/Module.zig
@@ -944,7 +944,7 @@ pub const Decl = struct {
         };
     }
 
-    pub fn getAlignment(decl: Decl, mod: *const Module) u32 {
+    pub fn getAlignment(decl: Decl, mod: *Module) u32 {
         assert(decl.has_tv);
         if (decl.@"align" != 0) {
             // Explicit alignment.
@@ -1053,7 +1053,7 @@ pub const Struct = struct {
         /// Returns the field alignment. If the struct is packed, returns 0.
         pub fn alignment(
             field: Field,
-            mod: *const Module,
+            mod: *Module,
             layout: std.builtin.Type.ContainerLayout,
         ) u32 {
             if (field.abi_align != 0) {
@@ -1076,7 +1076,7 @@ pub const Struct = struct {
             }
         }
 
-        pub fn alignmentExtern(field: Field, mod: *const Module) u32 {
+        pub fn alignmentExtern(field: Field, mod: *Module) u32 {
             // This logic is duplicated in Type.abiAlignmentAdvanced.
            const ty_abi_align = field.ty.abiAlignment(mod);
@@ -1157,7 +1157,7 @@ pub const Struct = struct {
         };
     }
 
-    pub fn packedFieldBitOffset(s: Struct, mod: *const Module, index: usize) u16 {
+    pub fn packedFieldBitOffset(s: Struct, mod: *Module, index: usize) u16 {
         assert(s.layout == .Packed);
         assert(s.haveLayout());
         var bit_sum: u64 = 0;
@@ -1171,7 +1171,7 @@ pub const Struct = struct {
     }
 
     pub const RuntimeFieldIterator = struct {
-        module: *const Module,
+        module: *Module,
         struct_obj: *const Struct,
         index: u32 = 0,
 
@@ -1201,7 +1201,7 @@ pub const Struct = struct {
        }
    };
 
-    pub fn runtimeFieldIterator(s: *const Struct, module: *const Module) RuntimeFieldIterator {
+    pub fn runtimeFieldIterator(s: *const Struct, module: *Module) RuntimeFieldIterator {
         return .{
             .struct_obj = s,
             .module = module,
@@ -1353,7 +1353,7 @@ pub const Union = struct {
         /// Returns the field alignment, assuming the union is not packed.
         /// Keep implementation in sync with `Sema.unionFieldAlignment`.
         /// Prefer to call that function instead of this one during Sema.
-        pub fn normalAlignment(field: Field, mod: *const Module) u32 {
+        pub fn normalAlignment(field: Field, mod: *Module) u32 {
             if (field.abi_align == 0) {
                 return field.ty.abiAlignment(mod);
             } else {
@@ -1413,7 +1413,7 @@ pub const Union = struct {
         };
     }
 
-    pub fn hasAllZeroBitFieldTypes(u: Union, mod: *const Module) bool {
+    pub fn hasAllZeroBitFieldTypes(u: Union, mod: *Module) bool {
         assert(u.haveFieldTypes());
         for (u.fields.values()) |field| {
             if (field.ty.hasRuntimeBits(mod)) return false;
@@ -1421,7 +1421,7 @@ pub const Union = struct {
         return true;
     }
 
-    pub fn mostAlignedField(u: Union, mod: *const Module) u32 {
+    pub fn mostAlignedField(u: Union, mod: *Module) u32 {
         assert(u.haveFieldTypes());
         var most_alignment: u32 = 0;
         var most_index: usize = undefined;
@@ -1438,7 +1438,7 @@ pub const Union = struct {
     }
 
     /// Returns 0 if the union is represented with 0 bits at runtime.
-    pub fn abiAlignment(u: Union, mod: *const Module, have_tag: bool) u32 {
+    pub fn abiAlignment(u: Union, mod: *Module, have_tag: bool) u32 {
         var max_align: u32 = 0;
         if (have_tag) max_align = u.tag_ty.abiAlignment(mod);
         for (u.fields.values()) |field| {
@@ -1450,7 +1450,7 @@ pub const Union = struct {
         return max_align;
     }
 
-    pub fn abiSize(u: Union, mod: *const Module, have_tag: bool) u64 {
+    pub fn abiSize(u: Union, mod: *Module, have_tag: bool) u64 {
         return u.getLayout(mod, have_tag).abi_size;
     }
 
@@ -1481,7 +1481,7 @@ pub const Union = struct {
         };
     }
 
-    pub fn getLayout(u: Union, mod: *const Module, have_tag: bool) Layout {
+    pub fn getLayout(u: Union, mod: *Module, have_tag: bool) Layout {
         assert(u.haveLayout());
         var most_aligned_field: u32 = undefined;
         var most_aligned_field_size: u64 = undefined;
@@ -6988,6 +6988,7 @@ pub const AtomicPtrAlignmentError = error{
     FloatTooBig,
     IntTooBig,
     BadType,
+    OutOfMemory,
 };
 
 pub const AtomicPtrAlignmentDiagnostics = struct {
@@ -7001,7 +7002,7 @@ pub const AtomicPtrAlignmentDiagnostics = struct {
 // TODO this function does not take into account CPU features, which can affect
 // this value. Audit this!
 pub fn atomicPtrAlignment(
-    mod: *const Module,
+    mod: *Module,
     ty: Type,
     diags: *AtomicPtrAlignmentDiagnostics,
 ) AtomicPtrAlignmentError!u32 {
@@ -7080,7 +7081,7 @@ pub fn atomicPtrAlignment(
 
     const int_ty = switch (ty.zigTypeTag(mod)) {
         .Int => ty,
-        .Enum => ty.intTagType(),
+        .Enum => try ty.intTagType(mod),
         .Float => {
             const bit_count = ty.floatBits(target);
             if (bit_count > max_atomic_bits) {
diff --git a/src/Sema.zig b/src/Sema.zig
index ca6f28017b..8725704937 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -8249,7 +8249,6 @@ fn zirEnumLiteral(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
 
 fn zirEnumToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
     const mod = sema.mod;
-    const arena = sema.arena;
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
@@ -8278,7 +8277,7 @@ fn zirEnumToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
     };
 
     const enum_tag_ty = sema.typeOf(enum_tag);
-    const int_tag_ty = try enum_tag_ty.intTagType().copy(arena);
+    const int_tag_ty = try enum_tag_ty.intTagType(mod);
 
     if (try sema.typeHasOnePossibleValue(enum_tag_ty)) |opv| {
         return sema.addConstant(int_tag_ty, opv);
@@ -8310,7 +8309,7 @@ fn zirIntToEnum(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
 
     if (try sema.resolveMaybeUndefVal(operand)) |int_val| {
         if (dest_ty.isNonexhaustiveEnum()) {
-            const int_tag_ty = dest_ty.intTagType();
+            const int_tag_ty = try dest_ty.intTagType(mod);
             if (try sema.intFitsInType(int_val, int_tag_ty, null)) {
                 return sema.addConstant(dest_ty, int_val);
             }
@@ -16268,7 +16267,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
         },
         .Enum => {
             // TODO: look into memoizing this result.
-            const int_tag_ty = try ty.intTagType().copy(sema.arena);
+            const int_tag_ty = try ty.intTagType(mod);
 
             const is_exhaustive = Value.makeBool(!ty.isNonexhaustiveEnum());
 
@@ -20354,7 +20353,7 @@ fn zirBitCount(
     block: *Block,
     inst: Zir.Inst.Index,
     air_tag: Air.Inst.Tag,
-    comptime comptimeOp: fn (val: Value, ty: Type, mod: *const Module) u64,
+    comptime comptimeOp: fn (val: Value, ty: Type, mod: *Module) u64,
 ) CompileError!Air.Inst.Ref {
     const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
@@ -20755,6 +20754,7 @@ fn checkAtomicPtrOperand(
     const mod = sema.mod;
     var diag: Module.AtomicPtrAlignmentDiagnostics = .{};
     const alignment = mod.atomicPtrAlignment(elem_ty, &diag) catch |err| switch (err) {
+        error.OutOfMemory => return error.OutOfMemory,
         error.FloatTooBig => return sema.fail(
             block,
             elem_ty_src,
@@ -23462,7 +23462,7 @@ fn validateExternType(
             return !Type.fnCallingConventionAllowsZigTypes(target, ty.fnCallingConvention());
         },
         .Enum => {
-            return sema.validateExternType(ty.intTagType(), position);
+            return sema.validateExternType(try ty.intTagType(mod), position);
         },
         .Struct, .Union => switch (ty.containerLayout()) {
             .Extern => return true,
@@ -23540,7 +23540,7 @@ fn explainWhyTypeIsNotExtern(
             }
         },
         .Enum => {
-            const tag_ty = ty.intTagType();
+            const tag_ty = try ty.intTagType(mod);
             try mod.errNoteNonLazy(src_loc, msg, "enum tag type '{}' is not extern compatible", .{tag_ty.fmt(sema.mod)});
             try sema.explainWhyTypeIsNotExtern(msg, src_loc, tag_ty, position);
         },
diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig
index ea3814a20e..970d59a25f 100644
--- a/src/arch/aarch64/CodeGen.zig
+++ b/src/arch/aarch64/CodeGen.zig
@@ -4533,7 +4533,7 @@ fn cmp(
             }
         },
         .Float => return self.fail("TODO ARM cmp floats", .{}),
-        .Enum => lhs_ty.intTagType(),
+        .Enum => try lhs_ty.intTagType(mod),
         .Int => lhs_ty,
         .Bool => Type.u1,
         .Pointer => Type.usize,
diff --git a/src/arch/aarch64/abi.zig b/src/arch/aarch64/abi.zig
index 821afd27ae..1d042b632a 100644
--- a/src/arch/aarch64/abi.zig
+++ b/src/arch/aarch64/abi.zig
@@ -15,7 +15,7 @@ pub const Class = union(enum) {
 };
 
 /// For `float_array` the second element will be the amount of floats.
-pub fn classifyType(ty: Type, mod: *const Module) Class {
+pub fn classifyType(ty: Type, mod: *Module) Class {
     std.debug.assert(ty.hasRuntimeBitsIgnoreComptime(mod));
 
     var maybe_float_bits: ?u16 = null;
@@ -74,7 +74,7 @@ pub fn classifyType(ty: Type, mod: *const Module) Class {
 }
 
 const sret_float_count = 4;
-fn countFloats(ty: Type, mod: *const Module, maybe_float_bits: *?u16) u8 {
+fn countFloats(ty: Type, mod: *Module, maybe_float_bits: *?u16) u8 {
     const target = mod.getTarget();
     const invalid = std.math.maxInt(u8);
     switch (ty.zigTypeTag(mod)) {
@@ -115,7 +115,7 @@ fn countFloats(ty: Type, mod: *const Module, maybe_float_bits: *?u16) u8 {
     }
 }
 
-pub fn getFloatArrayType(ty: Type, mod: *const Module) ?Type {
+pub fn getFloatArrayType(ty: Type, mod: *Module) ?Type {
     switch (ty.zigTypeTag(mod)) {
         .Union => {
             const fields = ty.unionFields();
diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig
index 967a6dd753..50f6d76c55 100644
--- a/src/arch/arm/CodeGen.zig
+++ b/src/arch/arm/CodeGen.zig
@@ -4480,7 +4480,7 @@ fn cmp(
             }
         },
         .Float => return self.fail("TODO ARM cmp floats", .{}),
-        .Enum => lhs_ty.intTagType(),
+        .Enum => try lhs_ty.intTagType(mod),
         .Int => lhs_ty,
         .Bool => Type.u1,
         .Pointer => Type.usize,
diff --git a/src/arch/arm/abi.zig b/src/arch/arm/abi.zig
index eee4b41eef..79ffadf831 100644
--- a/src/arch/arm/abi.zig
+++ b/src/arch/arm/abi.zig
@@ -24,7 +24,7 @@ pub const Class = union(enum) {
 
 pub const Context = enum { ret, arg };
 
-pub fn classifyType(ty: Type, mod: *const Module, ctx: Context) Class {
+pub fn classifyType(ty: Type, mod: *Module, ctx: Context) Class {
     assert(ty.hasRuntimeBitsIgnoreComptime(mod));
 
     var maybe_float_bits: ?u16 = null;
@@ -116,7 +116,7 @@ pub fn classifyType(ty: Type, mod: *const Module, ctx: Context) Class {
 }
 
 const byval_float_count = 4;
-fn countFloats(ty: Type, mod: *const Module, maybe_float_bits: *?u16) u32 {
+fn countFloats(ty: Type, mod: *Module, maybe_float_bits: *?u16) u32 {
     const target = mod.getTarget();
     const invalid = std.math.maxInt(u32);
     switch (ty.zigTypeTag(mod)) {
diff --git a/src/arch/riscv64/abi.zig b/src/arch/riscv64/abi.zig
index ac0d8d3e32..28a69d9136 100644
--- a/src/arch/riscv64/abi.zig
+++ b/src/arch/riscv64/abi.zig
@@ -7,7 +7,7 @@ const Module = @import("../../Module.zig");
 
 pub const Class = enum { memory, byval, integer, double_integer };
 
-pub fn classifyType(ty: Type, mod: *const Module) Class {
+pub fn classifyType(ty: Type, mod: *Module) Class {
     const target = mod.getTarget();
     std.debug.assert(ty.hasRuntimeBitsIgnoreComptime(mod));
 
diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig
index 2cb35460c2..0490db615b 100644
--- a/src/arch/sparc64/CodeGen.zig
+++ b/src/arch/sparc64/CodeGen.zig
@@ -1436,7 +1436,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
 
     const int_ty = switch (lhs_ty.zigTypeTag(mod)) {
         .Vector => unreachable, // Handled by cmp_vector.
-        .Enum => lhs_ty.intTagType(),
+        .Enum => try lhs_ty.intTagType(mod),
         .Int => lhs_ty,
         .Bool => Type.u1,
         .Pointer => Type.usize,
diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig
index 36b805cf94..237a55984e 100644
--- a/src/arch/wasm/CodeGen.zig
+++ b/src/arch/wasm/CodeGen.zig
@@ -1393,7 +1393,7 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV
     return result;
 }
 
-fn firstParamSRet(cc: std.builtin.CallingConvention, return_type: Type, mod: *const Module) bool {
+fn firstParamSRet(cc: std.builtin.CallingConvention, return_type: Type, mod: *Module) bool {
     switch (cc) {
         .Unspecified, .Inline => return isByRef(return_type, mod),
         .C => {
@@ -1713,7 +1713,7 @@ fn arch(func: *const CodeGen) std.Target.Cpu.Arch {
 
 /// For a given `Type`, will return true when the type will be passed
 /// by reference, rather than by value
-fn isByRef(ty: Type, mod: *const Module) bool {
+fn isByRef(ty: Type, mod: *Module) bool {
     const target = mod.getTarget();
     switch (ty.zigTypeTag(mod)) {
         .Type,
@@ -1787,7 +1787,7 @@ const SimdStoreStrategy = enum {
 /// This means when a given type is 128 bits and either the simd128 or relaxed-simd
 /// features are enabled, the function will return `.direct`. This would allow storing
 /// it using a single instruction, rather than an unrolled version.
-fn determineSimdStoreStrategy(ty: Type, mod: *const Module) SimdStoreStrategy {
+fn determineSimdStoreStrategy(ty: Type, mod: *Module) SimdStoreStrategy {
     std.debug.assert(ty.zigTypeTag(mod) == .Vector);
     if (ty.bitSize(mod) != 128) return .unrolled;
     const hasFeature = std.Target.wasm.featureSetHas;
@@ -3121,7 +3121,7 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue {
                     else => return func.fail("TODO: lowerConstant for enum tag: {}", .{ty.tag()}),
                 }
             } else {
-                const int_tag_ty = ty.intTagType();
+                const int_tag_ty = try ty.intTagType(mod);
                 return func.lowerConstant(val, int_tag_ty);
             }
         },
@@ -3235,7 +3235,7 @@ fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue {
 /// Returns a `Value` as a signed 32 bit value.
 /// It's illegal to provide a value with a type that cannot be represented
 /// as an integer value.
-fn valueAsI32(func: *const CodeGen, val: Value, ty: Type) i32 {
+fn valueAsI32(func: *const CodeGen, val: Value, ty: Type) !i32 {
     const mod = func.bin_file.base.options.module.?;
     switch (ty.zigTypeTag(mod)) {
         .Enum => {
@@ -3257,7 +3257,7 @@ fn valueAsI32(func: *const CodeGen, val: Value, ty: Type) i32 {
                     else => unreachable,
                 }
             } else {
-                const int_tag_ty = ty.intTagType();
+                const int_tag_ty = try ty.intTagType(mod);
                 return func.valueAsI32(val, int_tag_ty);
             }
         },
@@ -3793,7 +3793,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 
         for (items, 0..) |ref, i| {
             const item_val = (try func.air.value(ref, mod)).?;
-            const int_val = func.valueAsI32(item_val, target_ty);
+            const int_val = try func.valueAsI32(item_val, target_ty);
             if (lowest_maybe == null or int_val < lowest_maybe.?) {
                 lowest_maybe = int_val;
             }
@@ -6814,7 +6814,7 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 {
         return loc.index;
     }
 
-    const int_tag_ty = enum_ty.intTagType();
+    const int_tag_ty = try enum_ty.intTagType(mod);
 
     if (int_tag_ty.bitSize(mod) > 64) {
         return func.fail("TODO: Implement @tagName for enums with tag size larger than 64 bits", .{});
diff --git a/src/arch/wasm/abi.zig b/src/arch/wasm/abi.zig
index c7819b0fa6..bb5911382b 100644
--- a/src/arch/wasm/abi.zig
+++ b/src/arch/wasm/abi.zig
@@ -21,7 +21,7 @@ const direct: [2]Class = .{ .direct, .none };
 /// Classifies a given Zig type to determine how it must be passed
 /// or returned as a value within a wasm function.
 /// When all elements result in `.none`, no value must be passed in or returned.
-pub fn classifyType(ty: Type, mod: *const Module) [2]Class {
+pub fn classifyType(ty: Type, mod: *Module) [2]Class {
     const target = mod.getTarget();
     if (!ty.hasRuntimeBitsIgnoreComptime(mod)) return none;
     switch (ty.zigTypeTag(mod)) {
@@ -93,7 +93,7 @@ pub fn classifyType(ty: Type, mod: *const Module) [2]Class {
 /// Returns the scalar type a given type can represent.
 /// Asserts given type can be represented as scalar, such as
 /// a struct with a single scalar field.
-pub fn scalarType(ty: Type, mod: *const Module) Type {
+pub fn scalarType(ty: Type, mod: *Module) Type {
     switch (ty.zigTypeTag(mod)) {
         .Struct => {
             switch (ty.containerLayout()) {
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index c5e3410947..1cfed06ff1 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -605,7 +605,7 @@ const FrameAlloc = struct {
             .ref_count = 0,
         };
     }
-    fn initType(ty: Type, mod: *const Module) FrameAlloc {
+    fn initType(ty: Type, mod: *Module) FrameAlloc {
         return init(.{ .size = ty.abiSize(mod), .alignment = ty.abiAlignment(mod) });
     }
 };
@@ -2309,7 +2309,7 @@ fn allocRegOrMemAdvanced(self: *Self, ty: Type, inst: ?Air.Inst.Index, reg_ok: b
     return .{ .load_frame = .{ .index = frame_index } };
 }
 
-fn regClassForType(ty: Type, mod: *const Module) RegisterManager.RegisterBitSet {
+fn regClassForType(ty: Type, mod: *Module) RegisterManager.RegisterBitSet {
     return switch (ty.zigTypeTag(mod)) {
         .Float, .Vector => sse,
         else => gp,
diff --git a/src/arch/x86_64/abi.zig b/src/arch/x86_64/abi.zig
index ea75a1f4d2..1bae899d33 100644
--- a/src/arch/x86_64/abi.zig
+++ b/src/arch/x86_64/abi.zig
@@ -12,7 +12,7 @@ pub const Class = enum {
     float_combine,
 };
 
-pub fn classifyWindows(ty: Type, mod: *const Module) Class {
+pub fn classifyWindows(ty: Type, mod: *Module) Class {
     // https://docs.microsoft.com/en-gb/cpp/build/x64-calling-convention?view=vs-2017
     // "There's a strict one-to-one correspondence between a function call's arguments
     // and the registers used for those arguments. Any argument that doesn't fit in 8
@@ -68,7 +68,7 @@ pub const Context = enum { ret, arg, other };
 
 /// There is a maximum of 8 possible return slots. Returned values are in
 /// the beginning of the array; unused slots are filled with .none.
-pub fn classifySystemV(ty: Type, mod: *const Module, ctx: Context) [8]Class {
+pub fn classifySystemV(ty: Type, mod: *Module, ctx: Context) [8]Class {
     const target = mod.getTarget();
     const memory_class = [_]Class{
         .memory, .none, .none, .none,
diff --git a/src/codegen.zig b/src/codegen.zig
index 8bd478bf7c..70df1fc17b 100644
--- a/src/codegen.zig
+++ b/src/codegen.zig
@@ -1241,7 +1241,7 @@ pub fn genTypedValue(
                     if (enum_values.count() != 0) {
                         const tag_val = enum_values.keys()[field_index.data];
                         return genTypedValue(bin_file, src_loc, .{
-                            .ty = typed_value.ty.intTagType(),
+                            .ty = try typed_value.ty.intTagType(mod),
                             .val = tag_val,
                         }, owner_decl_index);
                     } else {
@@ -1251,7 +1251,7 @@ pub fn genTypedValue(
                     else => unreachable,
                 }
             } else {
-                const int_tag_ty = typed_value.ty.intTagType();
+                const int_tag_ty = try typed_value.ty.intTagType(mod);
                 return genTypedValue(bin_file, src_loc, .{
                     .ty = int_tag_ty,
                     .val = typed_value.val,
@@ -1303,7 +1303,7 @@ pub fn genTypedValue(
     return genUnnamedConst(bin_file, src_loc, typed_value, owner_decl_index);
 }
 
-pub fn errUnionPayloadOffset(payload_ty: Type, mod: *const Module) u64 {
+pub fn errUnionPayloadOffset(payload_ty: Type, mod: *Module) u64 {
     if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return 0;
     const payload_align = payload_ty.abiAlignment(mod);
     const error_align = Type.anyerror.abiAlignment(mod);
@@ -1314,7 +1314,7 @@ pub fn errUnionPayloadOffset(payload_ty: Type, mod: *const Module) u64 {
     }
 }
 
-pub fn errUnionErrorOffset(payload_ty: Type, mod: *const Module) u64 {
+pub fn errUnionErrorOffset(payload_ty: Type, mod: *Module) u64 {
     if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return 0;
     const payload_align = payload_ty.abiAlignment(mod);
     const error_align = Type.anyerror.abiAlignment(mod);
diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index b688aada34..3c6f5a9e73 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -1300,7 +1300,7 @@ pub const DeclGen = struct {
                     }
                 },
                 else => {
-                    const int_tag_ty = ty.intTagType();
+                    const int_tag_ty = try ty.intTagType(mod);
                     return dg.renderValue(writer, int_tag_ty, val, location);
                 },
             }
@@ -5198,7 +5198,7 @@ fn fieldLocation(
     container_ty: Type,
     field_ptr_ty: Type,
     field_index: u32,
-    mod: *const Module,
+    mod: *Module,
 ) union(enum) {
     begin: void,
     field: CValue,
@@ -7722,7 +7722,7 @@ const LowerFnRetTyBuffer = struct {
     values: [1]Value,
     payload: Type.Payload.AnonStruct,
 };
-fn lowerFnRetTy(ret_ty: Type, buffer: *LowerFnRetTyBuffer, mod: *const Module) Type {
+fn lowerFnRetTy(ret_ty: Type, buffer: *LowerFnRetTyBuffer, mod: *Module) Type {
     if (ret_ty.zigTypeTag(mod) == .NoReturn) return Type.noreturn;
 
     if (lowersToArray(ret_ty, mod)) {
@@ -7740,7 +7740,7 @@ fn lowerFnRetTy(ret_ty: Type, buffer: *LowerFnRetTyBuffer, mod: *const Module) T
     return if (ret_ty.hasRuntimeBitsIgnoreComptime(mod)) ret_ty else Type.void;
 }
 
-fn lowersToArray(ty: Type, mod: *const Module) bool {
+fn lowersToArray(ty: Type, mod: *Module) bool {
     return switch (ty.zigTypeTag(mod)) {
         .Array, .Vector => return true,
         else => return ty.isAbiInt(mod) and toCIntBits(@intCast(u32, ty.bitSize(mod))) == null,
diff --git a/src/codegen/c/type.zig b/src/codegen/c/type.zig
index 84ddce6809..0823400858 100644
--- a/src/codegen/c/type.zig
+++ b/src/codegen/c/type.zig
@@ -292,17 +292,17 @@ pub const CType = extern union {
                 .abi = std.math.log2_int(u32, abi_alignment),
             };
         }
-        pub fn abiAlign(ty: Type, mod: *const Module) AlignAs {
+        pub fn abiAlign(ty: Type, mod: *Module) AlignAs {
             const abi_align = ty.abiAlignment(mod);
             return init(abi_align, abi_align);
         }
-        pub fn fieldAlign(struct_ty: Type, field_i: usize, mod: *const Module) AlignAs {
+        pub fn fieldAlign(struct_ty: Type, field_i: usize, mod: *Module) AlignAs {
             return init(
                 struct_ty.structFieldAlign(field_i, mod),
                 struct_ty.structFieldType(field_i).abiAlignment(mod),
             );
         }
-        pub fn unionPayloadAlign(union_ty: Type, mod: *const Module) AlignAs {
+        pub fn unionPayloadAlign(union_ty: Type, mod: *Module) AlignAs {
             const union_obj = union_ty.cast(Type.Payload.Union).?.data;
             const union_payload_align = union_obj.abiAlignment(mod, false);
             return init(union_payload_align, union_payload_align);
@@ -1897,7 +1897,7 @@ pub const CType = extern union {
             }
         }
 
-        fn createFromType(store: *Store.Promoted, ty: Type, mod: *const Module, kind: Kind) !CType {
+        fn createFromType(store: *Store.Promoted, ty: Type, mod: *Module, kind: Kind) !CType {
             var convert: Convert = undefined;
             try convert.initType(ty, kind, .{ .imm = .{ .set = &store.set, .mod = mod } });
             return createFromConvert(store, ty, mod, kind, &convert);
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index 23340b5d34..5f013c38ec 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -1527,7 +1527,7 @@ pub const Object = struct {
         };
         const field_index_val = Value.initPayload(&buf_field_index.base);
 
-        const int_ty = ty.intTagType();
+        const int_ty = try ty.intTagType(mod);
         const int_info = ty.intInfo(mod);
         assert(int_info.bits != 0);
 
@@ -2805,7 +2805,7 @@ pub const DeclGen = struct {
                 return dg.context.intType(info.bits);
             },
             .Enum => {
-                const int_ty = t.intTagType();
+                const int_ty = try t.intTagType(mod);
                 const bit_count = int_ty.intInfo(mod).bits;
                 assert(bit_count != 0);
                 return dg.context.intType(bit_count);
@@ -4334,7 +4334,9 @@ pub const DeclGen = struct {
         const mod = dg.module;
         const int_ty = switch (ty.zigTypeTag(mod)) {
             .Int => ty,
-            .Enum => ty.intTagType(),
+            .Enum => ty.intTagType(mod) catch |err| switch (err) {
+                error.OutOfMemory => @panic("OOM"),
+            },
             .Float => {
                 if (!is_rmw_xchg) return null;
                 return dg.context.intType(@intCast(c_uint, ty.abiSize(mod) * 8));
@@ -5286,7 +5288,7 @@ pub const FuncGen = struct {
         const mod = self.dg.module;
         const scalar_ty = operand_ty.scalarType(mod);
         const int_ty = switch (scalar_ty.zigTypeTag(mod)) {
-            .Enum => scalar_ty.intTagType(),
+            .Enum => try scalar_ty.intTagType(mod),
             .Int, .Bool, .Pointer, .ErrorSet => scalar_ty,
             .Optional => blk: {
                 const payload_ty = operand_ty.optionalChild(mod);
@@ -8867,7 +8869,7 @@ pub const FuncGen = struct {
         defer self.gpa.free(fqn);
         const llvm_fn_name = try std.fmt.allocPrintZ(arena, "__zig_is_named_enum_value_{s}", .{fqn});
 
-        const int_tag_ty = enum_ty.intTagType();
+        const int_tag_ty = try enum_ty.intTagType(mod);
         const param_types = [_]*llvm.Type{try self.dg.lowerType(int_tag_ty)};
 
         const llvm_ret_ty = try self.dg.lowerType(Type.bool);
@@ -8950,7 +8952,7 @@ pub const FuncGen = struct {
         const usize_llvm_ty = try self.dg.lowerType(Type.usize);
         const slice_alignment = slice_ty.abiAlignment(mod);
 
-        const int_tag_ty = enum_ty.intTagType();
+        const int_tag_ty = try enum_ty.intTagType(mod);
         const param_types = [_]*llvm.Type{try self.dg.lowerType(int_tag_ty)};
         const fn_type = llvm.functionType(llvm_ret_ty, &param_types, param_types.len, .False);
 
@@ -10487,7 +10489,7 @@ fn toLlvmGlobalAddressSpace(wanted_address_space: std.builtin.AddressSpace, targ
 fn llvmFieldIndex(
     ty: Type,
     field_index: usize,
-    mod: *const Module,
+    mod: *Module,
     ptr_pl_buf: *Type.Payload.Pointer,
 ) ?c_uint {
     // Detects where we inserted extra padding fields so that we can skip
@@ -10564,7 +10566,7 @@ fn llvmFieldIndex(
     }
 }
 
-fn firstParamSRet(fn_info: Type.Payload.Function.Data, mod: *const Module) bool {
+fn firstParamSRet(fn_info: Type.Payload.Function.Data, mod: *Module) bool {
     if (!fn_info.return_type.hasRuntimeBitsIgnoreComptime(mod)) return false;
 
     const target = mod.getTarget();
@@ -10593,7 +10595,7 @@ fn firstParamSRet(fn_info: Type.Payload.Function.Data, mod: *const Module) bool
     }
 }
 
-fn firstParamSRetSystemV(ty: Type, mod: *const Module) bool {
+fn firstParamSRetSystemV(ty: Type, mod: *Module) bool {
     const class = x86_64_abi.classifySystemV(ty, mod, .ret);
     if (class[0] == .memory) return true;
     if (class[0] == .x87 and class[2] != .none) return true;
@@ -11041,7 +11043,7 @@ fn iterateParamTypes(dg: *DeclGen, fn_info: Type.Payload.Function.Data) ParamTyp
 
 fn ccAbiPromoteInt(
     cc: std.builtin.CallingConvention,
-    mod: *const Module,
+    mod: *Module,
     ty: Type,
 ) ?std.builtin.Signedness {
     const target = mod.getTarget();
@@ -11080,7 +11082,7 @@ fn ccAbiPromoteInt(
 
 /// This is the one source of truth for whether a type is passed around as an LLVM pointer,
 /// or as an LLVM value.
-fn isByRef(ty: Type, mod: *const Module) bool {
+fn isByRef(ty: Type, mod: *Module) bool {
     // For tuples and structs, if there are more than this many non-void
     // fields, then we make it byref, otherwise byval.
     const max_fields_byval = 0;
@@ -11159,7 +11161,7 @@ fn isByRef(ty: Type, mod: *const Module) bool {
     }
 }
 
-fn isScalar(mod: *const Module, ty: Type) bool {
+fn isScalar(mod: *Module, ty: Type) bool {
     return switch (ty.zigTypeTag(mod)) {
         .Void,
         .Bool,
@@ -11344,11 +11346,11 @@ fn buildAllocaInner(
     return alloca;
 }
 
-fn errUnionPayloadOffset(payload_ty: Type, mod: *const Module) u1 {
+fn errUnionPayloadOffset(payload_ty: Type, mod: *Module) u1 {
     return @boolToInt(Type.anyerror.abiAlignment(mod) > payload_ty.abiAlignment(mod));
 }
 
-fn errUnionErrorOffset(payload_ty: Type, mod: *const Module) u1 {
+fn errUnionErrorOffset(payload_ty: Type, mod: *Module) u1 {
     return @boolToInt(Type.anyerror.abiAlignment(mod) <= payload_ty.abiAlignment(mod));
 }
 
diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig
index 3842da5f7b..843b67e426 100644
--- a/src/codegen/spirv.zig
+++ b/src/codegen/spirv.zig
@@ -745,7 +745,7 @@ pub const DeclGen = struct {
             .Enum => {
                 const int_val = try val.enumToInt(ty, mod);
 
-                const int_ty = ty.intTagType();
+                const int_ty = try ty.intTagType(mod);
 
                 try self.lower(int_ty, int_val);
             },
@@ -1195,7 +1195,7 @@ pub const DeclGen = struct {
                 return try self.intType(int_info.signedness, int_info.bits);
             },
             .Enum => {
-                const tag_ty = ty.intTagType();
+                const tag_ty = try ty.intTagType(mod);
                 return self.resolveType(tag_ty, repr);
             },
             .Float => {
@@ -3090,7 +3090,7 @@ pub const DeclGen = struct {
                 break :blk if (backing_bits <= 32) @as(u32, 1) else 2;
             },
             .Enum => blk: {
-                const int_ty = cond_ty.intTagType();
+                const int_ty = try cond_ty.intTagType(mod);
                 const int_info = int_ty.intInfo(mod);
                 const backing_bits = self.backingIntBits(int_info.bits) orelse {
                     return self.todo("implement composite int switch", .{});
diff --git a/src/type.zig b/src/type.zig
index 68ac507037..9c8c1f1591 100644
--- a/src/type.zig
+++ b/src/type.zig
@@ -1606,7 +1606,7 @@ pub const Type = struct {
     /// may return false positives.
     pub fn hasRuntimeBitsAdvanced(
         ty: Type,
-        mod: *const Module,
+        mod: *Module,
         ignore_comptime_only: bool,
         strat: AbiAlignmentAdvancedStrat,
     ) RuntimeBitsError!bool {
@@ -1785,7 +1785,7 @@ pub const Type = struct {
                     return enum_simple.fields.count() >= 2;
                 },
                 .enum_numbered, .enum_nonexhaustive => {
-                    const int_tag_ty = ty.intTagType();
+                    const int_tag_ty = try ty.intTagType(mod);
                     return int_tag_ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat);
                 },
 
@@ -1850,7 +1850,7 @@ pub const Type = struct {
     /// true if and only if the type has a well-defined memory layout
     /// readFrom/writeToMemory are supported only for types with a well-
    /// defined memory layout
-    pub fn hasWellDefinedLayout(ty: Type, mod: *const Module) bool {
+    pub fn hasWellDefinedLayout(ty: Type, mod: *Module) bool {
         if (ty.ip_index != .none) return switch (mod.intern_pool.indexToKey(ty.ip_index)) {
             .int_type => true,
             .ptr_type => true,
@@ -1952,15 +1952,15 @@ pub const Type = struct {
         };
     }
 
-    pub fn hasRuntimeBits(ty: Type, mod: *const Module) bool {
+    pub fn hasRuntimeBits(ty: Type, mod: *Module) bool {
         return hasRuntimeBitsAdvanced(ty, mod, false, .eager) catch unreachable;
     }
 
-    pub fn hasRuntimeBitsIgnoreComptime(ty: Type, mod: *const Module) bool {
+    pub fn hasRuntimeBitsIgnoreComptime(ty: Type, mod: *Module) bool {
         return hasRuntimeBitsAdvanced(ty, mod, true, .eager) catch unreachable;
     }
 
-    pub fn isFnOrHasRuntimeBits(ty: Type, mod: *const Module) bool {
+    pub fn isFnOrHasRuntimeBits(ty: Type, mod: *Module) bool {
         switch (ty.zigTypeTag(mod)) {
             .Fn => {
                 const fn_info = ty.fnInfo();
@@ -1980,7 +1980,7 @@ pub const Type = struct {
     }
 
     /// Same as `isFnOrHasRuntimeBits` but comptime-only types may return a false positive.
-    pub fn isFnOrHasRuntimeBitsIgnoreComptime(ty: Type, mod: *const Module) bool {
+    pub fn isFnOrHasRuntimeBitsIgnoreComptime(ty: Type, mod: *Module) bool {
         return switch (ty.zigTypeTag(mod)) {
             .Fn => true,
             else => return ty.hasRuntimeBitsIgnoreComptime(mod),
@@ -2019,11 +2019,11 @@ pub const Type = struct {
     }
 
     /// Returns 0 if the pointer is naturally aligned and the element type is 0-bit.
-    pub fn ptrAlignment(ty: Type, mod: *const Module) u32 {
+    pub fn ptrAlignment(ty: Type, mod: *Module) u32 {
         return ptrAlignmentAdvanced(ty, mod, null) catch unreachable;
     }
 
-    pub fn ptrAlignmentAdvanced(ty: Type, mod: *const Module, opt_sema: ?*Sema) !u32 {
+    pub fn ptrAlignmentAdvanced(ty: Type, mod: *Module, opt_sema: ?*Sema) !u32 {
         switch (ty.ip_index) {
             .none => switch (ty.tag()) {
                 .pointer => {
@@ -2072,7 +2072,7 @@ pub const Type = struct {
     }
 
     /// Returns 0 for 0-bit types.
-    pub fn abiAlignment(ty: Type, mod: *const Module) u32 {
+    pub fn abiAlignment(ty: Type, mod: *Module) u32 {
         return (ty.abiAlignmentAdvanced(mod, .eager) catch unreachable).scalar;
     }
 
@@ -2103,7 +2103,7 @@ pub const Type = struct {
     /// necessary, possibly returning a CompileError.
pub fn abiAlignmentAdvanced( ty: Type, - mod: *const Module, + mod: *Module, strat: AbiAlignmentAdvancedStrat, ) Module.CompileError!AbiAlignmentAdvanced { const target = mod.getTarget(); @@ -2320,7 +2320,7 @@ pub const Type = struct { }, .enum_full, .enum_nonexhaustive, .enum_simple, .enum_numbered => { - const int_tag_ty = ty.intTagType(); + const int_tag_ty = try ty.intTagType(mod); return AbiAlignmentAdvanced{ .scalar = int_tag_ty.abiAlignment(mod) }; }, .@"union" => { @@ -2344,7 +2344,7 @@ pub const Type = struct { fn abiAlignmentAdvancedErrorUnion( ty: Type, - mod: *const Module, + mod: *Module, strat: AbiAlignmentAdvancedStrat, ) Module.CompileError!AbiAlignmentAdvanced { // This code needs to be kept in sync with the equivalent switch prong @@ -2380,7 +2380,7 @@ pub const Type = struct { fn abiAlignmentAdvancedOptional( ty: Type, - mod: *const Module, + mod: *Module, strat: AbiAlignmentAdvancedStrat, ) Module.CompileError!AbiAlignmentAdvanced { const target = mod.getTarget(); @@ -2412,7 +2412,7 @@ pub const Type = struct { pub fn abiAlignmentAdvancedUnion( ty: Type, - mod: *const Module, + mod: *Module, strat: AbiAlignmentAdvancedStrat, union_obj: *Module.Union, have_tag: bool, @@ -2477,7 +2477,7 @@ pub const Type = struct { /// Asserts the type has the ABI size already resolved. /// Types that return false for hasRuntimeBits() return 0. - pub fn abiSize(ty: Type, mod: *const Module) u64 { + pub fn abiSize(ty: Type, mod: *Module) u64 { return (abiSizeAdvanced(ty, mod, .eager) catch unreachable).scalar; } @@ -2494,7 +2494,7 @@ pub const Type = struct { /// necessary, possibly returning a CompileError. pub fn abiSizeAdvanced( ty: Type, - mod: *const Module, + mod: *Module, strat: AbiAlignmentAdvancedStrat, ) Module.CompileError!AbiSizeAdvanced { const target = mod.getTarget(); @@ -2661,7 +2661,7 @@ pub const Type = struct { }, .enum_simple, .enum_full, .enum_nonexhaustive, .enum_numbered => { - const int_tag_ty = ty.intTagType(); + const int_tag_ty = try ty.intTagType(mod); return AbiSizeAdvanced{ .scalar = int_tag_ty.abiSize(mod) }; }, .@"union" => { @@ -2754,7 +2754,7 @@ pub const Type = struct { pub fn abiSizeAdvancedUnion( ty: Type, - mod: *const Module, + mod: *Module, strat: AbiAlignmentAdvancedStrat, union_obj: *Module.Union, have_tag: bool, @@ -2773,7 +2773,7 @@ pub const Type = struct { fn abiSizeAdvancedOptional( ty: Type, - mod: *const Module, + mod: *Module, strat: AbiAlignmentAdvancedStrat, ) Module.CompileError!AbiSizeAdvanced { const child_ty = ty.optionalChild(mod); @@ -2821,7 +2821,7 @@ pub const Type = struct { ); } - pub fn bitSize(ty: Type, mod: *const Module) u64 { + pub fn bitSize(ty: Type, mod: *Module) u64 { return bitSizeAdvanced(ty, mod, null) catch unreachable; } @@ -2830,7 +2830,7 @@ pub const Type = struct { /// the type is fully resolved, and there will be no error, guaranteed. 
pub fn bitSizeAdvanced( ty: Type, - mod: *const Module, + mod: *Module, opt_sema: ?*Sema, ) Module.CompileError!u64 { const target = mod.getTarget(); @@ -2950,7 +2950,7 @@ pub const Type = struct { }, .enum_simple, .enum_full, .enum_nonexhaustive, .enum_numbered => { - const int_tag_ty = ty.intTagType(); + const int_tag_ty = try ty.intTagType(mod); return try bitSizeAdvanced(int_tag_ty, mod, opt_sema); }, @@ -3464,11 +3464,11 @@ pub const Type = struct { return union_obj.fields.getIndex(name); } - pub fn unionHasAllZeroBitFieldTypes(ty: Type, mod: *const Module) bool { + pub fn unionHasAllZeroBitFieldTypes(ty: Type, mod: *Module) bool { return ty.cast(Payload.Union).?.data.hasAllZeroBitFieldTypes(mod); } - pub fn unionGetLayout(ty: Type, mod: *const Module) Module.Union.Layout { + pub fn unionGetLayout(ty: Type, mod: *Module) Module.Union.Layout { switch (ty.tag()) { .@"union" => { const union_obj = ty.castTag(.@"union").?.data; @@ -4428,24 +4428,18 @@ pub const Type = struct { } /// Asserts the type is an enum or a union. - pub fn intTagType(ty: Type) Type { + pub fn intTagType(ty: Type, mod: *Module) !Type { switch (ty.tag()) { .enum_full, .enum_nonexhaustive => return ty.cast(Payload.EnumFull).?.data.tag_ty, .enum_numbered => return ty.castTag(.enum_numbered).?.data.tag_ty, .enum_simple => { - @panic("TODO move enum_simple to use the intern pool"); - //const enum_simple = ty.castTag(.enum_simple).?.data; - //const field_count = enum_simple.fields.count(); - //const bits: u16 = if (field_count == 0) 0 else std.math.log2_int_ceil(usize, field_count); - //buffer.* = .{ - // .base = .{ .tag = .int_unsigned }, - // .data = bits, - //}; - //return Type.initPayload(&buffer.base); + const enum_simple = ty.castTag(.enum_simple).?.data; + const field_count = enum_simple.fields.count(); + const bits: u16 = if (field_count == 0) 0 else std.math.log2_int_ceil(usize, field_count); + return mod.intType(.unsigned, bits); }, .union_tagged => { - @panic("TODO move union_tagged to use the intern pool"); - //return ty.castTag(.union_tagged).?.data.tag_ty.intTagType(buffer), + return ty.castTag(.union_tagged).?.data.tag_ty.intTagType(mod); }, else => unreachable, } @@ -4628,7 +4622,7 @@ pub const Type = struct { } } - pub fn structFieldAlign(ty: Type, index: usize, mod: *const Module) u32 { + pub fn structFieldAlign(ty: Type, index: usize, mod: *Module) u32 { switch (ty.tag()) { .@"struct" => { const struct_obj = ty.castTag(.@"struct").?.data; @@ -4718,7 +4712,7 @@ pub const Type = struct { } } - pub fn packedStructFieldByteOffset(ty: Type, field_index: usize, mod: *const Module) u32 { + pub fn packedStructFieldByteOffset(ty: Type, field_index: usize, mod: *Module) u32 { const struct_obj = ty.castTag(.@"struct").?.data; assert(struct_obj.layout == .Packed); comptime assert(Type.packed_struct_layout_version == 2); @@ -4750,7 +4744,7 @@ pub const Type = struct { offset: u64 = 0, big_align: u32 = 0, struct_obj: *Module.Struct, - module: *const Module, + module: *Module, pub fn next(it: *StructOffsetIterator) ?FieldOffset { const mod = it.module; @@ -4779,7 +4773,7 @@ pub const Type = struct { /// Get an iterator that iterates over all the struct field, returning the field and /// offset of that field. Asserts that the type is a non-packed struct. 
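The enum_simple branch of intTagType above now computes the inferred tag type instead of panicking: zero fields give u0, otherwise ceil(log2(field_count)) bits, interned via mod.intType. A runnable sketch of just that bit-count rule (not part of the patch):

    const std = @import("std");

    fn inferredTagBits(field_count: usize) u16 {
        return if (field_count == 0) 0 else std.math.log2_int_ceil(usize, field_count);
    }

    test "inferred enum tag bits" {
        try std.testing.expectEqual(@as(u16, 0), inferredTagBits(0)); // u0
        try std.testing.expectEqual(@as(u16, 0), inferredTagBits(1)); // u0
        try std.testing.expectEqual(@as(u16, 1), inferredTagBits(2)); // u1
        try std.testing.expectEqual(@as(u16, 3), inferredTagBits(5)); // u3
    }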
- pub fn iterateStructOffsets(ty: Type, mod: *const Module) StructOffsetIterator { + pub fn iterateStructOffsets(ty: Type, mod: *Module) StructOffsetIterator { const struct_obj = ty.castTag(.@"struct").?.data; assert(struct_obj.haveLayout()); assert(struct_obj.layout != .Packed); @@ -4787,7 +4781,7 @@ pub const Type = struct { } /// Supports structs and unions. - pub fn structFieldOffset(ty: Type, index: usize, mod: *const Module) u64 { + pub fn structFieldOffset(ty: Type, index: usize, mod: *Module) u64 { switch (ty.tag()) { .@"struct" => { const struct_obj = ty.castTag(.@"struct").?.data; @@ -5226,7 +5220,7 @@ pub const Type = struct { pub const VectorIndex = InternPool.Key.PtrType.VectorIndex; - pub fn alignment(data: Data, mod: *const Module) u32 { + pub fn alignment(data: Data, mod: *Module) u32 { if (data.@"align" != 0) return data.@"align"; return abiAlignment(data.pointee_type, mod); } diff --git a/src/value.zig b/src/value.zig index 49ca651a79..402e0981d3 100644 --- a/src/value.zig +++ b/src/value.zig @@ -694,7 +694,7 @@ pub const Value = struct { }, .enum_simple => { // Field index and integer values are the same. - const tag_ty = ty.intTagType(); + const tag_ty = try ty.intTagType(mod); return mod.intValue(tag_ty, field_index); }, else => unreachable, @@ -722,7 +722,9 @@ pub const Value = struct { // auto-numbered enum break :field_index @intCast(u32, val.toUnsignedInt(mod)); } - const int_tag_ty = ty.intTagType(); + const int_tag_ty = ty.intTagType(mod) catch |err| switch (err) { + error.OutOfMemory => @panic("OOM"), // TODO handle this failure + }; break :field_index @intCast(u32, values.getIndexContext(val, .{ .ty = int_tag_ty, .mod = mod }).?); }, }; @@ -737,7 +739,7 @@ pub const Value = struct { } /// Asserts the value is an integer. - pub fn toBigInt(val: Value, space: *BigIntSpace, mod: *const Module) BigIntConst { + pub fn toBigInt(val: Value, space: *BigIntSpace, mod: *Module) BigIntConst { return val.toBigIntAdvanced(space, mod, null) catch unreachable; } @@ -745,7 +747,7 @@ pub const Value = struct { pub fn toBigIntAdvanced( val: Value, space: *BigIntSpace, - mod: *const Module, + mod: *Module, opt_sema: ?*Sema, ) Module.CompileError!BigIntConst { return switch (val.ip_index) { @@ -801,13 +803,13 @@ pub const Value = struct { /// If the value fits in a u64, return it, otherwise null. /// Asserts not undefined. - pub fn getUnsignedInt(val: Value, mod: *const Module) ?u64 { + pub fn getUnsignedInt(val: Value, mod: *Module) ?u64 { return getUnsignedIntAdvanced(val, mod, null) catch unreachable; } /// If the value fits in a u64, return it, otherwise null. /// Asserts not undefined. 
- pub fn getUnsignedIntAdvanced(val: Value, mod: *const Module, opt_sema: ?*Sema) !?u64 { + pub fn getUnsignedIntAdvanced(val: Value, mod: *Module, opt_sema: ?*Sema) !?u64 { switch (val.ip_index) { .bool_false => return 0, .bool_true => return 1, @@ -847,12 +849,12 @@ pub const Value = struct { } /// Asserts the value is an integer and it fits in a u64 - pub fn toUnsignedInt(val: Value, mod: *const Module) u64 { + pub fn toUnsignedInt(val: Value, mod: *Module) u64 { return getUnsignedInt(val, mod).?; } /// Asserts the value is an integer and it fits in a i64 - pub fn toSignedInt(val: Value, mod: *const Module) i64 { + pub fn toSignedInt(val: Value, mod: *Module) i64 { switch (val.ip_index) { .bool_false => return 0, .bool_true => return 1, @@ -1405,7 +1407,7 @@ pub const Value = struct { } } - pub fn clz(val: Value, ty: Type, mod: *const Module) u64 { + pub fn clz(val: Value, ty: Type, mod: *Module) u64 { const ty_bits = ty.intInfo(mod).bits; return switch (val.ip_index) { .bool_false => ty_bits, @@ -1435,7 +1437,7 @@ pub const Value = struct { }; } - pub fn ctz(val: Value, ty: Type, mod: *const Module) u64 { + pub fn ctz(val: Value, ty: Type, mod: *Module) u64 { const ty_bits = ty.intInfo(mod).bits; return switch (val.ip_index) { .bool_false => ty_bits, @@ -1468,7 +1470,7 @@ pub const Value = struct { }; } - pub fn popCount(val: Value, ty: Type, mod: *const Module) u64 { + pub fn popCount(val: Value, ty: Type, mod: *Module) u64 { assert(!val.isUndef()); switch (val.ip_index) { .bool_false => return 0, @@ -1527,7 +1529,7 @@ pub const Value = struct { /// Asserts the value is an integer and not undefined. /// Returns the number of bits the value requires to represent stored in twos complement form. - pub fn intBitCountTwosComp(self: Value, mod: *const Module) usize { + pub fn intBitCountTwosComp(self: Value, mod: *Module) usize { const target = mod.getTarget(); return switch (self.ip_index) { .bool_false => 0, @@ -1593,13 +1595,13 @@ pub const Value = struct { }; } - pub fn orderAgainstZero(lhs: Value, mod: *const Module) std.math.Order { + pub fn orderAgainstZero(lhs: Value, mod: *Module) std.math.Order { return orderAgainstZeroAdvanced(lhs, mod, null) catch unreachable; } pub fn orderAgainstZeroAdvanced( lhs: Value, - mod: *const Module, + mod: *Module, opt_sema: ?*Sema, ) Module.CompileError!std.math.Order { switch (lhs.ip_index) { @@ -1683,13 +1685,13 @@ pub const Value = struct { } /// Asserts the value is comparable. - pub fn order(lhs: Value, rhs: Value, mod: *const Module) std.math.Order { + pub fn order(lhs: Value, rhs: Value, mod: *Module) std.math.Order { return orderAdvanced(lhs, rhs, mod, null) catch unreachable; } /// Asserts the value is comparable. /// If opt_sema is null then this function asserts things are resolved and cannot fail. - pub fn orderAdvanced(lhs: Value, rhs: Value, mod: *const Module, opt_sema: ?*Sema) !std.math.Order { + pub fn orderAdvanced(lhs: Value, rhs: Value, mod: *Module, opt_sema: ?*Sema) !std.math.Order { const lhs_against_zero = try lhs.orderAgainstZeroAdvanced(mod, opt_sema); const rhs_against_zero = try rhs.orderAgainstZeroAdvanced(mod, opt_sema); switch (lhs_against_zero) { @@ -1734,7 +1736,7 @@ pub const Value = struct { /// Asserts the value is comparable. Does not take a type parameter because it supports /// comparisons between heterogeneous types. 
- pub fn compareHetero(lhs: Value, op: std.math.CompareOperator, rhs: Value, mod: *const Module) bool { + pub fn compareHetero(lhs: Value, op: std.math.CompareOperator, rhs: Value, mod: *Module) bool { return compareHeteroAdvanced(lhs, op, rhs, mod, null) catch unreachable; } @@ -1742,7 +1744,7 @@ pub const Value = struct { lhs: Value, op: std.math.CompareOperator, rhs: Value, - mod: *const Module, + mod: *Module, opt_sema: ?*Sema, ) !bool { if (lhs.pointerDecl()) |lhs_decl| { @@ -2047,7 +2049,7 @@ pub const Value = struct { .Enum => { const a_val = try a.enumToInt(ty, mod); const b_val = try b.enumToInt(ty, mod); - const int_ty = ty.intTagType(); + const int_ty = try ty.intTagType(mod); return eqlAdvanced(a_val, int_ty, b_val, int_ty, mod, opt_sema); }, .Array, .Vector => { @@ -2462,7 +2464,7 @@ pub const Value = struct { }; } - fn hashInt(int_val: Value, hasher: *std.hash.Wyhash, mod: *const Module) void { + fn hashInt(int_val: Value, hasher: *std.hash.Wyhash, mod: *Module) void { var buffer: BigIntSpace = undefined; const big = int_val.toBigInt(&buffer, mod); std.hash.autoHash(hasher, big.positive); @@ -2471,7 +2473,7 @@ pub const Value = struct { } } - fn hashPtr(ptr_val: Value, hasher: *std.hash.Wyhash, mod: *const Module) void { + fn hashPtr(ptr_val: Value, hasher: *std.hash.Wyhash, mod: *Module) void { switch (ptr_val.tag()) { .decl_ref, .decl_ref_mut, -- cgit v1.2.3 From 275652f620541919087bc92da0d2f9e97c66d3c0 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 8 May 2023 16:52:59 -0700 Subject: stage2: move opaque types to InternPool --- src/Compilation.zig | 24 +- src/InternPool.zig | 73 +++-- src/Module.zig | 395 ++++++++++++++++----------- src/Sema.zig | 637 ++++++++++++++++++++++--------------------- src/arch/wasm/CodeGen.zig | 5 +- src/arch/wasm/Emit.zig | 2 +- src/arch/x86_64/CodeGen.zig | 15 +- src/codegen/c.zig | 6 +- src/codegen/c/type.zig | 8 +- src/codegen/llvm.zig | 66 ++--- src/codegen/spirv.zig | 11 +- src/codegen/spirv/Module.zig | 4 +- src/crash_report.zig | 8 +- src/link.zig | 4 +- src/link/Coff.zig | 67 ++--- src/link/Dwarf.zig | 2 +- src/link/Elf.zig | 65 ++--- src/link/MachO.zig | 84 +++--- src/link/Plan9.zig | 32 +-- src/link/Wasm.zig | 14 +- src/type.zig | 221 +++++++-------- 21 files changed, 935 insertions(+), 808 deletions(-) (limited to 'src/arch') diff --git a/src/Compilation.zig b/src/Compilation.zig index 75af9362f6..6291ce78d4 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -2048,7 +2048,7 @@ pub fn update(comp: *Compilation, main_progress_node: *std.Progress.Node) !void assert(decl.deletion_flag); assert(decl.dependants.count() == 0); const is_anon = if (decl.zir_decl_index == 0) blk: { - break :blk decl.src_namespace.anon_decls.swapRemove(decl_index); + break :blk module.namespacePtr(decl.src_namespace).anon_decls.swapRemove(decl_index); } else false; try module.clearDecl(decl_index, null); @@ -2530,8 +2530,7 @@ pub fn totalErrorCount(self: *Compilation) u32 { // the previous parse success, including compile errors, but we cannot // emit them until the file succeeds parsing. 
for (module.failed_decls.keys()) |key| { - const decl = module.declPtr(key); - if (decl.getFileScope().okToReportErrors()) { + if (module.declFileScope(key).okToReportErrors()) { total += 1; if (module.cimport_errors.get(key)) |errors| { total += errors.len; @@ -2540,8 +2539,7 @@ pub fn totalErrorCount(self: *Compilation) u32 { } if (module.emit_h) |emit_h| { for (emit_h.failed_decls.keys()) |key| { - const decl = module.declPtr(key); - if (decl.getFileScope().okToReportErrors()) { + if (module.declFileScope(key).okToReportErrors()) { total += 1; } } @@ -2644,10 +2642,10 @@ pub fn getAllErrorsAlloc(self: *Compilation) !ErrorBundle { { var it = module.failed_decls.iterator(); while (it.next()) |entry| { - const decl = module.declPtr(entry.key_ptr.*); + const decl_index = entry.key_ptr.*; // Skip errors for Decls within files that had a parse failure. // We'll try again once parsing succeeds. - if (decl.getFileScope().okToReportErrors()) { + if (module.declFileScope(decl_index).okToReportErrors()) { try addModuleErrorMsg(&bundle, entry.value_ptr.*.*); if (module.cimport_errors.get(entry.key_ptr.*)) |cimport_errors| for (cimport_errors) |c_error| { try bundle.addRootErrorMessage(.{ @@ -2669,10 +2667,10 @@ pub fn getAllErrorsAlloc(self: *Compilation) !ErrorBundle { if (module.emit_h) |emit_h| { var it = emit_h.failed_decls.iterator(); while (it.next()) |entry| { - const decl = module.declPtr(entry.key_ptr.*); + const decl_index = entry.key_ptr.*; // Skip errors for Decls within files that had a parse failure. // We'll try again once parsing succeeds. - if (decl.getFileScope().okToReportErrors()) { + if (module.declFileScope(decl_index).okToReportErrors()) { try addModuleErrorMsg(&bundle, entry.value_ptr.*.*); } } @@ -2710,7 +2708,7 @@ pub fn getAllErrorsAlloc(self: *Compilation) !ErrorBundle { const values = module.compile_log_decls.values(); // First one will be the error; subsequent ones will be notes. const err_decl = module.declPtr(keys[0]); - const src_loc = err_decl.nodeOffsetSrcLoc(values[0]); + const src_loc = err_decl.nodeOffsetSrcLoc(values[0], module); const err_msg = Module.ErrorMsg{ .src_loc = src_loc, .msg = "found compile log statement", @@ -2721,7 +2719,7 @@ pub fn getAllErrorsAlloc(self: *Compilation) !ErrorBundle { for (keys[1..], 0..) 
|key, i| { const note_decl = module.declPtr(key); err_msg.notes[i] = .{ - .src_loc = note_decl.nodeOffsetSrcLoc(values[i + 1]), + .src_loc = note_decl.nodeOffsetSrcLoc(values[i + 1], module), .msg = "also here", }; } @@ -3235,7 +3233,7 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: *std.Progress.Node) !v try module.failed_decls.ensureUnusedCapacity(gpa, 1); module.failed_decls.putAssumeCapacityNoClobber(decl_index, try Module.ErrorMsg.create( gpa, - decl.srcLoc(), + decl.srcLoc(module), "unable to update line number: {s}", .{@errorName(err)}, )); @@ -3848,7 +3846,7 @@ fn reportRetryableEmbedFileError( const mod = comp.bin_file.options.module.?; const gpa = mod.gpa; - const src_loc: Module.SrcLoc = mod.declPtr(embed_file.owner_decl).srcLoc(); + const src_loc: Module.SrcLoc = mod.declPtr(embed_file.owner_decl).srcLoc(mod); const err_msg = if (embed_file.pkg.root_src_directory.path) |dir_path| try Module.ErrorMsg.create( diff --git a/src/InternPool.zig b/src/InternPool.zig index 69037c3899..3708e21ef6 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -17,7 +17,8 @@ const BigIntMutable = std.math.big.int.Mutable; const Limb = std.math.big.Limb; const InternPool = @This(); -const DeclIndex = enum(u32) { _ }; +const DeclIndex = @import("Module.zig").Decl.Index; +const NamespaceIndex = @import("Module.zig").Namespace.Index; const KeyAdapter = struct { intern_pool: *const InternPool, @@ -48,7 +49,7 @@ pub const Key = union(enum) { extern_func: struct { ty: Index, /// The Decl that corresponds to the function itself. - owner_decl: DeclIndex, + decl: DeclIndex, /// Library name if specified. /// For example `extern "c" fn write(...) usize` would have 'c' as library name. /// Index into the string table bytes. @@ -62,6 +63,7 @@ pub const Key = union(enum) { tag: BigIntConst, }, struct_type: StructType, + opaque_type: OpaqueType, union_type: struct { fields_len: u32, @@ -116,6 +118,13 @@ pub const Key = union(enum) { // TODO move Module.Struct data to InternPool }; + pub const OpaqueType = struct { + /// The Decl that corresponds to the opaque itself. + decl: DeclIndex, + /// Represents the declarations inside this opaque. + namespace: NamespaceIndex, + }; + pub const Int = struct { ty: Index, storage: Storage, @@ -221,6 +230,7 @@ pub const Key = union(enum) { _ = union_type; @panic("TODO"); }, + .opaque_type => |opaque_type| std.hash.autoHash(hasher, opaque_type.decl), } } @@ -338,6 +348,11 @@ pub const Key = union(enum) { _ = b_info; @panic("TODO"); }, + + .opaque_type => |a_info| { + const b_info = b.opaque_type; + return a_info.decl == b_info.decl; + }, } } @@ -352,6 +367,7 @@ pub const Key = union(enum) { .simple_type, .struct_type, .union_type, + .opaque_type, => return .type_type, inline .ptr, @@ -770,10 +786,13 @@ pub const Tag = enum(u8) { /// are auto-numbered, and there are no declarations. /// data is payload index to `EnumSimple`. type_enum_simple, - /// A type that can be represented with only an enum tag. /// data is SimpleType enum value. simple_type, + /// An opaque type. + /// data is index of Key.OpaqueType in extra. + type_opaque, + /// A value that can be represented with only an enum tag. /// data is SimpleValue enum value. simple_value, @@ -986,7 +1005,7 @@ pub const ErrorUnion = struct { /// 0. field name: null-terminated string index for each fields_len; declaration order pub const EnumSimple = struct { /// The Decl that corresponds to the enum itself. 
- owner_decl: DeclIndex, + decl: DeclIndex, /// An integer type which is used for the numerical value of the enum. This /// is inferred by Zig to be the smallest power of two unsigned int that /// fits the number of fields. It is stored here to avoid unnecessary @@ -1146,6 +1165,9 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { .type_error_union => @panic("TODO"), .type_enum_simple => @panic("TODO"), + + .type_opaque => .{ .opaque_type = ip.extraData(Key.OpaqueType, data) }, + .simple_internal => switch (@intToEnum(SimpleInternal, data)) { .type_empty_struct => .{ .struct_type = .{ .fields_len = 0, @@ -1335,6 +1357,29 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .data = @enumToInt(simple_value), }); }, + + .struct_type => |struct_type| { + if (struct_type.fields_len != 0) { + @panic("TODO"); // handle structs other than empty_struct + } + ip.items.appendAssumeCapacity(.{ + .tag = .simple_internal, + .data = @enumToInt(SimpleInternal.type_empty_struct), + }); + }, + + .union_type => |union_type| { + _ = union_type; + @panic("TODO"); + }, + + .opaque_type => |opaque_type| { + ip.items.appendAssumeCapacity(.{ + .tag = .type_opaque, + .data = try ip.addExtra(gpa, opaque_type), + }); + }, + .extern_func => @panic("TODO"), .ptr => |ptr| switch (ptr.addr) { @@ -1504,21 +1549,6 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { const tag: Tag = if (enum_tag.tag.positive) .enum_tag_positive else .enum_tag_negative; try addInt(ip, gpa, enum_tag.ty, tag, enum_tag.tag.limbs); }, - - .struct_type => |struct_type| { - if (struct_type.fields_len != 0) { - @panic("TODO"); // handle structs other than empty_struct - } - ip.items.appendAssumeCapacity(.{ - .tag = .simple_internal, - .data = @enumToInt(SimpleInternal.type_empty_struct), - }); - }, - - .union_type => |union_type| { - _ = union_type; - @panic("TODO"); - }, } return @intToEnum(Index, ip.items.len - 1); } @@ -1548,6 +1578,8 @@ fn addExtraAssumeCapacity(ip: *InternPool, extra: anytype) u32 { ip.extra.appendAssumeCapacity(switch (field.type) { u32 => @field(extra, field.name), Index => @enumToInt(@field(extra, field.name)), + DeclIndex => @enumToInt(@field(extra, field.name)), + NamespaceIndex => @enumToInt(@field(extra, field.name)), i32 => @bitCast(u32, @field(extra, field.name)), Pointer.Flags => @bitCast(u32, @field(extra, field.name)), Pointer.PackedOffset => @bitCast(u32, @field(extra, field.name)), @@ -1603,6 +1635,8 @@ fn extraData(ip: InternPool, comptime T: type, index: usize) T { @field(result, field.name) = switch (field.type) { u32 => int32, Index => @intToEnum(Index, int32), + DeclIndex => @intToEnum(DeclIndex, int32), + NamespaceIndex => @intToEnum(NamespaceIndex, int32), i32 => @bitCast(i32, int32), Pointer.Flags => @bitCast(Pointer.Flags, int32), Pointer.PackedOffset => @bitCast(Pointer.PackedOffset, int32), @@ -1824,6 +1858,7 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void { .type_optional => 0, .type_error_union => @sizeOf(ErrorUnion), .type_enum_simple => @sizeOf(EnumSimple), + .type_opaque => @sizeOf(Key.OpaqueType), .simple_type => 0, .simple_value => 0, .simple_internal => 0, diff --git a/src/Module.zig b/src/Module.zig index dc7e34adc3..7521d4d439 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -185,6 +185,11 @@ allocated_decls: std.SegmentedList(Decl, 0) = .{}, /// When a Decl object is freed from `allocated_decls`, it is pushed into this stack. 
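Interning relies on get returning one stable index per distinct key; Key.hash and Key.eql above identify an opaque_type by its decl alone, so repeated gets for the same declaration yield the same Index. A toy illustration of that deduplication contract (not the real InternPool; it uses a plain array hash map over u64 keys):

    const std = @import("std");

    test "interning deduplicates keys into stable indices" {
        const gpa = std.testing.allocator;
        var pool = std.AutoArrayHashMap(u64, void).init(gpa);
        defer pool.deinit();

        const a = (try pool.getOrPut(123)).index;
        const b = (try pool.getOrPut(456)).index;
        const c = (try pool.getOrPut(123)).index; // same key, same index
        try std.testing.expect(a == c);
        try std.testing.expect(a != b);
    }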
decls_free_list: ArrayListUnmanaged(Decl.Index) = .{}, +/// Same pattern as with `allocated_decls`. +allocated_namespaces: std.SegmentedList(Namespace, 0) = .{}, +/// Same pattern as with `decls_free_list`. +namespaces_free_list: ArrayListUnmanaged(Namespace.Index) = .{}, + global_assembly: std.AutoHashMapUnmanaged(Decl.Index, []u8) = .{}, reference_table: std.AutoHashMapUnmanaged(Decl.Index, struct { @@ -363,7 +368,7 @@ pub const Export = struct { pub fn getSrcLoc(exp: Export, mod: *Module) SrcLoc { const src_decl = mod.declPtr(exp.src_decl); return .{ - .file_scope = src_decl.getFileScope(), + .file_scope = src_decl.getFileScope(mod), .parent_decl_node = src_decl.src_node, .lazy = exp.src, }; @@ -494,7 +499,7 @@ pub const Decl = struct { /// Reference to externally owned memory. /// In the case of the Decl corresponding to a file, this is /// the namespace of the struct, since there is no parent. - src_namespace: *Namespace, + src_namespace: Namespace.Index, /// The scope which lexically contains this decl. A decl must depend /// on its lexical parent, in order to ensure that this pointer is valid. @@ -691,8 +696,8 @@ pub const Decl = struct { /// This name is relative to the containing namespace of the decl. /// The memory is owned by the containing File ZIR. - pub fn getName(decl: Decl) ?[:0]const u8 { - const zir = decl.getFileScope().zir; + pub fn getName(decl: Decl, mod: *Module) ?[:0]const u8 { + const zir = decl.getFileScope(mod).zir; return decl.getNameZir(zir); } @@ -703,8 +708,8 @@ pub const Decl = struct { return zir.nullTerminatedString(name_index); } - pub fn contentsHash(decl: Decl) std.zig.SrcHash { - const zir = decl.getFileScope().zir; + pub fn contentsHash(decl: Decl, mod: *Module) std.zig.SrcHash { + const zir = decl.getFileScope(mod).zir; return decl.contentsHashZir(zir); } @@ -715,31 +720,31 @@ pub const Decl = struct { return contents_hash; } - pub fn zirBlockIndex(decl: *const Decl) Zir.Inst.Index { + pub fn zirBlockIndex(decl: *const Decl, mod: *Module) Zir.Inst.Index { assert(decl.zir_decl_index != 0); - const zir = decl.getFileScope().zir; + const zir = decl.getFileScope(mod).zir; return zir.extra[decl.zir_decl_index + 6]; } - pub fn zirAlignRef(decl: Decl) Zir.Inst.Ref { + pub fn zirAlignRef(decl: Decl, mod: *Module) Zir.Inst.Ref { if (!decl.has_align) return .none; assert(decl.zir_decl_index != 0); - const zir = decl.getFileScope().zir; + const zir = decl.getFileScope(mod).zir; return @intToEnum(Zir.Inst.Ref, zir.extra[decl.zir_decl_index + 8]); } - pub fn zirLinksectionRef(decl: Decl) Zir.Inst.Ref { + pub fn zirLinksectionRef(decl: Decl, mod: *Module) Zir.Inst.Ref { if (!decl.has_linksection_or_addrspace) return .none; assert(decl.zir_decl_index != 0); - const zir = decl.getFileScope().zir; + const zir = decl.getFileScope(mod).zir; const extra_index = decl.zir_decl_index + 8 + @boolToInt(decl.has_align); return @intToEnum(Zir.Inst.Ref, zir.extra[extra_index]); } - pub fn zirAddrspaceRef(decl: Decl) Zir.Inst.Ref { + pub fn zirAddrspaceRef(decl: Decl, mod: *Module) Zir.Inst.Ref { if (!decl.has_linksection_or_addrspace) return .none; assert(decl.zir_decl_index != 0); - const zir = decl.getFileScope().zir; + const zir = decl.getFileScope(mod).zir; const extra_index = decl.zir_decl_index + 8 + @boolToInt(decl.has_align) + 1; return @intToEnum(Zir.Inst.Ref, zir.extra[extra_index]); } @@ -764,25 +769,25 @@ pub const Decl = struct { return LazySrcLoc.nodeOffset(decl.nodeIndexToRelative(node_index)); } - pub fn srcLoc(decl: Decl) SrcLoc { - return 
decl.nodeOffsetSrcLoc(0); + pub fn srcLoc(decl: Decl, mod: *Module) SrcLoc { + return decl.nodeOffsetSrcLoc(0, mod); } - pub fn nodeOffsetSrcLoc(decl: Decl, node_offset: i32) SrcLoc { + pub fn nodeOffsetSrcLoc(decl: Decl, node_offset: i32, mod: *Module) SrcLoc { return .{ - .file_scope = decl.getFileScope(), + .file_scope = decl.getFileScope(mod), .parent_decl_node = decl.src_node, .lazy = LazySrcLoc.nodeOffset(node_offset), }; } - pub fn srcToken(decl: Decl) Ast.TokenIndex { - const tree = &decl.getFileScope().tree; + pub fn srcToken(decl: Decl, mod: *Module) Ast.TokenIndex { + const tree = &decl.getFileScope(mod).tree; return tree.firstToken(decl.src_node); } - pub fn srcByteOffset(decl: Decl) u32 { - const tree = &decl.getFileScope().tree; + pub fn srcByteOffset(decl: Decl, mod: *Module) u32 { + const tree = &decl.getFileScope(mod).tree; return tree.tokens.items(.start)[decl.srcToken()]; } @@ -791,12 +796,12 @@ pub const Decl = struct { if (decl.name_fully_qualified) { return writer.writeAll(unqualified_name); } - return decl.src_namespace.renderFullyQualifiedName(mod, unqualified_name, writer); + return mod.namespacePtr(decl.src_namespace).renderFullyQualifiedName(mod, unqualified_name, writer); } pub fn renderFullyQualifiedDebugName(decl: Decl, mod: *Module, writer: anytype) !void { const unqualified_name = mem.sliceTo(decl.name, 0); - return decl.src_namespace.renderFullyQualifiedDebugName(mod, unqualified_name, writer); + return mod.namespacePtr(decl.src_namespace).renderFullyQualifiedDebugName(mod, unqualified_name, writer); } pub fn getFullyQualifiedName(decl: Decl, mod: *Module) ![:0]u8 { @@ -877,32 +882,39 @@ pub const Decl = struct { /// Gets the namespace that this Decl creates by being a struct, union, /// enum, or opaque. /// Only returns it if the Decl is the owner. - pub fn getInnerNamespace(decl: *Decl) ?*Namespace { - if (!decl.owns_tv) return null; - const ty = (decl.val.castTag(.ty) orelse return null).data; - switch (ty.tag()) { - .@"struct" => { - const struct_obj = ty.castTag(.@"struct").?.data; - return &struct_obj.namespace; - }, - .enum_full, .enum_nonexhaustive => { - const enum_obj = ty.cast(Type.Payload.EnumFull).?.data; - return &enum_obj.namespace; - }, - .empty_struct => { - return ty.castTag(.empty_struct).?.data; - }, - .@"opaque" => { - const opaque_obj = ty.cast(Type.Payload.Opaque).?.data; - return &opaque_obj.namespace; - }, - .@"union", .union_safety_tagged, .union_tagged => { - const union_obj = ty.cast(Type.Payload.Union).?.data; - return &union_obj.namespace; - }, + pub fn getInnerNamespaceIndex(decl: *Decl, mod: *Module) Namespace.OptionalIndex { + if (!decl.owns_tv) return .none; + if (decl.val.ip_index == .none) { + const ty = (decl.val.castTag(.ty) orelse return .none).data; + switch (ty.tag()) { + .@"struct" => { + const struct_obj = ty.castTag(.@"struct").?.data; + return struct_obj.namespace.toOptional(); + }, + .enum_full, .enum_nonexhaustive => { + const enum_obj = ty.cast(Type.Payload.EnumFull).?.data; + return enum_obj.namespace.toOptional(); + }, + .empty_struct => { + @panic("TODO"); + }, + .@"union", .union_safety_tagged, .union_tagged => { + const union_obj = ty.cast(Type.Payload.Union).?.data; + return union_obj.namespace.toOptional(); + }, - else => return null, + else => return .none, + } } + return switch (mod.intern_pool.indexToKey(decl.val.ip_index)) { + .opaque_type => |opaque_type| opaque_type.namespace.toOptional(), + else => .none, + }; + } + + /// Same as `getInnerNamespaceIndex` but additionally obtains the pointer. 
+ pub fn getInnerNamespace(decl: *Decl, mod: *Module) ?*Namespace { + return if (getInnerNamespaceIndex(decl, mod).unwrap()) |i| mod.namespacePtr(i) else null; } pub fn dump(decl: *Decl) void { @@ -920,8 +932,8 @@ pub const Decl = struct { std.debug.print("\n", .{}); } - pub fn getFileScope(decl: Decl) *File { - return decl.src_namespace.file_scope; + pub fn getFileScope(decl: Decl, mod: *Module) *File { + return mod.namespacePtr(decl.src_namespace).file_scope; } pub fn removeDependant(decl: *Decl, other: Decl.Index) void { @@ -974,7 +986,7 @@ pub const ErrorSet = struct { pub fn srcLoc(self: ErrorSet, mod: *Module) SrcLoc { const owner_decl = mod.declPtr(self.owner_decl); return .{ - .file_scope = owner_decl.getFileScope(), + .file_scope = owner_decl.getFileScope(mod), .parent_decl_node = owner_decl.src_node, .lazy = LazySrcLoc.nodeOffset(0), }; @@ -1000,7 +1012,7 @@ pub const Struct = struct { /// Set of field names in declaration order. fields: Fields, /// Represents the declarations inside this struct. - namespace: Namespace, + namespace: Namespace.Index, /// The Decl that corresponds to the struct itself. owner_decl: Decl.Index, /// Index of the struct_decl ZIR instruction. @@ -1101,7 +1113,7 @@ pub const Struct = struct { pub fn srcLoc(s: Struct, mod: *Module) SrcLoc { const owner_decl = mod.declPtr(s.owner_decl); return .{ - .file_scope = owner_decl.getFileScope(), + .file_scope = owner_decl.getFileScope(mod), .parent_decl_node = owner_decl.src_node, .lazy = LazySrcLoc.nodeOffset(0), }; @@ -1110,7 +1122,7 @@ pub const Struct = struct { pub fn fieldSrcLoc(s: Struct, mod: *Module, query: FieldSrcQuery) SrcLoc { @setCold(true); const owner_decl = mod.declPtr(s.owner_decl); - const file = owner_decl.getFileScope(); + const file = owner_decl.getFileScope(mod); const tree = file.getTree(mod.gpa) catch |err| { // In this case we emit a warning + a less precise source location. log.warn("unable to load {s}: {s}", .{ @@ -1224,7 +1236,7 @@ pub const EnumSimple = struct { pub fn srcLoc(self: EnumSimple, mod: *Module) SrcLoc { const owner_decl = mod.declPtr(self.owner_decl); return .{ - .file_scope = owner_decl.getFileScope(), + .file_scope = owner_decl.getFileScope(mod), .parent_decl_node = owner_decl.src_node, .lazy = LazySrcLoc.nodeOffset(0), }; @@ -1253,7 +1265,7 @@ pub const EnumNumbered = struct { pub fn srcLoc(self: EnumNumbered, mod: *Module) SrcLoc { const owner_decl = mod.declPtr(self.owner_decl); return .{ - .file_scope = owner_decl.getFileScope(), + .file_scope = owner_decl.getFileScope(mod), .parent_decl_node = owner_decl.src_node, .lazy = LazySrcLoc.nodeOffset(0), }; @@ -1275,7 +1287,7 @@ pub const EnumFull = struct { /// If this hash map is empty, it means the enum tags are auto-numbered. values: ValueMap, /// Represents the declarations inside this enum. 
- namespace: Namespace, + namespace: Namespace.Index, /// true if zig inferred this tag type, false if user specified it tag_ty_inferred: bool, @@ -1285,7 +1297,7 @@ pub const EnumFull = struct { pub fn srcLoc(self: EnumFull, mod: *Module) SrcLoc { const owner_decl = mod.declPtr(self.owner_decl); return .{ - .file_scope = owner_decl.getFileScope(), + .file_scope = owner_decl.getFileScope(mod), .parent_decl_node = owner_decl.src_node, .lazy = LazySrcLoc.nodeOffset(0), }; @@ -1294,7 +1306,7 @@ pub const EnumFull = struct { pub fn fieldSrcLoc(e: EnumFull, mod: *Module, query: FieldSrcQuery) SrcLoc { @setCold(true); const owner_decl = mod.declPtr(e.owner_decl); - const file = owner_decl.getFileScope(); + const file = owner_decl.getFileScope(mod); const tree = file.getTree(mod.gpa) catch |err| { // In this case we emit a warning + a less precise source location. log.warn("unable to load {s}: {s}", .{ @@ -1323,7 +1335,7 @@ pub const Union = struct { /// Set of field names in declaration order. fields: Fields, /// Represents the declarations inside this union. - namespace: Namespace, + namespace: Namespace.Index, /// The Decl that corresponds to the union itself. owner_decl: Decl.Index, /// Index of the union_decl ZIR instruction. @@ -1371,7 +1383,7 @@ pub const Union = struct { pub fn srcLoc(self: Union, mod: *Module) SrcLoc { const owner_decl = mod.declPtr(self.owner_decl); return .{ - .file_scope = owner_decl.getFileScope(), + .file_scope = owner_decl.getFileScope(mod), .parent_decl_node = owner_decl.src_node, .lazy = LazySrcLoc.nodeOffset(0), }; @@ -1380,7 +1392,7 @@ pub const Union = struct { pub fn fieldSrcLoc(u: Union, mod: *Module, query: FieldSrcQuery) SrcLoc { @setCold(true); const owner_decl = mod.declPtr(u.owner_decl); - const file = owner_decl.getFileScope(); + const file = owner_decl.getFileScope(mod); const tree = file.getTree(mod.gpa) catch |err| { // In this case we emit a warning + a less precise source location. log.warn("unable to load {s}: {s}", .{ @@ -1563,26 +1575,6 @@ pub const Union = struct { } }; -pub const Opaque = struct { - /// The Decl that corresponds to the opaque itself. - owner_decl: Decl.Index, - /// Represents the declarations inside this opaque. - namespace: Namespace, - - pub fn srcLoc(self: Opaque, mod: *Module) SrcLoc { - const owner_decl = mod.declPtr(self.owner_decl); - return .{ - .file_scope = owner_decl.getFileScope(), - .parent_decl_node = owner_decl.src_node, - .lazy = LazySrcLoc.nodeOffset(0), - }; - } - - pub fn getFullyQualifiedName(s: *Opaque, mod: *Module) ![:0]u8 { - return mod.declPtr(s.owner_decl).getFullyQualifiedName(mod); - } -}; - /// Some extern function struct memory is owned by the Decl's TypedValue.Managed /// arena allocator. 
pub const ExternFn = struct { @@ -1759,7 +1751,7 @@ pub const Fn = struct { } pub fn isAnytypeParam(func: Fn, mod: *Module, index: u32) bool { - const file = mod.declPtr(func.owner_decl).getFileScope(); + const file = mod.declPtr(func.owner_decl).getFileScope(mod); const tags = file.zir.instructions.items(.tag); @@ -1774,7 +1766,7 @@ pub const Fn = struct { } pub fn getParamName(func: Fn, mod: *Module, index: u32) [:0]const u8 { - const file = mod.declPtr(func.owner_decl).getFileScope(); + const file = mod.declPtr(func.owner_decl).getFileScope(mod); const tags = file.zir.instructions.items(.tag); const data = file.zir.instructions.items(.data); @@ -1797,7 +1789,7 @@ pub const Fn = struct { pub fn hasInferredErrorSet(func: Fn, mod: *Module) bool { const owner_decl = mod.declPtr(func.owner_decl); - const zir = owner_decl.getFileScope().zir; + const zir = owner_decl.getFileScope(mod).zir; const zir_tags = zir.instructions.items(.tag); switch (zir_tags[func.zir_body_inst]) { .func => return false, @@ -1851,7 +1843,7 @@ pub const DeclAdapter = struct { /// The container that structs, enums, unions, and opaques have. pub const Namespace = struct { - parent: ?*Namespace, + parent: OptionalIndex, file_scope: *File, /// Will be a struct, enum, union, or opaque. ty: Type, @@ -1869,6 +1861,28 @@ pub const Namespace = struct { /// Value is whether the usingnamespace decl is marked `pub`. usingnamespace_set: std.AutoHashMapUnmanaged(Decl.Index, bool) = .{}, + pub const Index = enum(u32) { + _, + + pub fn toOptional(i: Index) OptionalIndex { + return @intToEnum(OptionalIndex, @enumToInt(i)); + } + }; + + pub const OptionalIndex = enum(u32) { + none = std.math.maxInt(u32), + _, + + pub fn init(oi: ?Index) OptionalIndex { + return @intToEnum(OptionalIndex, @enumToInt(oi orelse return .none)); + } + + pub fn unwrap(oi: OptionalIndex) ?Index { + if (oi == .none) return null; + return @intToEnum(Index, @enumToInt(oi)); + } + }; + const DeclContext = struct { module: *Module, @@ -1955,10 +1969,10 @@ pub const Namespace = struct { name: []const u8, writer: anytype, ) @TypeOf(writer).Error!void { - if (ns.parent) |parent| { - const decl_index = ns.getDeclIndex(); + if (ns.parent.unwrap()) |parent| { + const decl_index = ns.getDeclIndex(mod); const decl = mod.declPtr(decl_index); - try parent.renderFullyQualifiedName(mod, mem.sliceTo(decl.name, 0), writer); + try mod.namespacePtr(parent).renderFullyQualifiedName(mod, mem.sliceTo(decl.name, 0), writer); } else { try ns.file_scope.renderFullyQualifiedName(writer); } @@ -1976,10 +1990,10 @@ pub const Namespace = struct { writer: anytype, ) @TypeOf(writer).Error!void { var separator_char: u8 = '.'; - if (ns.parent) |parent| { - const decl_index = ns.getDeclIndex(); + if (ns.parent.unwrap()) |parent| { + const decl_index = ns.getDeclIndex(mod); const decl = mod.declPtr(decl_index); - try parent.renderFullyQualifiedDebugName(mod, mem.sliceTo(decl.name, 0), writer); + try mod.namespacePtr(parent).renderFullyQualifiedDebugName(mod, mem.sliceTo(decl.name, 0), writer); } else { try ns.file_scope.renderFullyQualifiedDebugName(writer); separator_char = ':'; @@ -1990,8 +2004,8 @@ pub const Namespace = struct { } } - pub fn getDeclIndex(ns: Namespace) Decl.Index { - return ns.ty.getOwnerDecl(); + pub fn getDeclIndex(ns: Namespace, mod: *Module) Decl.Index { + return ns.ty.getOwnerDecl(mod); } }; @@ -3320,7 +3334,7 @@ pub const LazySrcLoc = union(enum) { } /// Upgrade to a `SrcLoc` based on the `Decl` provided. 
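Namespace.Index and Namespace.OptionalIndex above pack an optional handle into 32 bits by reserving maxInt(u32) as the null state, mirroring the existing Decl.Index/Decl.OptionalIndex pattern. A self-contained sketch of the same encoding (the real types live in Module.zig):

    const std = @import("std");

    const Index = enum(u32) { _ };
    const OptionalIndex = enum(u32) {
        none = std.math.maxInt(u32),
        _,

        fn unwrap(oi: OptionalIndex) ?Index {
            if (oi == .none) return null;
            return @intToEnum(Index, @enumToInt(oi));
        }
    };

    test "optional index round trip" {
        const i = @intToEnum(Index, 42);
        const oi = @intToEnum(OptionalIndex, @enumToInt(i));
        try std.testing.expectEqual(i, oi.unwrap().?);
        try std.testing.expectEqual(@as(?Index, null), OptionalIndex.none.unwrap());
    }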
- pub fn toSrcLoc(lazy: LazySrcLoc, decl: *Decl) SrcLoc { + pub fn toSrcLoc(lazy: LazySrcLoc, decl: *Decl, mod: *Module) SrcLoc { return switch (lazy) { .unneeded, .entire_file, @@ -3328,7 +3342,7 @@ pub const LazySrcLoc = union(enum) { .token_abs, .node_abs, => .{ - .file_scope = decl.getFileScope(), + .file_scope = decl.getFileScope(mod), .parent_decl_node = 0, .lazy = lazy, }, @@ -3394,7 +3408,7 @@ pub const LazySrcLoc = union(enum) { .for_input, .for_capture_from_input, => .{ - .file_scope = decl.getFileScope(), + .file_scope = decl.getFileScope(mod), .parent_decl_node = decl.src_node, .lazy = lazy, }, @@ -3555,6 +3569,9 @@ pub fn deinit(mod: *Module) void { mod.global_assembly.deinit(gpa); mod.reference_table.deinit(gpa); + mod.namespaces_free_list.deinit(gpa); + mod.allocated_namespaces.deinit(gpa); + mod.string_literal_table.deinit(gpa); mod.string_literal_bytes.deinit(gpa); @@ -3575,8 +3592,9 @@ pub fn destroyDecl(mod: *Module, decl_index: Decl.Index) void { gpa.free(kv.value); } if (decl.has_tv) { - if (decl.getInnerNamespace()) |namespace| { - namespace.destroyDecls(mod); + if (decl.getInnerNamespaceIndex(mod).unwrap()) |i| { + mod.namespacePtr(i).destroyDecls(mod); + mod.destroyNamespace(i); } } decl.clearValues(mod); @@ -3596,16 +3614,21 @@ pub fn destroyDecl(mod: *Module, decl_index: Decl.Index) void { } } -pub fn declPtr(mod: *Module, decl_index: Decl.Index) *Decl { - return mod.allocated_decls.at(@enumToInt(decl_index)); +pub fn declPtr(mod: *Module, index: Decl.Index) *Decl { + return mod.allocated_decls.at(@enumToInt(index)); +} + +pub fn namespacePtr(mod: *Module, index: Namespace.Index) *Namespace { + return mod.allocated_namespaces.at(@enumToInt(index)); } /// Returns true if and only if the Decl is the top level struct associated with a File. 
pub fn declIsRoot(mod: *Module, decl_index: Decl.Index) bool { const decl = mod.declPtr(decl_index); - if (decl.src_namespace.parent != null) + const namespace = mod.namespacePtr(decl.src_namespace); + if (namespace.parent != .none) return false; - return decl_index == decl.src_namespace.getDeclIndex(); + return decl_index == namespace.getDeclIndex(mod); } fn freeExportList(gpa: Allocator, export_list: *ArrayListUnmanaged(*Export)) void { @@ -4076,7 +4099,7 @@ fn updateZirRefs(mod: *Module, file: *File, old_zir: Zir) !void { }; } - if (decl.getInnerNamespace()) |namespace| { + if (decl.getInnerNamespace(mod)) |namespace| { for (namespace.decls.keys()) |sub_decl| { try decl_stack.append(gpa, sub_decl); } @@ -4306,7 +4329,7 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void { try mod.failed_decls.ensureUnusedCapacity(mod.gpa, 1); mod.failed_decls.putAssumeCapacityNoClobber(decl_index, try ErrorMsg.create( mod.gpa, - decl.srcLoc(), + decl.srcLoc(mod), "unable to analyze: {s}", .{@errorName(e)}, )); @@ -4437,7 +4460,7 @@ pub fn ensureFuncBodyAnalyzed(mod: *Module, func: *Fn) SemaError!void { decl_index, try Module.ErrorMsg.create( gpa, - decl.srcLoc(), + decl.srcLoc(mod), "invalid liveness: {s}", .{@errorName(err)}, ), @@ -4460,7 +4483,7 @@ pub fn ensureFuncBodyAnalyzed(mod: *Module, func: *Fn) SemaError!void { try mod.failed_decls.ensureUnusedCapacity(gpa, 1); mod.failed_decls.putAssumeCapacityNoClobber(decl_index, try Module.ErrorMsg.create( gpa, - decl.srcLoc(), + decl.srcLoc(mod), "unable to codegen: {s}", .{@errorName(err)}, )); @@ -4586,13 +4609,13 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void { .status = .none, .known_non_opv = undefined, .is_tuple = undefined, // set below - .namespace = .{ - .parent = null, + .namespace = try mod.createNamespace(.{ + .parent = .none, .ty = struct_ty, .file_scope = file, - }, + }), }; - const new_decl_index = try mod.allocateNewDecl(&struct_obj.namespace, 0, null); + const new_decl_index = try mod.allocateNewDecl(struct_obj.namespace, 0, null); const new_decl = mod.declPtr(new_decl_index); file.root_decl = new_decl_index.toOptional(); struct_obj.owner_decl = new_decl_index; @@ -4688,12 +4711,12 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { const decl = mod.declPtr(decl_index); - if (decl.getFileScope().status != .success_zir) { + if (decl.getFileScope(mod).status != .success_zir) { return error.AnalysisFail; } const gpa = mod.gpa; - const zir = decl.getFileScope().zir; + const zir = decl.getFileScope(mod).zir; const zir_datas = zir.instructions.items(.data); decl.analysis = .in_progress; @@ -4767,7 +4790,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { block_scope.params.deinit(gpa); } - const zir_block_index = decl.zirBlockIndex(); + const zir_block_index = decl.zirBlockIndex(mod); const inst_data = zir_datas[zir_block_index].pl_node; const extra = zir.extraData(Zir.Inst.Block, inst_data.payload_index); const body = zir.extra[extra.end..][0..extra.data.body_len]; @@ -4792,7 +4815,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { }); } const ty = try decl_tv.val.toType().copy(decl_arena_allocator); - if (ty.getNamespace() == null) { + if (ty.getNamespace(mod) == null) { return sema.fail(&block_scope, ty_src, "type {} has no namespace", .{ty.fmt(mod)}); } @@ -4895,12 +4918,12 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { decl.ty = try decl_tv.ty.copy(decl_arena_allocator); decl.val = try decl_tv.val.copy(decl_arena_allocator); decl.@"align" = blk: { - 
const align_ref = decl.zirAlignRef(); + const align_ref = decl.zirAlignRef(mod); if (align_ref == .none) break :blk 0; break :blk try sema.resolveAlign(&block_scope, align_src, align_ref); }; decl.@"linksection" = blk: { - const linksection_ref = decl.zirLinksectionRef(); + const linksection_ref = decl.zirLinksectionRef(mod); if (linksection_ref == .none) break :blk null; const bytes = try sema.resolveConstString(&block_scope, section_src, linksection_ref, "linksection must be comptime-known"); if (mem.indexOfScalar(u8, bytes, 0) != null) { @@ -4921,7 +4944,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { }; const target = sema.mod.getTarget(); - break :blk switch (decl.zirAddrspaceRef()) { + break :blk switch (decl.zirAddrspaceRef(mod)) { .none => switch (addrspace_ctx) { .function => target_util.defaultAddressSpace(target, .function), .variable => target_util.defaultAddressSpace(target, .global_mutable), @@ -5273,7 +5296,7 @@ pub fn detectEmbedFileUpdate(mod: *Module, embed_file: *EmbedFile) !void { pub fn scanNamespace( mod: *Module, - namespace: *Namespace, + namespace_index: Namespace.Index, extra_start: usize, decls_len: u32, parent_decl: *Decl, @@ -5282,6 +5305,7 @@ pub fn scanNamespace( defer tracy.end(); const gpa = mod.gpa; + const namespace = mod.namespacePtr(namespace_index); const zir = namespace.file_scope.zir; try mod.comp.work_queue.ensureUnusedCapacity(decls_len); @@ -5294,7 +5318,7 @@ pub fn scanNamespace( var decl_i: u32 = 0; var scan_decl_iter: ScanDeclIter = .{ .module = mod, - .namespace = namespace, + .namespace_index = namespace_index, .parent_decl = parent_decl, }; while (decl_i < decls_len) : (decl_i += 1) { @@ -5317,7 +5341,7 @@ pub fn scanNamespace( const ScanDeclIter = struct { module: *Module, - namespace: *Namespace, + namespace_index: Namespace.Index, parent_decl: *Decl, usingnamespace_index: usize = 0, comptime_index: usize = 0, @@ -5329,7 +5353,8 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) Allocator.Err defer tracy.end(); const mod = iter.module; - const namespace = iter.namespace; + const namespace_index = iter.namespace_index; + const namespace = mod.namespacePtr(namespace_index); const gpa = mod.gpa; const zir = namespace.file_scope.zir; @@ -5404,7 +5429,7 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) Allocator.Err ); const comp = mod.comp; if (!gop.found_existing) { - const new_decl_index = try mod.allocateNewDecl(namespace, decl_node, iter.parent_decl.src_scope); + const new_decl_index = try mod.allocateNewDecl(namespace_index, decl_node, iter.parent_decl.src_scope); const new_decl = mod.declPtr(new_decl_index); new_decl.kind = kind; new_decl.name = decl_name; @@ -5456,7 +5481,7 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) Allocator.Err const decl = mod.declPtr(decl_index); if (kind == .@"test") { const src_loc = SrcLoc{ - .file_scope = decl.getFileScope(), + .file_scope = decl.getFileScope(mod), .parent_decl_node = decl.src_node, .lazy = .{ .token_offset = 1 }, }; @@ -5564,7 +5589,7 @@ pub fn clearDecl( if (decl.ty.isFnOrHasRuntimeBits(mod)) { mod.comp.bin_file.freeDecl(decl_index); } - if (decl.getInnerNamespace()) |namespace| { + if (decl.getInnerNamespace(mod)) |namespace| { try namespace.deleteAllDecls(mod, outdated_decls); } } @@ -5584,7 +5609,7 @@ pub fn deleteUnusedDecl(mod: *Module, decl_index: Decl.Index) void { log.debug("deleteUnusedDecl {d} ({s})", .{ decl_index, decl.name }); assert(!mod.declIsRoot(decl_index)); - 
assert(decl.src_namespace.anon_decls.swapRemove(decl_index)); + assert(mod.namespacePtr(decl.src_namespace).anon_decls.swapRemove(decl_index)); const dependants = decl.dependants.keys(); for (dependants) |dep| { @@ -5612,7 +5637,7 @@ pub fn abortAnonDecl(mod: *Module, decl_index: Decl.Index) void { log.debug("abortAnonDecl {*} ({s})", .{ decl, decl.name }); assert(!mod.declIsRoot(decl_index)); - assert(decl.src_namespace.anon_decls.swapRemove(decl_index)); + assert(mod.namespacePtr(decl.src_namespace).anon_decls.swapRemove(decl_index)); // An aborted decl must not have dependants -- they must have // been aborted first and removed from this list. @@ -5689,7 +5714,7 @@ pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air { .gpa = gpa, .arena = arena, .perm_arena = decl_arena_allocator, - .code = decl.getFileScope().zir, + .code = decl.getFileScope(mod).zir, .owner_decl = decl, .owner_decl_index = decl_index, .func = func, @@ -5920,9 +5945,34 @@ fn markOutdatedDecl(mod: *Module, decl_index: Decl.Index) !void { decl.analysis = .outdated; } +pub const CreateNamespaceOptions = struct { + parent: Namespace.OptionalIndex, + file_scope: *File, + ty: Type, +}; + +pub fn createNamespace(mod: *Module, options: CreateNamespaceOptions) !Namespace.Index { + if (mod.namespaces_free_list.popOrNull()) |index| return index; + const ptr = try mod.allocated_namespaces.addOne(mod.gpa); + ptr.* = .{ + .parent = options.parent, + .file_scope = options.file_scope, + .ty = options.ty, + }; + return @intToEnum(Namespace.Index, mod.allocated_namespaces.len - 1); +} + +pub fn destroyNamespace(mod: *Module, index: Namespace.Index) void { + mod.namespacePtr(index).* = undefined; + mod.namespaces_free_list.append(mod.gpa, index) catch { + // In order to keep `destroyNamespace` a non-fallible function, we ignore memory + // allocation failures here, instead leaking the Namespace until garbage collection. + }; +} + pub fn allocateNewDecl( mod: *Module, - namespace: *Namespace, + namespace: Namespace.Index, src_node: Ast.Node.Index, src_scope: ?*CaptureScope, ) !Decl.Index { @@ -6004,7 +6054,7 @@ pub fn createAnonymousDecl(mod: *Module, block: *Sema.Block, typed_value: TypedV pub fn createAnonymousDeclFromDecl( mod: *Module, src_decl: *Decl, - namespace: *Namespace, + namespace: Namespace.Index, src_scope: ?*CaptureScope, tv: TypedValue, ) !Decl.Index { @@ -6022,7 +6072,7 @@ pub fn initNewAnonDecl( mod: *Module, new_decl_index: Decl.Index, src_line: u32, - namespace: *Namespace, + namespace: Namespace.Index, typed_value: TypedValue, name: [:0]u8, ) !void { @@ -6040,7 +6090,7 @@ pub fn initNewAnonDecl( new_decl.analysis = .complete; new_decl.generation = mod.generation; - try namespace.anon_decls.putNoClobber(mod.gpa, new_decl_index, {}); + try mod.namespacePtr(namespace).anon_decls.putNoClobber(mod.gpa, new_decl_index, {}); // The Decl starts off with alive=false and the codegen backend will set alive=true // if the Decl is referenced by an instruction or another constant. Otherwise, @@ -6110,16 +6160,17 @@ pub const SwitchProngSrc = union(enum) { /// the LazySrcLoc in order to emit a compile error. pub fn resolve( prong_src: SwitchProngSrc, - gpa: Allocator, + mod: *Module, decl: *Decl, switch_node_offset: i32, range_expand: RangeExpand, ) LazySrcLoc { @setCold(true); - const tree = decl.getFileScope().getTree(gpa) catch |err| { + const gpa = mod.gpa; + const tree = decl.getFileScope(mod).getTree(gpa) catch |err| { // In this case we emit a warning + a less precise source location. 
log.warn("unable to load {s}: {s}", .{ - decl.getFileScope().sub_file_path, @errorName(err), + decl.getFileScope(mod).sub_file_path, @errorName(err), }); return LazySrcLoc.nodeOffset(0); }; @@ -6203,11 +6254,12 @@ pub const PeerTypeCandidateSrc = union(enum) { pub fn resolve( self: PeerTypeCandidateSrc, - gpa: Allocator, + mod: *Module, decl: *Decl, candidate_i: usize, ) ?LazySrcLoc { @setCold(true); + const gpa = mod.gpa; switch (self) { .none => { @@ -6229,10 +6281,10 @@ pub const PeerTypeCandidateSrc = union(enum) { else => {}, } - const tree = decl.getFileScope().getTree(gpa) catch |err| { + const tree = decl.getFileScope(mod).getTree(gpa) catch |err| { // In this case we emit a warning + a less precise source location. log.warn("unable to load {s}: {s}", .{ - decl.getFileScope().sub_file_path, @errorName(err), + decl.getFileScope(mod).sub_file_path, @errorName(err), }); return LazySrcLoc.nodeOffset(0); }; @@ -6291,15 +6343,16 @@ fn queryFieldSrc( pub fn paramSrc( func_node_offset: i32, - gpa: Allocator, + mod: *Module, decl: *Decl, param_i: usize, ) LazySrcLoc { @setCold(true); - const tree = decl.getFileScope().getTree(gpa) catch |err| { + const gpa = mod.gpa; + const tree = decl.getFileScope(mod).getTree(gpa) catch |err| { // In this case we emit a warning + a less precise source location. log.warn("unable to load {s}: {s}", .{ - decl.getFileScope().sub_file_path, @errorName(err), + decl.getFileScope(mod).sub_file_path, @errorName(err), }); return LazySrcLoc.nodeOffset(0); }; @@ -6321,19 +6374,20 @@ pub fn paramSrc( } pub fn argSrc( + mod: *Module, call_node_offset: i32, - gpa: Allocator, decl: *Decl, start_arg_i: usize, bound_arg_src: ?LazySrcLoc, ) LazySrcLoc { + @setCold(true); + const gpa = mod.gpa; if (start_arg_i == 0 and bound_arg_src != null) return bound_arg_src.?; const arg_i = start_arg_i - @boolToInt(bound_arg_src != null); - @setCold(true); - const tree = decl.getFileScope().getTree(gpa) catch |err| { + const tree = decl.getFileScope(mod).getTree(gpa) catch |err| { // In this case we emit a warning + a less precise source location. log.warn("unable to load {s}: {s}", .{ - decl.getFileScope().sub_file_path, @errorName(err), + decl.getFileScope(mod).sub_file_path, @errorName(err), }); return LazySrcLoc.nodeOffset(0); }; @@ -6347,7 +6401,7 @@ pub fn argSrc( const node_datas = tree.nodes.items(.data); const call_args_node = tree.extra_data[node_datas[node].rhs - 1]; const call_args_offset = decl.nodeIndexToRelative(call_args_node); - return initSrc(call_args_offset, gpa, decl, arg_i); + return mod.initSrc(call_args_offset, decl, arg_i); }, else => unreachable, }; @@ -6355,16 +6409,17 @@ pub fn argSrc( } pub fn initSrc( + mod: *Module, init_node_offset: i32, - gpa: Allocator, decl: *Decl, init_index: usize, ) LazySrcLoc { @setCold(true); - const tree = decl.getFileScope().getTree(gpa) catch |err| { + const gpa = mod.gpa; + const tree = decl.getFileScope(mod).getTree(gpa) catch |err| { // In this case we emit a warning + a less precise source location. 
log.warn("unable to load {s}: {s}", .{ - decl.getFileScope().sub_file_path, @errorName(err), + decl.getFileScope(mod).sub_file_path, @errorName(err), }); return LazySrcLoc.nodeOffset(0); }; @@ -6400,12 +6455,13 @@ pub fn initSrc( } } -pub fn optionsSrc(gpa: Allocator, decl: *Decl, base_src: LazySrcLoc, wanted: []const u8) LazySrcLoc { +pub fn optionsSrc(mod: *Module, decl: *Decl, base_src: LazySrcLoc, wanted: []const u8) LazySrcLoc { @setCold(true); - const tree = decl.getFileScope().getTree(gpa) catch |err| { + const gpa = mod.gpa; + const tree = decl.getFileScope(mod).getTree(gpa) catch |err| { // In this case we emit a warning + a less precise source location. log.warn("unable to load {s}: {s}", .{ - decl.getFileScope().sub_file_path, @errorName(err), + decl.getFileScope(mod).sub_file_path, @errorName(err), }); return LazySrcLoc.nodeOffset(0); }; @@ -6471,7 +6527,10 @@ pub fn processOutdatedAndDeletedDecls(mod: *Module) !void { // Remove from the namespace it resides in, preserving declaration order. assert(decl.zir_decl_index != 0); - _ = decl.src_namespace.decls.orderedRemoveAdapted(@as([]const u8, mem.sliceTo(decl.name, 0)), DeclAdapter{ .mod = mod }); + _ = mod.namespacePtr(decl.src_namespace).decls.orderedRemoveAdapted( + @as([]const u8, mem.sliceTo(decl.name, 0)), + DeclAdapter{ .mod = mod }, + ); try mod.clearDecl(decl_index, &outdated_decls); mod.destroyDecl(decl_index); @@ -6541,8 +6600,11 @@ pub fn populateTestFunctions( const builtin_pkg = mod.main_pkg.table.get("builtin").?; const builtin_file = (mod.importPkg(builtin_pkg) catch unreachable).file; const root_decl = mod.declPtr(builtin_file.root_decl.unwrap().?); - const builtin_namespace = root_decl.src_namespace; - const decl_index = builtin_namespace.decls.getKeyAdapted(@as([]const u8, "test_functions"), DeclAdapter{ .mod = mod }).?; + const builtin_namespace = mod.namespacePtr(root_decl.src_namespace); + const decl_index = builtin_namespace.decls.getKeyAdapted( + @as([]const u8, "test_functions"), + DeclAdapter{ .mod = mod }, + ).?; { // We have to call `ensureDeclAnalyzed` here in case `builtin.test_functions` // was not referenced by start code. 
@@ -6673,7 +6735,7 @@ pub fn linkerUpdateDecl(mod: *Module, decl_index: Decl.Index) !void { try mod.failed_decls.ensureUnusedCapacity(gpa, 1); mod.failed_decls.putAssumeCapacityNoClobber(decl_index, try ErrorMsg.create( gpa, - decl.srcLoc(), + decl.srcLoc(mod), "unable to codegen: {s}", .{@errorName(err)}, )); @@ -7138,3 +7200,24 @@ pub fn atomicPtrAlignment( return 0; } + +pub fn opaqueSrcLoc(mod: *Module, opaque_type: InternPool.Key.OpaqueType) SrcLoc { + const owner_decl = mod.declPtr(opaque_type.decl); + return .{ + .file_scope = owner_decl.getFileScope(mod), + .parent_decl_node = owner_decl.src_node, + .lazy = LazySrcLoc.nodeOffset(0), + }; +} + +pub fn opaqueFullyQualifiedName(mod: *Module, opaque_type: InternPool.Key.OpaqueType) ![:0]u8 { + return mod.declPtr(opaque_type.decl).getFullyQualifiedName(mod); +} + +pub fn declFileScope(mod: *Module, decl_index: Decl.Index) *File { + return mod.declPtr(decl_index).getFileScope(mod); +} + +pub fn namespaceDeclIndex(mod: *Module, namespace_index: Namespace.Index) Decl.Index { + return mod.namespacePtr(namespace_index).getDeclIndex(mod); +} diff --git a/src/Sema.zig b/src/Sema.zig index 3d67324673..35440395c4 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -227,7 +227,7 @@ pub const Block = struct { sema: *Sema, /// The namespace to use for lookups from this source block /// When analyzing fields, this is different from src_decl.src_namespace. - namespace: *Namespace, + namespace: Namespace.Index, /// The AIR instructions generated for this block. instructions: std.ArrayListUnmanaged(Air.Inst.Index), // `param` instructions are collected here to be used by the `func` instruction. @@ -286,6 +286,7 @@ pub const Block = struct { fn explain(cr: ComptimeReason, sema: *Sema, msg: ?*Module.ErrorMsg) !void { const parent = msg orelse return; + const mod = sema.mod; const prefix = "expression is evaluated at comptime because "; switch (cr) { .c_import => |ci| { @@ -293,12 +294,12 @@ pub const Block = struct { }, .comptime_ret_ty => |rt| { const src_loc = if (try sema.funcDeclSrc(rt.func)) |fn_decl| blk: { - var src_loc = fn_decl.srcLoc(); + var src_loc = fn_decl.srcLoc(mod); src_loc.lazy = .{ .node_offset_fn_type_ret_ty = 0 }; break :blk src_loc; } else blk: { const src_decl = sema.mod.declPtr(rt.block.src_decl); - break :blk rt.func_src.toSrcLoc(src_decl); + break :blk rt.func_src.toSrcLoc(src_decl, mod); }; if (rt.return_ty.isGenericPoison()) { return sema.mod.errNoteNonLazy(src_loc, parent, prefix ++ "the generic function was instantiated with a comptime-only return type", .{}); @@ -399,8 +400,8 @@ pub const Block = struct { }; } - pub fn getFileScope(block: *Block) *Module.File { - return block.namespace.file_scope; + pub fn getFileScope(block: *Block, mod: *Module) *Module.File { + return mod.namespacePtr(block.namespace).file_scope; } fn addTy( @@ -876,6 +877,7 @@ fn analyzeBodyInner( wip_captures.deinit(); }; + const mod = sema.mod; const map = &sema.inst_map; const tags = sema.code.instructions.items(.tag); const datas = sema.code.instructions.items(.data); @@ -896,7 +898,7 @@ fn analyzeBodyInner( crash_info.setBodyIndex(i); const inst = body[i]; std.log.scoped(.sema_zir).debug("sema ZIR {s} %{d}", .{ - sema.mod.declPtr(block.src_decl).src_namespace.file_scope.sub_file_path, inst, + mod.namespacePtr(mod.declPtr(block.src_decl).src_namespace).file_scope.sub_file_path, inst, }); const air_inst: Air.Inst.Ref = switch (tags[inst]) { // zig fmt: off @@ -1574,7 +1576,6 @@ fn analyzeBodyInner( }, .condbr => blk: { if (!block.is_comptime) break 
sema.zirCondbr(block, inst); - const mod = sema.mod; // Same as condbr_inline. TODO https://github.com/ziglang/zig/issues/8220 const inst_data = datas[inst].pl_node; const cond_src: LazySrcLoc = .{ .node_offset_if_cond = inst_data.src_node }; @@ -1597,7 +1598,6 @@ fn analyzeBodyInner( } }, .condbr_inline => blk: { - const mod = sema.mod; const inst_data = datas[inst].pl_node; const cond_src: LazySrcLoc = .{ .node_offset_if_cond = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.CondBr, inst_data.payload_index); @@ -1622,7 +1622,6 @@ fn analyzeBodyInner( }, .@"try" => blk: { if (!block.is_comptime) break :blk try sema.zirTry(block, inst); - const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; @@ -1632,7 +1631,7 @@ fn analyzeBodyInner( const err_union_ty = sema.typeOf(err_union); if (err_union_ty.zigTypeTag(mod) != .ErrorUnion) { return sema.fail(block, operand_src, "expected error union type, found '{}'", .{ - err_union_ty.fmt(sema.mod), + err_union_ty.fmt(mod), }); } const is_non_err = try sema.analyzeIsNonErrComptimeOnly(block, operand_src, err_union); @@ -1654,7 +1653,6 @@ fn analyzeBodyInner( }, .try_ptr => blk: { if (!block.is_comptime) break :blk try sema.zirTryPtr(block, inst); - const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; @@ -1713,7 +1711,7 @@ fn analyzeBodyInner( const noreturn_inst = block.instructions.popOrNull(); while (dbg_block_begins > 0) { dbg_block_begins -= 1; - if (block.is_comptime or sema.mod.comp.bin_file.options.strip) continue; + if (block.is_comptime or mod.comp.bin_file.options.strip) continue; _ = try block.addInst(.{ .tag = .dbg_block_end, @@ -2172,7 +2170,7 @@ fn errNote( ) error{OutOfMemory}!void { const mod = sema.mod; const src_decl = mod.declPtr(block.src_decl); - return mod.errNoteNonLazy(src.toSrcLoc(src_decl), parent, format, args); + return mod.errNoteNonLazy(src.toSrcLoc(src_decl, mod), parent, format, args); } fn addFieldErrNote( @@ -2185,19 +2183,19 @@ fn addFieldErrNote( ) !void { @setCold(true); const mod = sema.mod; - const decl_index = container_ty.getOwnerDecl(); + const decl_index = container_ty.getOwnerDecl(mod); const decl = mod.declPtr(decl_index); const field_src = blk: { - const tree = decl.getFileScope().getTree(sema.gpa) catch |err| { + const tree = decl.getFileScope(mod).getTree(sema.gpa) catch |err| { log.err("unable to load AST to report compile error: {s}", .{@errorName(err)}); - break :blk decl.srcLoc(); + break :blk decl.srcLoc(mod); }; const container_node = decl.relativeToNodeIndex(0); const node_tags = tree.nodes.items(.tag); var buf: [2]std.zig.Ast.Node.Index = undefined; - const container_decl = tree.fullContainerDecl(&buf, container_node) orelse break :blk decl.srcLoc(); + const container_decl = tree.fullContainerDecl(&buf, container_node) orelse break :blk decl.srcLoc(mod); var it_index: usize = 0; for (container_decl.ast.members) |member_node| { @@ -2207,7 +2205,7 @@ fn addFieldErrNote( .container_field, => { if (it_index == field_index) { - break :blk decl.nodeOffsetSrcLoc(decl.nodeIndexToRelative(member_node)); + break :blk decl.nodeOffsetSrcLoc(decl.nodeIndexToRelative(member_node), mod); } it_index += 1; }, @@ -2228,7 +2226,7 @@ fn errMsg( ) error{OutOfMemory}!*Module.ErrorMsg { const mod = sema.mod; const 
src_decl = mod.declPtr(block.src_decl); - return Module.ErrorMsg.create(sema.gpa, src.toSrcLoc(src_decl), format, args); + return Module.ErrorMsg.create(sema.gpa, src.toSrcLoc(src_decl, mod), format, args); } pub fn fail( @@ -2287,7 +2285,7 @@ fn failWithOwnedErrorMsg(sema: *Sema, err_msg: *Module.ErrorMsg) CompileError { if (gop.found_existing) break; if (cur_reference_trace < max_references) { const decl = sema.mod.declPtr(ref.referencer); - try reference_stack.append(.{ .decl = decl.name, .src_loc = ref.src.toSrcLoc(decl) }); + try reference_stack.append(.{ .decl = decl.name, .src_loc = ref.src.toSrcLoc(decl, mod) }); } referenced_by = ref.referencer; } @@ -2664,7 +2662,7 @@ pub fn analyzeStructDecl( } } - _ = try sema.mod.scanNamespace(&struct_obj.namespace, extra_index, decls_len, new_decl); + _ = try sema.mod.scanNamespace(struct_obj.namespace, extra_index, decls_len, new_decl); } fn zirStructDecl( @@ -2702,15 +2700,12 @@ fn zirStructDecl( .status = .none, .known_non_opv = undefined, .is_tuple = small.is_tuple, - .namespace = .{ - .parent = block.namespace, + .namespace = try mod.createNamespace(.{ + .parent = block.namespace.toOptional(), .ty = struct_ty, - .file_scope = block.getFileScope(), - }, + .file_scope = block.getFileScope(mod), + }), }; - std.log.scoped(.module).debug("create struct {*} owned by {*} ({s})", .{ - &struct_obj.namespace, new_decl, new_decl.name, - }); try sema.analyzeStructDecl(new_decl, inst, struct_obj); try new_decl.finalizeNewArena(&new_decl_arena); return sema.analyzeDeclVal(block, src, new_decl_index); @@ -2887,15 +2882,12 @@ fn zirEnumDecl( .tag_ty_inferred = true, .fields = .{}, .values = .{}, - .namespace = .{ - .parent = block.namespace, + .namespace = try mod.createNamespace(.{ + .parent = block.namespace.toOptional(), .ty = enum_ty, - .file_scope = block.getFileScope(), - }, + .file_scope = block.getFileScope(mod), + }), }; - std.log.scoped(.module).debug("create enum {*} owned by {*} ({s})", .{ - &enum_obj.namespace, new_decl, new_decl.name, - }); try new_decl.finalizeNewArena(&new_decl_arena); const decl_val = try sema.analyzeDeclVal(block, src, new_decl_index); @@ -2905,7 +2897,7 @@ fn zirEnumDecl( const decl_arena_allocator = new_decl.value_arena.?.acquire(gpa, &decl_arena); defer new_decl.value_arena.?.release(&decl_arena); - extra_index = try mod.scanNamespace(&enum_obj.namespace, extra_index, decls_len, new_decl); + extra_index = try mod.scanNamespace(enum_obj.namespace, extra_index, decls_len, new_decl); const body = sema.code.extra[extra_index..][0..body_len]; extra_index += body.len; @@ -2944,7 +2936,7 @@ fn zirEnumDecl( .parent = null, .sema = sema, .src_decl = new_decl_index, - .namespace = &enum_obj.namespace, + .namespace = enum_obj.namespace, .wip_capture_scope = wip_captures.scope, .instructions = .{}, .inlining = null, @@ -3164,17 +3156,14 @@ fn zirUnionDecl( .zir_index = inst, .layout = small.layout, .status = .none, - .namespace = .{ - .parent = block.namespace, + .namespace = try mod.createNamespace(.{ + .parent = block.namespace.toOptional(), .ty = union_ty, - .file_scope = block.getFileScope(), - }, + .file_scope = block.getFileScope(mod), + }), }; - std.log.scoped(.module).debug("create union {*} owned by {*} ({s})", .{ - &union_obj.namespace, new_decl, new_decl.name, - }); - _ = try mod.scanNamespace(&union_obj.namespace, extra_index, decls_len, new_decl); + _ = try mod.scanNamespace(union_obj.namespace, extra_index, decls_len, new_decl); try new_decl.finalizeNewArena(&new_decl_arena); return sema.analyzeDeclVal(block, src, 
new_decl_index); @@ -3208,37 +3197,37 @@ fn zirOpaqueDecl( var new_decl_arena = std.heap.ArenaAllocator.init(gpa); errdefer new_decl_arena.deinit(); - const new_decl_arena_allocator = new_decl_arena.allocator(); - const opaque_obj = try new_decl_arena_allocator.create(Module.Opaque); - const opaque_ty_payload = try new_decl_arena_allocator.create(Type.Payload.Opaque); - opaque_ty_payload.* = .{ - .base = .{ .tag = .@"opaque" }, - .data = opaque_obj, - }; - const opaque_ty = Type.initPayload(&opaque_ty_payload.base); - const opaque_val = try Value.Tag.ty.create(new_decl_arena_allocator, opaque_ty); + // Because these three things each reference each other, `undefined` + // placeholders are used in two places before being set after the opaque + // type gains an InternPool index. + const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{ .ty = Type.type, - .val = opaque_val, + .val = undefined, }, small.name_strategy, "opaque", inst); const new_decl = mod.declPtr(new_decl_index); new_decl.owns_tv = true; errdefer mod.abortAnonDecl(new_decl_index); - opaque_obj.* = .{ - .owner_decl = new_decl_index, - .namespace = .{ - .parent = block.namespace, - .ty = opaque_ty, - .file_scope = block.getFileScope(), - }, - }; - std.log.scoped(.module).debug("create opaque {*} owned by {*} ({s})", .{ - &opaque_obj.namespace, new_decl, new_decl.name, + const new_namespace_index = try mod.createNamespace(.{ + .parent = block.namespace.toOptional(), + .ty = undefined, + .file_scope = block.getFileScope(mod), }); + const new_namespace = mod.namespacePtr(new_namespace_index); + errdefer @panic("TODO error handling"); + + const opaque_ty = try mod.intern_pool.get(gpa, .{ .opaque_type = .{ + .decl = new_decl_index, + .namespace = new_namespace_index, + } }); + errdefer @panic("TODO error handling"); + + new_decl.val = opaque_ty.toValue(); + new_namespace.ty = opaque_ty.toType(); - extra_index = try mod.scanNamespace(&opaque_obj.namespace, extra_index, decls_len, new_decl); + extra_index = try mod.scanNamespace(new_namespace_index, extra_index, decls_len, new_decl); try new_decl.finalizeNewArena(&new_decl_arena); return sema.analyzeDeclVal(block, src, new_decl_index); @@ -4848,7 +4837,7 @@ fn zirValidateDeref(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErr errdefer msg.destroy(sema.gpa); const src_decl = sema.mod.declPtr(block.src_decl); - try sema.explainWhyTypeIsComptime(msg, src.toSrcLoc(src_decl), elem_ty); + try sema.explainWhyTypeIsComptime(msg, src.toSrcLoc(src_decl, mod), elem_ty); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); @@ -4870,7 +4859,7 @@ fn failWithBadMemberAccess( .Enum => "enum", else => unreachable, }; - if (agg_ty.getOwnerDeclOrNull()) |some| if (sema.mod.declIsRoot(some)) { + if (agg_ty.getOwnerDeclOrNull(mod)) |some| if (sema.mod.declIsRoot(some)) { return sema.fail(block, field_src, "root struct of file '{}' has no member named '{s}'", .{ agg_ty.fmt(sema.mod), field_name, }); @@ -5632,7 +5621,7 @@ fn analyzeBlockBody( try sema.errNote(child_block, runtime_src, msg, "runtime control flow here", .{}); const child_src_decl = mod.declPtr(child_block.src_decl); - try sema.explainWhyTypeIsComptime(msg, type_src.toSrcLoc(child_src_decl), resolved_ty); + try sema.explainWhyTypeIsComptime(msg, type_src.toSrcLoc(child_src_decl, mod), resolved_ty); break :msg msg; }; @@ -5703,6 +5692,7 @@ fn zirExport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = 
sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Export, inst_data.payload_index).data; const src = inst_data.src(); @@ -5711,7 +5701,7 @@ fn zirExport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void const decl_name = sema.code.nullTerminatedString(extra.decl_name); const decl_index = if (extra.namespace != .none) index_blk: { const container_ty = try sema.resolveType(block, operand_src, extra.namespace); - const container_namespace = container_ty.getNamespace().?; + const container_namespace = container_ty.getNamespaceIndex(mod).unwrap().?; const maybe_index = try sema.lookupInNamespace(block, operand_src, container_namespace, decl_name, false); break :index_blk maybe_index orelse @@ -5725,8 +5715,8 @@ fn zirExport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void else => |e| return e, }; { - try sema.mod.ensureDeclAnalyzed(decl_index); - const exported_decl = sema.mod.declPtr(decl_index); + try mod.ensureDeclAnalyzed(decl_index); + const exported_decl = mod.declPtr(decl_index); if (exported_decl.val.castTag(.function)) |some| { return sema.analyzeExport(block, src, options, some.data.owner_decl); } @@ -5789,7 +5779,7 @@ pub fn analyzeExport( errdefer msg.destroy(sema.gpa); const src_decl = sema.mod.declPtr(block.src_decl); - try sema.explainWhyTypeIsNotExtern(msg, src.toSrcLoc(src_decl), exported_decl.ty, .other); + try sema.explainWhyTypeIsNotExtern(msg, src.toSrcLoc(src_decl, mod), exported_decl.ty, .other); try sema.addDeclaredHereNote(msg, exported_decl.ty); break :msg msg; @@ -6075,12 +6065,13 @@ fn zirDeclVal(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air } fn lookupIdentifier(sema: *Sema, block: *Block, src: LazySrcLoc, name: []const u8) !Decl.Index { + const mod = sema.mod; var namespace = block.namespace; while (true) { if (try sema.lookupInNamespace(block, src, namespace, name, false)) |decl_index| { return decl_index; } - namespace = namespace.parent orelse break; + namespace = mod.namespacePtr(namespace).parent.unwrap() orelse break; } unreachable; // AstGen detects use of undeclared identifier errors. } @@ -6091,13 +6082,14 @@ fn lookupInNamespace( sema: *Sema, block: *Block, src: LazySrcLoc, - namespace: *Namespace, + namespace_index: Namespace.Index, ident_name: []const u8, observe_usingnamespace: bool, ) CompileError!?Decl.Index { const mod = sema.mod; - const namespace_decl_index = namespace.getDeclIndex(); + const namespace = mod.namespacePtr(namespace_index); + const namespace_decl_index = namespace.getDeclIndex(mod); const namespace_decl = sema.mod.declPtr(namespace_decl_index); if (namespace_decl.analysis == .file_failure) { try mod.declareDeclDependency(sema.owner_decl_index, namespace_decl_index); @@ -6105,7 +6097,7 @@ fn lookupInNamespace( } if (observe_usingnamespace and namespace.usingnamespace_set.count() != 0) { - const src_file = block.namespace.file_scope; + const src_file = mod.namespacePtr(block.namespace).file_scope; const gpa = sema.gpa; var checked_namespaces: std.AutoArrayHashMapUnmanaged(*Namespace, bool) = .{}; @@ -6124,7 +6116,7 @@ fn lookupInNamespace( // Skip decls which are not marked pub, which are in a different // file than the `a.b`/`@hasDecl` syntax. 
const decl = mod.declPtr(decl_index); - if (decl.is_pub or (src_file == decl.getFileScope() and checked_namespaces.values()[check_i])) { + if (decl.is_pub or (src_file == decl.getFileScope(mod) and checked_namespaces.values()[check_i])) { try candidates.append(gpa, decl_index); } } @@ -6135,15 +6127,15 @@ fn lookupInNamespace( if (sub_usingnamespace_decl_index == sema.owner_decl_index) continue; const sub_usingnamespace_decl = mod.declPtr(sub_usingnamespace_decl_index); const sub_is_pub = entry.value_ptr.*; - if (!sub_is_pub and src_file != sub_usingnamespace_decl.getFileScope()) { + if (!sub_is_pub and src_file != sub_usingnamespace_decl.getFileScope(mod)) { // Skip usingnamespace decls which are not marked pub, which are in // a different file than the `a.b`/`@hasDecl` syntax. continue; } try sema.ensureDeclAnalyzed(sub_usingnamespace_decl_index); const ns_ty = sub_usingnamespace_decl.val.castTag(.ty).?.data; - const sub_ns = ns_ty.getNamespace().?; - try checked_namespaces.put(gpa, sub_ns, src_file == sub_usingnamespace_decl.getFileScope()); + const sub_ns = ns_ty.getNamespace(mod).?; + try checked_namespaces.put(gpa, sub_ns, src_file == sub_usingnamespace_decl.getFileScope(mod)); } } @@ -6171,7 +6163,7 @@ fn lookupInNamespace( errdefer msg.destroy(gpa); for (candidates.items) |candidate_index| { const candidate = mod.declPtr(candidate_index); - const src_loc = candidate.srcLoc(); + const src_loc = candidate.srcLoc(mod); try mod.errNoteNonLazy(src_loc, msg, "declared here", .{}); } break :msg msg; @@ -6532,7 +6524,7 @@ fn checkCallArgumentCount( ); errdefer msg.destroy(sema.gpa); - if (maybe_decl) |fn_decl| try sema.mod.errNoteNonLazy(fn_decl.srcLoc(), msg, "function declared here", .{}); + if (maybe_decl) |fn_decl| try sema.mod.errNoteNonLazy(fn_decl.srcLoc(mod), msg, "function declared here", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); @@ -6669,7 +6661,7 @@ fn analyzeCall( ); errdefer msg.destroy(sema.gpa); - if (maybe_decl) |fn_decl| try sema.mod.errNoteNonLazy(fn_decl.srcLoc(), msg, "function declared here", .{}); + if (maybe_decl) |fn_decl| try sema.mod.errNoteNonLazy(fn_decl.srcLoc(mod), msg, "function declared here", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); @@ -6811,7 +6803,7 @@ fn analyzeCall( // than create a child one. 
const parent_zir = sema.code; const fn_owner_decl = mod.declPtr(module_fn.owner_decl); - sema.code = fn_owner_decl.getFileScope().zir; + sema.code = fn_owner_decl.getFileScope(mod).zir; defer sema.code = parent_zir; try mod.declareDeclDependencyType(sema.owner_decl_index, module_fn.owner_decl, .function_body); @@ -6911,7 +6903,7 @@ fn analyzeCall( try sema.analyzeInlineCallArg( block, &child_block, - Module.argSrc(call_src.node_offset.x, sema.gpa, decl, arg_i, bound_arg_src), + mod.argSrc(call_src.node_offset.x, decl, arg_i, bound_arg_src), inst, new_fn_info, &arg_i, @@ -7098,7 +7090,7 @@ fn analyzeCall( const decl = sema.mod.declPtr(block.src_decl); _ = try sema.analyzeCallArg( block, - Module.argSrc(call_src.node_offset.x, sema.gpa, decl, i, bound_arg_src), + mod.argSrc(call_src.node_offset.x, decl, i, bound_arg_src), param_ty, uncasted_arg, opts, @@ -7114,7 +7106,7 @@ fn analyzeCall( _ = try sema.coerceVarArgParam( block, uncasted_arg, - Module.argSrc(call_src.node_offset.x, sema.gpa, decl, i, bound_arg_src), + mod.argSrc(call_src.node_offset.x, decl, i, bound_arg_src), ); unreachable; }, @@ -7406,7 +7398,8 @@ fn instantiateGenericCall( // can match against `uncasted_args` rather than doing the work below to create a // generic Scope only to junk it if it matches an existing instantiation. const fn_owner_decl = mod.declPtr(module_fn.owner_decl); - const namespace = fn_owner_decl.src_namespace; + const namespace_index = fn_owner_decl.src_namespace; + const namespace = mod.namespacePtr(namespace_index); const fn_zir = namespace.file_scope.zir; const fn_info = fn_zir.getFnInfo(module_fn.zir_body_inst); const zir_tags = fn_zir.instructions.items(.tag); @@ -7456,7 +7449,7 @@ fn instantiateGenericCall( const arg_val = sema.analyzeGenericCallArgVal(block, .unneeded, uncasted_args[i]) catch |err| switch (err) { error.NeededSourceLocation => { const decl = sema.mod.declPtr(block.src_decl); - const arg_src = Module.argSrc(call_src.node_offset.x, sema.gpa, decl, i, bound_arg_src); + const arg_src = mod.argSrc(call_src.node_offset.x, decl, i, bound_arg_src); _ = try sema.analyzeGenericCallArgVal(block, arg_src, uncasted_args[i]); unreachable; }, @@ -7519,9 +7512,9 @@ fn instantiateGenericCall( try namespace.anon_decls.ensureUnusedCapacity(gpa, 1); // Create a Decl for the new function. 
- const src_decl_index = namespace.getDeclIndex(); + const src_decl_index = namespace.getDeclIndex(mod); const src_decl = mod.declPtr(src_decl_index); - const new_decl_index = try mod.allocateNewDecl(namespace, fn_owner_decl.src_node, src_decl.src_scope); + const new_decl_index = try mod.allocateNewDecl(namespace_index, fn_owner_decl.src_node, src_decl.src_scope); const new_decl = mod.declPtr(new_decl_index); // TODO better names for generic function instantiations const decl_name = try std.fmt.allocPrintZ(gpa, "{s}__anon_{d}", .{ @@ -7559,7 +7552,7 @@ fn instantiateGenericCall( uncasted_args, module_fn, new_module_func, - namespace, + namespace_index, func_ty_info, call_src, bound_arg_src, @@ -7631,7 +7624,7 @@ fn instantiateGenericCall( const decl = sema.mod.declPtr(block.src_decl); _ = try sema.analyzeGenericCallArg( block, - Module.argSrc(call_src.node_offset.x, sema.gpa, decl, total_i, bound_arg_src), + mod.argSrc(call_src.node_offset.x, decl, total_i, bound_arg_src), uncasted_args[total_i], comptime_args[total_i], runtime_args, @@ -7692,7 +7685,7 @@ fn resolveGenericInstantiationType( uncasted_args: []const Air.Inst.Ref, module_fn: *Module.Fn, new_module_func: *Module.Fn, - namespace: *Namespace, + namespace: Namespace.Index, func_ty_info: Type.Payload.Function.Data, call_src: LazySrcLoc, bound_arg_src: ?LazySrcLoc, @@ -7779,7 +7772,7 @@ fn resolveGenericInstantiationType( const arg_val = sema.resolveConstValue(block, .unneeded, arg, "") catch |err| switch (err) { error.NeededSourceLocation => { const decl = sema.mod.declPtr(block.src_decl); - const arg_src = Module.argSrc(call_src.node_offset.x, sema.gpa, decl, arg_i, bound_arg_src); + const arg_src = mod.argSrc(call_src.node_offset.x, decl, arg_i, bound_arg_src); _ = try sema.resolveConstValue(block, arg_src, arg, "argument to parameter with comptime-only type must be comptime-known"); unreachable; }, @@ -8987,7 +8980,7 @@ fn funcCommon( const decl = sema.mod.declPtr(block.src_decl); try sema.analyzeParameter( block, - Module.paramSrc(src_node_offset, sema.gpa, decl, i), + Module.paramSrc(src_node_offset, mod, decl, i), param, comptime_params, i, @@ -9050,7 +9043,7 @@ fn funcCommon( errdefer msg.destroy(sema.gpa); const src_decl = sema.mod.declPtr(block.src_decl); - try sema.explainWhyTypeIsNotExtern(msg, ret_ty_src.toSrcLoc(src_decl), return_type, .ret_ty); + try sema.explainWhyTypeIsNotExtern(msg, ret_ty_src.toSrcLoc(src_decl, mod), return_type, .ret_ty); try sema.addDeclaredHereNote(msg, return_type); break :msg msg; @@ -9070,7 +9063,7 @@ fn funcCommon( "function with comptime-only return type '{}' requires all parameters to be comptime", .{return_type.fmt(sema.mod)}, ); - try sema.explainWhyTypeIsComptime(msg, ret_ty_src.toSrcLoc(sema.owner_decl), return_type); + try sema.explainWhyTypeIsComptime(msg, ret_ty_src.toSrcLoc(sema.owner_decl, mod), return_type); const tags = sema.code.instructions.items(.tag); const data = sema.code.instructions.items(.data); @@ -9278,7 +9271,7 @@ fn analyzeParameter( errdefer msg.destroy(sema.gpa); const src_decl = mod.declPtr(block.src_decl); - try sema.explainWhyTypeIsNotExtern(msg, param_src.toSrcLoc(src_decl), param.ty, .param_ty); + try sema.explainWhyTypeIsNotExtern(msg, param_src.toSrcLoc(src_decl, mod), param.ty, .param_ty); try sema.addDeclaredHereNote(msg, param.ty); break :msg msg; @@ -9293,7 +9286,7 @@ fn analyzeParameter( errdefer msg.destroy(sema.gpa); const src_decl = mod.declPtr(block.src_decl); - try sema.explainWhyTypeIsComptime(msg, param_src.toSrcLoc(src_decl), param.ty); + try 
sema.explainWhyTypeIsComptime(msg, param_src.toSrcLoc(src_decl, mod), param.ty); try sema.addDeclaredHereNote(msg, param.ty); break :msg msg; @@ -10233,15 +10226,15 @@ fn zirSwitchCapture( if (!field.ty.eql(first_field.ty, sema.mod)) { const msg = msg: { const raw_capture_src = Module.SwitchProngSrc{ .multi_capture = capture_info.prong_index }; - const capture_src = raw_capture_src.resolve(sema.gpa, sema.mod.declPtr(block.src_decl), switch_info.src_node, .first); + const capture_src = raw_capture_src.resolve(mod, sema.mod.declPtr(block.src_decl), switch_info.src_node, .first); const msg = try sema.errMsg(block, capture_src, "capture group with incompatible types", .{}); errdefer msg.destroy(sema.gpa); const raw_first_item_src = Module.SwitchProngSrc{ .multi = .{ .prong = capture_info.prong_index, .item = 0 } }; - const first_item_src = raw_first_item_src.resolve(sema.gpa, sema.mod.declPtr(block.src_decl), switch_info.src_node, .first); + const first_item_src = raw_first_item_src.resolve(mod, sema.mod.declPtr(block.src_decl), switch_info.src_node, .first); const raw_item_src = Module.SwitchProngSrc{ .multi = .{ .prong = capture_info.prong_index, .item = 1 + @intCast(u32, i) } }; - const item_src = raw_item_src.resolve(sema.gpa, sema.mod.declPtr(block.src_decl), switch_info.src_node, .first); + const item_src = raw_item_src.resolve(mod, sema.mod.declPtr(block.src_decl), switch_info.src_node, .first); try sema.errNote(block, first_item_src, msg, "type '{}' here", .{first_field.ty.fmt(sema.mod)}); try sema.errNote(block, item_src, msg, "type '{}' here", .{field.ty.fmt(sema.mod)}); break :msg msg; @@ -11265,7 +11258,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError error.NeededSourceLocation => { const case_src = Module.SwitchProngSrc{ .range = .{ .prong = multi_i, .item = range_i } }; const decl = mod.declPtr(case_block.src_decl); - try sema.emitBackwardBranch(block, case_src.resolve(sema.gpa, decl, src_node_offset, .none)); + try sema.emitBackwardBranch(block, case_src.resolve(mod, decl, src_node_offset, .none)); unreachable; }, else => return err, @@ -11301,7 +11294,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError error.NeededSourceLocation => { const case_src = Module.SwitchProngSrc{ .multi = .{ .prong = multi_i, .item = @intCast(u32, item_i) } }; const decl = mod.declPtr(case_block.src_decl); - try sema.emitBackwardBranch(block, case_src.resolve(sema.gpa, decl, src_node_offset, .none)); + try sema.emitBackwardBranch(block, case_src.resolve(mod, decl, src_node_offset, .none)); unreachable; }, else => return err, @@ -11724,6 +11717,7 @@ fn resolveSwitchItemVal( switch_prong_src: Module.SwitchProngSrc, range_expand: Module.SwitchProngSrc.RangeExpand, ) CompileError!TypedValue { + const mod = sema.mod; const item = try sema.resolveInst(item_ref); const item_ty = sema.typeOf(item); // Constructing a LazySrcLoc is costly because we only have the switch AST node. 
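The hunks in this area repeatedly show the compiler's lazy source-location idiom, motivated by the comment above: constructing a `LazySrcLoc` for a switch prong means re-walking the switch AST node, so call sites first pass `.unneeded` and only resolve a precise location when the callee signals `error.NeededSourceLocation`. A condensed sketch of that retry shape, with `doWork` standing in (hypothetically) for calls like `resolveConstValue` or `emitBackwardBranch`:

    // Fast path: no source location is materialized unless an error
    // message actually needs one.
    const result = sema.doWork(block, .unneeded, operand) catch |err| switch (err) {
        error.NeededSourceLocation => {
            // Slow path: resolve the precise prong location, then retry so
            // the failure is reported against it. The retry always fails.
            const src = switch_prong_src.resolve(mod, mod.declPtr(block.src_decl), switch_node_offset, range_expand);
            _ = try sema.doWork(block, src, operand);
            unreachable;
        },
        else => return err,
    };

This is also why the `resolve` signature change from `gpa: Allocator` to `mod: *Module` fans out to so many call sites: only the slow path ever touches it.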
@@ -11734,7 +11728,7 @@ fn resolveSwitchItemVal( return TypedValue{ .ty = item_ty, .val = val }; } else |err| switch (err) { error.NeededSourceLocation => { - const src = switch_prong_src.resolve(sema.gpa, sema.mod.declPtr(block.src_decl), switch_node_offset, range_expand); + const src = switch_prong_src.resolve(mod, sema.mod.declPtr(block.src_decl), switch_node_offset, range_expand); _ = try sema.resolveConstValue(block, src, item, "switch prong values must be comptime-known"); unreachable; }, @@ -11752,10 +11746,11 @@ fn validateSwitchRange( src_node_offset: i32, switch_prong_src: Module.SwitchProngSrc, ) CompileError!void { + const mod = sema.mod; const first_val = (try sema.resolveSwitchItemVal(block, first_ref, src_node_offset, switch_prong_src, .first)).val; const last_val = (try sema.resolveSwitchItemVal(block, last_ref, src_node_offset, switch_prong_src, .last)).val; - if (first_val.compareScalar(.gt, last_val, operand_ty, sema.mod)) { - const src = switch_prong_src.resolve(sema.gpa, sema.mod.declPtr(block.src_decl), src_node_offset, .first); + if (first_val.compareScalar(.gt, last_val, operand_ty, mod)) { + const src = switch_prong_src.resolve(mod, mod.declPtr(block.src_decl), src_node_offset, .first); return sema.fail(block, src, "range start value is greater than the end value", .{}); } const maybe_prev_src = try range_set.add(first_val, last_val, operand_ty, switch_prong_src); @@ -11821,10 +11816,10 @@ fn validateSwitchDupe( src_node_offset: i32, ) CompileError!void { const prev_prong_src = maybe_prev_src orelse return; - const gpa = sema.gpa; + const mod = sema.mod; const block_src_decl = sema.mod.declPtr(block.src_decl); - const src = switch_prong_src.resolve(gpa, block_src_decl, src_node_offset, .none); - const prev_src = prev_prong_src.resolve(gpa, block_src_decl, src_node_offset, .none); + const src = switch_prong_src.resolve(mod, block_src_decl, src_node_offset, .none); + const prev_src = prev_prong_src.resolve(mod, block_src_decl, src_node_offset, .none); const msg = msg: { const msg = try sema.errMsg( block, @@ -11863,7 +11858,7 @@ fn validateSwitchItemBool( } if (true_count.* + false_count.* > 2) { const block_src_decl = sema.mod.declPtr(block.src_decl); - const src = switch_prong_src.resolve(sema.gpa, block_src_decl, src_node_offset, .none); + const src = switch_prong_src.resolve(mod, block_src_decl, src_node_offset, .none); return sema.fail(block, src, "duplicate switch value", .{}); } } @@ -12068,6 +12063,7 @@ fn zirHasField(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } fn zirHasDecl(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const src = inst_data.src(); @@ -12078,10 +12074,11 @@ fn zirHasDecl(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air try sema.checkNamespaceType(block, lhs_src, container_type); - const namespace = container_type.getNamespace() orelse return Air.Inst.Ref.bool_false; + const namespace = container_type.getNamespaceIndex(mod).unwrap() orelse + return Air.Inst.Ref.bool_false; if (try sema.lookupInNamespace(block, src, namespace, decl_name, true)) |decl_index| { - const decl = sema.mod.declPtr(decl_index); - if (decl.is_pub or decl.getFileScope() == block.getFileScope()) { + const decl = mod.declPtr(decl_index); + if (decl.is_pub or decl.getFileScope(mod) == block.getFileScope(mod)) { return 
Air.Inst.Ref.bool_true; } } @@ -12097,12 +12094,12 @@ fn zirImport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. const operand_src = inst_data.src(); const operand = inst_data.get(sema.code); - const result = mod.importFile(block.getFileScope(), operand) catch |err| switch (err) { + const result = mod.importFile(block.getFileScope(mod), operand) catch |err| switch (err) { error.ImportOutsidePkgPath => { return sema.fail(block, operand_src, "import of file outside package path: '{s}'", .{operand}); }, error.PackageNotFound => { - const name = try block.getFileScope().pkg.getName(sema.gpa, mod.*); + const name = try block.getFileScope(mod).pkg.getName(sema.gpa, mod.*); defer sema.gpa.free(name); return sema.fail(block, operand_src, "no package named '{s}' available within package '{s}'", .{ operand, name }); }, @@ -12128,7 +12125,7 @@ fn zirEmbedFile(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const name = try sema.resolveConstString(block, operand_src, inst_data.operand, "file path name must be comptime-known"); - const embed_file = mod.embedFile(block.getFileScope(), name) catch |err| switch (err) { + const embed_file = mod.embedFile(block.getFileScope(mod), name) catch |err| switch (err) { error.ImportOutsidePkgPath => { return sema.fail(block, operand_src, "embed of file outside package path: '{s}'", .{name}); }, @@ -15666,7 +15663,8 @@ fn zirThis( block: *Block, extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { - const this_decl_index = block.namespace.getDeclIndex(); + const mod = sema.mod; + const this_decl_index = mod.namespaceDeclIndex(block.namespace); const src = LazySrcLoc.nodeOffset(@bitCast(i32, extended.operand)); return sema.analyzeDeclVal(block, src, this_decl_index); } @@ -15698,9 +15696,10 @@ fn zirClosureGet( block: *Block, inst: Zir.Inst.Index, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; // TODO CLOSURE: Test this with inline functions const inst_data = sema.code.instructions.items(.data)[inst].inst_node; - var scope: *CaptureScope = sema.mod.declPtr(block.src_decl).src_scope.?; + var scope: *CaptureScope = mod.declPtr(block.src_decl).src_scope.?; // Note: The target closure must be in this scope list. // If it's not here, the zir is invalid, or the list is broken. const tv = while (true) { @@ -15725,8 +15724,8 @@ fn zirClosureGet( if (tv.val.ip_index == .unreachable_value and !block.is_typeof and sema.func == null) { const msg = msg: { const name = name: { - const file = sema.owner_decl.getFileScope(); - const tree = file.getTree(sema.mod.gpa) catch |err| { + const file = sema.owner_decl.getFileScope(mod); + const tree = file.getTree(mod.gpa) catch |err| { // In this case we emit a warning + a less precise source location. log.warn("unable to load {s}: {s}", .{ file.sub_file_path, @errorName(err), @@ -15753,8 +15752,8 @@ fn zirClosureGet( if (tv.val.ip_index == .unreachable_value and !block.is_typeof and !block.is_comptime and sema.func != null) { const msg = msg: { const name = name: { - const file = sema.owner_decl.getFileScope(); - const tree = file.getTree(sema.mod.gpa) catch |err| { + const file = sema.owner_decl.getFileScope(mod); + const tree = file.getTree(mod.gpa) catch |err| { // In this case we emit a warning + a less precise source location. 
log.warn("unable to load {s}: {s}", .{ file.sub_file_path, @errorName(err), @@ -15825,7 +15824,7 @@ fn zirBuiltinSrc( const extra = sema.code.extraData(Zir.Inst.Src, extended.operand).data; const src = LazySrcLoc.nodeOffset(extra.node); const func = sema.func orelse return sema.fail(block, src, "@src outside function", .{}); - const fn_owner_decl = sema.mod.declPtr(func.owner_decl); + const fn_owner_decl = mod.declPtr(func.owner_decl); const func_name_val = blk: { var anon_decl = try block.startAnonDecl(); @@ -15844,7 +15843,7 @@ fn zirBuiltinSrc( var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); // The compiler must not call realpath anywhere. - const name = try fn_owner_decl.getFileScope().fullPathZ(anon_decl.arena()); + const name = try fn_owner_decl.getFileScope(mod).fullPathZ(anon_decl.arena()); const new_decl = try anon_decl.finish( try Type.array(anon_decl.arena(), name.len, try mod.intValue(Type.u8, 0), Type.u8, mod), try Value.Tag.bytes.create(anon_decl.arena(), name[0 .. name.len + 1]), @@ -15980,22 +15979,22 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const fn_info_decl_index = (try sema.namespaceLookup( block, src, - type_info_ty.getNamespace().?, + type_info_ty.getNamespaceIndex(mod).unwrap().?, "Fn", )).?; - try sema.mod.declareDeclDependency(sema.owner_decl_index, fn_info_decl_index); + try mod.declareDeclDependency(sema.owner_decl_index, fn_info_decl_index); try sema.ensureDeclAnalyzed(fn_info_decl_index); - const fn_info_decl = sema.mod.declPtr(fn_info_decl_index); + const fn_info_decl = mod.declPtr(fn_info_decl_index); const fn_ty = fn_info_decl.val.toType(); const param_info_decl_index = (try sema.namespaceLookup( block, src, - fn_ty.getNamespace().?, + fn_ty.getNamespaceIndex(mod).unwrap().?, "Param", )).?; - try sema.mod.declareDeclDependency(sema.owner_decl_index, param_info_decl_index); + try mod.declareDeclDependency(sema.owner_decl_index, param_info_decl_index); try sema.ensureDeclAnalyzed(param_info_decl_index); - const param_info_decl = sema.mod.declPtr(param_info_decl_index); + const param_info_decl = mod.declPtr(param_info_decl_index); const param_ty = param_info_decl.val.toType(); const new_decl = try params_anon_decl.finish( try Type.Tag.array.create(params_anon_decl.arena(), .{ @@ -16169,12 +16168,12 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const set_field_ty_decl_index = (try sema.namespaceLookup( block, src, - type_info_ty.getNamespace().?, + type_info_ty.getNamespaceIndex(mod).unwrap().?, "Error", )).?; - try sema.mod.declareDeclDependency(sema.owner_decl_index, set_field_ty_decl_index); + try mod.declareDeclDependency(sema.owner_decl_index, set_field_ty_decl_index); try sema.ensureDeclAnalyzed(set_field_ty_decl_index); - const set_field_ty_decl = sema.mod.declPtr(set_field_ty_decl_index); + const set_field_ty_decl = mod.declPtr(set_field_ty_decl_index); break :t try set_field_ty_decl.val.toType().copy(fields_anon_decl.arena()); }; @@ -16277,12 +16276,12 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const enum_field_ty_decl_index = (try sema.namespaceLookup( block, src, - type_info_ty.getNamespace().?, + type_info_ty.getNamespaceIndex(mod).unwrap().?, "EnumField", )).?; - try sema.mod.declareDeclDependency(sema.owner_decl_index, enum_field_ty_decl_index); + try mod.declareDeclDependency(sema.owner_decl_index, enum_field_ty_decl_index); try sema.ensureDeclAnalyzed(enum_field_ty_decl_index); - const enum_field_ty_decl = 
sema.mod.declPtr(enum_field_ty_decl_index); + const enum_field_ty_decl = mod.declPtr(enum_field_ty_decl_index); break :t try enum_field_ty_decl.val.toType().copy(fields_anon_decl.arena()); }; @@ -16336,7 +16335,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai break :v try Value.Tag.decl_ref.create(sema.arena, new_decl); }; - const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, ty.getNamespace()); + const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, ty.getNamespace(mod)); const field_values = try sema.arena.create([4]Value); field_values.* = .{ @@ -16368,12 +16367,12 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const union_field_ty_decl_index = (try sema.namespaceLookup( block, src, - type_info_ty.getNamespace().?, + type_info_ty.getNamespaceIndex(mod).unwrap().?, "UnionField", )).?; - try sema.mod.declareDeclDependency(sema.owner_decl_index, union_field_ty_decl_index); + try mod.declareDeclDependency(sema.owner_decl_index, union_field_ty_decl_index); try sema.ensureDeclAnalyzed(union_field_ty_decl_index); - const union_field_ty_decl = sema.mod.declPtr(union_field_ty_decl_index); + const union_field_ty_decl = mod.declPtr(union_field_ty_decl_index); break :t try union_field_ty_decl.val.toType().copy(fields_anon_decl.arena()); }; @@ -16434,7 +16433,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai }); }; - const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, union_ty.getNamespace()); + const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, union_ty.getNamespace(mod)); const enum_tag_ty_val = if (union_ty.unionTagType()) |tag_ty| v: { const ty_val = try Value.Tag.ty.create(sema.arena, tag_ty); @@ -16475,12 +16474,12 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const struct_field_ty_decl_index = (try sema.namespaceLookup( block, src, - type_info_ty.getNamespace().?, + type_info_ty.getNamespaceIndex(mod).unwrap().?, "StructField", )).?; - try sema.mod.declareDeclDependency(sema.owner_decl_index, struct_field_ty_decl_index); + try mod.declareDeclDependency(sema.owner_decl_index, struct_field_ty_decl_index); try sema.ensureDeclAnalyzed(struct_field_ty_decl_index); - const struct_field_ty_decl = sema.mod.declPtr(struct_field_ty_decl_index); + const struct_field_ty_decl = mod.declPtr(struct_field_ty_decl_index); break :t try struct_field_ty_decl.val.toType().copy(fields_anon_decl.arena()); }; const struct_ty = try sema.resolveTypeFields(ty); @@ -16597,7 +16596,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai }); }; - const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, struct_ty.getNamespace()); + const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, struct_ty.getNamespace(mod)); const backing_integer_val = blk: { if (layout == .Packed) { @@ -16640,7 +16639,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // TODO: look into memoizing this result. 
const opaque_ty = try sema.resolveTypeFields(ty); - const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, opaque_ty.getNamespace()); + const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, opaque_ty.getNamespace(mod)); const field_values = try sema.arena.create([1]Value); field_values.* = .{ @@ -16676,7 +16675,7 @@ fn typeInfoDecls( const declaration_ty_decl_index = (try sema.namespaceLookup( block, src, - type_info_ty.getNamespace().?, + type_info_ty.getNamespaceIndex(mod).unwrap().?, "Declaration", )).?; try mod.declareDeclDependency(sema.owner_decl_index, declaration_ty_decl_index); @@ -16730,7 +16729,7 @@ fn typeInfoNamespaceDecls( if (decl.kind == .@"usingnamespace") { if (decl.analysis == .in_progress) continue; try mod.ensureDeclAnalyzed(decl_index); - const new_ns = decl.val.toType().getNamespace().?; + const new_ns = decl.val.toType().getNamespace(mod).?; try sema.typeInfoNamespaceDecls(block, decls_anon_decl, new_ns, decl_vals, seen_namespaces); continue; } @@ -17750,7 +17749,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air errdefer msg.destroy(sema.gpa); const src_decl = sema.mod.declPtr(block.src_decl); - try sema.explainWhyTypeIsNotExtern(msg, elem_ty_src.toSrcLoc(src_decl), elem_ty, .other); + try sema.explainWhyTypeIsNotExtern(msg, elem_ty_src.toSrcLoc(src_decl, mod), elem_ty, .other); try sema.addDeclaredHereNote(msg, elem_ty); break :msg msg; @@ -18006,6 +18005,7 @@ fn finishStructInit( struct_ty: Type, is_ref: bool, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const gpa = sema.gpa; var root_msg: ?*Module.ErrorMsg = null; @@ -18118,8 +18118,8 @@ fn finishStructInit( sema.requireRuntimeBlock(block, .unneeded, null) catch |err| switch (err) { error.NeededSourceLocation => { - const decl = sema.mod.declPtr(block.src_decl); - const field_src = Module.initSrc(dest_src.node_offset.x, sema.gpa, decl, runtime_index); + const decl = mod.declPtr(block.src_decl); + const field_src = mod.initSrc(dest_src.node_offset.x, decl, runtime_index); try sema.requireRuntimeBlock(block, dest_src, field_src); unreachable; }, @@ -18158,11 +18158,11 @@ fn zirStructInitAnon( if (gop.found_existing) { const msg = msg: { const decl = sema.mod.declPtr(block.src_decl); - const field_src = Module.initSrc(src.node_offset.x, sema.gpa, decl, i); + const field_src = mod.initSrc(src.node_offset.x, decl, i); const msg = try sema.errMsg(block, field_src, "duplicate field", .{}); errdefer msg.destroy(sema.gpa); - const prev_source = Module.initSrc(src.node_offset.x, sema.gpa, decl, gop.value_ptr.*); + const prev_source = mod.initSrc(src.node_offset.x, decl, gop.value_ptr.*); try sema.errNote(block, prev_source, msg, "other field here", .{}); break :msg msg; }; @@ -18175,7 +18175,7 @@ fn zirStructInitAnon( if (types[i].zigTypeTag(mod) == .Opaque) { const msg = msg: { const decl = sema.mod.declPtr(block.src_decl); - const field_src = Module.initSrc(src.node_offset.x, sema.gpa, decl, i); + const field_src = mod.initSrc(src.node_offset.x, decl, i); const msg = try sema.errMsg(block, field_src, "opaque types have unknown size and therefore cannot be directly embedded in structs", .{}); errdefer msg.destroy(sema.gpa); @@ -18208,7 +18208,7 @@ fn zirStructInitAnon( sema.requireRuntimeBlock(block, .unneeded, null) catch |err| switch (err) { error.NeededSourceLocation => { const decl = sema.mod.declPtr(block.src_decl); - const field_src = Module.initSrc(src.node_offset.x, sema.gpa, decl, runtime_index); + const field_src = 
mod.initSrc(src.node_offset.x, decl, runtime_index); try sema.requireRuntimeBlock(block, src, field_src); unreachable; }, @@ -18283,7 +18283,7 @@ fn zirArrayInit( resolved_args[i] = sema.coerce(block, elem_ty, resolved_arg, .unneeded) catch |err| switch (err) { error.NeededSourceLocation => { const decl = sema.mod.declPtr(block.src_decl); - const elem_src = Module.initSrc(src.node_offset.x, sema.gpa, decl, i); + const elem_src = mod.initSrc(src.node_offset.x, decl, i); _ = try sema.coerce(block, elem_ty, resolved_arg, elem_src); unreachable; }, @@ -18315,7 +18315,7 @@ fn zirArrayInit( sema.requireRuntimeBlock(block, .unneeded, null) catch |err| switch (err) { error.NeededSourceLocation => { const decl = sema.mod.declPtr(block.src_decl); - const elem_src = Module.initSrc(src.node_offset.x, sema.gpa, decl, runtime_index); + const elem_src = mod.initSrc(src.node_offset.x, decl, runtime_index); try sema.requireRuntimeBlock(block, src, elem_src); unreachable; }, @@ -18724,7 +18724,7 @@ fn zirTagName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air enum_ty.fmt(mod), }); } - const enum_decl_index = enum_ty.getOwnerDecl(); + const enum_decl_index = enum_ty.getOwnerDecl(mod); const casted_operand = try sema.coerce(block, enum_ty, operand, operand_src); if (try sema.resolveDefinedValue(block, operand_src, casted_operand)) |val| { const field_index = enum_ty.enumTagFieldIndex(val, mod) orelse { @@ -18734,7 +18734,7 @@ fn zirTagName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air val.fmtValue(enum_ty, sema.mod), enum_decl.name, }); errdefer msg.destroy(sema.gpa); - try mod.errNoteNonLazy(enum_decl.srcLoc(), msg, "declared here", .{}); + try mod.errNoteNonLazy(enum_decl.srcLoc(mod), msg, "declared here", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); @@ -18760,6 +18760,7 @@ fn zirReify( inst: Zir.Inst.Index, ) CompileError!Air.Inst.Ref { const mod = sema.mod; + const gpa = sema.gpa; const name_strategy = @intToEnum(Zir.Inst.NameStrategy, extended.small); const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const src = LazySrcLoc.nodeOffset(extra.node); @@ -18887,10 +18888,10 @@ fn zirReify( if (!try sema.validateExternType(elem_ty, .other)) { const msg = msg: { const msg = try sema.errMsg(block, src, "C pointers cannot point to non-C-ABI-compatible type '{}'", .{elem_ty.fmt(mod)}); - errdefer msg.destroy(sema.gpa); + errdefer msg.destroy(gpa); const src_decl = mod.declPtr(block.src_decl); - try sema.explainWhyTypeIsNotExtern(msg, src.toSrcLoc(src_decl), elem_ty, .other); + try sema.explainWhyTypeIsNotExtern(msg, src.toSrcLoc(src_decl, mod), elem_ty, .other); try sema.addDeclaredHereNote(msg, elem_ty); break :msg msg; @@ -19043,7 +19044,6 @@ fn zirReify( return sema.fail(block, src, "reified enums must have no decls", .{}); } - const gpa = sema.gpa; var new_decl_arena = std.heap.ArenaAllocator.init(gpa); errdefer new_decl_arena.deinit(); const new_decl_arena_allocator = new_decl_arena.allocator(); @@ -19076,11 +19076,11 @@ fn zirReify( .tag_ty_inferred = false, .fields = .{}, .values = .{}, - .namespace = .{ - .parent = block.namespace, + .namespace = try mod.createNamespace(.{ + .parent = block.namespace.toOptional(), .ty = enum_ty, - .file_scope = block.getFileScope(), - }, + .file_scope = block.getFileScope(mod), + }), }; // Enum tag type @@ -19164,34 +19164,37 @@ fn zirReify( return sema.fail(block, src, "reified opaque must have no decls", .{}); } - var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa); + var 
new_decl_arena = std.heap.ArenaAllocator.init(gpa); errdefer new_decl_arena.deinit(); - const new_decl_arena_allocator = new_decl_arena.allocator(); - const opaque_obj = try new_decl_arena_allocator.create(Module.Opaque); - const opaque_ty_payload = try new_decl_arena_allocator.create(Type.Payload.Opaque); - opaque_ty_payload.* = .{ - .base = .{ .tag = .@"opaque" }, - .data = opaque_obj, - }; - const opaque_ty = Type.initPayload(&opaque_ty_payload.base); - const opaque_val = try Value.Tag.ty.create(new_decl_arena_allocator, opaque_ty); + // Because these three things each reference each other, + // `undefined` placeholders are used in two places before being set + // after the opaque type gains an InternPool index. + const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{ .ty = Type.type, - .val = opaque_val, + .val = undefined, }, name_strategy, "opaque", inst); const new_decl = mod.declPtr(new_decl_index); new_decl.owns_tv = true; errdefer mod.abortAnonDecl(new_decl_index); - opaque_obj.* = .{ - .owner_decl = new_decl_index, - .namespace = .{ - .parent = block.namespace, - .ty = opaque_ty, - .file_scope = block.getFileScope(), - }, - }; + const new_namespace_index = try mod.createNamespace(.{ + .parent = block.namespace.toOptional(), + .ty = undefined, + .file_scope = block.getFileScope(mod), + }); + const new_namespace = mod.namespacePtr(new_namespace_index); + errdefer @panic("TODO error handling"); + + const opaque_ty = try mod.intern_pool.get(gpa, .{ .opaque_type = .{ + .decl = new_decl_index, + .namespace = new_namespace_index, + } }); + errdefer @panic("TODO error handling"); + + new_decl.val = opaque_ty.toValue(); + new_namespace.ty = opaque_ty.toType(); try new_decl.finalizeNewArena(&new_decl_arena); return sema.analyzeDeclVal(block, src, new_decl_index); @@ -19214,7 +19217,7 @@ fn zirReify( } const layout = layout_val.toEnum(std.builtin.Type.ContainerLayout); - var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa); + var new_decl_arena = std.heap.ArenaAllocator.init(gpa); errdefer new_decl_arena.deinit(); const new_decl_arena_allocator = new_decl_arena.allocator(); @@ -19248,11 +19251,11 @@ fn zirReify( .zir_index = inst, .layout = layout, .status = .have_field_types, - .namespace = .{ - .parent = block.namespace, + .namespace = try mod.createNamespace(.{ + .parent = block.namespace.toOptional(), .ty = union_ty, - .file_scope = block.getFileScope(), - }, + .file_scope = block.getFileScope(mod), + }), }; // Tag type @@ -19301,7 +19304,7 @@ fn zirReify( if (!enum_has_field) { const msg = msg: { const msg = try sema.errMsg(block, src, "no field named '{s}' in enum '{}'", .{ field_name, union_obj.tag_ty.fmt(mod) }); - errdefer msg.destroy(sema.gpa); + errdefer msg.destroy(gpa); try sema.addDeclaredHereNote(msg, union_obj.tag_ty); break :msg msg; }; @@ -19324,7 +19327,7 @@ fn zirReify( if (field_ty.zigTypeTag(mod) == .Opaque) { const msg = msg: { const msg = try sema.errMsg(block, src, "opaque types have unknown size and therefore cannot be directly embedded in unions", .{}); - errdefer msg.destroy(sema.gpa); + errdefer msg.destroy(gpa); try sema.addDeclaredHereNote(msg, field_ty); break :msg msg; @@ -19334,10 +19337,10 @@ fn zirReify( if (union_obj.layout == .Extern and !try sema.validateExternType(field_ty, .union_field)) { const msg = msg: { const msg = try sema.errMsg(block, src, "extern unions cannot contain fields of type '{}'", .{field_ty.fmt(mod)}); - errdefer msg.destroy(sema.gpa); + errdefer msg.destroy(gpa); const src_decl = 
mod.declPtr(block.src_decl); - try sema.explainWhyTypeIsNotExtern(msg, src.toSrcLoc(src_decl), field_ty, .union_field); + try sema.explainWhyTypeIsNotExtern(msg, src.toSrcLoc(src_decl, mod), field_ty, .union_field); try sema.addDeclaredHereNote(msg, field_ty); break :msg msg; @@ -19346,10 +19349,10 @@ fn zirReify( } else if (union_obj.layout == .Packed and !(validatePackedType(field_ty, mod))) { const msg = msg: { const msg = try sema.errMsg(block, src, "packed unions cannot contain fields of type '{}'", .{field_ty.fmt(mod)}); - errdefer msg.destroy(sema.gpa); + errdefer msg.destroy(gpa); const src_decl = mod.declPtr(block.src_decl); - try sema.explainWhyTypeIsNotPacked(msg, src.toSrcLoc(src_decl), field_ty); + try sema.explainWhyTypeIsNotPacked(msg, src.toSrcLoc(src_decl, mod), field_ty); try sema.addDeclaredHereNote(msg, field_ty); break :msg msg; @@ -19362,7 +19365,7 @@ fn zirReify( if (names.count() > 0) { const msg = msg: { const msg = try sema.errMsg(block, src, "enum field(s) missing in union", .{}); - errdefer msg.destroy(sema.gpa); + errdefer msg.destroy(gpa); const enum_ty = union_obj.tag_ty; for (names.keys()) |field_name| { @@ -19513,11 +19516,11 @@ fn reifyStruct( .status = .have_field_types, .known_non_opv = false, .is_tuple = is_tuple, - .namespace = .{ - .parent = block.namespace, + .namespace = try mod.createNamespace(.{ + .parent = block.namespace.toOptional(), .ty = struct_ty, - .file_scope = block.getFileScope(), - }, + .file_scope = block.getFileScope(mod), + }), }; // Fields @@ -19629,7 +19632,7 @@ fn reifyStruct( errdefer msg.destroy(sema.gpa); const src_decl = sema.mod.declPtr(block.src_decl); - try sema.explainWhyTypeIsNotExtern(msg, src.toSrcLoc(src_decl), field_ty, .struct_field); + try sema.explainWhyTypeIsNotExtern(msg, src.toSrcLoc(src_decl, mod), field_ty, .struct_field); try sema.addDeclaredHereNote(msg, field_ty); break :msg msg; @@ -19641,7 +19644,7 @@ fn reifyStruct( errdefer msg.destroy(sema.gpa); const src_decl = sema.mod.declPtr(block.src_decl); - try sema.explainWhyTypeIsNotPacked(msg, src.toSrcLoc(src_decl), field_ty); + try sema.explainWhyTypeIsNotPacked(msg, src.toSrcLoc(src_decl, mod), field_ty); try sema.addDeclaredHereNote(msg, field_ty); break :msg msg; @@ -19741,6 +19744,7 @@ fn resolveVaListRef(sema: *Sema, block: *Block, src: LazySrcLoc, zir_ref: Zir.In } fn zirCVaArg(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref { + const mod = sema.mod; const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data; const src = LazySrcLoc.nodeOffset(extra.node); const va_list_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node }; @@ -19755,7 +19759,7 @@ fn zirCVaArg(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) C errdefer msg.destroy(sema.gpa); const src_decl = sema.mod.declPtr(block.src_decl); - try sema.explainWhyTypeIsNotExtern(msg, ty_src.toSrcLoc(src_decl), arg_ty, .param_ty); + try sema.explainWhyTypeIsNotExtern(msg, ty_src.toSrcLoc(src_decl, mod), arg_ty, .param_ty); try sema.addDeclaredHereNote(msg, arg_ty); break :msg msg; @@ -21006,7 +21010,8 @@ fn checkVectorizableBinaryOperands( fn maybeOptionsSrc(sema: *Sema, block: *Block, base_src: LazySrcLoc, wanted: []const u8) LazySrcLoc { if (base_src == .unneeded) return .unneeded; - return Module.optionsSrc(sema.gpa, sema.mod.declPtr(block.src_decl), base_src, wanted); + const mod = sema.mod; + return mod.optionsSrc(sema.mod.declPtr(block.src_decl), base_src, wanted); } fn resolveExportOptions( @@ -23067,7 
+23072,7 @@ fn zirBuiltinExtern( const msg = try sema.errMsg(block, ty_src, "extern symbol cannot have type '{}'", .{ty.fmt(mod)}); errdefer msg.destroy(sema.gpa); const src_decl = sema.mod.declPtr(block.src_decl); - try sema.explainWhyTypeIsNotExtern(msg, ty_src.toSrcLoc(src_decl), ty, .other); + try sema.explainWhyTypeIsNotExtern(msg, ty_src.toSrcLoc(src_decl, mod), ty, .other); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); @@ -23087,9 +23092,9 @@ fn zirBuiltinExtern( // TODO check duplicate extern - const new_decl_index = try sema.mod.allocateNewDecl(sema.owner_decl.src_namespace, sema.owner_decl.src_node, null); - errdefer sema.mod.destroyDecl(new_decl_index); - const new_decl = sema.mod.declPtr(new_decl_index); + const new_decl_index = try mod.allocateNewDecl(sema.owner_decl.src_namespace, sema.owner_decl.src_node, null); + errdefer mod.destroyDecl(new_decl_index); + const new_decl = mod.declPtr(new_decl_index); new_decl.name = try sema.gpa.dupeZ(u8, options.name); { @@ -23117,12 +23122,12 @@ fn zirBuiltinExtern( new_decl.@"linksection" = null; new_decl.has_tv = true; new_decl.analysis = .complete; - new_decl.generation = sema.mod.generation; + new_decl.generation = mod.generation; try new_decl.finalizeNewArena(&new_decl_arena); } - try sema.mod.declareDeclDependency(sema.owner_decl_index, new_decl_index); + try mod.declareDeclDependency(sema.owner_decl_index, new_decl_index); try sema.ensureDeclAnalyzed(new_decl_index); const ref = try Value.Tag.decl_ref.create(sema.arena, new_decl_index); @@ -23209,7 +23214,7 @@ fn validateVarType( const msg = try sema.errMsg(block, src, "extern variable cannot have type '{}'", .{var_ty.fmt(mod)}); errdefer msg.destroy(sema.gpa); const src_decl = mod.declPtr(block.src_decl); - try sema.explainWhyTypeIsNotExtern(msg, src.toSrcLoc(src_decl), var_ty, .other); + try sema.explainWhyTypeIsNotExtern(msg, src.toSrcLoc(src_decl, mod), var_ty, .other); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); @@ -23222,7 +23227,7 @@ fn validateVarType( errdefer msg.destroy(sema.gpa); const src_decl = mod.declPtr(block.src_decl); - try sema.explainWhyTypeIsComptime(msg, src.toSrcLoc(src_decl), var_ty); + try sema.explainWhyTypeIsComptime(msg, src.toSrcLoc(src_decl, mod), var_ty); if (var_ty.zigTypeTag(mod) == .ComptimeInt or var_ty.zigTypeTag(mod) == .ComptimeFloat) { try sema.errNote(block, src, msg, "to modify this variable at runtime, it must be given an explicit fixed-size number type", .{}); } @@ -23939,11 +23944,12 @@ fn safetyPanic( block: *Block, panic_id: PanicId, ) CompileError!void { + const mod = sema.mod; const panic_messages_ty = try sema.getBuiltinType("panic_messages"); const msg_decl_index = (try sema.namespaceLookup( block, sema.src, - panic_messages_ty.getNamespace().?, + panic_messages_ty.getNamespaceIndex(mod).unwrap().?, @tagName(panic_id), )).?; @@ -24006,7 +24012,7 @@ fn fieldVal( ); } else if (mem.eql(u8, field_name, "ptr") and is_pointer_to) { const ptr_info = object_ty.ptrInfo(mod); - const result_ty = try Type.ptr(sema.arena, sema.mod, .{ + const result_ty = try Type.ptr(sema.arena, mod, .{ .pointee_type = ptr_info.pointee_type.childType(mod), .sentinel = ptr_info.sentinel, .@"align" = ptr_info.@"align", @@ -24025,7 +24031,7 @@ fn fieldVal( block, field_name_src, "no member named '{s}' in '{}'", - .{ field_name, object_ty.fmt(sema.mod) }, + .{ field_name, object_ty.fmt(mod) }, ); } }, @@ -24049,7 +24055,7 @@ fn fieldVal( block, field_name_src, "no member named '{s}' in '{}'", - .{ field_name, object_ty.fmt(sema.mod) }, 
+ .{ field_name, object_ty.fmt(mod) }, ); } } @@ -24071,14 +24077,14 @@ fn fieldVal( } const msg = msg: { const msg = try sema.errMsg(block, src, "no error named '{s}' in '{}'", .{ - field_name, child_type.fmt(sema.mod), + field_name, child_type.fmt(mod), }); errdefer msg.destroy(sema.gpa); try sema.addDeclaredHereNote(msg, child_type); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); - } else (try sema.mod.getErrorValue(field_name)).key; + } else (try mod.getErrorValue(field_name)).key; return sema.addConstant( if (!child_type.isAnyError()) @@ -24089,7 +24095,7 @@ fn fieldVal( ); }, .Union => { - if (child_type.getNamespace()) |namespace| { + if (child_type.getNamespaceIndex(mod).unwrap()) |namespace| { if (try sema.namespaceLookupVal(block, src, namespace, field_name)) |inst| { return inst; } @@ -24107,7 +24113,7 @@ fn fieldVal( return sema.failWithBadMemberAccess(block, union_ty, field_name_src, field_name); }, .Enum => { - if (child_type.getNamespace()) |namespace| { + if (child_type.getNamespaceIndex(mod).unwrap()) |namespace| { if (try sema.namespaceLookupVal(block, src, namespace, field_name)) |inst| { return inst; } @@ -24119,7 +24125,7 @@ fn fieldVal( return sema.addConstant(try child_type.copy(arena), enum_val); }, .Struct, .Opaque => { - if (child_type.getNamespace()) |namespace| { + if (child_type.getNamespaceIndex(mod).unwrap()) |namespace| { if (try sema.namespaceLookupVal(block, src, namespace, field_name)) |inst| { return inst; } @@ -24128,7 +24134,7 @@ fn fieldVal( }, else => { const msg = msg: { - const msg = try sema.errMsg(block, src, "type '{}' has no members", .{child_type.fmt(sema.mod)}); + const msg = try sema.errMsg(block, src, "type '{}' has no members", .{child_type.fmt(mod)}); errdefer msg.destroy(sema.gpa); if (child_type.isSlice(mod)) try sema.errNote(block, src, msg, "slice values have 'len' and 'ptr' members", .{}); if (child_type.zigTypeTag(mod) == .Array) try sema.errNote(block, src, msg, "array values have 'len' member", .{}); @@ -24174,7 +24180,7 @@ fn fieldPtr( const object_ptr_ty = sema.typeOf(object_ptr); const object_ty = switch (object_ptr_ty.zigTypeTag(mod)) { .Pointer => object_ptr_ty.childType(mod), - else => return sema.fail(block, object_ptr_src, "expected pointer, found '{}'", .{object_ptr_ty.fmt(sema.mod)}), + else => return sema.fail(block, object_ptr_src, "expected pointer, found '{}'", .{object_ptr_ty.fmt(mod)}), }; // Zig allows dereferencing a single pointer during field lookup. 
Note that @@ -24202,7 +24208,7 @@ fn fieldPtr( block, field_name_src, "no member named '{s}' in '{}'", - .{ field_name, object_ty.fmt(sema.mod) }, + .{ field_name, object_ty.fmt(mod) }, ); } }, @@ -24218,7 +24224,7 @@ fn fieldPtr( const buf = try sema.arena.create(Type.SlicePtrFieldTypeBuffer); const slice_ptr_ty = inner_ty.slicePtrFieldType(buf, mod); - const result_ty = try Type.ptr(sema.arena, sema.mod, .{ + const result_ty = try Type.ptr(sema.arena, mod, .{ .pointee_type = slice_ptr_ty, .mutable = attr_ptr_ty.ptrIsMutable(mod), .@"volatile" = attr_ptr_ty.isVolatilePtr(mod), @@ -24239,7 +24245,7 @@ fn fieldPtr( return block.addTyOp(.ptr_slice_ptr_ptr, result_ty, inner_ptr); } else if (mem.eql(u8, field_name, "len")) { - const result_ty = try Type.ptr(sema.arena, sema.mod, .{ + const result_ty = try Type.ptr(sema.arena, mod, .{ .pointee_type = Type.usize, .mutable = attr_ptr_ty.ptrIsMutable(mod), .@"volatile" = attr_ptr_ty.isVolatilePtr(mod), @@ -24264,7 +24270,7 @@ fn fieldPtr( block, field_name_src, "no member named '{s}' in '{}'", - .{ field_name, object_ty.fmt(sema.mod) }, + .{ field_name, object_ty.fmt(mod) }, ); } }, @@ -24287,9 +24293,9 @@ fn fieldPtr( break :blk entry.key_ptr.*; } return sema.fail(block, src, "no error named '{s}' in '{}'", .{ - field_name, child_type.fmt(sema.mod), + field_name, child_type.fmt(mod), }); - } else (try sema.mod.getErrorValue(field_name)).key; + } else (try mod.getErrorValue(field_name)).key; var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); @@ -24303,7 +24309,7 @@ fn fieldPtr( )); }, .Union => { - if (child_type.getNamespace()) |namespace| { + if (child_type.getNamespaceIndex(mod).unwrap()) |namespace| { if (try sema.namespaceLookupRef(block, src, namespace, field_name)) |inst| { return inst; } @@ -24324,7 +24330,7 @@ fn fieldPtr( return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name); }, .Enum => { - if (child_type.getNamespace()) |namespace| { + if (child_type.getNamespaceIndex(mod).unwrap()) |namespace| { if (try sema.namespaceLookupRef(block, src, namespace, field_name)) |inst| { return inst; } @@ -24342,14 +24348,14 @@ fn fieldPtr( )); }, .Struct, .Opaque => { - if (child_type.getNamespace()) |namespace| { + if (child_type.getNamespaceIndex(mod).unwrap()) |namespace| { if (try sema.namespaceLookupRef(block, src, namespace, field_name)) |inst| { return inst; } } return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name); }, - else => return sema.fail(block, src, "type '{}' has no members", .{child_type.fmt(sema.mod)}), + else => return sema.fail(block, src, "type '{}' has no members", .{child_type.fmt(mod)}), } }, .Struct => { @@ -24398,7 +24404,7 @@ fn fieldCallBind( const inner_ty = if (raw_ptr_ty.zigTypeTag(mod) == .Pointer and (raw_ptr_ty.ptrSize(mod) == .One or raw_ptr_ty.ptrSize(mod) == .C)) raw_ptr_ty.childType(mod) else - return sema.fail(block, raw_ptr_src, "expected single pointer, found '{}'", .{raw_ptr_ty.fmt(sema.mod)}); + return sema.fail(block, raw_ptr_src, "expected single pointer, found '{}'", .{raw_ptr_ty.fmt(mod)}); // Optionally dereference a second pointer to get the concrete type. const is_double_ptr = inner_ty.zigTypeTag(mod) == .Pointer and inner_ty.ptrSize(mod) == .One; @@ -24458,7 +24464,7 @@ fn fieldCallBind( // If we get here, we need to look for a decl in the struct type instead. 
const found_decl = switch (concrete_ty.zigTypeTag(mod)) { .Struct, .Opaque, .Union, .Enum => found_decl: { - if (concrete_ty.getNamespace()) |namespace| { + if (concrete_ty.getNamespaceIndex(mod).unwrap()) |namespace| { if (try sema.namespaceLookup(block, src, namespace, field_name)) |decl_idx| { try sema.addReferencedBy(block, src, decl_idx); const decl_val = try sema.analyzeDeclVal(block, src, decl_idx); @@ -24472,7 +24478,7 @@ fn fieldCallBind( first_param_type.zigTypeTag(mod) == .Pointer and (first_param_type.ptrSize(mod) == .One or first_param_type.ptrSize(mod) == .C) and - first_param_type.childType(mod).eql(concrete_ty, sema.mod))) + first_param_type.childType(mod).eql(concrete_ty, mod))) { // zig fmt: on // Note that if the param type is generic poison, we know that it must @@ -24484,7 +24490,7 @@ fn fieldCallBind( .func_inst = decl_val, .arg0_inst = object_ptr, } }; - } else if (first_param_type.eql(concrete_ty, sema.mod)) { + } else if (first_param_type.eql(concrete_ty, mod)) { const deref = try sema.analyzeLoad(block, src, object_ptr, src); return .{ .method = .{ .func_inst = decl_val, @@ -24492,7 +24498,7 @@ fn fieldCallBind( } }; } else if (first_param_type.zigTypeTag(mod) == .Optional) { const child = first_param_type.optionalChild(mod); - if (child.eql(concrete_ty, sema.mod)) { + if (child.eql(concrete_ty, mod)) { const deref = try sema.analyzeLoad(block, src, object_ptr, src); return .{ .method = .{ .func_inst = decl_val, @@ -24500,7 +24506,7 @@ fn fieldCallBind( } }; } else if (child.zigTypeTag(mod) == .Pointer and child.ptrSize(mod) == .One and - child.childType(mod).eql(concrete_ty, sema.mod)) + child.childType(mod).eql(concrete_ty, mod)) { return .{ .method = .{ .func_inst = decl_val, @@ -24508,7 +24514,7 @@ fn fieldCallBind( } }; } } else if (first_param_type.zigTypeTag(mod) == .ErrorUnion and - first_param_type.errorUnionPayload().eql(concrete_ty, sema.mod)) + first_param_type.errorUnionPayload().eql(concrete_ty, mod)) { const deref = try sema.analyzeLoad(block, src, object_ptr, src); return .{ .method = .{ @@ -24526,12 +24532,12 @@ fn fieldCallBind( }; const msg = msg: { - const msg = try sema.errMsg(block, src, "no field or member function named '{s}' in '{}'", .{ field_name, concrete_ty.fmt(sema.mod) }); + const msg = try sema.errMsg(block, src, "no field or member function named '{s}' in '{}'", .{ field_name, concrete_ty.fmt(mod) }); errdefer msg.destroy(sema.gpa); try sema.addDeclaredHereNote(msg, concrete_ty); if (found_decl) |decl_idx| { - const decl = sema.mod.declPtr(decl_idx); - try sema.mod.errNoteNonLazy(decl.srcLoc(), msg, "'{s}' is not a member function", .{field_name}); + const decl = mod.declPtr(decl_idx); + try mod.errNoteNonLazy(decl.srcLoc(mod), msg, "'{s}' is not a member function", .{field_name}); } break :msg msg; }; @@ -24549,7 +24555,7 @@ fn finishFieldCallBind( ) CompileError!ResolvedFieldCallee { const mod = sema.mod; const arena = sema.arena; - const ptr_field_ty = try Type.ptr(arena, sema.mod, .{ + const ptr_field_ty = try Type.ptr(arena, mod, .{ .pointee_type = field_ty, .mutable = ptr_ty.ptrIsMutable(mod), .@"addrspace" = ptr_ty.ptrAddressSpace(mod), @@ -24583,19 +24589,20 @@ fn namespaceLookup( sema: *Sema, block: *Block, src: LazySrcLoc, - namespace: *Namespace, + namespace: Namespace.Index, decl_name: []const u8, ) CompileError!?Decl.Index { + const mod = sema.mod; const gpa = sema.gpa; if (try sema.lookupInNamespace(block, src, namespace, decl_name, true)) |decl_index| { - const decl = sema.mod.declPtr(decl_index); - if (!decl.is_pub 
and decl.getFileScope() != block.getFileScope()) { + const decl = mod.declPtr(decl_index); + if (!decl.is_pub and decl.getFileScope(mod) != block.getFileScope(mod)) { const msg = msg: { const msg = try sema.errMsg(block, src, "'{s}' is not marked 'pub'", .{ decl_name, }); errdefer msg.destroy(gpa); - try sema.mod.errNoteNonLazy(decl.srcLoc(), msg, "declared here", .{}); + try mod.errNoteNonLazy(decl.srcLoc(mod), msg, "declared here", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); @@ -24609,7 +24616,7 @@ fn namespaceLookupRef( sema: *Sema, block: *Block, src: LazySrcLoc, - namespace: *Namespace, + namespace: Namespace.Index, decl_name: []const u8, ) CompileError!?Air.Inst.Ref { const decl = (try sema.namespaceLookup(block, src, namespace, decl_name)) orelse return null; @@ -24621,7 +24628,7 @@ fn namespaceLookupVal( sema: *Sema, block: *Block, src: LazySrcLoc, - namespace: *Namespace, + namespace: Namespace.Index, decl_name: []const u8, ) CompileError!?Air.Inst.Ref { const decl = (try sema.namespaceLookup(block, src, namespace, decl_name)) orelse return null; @@ -24692,7 +24699,7 @@ fn structFieldPtrByIndex( .@"addrspace" = struct_ptr_ty_info.@"addrspace", }; - const target = sema.mod.getTarget(); + const target = mod.getTarget(); if (struct_obj.layout == .Packed) { comptime assert(Type.packed_struct_layout_version == 2); @@ -24746,7 +24753,7 @@ fn structFieldPtrByIndex( ptr_ty_data.@"align" = field.abi_align; } - const ptr_field_ty = try Type.ptr(sema.arena, sema.mod, ptr_ty_data); + const ptr_field_ty = try Type.ptr(sema.arena, mod, ptr_ty_data); if (field.is_comptime) { const val = try Value.Tag.comptime_field_ptr.create(sema.arena, .{ @@ -24848,16 +24855,17 @@ fn tupleFieldIndex( field_name: []const u8, field_name_src: LazySrcLoc, ) CompileError!u32 { + const mod = sema.mod; assert(!std.mem.eql(u8, field_name, "len")); if (std.fmt.parseUnsigned(u32, field_name, 10)) |field_index| { if (field_index < tuple_ty.structFieldCount()) return field_index; return sema.fail(block, field_name_src, "index '{s}' out of bounds of tuple '{}'", .{ - field_name, tuple_ty.fmt(sema.mod), + field_name, tuple_ty.fmt(mod), }); } else |_| {} return sema.fail(block, field_name_src, "no field named '{s}' in tuple '{}'", .{ - field_name, tuple_ty.fmt(sema.mod), + field_name, tuple_ty.fmt(mod), }); } @@ -24913,7 +24921,7 @@ fn unionFieldPtr( const union_obj = union_ty.cast(Type.Payload.Union).?.data; const field_index = try sema.unionFieldIndex(block, union_ty, field_name, field_name_src); const field = union_obj.fields.values()[field_index]; - const ptr_field_ty = try Type.ptr(arena, sema.mod, .{ + const ptr_field_ty = try Type.ptr(arena, mod, .{ .pointee_type = field.ty, .mutable = union_ptr_ty.ptrIsMutable(mod), .@"volatile" = union_ptr_ty.isVolatilePtr(mod), @@ -24947,7 +24955,7 @@ fn unionFieldPtr( .data = enum_field_index, }; const field_tag = Value.initPayload(&field_tag_buf.base); - const tag_matches = tag_and_val.tag.eql(field_tag, union_obj.tag_ty, sema.mod); + const tag_matches = tag_and_val.tag.eql(field_tag, union_obj.tag_ty, mod); if (!tag_matches) { const msg = msg: { const active_index = tag_and_val.tag.castTag(.enum_field_index).?.data; @@ -25017,7 +25025,7 @@ fn unionFieldVal( .data = enum_field_index, }; const field_tag = Value.initPayload(&field_tag_buf.base); - const tag_matches = tag_and_val.tag.eql(field_tag, union_obj.tag_ty, sema.mod); + const tag_matches = tag_and_val.tag.eql(field_tag, union_obj.tag_ty, mod); switch (union_obj.layout) { .Auto => { if (tag_matches) { @@ 
-25038,7 +25046,7 @@ fn unionFieldVal( if (tag_matches) { return sema.addConstant(field.ty, tag_and_val.val); } else { - const old_ty = union_ty.unionFieldType(tag_and_val.tag, sema.mod); + const old_ty = union_ty.unionFieldType(tag_and_val.tag, mod); if (try sema.bitCastVal(block, src, tag_and_val.val, old_ty, field.ty, 0)) |new_val| { return sema.addConstant(field.ty, new_val); } @@ -25079,7 +25087,7 @@ fn elemPtr( const indexable_ty = switch (indexable_ptr_ty.zigTypeTag(mod)) { .Pointer => indexable_ptr_ty.childType(mod), - else => return sema.fail(block, indexable_ptr_src, "expected pointer, found '{}'", .{indexable_ptr_ty.fmt(sema.mod)}), + else => return sema.fail(block, indexable_ptr_src, "expected pointer, found '{}'", .{indexable_ptr_ty.fmt(mod)}), }; try checkIndexable(sema, block, src, indexable_ty); @@ -25124,7 +25132,7 @@ fn elemPtrOneLayerOnly( const ptr_val = maybe_ptr_val orelse break :rs indexable_src; const index_val = maybe_index_val orelse break :rs elem_index_src; const index = @intCast(usize, index_val.toUnsignedInt(mod)); - const elem_ptr = try ptr_val.elemPtr(indexable_ty, sema.arena, index, sema.mod); + const elem_ptr = try ptr_val.elemPtr(indexable_ty, sema.arena, index, mod); const result_ty = try sema.elemPtrType(indexable_ty, index); return sema.addConstant(result_ty, elem_ptr); }; @@ -25170,7 +25178,7 @@ fn elemVal( const indexable_val = maybe_indexable_val orelse break :rs indexable_src; const index_val = maybe_index_val orelse break :rs elem_index_src; const index = @intCast(usize, index_val.toUnsignedInt(mod)); - const elem_ptr_val = try indexable_val.elemPtr(indexable_ty, sema.arena, index, sema.mod); + const elem_ptr_val = try indexable_val.elemPtr(indexable_ty, sema.arena, index, mod); if (try sema.pointerDeref(block, indexable_src, elem_ptr_val, indexable_ty)) |elem_val| { return sema.addConstant(indexable_ty.elemType2(mod), elem_val); } @@ -25209,6 +25217,7 @@ fn validateRuntimeElemAccess( parent_ty: Type, parent_src: LazySrcLoc, ) CompileError!void { + const mod = sema.mod; const valid_rt = try sema.validateRunTimeType(elem_ty, false); if (!valid_rt) { const msg = msg: { @@ -25216,12 +25225,12 @@ fn validateRuntimeElemAccess( block, elem_index_src, "values of type '{}' must be comptime-known, but index value is runtime-known", - .{parent_ty.fmt(sema.mod)}, + .{parent_ty.fmt(mod)}, ); errdefer msg.destroy(sema.gpa); - const src_decl = sema.mod.declPtr(block.src_decl); - try sema.explainWhyTypeIsComptime(msg, parent_src.toSrcLoc(src_decl), parent_ty); + const src_decl = mod.declPtr(block.src_decl); + try sema.explainWhyTypeIsComptime(msg, parent_src.toSrcLoc(src_decl, mod), parent_ty); break :msg msg; }; @@ -25255,7 +25264,7 @@ fn tupleFieldPtr( } const field_ty = tuple_ty.structFieldType(field_index); - const ptr_field_ty = try Type.ptr(sema.arena, sema.mod, .{ + const ptr_field_ty = try Type.ptr(sema.arena, mod, .{ .pointee_type = field_ty, .mutable = tuple_ptr_ty.ptrIsMutable(mod), .@"volatile" = tuple_ptr_ty.isVolatilePtr(mod), @@ -25431,7 +25440,7 @@ fn elemPtrArray( return sema.addConstUndef(elem_ptr_ty); } if (offset) |index| { - const elem_ptr = try array_ptr_val.elemPtr(array_ptr_ty, sema.arena, index, sema.mod); + const elem_ptr = try array_ptr_val.elemPtr(array_ptr_ty, sema.arena, index, mod); return sema.addConstant(elem_ptr_ty, elem_ptr); } } @@ -25476,7 +25485,7 @@ fn elemValSlice( if (maybe_slice_val) |slice_val| { runtime_src = elem_index_src; - const slice_len = slice_val.sliceLen(sema.mod); + const slice_len = slice_val.sliceLen(mod); 
const slice_len_s = slice_len + @boolToInt(slice_sent); if (slice_len_s == 0) { return sema.fail(block, slice_src, "indexing into empty slice is not allowed", .{}); @@ -25487,7 +25496,7 @@ fn elemValSlice( const sentinel_label: []const u8 = if (slice_sent) " +1 (sentinel)" else ""; return sema.fail(block, elem_index_src, "index {d} outside slice of length {d}{s}", .{ index, slice_len, sentinel_label }); } - const elem_ptr_val = try slice_val.elemPtr(slice_ty, sema.arena, index, sema.mod); + const elem_ptr_val = try slice_val.elemPtr(slice_ty, sema.arena, index, mod); if (try sema.pointerDeref(block, slice_src, elem_ptr_val, slice_ty)) |elem_val| { return sema.addConstant(elem_ty, elem_val); } @@ -25500,7 +25509,7 @@ fn elemValSlice( try sema.requireRuntimeBlock(block, src, runtime_src); if (oob_safety and block.wantSafety()) { const len_inst = if (maybe_slice_val) |slice_val| - try sema.addIntUnsigned(Type.usize, slice_val.sliceLen(sema.mod)) + try sema.addIntUnsigned(Type.usize, slice_val.sliceLen(mod)) else try block.addTyOp(.slice_len, Type.usize, slice); const cmp_op: Air.Inst.Tag = if (slice_sent) .cmp_lte else .cmp_lt; @@ -25537,7 +25546,7 @@ fn elemPtrSlice( if (slice_val.isUndef()) { return sema.addConstUndef(elem_ptr_ty); } - const slice_len = slice_val.sliceLen(sema.mod); + const slice_len = slice_val.sliceLen(mod); const slice_len_s = slice_len + @boolToInt(slice_sent); if (slice_len_s == 0) { return sema.fail(block, slice_src, "indexing into empty slice is not allowed", .{}); @@ -25547,7 +25556,7 @@ fn elemPtrSlice( const sentinel_label: []const u8 = if (slice_sent) " +1 (sentinel)" else ""; return sema.fail(block, elem_index_src, "index {d} outside slice of length {d}{s}", .{ index, slice_len, sentinel_label }); } - const elem_ptr_val = try slice_val.elemPtr(slice_ty, sema.arena, index, sema.mod); + const elem_ptr_val = try slice_val.elemPtr(slice_ty, sema.arena, index, mod); return sema.addConstant(elem_ptr_ty, elem_ptr_val); } } @@ -25560,7 +25569,7 @@ fn elemPtrSlice( const len_inst = len: { if (maybe_undef_slice_val) |slice_val| if (!slice_val.isUndef()) - break :len try sema.addIntUnsigned(Type.usize, slice_val.sliceLen(sema.mod)); + break :len try sema.addIntUnsigned(Type.usize, slice_val.sliceLen(mod)); break :len try block.addTyOp(.slice_len, Type.usize, slice); }; const cmp_op: Air.Inst.Tag = if (slice_sent) .cmp_lte else .cmp_lt; @@ -25602,16 +25611,17 @@ const CoerceOpts = struct { fn get(info: @This(), sema: *Sema) !?Module.SrcLoc { if (info.func_inst == .none) return null; + const mod = sema.mod; const fn_decl = (try sema.funcDeclSrc(info.func_inst)) orelse return null; - const param_src = Module.paramSrc(0, sema.gpa, fn_decl, info.param_i); + const param_src = Module.paramSrc(0, mod, fn_decl, info.param_i); if (param_src == .node_offset_param) { return Module.SrcLoc{ - .file_scope = fn_decl.getFileScope(), + .file_scope = fn_decl.getFileScope(mod), .parent_decl_node = fn_decl.src_node, .lazy = LazySrcLoc.nodeOffset(param_src.node_offset_param), }; } - return param_src.toSrcLoc(fn_decl); + return param_src.toSrcLoc(fn_decl, mod); } } = .{}, }; @@ -25625,13 +25635,13 @@ fn coerceExtra( opts: CoerceOpts, ) CoersionError!Air.Inst.Ref { if (dest_ty_unresolved.isGenericPoison()) return inst; + const mod = sema.mod; const dest_ty_src = inst_src; // TODO better source location const dest_ty = try sema.resolveTypeFields(dest_ty_unresolved); const inst_ty = try sema.resolveTypeFields(sema.typeOf(inst)); - const mod = sema.mod; - const target = sema.mod.getTarget(); + const 
target = mod.getTarget(); // If the types are the same, we can return the operand. - if (dest_ty.eql(inst_ty, sema.mod)) + if (dest_ty.eql(inst_ty, mod)) return inst; const arena = sema.arena; @@ -26254,7 +26264,7 @@ fn coerceExtra( const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = 0 }; const src_decl = sema.mod.declPtr(sema.func.?.owner_decl); - try sema.mod.errNoteNonLazy(ret_ty_src.toSrcLoc(src_decl), msg, "'noreturn' declared here", .{}); + try sema.mod.errNoteNonLazy(ret_ty_src.toSrcLoc(src_decl, mod), msg, "'noreturn' declared here", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); @@ -26287,9 +26297,9 @@ fn coerceExtra( const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = 0 }; const src_decl = sema.mod.declPtr(sema.func.?.owner_decl); if (inst_ty.isError(mod) and !dest_ty.isError(mod)) { - try sema.mod.errNoteNonLazy(ret_ty_src.toSrcLoc(src_decl), msg, "function cannot return an error", .{}); + try sema.mod.errNoteNonLazy(ret_ty_src.toSrcLoc(src_decl, mod), msg, "function cannot return an error", .{}); } else { - try sema.mod.errNoteNonLazy(ret_ty_src.toSrcLoc(src_decl), msg, "function return type declared here", .{}); + try sema.mod.errNoteNonLazy(ret_ty_src.toSrcLoc(src_decl, mod), msg, "function return type declared here", .{}); } } @@ -27246,7 +27256,7 @@ fn coerceVarArgParam( errdefer msg.destroy(sema.gpa); const src_decl = sema.mod.declPtr(block.src_decl); - try sema.explainWhyTypeIsNotExtern(msg, inst_src.toSrcLoc(src_decl), coerced_ty, .param_ty); + try sema.explainWhyTypeIsNotExtern(msg, inst_src.toSrcLoc(src_decl, mod), coerced_ty, .param_ty); try sema.addDeclaredHereNote(msg, coerced_ty); break :msg msg; @@ -29186,13 +29196,14 @@ fn addReferencedBy( } fn ensureDeclAnalyzed(sema: *Sema, decl_index: Decl.Index) CompileError!void { - const decl = sema.mod.declPtr(decl_index); + const mod = sema.mod; + const decl = mod.declPtr(decl_index); if (decl.analysis == .in_progress) { - const msg = try Module.ErrorMsg.create(sema.gpa, decl.srcLoc(), "dependency loop detected", .{}); + const msg = try Module.ErrorMsg.create(sema.gpa, decl.srcLoc(mod), "dependency loop detected", .{}); return sema.failWithOwnedErrorMsg(msg); } - sema.mod.ensureDeclAnalyzed(decl_index) catch |err| { + mod.ensureDeclAnalyzed(decl_index) catch |err| { if (sema.owner_func) |owner_func| { owner_func.state = .dependency_failure; } else { @@ -31015,12 +31026,12 @@ fn resolvePeerTypes( // At this point, we hit a compile error. We need to recover // the source locations. 
const chosen_src = candidate_srcs.resolve( - sema.gpa, + mod, mod.declPtr(block.src_decl), chosen_i, ); const candidate_src = candidate_srcs.resolve( - sema.gpa, + mod, mod.declPtr(block.src_decl), candidate_i + 1, ); @@ -31315,7 +31326,7 @@ fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!voi const decl_arena_allocator = decl.value_arena.?.acquire(gpa, &decl_arena); defer decl.value_arena.?.release(&decl_arena); - const zir = struct_obj.namespace.file_scope.zir; + const zir = mod.namespacePtr(struct_obj.namespace).file_scope.zir; const extended = zir.instructions.items(.data)[struct_obj.zir_index].extended; assert(extended.opcode == .struct_decl); const small = @bitCast(Zir.Inst.StructDecl.Small, extended.small); @@ -31353,7 +31364,7 @@ fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!voi .parent = null, .sema = &sema, .src_decl = decl_index, - .namespace = &struct_obj.namespace, + .namespace = struct_obj.namespace, .wip_capture_scope = wip_captures.scope, .instructions = .{}, .inlining = null, @@ -31399,7 +31410,7 @@ fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!voi .parent = null, .sema = &sema, .src_decl = decl_index, - .namespace = &struct_obj.namespace, + .namespace = struct_obj.namespace, .wip_capture_scope = undefined, .instructions = .{}, .inlining = null, @@ -31522,7 +31533,6 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .error_set_single, .error_set_inferred, .error_set_merged, - .@"opaque", .enum_simple, => false, @@ -31678,6 +31688,7 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { }, .struct_type => @panic("TODO"), .union_type => @panic("TODO"), + .opaque_type => false, // values, not types .simple_value => unreachable, @@ -31991,6 +32002,8 @@ fn resolveInferredErrorSet( return sema.fail(block, src, "unable to resolve inferred error set", .{}); } + const mod = sema.mod; + // In order to ensure that all dependencies are properly added to the set, we // need to ensure the function body is analyzed of the inferred error set. 
// However, in the case of comptime/inline function calls with inferred error sets, @@ -32011,7 +32024,7 @@ fn resolveInferredErrorSet( const msg = try sema.errMsg(block, src, "unable to resolve inferred error set of generic function", .{}); errdefer msg.destroy(sema.gpa); - try sema.mod.errNoteNonLazy(ies_func_owner_decl.srcLoc(), msg, "generic function declared here", .{}); + try sema.mod.errNoteNonLazy(ies_func_owner_decl.srcLoc(mod), msg, "generic function declared here", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); @@ -32049,7 +32062,7 @@ fn resolveInferredErrorSetTy( fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void { const gpa = mod.gpa; const decl_index = struct_obj.owner_decl; - const zir = struct_obj.namespace.file_scope.zir; + const zir = mod.namespacePtr(struct_obj.namespace).file_scope.zir; const extended = zir.instructions.items(.data)[struct_obj.zir_index].extended; assert(extended.opcode == .struct_decl); const small = @bitCast(Zir.Inst.StructDecl.Small, extended.small); @@ -32123,7 +32136,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void .parent = null, .sema = &sema, .src_decl = decl_index, - .namespace = &struct_obj.namespace, + .namespace = struct_obj.namespace, .wip_capture_scope = wip_captures.scope, .instructions = .{}, .inlining = null, @@ -32393,7 +32406,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { const gpa = mod.gpa; const decl_index = union_obj.owner_decl; - const zir = union_obj.namespace.file_scope.zir; + const zir = mod.namespacePtr(union_obj.namespace).file_scope.zir; const extended = zir.instructions.items(.data)[union_obj.zir_index].extended; assert(extended.opcode == .union_decl); const small = @bitCast(Zir.Inst.UnionDecl.Small, extended.small); @@ -32463,7 +32476,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { .parent = null, .sema = &sema, .src_decl = decl_index, - .namespace = &union_obj.namespace, + .namespace = union_obj.namespace, .wip_capture_scope = wip_captures.scope, .instructions = .{}, .inlining = null, @@ -32665,7 +32678,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { const prev_field_index = union_obj.fields.getIndex(field_name).?; const prev_field_src = union_obj.fieldSrcLoc(sema.mod, .{ .index = prev_field_index }).lazy; - try sema.mod.errNoteNonLazy(prev_field_src.toSrcLoc(decl), msg, "other field here", .{}); + try sema.mod.errNoteNonLazy(prev_field_src.toSrcLoc(decl, mod), msg, "other field here", .{}); try sema.errNote(&block_scope, src, msg, "union declared here", .{}); break :msg msg; }; @@ -32929,7 +32942,7 @@ fn getBuiltin(sema: *Sema, name: []const u8) CompileError!Air.Inst.Ref { const opt_ty_decl = (try sema.namespaceLookup( &block, src, - builtin_ty.getNamespace().?, + builtin_ty.getNamespaceIndex(mod).unwrap().?, name, )) orelse std.debug.panic("lib/std/builtin.zig is corrupt and missing '{s}'", .{name}); return sema.analyzeDeclVal(&block, src, opt_ty_decl); @@ -32984,7 +32997,6 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .function, .array_sentinel, .error_set_inferred, - .@"opaque", .anyframe_T, .pointer, => return null, @@ -33123,7 +33135,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .inferred_alloc_mut => unreachable, }, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + else => return switch (mod.intern_pool.indexToKey(ty.ip_index)) { .int_type => 
|int_type| { if (int_type.bits == 0) { return try mod.intValue(ty, 0); @@ -33131,7 +33143,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { return null; } }, - .ptr_type => return null, + .ptr_type => null, .array_type => |array_type| { if (array_type.len == 0) return Value.initTag(.empty_array); @@ -33152,7 +33164,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { return null; } }, - .error_union_type => return null, + .error_union_type => null, .simple_type => |t| switch (t) { .f16, .f32, @@ -33190,18 +33202,19 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .export_options, .extern_options, .type_info, - => return null, + => null, - .void => return Value.void, - .noreturn => return Value.@"unreachable", - .null => return Value.null, - .undefined => return Value.undef, + .void => Value.void, + .noreturn => Value.@"unreachable", + .null => Value.null, + .undefined => Value.undef, .generic_poison => return error.GenericPoison, .var_args_param => unreachable, }, .struct_type => @panic("TODO"), .union_type => @panic("TODO"), + .opaque_type => null, // values, not types .simple_value => unreachable, @@ -33606,7 +33619,6 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .error_set_single, .error_set_inferred, .error_set_merged, - .@"opaque", .enum_simple, => false, @@ -33772,6 +33784,7 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { }, .struct_type => @panic("TODO"), .union_type => @panic("TODO"), + .opaque_type => false, // values, not types .simple_value => unreachable, diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index 237a55984e..b484e21424 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -764,8 +764,9 @@ pub fn deinit(func: *CodeGen) void { /// Sets `err_msg` on `CodeGen` and returns `error.CodegenFail` which is caught in link/Wasm.zig fn fail(func: *CodeGen, comptime fmt: []const u8, args: anytype) InnerError { + const mod = func.bin_file.base.options.module.?; const src = LazySrcLoc.nodeOffset(0); - const src_loc = src.toSrcLoc(func.decl); + const src_loc = src.toSrcLoc(func.decl, mod); func.err_msg = try Module.ErrorMsg.create(func.gpa, src_loc, fmt, args); return error.CodegenFail; } @@ -6799,7 +6800,7 @@ fn airTagName(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 { const mod = func.bin_file.base.options.module.?; - const enum_decl_index = enum_ty.getOwnerDecl(); + const enum_decl_index = enum_ty.getOwnerDecl(mod); var arena_allocator = std.heap.ArenaAllocator.init(func.gpa); defer arena_allocator.deinit(); diff --git a/src/arch/wasm/Emit.zig b/src/arch/wasm/Emit.zig index bfa5324dc6..45ad1d7eb3 100644 --- a/src/arch/wasm/Emit.zig +++ b/src/arch/wasm/Emit.zig @@ -254,7 +254,7 @@ fn fail(emit: *Emit, comptime format: []const u8, args: anytype) InnerError { @setCold(true); std.debug.assert(emit.error_msg == null); const mod = emit.bin_file.base.options.module.?; - emit.error_msg = try Module.ErrorMsg.create(emit.bin_file.base.allocator, mod.declPtr(emit.decl_index).srcLoc(), format, args); + emit.error_msg = try Module.ErrorMsg.create(emit.bin_file.base.allocator, mod.declPtr(emit.decl_index).srcLoc(mod), format, args); return error.EmitFail; } diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 1cfed06ff1..4fb5267cb0 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ 
-112,10 +112,10 @@ const Owner = union(enum) { mod_fn: *const Module.Fn, lazy_sym: link.File.LazySymbol, - fn getDecl(owner: Owner) Module.Decl.Index { + fn getDecl(owner: Owner, mod: *Module) Module.Decl.Index { return switch (owner) { .mod_fn => |mod_fn| mod_fn.owner_decl, - .lazy_sym => |lazy_sym| lazy_sym.ty.getOwnerDecl(), + .lazy_sym => |lazy_sym| lazy_sym.ty.getOwnerDecl(mod), }; } @@ -7926,6 +7926,7 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void { } fn genArgDbgInfo(self: Self, ty: Type, name: [:0]const u8, mcv: MCValue) !void { + const mod = self.bin_file.options.module.?; switch (self.debug_output) { .dwarf => |dw| { const loc: link.File.Dwarf.DeclState.DbgInfoLoc = switch (mcv) { @@ -7944,7 +7945,7 @@ fn genArgDbgInfo(self: Self, ty: Type, name: [:0]const u8, mcv: MCValue) !void { // TODO: this might need adjusting like the linkers do. // Instead of flattening the owner and passing Decl.Index here we may // want to special case LazySymbol in DWARF linker too. - try dw.genArgDbgInfo(name, ty, self.owner.getDecl(), loc); + try dw.genArgDbgInfo(name, ty, self.owner.getDecl(mod), loc); }, .plan9 => {}, .none => {}, @@ -7958,6 +7959,7 @@ fn genVarDbgInfo( mcv: MCValue, name: [:0]const u8, ) !void { + const mod = self.bin_file.options.module.?; const is_ptr = switch (tag) { .dbg_var_ptr => true, .dbg_var_val => false, @@ -7988,7 +7990,7 @@ fn genVarDbgInfo( // TODO: this might need adjusting like the linkers do. // Instead of flattening the owner and passing Decl.Index here we may // want to special case LazySymbol in DWARF linker too. - try dw.genVarDbgInfo(name, ty, self.owner.getDecl(), is_ptr, loc); + try dw.genVarDbgInfo(name, ty, self.owner.getDecl(mod), is_ptr, loc); }, .plan9 => {}, .none => {}, @@ -10936,7 +10938,7 @@ fn airTagName(self: *Self, inst: Air.Inst.Index) !void { try self.genLazySymbolRef( .call, .rax, - link.File.LazySymbol.initDecl(.code, enum_ty.getOwnerDecl(), mod), + link.File.LazySymbol.initDecl(.code, enum_ty.getOwnerDecl(mod), mod), ); return self.finishAir(inst, dst_mcv, .{ un_op, .none, .none }); @@ -11651,7 +11653,8 @@ fn limitImmediateType(self: *Self, operand: Air.Inst.Ref, comptime T: type) !MCV } fn genTypedValue(self: *Self, arg_tv: TypedValue) InnerError!MCValue { - return switch (try codegen.genTypedValue(self.bin_file, self.src_loc, arg_tv, self.owner.getDecl())) { + const mod = self.bin_file.options.module.?; + return switch (try codegen.genTypedValue(self.bin_file, self.src_loc, arg_tv, self.owner.getDecl(mod))) { .mcv => |mcv| switch (mcv) { .none => .none, .undef => .undef, diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 60f2d86a3d..36af222c7e 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -524,8 +524,9 @@ pub const DeclGen = struct { fn fail(dg: *DeclGen, comptime format: []const u8, args: anytype) error{ AnalysisFail, OutOfMemory } { @setCold(true); + const mod = dg.module; const src = LazySrcLoc.nodeOffset(0); - const src_loc = src.toSrcLoc(dg.decl.?); + const src_loc = src.toSrcLoc(dg.decl.?, mod); dg.error_msg = try Module.ErrorMsg.create(dg.gpa, src_loc, format, args); return error.AnalysisFail; } @@ -6484,6 +6485,7 @@ fn airGetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue { } fn airTagName(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const un_op = f.air.instructions.items(.data)[inst].un_op; const inst_ty = f.typeOfIndex(inst); @@ -6495,7 +6497,7 @@ fn airTagName(f: *Function, inst: Air.Inst.Index) !CValue { const local = try f.allocLocal(inst, inst_ty); try 
f.writeCValue(writer, local, .Other); try writer.print(" = {s}(", .{ - try f.getLazyFnName(.{ .tag_name = enum_ty.getOwnerDecl() }, .{ .tag_name = enum_ty }), + try f.getLazyFnName(.{ .tag_name = enum_ty.getOwnerDecl(mod) }, .{ .tag_name = enum_ty }), }); try f.writeCValue(writer, operand, .Other); try writer.writeAll(");\n"); diff --git a/src/codegen/c/type.zig b/src/codegen/c/type.zig index 0823400858..799f18e3e4 100644 --- a/src/codegen/c/type.zig +++ b/src/codegen/c/type.zig @@ -1538,7 +1538,7 @@ pub const CType = extern union { .forward, .forward_parameter => { self.storage = .{ .fwd = .{ .base = .{ .tag = if (is_struct) .fwd_struct else .fwd_union }, - .data = ty.getOwnerDecl(), + .data = ty.getOwnerDecl(mod), } }; self.value = .{ .cty = initPayload(&self.storage.fwd) }; }, @@ -1985,7 +1985,7 @@ pub const CType = extern union { const unnamed_pl = try arena.create(Payload.Unnamed); unnamed_pl.* = .{ .base = .{ .tag = t }, .data = .{ .fields = fields_pl, - .owner_decl = ty.getOwnerDecl(), + .owner_decl = ty.getOwnerDecl(mod), .id = if (ty.unionTagTypeSafety()) |_| 0 else unreachable, } }; return initPayload(unnamed_pl); @@ -2124,7 +2124,7 @@ pub const CType = extern union { .forward, .forward_parameter, .complete, .parameter, .global => unreachable, .payload => if (ty.unionTagTypeSafety()) |_| { const data = cty.cast(Payload.Unnamed).?.data; - return ty.getOwnerDecl() == data.owner_decl and data.id == 0; + return ty.getOwnerDecl(mod) == data.owner_decl and data.id == 0; } else unreachable, }, @@ -2242,7 +2242,7 @@ pub const CType = extern union { => switch (self.kind) { .forward, .forward_parameter, .complete, .parameter, .global => unreachable, .payload => if (ty.unionTagTypeSafety()) |_| { - autoHash(hasher, ty.getOwnerDecl()); + autoHash(hasher, ty.getOwnerDecl(mod)); autoHash(hasher, @as(u32, 0)); } else unreachable, }, diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 5f013c38ec..10cf66a69a 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -1177,7 +1177,7 @@ pub const Object = struct { var di_scope: ?*llvm.DIScope = null; if (dg.object.di_builder) |dib| { - di_file = try dg.object.getDIFile(gpa, decl.src_namespace.file_scope); + di_file = try dg.object.getDIFile(gpa, mod.namespacePtr(decl.src_namespace).file_scope); const line_number = decl.src_line + 1; const is_internal_linkage = decl.val.tag() != .extern_fn and @@ -1505,7 +1505,7 @@ pub const Object = struct { return di_type; }, .Enum => { - const owner_decl_index = ty.getOwnerDecl(); + const owner_decl_index = ty.getOwnerDecl(mod); const owner_decl = o.module.declPtr(owner_decl_index); if (!ty.hasRuntimeBitsIgnoreComptime(mod)) { @@ -1558,7 +1558,7 @@ pub const Object = struct { @panic("TODO implement bigint debug enumerators to llvm int for 32-bit compiler builds"); } - const di_file = try o.getDIFile(gpa, owner_decl.src_namespace.file_scope); + const di_file = try o.getDIFile(gpa, mod.namespacePtr(owner_decl.src_namespace).file_scope); const di_scope = try o.namespaceToDebugScope(owner_decl.src_namespace); const name = try ty.nameAlloc(gpa, o.module); @@ -1737,13 +1737,13 @@ pub const Object = struct { } const name = try ty.nameAlloc(gpa, o.module); defer gpa.free(name); - const owner_decl_index = ty.getOwnerDecl(); + const owner_decl_index = ty.getOwnerDecl(mod); const owner_decl = o.module.declPtr(owner_decl_index); const opaque_di_ty = dib.createForwardDeclType( DW.TAG.structure_type, name, try o.namespaceToDebugScope(owner_decl.src_namespace), - try o.getDIFile(gpa, 
owner_decl.src_namespace.file_scope), + try o.getDIFile(gpa, mod.namespacePtr(owner_decl.src_namespace).file_scope), owner_decl.src_node + 1, ); // The recursive call to `lowerDebugType` via `namespaceToDebugScope` @@ -2085,7 +2085,7 @@ pub const Object = struct { // into. Therefore we can satisfy this by making an empty namespace, // rather than changing the frontend to unnecessarily resolve the // struct field types. - const owner_decl_index = ty.getOwnerDecl(); + const owner_decl_index = ty.getOwnerDecl(mod); const struct_di_ty = try o.makeEmptyNamespaceDIType(owner_decl_index); dib.replaceTemporary(fwd_decl, struct_di_ty); // The recursive call to `lowerDebugType` via `makeEmptyNamespaceDIType` @@ -2096,7 +2096,7 @@ pub const Object = struct { } if (!ty.hasRuntimeBitsIgnoreComptime(mod)) { - const owner_decl_index = ty.getOwnerDecl(); + const owner_decl_index = ty.getOwnerDecl(mod); const struct_di_ty = try o.makeEmptyNamespaceDIType(owner_decl_index); dib.replaceTemporary(fwd_decl, struct_di_ty); // The recursive call to `lowerDebugType` via `makeEmptyNamespaceDIType` @@ -2162,7 +2162,7 @@ pub const Object = struct { }, .Union => { const compile_unit_scope = o.di_compile_unit.?.toScope(); - const owner_decl_index = ty.getOwnerDecl(); + const owner_decl_index = ty.getOwnerDecl(mod); const name = try ty.nameAlloc(gpa, o.module); defer gpa.free(name); @@ -2395,8 +2395,10 @@ pub const Object = struct { } } - fn namespaceToDebugScope(o: *Object, namespace: *const Module.Namespace) !*llvm.DIScope { - if (namespace.parent == null) { + fn namespaceToDebugScope(o: *Object, namespace_index: Module.Namespace.Index) !*llvm.DIScope { + const mod = o.module; + const namespace = mod.namespacePtr(namespace_index); + if (namespace.parent == .none) { const di_file = try o.getDIFile(o.gpa, namespace.file_scope); return di_file.toScope(); } @@ -2408,12 +2410,13 @@ pub const Object = struct { /// Assertion `!isa(Scope) && "shouldn't make a namespace scope for a type"' /// when targeting CodeView (Windows). fn makeEmptyNamespaceDIType(o: *Object, decl_index: Module.Decl.Index) !*llvm.DIType { - const decl = o.module.declPtr(decl_index); + const mod = o.module; + const decl = mod.declPtr(decl_index); const fields: [0]*llvm.DIType = .{}; return o.di_builder.?.createStructType( try o.namespaceToDebugScope(decl.src_namespace), decl.name, // TODO use fully qualified name - try o.getDIFile(o.gpa, decl.src_namespace.file_scope), + try o.getDIFile(o.gpa, mod.namespacePtr(decl.src_namespace).file_scope), decl.src_line + 1, 0, // size in bits 0, // align in bits @@ -2434,14 +2437,14 @@ pub const Object = struct { const std_file = (mod.importPkg(std_pkg) catch unreachable).file; const builtin_str: []const u8 = "builtin"; - const std_namespace = mod.declPtr(std_file.root_decl.unwrap().?).src_namespace; + const std_namespace = mod.namespacePtr(mod.declPtr(std_file.root_decl.unwrap().?).src_namespace); const builtin_decl = std_namespace.decls .getKeyAdapted(builtin_str, Module.DeclAdapter{ .mod = mod }).?; const stack_trace_str: []const u8 = "StackTrace"; // buffer is only used for int_type, `builtin` is a struct.
const builtin_ty = mod.declPtr(builtin_decl).val.toType(); - const builtin_namespace = builtin_ty.getNamespace().?; + const builtin_namespace = builtin_ty.getNamespace(mod).?; const stack_trace_decl_index = builtin_namespace.decls .getKeyAdapted(stack_trace_str, Module.DeclAdapter{ .mod = mod }).?; const stack_trace_decl = mod.declPtr(stack_trace_decl_index); @@ -2464,7 +2467,8 @@ pub const DeclGen = struct { fn todo(self: *DeclGen, comptime format: []const u8, args: anytype) Error { @setCold(true); assert(self.err_msg == null); - const src_loc = LazySrcLoc.nodeOffset(0).toSrcLoc(self.decl); + const mod = self.module; + const src_loc = LazySrcLoc.nodeOffset(0).toSrcLoc(self.decl, mod); self.err_msg = try Module.ErrorMsg.create(self.gpa, src_loc, "TODO (LLVM): " ++ format, args); return error.CodegenFail; } @@ -2536,7 +2540,7 @@ pub const DeclGen = struct { } if (dg.object.di_builder) |dib| { - const di_file = try dg.object.getDIFile(dg.gpa, decl.src_namespace.file_scope); + const di_file = try dg.object.getDIFile(dg.gpa, mod.namespacePtr(decl.src_namespace).file_scope); const line_number = decl.src_line + 1; const is_internal_linkage = !dg.module.decl_exports.contains(decl_index); @@ -2837,15 +2841,11 @@ pub const DeclGen = struct { .Opaque => { if (t.ip_index == .anyopaque_type) return dg.context.intType(8); - const gop = try dg.object.type_map.getOrPutContext(gpa, t, .{ .mod = dg.module }); + const gop = try dg.object.type_map.getOrPutContext(gpa, t, .{ .mod = mod }); if (gop.found_existing) return gop.value_ptr.*; - // The Type memory is ephemeral; since we want to store a longer-lived - // reference, we need to copy it here. - gop.key_ptr.* = try t.copy(dg.object.type_map_arena.allocator()); - - const opaque_obj = t.castTag(.@"opaque").?.data; - const name = try opaque_obj.getFullyQualifiedName(dg.module); + const opaque_type = mod.intern_pool.indexToKey(t.ip_index).opaque_type; + const name = try mod.opaqueFullyQualifiedName(opaque_type); defer gpa.free(name); const llvm_struct_ty = dg.context.structCreateNamed(name); @@ -2931,7 +2931,7 @@ pub const DeclGen = struct { }, .ErrorSet => return dg.context.intType(16), .Struct => { - const gop = try dg.object.type_map.getOrPutContext(gpa, t, .{ .mod = dg.module }); + const gop = try dg.object.type_map.getOrPutContext(gpa, t, .{ .mod = mod }); if (gop.found_existing) return gop.value_ptr.*; // The Type memory is ephemeral; since we want to store a longer-lived @@ -2999,7 +2999,7 @@ pub const DeclGen = struct { return int_llvm_ty; } - const name = try struct_obj.getFullyQualifiedName(dg.module); + const name = try struct_obj.getFullyQualifiedName(mod); defer gpa.free(name); const llvm_struct_ty = dg.context.structCreateNamed(name); @@ -3057,7 +3057,7 @@ pub const DeclGen = struct { return llvm_struct_ty; }, .Union => { - const gop = try dg.object.type_map.getOrPutContext(gpa, t, .{ .mod = dg.module }); + const gop = try dg.object.type_map.getOrPutContext(gpa, t, .{ .mod = mod }); if (gop.found_existing) return gop.value_ptr.*; // The Type memory is ephemeral; since we want to store a longer-lived @@ -3080,7 +3080,7 @@ pub const DeclGen = struct { return enum_tag_llvm_ty; } - const name = try union_obj.getFullyQualifiedName(dg.module); + const name = try union_obj.getFullyQualifiedName(mod); defer gpa.free(name); const llvm_union_ty = dg.context.structCreateNamed(name); @@ -6131,7 +6131,7 @@ pub const FuncGen = struct { const func = self.air.values[ty_pl.payload].castTag(.function).?.data; const decl_index = func.owner_decl; const decl = 
mod.declPtr(decl_index); - const di_file = try self.dg.object.getDIFile(self.gpa, decl.src_namespace.file_scope); + const di_file = try self.dg.object.getDIFile(self.gpa, mod.namespacePtr(decl.src_namespace).file_scope); self.di_file = di_file; const line_number = decl.src_line + 1; const cur_debug_location = self.builder.getCurrentDebugLocation2(); @@ -6193,7 +6193,7 @@ pub const FuncGen = struct { const func = self.air.values[ty_pl.payload].castTag(.function).?.data; const mod = self.dg.module; const decl = mod.declPtr(func.owner_decl); - const di_file = try self.dg.object.getDIFile(self.gpa, decl.src_namespace.file_scope); + const di_file = try self.dg.object.getDIFile(self.gpa, mod.namespacePtr(decl.src_namespace).file_scope); self.di_file = di_file; const old = self.dbg_inlined.pop(); self.di_scope = old.scope; @@ -8853,7 +8853,8 @@ pub const FuncGen = struct { } fn getIsNamedEnumValueFunction(self: *FuncGen, enum_ty: Type) !*llvm.Value { - const enum_decl = enum_ty.getOwnerDecl(); + const mod = self.dg.module; + const enum_decl = enum_ty.getOwnerDecl(mod); // TODO: detect when the type changes and re-emit this function. const gop = try self.dg.object.named_enum_map.getOrPut(self.dg.gpa, enum_decl); @@ -8864,7 +8865,6 @@ pub const FuncGen = struct { defer arena_allocator.deinit(); const arena = arena_allocator.allocator(); - const mod = self.dg.module; const fqn = try mod.declPtr(enum_decl).getFullyQualifiedName(mod); defer self.gpa.free(fqn); const llvm_fn_name = try std.fmt.allocPrintZ(arena, "__zig_is_named_enum_value_{s}", .{fqn}); @@ -8931,7 +8931,8 @@ pub const FuncGen = struct { } fn getEnumTagNameFunction(self: *FuncGen, enum_ty: Type) !*llvm.Value { - const enum_decl = enum_ty.getOwnerDecl(); + const mod = self.dg.module; + const enum_decl = enum_ty.getOwnerDecl(mod); // TODO: detect when the type changes and re-emit this function. 
const gop = try self.dg.object.decl_map.getOrPut(self.dg.gpa, enum_decl); @@ -8942,7 +8943,6 @@ pub const FuncGen = struct { defer arena_allocator.deinit(); const arena = arena_allocator.allocator(); - const mod = self.dg.module; const fqn = try mod.declPtr(enum_decl).getFullyQualifiedName(mod); defer self.gpa.free(fqn); const llvm_fn_name = try std.fmt.allocPrintZ(arena, "__zig_tag_name_{s}", .{fqn}); diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 843b67e426..52f94cc6d5 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -218,8 +218,9 @@ pub const DeclGen = struct { pub fn fail(self: *DeclGen, comptime format: []const u8, args: anytype) Error { @setCold(true); + const mod = self.module; const src = LazySrcLoc.nodeOffset(0); - const src_loc = src.toSrcLoc(self.module.declPtr(self.decl_index)); + const src_loc = src.toSrcLoc(self.module.declPtr(self.decl_index), mod); assert(self.error_msg == null); self.error_msg = try Module.ErrorMsg.create(self.module.gpa, src_loc, format, args); return error.CodegenFail; @@ -2775,7 +2776,10 @@ pub const DeclGen = struct { fn airDbgStmt(self: *DeclGen, inst: Air.Inst.Index) !void { const dbg_stmt = self.air.instructions.items(.data)[inst].dbg_stmt; - const src_fname_id = try self.spv.resolveSourceFileName(self.module.declPtr(self.decl_index)); + const src_fname_id = try self.spv.resolveSourceFileName( + self.module, + self.module.declPtr(self.decl_index), + ); try self.func.body.emit(self.spv.gpa, .OpLine, .{ .file = src_fname_id, .line = dbg_stmt.line, @@ -3192,6 +3196,7 @@ pub const DeclGen = struct { } fn airAssembly(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { + const mod = self.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Asm, ty_pl.payload); @@ -3274,7 +3279,7 @@ pub const DeclGen = struct { assert(as.errors.items.len != 0); assert(self.error_msg == null); const loc = LazySrcLoc.nodeOffset(0); - const src_loc = loc.toSrcLoc(self.module.declPtr(self.decl_index)); + const src_loc = loc.toSrcLoc(self.module.declPtr(self.decl_index), mod); self.error_msg = try Module.ErrorMsg.create(self.module.gpa, src_loc, "failed to assemble SPIR-V inline assembly", .{}); const notes = try self.module.gpa.alloc(Module.ErrorMsg, as.errors.items.len); diff --git a/src/codegen/spirv/Module.zig b/src/codegen/spirv/Module.zig index c5ba429ec9..d53dcb4368 100644 --- a/src/codegen/spirv/Module.zig +++ b/src/codegen/spirv/Module.zig @@ -390,8 +390,8 @@ pub fn addFunction(self: *Module, decl_index: Decl.Index, func: Fn) !void { /// Fetch the result-id of an OpString instruction that encodes the path of the source /// file of the decl. This function may also emit an OpSource with source-level information regarding /// the decl. 
-pub fn resolveSourceFileName(self: *Module, decl: *ZigDecl) !IdRef { - const path = decl.getFileScope().sub_file_path; +pub fn resolveSourceFileName(self: *Module, zig_module: *ZigModule, zig_decl: *ZigDecl) !IdRef { + const path = zig_decl.getFileScope(zig_module).sub_file_path; const result = try self.source_file_names.getOrPut(self.gpa, path); if (!result.found_existing) { const file_result_id = self.allocId(); diff --git a/src/crash_report.zig b/src/crash_report.zig index b2e3018de6..57b870c198 100644 --- a/src/crash_report.zig +++ b/src/crash_report.zig @@ -99,7 +99,7 @@ fn dumpStatusReport() !void { allocator, anal.body, anal.body_index, - block.namespace.file_scope, + mod.namespacePtr(block.namespace).file_scope, block_src_decl.src_node, 6, // indent stderr, @@ -108,7 +108,7 @@ fn dumpStatusReport() !void { else => |e| return e, }; try stderr.writeAll(" For full context, use the command\n zig ast-check -t "); - try writeFilePath(block.namespace.file_scope, stderr); + try writeFilePath(mod.namespacePtr(block.namespace).file_scope, stderr); try stderr.writeAll("\n\n"); var parent = anal.parent; @@ -121,7 +121,7 @@ fn dumpStatusReport() !void { print_zir.renderSingleInstruction( allocator, curr.body[curr.body_index], - curr.block.namespace.file_scope, + mod.namespacePtr(curr.block.namespace).file_scope, curr_block_src_decl.src_node, 6, // indent stderr, @@ -148,7 +148,7 @@ fn writeFilePath(file: *Module.File, stream: anytype) !void { } fn writeFullyQualifiedDeclWithFile(mod: *Module, decl: *Decl, stream: anytype) !void { - try writeFilePath(decl.getFileScope(), stream); + try writeFilePath(decl.getFileScope(mod), stream); try stream.writeAll(": "); try decl.renderFullyQualifiedDebugName(mod, stream); } diff --git a/src/link.zig b/src/link.zig index 471b26ae9f..ac764f06f8 100644 --- a/src/link.zig +++ b/src/link.zig @@ -1129,8 +1129,8 @@ pub const File = struct { Type.anyerror }; } - pub fn getDecl(self: LazySymbol) Module.Decl.OptionalIndex { - return Module.Decl.OptionalIndex.init(self.ty.getOwnerDeclOrNull()); + pub fn getDecl(self: LazySymbol, mod: *Module) Module.Decl.OptionalIndex { + return Module.Decl.OptionalIndex.init(self.ty.getOwnerDeclOrNull(mod)); } }; diff --git a/src/link/Coff.zig b/src/link/Coff.zig index 6117f1c1de..4e75cfff97 100644 --- a/src/link/Coff.zig +++ b/src/link/Coff.zig @@ -1032,20 +1032,20 @@ fn freeAtom(self: *Coff, atom_index: Atom.Index) void { self.getAtomPtr(atom_index).sym_index = 0; } -pub fn updateFunc(self: *Coff, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { +pub fn updateFunc(self: *Coff, mod: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { if (build_options.skip_non_native and builtin.object_format != .coff) { @panic("Attempted to compile for object format that was disabled by build configuration"); } if (build_options.have_llvm) { if (self.llvm_object) |llvm_object| { - return llvm_object.updateFunc(module, func, air, liveness); + return llvm_object.updateFunc(mod, func, air, liveness); } } const tracy = trace(@src()); defer tracy.end(); const decl_index = func.owner_decl; - const decl = module.declPtr(decl_index); + const decl = mod.declPtr(decl_index); const atom_index = try self.getOrCreateAtomForDecl(decl_index); self.freeUnnamedConsts(decl_index); @@ -1056,7 +1056,7 @@ pub fn updateFunc(self: *Coff, module: *Module, func: *Module.Fn, air: Air, live const res = try codegen.generateFunction( &self.base, - decl.srcLoc(), + decl.srcLoc(mod), func, air, liveness, @@ -1067,7 +1067,7 @@ pub fn 
updateFunc(self: *Coff, module: *Module, func: *Module.Fn, air: Air, live .ok => code_buffer.items, .fail => |em| { decl.analysis = .codegen_failure; - try module.failed_decls.put(module.gpa, decl_index, em); + try mod.failed_decls.put(mod.gpa, decl_index, em); return; }, }; @@ -1076,7 +1076,7 @@ pub fn updateFunc(self: *Coff, module: *Module, func: *Module.Fn, air: Air, live // Since we updated the vaddr and the size, each corresponding export // symbol also needs to be updated. - return self.updateDeclExports(module, decl_index, module.getDeclExports(decl_index)); + return self.updateDeclExports(mod, decl_index, mod.getDeclExports(decl_index)); } pub fn lowerUnnamedConst(self: *Coff, tv: TypedValue, decl_index: Module.Decl.Index) !u32 { @@ -1110,7 +1110,7 @@ pub fn lowerUnnamedConst(self: *Coff, tv: TypedValue, decl_index: Module.Decl.In sym.section_number = @intToEnum(coff.SectionNumber, self.rdata_section_index.? + 1); } - const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), tv, &code_buffer, .none, .{ + const res = try codegen.generateSymbol(&self.base, decl.srcLoc(mod), tv, &code_buffer, .none, .{ .parent_atom_index = self.getAtom(atom_index).getSymbolIndex().?, }); var code = switch (res) { @@ -1141,19 +1141,19 @@ pub fn lowerUnnamedConst(self: *Coff, tv: TypedValue, decl_index: Module.Decl.In pub fn updateDecl( self: *Coff, - module: *Module, + mod: *Module, decl_index: Module.Decl.Index, ) link.File.UpdateDeclError!void { if (build_options.skip_non_native and builtin.object_format != .coff) { @panic("Attempted to compile for object format that was disabled by build configuration"); } if (build_options.have_llvm) { - if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(module, decl_index); + if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(mod, decl_index); } const tracy = trace(@src()); defer tracy.end(); - const decl = module.declPtr(decl_index); + const decl = mod.declPtr(decl_index); if (decl.val.tag() == .extern_fn) { return; // TODO Should we do more when front-end analyzed extern decl? @@ -1173,7 +1173,7 @@ pub fn updateDecl( defer code_buffer.deinit(); const decl_val = if (decl.val.castTag(.variable)) |payload| payload.data.init else decl.val; - const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), .{ + const res = try codegen.generateSymbol(&self.base, decl.srcLoc(mod), .{ .ty = decl.ty, .val = decl_val, }, &code_buffer, .none, .{ @@ -1183,7 +1183,7 @@ pub fn updateDecl( .ok => code_buffer.items, .fail => |em| { decl.analysis = .codegen_failure; - try module.failed_decls.put(module.gpa, decl_index, em); + try mod.failed_decls.put(mod.gpa, decl_index, em); return; }, }; @@ -1192,7 +1192,7 @@ pub fn updateDecl( // Since we updated the vaddr and the size, each corresponding export // symbol also needs to be updated. 
- return self.updateDeclExports(module, decl_index, module.getDeclExports(decl_index)); + return self.updateDeclExports(mod, decl_index, mod.getDeclExports(decl_index)); } fn updateLazySymbolAtom( @@ -1217,8 +1217,8 @@ fn updateLazySymbolAtom( const atom = self.getAtomPtr(atom_index); const local_sym_index = atom.getSymbolIndex().?; - const src = if (sym.ty.getOwnerDeclOrNull()) |owner_decl| - mod.declPtr(owner_decl).srcLoc() + const src = if (sym.ty.getOwnerDeclOrNull(mod)) |owner_decl| + mod.declPtr(owner_decl).srcLoc(mod) else Module.SrcLoc{ .file_scope = undefined, @@ -1262,7 +1262,8 @@ fn updateLazySymbolAtom( } pub fn getOrCreateAtomForLazySymbol(self: *Coff, sym: link.File.LazySymbol) !Atom.Index { - const gop = try self.lazy_syms.getOrPut(self.base.allocator, sym.getDecl()); + const mod = self.base.options.module.?; + const gop = try self.lazy_syms.getOrPut(self.base.allocator, sym.getDecl(mod)); errdefer _ = if (!gop.found_existing) self.lazy_syms.pop(); if (!gop.found_existing) gop.value_ptr.* = .{}; const metadata: struct { atom: *Atom.Index, state: *LazySymbolMetadata.State } = switch (sym.kind) { @@ -1277,7 +1278,7 @@ pub fn getOrCreateAtomForLazySymbol(self: *Coff, sym: link.File.LazySymbol) !Ato metadata.state.* = .pending_flush; const atom = metadata.atom.*; // anyerror needs to be deferred until flushModule - if (sym.getDecl() != .none) try self.updateLazySymbolAtom(sym, atom, switch (sym.kind) { + if (sym.getDecl(mod) != .none) try self.updateLazySymbolAtom(sym, atom, switch (sym.kind) { .code => self.text_section_index.?, .const_data => self.rdata_section_index.?, }); @@ -1411,7 +1412,7 @@ pub fn freeDecl(self: *Coff, decl_index: Module.Decl.Index) void { pub fn updateDeclExports( self: *Coff, - module: *Module, + mod: *Module, decl_index: Module.Decl.Index, exports: []const *Module.Export, ) link.File.UpdateDeclExportsError!void { @@ -1423,7 +1424,7 @@ pub fn updateDeclExports( // Even in the case of LLVM, we need to notice certain exported symbols in order to // detect the default subsystem. 
for (exports) |exp| { - const exported_decl = module.declPtr(exp.exported_decl); + const exported_decl = mod.declPtr(exp.exported_decl); if (exported_decl.getFunction() == null) continue; const winapi_cc = switch (self.base.options.target.cpu.arch) { .x86 => std.builtin.CallingConvention.Stdcall, @@ -1433,23 +1434,23 @@ pub fn updateDeclExports( if (decl_cc == .C and mem.eql(u8, exp.options.name, "main") and self.base.options.link_libc) { - module.stage1_flags.have_c_main = true; + mod.stage1_flags.have_c_main = true; } else if (decl_cc == winapi_cc and self.base.options.target.os.tag == .windows) { if (mem.eql(u8, exp.options.name, "WinMain")) { - module.stage1_flags.have_winmain = true; + mod.stage1_flags.have_winmain = true; } else if (mem.eql(u8, exp.options.name, "wWinMain")) { - module.stage1_flags.have_wwinmain = true; + mod.stage1_flags.have_wwinmain = true; } else if (mem.eql(u8, exp.options.name, "WinMainCRTStartup")) { - module.stage1_flags.have_winmain_crt_startup = true; + mod.stage1_flags.have_winmain_crt_startup = true; } else if (mem.eql(u8, exp.options.name, "wWinMainCRTStartup")) { - module.stage1_flags.have_wwinmain_crt_startup = true; + mod.stage1_flags.have_wwinmain_crt_startup = true; } else if (mem.eql(u8, exp.options.name, "DllMainCRTStartup")) { - module.stage1_flags.have_dllmain_crt_startup = true; + mod.stage1_flags.have_dllmain_crt_startup = true; } } } - if (self.llvm_object) |llvm_object| return llvm_object.updateDeclExports(module, decl_index, exports); + if (self.llvm_object) |llvm_object| return llvm_object.updateDeclExports(mod, decl_index, exports); } const tracy = trace(@src()); @@ -1457,7 +1458,7 @@ pub fn updateDeclExports( const gpa = self.base.allocator; - const decl = module.declPtr(decl_index); + const decl = mod.declPtr(decl_index); const atom_index = try self.getOrCreateAtomForDecl(decl_index); const atom = self.getAtom(atom_index); const decl_sym = atom.getSymbol(self); @@ -1468,12 +1469,12 @@ pub fn updateDeclExports( if (exp.options.section) |section_name| { if (!mem.eql(u8, section_name, ".text")) { - try module.failed_exports.putNoClobber( - module.gpa, + try mod.failed_exports.putNoClobber( + mod.gpa, exp, try Module.ErrorMsg.create( gpa, - decl.srcLoc(), + decl.srcLoc(mod), "Unimplemented: ExportOptions.section", .{}, ), @@ -1483,12 +1484,12 @@ pub fn updateDeclExports( } if (exp.options.linkage == .LinkOnce) { - try module.failed_exports.putNoClobber( - module.gpa, + try mod.failed_exports.putNoClobber( + mod.gpa, exp, try Module.ErrorMsg.create( gpa, - decl.srcLoc(), + decl.srcLoc(mod), "Unimplemented: GlobalLinkage.LinkOnce", .{}, ), diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig index c971b5b26f..0561ccbfda 100644 --- a/src/link/Dwarf.zig +++ b/src/link/Dwarf.zig @@ -2597,7 +2597,7 @@ pub fn flushModule(self: *Dwarf, module: *Module) !void { fn addDIFile(self: *Dwarf, mod: *Module, decl_index: Module.Decl.Index) !u28 { const decl = mod.declPtr(decl_index); - const file_scope = decl.getFileScope(); + const file_scope = decl.getFileScope(mod); const gop = try self.di_files.getOrPut(self.allocator, file_scope); if (!gop.found_existing) { switch (self.bin_file.tag) { diff --git a/src/link/Elf.zig b/src/link/Elf.zig index 7bd36a9b60..c80d60d72a 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -2414,7 +2414,8 @@ pub fn freeDecl(self: *Elf, decl_index: Module.Decl.Index) void { } pub fn getOrCreateAtomForLazySymbol(self: *Elf, sym: File.LazySymbol) !Atom.Index { - const gop = try self.lazy_syms.getOrPut(self.base.allocator, 
sym.getDecl()); + const mod = self.base.options.module.?; + const gop = try self.lazy_syms.getOrPut(self.base.allocator, sym.getDecl(mod)); errdefer _ = if (!gop.found_existing) self.lazy_syms.pop(); if (!gop.found_existing) gop.value_ptr.* = .{}; const metadata: struct { atom: *Atom.Index, state: *LazySymbolMetadata.State } = switch (sym.kind) { @@ -2429,7 +2430,7 @@ pub fn getOrCreateAtomForLazySymbol(self: *Elf, sym: File.LazySymbol) !Atom.Inde metadata.state.* = .pending_flush; const atom = metadata.atom.*; // anyerror needs to be deferred until flushModule - if (sym.getDecl() != .none) try self.updateLazySymbolAtom(sym, atom, switch (sym.kind) { + if (sym.getDecl(mod) != .none) try self.updateLazySymbolAtom(sym, atom, switch (sym.kind) { .code => self.text_section_index.?, .const_data => self.rodata_section_index.?, }); @@ -2573,19 +2574,19 @@ fn updateDeclCode(self: *Elf, decl_index: Module.Decl.Index, code: []const u8, s return local_sym; } -pub fn updateFunc(self: *Elf, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { +pub fn updateFunc(self: *Elf, mod: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { if (build_options.skip_non_native and builtin.object_format != .elf) { @panic("Attempted to compile for object format that was disabled by build configuration"); } if (build_options.have_llvm) { - if (self.llvm_object) |llvm_object| return llvm_object.updateFunc(module, func, air, liveness); + if (self.llvm_object) |llvm_object| return llvm_object.updateFunc(mod, func, air, liveness); } const tracy = trace(@src()); defer tracy.end(); const decl_index = func.owner_decl; - const decl = module.declPtr(decl_index); + const decl = mod.declPtr(decl_index); const atom_index = try self.getOrCreateAtomForDecl(decl_index); self.freeUnnamedConsts(decl_index); @@ -2594,28 +2595,28 @@ pub fn updateFunc(self: *Elf, module: *Module, func: *Module.Fn, air: Air, liven var code_buffer = std.ArrayList(u8).init(self.base.allocator); defer code_buffer.deinit(); - var decl_state: ?Dwarf.DeclState = if (self.dwarf) |*dw| try dw.initDeclState(module, decl_index) else null; + var decl_state: ?Dwarf.DeclState = if (self.dwarf) |*dw| try dw.initDeclState(mod, decl_index) else null; defer if (decl_state) |*ds| ds.deinit(); const res = if (decl_state) |*ds| - try codegen.generateFunction(&self.base, decl.srcLoc(), func, air, liveness, &code_buffer, .{ + try codegen.generateFunction(&self.base, decl.srcLoc(mod), func, air, liveness, &code_buffer, .{ .dwarf = ds, }) else - try codegen.generateFunction(&self.base, decl.srcLoc(), func, air, liveness, &code_buffer, .none); + try codegen.generateFunction(&self.base, decl.srcLoc(mod), func, air, liveness, &code_buffer, .none); const code = switch (res) { .ok => code_buffer.items, .fail => |em| { decl.analysis = .codegen_failure; - try module.failed_decls.put(module.gpa, decl_index, em); + try mod.failed_decls.put(mod.gpa, decl_index, em); return; }, }; const local_sym = try self.updateDeclCode(decl_index, code, elf.STT_FUNC); if (decl_state) |*ds| { try self.dwarf.?.commitDeclState( - module, + mod, decl_index, local_sym.st_value, local_sym.st_size, @@ -2625,25 +2626,25 @@ pub fn updateFunc(self: *Elf, module: *Module, func: *Module.Fn, air: Air, liven // Since we updated the vaddr and the size, each corresponding export // symbol also needs to be updated. 
- return self.updateDeclExports(module, decl_index, module.getDeclExports(decl_index)); + return self.updateDeclExports(mod, decl_index, mod.getDeclExports(decl_index)); } pub fn updateDecl( self: *Elf, - module: *Module, + mod: *Module, decl_index: Module.Decl.Index, ) File.UpdateDeclError!void { if (build_options.skip_non_native and builtin.object_format != .elf) { @panic("Attempted to compile for object format that was disabled by build configuration"); } if (build_options.have_llvm) { - if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(module, decl_index); + if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(mod, decl_index); } const tracy = trace(@src()); defer tracy.end(); - const decl = module.declPtr(decl_index); + const decl = mod.declPtr(decl_index); if (decl.val.tag() == .extern_fn) { return; // TODO Should we do more when front-end analyzed extern decl? @@ -2662,13 +2663,13 @@ pub fn updateDecl( var code_buffer = std.ArrayList(u8).init(self.base.allocator); defer code_buffer.deinit(); - var decl_state: ?Dwarf.DeclState = if (self.dwarf) |*dw| try dw.initDeclState(module, decl_index) else null; + var decl_state: ?Dwarf.DeclState = if (self.dwarf) |*dw| try dw.initDeclState(mod, decl_index) else null; defer if (decl_state) |*ds| ds.deinit(); // TODO implement .debug_info for global variables const decl_val = if (decl.val.castTag(.variable)) |payload| payload.data.init else decl.val; const res = if (decl_state) |*ds| - try codegen.generateSymbol(&self.base, decl.srcLoc(), .{ + try codegen.generateSymbol(&self.base, decl.srcLoc(mod), .{ .ty = decl.ty, .val = decl_val, }, &code_buffer, .{ @@ -2677,7 +2678,7 @@ pub fn updateDecl( .parent_atom_index = atom.getSymbolIndex().?, }) else - try codegen.generateSymbol(&self.base, decl.srcLoc(), .{ + try codegen.generateSymbol(&self.base, decl.srcLoc(mod), .{ .ty = decl.ty, .val = decl_val, }, &code_buffer, .none, .{ @@ -2688,7 +2689,7 @@ pub fn updateDecl( .ok => code_buffer.items, .fail => |em| { decl.analysis = .codegen_failure; - try module.failed_decls.put(module.gpa, decl_index, em); + try mod.failed_decls.put(mod.gpa, decl_index, em); return; }, }; @@ -2696,7 +2697,7 @@ pub fn updateDecl( const local_sym = try self.updateDeclCode(decl_index, code, elf.STT_OBJECT); if (decl_state) |*ds| { try self.dwarf.?.commitDeclState( - module, + mod, decl_index, local_sym.st_value, local_sym.st_size, @@ -2706,7 +2707,7 @@ pub fn updateDecl( // Since we updated the vaddr and the size, each corresponding export // symbol also needs to be updated. 
- return self.updateDeclExports(module, decl_index, module.getDeclExports(decl_index)); + return self.updateDeclExports(mod, decl_index, mod.getDeclExports(decl_index)); } fn updateLazySymbolAtom( @@ -2735,8 +2736,8 @@ fn updateLazySymbolAtom( const atom = self.getAtom(atom_index); const local_sym_index = atom.getSymbolIndex().?; - const src = if (sym.ty.getOwnerDeclOrNull()) |owner_decl| - mod.declPtr(owner_decl).srcLoc() + const src = if (sym.ty.getOwnerDeclOrNull(mod)) |owner_decl| + mod.declPtr(owner_decl).srcLoc(mod) else Module.SrcLoc{ .file_scope = undefined, @@ -2812,7 +2813,7 @@ pub fn lowerUnnamedConst(self: *Elf, typed_value: TypedValue, decl_index: Module const atom_index = try self.createAtom(); - const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), typed_value, &code_buffer, .{ + const res = try codegen.generateSymbol(&self.base, decl.srcLoc(mod), typed_value, &code_buffer, .{ .none = {}, }, .{ .parent_atom_index = self.getAtom(atom_index).getSymbolIndex().?, @@ -2853,7 +2854,7 @@ pub fn lowerUnnamedConst(self: *Elf, typed_value: TypedValue, decl_index: Module pub fn updateDeclExports( self: *Elf, - module: *Module, + mod: *Module, decl_index: Module.Decl.Index, exports: []const *Module.Export, ) File.UpdateDeclExportsError!void { @@ -2861,7 +2862,7 @@ pub fn updateDeclExports( @panic("Attempted to compile for object format that was disabled by build configuration"); } if (build_options.have_llvm) { - if (self.llvm_object) |llvm_object| return llvm_object.updateDeclExports(module, decl_index, exports); + if (self.llvm_object) |llvm_object| return llvm_object.updateDeclExports(mod, decl_index, exports); } const tracy = trace(@src()); @@ -2869,7 +2870,7 @@ pub fn updateDeclExports( const gpa = self.base.allocator; - const decl = module.declPtr(decl_index); + const decl = mod.declPtr(decl_index); const atom_index = try self.getOrCreateAtomForDecl(decl_index); const atom = self.getAtom(atom_index); const decl_sym = atom.getSymbol(self); @@ -2881,10 +2882,10 @@ pub fn updateDeclExports( for (exports) |exp| { if (exp.options.section) |section_name| { if (!mem.eql(u8, section_name, ".text")) { - try module.failed_exports.ensureUnusedCapacity(module.gpa, 1); - module.failed_exports.putAssumeCapacityNoClobber( + try mod.failed_exports.ensureUnusedCapacity(mod.gpa, 1); + mod.failed_exports.putAssumeCapacityNoClobber( exp, - try Module.ErrorMsg.create(self.base.allocator, decl.srcLoc(), "Unimplemented: ExportOptions.section", .{}), + try Module.ErrorMsg.create(self.base.allocator, decl.srcLoc(mod), "Unimplemented: ExportOptions.section", .{}), ); continue; } @@ -2900,10 +2901,10 @@ pub fn updateDeclExports( }, .Weak => elf.STB_WEAK, .LinkOnce => { - try module.failed_exports.ensureUnusedCapacity(module.gpa, 1); - module.failed_exports.putAssumeCapacityNoClobber( + try mod.failed_exports.ensureUnusedCapacity(mod.gpa, 1); + mod.failed_exports.putAssumeCapacityNoClobber( exp, - try Module.ErrorMsg.create(self.base.allocator, decl.srcLoc(), "Unimplemented: GlobalLinkage.LinkOnce", .{}), + try Module.ErrorMsg.create(self.base.allocator, decl.srcLoc(mod), "Unimplemented: GlobalLinkage.LinkOnce", .{}), ); continue; }, diff --git a/src/link/MachO.zig b/src/link/MachO.zig index 306661c5c5..06f79cf3fb 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -1847,18 +1847,18 @@ fn addStubEntry(self: *MachO, target: SymbolWithLoc) !void { self.markRelocsDirtyByTarget(target); } -pub fn updateFunc(self: *MachO, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { 
+pub fn updateFunc(self: *MachO, mod: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { if (build_options.skip_non_native and builtin.object_format != .macho) { @panic("Attempted to compile for object format that was disabled by build configuration"); } if (build_options.have_llvm) { - if (self.llvm_object) |llvm_object| return llvm_object.updateFunc(module, func, air, liveness); + if (self.llvm_object) |llvm_object| return llvm_object.updateFunc(mod, func, air, liveness); } const tracy = trace(@src()); defer tracy.end(); const decl_index = func.owner_decl; - const decl = module.declPtr(decl_index); + const decl = mod.declPtr(decl_index); const atom_index = try self.getOrCreateAtomForDecl(decl_index); self.freeUnnamedConsts(decl_index); @@ -1868,23 +1868,23 @@ pub fn updateFunc(self: *MachO, module: *Module, func: *Module.Fn, air: Air, liv defer code_buffer.deinit(); var decl_state = if (self.d_sym) |*d_sym| - try d_sym.dwarf.initDeclState(module, decl_index) + try d_sym.dwarf.initDeclState(mod, decl_index) else null; defer if (decl_state) |*ds| ds.deinit(); const res = if (decl_state) |*ds| - try codegen.generateFunction(&self.base, decl.srcLoc(), func, air, liveness, &code_buffer, .{ + try codegen.generateFunction(&self.base, decl.srcLoc(mod), func, air, liveness, &code_buffer, .{ .dwarf = ds, }) else - try codegen.generateFunction(&self.base, decl.srcLoc(), func, air, liveness, &code_buffer, .none); + try codegen.generateFunction(&self.base, decl.srcLoc(mod), func, air, liveness, &code_buffer, .none); var code = switch (res) { .ok => code_buffer.items, .fail => |em| { decl.analysis = .codegen_failure; - try module.failed_decls.put(module.gpa, decl_index, em); + try mod.failed_decls.put(mod.gpa, decl_index, em); return; }, }; @@ -1893,7 +1893,7 @@ pub fn updateFunc(self: *MachO, module: *Module, func: *Module.Fn, air: Air, liv if (decl_state) |*ds| { try self.d_sym.?.dwarf.commitDeclState( - module, + mod, decl_index, addr, self.getAtom(atom_index).size, @@ -1903,7 +1903,7 @@ pub fn updateFunc(self: *MachO, module: *Module, func: *Module.Fn, air: Air, liv // Since we updated the vaddr and the size, each corresponding export symbol also // needs to be updated. 
- try self.updateDeclExports(module, decl_index, module.getDeclExports(decl_index)); + try self.updateDeclExports(mod, decl_index, mod.getDeclExports(decl_index)); } pub fn lowerUnnamedConst(self: *MachO, typed_value: TypedValue, decl_index: Module.Decl.Index) !u32 { @@ -1912,15 +1912,15 @@ pub fn lowerUnnamedConst(self: *MachO, typed_value: TypedValue, decl_index: Modu var code_buffer = std.ArrayList(u8).init(gpa); defer code_buffer.deinit(); - const module = self.base.options.module.?; + const mod = self.base.options.module.?; const gop = try self.unnamed_const_atoms.getOrPut(gpa, decl_index); if (!gop.found_existing) { gop.value_ptr.* = .{}; } const unnamed_consts = gop.value_ptr; - const decl = module.declPtr(decl_index); - const decl_name = try decl.getFullyQualifiedName(module); + const decl = mod.declPtr(decl_index); + const decl_name = try decl.getFullyQualifiedName(mod); defer gpa.free(decl_name); const name_str_index = blk: { @@ -1935,20 +1935,19 @@ pub fn lowerUnnamedConst(self: *MachO, typed_value: TypedValue, decl_index: Modu const atom_index = try self.createAtom(); - const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), typed_value, &code_buffer, .none, .{ + const res = try codegen.generateSymbol(&self.base, decl.srcLoc(mod), typed_value, &code_buffer, .none, .{ .parent_atom_index = self.getAtom(atom_index).getSymbolIndex().?, }); var code = switch (res) { .ok => code_buffer.items, .fail => |em| { decl.analysis = .codegen_failure; - try module.failed_decls.put(module.gpa, decl_index, em); + try mod.failed_decls.put(mod.gpa, decl_index, em); log.err("{s}", .{em.msg}); return error.CodegenFail; }, }; - const mod = self.base.options.module.?; const required_alignment = typed_value.ty.abiAlignment(mod); const atom = self.getAtomPtr(atom_index); atom.size = code.len; @@ -1972,17 +1971,17 @@ pub fn lowerUnnamedConst(self: *MachO, typed_value: TypedValue, decl_index: Modu return atom.getSymbolIndex().?; } -pub fn updateDecl(self: *MachO, module: *Module, decl_index: Module.Decl.Index) !void { +pub fn updateDecl(self: *MachO, mod: *Module, decl_index: Module.Decl.Index) !void { if (build_options.skip_non_native and builtin.object_format != .macho) { @panic("Attempted to compile for object format that was disabled by build configuration"); } if (build_options.have_llvm) { - if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(module, decl_index); + if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(mod, decl_index); } const tracy = trace(@src()); defer tracy.end(); - const decl = module.declPtr(decl_index); + const decl = mod.declPtr(decl_index); if (decl.val.tag() == .extern_fn) { return; // TODO Should we do more when front-end analyzed extern decl? 
@@ -1998,7 +1997,7 @@ pub fn updateDecl(self: *MachO, module: *Module, decl_index: Module.Decl.Index) payload.data.is_threadlocal and !self.base.options.single_threaded else false; - if (is_threadlocal) return self.updateThreadlocalVariable(module, decl_index); + if (is_threadlocal) return self.updateThreadlocalVariable(mod, decl_index); const atom_index = try self.getOrCreateAtomForDecl(decl_index); const sym_index = self.getAtom(atom_index).getSymbolIndex().?; @@ -2008,14 +2007,14 @@ pub fn updateDecl(self: *MachO, module: *Module, decl_index: Module.Decl.Index) defer code_buffer.deinit(); var decl_state: ?Dwarf.DeclState = if (self.d_sym) |*d_sym| - try d_sym.dwarf.initDeclState(module, decl_index) + try d_sym.dwarf.initDeclState(mod, decl_index) else null; defer if (decl_state) |*ds| ds.deinit(); const decl_val = if (decl.val.castTag(.variable)) |payload| payload.data.init else decl.val; const res = if (decl_state) |*ds| - try codegen.generateSymbol(&self.base, decl.srcLoc(), .{ + try codegen.generateSymbol(&self.base, decl.srcLoc(mod), .{ .ty = decl.ty, .val = decl_val, }, &code_buffer, .{ @@ -2024,7 +2023,7 @@ pub fn updateDecl(self: *MachO, module: *Module, decl_index: Module.Decl.Index) .parent_atom_index = sym_index, }) else - try codegen.generateSymbol(&self.base, decl.srcLoc(), .{ + try codegen.generateSymbol(&self.base, decl.srcLoc(mod), .{ .ty = decl.ty, .val = decl_val, }, &code_buffer, .none, .{ @@ -2035,7 +2034,7 @@ pub fn updateDecl(self: *MachO, module: *Module, decl_index: Module.Decl.Index) .ok => code_buffer.items, .fail => |em| { decl.analysis = .codegen_failure; - try module.failed_decls.put(module.gpa, decl_index, em); + try mod.failed_decls.put(mod.gpa, decl_index, em); return; }, }; @@ -2043,7 +2042,7 @@ pub fn updateDecl(self: *MachO, module: *Module, decl_index: Module.Decl.Index) if (decl_state) |*ds| { try self.d_sym.?.dwarf.commitDeclState( - module, + mod, decl_index, addr, self.getAtom(atom_index).size, @@ -2053,7 +2052,7 @@ pub fn updateDecl(self: *MachO, module: *Module, decl_index: Module.Decl.Index) // Since we updated the vaddr and the size, each corresponding export symbol also // needs to be updated. 
- try self.updateDeclExports(module, decl_index, module.getDeclExports(decl_index)); + try self.updateDeclExports(mod, decl_index, mod.getDeclExports(decl_index)); } fn updateLazySymbolAtom( @@ -2082,8 +2081,8 @@ fn updateLazySymbolAtom( const atom = self.getAtomPtr(atom_index); const local_sym_index = atom.getSymbolIndex().?; - const src = if (sym.ty.getOwnerDeclOrNull()) |owner_decl| - mod.declPtr(owner_decl).srcLoc() + const src = if (sym.ty.getOwnerDeclOrNull(mod)) |owner_decl| + mod.declPtr(owner_decl).srcLoc(mod) else Module.SrcLoc{ .file_scope = undefined, @@ -2127,7 +2126,8 @@ fn updateLazySymbolAtom( } pub fn getOrCreateAtomForLazySymbol(self: *MachO, sym: File.LazySymbol) !Atom.Index { - const gop = try self.lazy_syms.getOrPut(self.base.allocator, sym.getDecl()); + const mod = self.base.options.module.?; + const gop = try self.lazy_syms.getOrPut(self.base.allocator, sym.getDecl(mod)); errdefer _ = if (!gop.found_existing) self.lazy_syms.pop(); if (!gop.found_existing) gop.value_ptr.* = .{}; const metadata: struct { atom: *Atom.Index, state: *LazySymbolMetadata.State } = switch (sym.kind) { @@ -2145,7 +2145,7 @@ pub fn getOrCreateAtomForLazySymbol(self: *MachO, sym: File.LazySymbol) !Atom.In metadata.state.* = .pending_flush; const atom = metadata.atom.*; // anyerror needs to be deferred until flushModule - if (sym.getDecl() != .none) try self.updateLazySymbolAtom(sym, atom, switch (sym.kind) { + if (sym.getDecl(mod) != .none) try self.updateLazySymbolAtom(sym, atom, switch (sym.kind) { .code => self.text_section_index.?, .const_data => self.data_const_section_index.?, }); @@ -2179,7 +2179,7 @@ fn updateThreadlocalVariable(self: *MachO, module: *Module, decl_index: Module.D const decl_metadata = self.decls.get(decl_index).?; const decl_val = decl.val.castTag(.variable).?.data.init; const res = if (decl_state) |*ds| - try codegen.generateSymbol(&self.base, decl.srcLoc(), .{ + try codegen.generateSymbol(&self.base, decl.srcLoc(mod), .{ .ty = decl.ty, .val = decl_val, }, &code_buffer, .{ @@ -2188,7 +2188,7 @@ fn updateThreadlocalVariable(self: *MachO, module: *Module, decl_index: Module.D .parent_atom_index = init_sym_index, }) else - try codegen.generateSymbol(&self.base, decl.srcLoc(), .{ + try codegen.generateSymbol(&self.base, decl.srcLoc(mod), .{ .ty = decl.ty, .val = decl_val, }, &code_buffer, .none, .{ @@ -2379,7 +2379,7 @@ pub fn updateDeclLineNumber(self: *MachO, module: *Module, decl_index: Module.De pub fn updateDeclExports( self: *MachO, - module: *Module, + mod: *Module, decl_index: Module.Decl.Index, exports: []const *Module.Export, ) File.UpdateDeclExportsError!void { @@ -2388,7 +2388,7 @@ pub fn updateDeclExports( } if (build_options.have_llvm) { if (self.llvm_object) |llvm_object| - return llvm_object.updateDeclExports(module, decl_index, exports); + return llvm_object.updateDeclExports(mod, decl_index, exports); } const tracy = trace(@src()); @@ -2396,7 +2396,7 @@ pub fn updateDeclExports( const gpa = self.base.allocator; - const decl = module.declPtr(decl_index); + const decl = mod.declPtr(decl_index); const atom_index = try self.getOrCreateAtomForDecl(decl_index); const atom = self.getAtom(atom_index); const decl_sym = atom.getSymbol(self); @@ -2410,12 +2410,12 @@ pub fn updateDeclExports( if (exp.options.section) |section_name| { if (!mem.eql(u8, section_name, "__text")) { - try module.failed_exports.putNoClobber( - module.gpa, + try mod.failed_exports.putNoClobber( + mod.gpa, exp, try Module.ErrorMsg.create( gpa, - decl.srcLoc(), + decl.srcLoc(mod), 
"Unimplemented: ExportOptions.section", .{}, ), @@ -2425,12 +2425,12 @@ pub fn updateDeclExports( } if (exp.options.linkage == .LinkOnce) { - try module.failed_exports.putNoClobber( - module.gpa, + try mod.failed_exports.putNoClobber( + mod.gpa, exp, try Module.ErrorMsg.create( gpa, - decl.srcLoc(), + decl.srcLoc(mod), "Unimplemented: GlobalLinkage.LinkOnce", .{}, ), @@ -2474,9 +2474,9 @@ pub fn updateDeclExports( // TODO: this needs rethinking const global = self.getGlobal(exp_name).?; if (sym_loc.sym_index != global.sym_index and global.file != null) { - _ = try module.failed_exports.put(module.gpa, exp, try Module.ErrorMsg.create( + _ = try mod.failed_exports.put(mod.gpa, exp, try Module.ErrorMsg.create( gpa, - decl.srcLoc(), + decl.srcLoc(mod), \\LinkError: symbol '{s}' defined multiple times , .{exp_name}, diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig index 7a389a789d..968cbb0e7e 100644 --- a/src/link/Plan9.zig +++ b/src/link/Plan9.zig @@ -213,14 +213,14 @@ fn putFn(self: *Plan9, decl_index: Module.Decl.Index, out: FnDeclOutput) !void { const gpa = self.base.allocator; const mod = self.base.options.module.?; const decl = mod.declPtr(decl_index); - const fn_map_res = try self.fn_decl_table.getOrPut(gpa, decl.getFileScope()); + const fn_map_res = try self.fn_decl_table.getOrPut(gpa, decl.getFileScope(mod)); if (fn_map_res.found_existing) { if (try fn_map_res.value_ptr.functions.fetchPut(gpa, decl_index, out)) |old_entry| { gpa.free(old_entry.value.code); gpa.free(old_entry.value.lineinfo); } } else { - const file = decl.getFileScope(); + const file = decl.getFileScope(mod); const arena = self.path_arena.allocator(); // each file gets a symbol fn_map_res.value_ptr.* = .{ @@ -276,13 +276,13 @@ fn addPathComponents(self: *Plan9, path: []const u8, a: *std.ArrayList(u8)) !voi } } -pub fn updateFunc(self: *Plan9, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { +pub fn updateFunc(self: *Plan9, mod: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { if (build_options.skip_non_native and builtin.object_format != .plan9) { @panic("Attempted to compile for object format that was disabled by build configuration"); } const decl_index = func.owner_decl; - const decl = module.declPtr(decl_index); + const decl = mod.declPtr(decl_index); self.freeUnnamedConsts(decl_index); _ = try self.seeDecl(decl_index); @@ -298,7 +298,7 @@ pub fn updateFunc(self: *Plan9, module: *Module, func: *Module.Fn, air: Air, liv const res = try codegen.generateFunction( &self.base, - decl.srcLoc(), + decl.srcLoc(mod), func, air, liveness, @@ -316,7 +316,7 @@ pub fn updateFunc(self: *Plan9, module: *Module, func: *Module.Fn, air: Air, liv .ok => try code_buffer.toOwnedSlice(), .fail => |em| { decl.analysis = .codegen_failure; - try module.failed_decls.put(module.gpa, decl_index, em); + try mod.failed_decls.put(mod.gpa, decl_index, em); return; }, }; @@ -366,7 +366,7 @@ pub fn lowerUnnamedConst(self: *Plan9, tv: TypedValue, decl_index: Module.Decl.I }; self.syms.items[info.sym_index.?] 
= sym; - const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), tv, &code_buffer, .{ + const res = try codegen.generateSymbol(&self.base, decl.srcLoc(mod), tv, &code_buffer, .{ .none = {}, }, .{ .parent_atom_index = @enumToInt(decl_index), @@ -388,8 +388,8 @@ pub fn lowerUnnamedConst(self: *Plan9, tv: TypedValue, decl_index: Module.Decl.I return @intCast(u32, info.got_index.?); } -pub fn updateDecl(self: *Plan9, module: *Module, decl_index: Module.Decl.Index) !void { - const decl = module.declPtr(decl_index); +pub fn updateDecl(self: *Plan9, mod: *Module, decl_index: Module.Decl.Index) !void { + const decl = mod.declPtr(decl_index); if (decl.val.tag() == .extern_fn) { return; // TODO Should we do more when front-end analyzed extern decl? @@ -409,7 +409,7 @@ pub fn updateDecl(self: *Plan9, module: *Module, decl_index: Module.Decl.Index) defer code_buffer.deinit(); const decl_val = if (decl.val.castTag(.variable)) |payload| payload.data.init else decl.val; // TODO we need the symbol index for symbol in the table of locals for the containing atom - const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), .{ + const res = try codegen.generateSymbol(&self.base, decl.srcLoc(mod), .{ .ty = decl.ty, .val = decl_val, }, &code_buffer, .{ .none = {} }, .{ @@ -419,7 +419,7 @@ pub fn updateDecl(self: *Plan9, module: *Module, decl_index: Module.Decl.Index) .ok => code_buffer.items, .fail => |em| { decl.analysis = .codegen_failure; - try module.failed_decls.put(module.gpa, decl_index, em); + try mod.failed_decls.put(mod.gpa, decl_index, em); return; }, }; @@ -707,7 +707,7 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No const code = blk: { const is_fn = source_decl.ty.zigTypeTag(mod) == .Fn; if (is_fn) { - const table = self.fn_decl_table.get(source_decl.getFileScope()).?.functions; + const table = self.fn_decl_table.get(source_decl.getFileScope(mod)).?.functions; const output = table.get(source_decl_index).?; break :blk output.code; } else { @@ -729,7 +729,7 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No } fn addDeclExports( self: *Plan9, - module: *Module, + mod: *Module, decl_index: Module.Decl.Index, exports: []const *Module.Export, ) !void { @@ -740,9 +740,9 @@ fn addDeclExports( // plan9 does not support custom sections if (exp.options.section) |section_name| { if (!mem.eql(u8, section_name, ".text") or !mem.eql(u8, section_name, ".data")) { - try module.failed_exports.put(module.gpa, exp, try Module.ErrorMsg.create( + try mod.failed_exports.put(mod.gpa, exp, try Module.ErrorMsg.create( self.base.allocator, - module.declPtr(decl_index).srcLoc(), + mod.declPtr(decl_index).srcLoc(mod), "plan9 does not support extra sections", .{}, )); @@ -773,7 +773,7 @@ pub fn freeDecl(self: *Plan9, decl_index: Module.Decl.Index) void { const decl = mod.declPtr(decl_index); const is_fn = (decl.val.tag() == .function); if (is_fn) { - var symidx_and_submap = self.fn_decl_table.get(decl.getFileScope()).?; + var symidx_and_submap = self.fn_decl_table.get(decl.getFileScope(mod)).?; var submap = symidx_and_submap.functions; if (submap.fetchSwapRemove(decl_index)) |removed_entry| { self.base.allocator.free(removed_entry.value.code); diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig index fb7ca3a87f..ddf5130fd2 100644 --- a/src/link/Wasm.zig +++ b/src/link/Wasm.zig @@ -1348,7 +1348,7 @@ pub fn updateFunc(wasm: *Wasm, mod: *Module, func: *Module.Fn, air: Air, livenes defer code_writer.deinit(); // const result = try 
codegen.generateFunction( // &wasm.base, - // decl.srcLoc(), + // decl.srcLoc(mod), // func, // air, // liveness, @@ -1357,7 +1357,7 @@ pub fn updateFunc(wasm: *Wasm, mod: *Module, func: *Module.Fn, air: Air, livenes // ); const result = try codegen.generateFunction( &wasm.base, - decl.srcLoc(), + decl.srcLoc(mod), func, air, liveness, @@ -1425,7 +1425,7 @@ pub fn updateDecl(wasm: *Wasm, mod: *Module, decl_index: Module.Decl.Index) !voi const res = try codegen.generateSymbol( &wasm.base, - decl.srcLoc(), + decl.srcLoc(mod), .{ .ty = decl.ty, .val = val }, &code_writer, .none, @@ -1554,7 +1554,7 @@ pub fn lowerUnnamedConst(wasm: *Wasm, tv: TypedValue, decl_index: Module.Decl.In const result = try codegen.generateSymbol( &wasm.base, - decl.srcLoc(), + decl.srcLoc(mod), tv, &value_bytes, .none, @@ -1693,7 +1693,7 @@ pub fn updateDeclExports( if (exp.options.section) |section| { try mod.failed_exports.putNoClobber(mod.gpa, exp, try Module.ErrorMsg.create( mod.gpa, - decl.srcLoc(), + decl.srcLoc(mod), "Unimplemented: ExportOptions.section '{s}'", .{section}, )); @@ -1712,7 +1712,7 @@ pub fn updateDeclExports( if (!exp_is_weak and !existing_sym.isWeak()) { try mod.failed_exports.put(mod.gpa, exp, try Module.ErrorMsg.create( mod.gpa, - decl.srcLoc(), + decl.srcLoc(mod), \\LinkError: symbol '{s}' defined multiple times \\ first definition in '{s}' \\ next definition in '{s}' @@ -1745,7 +1745,7 @@ pub fn updateDeclExports( .LinkOnce => { try mod.failed_exports.putNoClobber(mod.gpa, exp, try Module.ErrorMsg.create( mod.gpa, - decl.srcLoc(), + decl.srcLoc(mod), "Unimplemented: LinkOnce", .{}, )); diff --git a/src/type.zig b/src/type.zig index e7dad91422..2870b5616f 100644 --- a/src/type.zig +++ b/src/type.zig @@ -42,8 +42,6 @@ pub const Type = struct { .error_set_merged, => return .ErrorSet, - .@"opaque" => return .Opaque, - .function => return .Fn, .array, @@ -87,6 +85,7 @@ pub const Type = struct { .error_union_type => return .ErrorUnion, .struct_type => return .Struct, .union_type => return .Union, + .opaque_type => return .Opaque, .simple_type => |s| switch (s) { .f16, .f32, @@ -361,12 +360,6 @@ pub const Type = struct { return true; }, - .@"opaque" => { - const opaque_obj_a = a.castTag(.@"opaque").?.data; - const opaque_obj_b = (b.castTag(.@"opaque") orelse return false).data; - return opaque_obj_a == opaque_obj_b; - }, - .function => { if (b.zigTypeTag(mod) != .Fn) return false; @@ -649,12 +642,6 @@ pub const Type = struct { std.hash.autoHash(hasher, ies); }, - .@"opaque" => { - std.hash.autoHash(hasher, std.builtin.TypeId.Opaque); - const opaque_obj = ty.castTag(.@"opaque").?.data; - std.hash.autoHash(hasher, opaque_obj); - }, - .function => { std.hash.autoHash(hasher, std.builtin.TypeId.Fn); @@ -974,7 +961,6 @@ pub const Type = struct { .enum_simple => return self.copyPayloadShallow(allocator, Payload.EnumSimple), .enum_numbered => return self.copyPayloadShallow(allocator, Payload.EnumNumbered), .enum_full, .enum_nonexhaustive => return self.copyPayloadShallow(allocator, Payload.EnumFull), - .@"opaque" => return self.copyPayloadShallow(allocator, Payload.Opaque), } } @@ -1079,12 +1065,6 @@ pub const Type = struct { @tagName(t), enum_numbered.owner_decl, }); }, - .@"opaque" => { - const opaque_obj = ty.castTag(.@"opaque").?.data; - return writer.print("({s} decl={d})", .{ - @tagName(t), opaque_obj.owner_decl, - }); - }, .function => { const payload = ty.castTag(.function).?.data; @@ -1303,11 +1283,6 @@ pub const Type = struct { const decl = mod.declPtr(enum_numbered.owner_decl); try 
decl.renderFullyQualifiedName(mod, writer); }, - .@"opaque" => { - const opaque_obj = ty.cast(Payload.Opaque).?.data; - const decl = mod.declPtr(opaque_obj.owner_decl); - try decl.renderFullyQualifiedName(mod, writer); - }, .error_set_inferred => { const func = ty.castTag(.error_set_inferred).?.data.func; @@ -1575,6 +1550,10 @@ pub const Type = struct { .simple_type => |s| return writer.writeAll(@tagName(s)), .struct_type => @panic("TODO"), .union_type => @panic("TODO"), + .opaque_type => |opaque_type| { + const decl = mod.declPtr(opaque_type.decl); + try decl.renderFullyQualifiedName(mod, writer); + }, // values, not types .simple_value => unreachable, @@ -1622,7 +1601,6 @@ pub const Type = struct { .none => switch (ty.tag()) { .error_set_inferred, - .@"opaque", .error_set_single, .error_union, .error_set, @@ -1759,8 +1737,8 @@ pub const Type = struct { .inferred_alloc_const => unreachable, .inferred_alloc_mut => unreachable, }, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .int_type => |int_type| return int_type.bits != 0, + else => return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .int_type => |int_type| int_type.bits != 0, .ptr_type => |ptr_type| { // Pointers to zero-bit types still have a runtime address; however, pointers // to comptime-only types do not, with the exception of function pointers. @@ -1797,7 +1775,7 @@ pub const Type = struct { } }, .error_union_type => @panic("TODO"), - .simple_type => |t| return switch (t) { + .simple_type => |t| switch (t) { .f16, .f32, .f64, @@ -1848,6 +1826,7 @@ pub const Type = struct { }, .struct_type => @panic("TODO"), .union_type => @panic("TODO"), + .opaque_type => true, // values, not types .simple_value => unreachable, @@ -1876,7 +1855,6 @@ pub const Type = struct { .error_set_single, .error_set_inferred, .error_set_merged, - .@"opaque", // These are function bodies, not function pointers. 
.function, .enum_simple, @@ -1960,6 +1938,7 @@ pub const Type = struct { }, .struct_type => @panic("TODO"), .union_type => @panic("TODO"), + .opaque_type => false, // values, not types .simple_value => unreachable, @@ -2144,8 +2123,6 @@ pub const Type = struct { switch (ty.ip_index) { .empty_struct_type => return AbiAlignmentAdvanced{ .scalar = 0 }, .none => switch (ty.tag()) { - .@"opaque" => return AbiAlignmentAdvanced{ .scalar = 1 }, - // represents machine code; not a pointer .function => { const alignment = ty.castTag(.function).?.data.alignment; @@ -2362,6 +2339,7 @@ pub const Type = struct { }, .struct_type => @panic("TODO"), .union_type => @panic("TODO"), + .opaque_type => return AbiAlignmentAdvanced{ .scalar = 1 }, // values, not types .simple_value => unreachable, @@ -2536,7 +2514,6 @@ pub const Type = struct { .none => switch (ty.tag()) { .function => unreachable, // represents machine code; not a pointer - .@"opaque" => unreachable, // no size available .inferred_alloc_const => unreachable, .inferred_alloc_mut => unreachable, @@ -2777,6 +2754,7 @@ pub const Type = struct { }, .struct_type => @panic("TODO"), .union_type => @panic("TODO"), + .opaque_type => unreachable, // no size available // values, not types .simple_value => unreachable, @@ -2948,6 +2926,7 @@ pub const Type = struct { }, .struct_type => @panic("TODO"), .union_type => @panic("TODO"), + .opaque_type => unreachable, // values, not types .simple_value => unreachable, @@ -2965,7 +2944,6 @@ pub const Type = struct { .empty_struct => unreachable, .inferred_alloc_const => unreachable, .inferred_alloc_mut => unreachable, - .@"opaque" => unreachable, .@"struct" => { const struct_obj = ty.castTag(.@"struct").?.data; @@ -3806,6 +3784,7 @@ pub const Type = struct { .simple_type => unreachable, // handled via Index enum tag above .struct_type => @panic("TODO"), .union_type => unreachable, + .opaque_type => unreachable, // values, not types .simple_value => unreachable, @@ -4004,7 +3983,6 @@ pub const Type = struct { .function, .array_sentinel, .error_set_inferred, - .@"opaque", .anyframe_T, .pointer, => return null, @@ -4182,6 +4160,7 @@ pub const Type = struct { }, .struct_type => @panic("TODO"), .union_type => @panic("TODO"), + .opaque_type => return null, // values, not types .simple_value => unreachable, @@ -4208,7 +4187,6 @@ pub const Type = struct { .error_set_single, .error_set_inferred, .error_set_merged, - .@"opaque", .enum_simple, => false, @@ -4350,6 +4328,7 @@ pub const Type = struct { }, .struct_type => @panic("TODO"), .union_type => @panic("TODO"), + .opaque_type => false, // values, not types .simple_value => unreachable, @@ -4399,21 +4378,31 @@ pub const Type = struct { } /// Returns null if the type has no namespace. 
- pub fn getNamespace(self: Type) ?*Module.Namespace { - return switch (self.tag()) { - .@"struct" => &self.castTag(.@"struct").?.data.namespace, - .enum_full => &self.castTag(.enum_full).?.data.namespace, - .enum_nonexhaustive => &self.castTag(.enum_nonexhaustive).?.data.namespace, - .empty_struct => self.castTag(.empty_struct).?.data, - .@"opaque" => &self.castTag(.@"opaque").?.data.namespace, - .@"union" => &self.castTag(.@"union").?.data.namespace, - .union_safety_tagged => &self.castTag(.union_safety_tagged).?.data.namespace, - .union_tagged => &self.castTag(.union_tagged).?.data.namespace, + pub fn getNamespaceIndex(ty: Type, mod: *Module) Module.Namespace.OptionalIndex { + return switch (ty.ip_index) { + .none => switch (ty.tag()) { + .@"struct" => ty.castTag(.@"struct").?.data.namespace.toOptional(), + .enum_full => ty.castTag(.enum_full).?.data.namespace.toOptional(), + .enum_nonexhaustive => ty.castTag(.enum_nonexhaustive).?.data.namespace.toOptional(), + .empty_struct => @panic("TODO"), + .@"union" => ty.castTag(.@"union").?.data.namespace.toOptional(), + .union_safety_tagged => ty.castTag(.union_safety_tagged).?.data.namespace.toOptional(), + .union_tagged => ty.castTag(.union_tagged).?.data.namespace.toOptional(), - else => null, + else => .none, + }, + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .opaque_type => |opaque_type| opaque_type.namespace.toOptional(), + else => .none, + }, }; } + /// Returns null if the type has no namespace. + pub fn getNamespace(ty: Type, mod: *Module) ?*Module.Namespace { + return if (getNamespaceIndex(ty, mod).unwrap()) |i| mod.namespacePtr(i) else null; + } + // Works for vectors and vectors of integers. pub fn minInt(ty: Type, arena: Allocator, mod: *Module) !Value { const scalar = try minIntScalar(ty.scalarType(mod), mod); @@ -4911,78 +4900,81 @@ pub const Type = struct { } pub fn declSrcLocOrNull(ty: Type, mod: *Module) ?Module.SrcLoc { - if (ty.ip_index != .none) switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .struct_type => @panic("TODO"), - .union_type => @panic("TODO"), - else => return null, - }; - switch (ty.tag()) { - .enum_full, .enum_nonexhaustive => { - const enum_full = ty.cast(Payload.EnumFull).?.data; - return enum_full.srcLoc(mod); - }, - .enum_numbered => { - const enum_numbered = ty.castTag(.enum_numbered).?.data; - return enum_numbered.srcLoc(mod); - }, - .enum_simple => { - const enum_simple = ty.castTag(.enum_simple).?.data; - return enum_simple.srcLoc(mod); - }, - .@"struct" => { - const struct_obj = ty.castTag(.@"struct").?.data; - return struct_obj.srcLoc(mod); - }, - .error_set => { - const error_set = ty.castTag(.error_set).?.data; - return error_set.srcLoc(mod); - }, - .@"union", .union_safety_tagged, .union_tagged => { - const union_obj = ty.cast(Payload.Union).?.data; - return union_obj.srcLoc(mod); + switch (ty.ip_index) { + .none => switch (ty.tag()) { + .enum_full, .enum_nonexhaustive => { + const enum_full = ty.cast(Payload.EnumFull).?.data; + return enum_full.srcLoc(mod); + }, + .enum_numbered => { + const enum_numbered = ty.castTag(.enum_numbered).?.data; + return enum_numbered.srcLoc(mod); + }, + .enum_simple => { + const enum_simple = ty.castTag(.enum_simple).?.data; + return enum_simple.srcLoc(mod); + }, + .@"struct" => { + const struct_obj = ty.castTag(.@"struct").?.data; + return struct_obj.srcLoc(mod); + }, + .error_set => { + const error_set = ty.castTag(.error_set).?.data; + return error_set.srcLoc(mod); + }, + .@"union", .union_safety_tagged, .union_tagged => { + const union_obj 
= ty.cast(Payload.Union).?.data; + return union_obj.srcLoc(mod); + }, + + else => return null, }, - .@"opaque" => { - const opaque_obj = ty.cast(Payload.Opaque).?.data; - return opaque_obj.srcLoc(mod); + else => return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .struct_type => @panic("TODO"), + .union_type => @panic("TODO"), + .opaque_type => |opaque_type| mod.opaqueSrcLoc(opaque_type), + else => null, }, - - else => return null, } } - pub fn getOwnerDecl(ty: Type) Module.Decl.Index { - return ty.getOwnerDeclOrNull() orelse unreachable; + pub fn getOwnerDecl(ty: Type, mod: *Module) Module.Decl.Index { + return ty.getOwnerDeclOrNull(mod) orelse unreachable; } - pub fn getOwnerDeclOrNull(ty: Type) ?Module.Decl.Index { - switch (ty.tag()) { - .enum_full, .enum_nonexhaustive => { - const enum_full = ty.cast(Payload.EnumFull).?.data; - return enum_full.owner_decl; - }, - .enum_numbered => return ty.castTag(.enum_numbered).?.data.owner_decl, - .enum_simple => { - const enum_simple = ty.castTag(.enum_simple).?.data; - return enum_simple.owner_decl; - }, - .@"struct" => { - const struct_obj = ty.castTag(.@"struct").?.data; - return struct_obj.owner_decl; - }, - .error_set => { - const error_set = ty.castTag(.error_set).?.data; - return error_set.owner_decl; - }, - .@"union", .union_safety_tagged, .union_tagged => { - const union_obj = ty.cast(Payload.Union).?.data; - return union_obj.owner_decl; + pub fn getOwnerDeclOrNull(ty: Type, mod: *Module) ?Module.Decl.Index { + switch (ty.ip_index) { + .none => switch (ty.tag()) { + .enum_full, .enum_nonexhaustive => { + const enum_full = ty.cast(Payload.EnumFull).?.data; + return enum_full.owner_decl; + }, + .enum_numbered => return ty.castTag(.enum_numbered).?.data.owner_decl, + .enum_simple => { + const enum_simple = ty.castTag(.enum_simple).?.data; + return enum_simple.owner_decl; + }, + .@"struct" => { + const struct_obj = ty.castTag(.@"struct").?.data; + return struct_obj.owner_decl; + }, + .error_set => { + const error_set = ty.castTag(.error_set).?.data; + return error_set.owner_decl; + }, + .@"union", .union_safety_tagged, .union_tagged => { + const union_obj = ty.cast(Payload.Union).?.data; + return union_obj.owner_decl; + }, + + else => return null, }, - .@"opaque" => { - const opaque_obj = ty.cast(Payload.Opaque).?.data; - return opaque_obj.owner_decl; + else => return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .struct_type => @panic("TODO"), + .union_type => @panic("TODO"), + .opaque_type => |opaque_type| opaque_type.decl, + else => null, }, - - else => return null, } } @@ -5022,7 +5014,6 @@ pub const Type = struct { error_set_inferred, error_set_merged, empty_struct, - @"opaque", @"struct", @"union", union_safety_tagged, @@ -5055,7 +5046,6 @@ pub const Type = struct { .function => Payload.Function, .error_union => Payload.ErrorUnion, .error_set_single => Payload.Name, - .@"opaque" => Payload.Opaque, .@"struct" => Payload.Struct, .@"union", .union_safety_tagged, .union_tagged => Payload.Union, .enum_full, .enum_nonexhaustive => Payload.EnumFull, @@ -5336,11 +5326,6 @@ pub const Type = struct { data: *Module.Namespace, }; - pub const Opaque = struct { - base: Payload = .{ .tag = .@"opaque" }, - data: *Module.Opaque, - }; - pub const Struct = struct { base: Payload = .{ .tag = .@"struct" }, data: *Module.Struct, -- cgit v1.2.3 From 8297f28546b44afe49bec074733f05e03a3c0e62 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 10 May 2023 12:16:24 -0700 Subject: stage2: move struct types and aggregate values to InternPool --- 
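Note on the encoding introduced here: the subtle part of this change is how
`Key.StructType` maps onto the new `type_struct` and `type_struct_ns` item
tags. A minimal sketch of the tag selection, written against the declarations
added below; the helper name `structTypeTag` is hypothetical and only mirrors
the branch in `get()`:

    // Zig sketch; not part of the patch itself.
    // OptionalIndex is an enum, so comparing against .none is valid.
    fn structTypeTag(st: Key.StructType) Tag {
        if (st.index != .none) return .type_struct; // has a Module.Struct: data is its index
        if (st.namespace != .none) return .type_struct_ns; // namespace only, no fields
        return .type_struct; // @TypeOf(.{}): data is Module.Struct.OptionalIndex.none
    }

This is also why `indexToKey` decodes a `type_struct` item whose data is the
`none` optional index back to the `@TypeOf(.{})` struct type
(`Index.empty_struct_type`).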
src/InternPool.zig | 296 ++++++++--- src/Module.zig | 179 ++++--- src/Sema.zig | 642 ++++++++++++----------- src/TypedValue.zig | 20 +- src/arch/aarch64/CodeGen.zig | 6 +- src/arch/aarch64/abi.zig | 12 +- src/arch/arm/CodeGen.zig | 6 +- src/arch/arm/abi.zig | 12 +- src/arch/riscv64/abi.zig | 4 +- src/arch/sparc64/CodeGen.zig | 4 +- src/arch/wasm/CodeGen.zig | 31 +- src/arch/wasm/abi.zig | 18 +- src/arch/x86_64/CodeGen.zig | 34 +- src/arch/x86_64/abi.zig | 8 +- src/codegen.zig | 6 +- src/codegen/c.zig | 228 ++++---- src/codegen/c/type.zig | 50 +- src/codegen/llvm.zig | 54 +- src/codegen/spirv.zig | 8 +- src/link/Dwarf.zig | 4 +- src/type.zig | 1183 +++++++++++++++++++++--------------------- src/value.zig | 45 +- 22 files changed, 1570 insertions(+), 1280 deletions(-) (limited to 'src/arch') diff --git a/src/InternPool.zig b/src/InternPool.zig index 3708e21ef6..315865c966 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -1,5 +1,10 @@ //! All interned objects have both a value and a type. +//! This data structure is self-contained, with the following exceptions: +//! * type_struct via Module.Struct.Index +//! * type_opaque via Module.Namespace.Index and Module.Decl.Index +/// Maps `Key` to `Index`. `Key` objects are not stored anywhere; they are +/// constructed lazily. map: std.AutoArrayHashMapUnmanaged(void, void) = .{}, items: std.MultiArrayList(Item) = .{}, extra: std.ArrayListUnmanaged(u32) = .{}, @@ -9,6 +14,13 @@ extra: std.ArrayListUnmanaged(u32) = .{}, /// violate the above mechanism. limbs: std.ArrayListUnmanaged(u64) = .{}, +/// Struct objects are stored in this data structure because: +/// * They contain pointers such as the field maps. +/// * They need to be mutated after creation. +allocated_structs: std.SegmentedList(Module.Struct, 0) = .{}, +/// When a Struct object is freed from `allocated_structs`, it is pushed into this stack. +structs_free_list: std.ArrayListUnmanaged(Module.Struct.Index) = .{}, + const std = @import("std"); const Allocator = std.mem.Allocator; const assert = std.debug.assert; @@ -17,8 +29,7 @@ const BigIntMutable = std.math.big.int.Mutable; const Limb = std.math.big.Limb; const InternPool = @This(); -const DeclIndex = @import("Module.zig").Decl.Index; -const NamespaceIndex = @import("Module.zig").Namespace.Index; +const Module = @import("Module.zig"); const KeyAdapter = struct { intern_pool: *const InternPool, @@ -45,11 +56,20 @@ pub const Key = union(enum) { payload_type: Index, }, simple_type: SimpleType, + /// If `empty_struct_type` is handled separately, then this value may be + /// safely assumed to never be `none`. + struct_type: StructType, + union_type: struct { + fields_len: u32, + // TODO move Module.Union data to InternPool + }, + opaque_type: OpaqueType, + simple_value: SimpleValue, extern_func: struct { ty: Index, /// The Decl that corresponds to the function itself. - decl: DeclIndex, + decl: Module.Decl.Index, /// Library name if specified. /// For example `extern "c" fn write(...) usize` would have 'c' as library name. /// Index into the string table bytes. @@ -62,13 +82,11 @@ pub const Key = union(enum) { ty: Index, tag: BigIntConst, }, - struct_type: StructType, - opaque_type: OpaqueType, - - union_type: struct { - fields_len: u32, - // TODO move Module.Union data to InternPool - }, + /// An instance of a struct, array, or vector. + /// Each element/field stored as an `Index`. 
+    /// In the case of sentinel-terminated arrays, the sentinel value *is* stored,
+    /// so the slice length will be one more than the type's array length.
+    aggregate: Aggregate,
 
     pub const IntType = std.builtin.Type.Int;
 
@@ -113,16 +131,27 @@ pub const Key = union(enum) {
         child: Index,
     };
 
-    pub const StructType = struct {
-        fields_len: u32,
-        // TODO move Module.Struct data to InternPool
-    };
-
     pub const OpaqueType = struct {
         /// The Decl that corresponds to the opaque itself.
-        decl: DeclIndex,
+        decl: Module.Decl.Index,
         /// Represents the declarations inside this opaque.
-        namespace: NamespaceIndex,
+        namespace: Module.Namespace.Index,
+    };
+
+    /// There are three possibilities here:
+    /// * `@TypeOf(.{})` (untyped empty struct literal)
+    ///   - namespace == .none, index == .none
+    /// * A struct which has a namespace, but no fields.
+    ///   - index == .none
+    /// * A struct which has fields as well as a namespace.
+    pub const StructType = struct {
+        /// This will be `none` only in the case of `@TypeOf(.{})`
+        /// (`Index.empty_struct_type`).
+        namespace: Module.Namespace.OptionalIndex,
+        /// The `none` tag is used to represent two cases:
+        /// * `@TypeOf(.{})`, in which case `namespace` will also be `none`.
+        /// * A struct with no fields, in which case `namespace` will be populated.
+        index: Module.Struct.OptionalIndex,
     };
 
     pub const Int = struct {
@@ -156,18 +185,24 @@ pub const Key = union(enum) {
         addr: Addr,
 
         pub const Addr = union(enum) {
-            decl: DeclIndex,
+            decl: Module.Decl.Index,
             int: Index,
         };
     };
 
     /// `null` is represented by the `val` field being `none`.
     pub const Opt = struct {
+        /// This is the optional type; not the payload type.
         ty: Index,
         /// This could be `none`, indicating the optional is `null`.
         val: Index,
     };
 
+    pub const Aggregate = struct {
+        ty: Index,
+        fields: []const Index,
+    };
+
     pub fn hash32(key: Key) u32 {
         return @truncate(u32, key.hash64());
     }
@@ -193,8 +228,15 @@ pub const Key = union(enum) {
             .simple_value,
             .extern_func,
             .opt,
+            .struct_type,
             => |info| std.hash.autoHash(hasher, info),
 
+            .union_type => |union_type| {
+                _ = union_type;
+                @panic("TODO");
+            },
+
+            .opaque_type => |opaque_type| std.hash.autoHash(hasher, opaque_type.decl),
+
             .int => |int| {
                 // Canonicalize all integers by converting them to BigIntConst.
var buffer: Key.Int.Storage.BigIntSpace = undefined; @@ -221,16 +263,10 @@ pub const Key = union(enum) { for (enum_tag.tag.limbs) |limb| std.hash.autoHash(hasher, limb); }, - .struct_type => |struct_type| { - if (struct_type.fields_len != 0) { - @panic("TODO"); - } - }, - .union_type => |union_type| { - _ = union_type; - @panic("TODO"); + .aggregate => |aggregate| { + std.hash.autoHash(hasher, aggregate.ty); + for (aggregate.fields) |field| std.hash.autoHash(hasher, field); }, - .opaque_type => |opaque_type| std.hash.autoHash(hasher, opaque_type.decl), } } @@ -280,6 +316,10 @@ pub const Key = union(enum) { const b_info = b.opt; return std.meta.eql(a_info, b_info); }, + .struct_type => |a_info| { + const b_info = b.struct_type; + return std.meta.eql(a_info, b_info); + }, .ptr => |a_info| { const b_info = b.ptr; @@ -331,16 +371,6 @@ pub const Key = union(enum) { @panic("TODO"); }, - .struct_type => |a_info| { - const b_info = b.struct_type; - - // TODO: remove this special case for empty_struct - if (a_info.fields_len == 0 and b_info.fields_len == 0) - return true; - - @panic("TODO"); - }, - .union_type => |a_info| { const b_info = b.union_type; @@ -353,6 +383,11 @@ pub const Key = union(enum) { const b_info = b.opaque_type; return a_info.decl == b_info.decl; }, + .aggregate => |a_info| { + const b_info = b.aggregate; + if (a_info.ty != b_info.ty) return false; + return std.mem.eql(Index, a_info.fields, b_info.fields); + }, } } @@ -375,6 +410,7 @@ pub const Key = union(enum) { .opt, .extern_func, .enum_tag, + .aggregate, => |x| return x.ty, .simple_value => |s| switch (s) { @@ -471,6 +507,7 @@ pub const Index = enum(u32) { anyerror_void_error_union_type, generic_poison_type, var_args_param_type, + /// `@TypeOf(.{})` empty_struct_type, /// `undefined` (untyped) @@ -691,7 +728,8 @@ pub const static_keys = [_]Key{ // empty_struct_type .{ .struct_type = .{ - .fields_len = 0, + .namespace = .none, + .index = .none, } }, .{ .simple_value = .undefined }, @@ -792,16 +830,18 @@ pub const Tag = enum(u8) { /// An opaque type. /// data is index of Key.OpaqueType in extra. type_opaque, + /// A struct type. + /// data is Module.Struct.OptionalIndex + /// The `none` tag is used to represent `@TypeOf(.{})`. + type_struct, + /// A struct type that has only a namespace; no fields, and there is no + /// Module.Struct object allocated for it. + /// data is Module.Namespace.Index. + type_struct_ns, /// A value that can be represented with only an enum tag. /// data is SimpleValue enum value. simple_value, - /// The SimpleType and SimpleValue enums are exposed via the InternPool API using - /// SimpleType and SimpleValue as the Key data themselves. - /// This tag is for miscellaneous types and values that can be represented with - /// only an enum tag, but will be presented via the API with a different Key. - /// data is SimpleInternal enum value. - simple_internal, /// A pointer to an integer value. /// data is extra index of PtrInt, which contains the type and address. /// Only pointer types are allowed to have this encoding. Optional types must use @@ -809,6 +849,8 @@ pub const Tag = enum(u8) { ptr_int, /// An optional value that is non-null. /// data is Index of the payload value. + /// In order to use this encoding, one must ensure that the `InternPool` + /// already contains the optional type corresponding to this payload. opt_payload, /// An optional value that is null. /// data is Index of the payload type. @@ -859,6 +901,13 @@ pub const Tag = enum(u8) { extern_func, /// A regular function. 
func, + /// This represents the only possible value for *some* types which have + /// only one possible value. Not all only-possible-values are encoded this way; + /// for example structs which have all comptime fields are not encoded this way. + /// The set of values that are encoded this way is: + /// * A struct which has 0 fields. + /// data is Index of the type, which is known to be zero bits at runtime. + only_possible_value, }; /// Having `SimpleType` and `SimpleValue` in separate enums makes it easier to @@ -912,9 +961,12 @@ pub const SimpleType = enum(u32) { }; pub const SimpleValue = enum(u32) { + /// This is untyped `undefined`. undefined, void, + /// This is untyped `null`. null, + /// This is the untyped empty struct literal: `.{}` empty_struct, true, false, @@ -923,12 +975,6 @@ pub const SimpleValue = enum(u32) { generic_poison, }; -pub const SimpleInternal = enum(u32) { - /// This is the empty struct type. Note that empty_struct value is exposed - /// via SimpleValue. - type_empty_struct, -}; - pub const Pointer = struct { child: Index, sentinel: Index, @@ -1005,7 +1051,7 @@ pub const ErrorUnion = struct { /// 0. field name: null-terminated string index for each fields_len; declaration order pub const EnumSimple = struct { /// The Decl that corresponds to the enum itself. - decl: DeclIndex, + decl: Module.Decl.Index, /// An integer type which is used for the numerical value of the enum. This /// is inferred by Zig to be the smallest power of two unsigned int that /// fits the number of fields. It is stored here to avoid unnecessary @@ -1091,6 +1137,10 @@ pub fn deinit(ip: *InternPool, gpa: Allocator) void { ip.items.deinit(gpa); ip.extra.deinit(gpa); ip.limbs.deinit(gpa); + + ip.structs_free_list.deinit(gpa); + ip.allocated_structs.deinit(gpa); + ip.* = undefined; } @@ -1167,20 +1217,38 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { .type_enum_simple => @panic("TODO"), .type_opaque => .{ .opaque_type = ip.extraData(Key.OpaqueType, data) }, - - .simple_internal => switch (@intToEnum(SimpleInternal, data)) { - .type_empty_struct => .{ .struct_type = .{ - .fields_len = 0, - } }, + .type_struct => { + const struct_index = @intToEnum(Module.Struct.OptionalIndex, data); + const namespace = if (struct_index.unwrap()) |i| + ip.structPtrConst(i).namespace.toOptional() + else + .none; + return .{ .struct_type = .{ + .index = struct_index, + .namespace = namespace, + } }; }, + .type_struct_ns => .{ .struct_type = .{ + .index = .none, + .namespace = @intToEnum(Module.Namespace.Index, data).toOptional(), + } }, + .opt_null => .{ .opt = .{ .ty = @intToEnum(Index, data), .val = .none, } }, - .opt_payload => .{ .opt = .{ - .ty = indexToKey(ip, @intToEnum(Index, data)).typeOf(), - .val = @intToEnum(Index, data), - } }, + .opt_payload => { + const payload_val = @intToEnum(Index, data); + // The existence of `opt_payload` guarantees that the optional type will be + // stored in the `InternPool`. 
+ const opt_ty = ip.getAssumeExists(.{ + .opt_type = indexToKey(ip, payload_val).typeOf(), + }); + return .{ .opt = .{ + .ty = opt_ty, + .val = payload_val, + } }; + }, .ptr_int => { const info = ip.extraData(PtrInt, data); return .{ .ptr = .{ @@ -1225,6 +1293,16 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { .float_f128 => @panic("TODO"), .extern_func => @panic("TODO"), .func => @panic("TODO"), + .only_possible_value => { + const ty = @intToEnum(Index, data); + return switch (ip.indexToKey(ty)) { + .struct_type => .{ .aggregate = .{ + .ty = ty, + .fields = &.{}, + } }, + else => unreachable, + }; + }, }; } @@ -1359,12 +1437,15 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { }, .struct_type => |struct_type| { - if (struct_type.fields_len != 0) { - @panic("TODO"); // handle structs other than empty_struct - } - ip.items.appendAssumeCapacity(.{ - .tag = .simple_internal, - .data = @enumToInt(SimpleInternal.type_empty_struct), + ip.items.appendAssumeCapacity(if (struct_type.index.unwrap()) |i| .{ + .tag = .type_struct, + .data = @enumToInt(i), + } else if (struct_type.namespace.unwrap()) |i| .{ + .tag = .type_struct_ns, + .data = @enumToInt(i), + } else .{ + .tag = .type_struct, + .data = @enumToInt(Module.Struct.OptionalIndex.none), }); }, @@ -1398,6 +1479,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .opt => |opt| { assert(opt.ty != .none); + assert(ip.isOptionalType(opt.ty)); ip.items.appendAssumeCapacity(if (opt.val == .none) .{ .tag = .opt_null, .data = @enumToInt(opt.ty), @@ -1549,10 +1631,35 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { const tag: Tag = if (enum_tag.tag.positive) .enum_tag_positive else .enum_tag_negative; try addInt(ip, gpa, enum_tag.ty, tag, enum_tag.tag.limbs); }, + + .aggregate => |aggregate| { + if (aggregate.fields.len == 0) { + ip.items.appendAssumeCapacity(.{ + .tag = .only_possible_value, + .data = @enumToInt(aggregate.ty), + }); + return @intToEnum(Index, ip.items.len - 1); + } + @panic("TODO"); + }, } return @intToEnum(Index, ip.items.len - 1); } +pub fn getAssumeExists(ip: InternPool, key: Key) Index { + const adapter: KeyAdapter = .{ .intern_pool = &ip }; + const index = ip.map.getIndexAdapted(key, adapter).?; + return @intToEnum(Index, index); +} + +/// This operation only happens under compile error conditions. +/// Leak the index until the next garbage collection. 
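+/// It exists so that a freshly interned type can be unwound by `errdefer
+/// mod.intern_pool.remove(ty)` when the surrounding declaration fails
+/// analysis (see `semaFile`, `zirStructDecl`, `zirOpaqueDecl`, `reifyStruct`).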
+pub fn remove(ip: *InternPool, index: Index) void { + _ = ip; + _ = index; + @panic("TODO this is a bit problematic to implement, could we maybe just never support a remove() operation on InternPool?"); +} + fn addInt(ip: *InternPool, gpa: Allocator, ty: Index, tag: Tag, limbs: []const Limb) !void { const limbs_len = @intCast(u32, limbs.len); try ip.reserveLimbs(gpa, @typeInfo(Int).Struct.fields.len + limbs_len); @@ -1578,8 +1685,8 @@ fn addExtraAssumeCapacity(ip: *InternPool, extra: anytype) u32 { ip.extra.appendAssumeCapacity(switch (field.type) { u32 => @field(extra, field.name), Index => @enumToInt(@field(extra, field.name)), - DeclIndex => @enumToInt(@field(extra, field.name)), - NamespaceIndex => @enumToInt(@field(extra, field.name)), + Module.Decl.Index => @enumToInt(@field(extra, field.name)), + Module.Namespace.Index => @enumToInt(@field(extra, field.name)), i32 => @bitCast(u32, @field(extra, field.name)), Pointer.Flags => @bitCast(u32, @field(extra, field.name)), Pointer.PackedOffset => @bitCast(u32, @field(extra, field.name)), @@ -1635,8 +1742,8 @@ fn extraData(ip: InternPool, comptime T: type, index: usize) T { @field(result, field.name) = switch (field.type) { u32 => int32, Index => @intToEnum(Index, int32), - DeclIndex => @intToEnum(DeclIndex, int32), - NamespaceIndex => @intToEnum(NamespaceIndex, int32), + Module.Decl.Index => @intToEnum(Module.Decl.Index, int32), + Module.Namespace.Index => @intToEnum(Module.Namespace.Index, int32), i32 => @bitCast(i32, int32), Pointer.Flags => @bitCast(Pointer.Flags, int32), Pointer.PackedOffset => @bitCast(Pointer.PackedOffset, int32), @@ -1808,6 +1915,20 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Al } } +pub fn indexToStruct(ip: *InternPool, val: Index) Module.Struct.OptionalIndex { + const tags = ip.items.items(.tag); + if (val == .none) return .none; + if (tags[@enumToInt(val)] != .type_struct) return .none; + const datas = ip.items.items(.data); + return @intToEnum(Module.Struct.Index, datas[@enumToInt(val)]).toOptional(); +} + +pub fn isOptionalType(ip: InternPool, ty: Index) bool { + const tags = ip.items.items(.tag); + if (ty == .none) return false; + return tags[@enumToInt(ty)] == .type_optional; +} + pub fn dump(ip: InternPool) void { dumpFallible(ip, std.heap.page_allocator) catch return; } @@ -1859,9 +1980,10 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void { .type_error_union => @sizeOf(ErrorUnion), .type_enum_simple => @sizeOf(EnumSimple), .type_opaque => @sizeOf(Key.OpaqueType), + .type_struct => 0, + .type_struct_ns => 0, .simple_type => 0, .simple_value => 0, - .simple_internal => 0, .ptr_int => @sizeOf(PtrInt), .opt_null => 0, .opt_payload => 0, @@ -1887,6 +2009,7 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void { .float_f128 => @sizeOf(Float128), .extern_func => @panic("TODO"), .func => @panic("TODO"), + .only_possible_value => 0, }); } const SortContext = struct { @@ -1905,3 +2028,34 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void { }); } } + +pub fn structPtr(ip: *InternPool, index: Module.Struct.Index) *Module.Struct { + return ip.allocated_structs.at(@enumToInt(index)); +} + +pub fn structPtrConst(ip: InternPool, index: Module.Struct.Index) *const Module.Struct { + return ip.allocated_structs.at(@enumToInt(index)); +} + +pub fn structPtrUnwrapConst(ip: InternPool, index: Module.Struct.OptionalIndex) ?*const Module.Struct { + return structPtrConst(ip, index.unwrap() orelse return null); +} + +pub fn createStruct( + ip: 
*InternPool, + gpa: Allocator, + initialization: Module.Struct, +) Allocator.Error!Module.Struct.Index { + if (ip.structs_free_list.popOrNull()) |index| return index; + const ptr = try ip.allocated_structs.addOne(gpa); + ptr.* = initialization; + return @intToEnum(Module.Struct.Index, ip.allocated_structs.len - 1); +} + +pub fn destroyStruct(ip: *InternPool, gpa: Allocator, index: Module.Struct.Index) void { + ip.structPtr(index).* = undefined; + ip.structs_free_list.append(gpa, index) catch { + // In order to keep `destroyStruct` a non-fallible function, we ignore memory + // allocation failures here, instead leaking the Struct until garbage collection. + }; +} diff --git a/src/Module.zig b/src/Module.zig index 7521d4d439..ada69537f6 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -839,11 +839,14 @@ pub const Decl = struct { /// If the Decl has a value and it is a struct, return it, /// otherwise null. - pub fn getStruct(decl: *Decl) ?*Struct { - if (!decl.owns_tv) return null; - const ty = (decl.val.castTag(.ty) orelse return null).data; - const struct_obj = (ty.castTag(.@"struct") orelse return null).data; - return struct_obj; + pub fn getStruct(decl: *Decl, mod: *Module) ?*Struct { + return mod.structPtrUnwrap(getStructIndex(decl, mod)); + } + + pub fn getStructIndex(decl: *Decl, mod: *Module) Struct.OptionalIndex { + if (!decl.owns_tv) return .none; + const ty = (decl.val.castTag(.ty) orelse return .none).data; + return mod.intern_pool.indexToStruct(ty.ip_index); } /// If the Decl has a value and it is a union, return it, @@ -884,32 +887,29 @@ pub const Decl = struct { /// Only returns it if the Decl is the owner. pub fn getInnerNamespaceIndex(decl: *Decl, mod: *Module) Namespace.OptionalIndex { if (!decl.owns_tv) return .none; - if (decl.val.ip_index == .none) { - const ty = (decl.val.castTag(.ty) orelse return .none).data; - switch (ty.tag()) { - .@"struct" => { - const struct_obj = ty.castTag(.@"struct").?.data; - return struct_obj.namespace.toOptional(); - }, - .enum_full, .enum_nonexhaustive => { - const enum_obj = ty.cast(Type.Payload.EnumFull).?.data; - return enum_obj.namespace.toOptional(); - }, - .empty_struct => { - @panic("TODO"); - }, - .@"union", .union_safety_tagged, .union_tagged => { - const union_obj = ty.cast(Type.Payload.Union).?.data; - return union_obj.namespace.toOptional(); - }, + switch (decl.val.ip_index) { + .empty_struct_type => return .none, + .none => { + const ty = (decl.val.castTag(.ty) orelse return .none).data; + switch (ty.tag()) { + .enum_full, .enum_nonexhaustive => { + const enum_obj = ty.cast(Type.Payload.EnumFull).?.data; + return enum_obj.namespace.toOptional(); + }, + .@"union", .union_safety_tagged, .union_tagged => { + const union_obj = ty.cast(Type.Payload.Union).?.data; + return union_obj.namespace.toOptional(); + }, - else => return .none, - } + else => return .none, + } + }, + else => return switch (mod.intern_pool.indexToKey(decl.val.ip_index)) { + .opaque_type => |opaque_type| opaque_type.namespace.toOptional(), + .struct_type => |struct_type| struct_type.namespace, + else => .none, + }, } - return switch (mod.intern_pool.indexToKey(decl.val.ip_index)) { - .opaque_type => |opaque_type| opaque_type.namespace.toOptional(), - else => .none, - }; } /// Same as `getInnerNamespaceIndex` but additionally obtains the pointer. 
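The `getStruct`/`getStructIndex` pair above replaces the old `castTag(.ty)` plus `castTag(.@"struct")` chain with an InternPool lookup. A minimal usage sketch of the two access paths (hypothetical caller; assumes `mod: *Module` and `decl: *Decl` are in scope):

    // When both the index and the pointer are needed, fetch the index first:
    if (decl.getStructIndex(mod).unwrap()) |struct_index| {
        const struct_obj = mod.structPtr(struct_index);
        _ = struct_obj.fields.count();
    }
    // When only the pointer matters, the optional-returning wrapper suffices:
    if (decl.getStruct(mod)) |struct_obj| {
        _ = struct_obj.fields.count();
    }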
@@ -1046,6 +1046,28 @@ pub const Struct = struct { is_tuple: bool, assumed_runtime_bits: bool = false, + pub const Index = enum(u32) { + _, + + pub fn toOptional(i: Index) OptionalIndex { + return @intToEnum(OptionalIndex, @enumToInt(i)); + } + }; + + pub const OptionalIndex = enum(u32) { + none = std.math.maxInt(u32), + _, + + pub fn init(oi: ?Index) OptionalIndex { + return @intToEnum(OptionalIndex, @enumToInt(oi orelse return .none)); + } + + pub fn unwrap(oi: OptionalIndex) ?Index { + if (oi == .none) return null; + return @intToEnum(Index, @enumToInt(oi)); + } + }; + pub const Fields = std.StringArrayHashMapUnmanaged(Field); /// The `Type` and `Value` memory is owned by the arena of the Struct's owner_decl. @@ -1111,12 +1133,7 @@ pub const Struct = struct { } pub fn srcLoc(s: Struct, mod: *Module) SrcLoc { - const owner_decl = mod.declPtr(s.owner_decl); - return .{ - .file_scope = owner_decl.getFileScope(mod), - .parent_decl_node = owner_decl.src_node, - .lazy = LazySrcLoc.nodeOffset(0), - }; + return mod.declPtr(s.owner_decl).srcLoc(mod); } pub fn fieldSrcLoc(s: Struct, mod: *Module, query: FieldSrcQuery) SrcLoc { @@ -3622,6 +3639,16 @@ pub fn namespacePtr(mod: *Module, index: Namespace.Index) *Namespace { return mod.allocated_namespaces.at(@enumToInt(index)); } +pub fn structPtr(mod: *Module, index: Struct.Index) *Struct { + return mod.intern_pool.structPtr(index); +} + +/// This one accepts an index from the InternPool and asserts that it is not +/// the anonymous empty struct type. +pub fn structPtrUnwrap(mod: *Module, index: Struct.OptionalIndex) ?*Struct { + return structPtr(mod, index.unwrap() orelse return null); +} + /// Returns true if and only if the Decl is the top level struct associated with a File. pub fn declIsRoot(mod: *Module, decl_index: Decl.Index) bool { const decl = mod.declPtr(decl_index); @@ -4078,7 +4105,7 @@ fn updateZirRefs(mod: *Module, file: *File, old_zir: Zir) !void { if (!decl.owns_tv) continue; - if (decl.getStruct()) |struct_obj| { + if (decl.getStruct(mod)) |struct_obj| { struct_obj.zir_index = inst_map.get(struct_obj.zir_index) orelse { try file.deleted_decls.append(gpa, decl_index); continue; @@ -4597,36 +4624,50 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void { errdefer new_decl_arena.deinit(); const new_decl_arena_allocator = new_decl_arena.allocator(); - const struct_obj = try new_decl_arena_allocator.create(Module.Struct); - const struct_ty = try Type.Tag.@"struct".create(new_decl_arena_allocator, struct_obj); - const struct_val = try Value.Tag.ty.create(new_decl_arena_allocator, struct_ty); - const ty_ty = comptime Type.type; - struct_obj.* = .{ - .owner_decl = undefined, // set below + // Because these three things each reference each other, `undefined` + // placeholders are used before being set after the struct type gains an + // InternPool index. 
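+ // Concretely: the Namespace is created with `ty = undefined`, the Decl
+ // and the Struct are allocated next, the struct type is interned, and
+ // only then are `new_namespace.ty` and the Decl's type and value filled in.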
+ const new_namespace_index = try mod.createNamespace(.{ + .parent = .none, + .ty = undefined, + .file_scope = file, + }); + const new_namespace = mod.namespacePtr(new_namespace_index); + errdefer mod.destroyNamespace(new_namespace_index); + + const new_decl_index = try mod.allocateNewDecl(new_namespace_index, 0, null); + const new_decl = mod.declPtr(new_decl_index); + errdefer @panic("TODO error handling"); + + const struct_index = try mod.createStruct(.{ + .owner_decl = new_decl_index, .fields = .{}, .zir_index = undefined, // set below .layout = .Auto, .status = .none, .known_non_opv = undefined, .is_tuple = undefined, // set below - .namespace = try mod.createNamespace(.{ - .parent = .none, - .ty = struct_ty, - .file_scope = file, - }), - }; - const new_decl_index = try mod.allocateNewDecl(struct_obj.namespace, 0, null); - const new_decl = mod.declPtr(new_decl_index); + .namespace = new_namespace_index, + }); + errdefer mod.destroyStruct(struct_index); + + const struct_ty = try mod.intern_pool.get(gpa, .{ .struct_type = .{ + .index = struct_index.toOptional(), + .namespace = new_namespace_index.toOptional(), + } }); + errdefer mod.intern_pool.remove(struct_ty); + + new_namespace.ty = struct_ty.toType(); file.root_decl = new_decl_index.toOptional(); - struct_obj.owner_decl = new_decl_index; + new_decl.name = try file.fullyQualifiedNameZ(gpa); new_decl.src_line = 0; new_decl.is_pub = true; new_decl.is_exported = false; new_decl.has_align = false; new_decl.has_linksection_or_addrspace = false; - new_decl.ty = ty_ty; - new_decl.val = struct_val; + new_decl.ty = Type.type; + new_decl.val = struct_ty.toValue(); new_decl.@"align" = 0; new_decl.@"linksection" = null; new_decl.has_tv = true; @@ -4639,6 +4680,7 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void { if (file.status == .success_zir) { assert(file.zir_loaded); const main_struct_inst = Zir.main_struct_inst; + const struct_obj = mod.structPtr(struct_index); struct_obj.zir_index = main_struct_inst; const extended = file.zir.instructions.items(.data)[main_struct_inst].extended; const small = @bitCast(Zir.Inst.StructDecl.Small, extended.small); @@ -4665,7 +4707,7 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void { var wip_captures = try WipCaptureScope.init(gpa, new_decl_arena_allocator, null); defer wip_captures.deinit(); - if (sema.analyzeStructDecl(new_decl, main_struct_inst, struct_obj)) |_| { + if (sema.analyzeStructDecl(new_decl, main_struct_inst, struct_index)) |_| { try wip_captures.finalize(); new_decl.analysis = .complete; } else |err| switch (err) { @@ -4761,11 +4803,12 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { if (mod.declIsRoot(decl_index)) { log.debug("semaDecl root {*} ({s})", .{ decl, decl.name }); const main_struct_inst = Zir.main_struct_inst; - const struct_obj = decl.getStruct().?; + const struct_index = decl.getStructIndex(mod).unwrap().?; + const struct_obj = mod.structPtr(struct_index); // This might not have gotten set in `semaFile` if the first time had // a ZIR failure, so we set it here in case. 
struct_obj.zir_index = main_struct_inst; - try sema.analyzeStructDecl(decl, main_struct_inst, struct_obj); + try sema.analyzeStructDecl(decl, main_struct_inst, struct_index); decl.analysis = .complete; decl.generation = mod.generation; return false; @@ -5970,6 +6013,14 @@ pub fn destroyNamespace(mod: *Module, index: Namespace.Index) void { }; } +pub fn createStruct(mod: *Module, initialization: Struct) Allocator.Error!Struct.Index { + return mod.intern_pool.createStruct(mod.gpa, initialization); +} + +pub fn destroyStruct(mod: *Module, index: Struct.Index) void { + return mod.intern_pool.destroyStruct(mod.gpa, index); +} + pub fn allocateNewDecl( mod: *Module, namespace: Namespace.Index, @@ -7202,12 +7253,7 @@ pub fn atomicPtrAlignment( } pub fn opaqueSrcLoc(mod: *Module, opaque_type: InternPool.Key.OpaqueType) SrcLoc { - const owner_decl = mod.declPtr(opaque_type.decl); - return .{ - .file_scope = owner_decl.getFileScope(mod), - .parent_decl_node = owner_decl.src_node, - .lazy = LazySrcLoc.nodeOffset(0), - }; + return mod.declPtr(opaque_type.decl).srcLoc(mod); } pub fn opaqueFullyQualifiedName(mod: *Module, opaque_type: InternPool.Key.OpaqueType) ![:0]u8 { @@ -7221,3 +7267,12 @@ pub fn declFileScope(mod: *Module, decl_index: Decl.Index) *File { pub fn namespaceDeclIndex(mod: *Module, namespace_index: Namespace.Index) Decl.Index { return mod.namespacePtr(namespace_index).getDeclIndex(mod); } + +/// Returns null in the following cases: +/// * `@TypeOf(.{})` +/// * A struct which has no fields (`struct {}`). +/// * Not a struct. +pub fn typeToStruct(mod: *Module, ty: Type) ?*Struct { + const struct_index = mod.intern_pool.indexToStruct(ty.ip_index).unwrap() orelse return null; + return mod.structPtr(struct_index); +} diff --git a/src/Sema.zig b/src/Sema.zig index 35440395c4..1f72470f9e 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -2090,16 +2090,17 @@ fn failWithIntegerOverflow(sema: *Sema, block: *Block, src: LazySrcLoc, int_ty: } fn failWithInvalidComptimeFieldStore(sema: *Sema, block: *Block, init_src: LazySrcLoc, container_ty: Type, field_index: usize) CompileError { + const mod = sema.mod; const msg = msg: { const msg = try sema.errMsg(block, init_src, "value stored in comptime field does not match the default value of the field", .{}); errdefer msg.destroy(sema.gpa); - const struct_ty = container_ty.castTag(.@"struct") orelse break :msg msg; - const default_value_src = struct_ty.data.fieldSrcLoc(sema.mod, .{ + const struct_ty = mod.typeToStruct(container_ty) orelse break :msg msg; + const default_value_src = struct_ty.fieldSrcLoc(mod, .{ .index = field_index, .range = .value, }); - try sema.mod.errNoteNonLazy(default_value_src, msg, "default value set here", .{}); + try mod.errNoteNonLazy(default_value_src, msg, "default value set here", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); @@ -2632,8 +2633,10 @@ pub fn analyzeStructDecl( sema: *Sema, new_decl: *Decl, inst: Zir.Inst.Index, - struct_obj: *Module.Struct, + struct_index: Module.Struct.Index, ) SemaError!void { + const mod = sema.mod; + const struct_obj = mod.structPtr(struct_index); const extended = sema.code.instructions.items(.data)[inst].extended; assert(extended.opcode == .struct_decl); const small = @bitCast(Zir.Inst.StructDecl.Small, extended.small); @@ -2662,7 +2665,7 @@ pub fn analyzeStructDecl( } } - _ = try sema.mod.scanNamespace(struct_obj.namespace, extra_index, decls_len, new_decl); + _ = try mod.scanNamespace(struct_obj.namespace, extra_index, decls_len, new_decl); } fn zirStructDecl( @@ 
-2671,28 +2674,38 @@ fn zirStructDecl( extended: Zir.Inst.Extended.InstData, inst: Zir.Inst.Index, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; + const gpa = sema.gpa; const small = @bitCast(Zir.Inst.StructDecl.Small, extended.small); const src: LazySrcLoc = if (small.has_src_node) blk: { const node_offset = @bitCast(i32, sema.code.extra[extended.operand]); break :blk LazySrcLoc.nodeOffset(node_offset); } else sema.src; - var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa); + var new_decl_arena = std.heap.ArenaAllocator.init(gpa); errdefer new_decl_arena.deinit(); - const new_decl_arena_allocator = new_decl_arena.allocator(); - const mod = sema.mod; - const struct_obj = try new_decl_arena_allocator.create(Module.Struct); - const struct_ty = try Type.Tag.@"struct".create(new_decl_arena_allocator, struct_obj); - const struct_val = try Value.Tag.ty.create(new_decl_arena_allocator, struct_ty); + // Because these three things each reference each other, `undefined` + // placeholders are used before being set after the struct type gains an + // InternPool index. + const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{ .ty = Type.type, - .val = struct_val, + .val = undefined, }, small.name_strategy, "struct", inst); const new_decl = mod.declPtr(new_decl_index); new_decl.owns_tv = true; errdefer mod.abortAnonDecl(new_decl_index); - struct_obj.* = .{ + + const new_namespace_index = try mod.createNamespace(.{ + .parent = block.namespace.toOptional(), + .ty = undefined, + .file_scope = block.getFileScope(mod), + }); + const new_namespace = mod.namespacePtr(new_namespace_index); + errdefer mod.destroyNamespace(new_namespace_index); + + const struct_index = try mod.createStruct(.{ .owner_decl = new_decl_index, .fields = .{}, .zir_index = inst, @@ -2700,13 +2713,20 @@ fn zirStructDecl( .status = .none, .known_non_opv = undefined, .is_tuple = small.is_tuple, - .namespace = try mod.createNamespace(.{ - .parent = block.namespace.toOptional(), - .ty = struct_ty, - .file_scope = block.getFileScope(mod), - }), - }; - try sema.analyzeStructDecl(new_decl, inst, struct_obj); + .namespace = new_namespace_index, + }); + errdefer mod.destroyStruct(struct_index); + + const struct_ty = try mod.intern_pool.get(gpa, .{ .struct_type = .{ + .index = struct_index.toOptional(), + .namespace = new_namespace_index.toOptional(), + } }); + errdefer mod.intern_pool.remove(struct_ty); + + new_decl.val = struct_ty.toValue(); + new_namespace.ty = struct_ty.toType(); + + try sema.analyzeStructDecl(new_decl, inst, struct_index); try new_decl.finalizeNewArena(&new_decl_arena); return sema.analyzeDeclVal(block, src, new_decl_index); } @@ -2721,6 +2741,7 @@ fn createAnonymousDeclTypeNamed( inst: ?Zir.Inst.Index, ) !Decl.Index { const mod = sema.mod; + const gpa = sema.gpa; const namespace = block.namespace; const src_scope = block.wip_capture_scope; const src_decl = mod.declPtr(block.src_decl); @@ -2736,16 +2757,16 @@ fn createAnonymousDeclTypeNamed( // semantically analyzed. // This name is also used as the key in the parent namespace so it cannot be // renamed. 
- const name = try std.fmt.allocPrintZ(sema.gpa, "{s}__{s}_{d}", .{ + const name = try std.fmt.allocPrintZ(gpa, "{s}__{s}_{d}", .{ src_decl.name, anon_prefix, @enumToInt(new_decl_index), }); - errdefer sema.gpa.free(name); + errdefer gpa.free(name); try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, namespace, typed_value, name); return new_decl_index; }, .parent => { - const name = try sema.gpa.dupeZ(u8, mem.sliceTo(sema.mod.declPtr(block.src_decl).name, 0)); - errdefer sema.gpa.free(name); + const name = try gpa.dupeZ(u8, mem.sliceTo(sema.mod.declPtr(block.src_decl).name, 0)); + errdefer gpa.free(name); try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, namespace, typed_value, name); return new_decl_index; }, @@ -2753,7 +2774,7 @@ fn createAnonymousDeclTypeNamed( const fn_info = sema.code.getFnInfo(sema.func.?.zir_body_inst); const zir_tags = sema.code.instructions.items(.tag); - var buf = std.ArrayList(u8).init(sema.gpa); + var buf = std.ArrayList(u8).init(gpa); defer buf.deinit(); try buf.appendSlice(mem.sliceTo(sema.mod.declPtr(block.src_decl).name, 0)); try buf.appendSlice("("); @@ -2781,7 +2802,7 @@ fn createAnonymousDeclTypeNamed( try buf.appendSlice(")"); const name = try buf.toOwnedSliceSentinel(0); - errdefer sema.gpa.free(name); + errdefer gpa.free(name); try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, namespace, typed_value, name); return new_decl_index; }, @@ -2794,10 +2815,10 @@ fn createAnonymousDeclTypeNamed( .dbg_var_ptr, .dbg_var_val => { if (zir_data[i].str_op.operand != ref) continue; - const name = try std.fmt.allocPrintZ(sema.gpa, "{s}.{s}", .{ + const name = try std.fmt.allocPrintZ(gpa, "{s}.{s}", .{ src_decl.name, zir_data[i].str_op.getStr(sema.code), }); - errdefer sema.gpa.free(name); + errdefer gpa.free(name); try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, namespace, typed_value, name); return new_decl_index; @@ -3216,13 +3237,13 @@ fn zirOpaqueDecl( .file_scope = block.getFileScope(mod), }); const new_namespace = mod.namespacePtr(new_namespace_index); - errdefer @panic("TODO error handling"); + errdefer mod.destroyNamespace(new_namespace_index); const opaque_ty = try mod.intern_pool.get(gpa, .{ .opaque_type = .{ .decl = new_decl_index, .namespace = new_namespace_index, } }); - errdefer @panic("TODO error handling"); + errdefer mod.intern_pool.remove(opaque_ty); new_decl.val = opaque_ty.toValue(); new_namespace.ty = opaque_ty.toType(); @@ -3960,7 +3981,7 @@ fn zirArrayBasePtr( const elem_ty = sema.typeOf(base_ptr).childType(mod); switch (elem_ty.zigTypeTag(mod)) { .Array, .Vector => return base_ptr, - .Struct => if (elem_ty.isTuple()) { + .Struct => if (elem_ty.isTuple(mod)) { // TODO validate element count return base_ptr; }, @@ -4150,7 +4171,7 @@ fn validateArrayInitTy( } return; }, - .Struct => if (ty.isTuple()) { + .Struct => if (ty.isTuple(mod)) { _ = try sema.resolveTypeFields(ty); const array_len = ty.arrayLen(mod); if (extra.init_count > array_len) { @@ -4358,7 +4379,7 @@ fn validateStructInit( const gpa = sema.gpa; // Maps field index to field_ptr index of where it was already initialized. 
- const found_fields = try gpa.alloc(Zir.Inst.Index, struct_ty.structFieldCount()); + const found_fields = try gpa.alloc(Zir.Inst.Index, struct_ty.structFieldCount(mod)); defer gpa.free(found_fields); @memset(found_fields, 0); @@ -4370,7 +4391,7 @@ fn validateStructInit( const field_ptr_extra = sema.code.extraData(Zir.Inst.Field, field_ptr_data.payload_index).data; struct_ptr_zir_ref = field_ptr_extra.lhs; const field_name = sema.code.nullTerminatedString(field_ptr_extra.field_name_start); - const field_index = if (struct_ty.isTuple()) + const field_index = if (struct_ty.isTuple(mod)) try sema.tupleFieldIndex(block, struct_ty, field_name, field_src) else try sema.structFieldIndex(block, struct_ty, field_name, field_src); @@ -4403,9 +4424,9 @@ fn validateStructInit( for (found_fields, 0..) |field_ptr, i| { if (field_ptr != 0) continue; - const default_val = struct_ty.structFieldDefaultValue(i); + const default_val = struct_ty.structFieldDefaultValue(i, mod); if (default_val.ip_index == .unreachable_value) { - if (struct_ty.isTuple()) { + if (struct_ty.isTuple(mod)) { const template = "missing tuple field with index {d}"; if (root_msg) |msg| { try sema.errNote(block, init_src, msg, template, .{i}); @@ -4414,7 +4435,7 @@ fn validateStructInit( } continue; } - const field_name = struct_ty.structFieldName(i); + const field_name = struct_ty.structFieldName(i, mod); const template = "missing struct field: {s}"; const args = .{field_name}; if (root_msg) |msg| { @@ -4426,7 +4447,7 @@ fn validateStructInit( } const field_src = init_src; // TODO better source location - const default_field_ptr = if (struct_ty.isTuple()) + const default_field_ptr = if (struct_ty.isTuple(mod)) try sema.tupleFieldPtr(block, init_src, struct_ptr, field_src, @intCast(u32, i), true) else try sema.structFieldPtrByIndex(block, init_src, struct_ptr, @intCast(u32, i), field_src, struct_ty, true); @@ -4436,11 +4457,11 @@ fn validateStructInit( } if (root_msg) |msg| { - if (struct_ty.castTag(.@"struct")) |struct_obj| { - const fqn = try struct_obj.data.getFullyQualifiedName(mod); + if (mod.typeToStruct(struct_ty)) |struct_obj| { + const fqn = try struct_obj.getFullyQualifiedName(mod); defer gpa.free(fqn); try mod.errNoteNonLazy( - struct_obj.data.srcLoc(mod), + struct_obj.srcLoc(mod), msg, "struct '{s}' declared here", .{fqn}, @@ -4463,12 +4484,12 @@ fn validateStructInit( // We collect the comptime field values in case the struct initialization // ends up being comptime-known. - const field_values = try sema.arena.alloc(Value, struct_ty.structFieldCount()); + const field_values = try sema.arena.alloc(Value, struct_ty.structFieldCount(mod)); field: for (found_fields, 0..) |field_ptr, i| { if (field_ptr != 0) { // Determine whether the value stored to this pointer is comptime-known. 
- const field_ty = struct_ty.structFieldType(i); + const field_ty = struct_ty.structFieldType(i, mod); if (try sema.typeHasOnePossibleValue(field_ty)) |opv| { field_values[i] = opv; continue; @@ -4548,9 +4569,9 @@ fn validateStructInit( continue :field; } - const default_val = struct_ty.structFieldDefaultValue(i); + const default_val = struct_ty.structFieldDefaultValue(i, mod); if (default_val.ip_index == .unreachable_value) { - if (struct_ty.isTuple()) { + if (struct_ty.isTuple(mod)) { const template = "missing tuple field with index {d}"; if (root_msg) |msg| { try sema.errNote(block, init_src, msg, template, .{i}); @@ -4559,7 +4580,7 @@ fn validateStructInit( } continue; } - const field_name = struct_ty.structFieldName(i); + const field_name = struct_ty.structFieldName(i, mod); const template = "missing struct field: {s}"; const args = .{field_name}; if (root_msg) |msg| { @@ -4573,11 +4594,11 @@ fn validateStructInit( } if (root_msg) |msg| { - if (struct_ty.castTag(.@"struct")) |struct_obj| { - const fqn = try struct_obj.data.getFullyQualifiedName(sema.mod); + if (mod.typeToStruct(struct_ty)) |struct_obj| { + const fqn = try struct_obj.getFullyQualifiedName(sema.mod); defer gpa.free(fqn); try sema.mod.errNoteNonLazy( - struct_obj.data.srcLoc(sema.mod), + struct_obj.srcLoc(sema.mod), msg, "struct '{s}' declared here", .{fqn}, @@ -4605,7 +4626,7 @@ fn validateStructInit( if (field_ptr != 0) continue; const field_src = init_src; // TODO better source location - const default_field_ptr = if (struct_ty.isTuple()) + const default_field_ptr = if (struct_ty.isTuple(mod)) try sema.tupleFieldPtr(block, init_src, struct_ptr, field_src, @intCast(u32, i), true) else try sema.structFieldPtrByIndex(block, init_src, struct_ptr, @intCast(u32, i), field_src, struct_ty, true); @@ -4638,7 +4659,7 @@ fn zirValidateArrayInit( var i = instrs.len; while (i < array_len) : (i += 1) { - const default_val = array_ty.structFieldDefaultValue(i); + const default_val = array_ty.structFieldDefaultValue(i, mod); if (default_val.ip_index == .unreachable_value) { const template = "missing tuple field with index {d}"; if (root_msg) |msg| { @@ -4698,7 +4719,7 @@ fn zirValidateArrayInit( outer: for (instrs, 0..) |elem_ptr, i| { // Determine whether the value stored to this pointer is comptime-known. 
- if (array_ty.isTuple()) { + if (array_ty.isTuple(mod)) { if (try array_ty.structFieldValueComptime(mod, i)) |opv| { element_vals[i] = opv; continue; @@ -7950,7 +7971,7 @@ fn zirElemTypeIndex(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErr const indexable_ty = try sema.resolveType(block, .unneeded, bin.lhs); assert(indexable_ty.isIndexable(mod)); // validated by a previous instruction if (indexable_ty.zigTypeTag(mod) == .Struct) { - const elem_type = indexable_ty.structFieldType(@enumToInt(bin.rhs)); + const elem_type = indexable_ty.structFieldType(@enumToInt(bin.rhs), mod); return sema.addType(elem_type); } else { const elem_type = indexable_ty.elemType2(mod); @@ -9822,7 +9843,7 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air }; return sema.failWithOwnedErrorMsg(msg); }, - .Struct, .Union => if (dest_ty.containerLayout() == .Auto) { + .Struct, .Union => if (dest_ty.containerLayout(mod) == .Auto) { const container = switch (dest_ty.zigTypeTag(mod)) { .Struct => "struct", .Union => "union", @@ -9885,7 +9906,7 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air }; return sema.failWithOwnedErrorMsg(msg); }, - .Struct, .Union => if (operand_ty.containerLayout() == .Auto) { + .Struct, .Union => if (operand_ty.containerLayout(mod) == .Auto) { const container = switch (operand_ty.zigTypeTag(mod)) { .Struct => "struct", .Union => "union", @@ -12041,12 +12062,12 @@ fn zirHasField(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai if (mem.eql(u8, name, field_name)) break true; } else false; } - if (ty.isTuple()) { + if (ty.isTuple(mod)) { const field_index = std.fmt.parseUnsigned(u32, field_name, 10) catch break :hf false; - break :hf field_index < ty.structFieldCount(); + break :hf field_index < ty.structFieldCount(mod); } break :hf switch (ty.zigTypeTag(mod)) { - .Struct => ty.structFields().contains(field_name), + .Struct => ty.structFields(mod).contains(field_name), .Union => ty.unionFields().contains(field_name), .Enum => ty.enumFields().contains(field_name), .Array => mem.eql(u8, field_name, "len"), @@ -12601,14 +12622,15 @@ fn analyzeTupleCat( lhs: Air.Inst.Ref, rhs: Air.Inst.Ref, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const lhs_ty = sema.typeOf(lhs); const rhs_ty = sema.typeOf(rhs); const src = LazySrcLoc.nodeOffset(src_node); const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = src_node }; const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = src_node }; - const lhs_len = lhs_ty.structFieldCount(); - const rhs_len = rhs_ty.structFieldCount(); + const lhs_len = lhs_ty.structFieldCount(mod); + const rhs_len = rhs_ty.structFieldCount(mod); const dest_fields = lhs_len + rhs_len; if (dest_fields == 0) { @@ -12629,8 +12651,8 @@ fn analyzeTupleCat( var runtime_src: ?LazySrcLoc = null; var i: u32 = 0; while (i < lhs_len) : (i += 1) { - types[i] = lhs_ty.structFieldType(i); - const default_val = lhs_ty.structFieldDefaultValue(i); + types[i] = lhs_ty.structFieldType(i, mod); + const default_val = lhs_ty.structFieldDefaultValue(i, mod); values[i] = default_val; const operand_src = lhs_src; // TODO better source location if (default_val.ip_index == .unreachable_value) { @@ -12639,8 +12661,8 @@ fn analyzeTupleCat( } i = 0; while (i < rhs_len) : (i += 1) { - types[i + lhs_len] = rhs_ty.structFieldType(i); - const default_val = rhs_ty.structFieldDefaultValue(i); + types[i + lhs_len] = rhs_ty.structFieldType(i, mod); + const default_val = rhs_ty.structFieldDefaultValue(i, mod); values[i + 
lhs_len] = default_val; const operand_src = rhs_src; // TODO better source location if (default_val.ip_index == .unreachable_value) { @@ -12691,8 +12713,8 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const rhs_ty = sema.typeOf(rhs); const src = inst_data.src(); - const lhs_is_tuple = lhs_ty.isTuple(); - const rhs_is_tuple = rhs_ty.isTuple(); + const lhs_is_tuple = lhs_ty.isTuple(mod); + const rhs_is_tuple = rhs_ty.isTuple(mod); if (lhs_is_tuple and rhs_is_tuple) { return sema.analyzeTupleCat(block, inst_data.src_node, lhs, rhs); } @@ -12800,8 +12822,8 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai var elem_i: usize = 0; while (elem_i < lhs_len) : (elem_i += 1) { const lhs_elem_i = elem_i; - const elem_ty = if (lhs_is_tuple) lhs_ty.structFieldType(lhs_elem_i) else lhs_info.elem_type; - const elem_default_val = if (lhs_is_tuple) lhs_ty.structFieldDefaultValue(lhs_elem_i) else Value.@"unreachable"; + const elem_ty = if (lhs_is_tuple) lhs_ty.structFieldType(lhs_elem_i, mod) else lhs_info.elem_type; + const elem_default_val = if (lhs_is_tuple) lhs_ty.structFieldDefaultValue(lhs_elem_i, mod) else Value.@"unreachable"; const elem_val = if (elem_default_val.ip_index == .unreachable_value) try lhs_sub_val.elemValue(mod, lhs_elem_i) else elem_default_val; const elem_val_inst = try sema.addConstant(elem_ty, elem_val); const coerced_elem_val_inst = try sema.coerce(block, resolved_elem_ty, elem_val_inst, .unneeded); @@ -12810,8 +12832,8 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } while (elem_i < result_len) : (elem_i += 1) { const rhs_elem_i = elem_i - lhs_len; - const elem_ty = if (rhs_is_tuple) rhs_ty.structFieldType(rhs_elem_i) else rhs_info.elem_type; - const elem_default_val = if (rhs_is_tuple) rhs_ty.structFieldDefaultValue(rhs_elem_i) else Value.@"unreachable"; + const elem_ty = if (rhs_is_tuple) rhs_ty.structFieldType(rhs_elem_i, mod) else rhs_info.elem_type; + const elem_default_val = if (rhs_is_tuple) rhs_ty.structFieldDefaultValue(rhs_elem_i, mod) else Value.@"unreachable"; const elem_val = if (elem_default_val.ip_index == .unreachable_value) try rhs_sub_val.elemValue(mod, rhs_elem_i) else elem_default_val; const elem_val_inst = try sema.addConstant(elem_ty, elem_val); const coerced_elem_val_inst = try sema.coerce(block, resolved_elem_ty, elem_val_inst, .unneeded); @@ -12909,8 +12931,8 @@ fn getArrayCatInfo(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Ins } }, .Struct => { - if (operand_ty.isTuple() and peer_ty.isIndexable(mod)) { - assert(!peer_ty.isTuple()); + if (operand_ty.isTuple(mod) and peer_ty.isIndexable(mod)) { + assert(!peer_ty.isTuple(mod)); return .{ .elem_type = peer_ty.elemType2(mod), .sentinel = null, @@ -12930,12 +12952,13 @@ fn analyzeTupleMul( operand: Air.Inst.Ref, factor: u64, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const operand_ty = sema.typeOf(operand); const src = LazySrcLoc.nodeOffset(src_node); const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = src_node }; const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = src_node }; - const tuple_len = operand_ty.structFieldCount(); + const tuple_len = operand_ty.structFieldCount(mod); const final_len_u64 = std.math.mul(u64, tuple_len, factor) catch return sema.fail(block, rhs_src, "operation results in overflow", .{}); @@ -12951,8 +12974,8 @@ fn analyzeTupleMul( var runtime_src: ?LazySrcLoc = null; var i: u32 = 0; while (i < tuple_len) : (i += 1) { - types[i] = 
operand_ty.structFieldType(i); - values[i] = operand_ty.structFieldDefaultValue(i); + types[i] = operand_ty.structFieldType(i, mod); + values[i] = operand_ty.structFieldDefaultValue(i, mod); const operand_src = lhs_src; // TODO better source location if (values[i].ip_index == .unreachable_value) { runtime_src = operand_src; @@ -13006,7 +13029,7 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const operator_src: LazySrcLoc = .{ .node_offset_main_token = inst_data.src_node }; const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node }; - if (lhs_ty.isTuple()) { + if (lhs_ty.isTuple(mod)) { // In `**` rhs must be comptime-known, but lhs can be runtime-known const factor = try sema.resolveInt(block, rhs_src, extra.rhs, Type.usize, "array multiplication factor must be comptime-known"); return sema.analyzeTupleMul(block, inst_data.src_node, lhs, factor); @@ -14502,7 +14525,7 @@ fn zirOverflowArithmetic( const element_refs = try sema.arena.alloc(Air.Inst.Ref, 2); element_refs[0] = result.inst; - element_refs[1] = try sema.addConstant(tuple_ty.structFieldType(1), result.overflow_bit); + element_refs[1] = try sema.addConstant(tuple_ty.structFieldType(1, mod), result.overflow_bit); return block.addAggregateInit(tuple_ty, element_refs); } @@ -16378,7 +16401,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const union_ty = try sema.resolveTypeFields(ty); try sema.resolveTypeLayout(ty); // Getting alignment requires type layout - const layout = union_ty.containerLayout(); + const layout = union_ty.containerLayout(mod); const union_fields = union_ty.unionFields(); const union_field_vals = try fields_anon_decl.arena().alloc(Value, union_fields.count()); @@ -16484,7 +16507,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai }; const struct_ty = try sema.resolveTypeFields(ty); try sema.resolveTypeLayout(ty); // Getting alignment requires type layout - const layout = struct_ty.containerLayout(); + const layout = struct_ty.containerLayout(mod); const struct_field_vals = fv: { if (struct_ty.isSimpleTupleOrAnonStruct()) { @@ -16532,7 +16555,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } break :fv struct_field_vals; } - const struct_fields = struct_ty.structFields(); + const struct_fields = struct_ty.structFields(mod); const struct_field_vals = try fields_anon_decl.arena().alloc(Value, struct_fields.count()); for (struct_field_vals, 0..) |*field_val, i| { @@ -16600,7 +16623,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const backing_integer_val = blk: { if (layout == .Packed) { - const struct_obj = struct_ty.castTag(.@"struct").?.data; + const struct_obj = mod.typeToStruct(struct_ty).?; assert(struct_obj.haveLayout()); assert(struct_obj.backing_int_ty.isInt(mod)); const backing_int_ty_val = try Value.Tag.ty.create(sema.arena, struct_obj.backing_int_ty); @@ -16624,7 +16647,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // decls: []const Declaration, decls_val, // is_tuple: bool, - Value.makeBool(struct_ty.isTuple()), + Value.makeBool(struct_ty.isTuple(mod)), }; return sema.addConstant( @@ -17801,12 +17824,13 @@ fn structInitEmpty( dest_src: LazySrcLoc, init_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const gpa = sema.gpa; // This logic must be synchronized with that in `zirStructInit`. 
const struct_ty = try sema.resolveTypeFields(obj_ty); // The init values to use for the struct instance. - const field_inits = try gpa.alloc(Air.Inst.Ref, struct_ty.structFieldCount()); + const field_inits = try gpa.alloc(Air.Inst.Ref, struct_ty.structFieldCount(mod)); defer gpa.free(field_inits); @memset(field_inits, .none); @@ -17897,18 +17921,18 @@ fn zirStructInit( // Maps field index to field_type index of where it was already initialized. // For making sure all fields are accounted for and no fields are duplicated. - const found_fields = try gpa.alloc(Zir.Inst.Index, resolved_ty.structFieldCount()); + const found_fields = try gpa.alloc(Zir.Inst.Index, resolved_ty.structFieldCount(mod)); defer gpa.free(found_fields); // The init values to use for the struct instance. - const field_inits = try gpa.alloc(Air.Inst.Ref, resolved_ty.structFieldCount()); + const field_inits = try gpa.alloc(Air.Inst.Ref, resolved_ty.structFieldCount(mod)); defer gpa.free(field_inits); @memset(field_inits, .none); var field_i: u32 = 0; var extra_index = extra.end; - const is_packed = resolved_ty.containerLayout() == .Packed; + const is_packed = resolved_ty.containerLayout(mod) == .Packed; while (field_i < extra.data.fields_len) : (field_i += 1) { const item = sema.code.extraData(Zir.Inst.StructInit.Item, extra_index); extra_index = item.end; @@ -17917,7 +17941,7 @@ fn zirStructInit( const field_src: LazySrcLoc = .{ .node_offset_initializer = field_type_data.src_node }; const field_type_extra = sema.code.extraData(Zir.Inst.FieldType, field_type_data.payload_index).data; const field_name = sema.code.nullTerminatedString(field_type_extra.name_start); - const field_index = if (resolved_ty.isTuple()) + const field_index = if (resolved_ty.isTuple(mod)) try sema.tupleFieldIndex(block, resolved_ty, field_name, field_src) else try sema.structFieldIndex(block, resolved_ty, field_name, field_src); @@ -17940,7 +17964,7 @@ fn zirStructInit( return sema.failWithNeededComptime(block, field_src, "value stored in comptime field must be comptime-known"); }; - if (!init_val.eql(default_value, resolved_ty.structFieldType(field_index), sema.mod)) { + if (!init_val.eql(default_value, resolved_ty.structFieldType(field_index, mod), sema.mod)) { return sema.failWithInvalidComptimeFieldStore(block, field_src, resolved_ty, field_index); } }; @@ -18029,13 +18053,13 @@ fn finishStructInit( field_inits[i] = try sema.addConstant(struct_obj.types[i], default_val); } } - } else if (struct_ty.isTuple()) { + } else if (struct_ty.isTuple(mod)) { var i: u32 = 0; - const len = struct_ty.structFieldCount(); + const len = struct_ty.structFieldCount(mod); while (i < len) : (i += 1) { if (field_inits[i] != .none) continue; - const default_val = struct_ty.structFieldDefaultValue(i); + const default_val = struct_ty.structFieldDefaultValue(i, mod); if (default_val.ip_index == .unreachable_value) { const template = "missing tuple field with index {d}"; if (root_msg) |msg| { @@ -18044,11 +18068,11 @@ fn finishStructInit( root_msg = try sema.errMsg(block, init_src, template, .{i}); } } else { - field_inits[i] = try sema.addConstant(struct_ty.structFieldType(i), default_val); + field_inits[i] = try sema.addConstant(struct_ty.structFieldType(i, mod), default_val); } } } else { - const struct_obj = struct_ty.castTag(.@"struct").?.data; + const struct_obj = mod.typeToStruct(struct_ty).?; for (struct_obj.fields.values(), 0..) 
|field, i| { if (field_inits[i] != .none) continue; @@ -18068,11 +18092,11 @@ fn finishStructInit( } if (root_msg) |msg| { - if (struct_ty.castTag(.@"struct")) |struct_obj| { - const fqn = try struct_obj.data.getFullyQualifiedName(sema.mod); + if (mod.typeToStruct(struct_ty)) |struct_obj| { + const fqn = try struct_obj.getFullyQualifiedName(sema.mod); defer gpa.free(fqn); try sema.mod.errNoteNonLazy( - struct_obj.data.srcLoc(sema.mod), + struct_obj.srcLoc(sema.mod), msg, "struct '{s}' declared here", .{fqn}, @@ -18277,7 +18301,7 @@ fn zirArrayInit( for (args[1..], 0..) |arg, i| { const resolved_arg = try sema.resolveInst(arg); const elem_ty = if (array_ty.zigTypeTag(mod) == .Struct) - array_ty.structFieldType(i) + array_ty.structFieldType(i, mod) else array_ty.elemType2(mod); resolved_args[i] = sema.coerce(block, elem_ty, resolved_arg, .unneeded) catch |err| switch (err) { @@ -18331,12 +18355,12 @@ fn zirArrayInit( }); const alloc = try block.addTy(.alloc, alloc_ty); - if (array_ty.isTuple()) { + if (array_ty.isTuple(mod)) { for (resolved_args, 0..) |arg, i| { const elem_ptr_ty = try Type.ptr(sema.arena, sema.mod, .{ .mutable = true, .@"addrspace" = target_util.defaultAddressSpace(target, .local), - .pointee_type = array_ty.structFieldType(i), + .pointee_type = array_ty.structFieldType(i, mod), }); const elem_ptr_ty_ref = try sema.addType(elem_ptr_ty); @@ -18514,7 +18538,7 @@ fn fieldType( const field_index = try sema.anonStructFieldIndex(block, cur_ty, field_name, field_src); return sema.addType(cur_ty.tupleFields().types[field_index]); } - const struct_obj = cur_ty.castTag(.@"struct").?.data; + const struct_obj = mod.typeToStruct(cur_ty).?; const field = struct_obj.fields.get(field_name) orelse return sema.failWithBadStructFieldAccess(block, struct_obj, field_src, field_name); return sema.addType(field.ty); @@ -19185,13 +19209,13 @@ fn zirReify( .file_scope = block.getFileScope(mod), }); const new_namespace = mod.namespacePtr(new_namespace_index); - errdefer @panic("TODO error handling"); + errdefer mod.destroyNamespace(new_namespace_index); const opaque_ty = try mod.intern_pool.get(gpa, .{ .opaque_type = .{ .decl = new_decl_index, .namespace = new_namespace_index, } }); - errdefer @panic("TODO error handling"); + errdefer mod.intern_pool.remove(opaque_ty); new_decl.val = opaque_ty.toValue(); new_namespace.ty = opaque_ty.toType(); @@ -19493,22 +19517,34 @@ fn reifyStruct( name_strategy: Zir.Inst.NameStrategy, is_tuple: bool, ) CompileError!Air.Inst.Ref { - var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa); + const mod = sema.mod; + const gpa = sema.gpa; + + var new_decl_arena = std.heap.ArenaAllocator.init(gpa); errdefer new_decl_arena.deinit(); const new_decl_arena_allocator = new_decl_arena.allocator(); - const struct_obj = try new_decl_arena_allocator.create(Module.Struct); - const struct_ty = try Type.Tag.@"struct".create(new_decl_arena_allocator, struct_obj); - const new_struct_val = try Value.Tag.ty.create(new_decl_arena_allocator, struct_ty); - const mod = sema.mod; + // Because these three things each reference each other, `undefined` + // placeholders are used before being set after the struct type gains an + // InternPool index. 
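+ // Same construction order as `zirStructDecl` above: Decl (with
+ // `val = undefined`), then Namespace (with `ty = undefined`), then Struct,
+ // then the interned struct type is written back into the Decl and Namespace.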
+ const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{ .ty = Type.type, - .val = new_struct_val, + .val = undefined, }, name_strategy, "struct", inst); const new_decl = mod.declPtr(new_decl_index); new_decl.owns_tv = true; errdefer mod.abortAnonDecl(new_decl_index); - struct_obj.* = .{ + + const new_namespace_index = try mod.createNamespace(.{ + .parent = block.namespace.toOptional(), + .ty = undefined, + .file_scope = block.getFileScope(mod), + }); + const new_namespace = mod.namespacePtr(new_namespace_index); + errdefer mod.destroyNamespace(new_namespace_index); + + const struct_index = try mod.createStruct(.{ .owner_decl = new_decl_index, .fields = .{}, .zir_index = inst, @@ -19516,12 +19552,19 @@ fn reifyStruct( .status = .have_field_types, .known_non_opv = false, .is_tuple = is_tuple, - .namespace = try mod.createNamespace(.{ - .parent = block.namespace.toOptional(), - .ty = struct_ty, - .file_scope = block.getFileScope(mod), - }), - }; + .namespace = new_namespace_index, + }); + const struct_obj = mod.structPtr(struct_index); + errdefer mod.destroyStruct(struct_index); + + const struct_ty = try mod.intern_pool.get(gpa, .{ .struct_type = .{ + .index = struct_index.toOptional(), + .namespace = new_namespace_index.toOptional(), + } }); + errdefer mod.intern_pool.remove(struct_ty); + + new_decl.val = struct_ty.toValue(); + new_namespace.ty = struct_ty.toType(); // Fields const fields_len = try sema.usizeCast(block, src, fields_val.sliceLen(mod)); @@ -19609,7 +19652,7 @@ fn reifyStruct( if (field_ty.zigTypeTag(mod) == .Opaque) { const msg = msg: { const msg = try sema.errMsg(block, src, "opaque types have unknown size and therefore cannot be directly embedded in structs", .{}); - errdefer msg.destroy(sema.gpa); + errdefer msg.destroy(gpa); try sema.addDeclaredHereNote(msg, field_ty); break :msg msg; @@ -19619,7 +19662,7 @@ fn reifyStruct( if (field_ty.zigTypeTag(mod) == .NoReturn) { const msg = msg: { const msg = try sema.errMsg(block, src, "struct fields cannot be 'noreturn'", .{}); - errdefer msg.destroy(sema.gpa); + errdefer msg.destroy(gpa); try sema.addDeclaredHereNote(msg, field_ty); break :msg msg; @@ -19629,7 +19672,7 @@ fn reifyStruct( if (struct_obj.layout == .Extern and !try sema.validateExternType(field_ty, .struct_field)) { const msg = msg: { const msg = try sema.errMsg(block, src, "extern structs cannot contain fields of type '{}'", .{field_ty.fmt(sema.mod)}); - errdefer msg.destroy(sema.gpa); + errdefer msg.destroy(gpa); const src_decl = sema.mod.declPtr(block.src_decl); try sema.explainWhyTypeIsNotExtern(msg, src.toSrcLoc(src_decl, mod), field_ty, .struct_field); @@ -19641,7 +19684,7 @@ fn reifyStruct( } else if (struct_obj.layout == .Packed and !(validatePackedType(field_ty, mod))) { const msg = msg: { const msg = try sema.errMsg(block, src, "packed structs cannot contain fields of type '{}'", .{field_ty.fmt(sema.mod)}); - errdefer msg.destroy(sema.gpa); + errdefer msg.destroy(gpa); const src_decl = sema.mod.declPtr(block.src_decl); try sema.explainWhyTypeIsNotPacked(msg, src.toSrcLoc(src_decl, mod), field_ty); @@ -19660,7 +19703,7 @@ fn reifyStruct( sema.resolveTypeLayout(field.ty) catch |err| switch (err) { error.AnalysisFail => { const msg = sema.err orelse return err; - try sema.addFieldErrNote(struct_ty, index, msg, "while checking this field", .{}); + try sema.addFieldErrNote(struct_ty.toType(), index, msg, "while checking this field", .{}); return err; }, else => return err, @@ -20558,21 +20601,21 @@ fn bitOffsetOf(sema: *Sema, block: *Block, 
inst: Zir.Inst.Index) CompileError!u6 }, } - const field_index = if (ty.isTuple()) blk: { + const field_index = if (ty.isTuple(mod)) blk: { if (mem.eql(u8, field_name, "len")) { return sema.fail(block, src, "no offset available for 'len' field of tuple", .{}); } break :blk try sema.tupleFieldIndex(block, ty, field_name, rhs_src); } else try sema.structFieldIndex(block, ty, field_name, rhs_src); - if (ty.structFieldIsComptime(field_index)) { + if (ty.structFieldIsComptime(field_index, mod)) { return sema.fail(block, src, "no offset available for comptime field", .{}); } - switch (ty.containerLayout()) { + switch (ty.containerLayout(mod)) { .Packed => { var bit_sum: u64 = 0; - const fields = ty.structFields(); + const fields = ty.structFields(mod); for (fields.values(), 0..) |field, i| { if (i == field_index) { return bit_sum; @@ -21810,6 +21853,7 @@ fn zirBuiltinCall(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const modifier_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const func_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; @@ -21869,11 +21913,11 @@ fn zirBuiltinCall(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const args = try sema.resolveInst(extra.args); const args_ty = sema.typeOf(args); - if (!args_ty.isTuple() and args_ty.ip_index != .empty_struct_type) { + if (!args_ty.isTuple(mod) and args_ty.ip_index != .empty_struct_type) { return sema.fail(block, args_src, "expected a tuple, found '{}'", .{args_ty.fmt(sema.mod)}); } - var resolved_args: []Air.Inst.Ref = try sema.arena.alloc(Air.Inst.Ref, args_ty.structFieldCount()); + var resolved_args: []Air.Inst.Ref = try sema.arena.alloc(Air.Inst.Ref, args_ty.structFieldCount(mod)); for (resolved_args, 0..) 
|*resolved, i| { resolved.* = try sema.tupleFieldValByIndex(block, args_src, args, @intCast(u32, i), args_ty); } @@ -21905,7 +21949,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr const field_index = switch (parent_ty.zigTypeTag(mod)) { .Struct => blk: { - if (parent_ty.isTuple()) { + if (parent_ty.isTuple(mod)) { if (mem.eql(u8, field_name, "len")) { return sema.fail(block, src, "cannot get @fieldParentPtr of 'len' field of tuple", .{}); } @@ -21918,7 +21962,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr else => unreachable, }; - if (parent_ty.zigTypeTag(mod) == .Struct and parent_ty.structFieldIsComptime(field_index)) { + if (parent_ty.zigTypeTag(mod) == .Struct and parent_ty.structFieldIsComptime(field_index, mod)) { return sema.fail(block, src, "cannot get @fieldParentPtr of a comptime field", .{}); } @@ -21926,17 +21970,17 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr const field_ptr_ty_info = field_ptr_ty.ptrInfo(mod); var ptr_ty_data: Type.Payload.Pointer.Data = .{ - .pointee_type = parent_ty.structFieldType(field_index), + .pointee_type = parent_ty.structFieldType(field_index, mod), .mutable = field_ptr_ty_info.mutable, .@"addrspace" = field_ptr_ty_info.@"addrspace", }; - if (parent_ty.containerLayout() == .Packed) { + if (parent_ty.containerLayout(mod) == .Packed) { return sema.fail(block, src, "TODO handle packed structs/unions with @fieldParentPtr", .{}); } else { ptr_ty_data.@"align" = blk: { - if (parent_ty.castTag(.@"struct")) |struct_obj| { - break :blk struct_obj.data.fields.values()[field_index].abi_align; + if (mod.typeToStruct(parent_ty)) |struct_obj| { + break :blk struct_obj.fields.values()[field_index].abi_align; } else if (parent_ty.cast(Type.Payload.Union)) |union_obj| { break :blk union_obj.data.fields.values()[field_index].abi_align; } else { @@ -23380,8 +23424,7 @@ fn explainWhyTypeIsComptimeInner( .Struct => { if ((try type_set.getOrPutContext(sema.gpa, ty, .{ .mod = mod })).found_existing) return; - if (ty.castTag(.@"struct")) |payload| { - const struct_obj = payload.data; + if (mod.typeToStruct(ty)) |struct_obj| { for (struct_obj.fields.values(), 0..) |field, i| { const field_src_loc = struct_obj.fieldSrcLoc(sema.mod, .{ .index = i, @@ -23472,7 +23515,7 @@ fn validateExternType( .Enum => { return sema.validateExternType(try ty.intTagType(mod), position); }, - .Struct, .Union => switch (ty.containerLayout()) { + .Struct, .Union => switch (ty.containerLayout(mod)) { .Extern => return true, .Packed => { const bit_size = try ty.bitSizeAdvanced(mod, sema); @@ -23569,7 +23612,7 @@ fn explainWhyTypeIsNotExtern( /// Returns true if `ty` is allowed in packed types. /// Does *NOT* require `ty` to be resolved in any way. 
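+/// Takes a mutable `*Module` because `containerLayout(mod)` now resolves
+/// struct layout through `mod.intern_pool` instead of a `Type` payload tag.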
-fn validatePackedType(ty: Type, mod: *const Module) bool { +fn validatePackedType(ty: Type, mod: *Module) bool { switch (ty.zigTypeTag(mod)) { .Type, .ComptimeFloat, @@ -23595,7 +23638,7 @@ fn validatePackedType(ty: Type, mod: *const Module) bool { .Enum, => return true, .Pointer => return !ty.isSlice(mod), - .Struct, .Union => return ty.containerLayout() == .Packed, + .Struct, .Union => return ty.containerLayout(mod) == .Packed, } } @@ -24419,27 +24462,27 @@ fn fieldCallBind( switch (concrete_ty.zigTypeTag(mod)) { .Struct => { const struct_ty = try sema.resolveTypeFields(concrete_ty); - if (struct_ty.castTag(.@"struct")) |struct_obj| { - const field_index_usize = struct_obj.data.fields.getIndex(field_name) orelse + if (mod.typeToStruct(struct_ty)) |struct_obj| { + const field_index_usize = struct_obj.fields.getIndex(field_name) orelse break :find_field; const field_index = @intCast(u32, field_index_usize); - const field = struct_obj.data.fields.values()[field_index]; + const field = struct_obj.fields.values()[field_index]; return sema.finishFieldCallBind(block, src, ptr_ty, field.ty, field_index, object_ptr); - } else if (struct_ty.isTuple()) { + } else if (struct_ty.isTuple(mod)) { if (mem.eql(u8, field_name, "len")) { - return .{ .direct = try sema.addIntUnsigned(Type.usize, struct_ty.structFieldCount()) }; + return .{ .direct = try sema.addIntUnsigned(Type.usize, struct_ty.structFieldCount(mod)) }; } if (std.fmt.parseUnsigned(u32, field_name, 10)) |field_index| { - if (field_index >= struct_ty.structFieldCount()) break :find_field; - return sema.finishFieldCallBind(block, src, ptr_ty, struct_ty.structFieldType(field_index), field_index, object_ptr); + if (field_index >= struct_ty.structFieldCount(mod)) break :find_field; + return sema.finishFieldCallBind(block, src, ptr_ty, struct_ty.structFieldType(field_index, mod), field_index, object_ptr); } else |_| {} } else { - const max = struct_ty.structFieldCount(); + const max = struct_ty.structFieldCount(mod); var i: u32 = 0; while (i < max) : (i += 1) { - if (mem.eql(u8, struct_ty.structFieldName(i), field_name)) { - return sema.finishFieldCallBind(block, src, ptr_ty, struct_ty.structFieldType(i), i, object_ptr); + if (mem.eql(u8, struct_ty.structFieldName(i, mod), field_name)) { + return sema.finishFieldCallBind(block, src, ptr_ty, struct_ty.structFieldType(i, mod), i, object_ptr); } } } @@ -24651,9 +24694,9 @@ fn structFieldPtr( const struct_ty = try sema.resolveTypeFields(unresolved_struct_ty); try sema.resolveStructLayout(struct_ty); - if (struct_ty.isTuple()) { + if (struct_ty.isTuple(mod)) { if (mem.eql(u8, field_name, "len")) { - const len_inst = try sema.addIntUnsigned(Type.usize, struct_ty.structFieldCount()); + const len_inst = try sema.addIntUnsigned(Type.usize, struct_ty.structFieldCount(mod)); return sema.analyzeRef(block, src, len_inst); } const field_index = try sema.tupleFieldIndex(block, struct_ty, field_name, field_name_src); @@ -24663,7 +24706,7 @@ fn structFieldPtr( return sema.tupleFieldPtr(block, src, struct_ptr, field_name_src, field_index, initializing); } - const struct_obj = struct_ty.castTag(.@"struct").?.data; + const struct_obj = mod.typeToStruct(struct_ty).?; const field_index_big = struct_obj.fields.getIndex(field_name) orelse return sema.failWithBadStructFieldAccess(block, struct_obj, field_name_src, field_name); @@ -24687,7 +24730,7 @@ fn structFieldPtrByIndex( } const mod = sema.mod; - const struct_obj = struct_ty.castTag(.@"struct").?.data; + const struct_obj = mod.typeToStruct(struct_ty).?; const field 
= struct_obj.fields.values()[field_index]; const struct_ptr_ty = sema.typeOf(struct_ptr); const struct_ptr_ty_info = struct_ptr_ty.ptrInfo(mod); @@ -24799,8 +24842,11 @@ fn structFieldVal( const field_index = try sema.anonStructFieldIndex(block, struct_ty, field_name, field_name_src); return sema.tupleFieldValByIndex(block, src, struct_byval, field_index, struct_ty); }, - .@"struct" => { - const struct_obj = struct_ty.castTag(.@"struct").?.data; + else => unreachable, + }, + else => switch (mod.intern_pool.indexToKey(struct_ty.ip_index)) { + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index).?; if (struct_obj.is_tuple) return sema.tupleFieldVal(block, src, struct_byval, field_name, field_name_src, struct_ty); const field_index_usize = struct_obj.fields.getIndex(field_name) orelse @@ -24827,7 +24873,6 @@ fn structFieldVal( }, else => unreachable, }, - else => unreachable, } } @@ -24840,8 +24885,9 @@ fn tupleFieldVal( field_name_src: LazySrcLoc, tuple_ty: Type, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; if (mem.eql(u8, field_name, "len")) { - return sema.addIntUnsigned(Type.usize, tuple_ty.structFieldCount()); + return sema.addIntUnsigned(Type.usize, tuple_ty.structFieldCount(mod)); } const field_index = try sema.tupleFieldIndex(block, tuple_ty, field_name, field_name_src); return sema.tupleFieldValByIndex(block, src, tuple_byval, field_index, tuple_ty); @@ -24858,7 +24904,7 @@ fn tupleFieldIndex( const mod = sema.mod; assert(!std.mem.eql(u8, field_name, "len")); if (std.fmt.parseUnsigned(u32, field_name, 10)) |field_index| { - if (field_index < tuple_ty.structFieldCount()) return field_index; + if (field_index < tuple_ty.structFieldCount(mod)) return field_index; return sema.fail(block, field_name_src, "index '{s}' out of bounds of tuple '{}'", .{ field_name, tuple_ty.fmt(mod), }); @@ -24878,7 +24924,7 @@ fn tupleFieldValByIndex( tuple_ty: Type, ) CompileError!Air.Inst.Ref { const mod = sema.mod; - const field_ty = tuple_ty.structFieldType(field_index); + const field_ty = tuple_ty.structFieldType(field_index, mod); if (try tuple_ty.structFieldValueComptime(mod, field_index)) |default_value| { return sema.addConstant(field_ty, default_value); @@ -25251,7 +25297,7 @@ fn tupleFieldPtr( const tuple_ptr_ty = sema.typeOf(tuple_ptr); const tuple_ty = tuple_ptr_ty.childType(mod); _ = try sema.resolveTypeFields(tuple_ty); - const field_count = tuple_ty.structFieldCount(); + const field_count = tuple_ty.structFieldCount(mod); if (field_count == 0) { return sema.fail(block, tuple_ptr_src, "indexing into empty tuple is not allowed", .{}); @@ -25263,7 +25309,7 @@ fn tupleFieldPtr( }); } - const field_ty = tuple_ty.structFieldType(field_index); + const field_ty = tuple_ty.structFieldType(field_index, mod); const ptr_field_ty = try Type.ptr(sema.arena, mod, .{ .pointee_type = field_ty, .mutable = tuple_ptr_ty.ptrIsMutable(mod), @@ -25308,7 +25354,7 @@ fn tupleField( ) CompileError!Air.Inst.Ref { const mod = sema.mod; const tuple_ty = try sema.resolveTypeFields(sema.typeOf(tuple)); - const field_count = tuple_ty.structFieldCount(); + const field_count = tuple_ty.structFieldCount(mod); if (field_count == 0) { return sema.fail(block, tuple_src, "indexing into empty tuple is not allowed", .{}); @@ -25320,7 +25366,7 @@ fn tupleField( }); } - const field_ty = tuple_ty.structFieldType(field_index); + const field_ty = tuple_ty.structFieldType(field_index, mod); if (try tuple_ty.structFieldValueComptime(mod, field_index)) |default_value| { return 
sema.addConstant(field_ty, default_value); // comptime field @@ -25919,7 +25965,7 @@ fn coerceExtra( .Array => { // pointer to tuple to pointer to array if (inst_ty.isSinglePointer(mod) and - inst_ty.childType(mod).isTuple() and + inst_ty.childType(mod).isTuple(mod) and sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) { return sema.coerceTupleToArrayPtrs(block, dest_ty, dest_ty_src, inst, inst_src); @@ -25939,11 +25985,11 @@ fn coerceExtra( if (!inst_ty.isSinglePointer(mod)) break :to_slice; const inst_child_ty = inst_ty.childType(mod); - if (!inst_child_ty.isTuple()) break :to_slice; + if (!inst_child_ty.isTuple(mod)) break :to_slice; // empty tuple to zero-length slice // note that this allows coercing to a mutable slice. - if (inst_child_ty.structFieldCount() == 0) { + if (inst_child_ty.structFieldCount(mod) == 0) { // Optional slice is represented with a null pointer so // we use a dummy pointer value with the required alignment. const slice_val = try Value.Tag.slice.create(sema.arena, .{ @@ -26213,7 +26259,7 @@ fn coerceExtra( if (inst == .empty_struct) { return sema.arrayInitEmpty(block, inst_src, dest_ty); } - if (inst_ty.isTuple()) { + if (inst_ty.isTuple(mod)) { return sema.coerceTupleToArray(block, dest_ty, dest_ty_src, inst, inst_src); } }, @@ -26225,7 +26271,7 @@ fn coerceExtra( .Vector => switch (inst_ty.zigTypeTag(mod)) { .Array, .Vector => return sema.coerceArrayLike(block, dest_ty, dest_ty_src, inst, inst_src), .Struct => { - if (inst_ty.isTuple()) { + if (inst_ty.isTuple(mod)) { return sema.coerceTupleToArray(block, dest_ty, dest_ty_src, inst, inst_src); } }, @@ -26238,7 +26284,7 @@ fn coerceExtra( if (inst == .empty_struct) { return sema.structInitEmpty(block, dest_ty, dest_ty_src, inst_src); } - if (inst_ty.isTupleOrAnonStruct()) { + if (inst_ty.isTupleOrAnonStruct(mod)) { return sema.coerceTupleToStruct(block, dest_ty, inst, inst_src) catch |err| switch (err) { error.NotCoercible => break :blk, else => |e| return e, @@ -27304,8 +27350,8 @@ fn storePtr2( // this code does not handle tuple-to-struct coercion which requires dealing with missing // fields. 
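// Illustration (a sketch for this write-up, not part of the upstream patch):
// the fast path below covers stores of a tuple operand through a
// pointer-to-array, for example:
//
//     var buf: [3]u32 = undefined;
//     const dest: *[3]u32 = &buf;
//     dest.* = .{ 1, 2, 3 }; // tuple stored field-by-field, no temporary
//
// Each field is written through its own element pointer rather than
// materializing a whole array value and copying it in.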
const operand_ty = sema.typeOf(uncasted_operand); - if (operand_ty.isTuple() and elem_ty.zigTypeTag(mod) == .Array) { - const field_count = operand_ty.structFieldCount(); + if (operand_ty.isTuple(mod) and elem_ty.zigTypeTag(mod) == .Array) { + const field_count = operand_ty.structFieldCount(mod); var i: u32 = 0; while (i < field_count) : (i += 1) { const elem_src = operand_src; // TODO better source location @@ -27804,7 +27850,7 @@ fn beginComptimePtrMutation( switch (parent.ty.zigTypeTag(mod)) { .Struct => { - const fields = try arena.alloc(Value, parent.ty.structFieldCount()); + const fields = try arena.alloc(Value, parent.ty.structFieldCount(mod)); @memset(fields, Value.undef); val_ptr.* = try Value.Tag.aggregate.create(arena, fields); @@ -27813,7 +27859,7 @@ fn beginComptimePtrMutation( sema, block, src, - parent.ty.structFieldType(field_index), + parent.ty.structFieldType(field_index, mod), &fields[field_index], ptr_elem_ty, parent.decl_ref_mut, @@ -27832,7 +27878,7 @@ fn beginComptimePtrMutation( sema, block, src, - parent.ty.structFieldType(field_index), + parent.ty.structFieldType(field_index, mod), &payload.data.val, ptr_elem_ty, parent.decl_ref_mut, @@ -27878,7 +27924,7 @@ fn beginComptimePtrMutation( sema, block, src, - parent.ty.structFieldType(field_index), + parent.ty.structFieldType(field_index, mod), duped, ptr_elem_ty, parent.decl_ref_mut, @@ -27889,7 +27935,7 @@ fn beginComptimePtrMutation( sema, block, src, - parent.ty.structFieldType(field_index), + parent.ty.structFieldType(field_index, mod), &val_ptr.castTag(.aggregate).?.data[field_index], ptr_elem_ty, parent.decl_ref_mut, @@ -27907,7 +27953,7 @@ fn beginComptimePtrMutation( sema, block, src, - parent.ty.structFieldType(field_index), + parent.ty.structFieldType(field_index, mod), &payload.val, ptr_elem_ty, parent.decl_ref_mut, @@ -28269,8 +28315,8 @@ fn beginComptimePtrLoad( var deref = try sema.beginComptimePtrLoad(block, src, field_ptr.container_ptr, field_ptr.container_ty); if (field_ptr.container_ty.hasWellDefinedLayout(mod)) { - const struct_ty = field_ptr.container_ty.castTag(.@"struct"); - if (struct_ty != null and struct_ty.?.data.layout == .Packed) { + const struct_obj = mod.typeToStruct(field_ptr.container_ty); + if (struct_obj != null and struct_obj.?.layout == .Packed) { // packed structs are not byte addressable deref.parent = null; } else if (deref.parent) |*parent| { @@ -28310,7 +28356,7 @@ fn beginComptimePtrLoad( else => unreachable, }; } else { - const field_ty = field_ptr.container_ty.structFieldType(field_index); + const field_ty = field_ptr.container_ty.structFieldType(field_index, mod); deref.pointee = TypedValue{ .ty = field_ty, .val = try tv.val.fieldValue(tv.ty, mod, field_index), @@ -28483,7 +28529,7 @@ fn checkPtrAttributes(sema: *Sema, dest_ty: Type, inst_ty: Type, in_memory_resul const inst_info = inst_ty.ptrInfo(mod); const len0 = (inst_info.pointee_type.zigTypeTag(mod) == .Array and (inst_info.pointee_type.arrayLenIncludingSentinel(mod) == 0 or (inst_info.pointee_type.arrayLen(mod) == 0 and dest_info.sentinel == null and dest_info.size != .C and dest_info.size != .Many))) or - (inst_info.pointee_type.isTuple() and inst_info.pointee_type.structFieldCount() == 0); + (inst_info.pointee_type.isTuple(mod) and inst_info.pointee_type.structFieldCount(mod) == 0); const ok_cv_qualifiers = ((inst_info.mutable or !dest_info.mutable) or len0) and @@ -28714,8 +28760,9 @@ fn coerceAnonStructToUnion( inst: Air.Inst.Ref, inst_src: LazySrcLoc, ) !Air.Inst.Ref { + const mod = sema.mod; const inst_ty = 
sema.typeOf(inst); - const field_count = inst_ty.structFieldCount(); + const field_count = inst_ty.structFieldCount(mod); if (field_count != 1) { const msg = msg: { const msg = if (field_count > 1) try sema.errMsg( @@ -28927,7 +28974,7 @@ fn coerceTupleToSlicePtrs( const tuple_ty = sema.typeOf(ptr_tuple).childType(mod); const tuple = try sema.analyzeLoad(block, tuple_src, ptr_tuple, tuple_src); const slice_info = slice_ty.ptrInfo(mod); - const array_ty = try Type.array(sema.arena, tuple_ty.structFieldCount(), slice_info.sentinel, slice_info.pointee_type, sema.mod); + const array_ty = try Type.array(sema.arena, tuple_ty.structFieldCount(mod), slice_info.sentinel, slice_info.pointee_type, sema.mod); const array_inst = try sema.coerceTupleToArray(block, array_ty, slice_ty_src, tuple, tuple_src); if (slice_info.@"align" != 0) { return sema.fail(block, slice_ty_src, "TODO: override the alignment of the array decl we create here", .{}); @@ -28966,20 +29013,21 @@ fn coerceTupleToStruct( inst: Air.Inst.Ref, inst_src: LazySrcLoc, ) !Air.Inst.Ref { + const mod = sema.mod; const struct_ty = try sema.resolveTypeFields(dest_ty); - if (struct_ty.isTupleOrAnonStruct()) { + if (struct_ty.isTupleOrAnonStruct(mod)) { return sema.coerceTupleToTuple(block, struct_ty, inst, inst_src); } - const fields = struct_ty.structFields(); + const fields = struct_ty.structFields(mod); const field_vals = try sema.arena.alloc(Value, fields.count()); const field_refs = try sema.arena.alloc(Air.Inst.Ref, field_vals.len); @memset(field_refs, .none); const inst_ty = sema.typeOf(inst); var runtime_src: ?LazySrcLoc = null; - const field_count = inst_ty.structFieldCount(); + const field_count = inst_ty.structFieldCount(mod); var field_i: u32 = 0; while (field_i < field_count) : (field_i += 1) { const field_src = inst_src; // TODO better source location @@ -29061,13 +29109,14 @@ fn coerceTupleToTuple( inst: Air.Inst.Ref, inst_src: LazySrcLoc, ) !Air.Inst.Ref { - const dest_field_count = tuple_ty.structFieldCount(); + const mod = sema.mod; + const dest_field_count = tuple_ty.structFieldCount(mod); const field_vals = try sema.arena.alloc(Value, dest_field_count); const field_refs = try sema.arena.alloc(Air.Inst.Ref, field_vals.len); @memset(field_refs, .none); const inst_ty = sema.typeOf(inst); - const inst_field_count = inst_ty.structFieldCount(); + const inst_field_count = inst_ty.structFieldCount(mod); if (inst_field_count > dest_field_count) return error.NotCoercible; var runtime_src: ?LazySrcLoc = null; @@ -29085,8 +29134,8 @@ fn coerceTupleToTuple( const field_index = try sema.tupleFieldIndex(block, tuple_ty, field_name, field_src); - const field_ty = tuple_ty.structFieldType(field_i); - const default_val = tuple_ty.structFieldDefaultValue(field_i); + const field_ty = tuple_ty.structFieldType(field_i, mod); + const default_val = tuple_ty.structFieldDefaultValue(field_i, mod); const elem_ref = try sema.tupleField(block, inst_src, inst, field_src, field_i); const coerced = try sema.coerce(block, field_ty, elem_ref, field_src); field_refs[field_index] = coerced; @@ -29115,12 +29164,12 @@ fn coerceTupleToTuple( for (field_refs, 0..) 
|*field_ref, i| { if (field_ref.* != .none) continue; - const default_val = tuple_ty.structFieldDefaultValue(i); - const field_ty = tuple_ty.structFieldType(i); + const default_val = tuple_ty.structFieldDefaultValue(i, mod); + const field_ty = tuple_ty.structFieldType(i, mod); const field_src = inst_src; // TODO better source location if (default_val.ip_index == .unreachable_value) { - if (tuple_ty.isTuple()) { + if (tuple_ty.isTuple(mod)) { const template = "missing tuple field: {d}"; if (root_msg) |msg| { try sema.errNote(block, field_src, msg, template, .{i}); @@ -29130,7 +29179,7 @@ fn coerceTupleToTuple( continue; } const template = "missing struct field: {s}"; - const args = .{tuple_ty.structFieldName(i)}; + const args = .{tuple_ty.structFieldName(i, mod)}; if (root_msg) |msg| { try sema.errNote(block, field_src, msg, template, args); } else { @@ -31222,17 +31271,17 @@ pub fn resolveTypeLayout(sema: *Sema, ty: Type) CompileError!void { } fn resolveStructLayout(sema: *Sema, ty: Type) CompileError!void { + const mod = sema.mod; const resolved_ty = try sema.resolveTypeFields(ty); - if (resolved_ty.castTag(.@"struct")) |payload| { - const struct_obj = payload.data; + if (mod.typeToStruct(resolved_ty)) |struct_obj| { switch (struct_obj.status) { .none, .have_field_types => {}, .field_types_wip, .layout_wip => { const msg = try Module.ErrorMsg.create( sema.gpa, - struct_obj.srcLoc(sema.mod), + struct_obj.srcLoc(mod), "struct '{}' depends on itself", - .{ty.fmt(sema.mod)}, + .{ty.fmt(mod)}, ); return sema.failWithOwnedErrorMsg(msg); }, @@ -31256,7 +31305,7 @@ fn resolveStructLayout(sema: *Sema, ty: Type) CompileError!void { } if (struct_obj.layout == .Packed) { - try semaBackingIntType(sema.mod, struct_obj); + try semaBackingIntType(mod, struct_obj); } struct_obj.status = .have_layout; @@ -31265,20 +31314,20 @@ fn resolveStructLayout(sema: *Sema, ty: Type) CompileError!void { if (struct_obj.assumed_runtime_bits and !(try sema.typeHasRuntimeBits(resolved_ty))) { const msg = try Module.ErrorMsg.create( sema.gpa, - struct_obj.srcLoc(sema.mod), + struct_obj.srcLoc(mod), "struct layout depends on it having runtime bits", .{}, ); return sema.failWithOwnedErrorMsg(msg); } - if (struct_obj.layout == .Auto and sema.mod.backendSupportsFeature(.field_reordering)) { + if (struct_obj.layout == .Auto and mod.backendSupportsFeature(.field_reordering)) { const optimized_order = if (struct_obj.owner_decl == sema.owner_decl_index) try sema.perm_arena.alloc(u32, struct_obj.fields.count()) else blk: { - const decl = sema.mod.declPtr(struct_obj.owner_decl); + const decl = mod.declPtr(struct_obj.owner_decl); var decl_arena: std.heap.ArenaAllocator = undefined; - const decl_arena_allocator = decl.value_arena.?.acquire(sema.mod.gpa, &decl_arena); + const decl_arena_allocator = decl.value_arena.?.acquire(mod.gpa, &decl_arena); defer decl.value_arena.?.release(&decl_arena); break :blk try decl_arena_allocator.alloc(u32, struct_obj.fields.count()); }; @@ -31528,7 +31577,6 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { return switch (ty.ip_index) { .empty_struct_type => false, .none => switch (ty.tag()) { - .empty_struct, .error_set, .error_set_single, .error_set_inferred, @@ -31569,27 +31617,6 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { return false; }, - .@"struct" => { - const struct_obj = ty.castTag(.@"struct").?.data; - switch (struct_obj.requires_comptime) { - .no, .wip => return false, - .yes => return true, - .unknown => { - var 
requires_comptime = false; - struct_obj.requires_comptime = .wip; - for (struct_obj.fields.values()) |field| { - if (try sema.resolveTypeRequiresComptime(field.ty)) requires_comptime = true; - } - if (requires_comptime) { - struct_obj.requires_comptime = .yes; - } else { - struct_obj.requires_comptime = .no; - } - return requires_comptime; - }, - } - }, - .@"union", .union_safety_tagged, .union_tagged => { const union_obj = ty.cast(Type.Payload.Union).?.data; switch (union_obj.requires_comptime) { @@ -31686,7 +31713,27 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .type_info, => true, }, - .struct_type => @panic("TODO"), + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return false; + switch (struct_obj.requires_comptime) { + .no, .wip => return false, + .yes => return true, + .unknown => { + var requires_comptime = false; + struct_obj.requires_comptime = .wip; + for (struct_obj.fields.values()) |field| { + if (try sema.resolveTypeRequiresComptime(field.ty)) requires_comptime = true; + } + if (requires_comptime) { + struct_obj.requires_comptime = .yes; + } else { + struct_obj.requires_comptime = .no; + } + return requires_comptime; + }, + } + }, + .union_type => @panic("TODO"), .opaque_type => false, @@ -31697,6 +31744,7 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .ptr => unreachable, .opt => unreachable, .enum_tag => unreachable, + .aggregate => unreachable, }, }; } @@ -31710,16 +31758,21 @@ pub fn resolveTypeFully(sema: *Sema, ty: Type) CompileError!void { const child_ty = try sema.resolveTypeFields(ty.childType(mod)); return sema.resolveTypeFully(child_ty); }, - .Struct => switch (ty.tag()) { - .@"struct" => return sema.resolveStructFully(ty), - .tuple, .anon_struct => { - const tuple = ty.tupleFields(); + .Struct => switch (ty.ip_index) { + .none => switch (ty.tag()) { + .tuple, .anon_struct => { + const tuple = ty.tupleFields(); - for (tuple.types) |field_ty| { - try sema.resolveTypeFully(field_ty); - } + for (tuple.types) |field_ty| { + try sema.resolveTypeFully(field_ty); + } + }, + else => {}, + }, + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .struct_type => return sema.resolveStructFully(ty), + else => {}, }, - else => {}, }, .Union => return sema.resolveUnionFully(ty), .Array => return sema.resolveTypeFully(ty.childType(mod)), @@ -31746,9 +31799,9 @@ pub fn resolveTypeFully(sema: *Sema, ty: Type) CompileError!void { fn resolveStructFully(sema: *Sema, ty: Type) CompileError!void { try sema.resolveStructLayout(ty); + const mod = sema.mod; const resolved_ty = try sema.resolveTypeFields(ty); - const payload = resolved_ty.castTag(.@"struct").?; - const struct_obj = payload.data; + const struct_obj = mod.typeToStruct(resolved_ty).?; switch (struct_obj.status) { .none, .have_field_types, .field_types_wip, .layout_wip, .have_layout => {}, @@ -31806,11 +31859,6 @@ pub fn resolveTypeFields(sema: *Sema, ty: Type) CompileError!Type { switch (ty.ip_index) { .none => switch (ty.tag()) { - .@"struct" => { - const struct_obj = ty.castTag(.@"struct").?.data; - try sema.resolveTypeFieldsStruct(ty, struct_obj); - return ty; - }, .@"union", .union_safety_tagged, .union_tagged => { const union_obj = ty.cast(Type.Payload.Union).?.data; try sema.resolveTypeFieldsUnion(ty, union_obj); @@ -31904,7 +31952,11 @@ pub fn resolveTypeFields(sema: *Sema, ty: Type) CompileError!Type { .prefetch_options_type => return sema.getBuiltinType("PrefetchOptions"), _ => switch 
(mod.intern_pool.indexToKey(ty.ip_index)) { - .struct_type => @panic("TODO"), + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return ty; + try sema.resolveTypeFieldsStruct(ty, struct_obj); + return ty; + }, .union_type => @panic("TODO"), else => return ty, }, @@ -33010,28 +33062,6 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { } }, - .@"struct" => { - const resolved_ty = try sema.resolveTypeFields(ty); - const s = resolved_ty.castTag(.@"struct").?.data; - for (s.fields.values(), 0..) |field, i| { - if (field.is_comptime) continue; - if (field.ty.eql(resolved_ty, sema.mod)) { - const msg = try Module.ErrorMsg.create( - sema.gpa, - s.srcLoc(sema.mod), - "struct '{}' depends on itself", - .{ty.fmt(sema.mod)}, - ); - try sema.addFieldErrNote(resolved_ty, i, msg, "while checking this field", .{}); - return sema.failWithOwnedErrorMsg(msg); - } - if ((try sema.typeHasOnePossibleValue(field.ty)) == null) { - return null; - } - } - return Value.empty_struct; - }, - .tuple, .anon_struct => { const tuple = ty.tupleFields(); for (tuple.values, 0..) |val, i| { @@ -33120,8 +33150,6 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { }); }, - .empty_struct => return Value.empty_struct, - .array => { if (ty.arrayLen(mod) == 0) return Value.initTag(.empty_array); @@ -33212,7 +33240,34 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .generic_poison => return error.GenericPoison, .var_args_param => unreachable, }, - .struct_type => @panic("TODO"), + .struct_type => |struct_type| { + const resolved_ty = try sema.resolveTypeFields(ty); + if (mod.structPtrUnwrap(struct_type.index)) |s| { + for (s.fields.values(), 0..) |field, i| { + if (field.is_comptime) continue; + if (field.ty.eql(resolved_ty, sema.mod)) { + const msg = try Module.ErrorMsg.create( + sema.gpa, + s.srcLoc(sema.mod), + "struct '{}' depends on itself", + .{ty.fmt(sema.mod)}, + ); + try sema.addFieldErrNote(resolved_ty, i, msg, "while checking this field", .{}); + return sema.failWithOwnedErrorMsg(msg); + } + if ((try sema.typeHasOnePossibleValue(field.ty)) == null) { + return null; + } + } + } + // In this case the struct has no fields and therefore has one possible value. 
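// Note: this branch is also reached when the struct does have fields but
// each one is comptime or its type itself has exactly one possible value;
// the loop above returns null as soon as any field type admits more than
// one value. A minimal sketch of such a single-valued struct:
//
//     const S = struct { a: void, b: u0 };
//     // S has exactly one value: .{ .a = {}, .b = 0 }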
+ const empty = try mod.intern(.{ .aggregate = .{ + .ty = ty.ip_index, + .fields = &.{}, + } }); + return empty.toValue(); + }, + .union_type => @panic("TODO"), .opaque_type => null, @@ -33223,6 +33278,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .ptr => unreachable, .opt => unreachable, .enum_tag => unreachable, + .aggregate => unreachable, }, } } @@ -33614,7 +33670,6 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .empty_struct_type => false, .none => switch (ty.tag()) { - .empty_struct, .error_set, .error_set_single, .error_set_inferred, @@ -33655,31 +33710,6 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { return false; }, - .@"struct" => { - const struct_obj = ty.castTag(.@"struct").?.data; - switch (struct_obj.requires_comptime) { - .no, .wip => return false, - .yes => return true, - .unknown => { - if (struct_obj.status == .field_types_wip) - return false; - - try sema.resolveTypeFieldsStruct(ty, struct_obj); - - struct_obj.requires_comptime = .wip; - for (struct_obj.fields.values()) |field| { - if (field.is_comptime) continue; - if (try sema.typeRequiresComptime(field.ty)) { - struct_obj.requires_comptime = .yes; - return true; - } - } - struct_obj.requires_comptime = .no; - return false; - }, - } - }, - .@"union", .union_safety_tagged, .union_tagged => { const union_obj = ty.cast(Type.Payload.Union).?.data; switch (union_obj.requires_comptime) { @@ -33782,7 +33812,31 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .var_args_param => unreachable, }, - .struct_type => @panic("TODO"), + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return false; + switch (struct_obj.requires_comptime) { + .no, .wip => return false, + .yes => return true, + .unknown => { + if (struct_obj.status == .field_types_wip) + return false; + + try sema.resolveTypeFieldsStruct(ty, struct_obj); + + struct_obj.requires_comptime = .wip; + for (struct_obj.fields.values()) |field| { + if (field.is_comptime) continue; + if (try sema.typeRequiresComptime(field.ty)) { + struct_obj.requires_comptime = .yes; + return true; + } + } + struct_obj.requires_comptime = .no; + return false; + }, + } + }, + .union_type => @panic("TODO"), .opaque_type => false, @@ -33793,6 +33847,7 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .ptr => unreachable, .opt => unreachable, .enum_tag => unreachable, + .aggregate => unreachable, }, }; } @@ -33864,11 +33919,12 @@ fn structFieldIndex( field_name: []const u8, field_src: LazySrcLoc, ) !u32 { + const mod = sema.mod; const struct_ty = try sema.resolveTypeFields(unresolved_struct_ty); if (struct_ty.isAnonStruct()) { return sema.anonStructFieldIndex(block, struct_ty, field_name, field_src); } else { - const struct_obj = struct_ty.castTag(.@"struct").?.data; + const struct_obj = mod.typeToStruct(struct_ty).?; const field_index_usize = struct_obj.fields.getIndex(field_name) orelse return sema.failWithBadStructFieldAccess(block, struct_obj, field_src, field_name); return @intCast(u32, field_index_usize); diff --git a/src/TypedValue.zig b/src/TypedValue.zig index fae637cf24..2105d3108f 100644 --- a/src/TypedValue.zig +++ b/src/TypedValue.zig @@ -180,7 +180,7 @@ pub fn print( switch (field_ptr.container_ty.tag()) { .tuple => return writer.print(".@\"{d}\"", .{field_ptr.field_index}), else => { - const field_name = field_ptr.container_ty.structFieldName(field_ptr.field_index); + const field_name = 
field_ptr.container_ty.structFieldName(field_ptr.field_index, mod); return writer.print(".{s}", .{field_name}); }, } @@ -381,21 +381,27 @@ fn printAggregate( } if (ty.zigTypeTag(mod) == .Struct) { try writer.writeAll(".{"); - const max_len = std.math.min(ty.structFieldCount(), max_aggregate_items); + const max_len = std.math.min(ty.structFieldCount(mod), max_aggregate_items); var i: u32 = 0; while (i < max_len) : (i += 1) { if (i != 0) try writer.writeAll(", "); - switch (ty.tag()) { - .anon_struct, .@"struct" => try writer.print(".{s} = ", .{ty.structFieldName(i)}), - else => {}, + switch (ty.ip_index) { + .none => switch (ty.tag()) { + .anon_struct => try writer.print(".{s} = ", .{ty.structFieldName(i, mod)}), + else => {}, + }, + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .struct_type => try writer.print(".{s} = ", .{ty.structFieldName(i, mod)}), + else => {}, + }, } try print(.{ - .ty = ty.structFieldType(i), + .ty = ty.structFieldType(i, mod), .val = try val.fieldValue(ty, mod, i), }, writer, level - 1, mod); } - if (ty.structFieldCount() > max_aggregate_items) { + if (ty.structFieldCount(mod) > max_aggregate_items) { try writer.writeAll(", ..."); } return writer.writeAll("}"); diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index 970d59a25f..3e893411fc 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -4119,7 +4119,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const mcv = try self.resolveInst(operand); const struct_ty = self.typeOf(operand); - const struct_field_ty = struct_ty.structFieldType(index); + const struct_field_ty = struct_ty.structFieldType(index, mod); const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod)); switch (mcv) { @@ -5466,10 +5466,10 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro const reg_lock = self.register_manager.lockReg(rwo.reg); defer if (reg_lock) |locked_reg| self.register_manager.unlockReg(locked_reg); - const wrapped_ty = ty.structFieldType(0); + const wrapped_ty = ty.structFieldType(0, mod); try self.genSetStack(wrapped_ty, stack_offset, .{ .register = rwo.reg }); - const overflow_bit_ty = ty.structFieldType(1); + const overflow_bit_ty = ty.structFieldType(1, mod); const overflow_bit_offset = @intCast(u32, ty.structFieldOffset(1, mod)); const raw_cond_reg = try self.register_manager.allocReg(null, gp); const cond_reg = self.registerAlias(raw_cond_reg, overflow_bit_ty); diff --git a/src/arch/aarch64/abi.zig b/src/arch/aarch64/abi.zig index 1d042b632a..6589425fc2 100644 --- a/src/arch/aarch64/abi.zig +++ b/src/arch/aarch64/abi.zig @@ -21,7 +21,7 @@ pub fn classifyType(ty: Type, mod: *Module) Class { var maybe_float_bits: ?u16 = null; switch (ty.zigTypeTag(mod)) { .Struct => { - if (ty.containerLayout() == .Packed) return .byval; + if (ty.containerLayout(mod) == .Packed) return .byval; const float_count = countFloats(ty, mod, &maybe_float_bits); if (float_count <= sret_float_count) return .{ .float_array = float_count }; @@ -31,7 +31,7 @@ pub fn classifyType(ty: Type, mod: *Module) Class { return .integer; }, .Union => { - if (ty.containerLayout() == .Packed) return .byval; + if (ty.containerLayout(mod) == .Packed) return .byval; const float_count = countFloats(ty, mod, &maybe_float_bits); if (float_count <= sret_float_count) return .{ .float_array = float_count }; @@ -90,11 +90,11 @@ fn countFloats(ty: Type, mod: *Module, maybe_float_bits: *?u16) u8 { 
return max_count; }, .Struct => { - const fields_len = ty.structFieldCount(); + const fields_len = ty.structFieldCount(mod); var count: u8 = 0; var i: u32 = 0; while (i < fields_len) : (i += 1) { - const field_ty = ty.structFieldType(i); + const field_ty = ty.structFieldType(i, mod); const field_count = countFloats(field_ty, mod, maybe_float_bits); if (field_count == invalid) return invalid; count += field_count; @@ -125,10 +125,10 @@ pub fn getFloatArrayType(ty: Type, mod: *Module) ?Type { return null; }, .Struct => { - const fields_len = ty.structFieldCount(); + const fields_len = ty.structFieldCount(mod); var i: u32 = 0; while (i < fields_len) : (i += 1) { - const field_ty = ty.structFieldType(i); + const field_ty = ty.structFieldType(i, mod); if (getFloatArrayType(field_ty, mod)) |some| return some; } return null; diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index 50f6d76c55..5cc165fdfe 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -2910,7 +2910,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { const mcv = try self.resolveInst(operand); const struct_ty = self.typeOf(operand); const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod)); - const struct_field_ty = struct_ty.structFieldType(index); + const struct_field_ty = struct_ty.structFieldType(index, mod); switch (mcv) { .dead, .unreach => unreachable, @@ -5404,10 +5404,10 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro const reg_lock = self.register_manager.lockReg(reg); defer if (reg_lock) |locked_reg| self.register_manager.unlockReg(locked_reg); - const wrapped_ty = ty.structFieldType(0); + const wrapped_ty = ty.structFieldType(0, mod); try self.genSetStack(wrapped_ty, stack_offset, .{ .register = reg }); - const overflow_bit_ty = ty.structFieldType(1); + const overflow_bit_ty = ty.structFieldType(1, mod); const overflow_bit_offset = @intCast(u32, ty.structFieldOffset(1, mod)); const cond_reg = try self.register_manager.allocReg(null, gp); diff --git a/src/arch/arm/abi.zig b/src/arch/arm/abi.zig index 79ffadf831..7a7d632837 100644 --- a/src/arch/arm/abi.zig +++ b/src/arch/arm/abi.zig @@ -32,7 +32,7 @@ pub fn classifyType(ty: Type, mod: *Module, ctx: Context) Class { switch (ty.zigTypeTag(mod)) { .Struct => { const bit_size = ty.bitSize(mod); - if (ty.containerLayout() == .Packed) { + if (ty.containerLayout(mod) == .Packed) { if (bit_size > 64) return .memory; return .byval; } @@ -40,10 +40,10 @@ pub fn classifyType(ty: Type, mod: *Module, ctx: Context) Class { const float_count = countFloats(ty, mod, &maybe_float_bits); if (float_count <= byval_float_count) return .byval; - const fields = ty.structFieldCount(); + const fields = ty.structFieldCount(mod); var i: u32 = 0; while (i < fields) : (i += 1) { - const field_ty = ty.structFieldType(i); + const field_ty = ty.structFieldType(i, mod); const field_alignment = ty.structFieldAlign(i, mod); const field_size = field_ty.bitSize(mod); if (field_size > 32 or field_alignment > 32) { @@ -54,7 +54,7 @@ pub fn classifyType(ty: Type, mod: *Module, ctx: Context) Class { }, .Union => { const bit_size = ty.bitSize(mod); - if (ty.containerLayout() == .Packed) { + if (ty.containerLayout(mod) == .Packed) { if (bit_size > 64) return .memory; return .byval; } @@ -132,11 +132,11 @@ fn countFloats(ty: Type, mod: *Module, maybe_float_bits: *?u16) u32 { return max_count; }, .Struct => { - const fields_len = ty.structFieldCount(); + const fields_len = ty.structFieldCount(mod); var 
count: u32 = 0; var i: u32 = 0; while (i < fields_len) : (i += 1) { - const field_ty = ty.structFieldType(i); + const field_ty = ty.structFieldType(i, mod); const field_count = countFloats(field_ty, mod, maybe_float_bits); if (field_count == invalid) return invalid; count += field_count; diff --git a/src/arch/riscv64/abi.zig b/src/arch/riscv64/abi.zig index 28a69d9136..41a1850635 100644 --- a/src/arch/riscv64/abi.zig +++ b/src/arch/riscv64/abi.zig @@ -15,7 +15,7 @@ pub fn classifyType(ty: Type, mod: *Module) Class { switch (ty.zigTypeTag(mod)) { .Struct => { const bit_size = ty.bitSize(mod); - if (ty.containerLayout() == .Packed) { + if (ty.containerLayout(mod) == .Packed) { if (bit_size > max_byval_size) return .memory; return .byval; } @@ -26,7 +26,7 @@ pub fn classifyType(ty: Type, mod: *Module) Class { }, .Union => { const bit_size = ty.bitSize(mod); - if (ty.containerLayout() == .Packed) { + if (ty.containerLayout(mod) == .Packed) { if (bit_size > max_byval_size) return .memory; return .byval; } diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index 0490db615b..0677b72f1a 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -3993,10 +3993,10 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro const reg_lock = self.register_manager.lockReg(rwo.reg); defer if (reg_lock) |locked_reg| self.register_manager.unlockReg(locked_reg); - const wrapped_ty = ty.structFieldType(0); + const wrapped_ty = ty.structFieldType(0, mod); try self.genSetStack(wrapped_ty, stack_offset, .{ .register = rwo.reg }); - const overflow_bit_ty = ty.structFieldType(1); + const overflow_bit_ty = ty.structFieldType(1, mod); const overflow_bit_offset = @intCast(u32, ty.structFieldOffset(1, mod)); const cond_reg = try self.register_manager.allocReg(null, gp); diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index b484e21424..90c26d5d84 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -1006,9 +1006,9 @@ fn typeToValtype(ty: Type, mod: *Module) wasm.Valtype { if (info.bits > 32 and info.bits <= 128) break :blk wasm.Valtype.i64; break :blk wasm.Valtype.i32; // represented as pointer to stack }, - .Struct => switch (ty.containerLayout()) { + .Struct => switch (ty.containerLayout(mod)) { .Packed => { - const struct_obj = ty.castTag(.@"struct").?.data; + const struct_obj = mod.typeToStruct(ty).?; return typeToValtype(struct_obj.backing_int_ty, mod); }, else => wasm.Valtype.i32, @@ -1017,7 +1017,7 @@ fn typeToValtype(ty: Type, mod: *Module) wasm.Valtype { .direct => wasm.Valtype.v128, .unrolled => wasm.Valtype.i32, }, - .Union => switch (ty.containerLayout()) { + .Union => switch (ty.containerLayout(mod)) { .Packed => { const int_ty = mod.intType(.unsigned, @intCast(u16, ty.bitSize(mod))) catch @panic("out of memory"); return typeToValtype(int_ty, mod); @@ -1747,8 +1747,7 @@ fn isByRef(ty: Type, mod: *Module) bool { return ty.hasRuntimeBitsIgnoreComptime(mod); }, .Struct => { - if (ty.castTag(.@"struct")) |struct_ty| { - const struct_obj = struct_ty.data; + if (mod.typeToStruct(ty)) |struct_obj| { if (struct_obj.layout == .Packed and struct_obj.haveFieldTypes()) { return isByRef(struct_obj.backing_int_ty, mod); } @@ -2954,11 +2953,11 @@ fn lowerParentPtr(func: *CodeGen, ptr_val: Value, offset: u32) InnerError!WValue const parent_ty = field_ptr.container_ty; const field_offset = switch (parent_ty.zigTypeTag(mod)) { - .Struct => switch (parent_ty.containerLayout()) { + .Struct => switch 
(parent_ty.containerLayout(mod)) { .Packed => parent_ty.packedStructFieldByteOffset(field_ptr.field_index, mod), else => parent_ty.structFieldOffset(field_ptr.field_index, mod), }, - .Union => switch (parent_ty.containerLayout()) { + .Union => switch (parent_ty.containerLayout(mod)) { .Packed => 0, else => blk: { const layout: Module.Union.Layout = parent_ty.unionGetLayout(mod); @@ -3158,7 +3157,7 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue { return WValue{ .imm32 = @boolToInt(is_pl) }; }, .Struct => { - const struct_obj = ty.castTag(.@"struct").?.data; + const struct_obj = mod.typeToStruct(ty).?; assert(struct_obj.layout == .Packed); var buf: [8]u8 = .{0} ** 8; // zero the buffer so we do not read 0xaa as integer val.writeToPackedMemory(ty, func.bin_file.base.options.module.?, &buf, 0) catch unreachable; @@ -3225,7 +3224,7 @@ fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue { return WValue{ .imm32 = 0xaaaaaaaa }; }, .Struct => { - const struct_obj = ty.castTag(.@"struct").?.data; + const struct_obj = mod.typeToStruct(ty).?; assert(struct_obj.layout == .Packed); return func.emitUndefined(struct_obj.backing_int_ty); }, @@ -3635,7 +3634,7 @@ fn structFieldPtr( ) InnerError!WValue { const mod = func.bin_file.base.options.module.?; const result_ty = func.typeOfIndex(inst); - const offset = switch (struct_ty.containerLayout()) { + const offset = switch (struct_ty.containerLayout(mod)) { .Packed => switch (struct_ty.zigTypeTag(mod)) { .Struct => offset: { if (result_ty.ptrInfo(mod).host_size != 0) { @@ -3668,13 +3667,13 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const struct_ty = func.typeOf(struct_field.struct_operand); const operand = try func.resolveInst(struct_field.struct_operand); const field_index = struct_field.field_index; - const field_ty = struct_ty.structFieldType(field_index); + const field_ty = struct_ty.structFieldType(field_index, mod); if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) return func.finishAir(inst, .none, &.{struct_field.struct_operand}); - const result = switch (struct_ty.containerLayout()) { + const result = switch (struct_ty.containerLayout(mod)) { .Packed => switch (struct_ty.zigTypeTag(mod)) { .Struct => result: { - const struct_obj = struct_ty.castTag(.@"struct").?.data; + const struct_obj = mod.typeToStruct(struct_ty).?; const offset = struct_obj.packedFieldBitOffset(mod, field_index); const backing_ty = struct_obj.backing_int_ty; const wasm_bits = toWasmBits(backing_ty.intInfo(mod).bits) orelse { @@ -4998,12 +4997,12 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } break :result_value result; }, - .Struct => switch (result_ty.containerLayout()) { + .Struct => switch (result_ty.containerLayout(mod)) { .Packed => { if (isByRef(result_ty, mod)) { return func.fail("TODO: airAggregateInit for packed structs larger than 64 bits", .{}); } - const struct_obj = result_ty.castTag(.@"struct").?.data; + const struct_obj = mod.typeToStruct(result_ty).?; const fields = struct_obj.fields.values(); const backing_type = struct_obj.backing_int_ty; @@ -5051,7 +5050,7 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { for (elements, 0..) 
|elem, elem_index| { if ((try result_ty.structFieldValueComptime(mod, elem_index)) != null) continue; - const elem_ty = result_ty.structFieldType(elem_index); + const elem_ty = result_ty.structFieldType(elem_index, mod); const elem_size = @intCast(u32, elem_ty.abiSize(mod)); const value = try func.resolveInst(elem); try func.store(offset, value, elem_ty, 0); diff --git a/src/arch/wasm/abi.zig b/src/arch/wasm/abi.zig index bb5911382b..ee836bebdb 100644 --- a/src/arch/wasm/abi.zig +++ b/src/arch/wasm/abi.zig @@ -26,14 +26,14 @@ pub fn classifyType(ty: Type, mod: *Module) [2]Class { if (!ty.hasRuntimeBitsIgnoreComptime(mod)) return none; switch (ty.zigTypeTag(mod)) { .Struct => { - if (ty.containerLayout() == .Packed) { + if (ty.containerLayout(mod) == .Packed) { if (ty.bitSize(mod) <= 64) return direct; return .{ .direct, .direct }; } // When the struct type is non-scalar - if (ty.structFieldCount() > 1) return memory; + if (ty.structFieldCount(mod) > 1) return memory; // When the struct's alignment is non-natural - const field = ty.structFields().values()[0]; + const field = ty.structFields(mod).values()[0]; if (field.abi_align != 0) { if (field.abi_align > field.ty.abiAlignment(mod)) { return memory; @@ -64,7 +64,7 @@ pub fn classifyType(ty: Type, mod: *Module) [2]Class { return direct; }, .Union => { - if (ty.containerLayout() == .Packed) { + if (ty.containerLayout(mod) == .Packed) { if (ty.bitSize(mod) <= 64) return direct; return .{ .direct, .direct }; } @@ -96,19 +96,19 @@ pub fn classifyType(ty: Type, mod: *Module) [2]Class { pub fn scalarType(ty: Type, mod: *Module) Type { switch (ty.zigTypeTag(mod)) { .Struct => { - switch (ty.containerLayout()) { + switch (ty.containerLayout(mod)) { .Packed => { - const struct_obj = ty.castTag(.@"struct").?.data; + const struct_obj = mod.typeToStruct(ty).?; return scalarType(struct_obj.backing_int_ty, mod); }, else => { - std.debug.assert(ty.structFieldCount() == 1); - return scalarType(ty.structFieldType(0), mod); + std.debug.assert(ty.structFieldCount(mod) == 1); + return scalarType(ty.structFieldType(0, mod), mod); }, } }, .Union => { - if (ty.containerLayout() != .Packed) { + if (ty.containerLayout(mod) != .Packed) { const layout = ty.unionGetLayout(mod); if (layout.payload_size == 0 and layout.tag_size != 0) { return scalarType(ty.unionTagTypeSafety().?, mod); diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 4fb5267cb0..77661b2a14 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -3252,13 +3252,13 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { try self.genSetMem( .{ .frame = frame_index }, @intCast(i32, tuple_ty.structFieldOffset(1, mod)), - tuple_ty.structFieldType(1), + tuple_ty.structFieldType(1, mod), .{ .eflags = cc }, ); try self.genSetMem( .{ .frame = frame_index }, @intCast(i32, tuple_ty.structFieldOffset(0, mod)), - tuple_ty.structFieldType(0), + tuple_ty.structFieldType(0, mod), partial_mcv, ); break :result .{ .load_frame = .{ .index = frame_index } }; @@ -3289,7 +3289,7 @@ fn genSetFrameTruncatedOverflowCompare( }; defer if (src_lock) |lock| self.register_manager.unlockReg(lock); - const ty = tuple_ty.structFieldType(0); + const ty = tuple_ty.structFieldType(0, mod); const int_info = ty.intInfo(mod); const hi_limb_bits = (int_info.bits - 1) % 64 + 1; @@ -3336,7 +3336,7 @@ fn genSetFrameTruncatedOverflowCompare( try self.genSetMem( .{ .frame = frame_index }, @intCast(i32, tuple_ty.structFieldOffset(1, mod)), - tuple_ty.structFieldType(1), + 
tuple_ty.structFieldType(1, mod), if (overflow_cc) |_| .{ .register = overflow_reg.to8() } else .{ .eflags = .ne }, ); } @@ -3393,13 +3393,13 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { try self.genSetMem( .{ .frame = frame_index }, @intCast(i32, tuple_ty.structFieldOffset(0, mod)), - tuple_ty.structFieldType(0), + tuple_ty.structFieldType(0, mod), partial_mcv, ); try self.genSetMem( .{ .frame = frame_index }, @intCast(i32, tuple_ty.structFieldOffset(1, mod)), - tuple_ty.structFieldType(1), + tuple_ty.structFieldType(1, mod), .{ .immediate = 0 }, // cc being set is impossible ); } else try self.genSetFrameTruncatedOverflowCompare( @@ -5563,7 +5563,7 @@ fn fieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32 const ptr_field_ty = self.typeOfIndex(inst); const ptr_container_ty = self.typeOf(operand); const container_ty = ptr_container_ty.childType(mod); - const field_offset = @intCast(i32, switch (container_ty.containerLayout()) { + const field_offset = @intCast(i32, switch (container_ty.containerLayout(mod)) { .Auto, .Extern => container_ty.structFieldOffset(index, mod), .Packed => if (container_ty.zigTypeTag(mod) == .Struct and ptr_field_ty.ptrInfo(mod).host_size == 0) @@ -5591,16 +5591,16 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { const container_ty = self.typeOf(operand); const container_rc = regClassForType(container_ty, mod); - const field_ty = container_ty.structFieldType(index); + const field_ty = container_ty.structFieldType(index, mod); if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result .none; const field_rc = regClassForType(field_ty, mod); const field_is_gp = field_rc.supersetOf(gp); const src_mcv = try self.resolveInst(operand); - const field_off = switch (container_ty.containerLayout()) { + const field_off = switch (container_ty.containerLayout(mod)) { .Auto, .Extern => @intCast(u32, container_ty.structFieldOffset(index, mod) * 8), - .Packed => if (container_ty.castTag(.@"struct")) |struct_obj| - struct_obj.data.packedFieldBitOffset(mod, index) + .Packed => if (mod.typeToStruct(container_ty)) |struct_obj| + struct_obj.packedFieldBitOffset(mod, index) else 0, }; @@ -10036,13 +10036,13 @@ fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCVal try self.genSetMem( base, disp + @intCast(i32, ty.structFieldOffset(0, mod)), - ty.structFieldType(0), + ty.structFieldType(0, mod), .{ .register = ro.reg }, ); try self.genSetMem( base, disp + @intCast(i32, ty.structFieldOffset(1, mod)), - ty.structFieldType(1), + ty.structFieldType(1, mod), .{ .eflags = ro.eflags }, ); }, @@ -11259,8 +11259,8 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { .Struct => { const frame_index = try self.allocFrameIndex(FrameAlloc.initType(result_ty, mod)); - if (result_ty.containerLayout() == .Packed) { - const struct_obj = result_ty.castTag(.@"struct").?.data; + if (result_ty.containerLayout(mod) == .Packed) { + const struct_obj = mod.typeToStruct(result_ty).?; try self.genInlineMemset( .{ .lea_frame = .{ .index = frame_index } }, .{ .immediate = 0 }, @@ -11269,7 +11269,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { for (elements, 0..) 
|elem, elem_i| { if ((try result_ty.structFieldValueComptime(mod, elem_i)) != null) continue; - const elem_ty = result_ty.structFieldType(elem_i); + const elem_ty = result_ty.structFieldType(elem_i, mod); const elem_bit_size = @intCast(u32, elem_ty.bitSize(mod)); if (elem_bit_size > 64) { return self.fail( @@ -11341,7 +11341,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { } else for (elements, 0..) |elem, elem_i| { if ((try result_ty.structFieldValueComptime(mod, elem_i)) != null) continue; - const elem_ty = result_ty.structFieldType(elem_i); + const elem_ty = result_ty.structFieldType(elem_i, mod); const elem_off = @intCast(i32, result_ty.structFieldOffset(elem_i, mod)); const elem_mcv = try self.resolveInst(elem); const mat_elem_mcv = switch (elem_mcv) { diff --git a/src/arch/x86_64/abi.zig b/src/arch/x86_64/abi.zig index 1bae899d33..45ce64a98e 100644 --- a/src/arch/x86_64/abi.zig +++ b/src/arch/x86_64/abi.zig @@ -41,7 +41,7 @@ pub fn classifyWindows(ty: Type, mod: *Module) Class { 1, 2, 4, 8 => return .integer, else => switch (ty.zigTypeTag(mod)) { .Int => return .win_i128, - .Struct, .Union => if (ty.containerLayout() == .Packed) { + .Struct, .Union => if (ty.containerLayout(mod) == .Packed) { return .win_i128; } else { return .memory; @@ -210,7 +210,7 @@ pub fn classifySystemV(ty: Type, mod: *Module, ctx: Context) [8]Class { // "If the size of the aggregate exceeds a single eightbyte, each is classified // separately.". const ty_size = ty.abiSize(mod); - if (ty.containerLayout() == .Packed) { + if (ty.containerLayout(mod) == .Packed) { assert(ty_size <= 128); result[0] = .integer; if (ty_size > 64) result[1] = .integer; @@ -221,7 +221,7 @@ pub fn classifySystemV(ty: Type, mod: *Module, ctx: Context) [8]Class { var result_i: usize = 0; // out of 8 var byte_i: usize = 0; // out of 8 - const fields = ty.structFields(); + const fields = ty.structFields(mod); for (fields.values()) |field| { if (field.abi_align != 0) { if (field.abi_align < field.ty.abiAlignment(mod)) { @@ -329,7 +329,7 @@ pub fn classifySystemV(ty: Type, mod: *Module, ctx: Context) [8]Class { // "If the size of the aggregate exceeds a single eightbyte, each is classified // separately.". const ty_size = ty.abiSize(mod); - if (ty.containerLayout() == .Packed) { + if (ty.containerLayout(mod) == .Packed) { assert(ty_size <= 128); result[0] = .integer; if (ty_size > 64) result[1] = .integer; diff --git a/src/codegen.zig b/src/codegen.zig index 70df1fc17b..b29af1ff93 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -503,8 +503,8 @@ pub fn generateSymbol( return Result.ok; }, .Struct => { - if (typed_value.ty.containerLayout() == .Packed) { - const struct_obj = typed_value.ty.castTag(.@"struct").?.data; + if (typed_value.ty.containerLayout(mod) == .Packed) { + const struct_obj = mod.typeToStruct(typed_value.ty).?; const fields = struct_obj.fields.values(); const field_vals = typed_value.val.castTag(.aggregate).?.data; const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse return error.Overflow; @@ -539,7 +539,7 @@ pub fn generateSymbol( const struct_begin = code.items.len; const field_vals = typed_value.val.castTag(.aggregate).?.data; for (field_vals, 0..) 
|field_val, index| { - const field_ty = typed_value.ty.structFieldType(index); + const field_ty = typed_value.ty.structFieldType(index, mod); if (!field_ty.hasRuntimeBits(mod)) continue; switch (try generateSymbol(bin_file, src_loc, .{ diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 36af222c7e..1c16216504 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -820,7 +820,7 @@ pub const DeclGen = struct { try dg.renderValue(writer, Type.bool, val, initializer_type); return writer.writeAll(" }"); }, - .Struct => switch (ty.containerLayout()) { + .Struct => switch (ty.containerLayout(mod)) { .Auto, .Extern => { if (!location.isInitializer()) { try writer.writeByte('('); @@ -830,9 +830,9 @@ pub const DeclGen = struct { try writer.writeByte('{'); var empty = true; - for (0..ty.structFieldCount()) |field_i| { - if (ty.structFieldIsComptime(field_i)) continue; - const field_ty = ty.structFieldType(field_i); + for (0..ty.structFieldCount(mod)) |field_i| { + if (ty.structFieldIsComptime(field_i, mod)) continue; + const field_ty = ty.structFieldType(field_i, mod); if (!field_ty.hasRuntimeBits(mod)) continue; if (!empty) try writer.writeByte(','); @@ -1328,7 +1328,7 @@ pub const DeclGen = struct { }, else => unreachable, }, - .Struct => switch (ty.containerLayout()) { + .Struct => switch (ty.containerLayout(mod)) { .Auto, .Extern => { const field_vals = val.castTag(.aggregate).?.data; @@ -1341,8 +1341,8 @@ pub const DeclGen = struct { try writer.writeByte('{'); var empty = true; for (field_vals, 0..) |field_val, field_i| { - if (ty.structFieldIsComptime(field_i)) continue; - const field_ty = ty.structFieldType(field_i); + if (ty.structFieldIsComptime(field_i, mod)) continue; + const field_ty = ty.structFieldType(field_i, mod); if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; if (!empty) try writer.writeByte(','); @@ -1363,8 +1363,8 @@ pub const DeclGen = struct { var eff_num_fields: usize = 0; for (0..field_vals.len) |field_i| { - if (ty.structFieldIsComptime(field_i)) continue; - const field_ty = ty.structFieldType(field_i); + if (ty.structFieldIsComptime(field_i, mod)) continue; + const field_ty = ty.structFieldType(field_i, mod); if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; eff_num_fields += 1; @@ -1386,8 +1386,8 @@ pub const DeclGen = struct { var eff_index: usize = 0; var needs_closing_paren = false; for (field_vals, 0..) |field_val, field_i| { - if (ty.structFieldIsComptime(field_i)) continue; - const field_ty = ty.structFieldType(field_i); + if (ty.structFieldIsComptime(field_i, mod)) continue; + const field_ty = ty.structFieldType(field_i, mod); if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; const cast_context = IntCastContext{ .value = .{ .value = field_val } }; @@ -1416,8 +1416,8 @@ pub const DeclGen = struct { // a << a_off | b << b_off | c << c_off var empty = true; for (field_vals, 0..) 
|field_val, field_i| { - if (ty.structFieldIsComptime(field_i)) continue; - const field_ty = ty.structFieldType(field_i); + if (ty.structFieldIsComptime(field_i, mod)) continue; + const field_ty = ty.structFieldType(field_i, mod); if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; if (!empty) try writer.writeAll(" | "); @@ -1453,7 +1453,7 @@ pub const DeclGen = struct { const field_i = ty.unionTagFieldIndex(union_obj.tag, mod).?; const field_ty = ty.unionFields().values()[field_i].ty; const field_name = ty.unionFields().keys()[field_i]; - if (ty.containerLayout() == .Packed) { + if (ty.containerLayout(mod) == .Packed) { if (field_ty.hasRuntimeBits(mod)) { if (field_ty.isPtrAtRuntime(mod)) { try writer.writeByte('('); @@ -5218,25 +5218,25 @@ fn fieldLocation( end: void, } { return switch (container_ty.zigTypeTag(mod)) { - .Struct => switch (container_ty.containerLayout()) { - .Auto, .Extern => for (field_index..container_ty.structFieldCount()) |next_field_index| { - if (container_ty.structFieldIsComptime(next_field_index)) continue; - const field_ty = container_ty.structFieldType(next_field_index); + .Struct => switch (container_ty.containerLayout(mod)) { + .Auto, .Extern => for (field_index..container_ty.structFieldCount(mod)) |next_field_index| { + if (container_ty.structFieldIsComptime(next_field_index, mod)) continue; + const field_ty = container_ty.structFieldType(next_field_index, mod); if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; break .{ .field = if (container_ty.isSimpleTuple()) .{ .field = next_field_index } else - .{ .identifier = container_ty.structFieldName(next_field_index) } }; + .{ .identifier = container_ty.structFieldName(next_field_index, mod) } }; } else if (container_ty.hasRuntimeBitsIgnoreComptime(mod)) .end else .begin, .Packed => if (field_ptr_ty.ptrInfo(mod).host_size == 0) .{ .byte_offset = container_ty.packedStructFieldByteOffset(field_index, mod) } else .begin, }, - .Union => switch (container_ty.containerLayout()) { + .Union => switch (container_ty.containerLayout(mod)) { .Auto, .Extern => { - const field_ty = container_ty.structFieldType(field_index); + const field_ty = container_ty.structFieldType(field_index, mod); if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) return if (container_ty.unionTagTypeSafety() != null and !container_ty.unionHasAllZeroBitFieldTypes(mod)) @@ -5417,101 +5417,111 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue { // Ensure complete type definition is visible before accessing fields. 
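// (Illustration, assuming standard C semantics rather than anything specific
// to this backend: an emitted access such as `val.field` only compiles once
// the struct's full definition has been rendered; a forward declaration
// alone is not enough, hence requesting the .complete form of the type.)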
_ = try f.typeToIndex(struct_ty, .complete); - const field_name: CValue = switch (struct_ty.tag()) { - .tuple, .anon_struct, .@"struct" => switch (struct_ty.containerLayout()) { - .Auto, .Extern => if (struct_ty.isSimpleTuple()) + const field_name: CValue = switch (struct_ty.ip_index) { + .none => switch (struct_ty.tag()) { + .tuple, .anon_struct => if (struct_ty.isSimpleTuple()) .{ .field = extra.field_index } else - .{ .identifier = struct_ty.structFieldName(extra.field_index) }, - .Packed => { - const struct_obj = struct_ty.castTag(.@"struct").?.data; - const int_info = struct_ty.intInfo(mod); - - const bit_offset_ty = try mod.intType(.unsigned, Type.smallestUnsignedBits(int_info.bits - 1)); - - const bit_offset = struct_obj.packedFieldBitOffset(mod, extra.field_index); - const bit_offset_val = try mod.intValue(bit_offset_ty, bit_offset); + .{ .identifier = struct_ty.structFieldName(extra.field_index, mod) }, - const field_int_signedness = if (inst_ty.isAbiInt(mod)) - inst_ty.intInfo(mod).signedness - else - .unsigned; - const field_int_ty = try mod.intType(field_int_signedness, @intCast(u16, inst_ty.bitSize(mod))); - - const temp_local = try f.allocLocal(inst, field_int_ty); - try f.writeCValue(writer, temp_local, .Other); - try writer.writeAll(" = zig_wrap_"); - try f.object.dg.renderTypeForBuiltinFnName(writer, field_int_ty); - try writer.writeAll("(("); - try f.renderType(writer, field_int_ty); - try writer.writeByte(')'); - const cant_cast = int_info.bits > 64; - if (cant_cast) { - if (field_int_ty.bitSize(mod) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{}); - try writer.writeAll("zig_lo_"); - try f.object.dg.renderTypeForBuiltinFnName(writer, struct_ty); - try writer.writeByte('('); - } - if (bit_offset > 0) { - try writer.writeAll("zig_shr_"); - try f.object.dg.renderTypeForBuiltinFnName(writer, struct_ty); - try writer.writeByte('('); - } - try f.writeCValue(writer, struct_byval, .Other); - if (bit_offset > 0) { - try writer.writeAll(", "); - try f.object.dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument); - try writer.writeByte(')'); - } - if (cant_cast) try writer.writeByte(')'); - try f.object.dg.renderBuiltinInfo(writer, field_int_ty, .bits); - try writer.writeAll(");\n"); - if (inst_ty.eql(field_int_ty, f.object.dg.module)) return temp_local; + .@"union", .union_safety_tagged, .union_tagged => if (struct_ty.containerLayout(mod) == .Packed) { + const operand_lval = if (struct_byval == .constant) blk: { + const operand_local = try f.allocLocal(inst, struct_ty); + try f.writeCValue(writer, operand_local, .Other); + try writer.writeAll(" = "); + try f.writeCValue(writer, struct_byval, .Initializer); + try writer.writeAll(";\n"); + break :blk operand_local; + } else struct_byval; const local = try f.allocLocal(inst, inst_ty); - try writer.writeAll("memcpy("); - try f.writeCValue(writer, .{ .local_ref = local.new_local }, .FunctionArgument); - try writer.writeAll(", "); - try f.writeCValue(writer, .{ .local_ref = temp_local.new_local }, .FunctionArgument); + try writer.writeAll("memcpy(&"); + try f.writeCValue(writer, local, .Other); + try writer.writeAll(", &"); + try f.writeCValue(writer, operand_lval, .Other); try writer.writeAll(", sizeof("); try f.renderType(writer, inst_ty); try writer.writeAll("));\n"); - try freeLocal(f, inst, temp_local.new_local, 0); + + if (struct_byval == .constant) { + try freeLocal(f, inst, operand_lval.new_local, 0); + } + return local; + } else field_name: { + const name = 
struct_ty.unionFields().keys()[extra.field_index]; + break :field_name if (struct_ty.unionTagTypeSafety()) |_| + .{ .payload_identifier = name } + else + .{ .identifier = name }; }, + else => unreachable, }, - .@"union", .union_safety_tagged, .union_tagged => if (struct_ty.containerLayout() == .Packed) { - const operand_lval = if (struct_byval == .constant) blk: { - const operand_local = try f.allocLocal(inst, struct_ty); - try f.writeCValue(writer, operand_local, .Other); - try writer.writeAll(" = "); - try f.writeCValue(writer, struct_byval, .Initializer); - try writer.writeAll(";\n"); - break :blk operand_local; - } else struct_byval; + else => switch (mod.intern_pool.indexToKey(struct_ty.ip_index)) { + .struct_type => switch (struct_ty.containerLayout(mod)) { + .Auto, .Extern => if (struct_ty.isSimpleTuple()) + .{ .field = extra.field_index } + else + .{ .identifier = struct_ty.structFieldName(extra.field_index, mod) }, + .Packed => { + const struct_obj = mod.typeToStruct(struct_ty).?; + const int_info = struct_ty.intInfo(mod); - const local = try f.allocLocal(inst, inst_ty); - try writer.writeAll("memcpy(&"); - try f.writeCValue(writer, local, .Other); - try writer.writeAll(", &"); - try f.writeCValue(writer, operand_lval, .Other); - try writer.writeAll(", sizeof("); - try f.renderType(writer, inst_ty); - try writer.writeAll("));\n"); + const bit_offset_ty = try mod.intType(.unsigned, Type.smallestUnsignedBits(int_info.bits - 1)); - if (struct_byval == .constant) { - try freeLocal(f, inst, operand_lval.new_local, 0); - } + const bit_offset = struct_obj.packedFieldBitOffset(mod, extra.field_index); + const bit_offset_val = try mod.intValue(bit_offset_ty, bit_offset); - return local; - } else field_name: { - const name = struct_ty.unionFields().keys()[extra.field_index]; - break :field_name if (struct_ty.unionTagTypeSafety()) |_| - .{ .payload_identifier = name } - else - .{ .identifier = name }; + const field_int_signedness = if (inst_ty.isAbiInt(mod)) + inst_ty.intInfo(mod).signedness + else + .unsigned; + const field_int_ty = try mod.intType(field_int_signedness, @intCast(u16, inst_ty.bitSize(mod))); + + const temp_local = try f.allocLocal(inst, field_int_ty); + try f.writeCValue(writer, temp_local, .Other); + try writer.writeAll(" = zig_wrap_"); + try f.object.dg.renderTypeForBuiltinFnName(writer, field_int_ty); + try writer.writeAll("(("); + try f.renderType(writer, field_int_ty); + try writer.writeByte(')'); + const cant_cast = int_info.bits > 64; + if (cant_cast) { + if (field_int_ty.bitSize(mod) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{}); + try writer.writeAll("zig_lo_"); + try f.object.dg.renderTypeForBuiltinFnName(writer, struct_ty); + try writer.writeByte('('); + } + if (bit_offset > 0) { + try writer.writeAll("zig_shr_"); + try f.object.dg.renderTypeForBuiltinFnName(writer, struct_ty); + try writer.writeByte('('); + } + try f.writeCValue(writer, struct_byval, .Other); + if (bit_offset > 0) { + try writer.writeAll(", "); + try f.object.dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument); + try writer.writeByte(')'); + } + if (cant_cast) try writer.writeByte(')'); + try f.object.dg.renderBuiltinInfo(writer, field_int_ty, .bits); + try writer.writeAll(");\n"); + if (inst_ty.eql(field_int_ty, f.object.dg.module)) return temp_local; + + const local = try f.allocLocal(inst, inst_ty); + try writer.writeAll("memcpy("); + try f.writeCValue(writer, .{ .local_ref = local.new_local }, .FunctionArgument); + try 
writer.writeAll(", "); + try f.writeCValue(writer, .{ .local_ref = temp_local.new_local }, .FunctionArgument); + try writer.writeAll(", sizeof("); + try f.renderType(writer, inst_ty); + try writer.writeAll("));\n"); + try freeLocal(f, inst, temp_local.new_local, 0); + return local; + }, + }, + else => unreachable, }, - else => unreachable, }; const local = try f.allocLocal(inst, inst_ty); @@ -6805,17 +6815,17 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { try a.end(f, writer); } }, - .Struct => switch (inst_ty.containerLayout()) { + .Struct => switch (inst_ty.containerLayout(mod)) { .Auto, .Extern => for (resolved_elements, 0..) |element, field_i| { - if (inst_ty.structFieldIsComptime(field_i)) continue; - const field_ty = inst_ty.structFieldType(field_i); + if (inst_ty.structFieldIsComptime(field_i, mod)) continue; + const field_ty = inst_ty.structFieldType(field_i, mod); if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; const a = try Assignment.start(f, writer, field_ty); try f.writeCValueMember(writer, local, if (inst_ty.isSimpleTuple()) .{ .field = field_i } else - .{ .identifier = inst_ty.structFieldName(field_i) }); + .{ .identifier = inst_ty.structFieldName(field_i, mod) }); try a.assign(f, writer); try f.writeCValue(writer, element, .Other); try a.end(f, writer); @@ -6831,8 +6841,8 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { var empty = true; for (0..elements.len) |field_i| { - if (inst_ty.structFieldIsComptime(field_i)) continue; - const field_ty = inst_ty.structFieldType(field_i); + if (inst_ty.structFieldIsComptime(field_i, mod)) continue; + const field_ty = inst_ty.structFieldType(field_i, mod); if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; if (!empty) { @@ -6844,8 +6854,8 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { } empty = true; for (resolved_elements, 0..) 
|element, field_i| { - if (inst_ty.structFieldIsComptime(field_i)) continue; - const field_ty = inst_ty.structFieldType(field_i); + if (inst_ty.structFieldIsComptime(field_i, mod)) continue; + const field_ty = inst_ty.structFieldType(field_i, mod); if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; if (!empty) try writer.writeAll(", "); diff --git a/src/codegen/c/type.zig b/src/codegen/c/type.zig index 799f18e3e4..3321df6d49 100644 --- a/src/codegen/c/type.zig +++ b/src/codegen/c/type.zig @@ -299,7 +299,7 @@ pub const CType = extern union { pub fn fieldAlign(struct_ty: Type, field_i: usize, mod: *Module) AlignAs { return init( struct_ty.structFieldAlign(field_i, mod), - struct_ty.structFieldType(field_i).abiAlignment(mod), + struct_ty.structFieldType(field_i, mod).abiAlignment(mod), ); } pub fn unionPayloadAlign(union_ty: Type, mod: *Module) AlignAs { @@ -1486,23 +1486,23 @@ pub const CType = extern union { } }, - .Struct, .Union => |zig_ty_tag| if (ty.containerLayout() == .Packed) { - if (ty.castTag(.@"struct")) |struct_obj| { - try self.initType(struct_obj.data.backing_int_ty, kind, lookup); + .Struct, .Union => |zig_ty_tag| if (ty.containerLayout(mod) == .Packed) { + if (mod.typeToStruct(ty)) |struct_obj| { + try self.initType(struct_obj.backing_int_ty, kind, lookup); } else { const bits = @intCast(u16, ty.bitSize(mod)); const int_ty = try mod.intType(.unsigned, bits); try self.initType(int_ty, kind, lookup); } - } else if (ty.isTupleOrAnonStruct()) { + } else if (ty.isTupleOrAnonStruct(mod)) { if (lookup.isMutable()) { for (0..switch (zig_ty_tag) { - .Struct => ty.structFieldCount(), + .Struct => ty.structFieldCount(mod), .Union => ty.unionFields().count(), else => unreachable, }) |field_i| { - const field_ty = ty.structFieldType(field_i); - if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i)) or + const field_ty = ty.structFieldType(field_i, mod); + if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i, mod)) or !field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; _ = try lookup.typeToIndex(field_ty, switch (kind) { .forward, .forward_parameter => .forward, @@ -1579,11 +1579,11 @@ pub const CType = extern union { } else { var is_packed = false; for (0..switch (zig_ty_tag) { - .Struct => ty.structFieldCount(), + .Struct => ty.structFieldCount(mod), .Union => ty.unionFields().count(), else => unreachable, }) |field_i| { - const field_ty = ty.structFieldType(field_i); + const field_ty = ty.structFieldType(field_i, mod); if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; const field_align = AlignAs.fieldAlign(ty, field_i, mod); @@ -1929,15 +1929,15 @@ pub const CType = extern union { => { const zig_ty_tag = ty.zigTypeTag(mod); const fields_len = switch (zig_ty_tag) { - .Struct => ty.structFieldCount(), + .Struct => ty.structFieldCount(mod), .Union => ty.unionFields().count(), else => unreachable, }; var c_fields_len: usize = 0; for (0..fields_len) |field_i| { - const field_ty = ty.structFieldType(field_i); - if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i)) or + const field_ty = ty.structFieldType(field_i, mod); + if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i, mod)) or !field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; c_fields_len += 1; } @@ -1945,8 +1945,8 @@ pub const CType = extern union { const fields_pl = try arena.alloc(Payload.Fields.Field, c_fields_len); var c_field_i: usize = 0; for (0..fields_len) |field_i| { - const field_ty = ty.structFieldType(field_i); - if ((zig_ty_tag == .Struct and 
ty.structFieldIsComptime(field_i)) or + const field_ty = ty.structFieldType(field_i, mod); + if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i, mod)) or !field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; defer c_field_i += 1; @@ -1955,7 +1955,7 @@ pub const CType = extern union { std.fmt.allocPrintZ(arena, "f{}", .{field_i}) else arena.dupeZ(u8, switch (zig_ty_tag) { - .Struct => ty.structFieldName(field_i), + .Struct => ty.structFieldName(field_i, mod), .Union => ty.unionFields().keys()[field_i], else => unreachable, }), @@ -2074,7 +2074,7 @@ pub const CType = extern union { .fwd_anon_struct, .fwd_anon_union, => { - if (!ty.isTupleOrAnonStruct()) return false; + if (!ty.isTupleOrAnonStruct(mod)) return false; var name_buf: [ std.fmt.count("f{}", .{std.math.maxInt(usize)}) @@ -2084,12 +2084,12 @@ pub const CType = extern union { const zig_ty_tag = ty.zigTypeTag(mod); var c_field_i: usize = 0; for (0..switch (zig_ty_tag) { - .Struct => ty.structFieldCount(), + .Struct => ty.structFieldCount(mod), .Union => ty.unionFields().count(), else => unreachable, }) |field_i| { - const field_ty = ty.structFieldType(field_i); - if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i)) or + const field_ty = ty.structFieldType(field_i, mod); + if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i, mod)) or !field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; defer c_field_i += 1; @@ -2105,7 +2105,7 @@ pub const CType = extern union { if (ty.isSimpleTuple()) std.fmt.bufPrint(&name_buf, "f{}", .{field_i}) catch unreachable else switch (zig_ty_tag) { - .Struct => ty.structFieldName(field_i), + .Struct => ty.structFieldName(field_i, mod), .Union => ty.unionFields().keys()[field_i], else => unreachable, }, @@ -2210,12 +2210,12 @@ pub const CType = extern union { const zig_ty_tag = ty.zigTypeTag(mod); for (0..switch (ty.zigTypeTag(mod)) { - .Struct => ty.structFieldCount(), + .Struct => ty.structFieldCount(mod), .Union => ty.unionFields().count(), else => unreachable, }) |field_i| { - const field_ty = ty.structFieldType(field_i); - if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i)) or + const field_ty = ty.structFieldType(field_i, mod); + if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i, mod)) or !field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; self.updateHasherRecurse(hasher, field_ty, switch (self.kind) { @@ -2227,7 +2227,7 @@ pub const CType = extern union { hasher.update(if (ty.isSimpleTuple()) std.fmt.bufPrint(&name_buf, "f{}", .{field_i}) catch unreachable else switch (zig_ty_tag) { - .Struct => ty.structFieldName(field_i), + .Struct => ty.structFieldName(field_i, mod), .Union => ty.unionFields().keys()[field_i], else => unreachable, }); diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 10cf66a69a..6b12c447dc 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -1986,8 +1986,7 @@ pub const Object = struct { const name = try ty.nameAlloc(gpa, o.module); defer gpa.free(name); - if (ty.castTag(.@"struct")) |payload| { - const struct_obj = payload.data; + if (mod.typeToStruct(ty)) |struct_obj| { if (struct_obj.layout == .Packed and struct_obj.haveFieldTypes()) { assert(struct_obj.haveLayout()); const info = struct_obj.backing_int_ty.intInfo(mod); @@ -2075,8 +2074,7 @@ pub const Object = struct { return full_di_ty; } - if (ty.castTag(.@"struct")) |payload| { - const struct_obj = payload.data; + if (mod.typeToStruct(ty)) |struct_obj| { if (!struct_obj.haveFieldTypes()) { // This can happen if a struct type makes 
it all the way to // flush() without ever being instantiated or referenced (even @@ -2105,8 +2103,8 @@ pub const Object = struct { return struct_di_ty; } - const fields = ty.structFields(); - const layout = ty.containerLayout(); + const fields = ty.structFields(mod); + const layout = ty.containerLayout(mod); var di_fields: std.ArrayListUnmanaged(*llvm.DIType) = .{}; defer di_fields.deinit(gpa); @@ -2116,7 +2114,7 @@ pub const Object = struct { comptime assert(struct_layout_version == 2); var offset: u64 = 0; - var it = ty.castTag(.@"struct").?.data.runtimeFieldIterator(mod); + var it = mod.typeToStruct(ty).?.runtimeFieldIterator(mod); while (it.next()) |field_and_index| { const field = field_and_index.field; const field_size = field.ty.abiSize(mod); @@ -2990,7 +2988,7 @@ pub const DeclGen = struct { return llvm_struct_ty; } - const struct_obj = t.castTag(.@"struct").?.data; + const struct_obj = mod.typeToStruct(t).?; if (struct_obj.layout == .Packed) { assert(struct_obj.haveLayout()); @@ -3696,7 +3694,7 @@ pub const DeclGen = struct { } } - const struct_obj = tv.ty.castTag(.@"struct").?.data; + const struct_obj = mod.typeToStruct(tv.ty).?; if (struct_obj.layout == .Packed) { assert(struct_obj.haveLayout()); @@ -4043,7 +4041,7 @@ pub const DeclGen = struct { const llvm_u32 = dg.context.intType(32); switch (parent_ty.zigTypeTag(mod)) { .Union => { - if (parent_ty.containerLayout() == .Packed) { + if (parent_ty.containerLayout(mod) == .Packed) { return parent_llvm_ptr; } @@ -4065,14 +4063,14 @@ pub const DeclGen = struct { return parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); }, .Struct => { - if (parent_ty.containerLayout() == .Packed) { + if (parent_ty.containerLayout(mod) == .Packed) { if (!byte_aligned) return parent_llvm_ptr; const llvm_usize = dg.context.intType(target.ptrBitWidth()); const base_addr = parent_llvm_ptr.constPtrToInt(llvm_usize); // count bits of fields before this one const prev_bits = b: { var b: usize = 0; - for (parent_ty.structFields().values()[0..field_index]) |field| { + for (parent_ty.structFields(mod).values()[0..field_index]) |field| { if (field.is_comptime or !field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue; b += @intCast(usize, field.ty.bitSize(mod)); } @@ -5983,7 +5981,7 @@ pub const FuncGen = struct { const struct_ty = self.typeOf(struct_field.struct_operand); const struct_llvm_val = try self.resolveInst(struct_field.struct_operand); const field_index = struct_field.field_index; - const field_ty = struct_ty.structFieldType(field_index); + const field_ty = struct_ty.structFieldType(field_index, mod); if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) { return null; } @@ -5991,9 +5989,9 @@ pub const FuncGen = struct { if (!isByRef(struct_ty, mod)) { assert(!isByRef(field_ty, mod)); switch (struct_ty.zigTypeTag(mod)) { - .Struct => switch (struct_ty.containerLayout()) { + .Struct => switch (struct_ty.containerLayout(mod)) { .Packed => { - const struct_obj = struct_ty.castTag(.@"struct").?.data; + const struct_obj = mod.typeToStruct(struct_ty).?; const bit_offset = struct_obj.packedFieldBitOffset(mod, field_index); const containing_int = struct_llvm_val; const shift_amt = containing_int.typeOf().constInt(bit_offset, .False); @@ -6019,7 +6017,7 @@ pub const FuncGen = struct { }, }, .Union => { - assert(struct_ty.containerLayout() == .Packed); + assert(struct_ty.containerLayout(mod) == .Packed); const containing_int = struct_llvm_val; const elem_llvm_ty = try self.dg.lowerType(field_ty); if (field_ty.zigTypeTag(mod) == .Float or 
field_ty.zigTypeTag(mod) == .Vector) { @@ -6041,7 +6039,7 @@ pub const FuncGen = struct { switch (struct_ty.zigTypeTag(mod)) { .Struct => { - assert(struct_ty.containerLayout() != .Packed); + assert(struct_ty.containerLayout(mod) != .Packed); var ptr_ty_buf: Type.Payload.Pointer = undefined; const llvm_field_index = llvmFieldIndex(struct_ty, field_index, mod, &ptr_ty_buf).?; const struct_llvm_ty = try self.dg.lowerType(struct_ty); @@ -9289,8 +9287,8 @@ pub const FuncGen = struct { return vector; }, .Struct => { - if (result_ty.containerLayout() == .Packed) { - const struct_obj = result_ty.castTag(.@"struct").?.data; + if (result_ty.containerLayout(mod) == .Packed) { + const struct_obj = mod.typeToStruct(result_ty).?; assert(struct_obj.haveLayout()); const big_bits = struct_obj.backing_int_ty.bitSize(mod); const int_llvm_ty = self.context.intType(@intCast(c_uint, big_bits)); @@ -9795,7 +9793,7 @@ pub const FuncGen = struct { const mod = self.dg.module; const struct_ty = struct_ptr_ty.childType(mod); switch (struct_ty.zigTypeTag(mod)) { - .Struct => switch (struct_ty.containerLayout()) { + .Struct => switch (struct_ty.containerLayout(mod)) { .Packed => { const result_ty = self.typeOfIndex(inst); const result_ty_info = result_ty.ptrInfo(mod); @@ -9838,7 +9836,7 @@ pub const FuncGen = struct { }, .Union => { const layout = struct_ty.unionGetLayout(mod); - if (layout.payload_size == 0 or struct_ty.containerLayout() == .Packed) return struct_ptr; + if (layout.payload_size == 0 or struct_ty.containerLayout(mod) == .Packed) return struct_ptr; const payload_index = @boolToInt(layout.tag_align >= layout.payload_align); const union_llvm_ty = try self.dg.lowerType(struct_ty); const union_field_ptr = self.builder.buildStructGEP(union_llvm_ty, struct_ptr, payload_index, ""); @@ -10530,11 +10528,11 @@ fn llvmFieldIndex( } return null; } - const layout = ty.containerLayout(); + const layout = ty.containerLayout(mod); assert(layout != .Packed); var llvm_field_index: c_uint = 0; - var it = ty.castTag(.@"struct").?.data.runtimeFieldIterator(mod); + var it = mod.typeToStruct(ty).?.runtimeFieldIterator(mod); while (it.next()) |field_and_index| { const field = field_and_index.field; const field_align = field.alignment(mod, layout); @@ -11113,7 +11111,7 @@ fn isByRef(ty: Type, mod: *Module) bool { .Array, .Frame => return ty.hasRuntimeBits(mod), .Struct => { // Packed structs are represented to LLVM as integers. 
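 // For example, `packed struct { a: u8, b: u24 }` lowers to the LLVM
 // integer type `i32`, and integers are always passed by value.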
- if (ty.containerLayout() == .Packed) return false; + if (ty.containerLayout(mod) == .Packed) return false; if (ty.isSimpleTupleOrAnonStruct()) { const tuple = ty.tupleFields(); var count: usize = 0; @@ -11127,7 +11125,7 @@ fn isByRef(ty: Type, mod: *Module) bool { return false; } var count: usize = 0; - const fields = ty.structFields(); + const fields = ty.structFields(mod); for (fields.values()) |field| { if (field.is_comptime or !field.ty.hasRuntimeBits(mod)) continue; @@ -11137,7 +11135,7 @@ fn isByRef(ty: Type, mod: *Module) bool { } return false; }, - .Union => switch (ty.containerLayout()) { + .Union => switch (ty.containerLayout(mod)) { .Packed => return false, else => return ty.hasRuntimeBits(mod), }, @@ -11176,8 +11174,8 @@ fn isScalar(mod: *Module, ty: Type) bool { .Vector, => true, - .Struct => ty.containerLayout() == .Packed, - .Union => ty.containerLayout() == .Packed, + .Struct => ty.containerLayout(mod) == .Packed, + .Union => ty.containerLayout(mod) == .Packed, else => false, }; } diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 52f94cc6d5..41b523b8f4 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -685,7 +685,7 @@ pub const DeclGen = struct { if (ty.isSimpleTupleOrAnonStruct()) { unreachable; // TODO } else { - const struct_ty = ty.castTag(.@"struct").?.data; + const struct_ty = mod.typeToStruct(ty).?; if (struct_ty.layout == .Packed) { return dg.todo("packed struct constants", .{}); @@ -1306,7 +1306,7 @@ pub const DeclGen = struct { } }); } - const struct_ty = ty.castTag(.@"struct").?.data; + const struct_ty = mod.typeToStruct(ty).?; if (struct_ty.layout == .Packed) { return try self.resolveType(struct_ty.backing_int_ty, .direct); @@ -2576,7 +2576,7 @@ pub const DeclGen = struct { const struct_ty = self.typeOf(struct_field.struct_operand); const object_id = try self.resolve(struct_field.struct_operand); const field_index = struct_field.field_index; - const field_ty = struct_ty.structFieldType(field_index); + const field_ty = struct_ty.structFieldType(field_index, mod); if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) return null; @@ -2595,7 +2595,7 @@ pub const DeclGen = struct { const mod = self.module; const object_ty = object_ptr_ty.childType(mod); switch (object_ty.zigTypeTag(mod)) { - .Struct => switch (object_ty.containerLayout()) { + .Struct => switch (object_ty.containerLayout(mod)) { .Packed => unreachable, // TODO else => { const field_index_ty_ref = try self.intType(.unsigned, 32); diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig index 0561ccbfda..7d033de584 100644 --- a/src/link/Dwarf.zig +++ b/src/link/Dwarf.zig @@ -360,13 +360,13 @@ pub const DeclState = struct { dbg_info_buffer.appendSliceAssumeCapacity(struct_name); dbg_info_buffer.appendAssumeCapacity(0); - const struct_obj = ty.castTag(.@"struct").?.data; + const struct_obj = mod.typeToStruct(ty).?; if (struct_obj.layout == .Packed) { log.debug("TODO implement .debug_info for packed structs", .{}); break :blk; } - const fields = ty.structFields(); + const fields = ty.structFields(mod); for (fields.keys(), 0..) 
|field_name, field_index| { const field = fields.get(field_name).?; if (!field.ty.hasRuntimeBits(mod)) continue; diff --git a/src/type.zig b/src/type.zig index 2870b5616f..4e374a39d5 100644 --- a/src/type.zig +++ b/src/type.zig @@ -59,8 +59,6 @@ pub const Type = struct { .anyframe_T => return .AnyFrame, - .empty_struct, - .@"struct", .tuple, .anon_struct, => return .Struct, @@ -148,6 +146,7 @@ pub const Type = struct { .opt => unreachable, .enum_tag => unreachable, .simple_value => unreachable, + .aggregate => unreachable, }, } } @@ -501,16 +500,6 @@ pub const Type = struct { return a.elemType2(mod).eql(b.elemType2(mod), mod); }, - .empty_struct => { - const a_namespace = a.castTag(.empty_struct).?.data; - const b_namespace = (b.castTag(.empty_struct) orelse return false).data; - return a_namespace == b_namespace; - }, - .@"struct" => { - const a_struct_obj = a.castTag(.@"struct").?.data; - const b_struct_obj = (b.castTag(.@"struct") orelse return false).data; - return a_struct_obj == b_struct_obj; - }, .tuple => { if (!b.isSimpleTuple()) return false; @@ -720,15 +709,6 @@ pub const Type = struct { hashWithHasher(ty.childType(mod), hasher, mod); }, - .empty_struct => { - std.hash.autoHash(hasher, std.builtin.TypeId.Struct); - const namespace: *const Module.Namespace = ty.castTag(.empty_struct).?.data; - std.hash.autoHash(hasher, namespace); - }, - .@"struct" => { - const struct_obj: *const Module.Struct = ty.castTag(.@"struct").?.data; - std.hash.autoHash(hasher, struct_obj); - }, .tuple => { std.hash.autoHash(hasher, std.builtin.TypeId.Struct); @@ -955,8 +935,6 @@ pub const Type = struct { .error_set => return self.copyPayloadShallow(allocator, Payload.ErrorSet), .error_set_inferred => return self.copyPayloadShallow(allocator, Payload.ErrorSetInferred), .error_set_single => return self.copyPayloadShallow(allocator, Payload.Name), - .empty_struct => return self.copyPayloadShallow(allocator, Payload.ContainerScope), - .@"struct" => return self.copyPayloadShallow(allocator, Payload.Struct), .@"union", .union_safety_tagged, .union_tagged => return self.copyPayloadShallow(allocator, Payload.Union), .enum_simple => return self.copyPayloadShallow(allocator, Payload.EnumSimple), .enum_numbered => return self.copyPayloadShallow(allocator, Payload.EnumNumbered), @@ -1033,14 +1011,6 @@ pub const Type = struct { while (true) { const t = ty.tag(); switch (t) { - .empty_struct => return writer.writeAll("struct {}"), - - .@"struct" => { - const struct_obj = ty.castTag(.@"struct").?.data; - return writer.print("({s} decl={d})", .{ - @tagName(t), struct_obj.owner_decl, - }); - }, .@"union", .union_safety_tagged, .union_tagged => { const union_obj = ty.cast(Payload.Union).?.data; return writer.print("({s} decl={d})", .{ @@ -1247,22 +1217,10 @@ pub const Type = struct { /// Prints a name suitable for `@typeName`. 
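 /// For example, a named struct prints as its fully qualified declaration
 /// name, while the anonymous empty struct prints as `@TypeOf(.{})`.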
pub fn print(ty: Type, writer: anytype, mod: *Module) @TypeOf(writer).Error!void { switch (ty.ip_index) { - .empty_struct_type => try writer.writeAll("@TypeOf(.{})"), - .none => switch (ty.tag()) { .inferred_alloc_const => unreachable, .inferred_alloc_mut => unreachable, - .empty_struct => { - const namespace = ty.castTag(.empty_struct).?.data; - try namespace.renderFullyQualifiedName(mod, "", writer); - }, - - .@"struct" => { - const struct_obj = ty.castTag(.@"struct").?.data; - const decl = mod.declPtr(struct_obj.owner_decl); - try decl.renderFullyQualifiedName(mod, writer); - }, .@"union", .union_safety_tagged, .union_tagged => { const union_obj = ty.cast(Payload.Union).?.data; const decl = mod.declPtr(union_obj.owner_decl); @@ -1548,7 +1506,18 @@ pub const Type = struct { return; }, .simple_type => |s| return writer.writeAll(@tagName(s)), - .struct_type => @panic("TODO"), + .struct_type => |struct_type| { + if (mod.structPtrUnwrap(struct_type.index)) |struct_obj| { + const decl = mod.declPtr(struct_obj.owner_decl); + try decl.renderFullyQualifiedName(mod, writer); + } else if (struct_type.namespace.unwrap()) |namespace_index| { + const namespace = mod.namespacePtr(namespace_index); + try namespace.renderFullyQualifiedName(mod, "", writer); + } else { + try writer.writeAll("@TypeOf(.{})"); + } + }, + .union_type => @panic("TODO"), .opaque_type => |opaque_type| { const decl = mod.declPtr(opaque_type.decl); @@ -1562,6 +1531,7 @@ pub const Type = struct { .ptr => unreachable, .opt => unreachable, .enum_tag => unreachable, + .aggregate => unreachable, }, } } @@ -1624,12 +1594,10 @@ pub const Type = struct { }, // These are false because they are comptime-only types. - .empty_struct, // These are function *bodies*, not pointers. // Special exceptions have to be made when emitting functions due to // this returning false. - .function, - => return false, + .function => return false, .optional => { const child_ty = ty.optionalChild(mod); @@ -1646,28 +1614,6 @@ pub const Type = struct { } }, - .@"struct" => { - const struct_obj = ty.castTag(.@"struct").?.data; - if (struct_obj.status == .field_types_wip) { - // In this case, we guess that hasRuntimeBits() for this type is true, - // and then later if our guess was incorrect, we emit a compile error. - struct_obj.assumed_runtime_bits = true; - return true; - } - switch (strat) { - .sema => |sema| _ = try sema.resolveTypeFields(ty), - .eager => assert(struct_obj.haveFieldTypes()), - .lazy => if (!struct_obj.haveFieldTypes()) return error.NeedLazy, - } - for (struct_obj.fields.values()) |field| { - if (field.is_comptime) continue; - if (try field.ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat)) - return true; - } else { - return false; - } - }, - .enum_full => { const enum_full = ty.castTag(.enum_full).?.data; return enum_full.tag_ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat); @@ -1824,7 +1770,31 @@ pub const Type = struct { .generic_poison => unreachable, .var_args_param => unreachable, }, - .struct_type => @panic("TODO"), + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse { + // This struct has no fields. + return false; + }; + if (struct_obj.status == .field_types_wip) { + // In this case, we guess that hasRuntimeBits() for this type is true, + // and then later if our guess was incorrect, we emit a compile error. 
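+ // `assumed_runtime_bits` records the guess so that, once field types
+ // resolve, a zero-bit result can be reported as a compile error rather
+ // than silently contradicting this early answer.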
+ struct_obj.assumed_runtime_bits = true; + return true; + } + switch (strat) { + .sema => |sema| _ = try sema.resolveTypeFields(ty), + .eager => assert(struct_obj.haveFieldTypes()), + .lazy => if (!struct_obj.haveFieldTypes()) return error.NeedLazy, + } + for (struct_obj.fields.values()) |field| { + if (field.is_comptime) continue; + if (try field.ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat)) + return true; + } else { + return false; + } + }, + .union_type => @panic("TODO"), .opaque_type => true, @@ -1835,6 +1805,7 @@ pub const Type = struct { .ptr => unreachable, .opt => unreachable, .enum_tag => unreachable, + .aggregate => unreachable, }, } } @@ -1862,7 +1833,6 @@ pub const Type = struct { .anyframe_T, .tuple, .anon_struct, - .empty_struct, => false, .enum_full, @@ -1877,7 +1847,6 @@ pub const Type = struct { => ty.childType(mod).hasWellDefinedLayout(mod), .optional => ty.isPtrLikeOptional(mod), - .@"struct" => ty.castTag(.@"struct").?.data.layout != .Auto, .@"union", .union_safety_tagged => ty.cast(Payload.Union).?.data.layout != .Auto, .union_tagged => false, }, @@ -1936,7 +1905,13 @@ pub const Type = struct { .var_args_param => unreachable, }, - .struct_type => @panic("TODO"), + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse { + // Struct with no fields has a well-defined layout of no bits. + return true; + }; + return struct_obj.layout != .Auto; + }, .union_type => @panic("TODO"), .opaque_type => false, @@ -1947,6 +1922,7 @@ pub const Type = struct { .ptr => unreachable, .opt => unreachable, .enum_tag => unreachable, + .aggregate => unreachable, }, }; } @@ -2146,68 +2122,6 @@ pub const Type = struct { .optional => return abiAlignmentAdvancedOptional(ty, mod, strat), .error_union => return abiAlignmentAdvancedErrorUnion(ty, mod, strat), - .@"struct" => { - const struct_obj = ty.castTag(.@"struct").?.data; - if (opt_sema) |sema| { - if (struct_obj.status == .field_types_wip) { - // We'll guess "pointer-aligned", if the struct has an - // underaligned pointer field then some allocations - // might require explicit alignment. 
- return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }; - } - _ = try sema.resolveTypeFields(ty); - } - if (!struct_obj.haveFieldTypes()) switch (strat) { - .eager => unreachable, // struct layout not resolved - .sema => unreachable, // handled above - .lazy => |arena| return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) }, - }; - if (struct_obj.layout == .Packed) { - switch (strat) { - .sema => |sema| try sema.resolveTypeLayout(ty), - .lazy => |arena| { - if (!struct_obj.haveLayout()) { - return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) }; - } - }, - .eager => {}, - } - assert(struct_obj.haveLayout()); - return AbiAlignmentAdvanced{ .scalar = struct_obj.backing_int_ty.abiAlignment(mod) }; - } - - const fields = ty.structFields(); - var big_align: u32 = 0; - for (fields.values()) |field| { - if (!(field.ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { - error.NeedLazy => return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(strat.lazy, ty) }, - else => |e| return e, - })) continue; - - const field_align = if (field.abi_align != 0) - field.abi_align - else switch (try field.ty.abiAlignmentAdvanced(mod, strat)) { - .scalar => |a| a, - .val => switch (strat) { - .eager => unreachable, // struct layout not resolved - .sema => unreachable, // handled above - .lazy => |arena| return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) }, - }, - }; - big_align = @max(big_align, field_align); - - // This logic is duplicated in Module.Struct.Field.alignment. - if (struct_obj.layout == .Extern or target.ofmt == .c) { - if (field.ty.isAbiInt(mod) and field.ty.intInfo(mod).bits >= 128) { - // The C ABI requires 128 bit integer fields of structs - // to be 16-bytes aligned. - big_align = @max(big_align, 16); - } - } - } - return AbiAlignmentAdvanced{ .scalar = big_align }; - }, - .tuple, .anon_struct => { const tuple = ty.tupleFields(); var big_align: u32 = 0; @@ -2241,8 +2155,6 @@ pub const Type = struct { return abiAlignmentAdvancedUnion(ty, mod, strat, union_obj, true); }, - .empty_struct => return AbiAlignmentAdvanced{ .scalar = 0 }, - .inferred_alloc_const, .inferred_alloc_mut, => unreachable, @@ -2337,7 +2249,69 @@ pub const Type = struct { .generic_poison => unreachable, .var_args_param => unreachable, }, - .struct_type => @panic("TODO"), + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse + return AbiAlignmentAdvanced{ .scalar = 0 }; + + if (opt_sema) |sema| { + if (struct_obj.status == .field_types_wip) { + // We'll guess "pointer-aligned", if the struct has an + // underaligned pointer field then some allocations + // might require explicit alignment. 
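+ // i.e. `@divExact(target.ptrBitWidth(), 8)` bytes: 8 on 64-bit
+ // targets, 4 on 32-bit targets.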
+ return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }; + } + _ = try sema.resolveTypeFields(ty); + } + if (!struct_obj.haveFieldTypes()) switch (strat) { + .eager => unreachable, // struct layout not resolved + .sema => unreachable, // handled above + .lazy => |arena| return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) }, + }; + if (struct_obj.layout == .Packed) { + switch (strat) { + .sema => |sema| try sema.resolveTypeLayout(ty), + .lazy => |arena| { + if (!struct_obj.haveLayout()) { + return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) }; + } + }, + .eager => {}, + } + assert(struct_obj.haveLayout()); + return AbiAlignmentAdvanced{ .scalar = struct_obj.backing_int_ty.abiAlignment(mod) }; + } + + const fields = ty.structFields(mod); + var big_align: u32 = 0; + for (fields.values()) |field| { + if (!(field.ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { + error.NeedLazy => return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(strat.lazy, ty) }, + else => |e| return e, + })) continue; + + const field_align = if (field.abi_align != 0) + field.abi_align + else switch (try field.ty.abiAlignmentAdvanced(mod, strat)) { + .scalar => |a| a, + .val => switch (strat) { + .eager => unreachable, // struct layout not resolved + .sema => unreachable, // handled above + .lazy => |arena| return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) }, + }, + }; + big_align = @max(big_align, field_align); + + // This logic is duplicated in Module.Struct.Field.alignment. + if (struct_obj.layout == .Extern or target.ofmt == .c) { + if (field.ty.isAbiInt(mod) and field.ty.intInfo(mod).bits >= 128) { + // The C ABI requires 128 bit integer fields of structs + // to be 16-bytes aligned. 
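+ // e.g. a `u128` field in an `extern struct` bumps the whole
+ // struct to at least 16-byte alignment.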
+ big_align = @max(big_align, 16); + } + } + } + return AbiAlignmentAdvanced{ .scalar = big_align }; + }, .union_type => @panic("TODO"), .opaque_type => return AbiAlignmentAdvanced{ .scalar = 1 }, @@ -2348,6 +2322,7 @@ pub const Type = struct { .ptr => unreachable, .opt => unreachable, .enum_tag => unreachable, + .aggregate => unreachable, }, } } @@ -2517,42 +2492,16 @@ pub const Type = struct { .inferred_alloc_const => unreachable, .inferred_alloc_mut => unreachable, - .empty_struct => return AbiSizeAdvanced{ .scalar = 0 }, - - .@"struct", .tuple, .anon_struct => switch (ty.containerLayout()) { - .Packed => { - const struct_obj = ty.castTag(.@"struct").?.data; - switch (strat) { - .sema => |sema| try sema.resolveTypeLayout(ty), - .lazy => |arena| { - if (!struct_obj.haveLayout()) { - return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(arena, ty) }; - } - }, - .eager => {}, - } - assert(struct_obj.haveLayout()); - return AbiSizeAdvanced{ .scalar = struct_obj.backing_int_ty.abiSize(mod) }; - }, - else => { - switch (strat) { - .sema => |sema| try sema.resolveTypeLayout(ty), - .lazy => |arena| { - if (ty.castTag(.@"struct")) |payload| { - const struct_obj = payload.data; - if (!struct_obj.haveLayout()) { - return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(arena, ty) }; - } - } - }, - .eager => {}, - } - const field_count = ty.structFieldCount(); - if (field_count == 0) { - return AbiSizeAdvanced{ .scalar = 0 }; - } - return AbiSizeAdvanced{ .scalar = ty.structFieldOffset(field_count, mod) }; - }, + .tuple, .anon_struct => { + switch (strat) { + .sema => |sema| try sema.resolveTypeLayout(ty), + .lazy, .eager => {}, + } + const field_count = ty.structFieldCount(mod); + if (field_count == 0) { + return AbiSizeAdvanced{ .scalar = 0 }; + } + return AbiSizeAdvanced{ .scalar = ty.structFieldOffset(field_count, mod) }; }, .enum_simple, .enum_full, .enum_nonexhaustive, .enum_numbered => { @@ -2752,7 +2701,42 @@ pub const Type = struct { .generic_poison => unreachable, .var_args_param => unreachable, }, - .struct_type => @panic("TODO"), + .struct_type => |struct_type| switch (ty.containerLayout(mod)) { + .Packed => { + const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse + return AbiSizeAdvanced{ .scalar = 0 }; + + switch (strat) { + .sema => |sema| try sema.resolveTypeLayout(ty), + .lazy => |arena| { + if (!struct_obj.haveLayout()) { + return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(arena, ty) }; + } + }, + .eager => {}, + } + assert(struct_obj.haveLayout()); + return AbiSizeAdvanced{ .scalar = struct_obj.backing_int_ty.abiSize(mod) }; + }, + else => { + switch (strat) { + .sema => |sema| try sema.resolveTypeLayout(ty), + .lazy => |arena| { + const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse + return AbiSizeAdvanced{ .scalar = 0 }; + if (!struct_obj.haveLayout()) { + return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(arena, ty) }; + } + }, + .eager => {}, + } + const field_count = ty.structFieldCount(mod); + if (field_count == 0) { + return AbiSizeAdvanced{ .scalar = 0 }; + } + return AbiSizeAdvanced{ .scalar = ty.structFieldOffset(field_count, mod) }; + }, + }, .union_type => @panic("TODO"), .opaque_type => unreachable, // no size available @@ -2763,6 +2747,7 @@ pub const Type = struct { .ptr => unreachable, .opt => unreachable, .enum_tag => unreachable, + .aggregate => unreachable, }, } } @@ -2850,189 +2835,189 @@ pub const Type = struct { ) Module.CompileError!u64 { const target = mod.getTarget(); - if (ty.ip_index != .none) 
switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .int_type => |int_type| return int_type.bits, - .ptr_type => |ptr_type| switch (ptr_type.size) { - .Slice => return target.ptrBitWidth() * 2, - else => return target.ptrBitWidth() * 2, - }, - .array_type => |array_type| { - const len = array_type.len + @boolToInt(array_type.sentinel != .none); - if (len == 0) return 0; - const elem_ty = array_type.child.toType(); - const elem_size = std.math.max(elem_ty.abiAlignment(mod), elem_ty.abiSize(mod)); - if (elem_size == 0) return 0; - const elem_bit_size = try bitSizeAdvanced(elem_ty, mod, opt_sema); - return (len - 1) * 8 * elem_size + elem_bit_size; - }, - .vector_type => |vector_type| { - const child_ty = vector_type.child.toType(); - const elem_bit_size = try bitSizeAdvanced(child_ty, mod, opt_sema); - return elem_bit_size * vector_type.len; - }, - .opt_type => @panic("TODO"), - .error_union_type => @panic("TODO"), - .simple_type => |t| switch (t) { - .f16 => return 16, - .f32 => return 32, - .f64 => return 64, - .f80 => return 80, - .f128 => return 128, - - .usize, - .isize, - .@"anyframe", - => return target.ptrBitWidth(), - - .c_char => return target.c_type_bit_size(.char), - .c_short => return target.c_type_bit_size(.short), - .c_ushort => return target.c_type_bit_size(.ushort), - .c_int => return target.c_type_bit_size(.int), - .c_uint => return target.c_type_bit_size(.uint), - .c_long => return target.c_type_bit_size(.long), - .c_ulong => return target.c_type_bit_size(.ulong), - .c_longlong => return target.c_type_bit_size(.longlong), - .c_ulonglong => return target.c_type_bit_size(.ulonglong), - .c_longdouble => return target.c_type_bit_size(.longdouble), - - .bool => return 1, - .void => return 0, - - // TODO revisit this when we have the concept of the error tag type - .anyerror => return 16, - - .anyopaque => unreachable, - .type => unreachable, - .comptime_int => unreachable, - .comptime_float => unreachable, - .noreturn => unreachable, - .null => unreachable, - .undefined => unreachable, - .enum_literal => unreachable, - .generic_poison => unreachable, - .var_args_param => unreachable, - - .atomic_order => unreachable, // missing call to resolveTypeFields - .atomic_rmw_op => unreachable, // missing call to resolveTypeFields - .calling_convention => unreachable, // missing call to resolveTypeFields - .address_space => unreachable, // missing call to resolveTypeFields - .float_mode => unreachable, // missing call to resolveTypeFields - .reduce_op => unreachable, // missing call to resolveTypeFields - .call_modifier => unreachable, // missing call to resolveTypeFields - .prefetch_options => unreachable, // missing call to resolveTypeFields - .export_options => unreachable, // missing call to resolveTypeFields - .extern_options => unreachable, // missing call to resolveTypeFields - .type_info => unreachable, // missing call to resolveTypeFields - }, - .struct_type => @panic("TODO"), - .union_type => @panic("TODO"), - .opaque_type => unreachable, - - // values, not types - .simple_value => unreachable, - .extern_func => unreachable, - .int => unreachable, - .ptr => unreachable, - .opt => unreachable, - .enum_tag => unreachable, - }; - const strat: AbiAlignmentAdvancedStrat = if (opt_sema) |sema| .{ .sema = sema } else .eager; - switch (ty.tag()) { - .function => unreachable, // represents machine code; not a pointer - .empty_struct => unreachable, - .inferred_alloc_const => unreachable, - .inferred_alloc_mut => unreachable, - - .@"struct" => { - const struct_obj = 
ty.castTag(.@"struct").?.data; - if (struct_obj.layout != .Packed) { - return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8; - } - if (opt_sema) |sema| _ = try sema.resolveTypeLayout(ty); - assert(struct_obj.haveLayout()); - return try struct_obj.backing_int_ty.bitSizeAdvanced(mod, opt_sema); - }, - - .tuple, .anon_struct => { - if (opt_sema) |sema| _ = try sema.resolveTypeFields(ty); - if (ty.containerLayout() != .Packed) { - return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8; - } - var total: u64 = 0; - for (ty.tupleFields().types) |field_ty| { - total += try bitSizeAdvanced(field_ty, mod, opt_sema); - } - return total; - }, + switch (ty.ip_index) { + .none => switch (ty.tag()) { + .function => unreachable, // represents machine code; not a pointer + .inferred_alloc_const => unreachable, + .inferred_alloc_mut => unreachable, - .enum_simple, .enum_full, .enum_nonexhaustive, .enum_numbered => { - const int_tag_ty = try ty.intTagType(mod); - return try bitSizeAdvanced(int_tag_ty, mod, opt_sema); - }, + .tuple, .anon_struct => { + if (opt_sema) |sema| _ = try sema.resolveTypeFields(ty); + if (ty.containerLayout(mod) != .Packed) { + return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8; + } + var total: u64 = 0; + for (ty.tupleFields().types) |field_ty| { + total += try bitSizeAdvanced(field_ty, mod, opt_sema); + } + return total; + }, - .@"union", .union_safety_tagged, .union_tagged => { - if (opt_sema) |sema| _ = try sema.resolveTypeFields(ty); - if (ty.containerLayout() != .Packed) { - return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8; - } - const union_obj = ty.cast(Payload.Union).?.data; - assert(union_obj.haveFieldTypes()); + .enum_simple, .enum_full, .enum_nonexhaustive, .enum_numbered => { + const int_tag_ty = try ty.intTagType(mod); + return try bitSizeAdvanced(int_tag_ty, mod, opt_sema); + }, - var size: u64 = 0; - for (union_obj.fields.values()) |field| { - size = @max(size, try bitSizeAdvanced(field.ty, mod, opt_sema)); - } - return size; - }, + .@"union", .union_safety_tagged, .union_tagged => { + if (opt_sema) |sema| _ = try sema.resolveTypeFields(ty); + if (ty.containerLayout(mod) != .Packed) { + return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8; + } + const union_obj = ty.cast(Payload.Union).?.data; + assert(union_obj.haveFieldTypes()); - .array => { - const payload = ty.castTag(.array).?.data; - const elem_size = std.math.max(payload.elem_type.abiAlignment(mod), payload.elem_type.abiSize(mod)); - if (elem_size == 0 or payload.len == 0) - return @as(u64, 0); - const elem_bit_size = try bitSizeAdvanced(payload.elem_type, mod, opt_sema); - return (payload.len - 1) * 8 * elem_size + elem_bit_size; - }, - .array_sentinel => { - const payload = ty.castTag(.array_sentinel).?.data; - const elem_size = std.math.max( - payload.elem_type.abiAlignment(mod), - payload.elem_type.abiSize(mod), - ); - const elem_bit_size = try bitSizeAdvanced(payload.elem_type, mod, opt_sema); - return payload.len * 8 * elem_size + elem_bit_size; - }, + var size: u64 = 0; + for (union_obj.fields.values()) |field| { + size = @max(size, try bitSizeAdvanced(field.ty, mod, opt_sema)); + } + return size; + }, + + .array => { + const payload = ty.castTag(.array).?.data; + const elem_size = std.math.max(payload.elem_type.abiAlignment(mod), payload.elem_type.abiSize(mod)); + if (elem_size == 0 or payload.len == 0) + return @as(u64, 0); + const elem_bit_size = try bitSizeAdvanced(payload.elem_type, mod, opt_sema); + return (payload.len - 1) * 8 * elem_size + elem_bit_size; + }, + .array_sentinel => { 
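+ // The sentinel occupies one extra element slot: `len` full elements
+ // precede the final element's bits, vs. `(len - 1)` in `.array` above.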
+ const payload = ty.castTag(.array_sentinel).?.data; + const elem_size = std.math.max( + payload.elem_type.abiAlignment(mod), + payload.elem_type.abiSize(mod), + ); + const elem_bit_size = try bitSizeAdvanced(payload.elem_type, mod, opt_sema); + return payload.len * 8 * elem_size + elem_bit_size; + }, - .anyframe_T => return target.ptrBitWidth(), + .anyframe_T => return target.ptrBitWidth(), - .pointer => switch (ty.castTag(.pointer).?.data.size) { - .Slice => return target.ptrBitWidth() * 2, - else => return target.ptrBitWidth(), + .pointer => switch (ty.castTag(.pointer).?.data.size) { + .Slice => return target.ptrBitWidth() * 2, + else => return target.ptrBitWidth(), + }, + + .error_set, + .error_set_single, + .error_set_inferred, + .error_set_merged, + => return 16, // TODO revisit this when we have the concept of the error tag type + + .optional, .error_union => { + // Optionals and error unions are not packed so their bitsize + // includes padding bits. + return (try abiSizeAdvanced(ty, mod, strat)).scalar * 8; + }, }, + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .int_type => |int_type| return int_type.bits, + .ptr_type => |ptr_type| switch (ptr_type.size) { + .Slice => return target.ptrBitWidth() * 2, + else => return target.ptrBitWidth() * 2, + }, + .array_type => |array_type| { + const len = array_type.len + @boolToInt(array_type.sentinel != .none); + if (len == 0) return 0; + const elem_ty = array_type.child.toType(); + const elem_size = std.math.max(elem_ty.abiAlignment(mod), elem_ty.abiSize(mod)); + if (elem_size == 0) return 0; + const elem_bit_size = try bitSizeAdvanced(elem_ty, mod, opt_sema); + return (len - 1) * 8 * elem_size + elem_bit_size; + }, + .vector_type => |vector_type| { + const child_ty = vector_type.child.toType(); + const elem_bit_size = try bitSizeAdvanced(child_ty, mod, opt_sema); + return elem_bit_size * vector_type.len; + }, + .opt_type => @panic("TODO"), + .error_union_type => @panic("TODO"), + .simple_type => |t| switch (t) { + .f16 => return 16, + .f32 => return 32, + .f64 => return 64, + .f80 => return 80, + .f128 => return 128, - .error_set, - .error_set_single, - .error_set_inferred, - .error_set_merged, - => return 16, // TODO revisit this when we have the concept of the error tag type + .usize, + .isize, + .@"anyframe", + => return target.ptrBitWidth(), + + .c_char => return target.c_type_bit_size(.char), + .c_short => return target.c_type_bit_size(.short), + .c_ushort => return target.c_type_bit_size(.ushort), + .c_int => return target.c_type_bit_size(.int), + .c_uint => return target.c_type_bit_size(.uint), + .c_long => return target.c_type_bit_size(.long), + .c_ulong => return target.c_type_bit_size(.ulong), + .c_longlong => return target.c_type_bit_size(.longlong), + .c_ulonglong => return target.c_type_bit_size(.ulonglong), + .c_longdouble => return target.c_type_bit_size(.longdouble), + + .bool => return 1, + .void => return 0, + + // TODO revisit this when we have the concept of the error tag type + .anyerror => return 16, + + .anyopaque => unreachable, + .type => unreachable, + .comptime_int => unreachable, + .comptime_float => unreachable, + .noreturn => unreachable, + .null => unreachable, + .undefined => unreachable, + .enum_literal => unreachable, + .generic_poison => unreachable, + .var_args_param => unreachable, + + .atomic_order => unreachable, // missing call to resolveTypeFields + .atomic_rmw_op => unreachable, // missing call to resolveTypeFields + .calling_convention => unreachable, // missing call to 
resolveTypeFields + .address_space => unreachable, // missing call to resolveTypeFields + .float_mode => unreachable, // missing call to resolveTypeFields + .reduce_op => unreachable, // missing call to resolveTypeFields + .call_modifier => unreachable, // missing call to resolveTypeFields + .prefetch_options => unreachable, // missing call to resolveTypeFields + .export_options => unreachable, // missing call to resolveTypeFields + .extern_options => unreachable, // missing call to resolveTypeFields + .type_info => unreachable, // missing call to resolveTypeFields + }, + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return 0; + if (struct_obj.layout != .Packed) { + return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8; + } + if (opt_sema) |sema| _ = try sema.resolveTypeLayout(ty); + assert(struct_obj.haveLayout()); + return try struct_obj.backing_int_ty.bitSizeAdvanced(mod, opt_sema); + }, + + .union_type => @panic("TODO"), + .opaque_type => unreachable, - .optional, .error_union => { - // Optionals and error unions are not packed so their bitsize - // includes padding bits. - return (try abiSizeAdvanced(ty, mod, strat)).scalar * 8; + // values, not types + .simple_value => unreachable, + .extern_func => unreachable, + .int => unreachable, + .ptr => unreachable, + .opt => unreachable, + .enum_tag => unreachable, + .aggregate => unreachable, }, } } /// Returns true if the type's layout is already resolved and it is safe /// to use `abiSize`, `abiAlignment` and `bitSize` on it. - pub fn layoutIsResolved(ty: Type, mod: *const Module) bool { + pub fn layoutIsResolved(ty: Type, mod: *Module) bool { switch (ty.zigTypeTag(mod)) { .Struct => { - if (ty.castTag(.@"struct")) |struct_ty| { - return struct_ty.data.haveLayout(); + if (mod.typeToStruct(ty)) |struct_obj| { + return struct_obj.haveLayout(); } return true; }, @@ -3500,18 +3485,23 @@ pub const Type = struct { } } - pub fn containerLayout(ty: Type) std.builtin.Type.ContainerLayout { + pub fn containerLayout(ty: Type, mod: *Module) std.builtin.Type.ContainerLayout { return switch (ty.ip_index) { .empty_struct_type => .Auto, .none => switch (ty.tag()) { .tuple, .anon_struct => .Auto, - .@"struct" => ty.castTag(.@"struct").?.data.layout, .@"union" => ty.castTag(.@"union").?.data.layout, .union_safety_tagged => ty.castTag(.union_safety_tagged).?.data.layout, .union_tagged => ty.castTag(.union_tagged).?.data.layout, else => unreachable, }, - else => unreachable, + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return .Auto; + return struct_obj.layout; + }, + else => unreachable, + }, }; } @@ -3631,14 +3621,16 @@ pub const Type = struct { .array_sentinel => ty.castTag(.array_sentinel).?.data.len, .tuple => ty.castTag(.tuple).?.data.types.len, .anon_struct => ty.castTag(.anon_struct).?.data.types.len, - .@"struct" => ty.castTag(.@"struct").?.data.fields.count(), - .empty_struct => 0, else => unreachable, }, else => switch (ip.indexToKey(ty.ip_index)) { .vector_type => |vector_type| vector_type.len, .array_type => |array_type| array_type.len, + .struct_type => |struct_type| { + const struct_obj = ip.structPtrUnwrapConst(struct_type.index) orelse return 0; + return struct_obj.fields.count(); + }, else => unreachable, }, }; @@ -3665,11 +3657,9 @@ pub const Type = struct { /// Asserts the type is an array, pointer or vector. 
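 /// Returns `null` if the type has no sentinel value.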
pub fn sentinel(ty: Type, mod: *const Module) ?Value { return switch (ty.ip_index) { - .empty_struct_type => null, .none => switch (ty.tag()) { .array, .tuple, - .@"struct", => null, .pointer => ty.castTag(.pointer).?.data.sentinel, @@ -3721,16 +3711,16 @@ pub const Type = struct { /// Returns true for integers, enums, error sets, and packed structs. /// If this function returns true, then intInfo() can be called on the type. - pub fn isAbiInt(ty: Type, mod: *const Module) bool { + pub fn isAbiInt(ty: Type, mod: *Module) bool { return switch (ty.zigTypeTag(mod)) { .Int, .Enum, .ErrorSet => true, - .Struct => ty.containerLayout() == .Packed, + .Struct => ty.containerLayout(mod) == .Packed, else => false, }; } /// Asserts the type is an integer, enum, error set, or vector of one of them. - pub fn intInfo(starting_ty: Type, mod: *const Module) InternPool.Key.IntType { + pub fn intInfo(starting_ty: Type, mod: *Module) InternPool.Key.IntType { const target = mod.getTarget(); var ty = starting_ty; @@ -3750,12 +3740,6 @@ pub const Type = struct { return .{ .signedness = .unsigned, .bits = 16 }; }, - .@"struct" => { - const struct_obj = ty.castTag(.@"struct").?.data; - assert(struct_obj.layout == .Packed); - ty = struct_obj.backing_int_ty; - }, - else => unreachable, }, .anyerror_type => { @@ -3775,6 +3759,12 @@ pub const Type = struct { .c_ulonglong_type => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ulonglong) }, else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { .int_type => |int_type| return int_type, + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index).?; + assert(struct_obj.layout == .Packed); + ty = struct_obj.backing_int_ty; + }, + .ptr_type => unreachable, .array_type => unreachable, .vector_type => |vector_type| ty = vector_type.child.toType(), @@ -3782,7 +3772,7 @@ pub const Type = struct { .opt_type => unreachable, .error_union_type => unreachable, .simple_type => unreachable, // handled via Index enum tag above - .struct_type => @panic("TODO"), + .union_type => unreachable, .opaque_type => unreachable, @@ -3793,6 +3783,7 @@ pub const Type = struct { .ptr => unreachable, .opt => unreachable, .enum_tag => unreachable, + .aggregate => unreachable, }, }; } @@ -3996,17 +3987,6 @@ pub const Type = struct { } }, - .@"struct" => { - const s = ty.castTag(.@"struct").?.data; - assert(s.haveFieldTypes()); - for (s.fields.values()) |field| { - if (field.is_comptime) continue; - if ((try field.ty.onePossibleValue(mod)) != null) continue; - return null; - } - return Value.empty_struct; - }, - .tuple, .anon_struct => { const tuple = ty.tupleFields(); for (tuple.values, 0..) |val, i| { @@ -4069,8 +4049,6 @@ pub const Type = struct { return Value.empty_struct; }, - .empty_struct => return Value.empty_struct, - .array => { if (ty.arrayLen(mod) == 0) return Value.initTag(.empty_array); @@ -4158,7 +4136,23 @@ pub const Type = struct { .generic_poison => unreachable, .var_args_param => unreachable, }, - .struct_type => @panic("TODO"), + .struct_type => |struct_type| { + if (mod.structPtrUnwrap(struct_type.index)) |s| { + assert(s.haveFieldTypes()); + for (s.fields.values()) |field| { + if (field.is_comptime) continue; + if ((try field.ty.onePossibleValue(mod)) != null) continue; + return null; + } + } + // In this case the struct has no fields and therefore has one possible value. 
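+ // (This branch is also reached when every field has exactly one
+ // possible value, so the empty aggregate below is the type's unique value.)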
+ const empty = try mod.intern(.{ .aggregate = .{ + .ty = ty.ip_index, + .fields = &.{}, + } }); + return empty.toValue(); + }, + .union_type => @panic("TODO"), .opaque_type => return null, @@ -4169,6 +4163,7 @@ pub const Type = struct { .ptr => unreachable, .opt => unreachable, .enum_tag => unreachable, + .aggregate => unreachable, }, }; } @@ -4177,12 +4172,11 @@ pub const Type = struct { /// resolves field types rather than asserting they are already resolved. /// TODO merge these implementations together with the "advanced" pattern seen /// elsewhere in this file. - pub fn comptimeOnly(ty: Type, mod: *const Module) bool { + pub fn comptimeOnly(ty: Type, mod: *Module) bool { return switch (ty.ip_index) { .empty_struct_type => false, .none => switch (ty.tag()) { - .empty_struct, .error_set, .error_set_single, .error_set_inferred, @@ -4222,20 +4216,6 @@ pub const Type = struct { return false; }, - .@"struct" => { - const struct_obj = ty.castTag(.@"struct").?.data; - switch (struct_obj.requires_comptime) { - .wip, .unknown => { - // Return false to avoid incorrect dependency loops. - // This will be handled correctly once merged with - // `Sema.typeRequiresComptime`. - return false; - }, - .no => return false, - .yes => return true, - } - }, - .@"union", .union_safety_tagged, .union_tagged => { const union_obj = ty.cast(Type.Payload.Union).?.data; switch (union_obj.requires_comptime) { @@ -4326,7 +4306,21 @@ pub const Type = struct { .var_args_param => unreachable, }, - .struct_type => @panic("TODO"), + .struct_type => |struct_type| { + // A struct with no fields is not comptime-only. + const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return false; + switch (struct_obj.requires_comptime) { + .wip, .unknown => { + // Return false to avoid incorrect dependency loops. + // This will be handled correctly once merged with + // `Sema.typeRequiresComptime`. 
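+ // (e.g. a recursive struct type would otherwise re-enter this
+ // query before its own resolution finishes)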
+ return false; + }, + .no => return false, + .yes => return true, + } + }, + .union_type => @panic("TODO"), .opaque_type => false, @@ -4337,6 +4331,7 @@ pub const Type = struct { .ptr => unreachable, .opt => unreachable, .enum_tag => unreachable, + .aggregate => unreachable, }, }; } @@ -4352,19 +4347,19 @@ pub const Type = struct { }; } - pub fn isIndexable(ty: Type, mod: *const Module) bool { + pub fn isIndexable(ty: Type, mod: *Module) bool { return switch (ty.zigTypeTag(mod)) { .Array, .Vector => true, .Pointer => switch (ty.ptrSize(mod)) { .Slice, .Many, .C => true, .One => ty.childType(mod).zigTypeTag(mod) == .Array, }, - .Struct => ty.isTuple(), + .Struct => ty.isTuple(mod), else => false, }; } - pub fn indexableHasLen(ty: Type, mod: *const Module) bool { + pub fn indexableHasLen(ty: Type, mod: *Module) bool { return switch (ty.zigTypeTag(mod)) { .Array, .Vector => true, .Pointer => switch (ty.ptrSize(mod)) { @@ -4372,7 +4367,7 @@ pub const Type = struct { .Slice => true, .One => ty.childType(mod).zigTypeTag(mod) == .Array, }, - .Struct => ty.isTuple(), + .Struct => ty.isTuple(mod), else => false, }; } @@ -4381,10 +4376,8 @@ pub const Type = struct { pub fn getNamespaceIndex(ty: Type, mod: *Module) Module.Namespace.OptionalIndex { return switch (ty.ip_index) { .none => switch (ty.tag()) { - .@"struct" => ty.castTag(.@"struct").?.data.namespace.toOptional(), .enum_full => ty.castTag(.enum_full).?.data.namespace.toOptional(), .enum_nonexhaustive => ty.castTag(.enum_nonexhaustive).?.data.namespace.toOptional(), - .empty_struct => @panic("TODO"), .@"union" => ty.castTag(.@"union").?.data.namespace.toOptional(), .union_safety_tagged => ty.castTag(.union_safety_tagged).?.data.namespace.toOptional(), .union_tagged => ty.castTag(.union_tagged).?.data.namespace.toOptional(), @@ -4393,6 +4386,7 @@ pub const Type = struct { }, else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { .opaque_type => |opaque_type| opaque_type.namespace.toOptional(), + .struct_type => |struct_type| struct_type.namespace, else => .none, }, }; @@ -4618,161 +4612,188 @@ pub const Type = struct { } } - pub fn structFields(ty: Type) Module.Struct.Fields { - return switch (ty.ip_index) { - .empty_struct_type => .{}, - .none => switch (ty.tag()) { - .empty_struct => .{}, - .@"struct" => { - const struct_obj = ty.castTag(.@"struct").?.data; - assert(struct_obj.haveFieldTypes()); - return struct_obj.fields; - }, - else => unreachable, + pub fn structFields(ty: Type, mod: *Module) Module.Struct.Fields { + switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return .{}; + assert(struct_obj.haveFieldTypes()); + return struct_obj.fields; }, else => unreachable, - }; + } } - pub fn structFieldName(ty: Type, field_index: usize) []const u8 { - switch (ty.tag()) { - .@"struct" => { - const struct_obj = ty.castTag(.@"struct").?.data; - assert(struct_obj.haveFieldTypes()); - return struct_obj.fields.keys()[field_index]; + pub fn structFieldName(ty: Type, field_index: usize, mod: *Module) []const u8 { + switch (ty.ip_index) { + .none => switch (ty.tag()) { + .anon_struct => return ty.castTag(.anon_struct).?.data.names[field_index], + else => unreachable, + }, + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index).?; + assert(struct_obj.haveFieldTypes()); + return struct_obj.fields.keys()[field_index]; + }, + else => unreachable, }, - 
.anon_struct => return ty.castTag(.anon_struct).?.data.names[field_index], - else => unreachable, } } - pub fn structFieldCount(ty: Type) usize { + pub fn structFieldCount(ty: Type, mod: *Module) usize { return switch (ty.ip_index) { .empty_struct_type => 0, .none => switch (ty.tag()) { - .@"struct" => { - const struct_obj = ty.castTag(.@"struct").?.data; + .tuple => ty.castTag(.tuple).?.data.types.len, + .anon_struct => ty.castTag(.anon_struct).?.data.types.len, + else => unreachable, + }, + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return 0; assert(struct_obj.haveFieldTypes()); return struct_obj.fields.count(); }, - .empty_struct => 0, - .tuple => ty.castTag(.tuple).?.data.types.len, - .anon_struct => ty.castTag(.anon_struct).?.data.types.len, else => unreachable, }, - else => unreachable, }; } /// Supports structs and unions. - pub fn structFieldType(ty: Type, index: usize) Type { - switch (ty.tag()) { - .@"struct" => { - const struct_obj = ty.castTag(.@"struct").?.data; - return struct_obj.fields.values()[index].ty; + pub fn structFieldType(ty: Type, index: usize, mod: *Module) Type { + return switch (ty.ip_index) { + .none => switch (ty.tag()) { + .@"union", .union_safety_tagged, .union_tagged => { + const union_obj = ty.cast(Payload.Union).?.data; + return union_obj.fields.values()[index].ty; + }, + .tuple => return ty.castTag(.tuple).?.data.types[index], + .anon_struct => return ty.castTag(.anon_struct).?.data.types[index], + else => unreachable, }, - .@"union", .union_safety_tagged, .union_tagged => { - const union_obj = ty.cast(Payload.Union).?.data; - return union_obj.fields.values()[index].ty; + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index).?; + return struct_obj.fields.values()[index].ty; + }, + else => unreachable, }, - .tuple => return ty.castTag(.tuple).?.data.types[index], - .anon_struct => return ty.castTag(.anon_struct).?.data.types[index], - else => unreachable, - } + }; } pub fn structFieldAlign(ty: Type, index: usize, mod: *Module) u32 { - switch (ty.tag()) { - .@"struct" => { - const struct_obj = ty.castTag(.@"struct").?.data; - assert(struct_obj.layout != .Packed); - return struct_obj.fields.values()[index].alignment(mod, struct_obj.layout); + switch (ty.ip_index) { + .none => switch (ty.tag()) { + .@"union", .union_safety_tagged, .union_tagged => { + const union_obj = ty.cast(Payload.Union).?.data; + return union_obj.fields.values()[index].normalAlignment(mod); + }, + .tuple => return ty.castTag(.tuple).?.data.types[index].abiAlignment(mod), + .anon_struct => return ty.castTag(.anon_struct).?.data.types[index].abiAlignment(mod), + else => unreachable, }, - .@"union", .union_safety_tagged, .union_tagged => { - const union_obj = ty.cast(Payload.Union).?.data; - return union_obj.fields.values()[index].normalAlignment(mod); + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index).?; + assert(struct_obj.layout != .Packed); + return struct_obj.fields.values()[index].alignment(mod, struct_obj.layout); + }, + else => unreachable, }, - .tuple => return ty.castTag(.tuple).?.data.types[index].abiAlignment(mod), - .anon_struct => return ty.castTag(.anon_struct).?.data.types[index].abiAlignment(mod), - else => unreachable, } } - pub fn 
structFieldDefaultValue(ty: Type, index: usize) Value { - switch (ty.tag()) { - .@"struct" => { - const struct_obj = ty.castTag(.@"struct").?.data; - return struct_obj.fields.values()[index].default_val; - }, - .tuple => { - const tuple = ty.castTag(.tuple).?.data; - return tuple.values[index]; + pub fn structFieldDefaultValue(ty: Type, index: usize, mod: *Module) Value { + switch (ty.ip_index) { + .none => switch (ty.tag()) { + .tuple => { + const tuple = ty.castTag(.tuple).?.data; + return tuple.values[index]; + }, + .anon_struct => { + const struct_obj = ty.castTag(.anon_struct).?.data; + return struct_obj.values[index]; + }, + else => unreachable, }, - .anon_struct => { - const struct_obj = ty.castTag(.anon_struct).?.data; - return struct_obj.values[index]; + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index).?; + return struct_obj.fields.values()[index].default_val; + }, + else => unreachable, }, - else => unreachable, } } pub fn structFieldValueComptime(ty: Type, mod: *Module, index: usize) !?Value { - switch (ty.tag()) { - .@"struct" => { - const struct_obj = ty.castTag(.@"struct").?.data; - const field = struct_obj.fields.values()[index]; - if (field.is_comptime) { - return field.default_val; - } else { - return field.ty.onePossibleValue(mod); - } - }, - .tuple => { - const tuple = ty.castTag(.tuple).?.data; - const val = tuple.values[index]; - if (val.ip_index == .unreachable_value) { - return tuple.types[index].onePossibleValue(mod); - } else { - return val; - } + switch (ty.ip_index) { + .none => switch (ty.tag()) { + .tuple => { + const tuple = ty.castTag(.tuple).?.data; + const val = tuple.values[index]; + if (val.ip_index == .unreachable_value) { + return tuple.types[index].onePossibleValue(mod); + } else { + return val; + } + }, + .anon_struct => { + const anon_struct = ty.castTag(.anon_struct).?.data; + const val = anon_struct.values[index]; + if (val.ip_index == .unreachable_value) { + return anon_struct.types[index].onePossibleValue(mod); + } else { + return val; + } + }, + else => unreachable, }, - .anon_struct => { - const anon_struct = ty.castTag(.anon_struct).?.data; - const val = anon_struct.values[index]; - if (val.ip_index == .unreachable_value) { - return anon_struct.types[index].onePossibleValue(mod); - } else { - return val; - } + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index).?; + const field = struct_obj.fields.values()[index]; + if (field.is_comptime) { + return field.default_val; + } else { + return field.ty.onePossibleValue(mod); + } + }, + else => unreachable, }, - else => unreachable, } } - pub fn structFieldIsComptime(ty: Type, index: usize) bool { - switch (ty.tag()) { - .@"struct" => { - const struct_obj = ty.castTag(.@"struct").?.data; - if (struct_obj.layout == .Packed) return false; - const field = struct_obj.fields.values()[index]; - return field.is_comptime; - }, - .tuple => { - const tuple = ty.castTag(.tuple).?.data; - const val = tuple.values[index]; - return val.ip_index != .unreachable_value; + pub fn structFieldIsComptime(ty: Type, index: usize, mod: *Module) bool { + switch (ty.ip_index) { + .none => switch (ty.tag()) { + .tuple => { + const tuple = ty.castTag(.tuple).?.data; + const val = tuple.values[index]; + return val.ip_index != .unreachable_value; + }, + .anon_struct => { + const anon_struct = ty.castTag(.anon_struct).?.data; + 
+                    const val = anon_struct.values[index];
+                    return val.ip_index != .unreachable_value;
+                },
+                else => unreachable,
             },
-            .anon_struct => {
-                const anon_struct = ty.castTag(.anon_struct).?.data;
-                const val = anon_struct.values[index];
-                return val.ip_index != .unreachable_value;
+            else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+                .struct_type => |struct_type| {
+                    const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
+                    if (struct_obj.layout == .Packed) return false;
+                    const field = struct_obj.fields.values()[index];
+                    return field.is_comptime;
+                },
+                else => unreachable,
             },
-            else => unreachable,
         }
     }

     pub fn packedStructFieldByteOffset(ty: Type, field_index: usize, mod: *Module) u32 {
-        const struct_obj = ty.castTag(.@"struct").?.data;
+        const struct_type = mod.intern_pool.indexToKey(ty.ip_index).struct_type;
+        const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
         assert(struct_obj.layout == .Packed);
         comptime assert(Type.packed_struct_layout_version == 2);
@@ -4833,7 +4854,8 @@ pub const Type = struct {

     /// Get an iterator that iterates over all the struct fields, returning the field and
     /// offset of that field. Asserts that the type is a non-packed struct.
     pub fn iterateStructOffsets(ty: Type, mod: *Module) StructOffsetIterator {
-        const struct_obj = ty.castTag(.@"struct").?.data;
+        const struct_type = mod.intern_pool.indexToKey(ty.ip_index).struct_type;
+        const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
         assert(struct_obj.haveLayout());
         assert(struct_obj.layout != .Packed);
         return .{ .struct_obj = struct_obj, .module = mod };
@@ -4841,57 +4863,62 @@ pub const Type = struct {

     /// Supports structs and unions.
     pub fn structFieldOffset(ty: Type, index: usize, mod: *Module) u64 {
-        switch (ty.tag()) {
-            .@"struct" => {
-                const struct_obj = ty.castTag(.@"struct").?.data;
-                assert(struct_obj.haveLayout());
-                assert(struct_obj.layout != .Packed);
-                var it = ty.iterateStructOffsets(mod);
-                while (it.next()) |field_offset| {
-                    if (index == field_offset.field)
-                        return field_offset.offset;
-                }
-
-                return std.mem.alignForwardGeneric(u64, it.offset, @max(it.big_align, 1));
-            },
+        switch (ty.ip_index) {
+            .none => switch (ty.tag()) {
+                .tuple, .anon_struct => {
+                    const tuple = ty.tupleFields();

-            .tuple, .anon_struct => {
-                const tuple = ty.tupleFields();
+                    var offset: u64 = 0;
+                    var big_align: u32 = 0;

-                var offset: u64 = 0;
-                var big_align: u32 = 0;
+                    for (tuple.types, 0..) |field_ty, i| {
+                        const field_val = tuple.values[i];
+                        if (field_val.ip_index != .unreachable_value or !field_ty.hasRuntimeBits(mod)) {
+                            // comptime field
+                            if (i == index) return offset;
+                            continue;
+                        }

-                for (tuple.types, 0..) |field_ty, i| {
-                    const field_val = tuple.values[i];
-                    if (field_val.ip_index != .unreachable_value or !field_ty.hasRuntimeBits(mod)) {
-                        // comptime field
+                        const field_align = field_ty.abiAlignment(mod);
+                        big_align = @max(big_align, field_align);
+                        offset = std.mem.alignForwardGeneric(u64, offset, field_align);
                         if (i == index) return offset;
-                        continue;
+                        offset += field_ty.abiSize(mod);
                     }
+                    offset = std.mem.alignForwardGeneric(u64, offset, @max(big_align, 1));
+                    return offset;
+                },

-                    const field_align = field_ty.abiAlignment(mod);
-                    big_align = @max(big_align, field_align);
-                    offset = std.mem.alignForwardGeneric(u64, offset, field_align);
-                    if (i == index) return offset;
-                    offset += field_ty.abiSize(mod);
-                }
-                offset = std.mem.alignForwardGeneric(u64, offset, @max(big_align, 1));
-                return offset;
+                .@"union" => return 0,
+                .union_safety_tagged, .union_tagged => {
+                    const union_obj = ty.cast(Payload.Union).?.data;
+                    const layout = union_obj.getLayout(mod, true);
+                    if (layout.tag_align >= layout.payload_align) {
+                        // {Tag, Payload}
+                        return std.mem.alignForwardGeneric(u64, layout.tag_size, layout.payload_align);
+                    } else {
+                        // {Payload, Tag}
+                        return 0;
+                    }
+                },
+                else => unreachable,
             },
+            else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+                .struct_type => |struct_type| {
+                    const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
+                    assert(struct_obj.haveLayout());
+                    assert(struct_obj.layout != .Packed);
+                    var it = ty.iterateStructOffsets(mod);
+                    while (it.next()) |field_offset| {
+                        if (index == field_offset.field)
+                            return field_offset.offset;
+                    }

-            .@"union" => return 0,
-            .union_safety_tagged, .union_tagged => {
-                const union_obj = ty.cast(Payload.Union).?.data;
-                const layout = union_obj.getLayout(mod, true);
-                if (layout.tag_align >= layout.payload_align) {
-                    // {Tag, Payload}
-                    return std.mem.alignForwardGeneric(u64, layout.tag_size, layout.payload_align);
-                } else {
-                    // {Payload, Tag}
-                    return 0;
-                }
+                    return std.mem.alignForwardGeneric(u64, it.offset, @max(it.big_align, 1));
+                },
+
+                else => unreachable,
             },
-            else => unreachable,
         }
     }
@@ -4901,6 +4928,7 @@ pub const Type = struct {

     pub fn declSrcLocOrNull(ty: Type, mod: *Module) ?Module.SrcLoc {
         switch (ty.ip_index) {
+            .empty_struct_type => return null,
             .none => switch (ty.tag()) {
                 .enum_full, .enum_nonexhaustive => {
                     const enum_full = ty.cast(Payload.EnumFull).?.data;
@@ -4914,10 +4942,6 @@ pub const Type = struct {
                     const enum_simple = ty.castTag(.enum_simple).?.data;
                     return enum_simple.srcLoc(mod);
                 },
-                .@"struct" => {
-                    const struct_obj = ty.castTag(.@"struct").?.data;
-                    return struct_obj.srcLoc(mod);
-                },
                 .error_set => {
                     const error_set = ty.castTag(.error_set).?.data;
                     return error_set.srcLoc(mod);
@@ -4930,7 +4954,10 @@ pub const Type = struct {
                 else => return null,
             },
             else => return switch (mod.intern_pool.indexToKey(ty.ip_index)) {
-                .struct_type => @panic("TODO"),
+                .struct_type => |struct_type| {
+                    const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
+                    return struct_obj.srcLoc(mod);
+                },
                 .union_type => @panic("TODO"),
                 .opaque_type => |opaque_type| mod.opaqueSrcLoc(opaque_type),
                 else => null,
@@ -4954,10 +4981,6 @@ pub const Type = struct {
                     const enum_simple = ty.castTag(.enum_simple).?.data;
                     return enum_simple.owner_decl;
                 },
-                .@"struct" => {
-                    const struct_obj = ty.castTag(.@"struct").?.data;
-                    return struct_obj.owner_decl;
-                },
                 .error_set => {
                     const error_set = ty.castTag(.error_set).?.data;
                     return error_set.owner_decl;
@@ -4970,7 +4993,10 @@ pub const Type = struct {
                 else => return null,
             },
             else => return switch (mod.intern_pool.indexToKey(ty.ip_index)) {
-                .struct_type => @panic("TODO"),
+                .struct_type => |struct_type| {
+                    const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return null;
+                    return struct_obj.owner_decl;
+                },
                 .union_type => @panic("TODO"),
                 .opaque_type => |opaque_type| opaque_type.decl,
                 else => null,
@@ -5013,8 +5039,6 @@ pub const Type = struct {
         /// The type is the inferred error set of a specific function.
         error_set_inferred,
         error_set_merged,
-        empty_struct,
-        @"struct",
         @"union",
         union_safety_tagged,
         union_tagged,
@@ -5046,12 +5070,10 @@ pub const Type = struct {
                 .function => Payload.Function,
                 .error_union => Payload.ErrorUnion,
                 .error_set_single => Payload.Name,
-                .@"struct" => Payload.Struct,
                 .@"union", .union_safety_tagged, .union_tagged => Payload.Union,
                 .enum_full, .enum_nonexhaustive => Payload.EnumFull,
                 .enum_simple => Payload.EnumSimple,
                 .enum_numbered => Payload.EnumNumbered,
-                .empty_struct => Payload.ContainerScope,
                 .tuple => Payload.Tuple,
                 .anon_struct => Payload.AnonStruct,
             };
@@ -5082,15 +5104,19 @@ pub const Type = struct {
         }
     };

-    pub fn isTuple(ty: Type) bool {
+    pub fn isTuple(ty: Type, mod: *Module) bool {
         return switch (ty.ip_index) {
-            .empty_struct_type => true,
             .none => switch (ty.tag()) {
                 .tuple => true,
-                .@"struct" => ty.castTag(.@"struct").?.data.is_tuple,
                 else => false,
             },
-            else => false, // TODO struct
+            else => return switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+                .struct_type => |struct_type| {
+                    const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return false;
+                    return struct_obj.is_tuple;
+                },
+                else => false,
+            },
         };
     }
@@ -5101,36 +5127,41 @@ pub const Type = struct {
                 .anon_struct => true,
                 else => false,
             },
-            else => false, // TODO struct
+            else => false,
         };
     }

-    pub fn isTupleOrAnonStruct(ty: Type) bool {
+    pub fn isTupleOrAnonStruct(ty: Type, mod: *Module) bool {
         return switch (ty.ip_index) {
             .empty_struct_type => true,
             .none => switch (ty.tag()) {
                 .tuple, .anon_struct => true,
-                .@"struct" => ty.castTag(.@"struct").?.data.is_tuple,
                 else => false,
             },
-            else => false, // TODO struct
+            else => return switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+                .struct_type => |struct_type| {
+                    const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return false;
+                    return struct_obj.is_tuple;
+                },
+                else => false,
+            },
         };
     }

     pub fn isSimpleTuple(ty: Type) bool {
         return switch (ty.ip_index) {
-            .empty_struct => true,
+            .empty_struct_type => true,
             .none => switch (ty.tag()) {
                 .tuple => true,
                 else => false,
             },
-            else => false, // TODO
+            else => false,
         };
     }

     pub fn isSimpleTupleOrAnonStruct(ty: Type) bool {
         return switch (ty.ip_index) {
-            .empty_struct => true,
+            .empty_struct_type => true,
             .none => switch (ty.tag()) {
                 .tuple, .anon_struct => true,
                 else => false,
@@ -5142,7 +5173,7 @@ pub const Type = struct {
     // Only allowed for simple tuple types
     pub fn tupleFields(ty: Type) Payload.Tuple.Data {
         return switch (ty.ip_index) {
-            .empty_struct => .{ .types = &.{}, .values = &.{} },
+            .empty_struct_type => .{ .types = &.{}, .values = &.{} },
             .none => switch (ty.tag()) {
                 .tuple => ty.castTag(.tuple).?.data,
                 .anon_struct => .{
@@ -5319,18 +5350,6 @@ pub const Type = struct {
             data: []const u8,
         };

-        /// Mostly used for namespace like structs with zero fields.
-        /// Most commonly used for files.
-        pub const ContainerScope = struct {
-            base: Payload,
-            data: *Module.Namespace,
-        };
-
-        pub const Struct = struct {
-            base: Payload = .{ .tag = .@"struct" },
-            data: *Module.Struct,
-        };
-
         pub const Tuple = struct {
             base: Payload = .{ .tag = .tuple },
             data: Data,
diff --git a/src/value.zig b/src/value.zig
index c95f218dbe..3992888b3d 100644
--- a/src/value.zig
+++ b/src/value.zig
@@ -996,10 +996,10 @@ pub const Value = struct {
                 const byte_count = (@intCast(usize, ty.bitSize(mod)) + 7) / 8;
                 return writeToPackedMemory(val, ty, mod, buffer[0..byte_count], 0);
             },
-            .Struct => switch (ty.containerLayout()) {
+            .Struct => switch (ty.containerLayout(mod)) {
                 .Auto => return error.IllDefinedMemoryLayout,
                 .Extern => {
-                    const fields = ty.structFields().values();
+                    const fields = ty.structFields(mod).values();
                     const field_vals = val.castTag(.aggregate).?.data;
                     for (fields, 0..) |field, i| {
                         const off = @intCast(usize, ty.structFieldOffset(i, mod));
@@ -1017,7 +1017,7 @@ pub const Value = struct {
                 const int = mod.global_error_set.get(val.castTag(.@"error").?.data.name).?;
                 std.mem.writeInt(Int, buffer[0..@sizeOf(Int)], @intCast(Int, int), endian);
             },
-            .Union => switch (ty.containerLayout()) {
+            .Union => switch (ty.containerLayout(mod)) {
                 .Auto => return error.IllDefinedMemoryLayout,
                 .Extern => return error.Unimplemented,
                 .Packed => {
@@ -1119,12 +1119,12 @@ pub const Value = struct {
                     bits += elem_bit_size;
                 }
             },
-            .Struct => switch (ty.containerLayout()) {
+            .Struct => switch (ty.containerLayout(mod)) {
                 .Auto => unreachable, // Sema is supposed to have emitted a compile error already
                 .Extern => unreachable, // Handled in non-packed writeToMemory
                 .Packed => {
                     var bits: u16 = 0;
-                    const fields = ty.structFields().values();
+                    const fields = ty.structFields(mod).values();
                     const field_vals = val.castTag(.aggregate).?.data;
                     for (fields, 0..) |field, i| {
                         const field_bits = @intCast(u16, field.ty.bitSize(mod));
@@ -1133,7 +1133,7 @@ pub const Value = struct {
                     }
                 },
             },
-            .Union => switch (ty.containerLayout()) {
+            .Union => switch (ty.containerLayout(mod)) {
                 .Auto => unreachable, // Sema is supposed to have emitted a compile error already
                 .Extern => unreachable, // Handled in non-packed writeToMemory
                 .Packed => {
@@ -1236,14 +1236,14 @@ pub const Value = struct {
                 const byte_count = (@intCast(usize, ty.bitSize(mod)) + 7) / 8;
                 return readFromPackedMemory(ty, mod, buffer[0..byte_count], 0, arena);
             },
-            .Struct => switch (ty.containerLayout()) {
+            .Struct => switch (ty.containerLayout(mod)) {
                 .Auto => unreachable, // Sema is supposed to have emitted a compile error already
                 .Extern => {
-                    const fields = ty.structFields().values();
+                    const fields = ty.structFields(mod).values();
                     const field_vals = try arena.alloc(Value, fields.len);
                     for (fields, 0..) |field, i| {
                         const off = @intCast(usize, ty.structFieldOffset(i, mod));
-                        const sz = @intCast(usize, ty.structFieldType(i).abiSize(mod));
+                        const sz = @intCast(usize, ty.structFieldType(i, mod).abiSize(mod));
                         field_vals[i] = try readFromMemory(field.ty, mod, buffer[off..(off + sz)], arena);
                     }
                     return Tag.aggregate.create(arena, field_vals);
@@ -1346,12 +1346,12 @@ pub const Value = struct {
                 }
                 return Tag.aggregate.create(arena, elems);
             },
-            .Struct => switch (ty.containerLayout()) {
+            .Struct => switch (ty.containerLayout(mod)) {
                 .Auto => unreachable, // Sema is supposed to have emitted a compile error already
                 .Extern => unreachable, // Handled by non-packed readFromMemory
                 .Packed => {
                     var bits: u16 = 0;
-                    const fields = ty.structFields().values();
+                    const fields = ty.structFields(mod).values();
                     const field_vals = try arena.alloc(Value, fields.len);
                     for (fields, 0..) |field, i| {
                         const field_bits = @intCast(u16, field.ty.bitSize(mod));
@@ -1996,7 +1996,7 @@ pub const Value = struct {
         }

         if (ty.zigTypeTag(mod) == .Struct) {
-            const fields = ty.structFields().values();
+            const fields = ty.structFields(mod).values();
             assert(fields.len == a_field_vals.len);
             for (fields, 0..) |field, i| {
                 if (!(try eqlAdvanced(a_field_vals[i], field.ty, b_field_vals[i], field.ty, mod, opt_sema))) {
@@ -2019,7 +2019,7 @@ pub const Value = struct {
             .@"union" => {
                 const a_union = a.castTag(.@"union").?.data;
                 const b_union = b.castTag(.@"union").?.data;
-                switch (ty.containerLayout()) {
+                switch (ty.containerLayout(mod)) {
                     .Packed, .Extern => {
                         const tag_ty = ty.unionTagTypeHypothetical();
                         if (!(try eqlAdvanced(a_union.tag, tag_ty, b_union.tag, tag_ty, mod, opt_sema))) {
@@ -2252,7 +2252,7 @@ pub const Value = struct {
             .aggregate => {
                 const field_values = val.castTag(.aggregate).?.data;
                 for (field_values, 0..) |field_val, i| {
-                    const field_ty = ty.structFieldType(i);
+                    const field_ty = ty.structFieldType(i, mod);
                     field_val.hash(field_ty, hasher, mod);
                 }
             },
@@ -2623,7 +2623,7 @@ pub const Value = struct {
                 const data = val.castTag(.field_ptr).?.data;
                 if (data.container_ptr.pointerDecl()) |decl_index| {
                     const container_decl = mod.declPtr(decl_index);
-                    const field_type = data.container_ty.structFieldType(data.field_index);
+                    const field_type = data.container_ty.structFieldType(data.field_index, mod);
                     const field_val = try container_decl.val.fieldValue(field_type, mod, data.field_index);
                     return field_val.elemValue(mod, index);
                 } else unreachable;
@@ -2758,16 +2758,6 @@ pub const Value = struct {
     pub fn fieldValue(val: Value, ty: Type, mod: *Module, index: usize) !Value {
         switch (val.ip_index) {
             .undef => return Value.undef,
-            .empty_struct => {
-                if (ty.isSimpleTupleOrAnonStruct()) {
-                    const tuple = ty.tupleFields();
-                    return tuple.values[index];
-                }
-                if (try ty.structFieldValueComptime(mod, index)) |some| {
-                    return some;
-                }
-                unreachable;
-            },

             .none => switch (val.tag()) {
                 .aggregate => {
@@ -2784,7 +2774,10 @@ pub const Value = struct {
                 else => unreachable,
             },

-            else => unreachable,
+            else => return switch (mod.intern_pool.indexToKey(val.ip_index)) {
+                .aggregate => |aggregate| aggregate.fields[index].toValue(),
+                else => unreachable,
+            },
         }
     }
-- 
cgit v1.2.3


From 3ba099bfba9d3c38fe188010aa82fc589b1cabf6 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Wed, 10 May 2023 17:21:22 -0700
Subject: stage2: move union types and values to InternPool

---
 src/InternPool.zig          | 173 ++++++++++++++++---
 src/Module.zig              |  77 ++++++---
 src/Sema.zig                | 381 ++++++++++++++++++++++--------------------
 src/TypedValue.zig          |   4 +-
 src/arch/aarch64/abi.zig    |   4 +-
 src/arch/arm/abi.zig        |   4 +-
 src/arch/wasm/CodeGen.zig   |  10 +-
 src/arch/wasm/abi.zig       |  10 +-
 src/arch/x86_64/CodeGen.zig |   4 +-
 src/arch/x86_64/abi.zig     |   2 +-
 src/codegen.zig             |   6 +-
 src/codegen/c.zig           |  94 ++++++-----
 src/codegen/c/type.zig      |  26 +--
 src/codegen/llvm.zig        |  14 +-
 src/codegen/spirv.zig       |  10 +-
 src/link/Dwarf.zig          |   4 +-
 src/type.zig                | 399 +++++++++++++++++++-------------------------
 src/value.zig               |  12 +-
 18 files changed, 688 insertions(+), 546 deletions(-)

(limited to 'src/arch')

diff --git a/src/InternPool.zig b/src/InternPool.zig
index 315865c966..4c4e3ab78a 100644
--- a/src/InternPool.zig
+++ b/src/InternPool.zig
@@ -21,6 +21,13 @@ allocated_structs: std.SegmentedList(Module.Struct, 0) = .{},
 /// When a Struct object is freed from `allocated_structs`, it is pushed into this stack.
 structs_free_list: std.ArrayListUnmanaged(Module.Struct.Index) = .{},

+/// Union objects are stored in this data structure because:
+/// * They contain pointers such as the field maps.
+/// * They need to be mutated after creation.
+allocated_unions: std.SegmentedList(Module.Union, 0) = .{},
+/// When a Union object is freed from `allocated_unions`, it is pushed into this stack.
+unions_free_list: std.ArrayListUnmanaged(Module.Union.Index) = .{},
+
 const std = @import("std");
 const Allocator = std.mem.Allocator;
 const assert = std.debug.assert;
@@ -59,10 +66,7 @@ pub const Key = union(enum) {
     /// If `empty_struct_type` is handled separately, then this value may be
     /// safely assumed to never be `none`.
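     /// See `StructType` below for which of its fields may be `none`.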
     struct_type: StructType,
-    union_type: struct {
-        fields_len: u32,
-        // TODO move Module.Union data to InternPool
-    },
+    union_type: UnionType,
     opaque_type: OpaqueType,

     simple_value: SimpleValue,
@@ -87,6 +91,8 @@ pub const Key = union(enum) {
     /// In the case of sentinel-terminated arrays, the sentinel value *is* stored,
     /// so the slice length will be one more than the type's array length.
     aggregate: Aggregate,
+    /// An instance of a union.
+    un: Union,

     pub const IntType = std.builtin.Type.Int;
@@ -145,13 +151,27 @@ pub const Key = union(enum) {
     ///   - index == .none
     /// * A struct which has fields as well as a namespace.
     pub const StructType = struct {
-        /// This will be `none` only in the case of `@TypeOf(.{})`
-        /// (`Index.empty_struct_type`).
-        namespace: Module.Namespace.OptionalIndex,
         /// The `none` tag is used to represent two cases:
         /// * `@TypeOf(.{})`, in which case `namespace` will also be `none`.
         /// * A struct with no fields, in which case `namespace` will be populated.
         index: Module.Struct.OptionalIndex,
+        /// This will be `none` only in the case of `@TypeOf(.{})`
+        /// (`Index.empty_struct_type`).
+        namespace: Module.Namespace.OptionalIndex,
+    };
+
+    pub const UnionType = struct {
+        index: Module.Union.Index,
+        runtime_tag: RuntimeTag,
+
+        pub const RuntimeTag = enum { none, safety, tagged };
+
+        pub fn hasTag(self: UnionType) bool {
+            return switch (self.runtime_tag) {
+                .none => false,
+                .tagged, .safety => true,
+            };
+        }
     };

     pub const Int = struct {
@@ -198,6 +218,15 @@ pub const Key = union(enum) {
         val: Index,
     };

+    pub const Union = struct {
+        /// This is the union type; not the field type.
+        ty: Index,
+        /// Indicates the active field.
+        tag: Index,
+        /// The value of the active field.
+        val: Index,
+    };
+
     pub const Aggregate = struct {
         ty: Index,
         fields: []const Index,
@@ -229,12 +258,10 @@ pub const Key = union(enum) {
             .extern_func,
             .opt,
             .struct_type,
+            .union_type,
+            .un,
             => |info| std.hash.autoHash(hasher, info),

-            .union_type => |union_type| {
-                _ = union_type;
-                @panic("TODO");
-            },
             .opaque_type => |opaque_type| std.hash.autoHash(hasher, opaque_type.decl),

             .int => |int| {
@@ -320,6 +347,14 @@ pub const Key = union(enum) {
                 const b_info = b.struct_type;
                 return std.meta.eql(a_info, b_info);
             },
+            .union_type => |a_info| {
+                const b_info = b.union_type;
+                return std.meta.eql(a_info, b_info);
+            },
+            .un => |a_info| {
+                const b_info = b.un;
+                return std.meta.eql(a_info, b_info);
+            },

             .ptr => |a_info| {
                 const b_info = b.ptr;
@@ -371,14 +406,6 @@ pub const Key = union(enum) {
                 @panic("TODO");
             },

-            .union_type => |a_info| {
-                const b_info = b.union_type;
-
-                _ = a_info;
-                _ = b_info;
-                @panic("TODO");
-            },
-
             .opaque_type => |a_info| {
                 const b_info = b.opaque_type;
                 return a_info.decl == b_info.decl;
@@ -411,6 +438,7 @@ pub const Key = union(enum) {
             .extern_func,
             .enum_tag,
             .aggregate,
+            .un,
             => |x| return x.ty,

             .simple_value => |s| switch (s) {
@@ -838,6 +866,15 @@ pub const Tag = enum(u8) {
     /// Module.Struct object allocated for it.
     /// data is Module.Namespace.Index.
     type_struct_ns,
+    /// A tagged union type.
+    /// `data` is `Module.Union.Index`.
+    type_union_tagged,
+    /// An untagged union type. It also has no safety tag.
+    /// `data` is `Module.Union.Index`.
+    type_union_untagged,
+    /// An untagged union type which has a safety tag.
+    /// `data` is `Module.Union.Index`.
+    type_union_safety,

     /// A value that can be represented with only an enum tag.
     /// data is SimpleValue enum value.
@@ -908,6 +945,8 @@ pub const Tag = enum(u8) {
     /// * A struct which has 0 fields.
     /// data is Index of the type, which is known to be zero bits at runtime.
     only_possible_value,
+    /// data is extra index to Key.Union.
+    union_value,
 };

 /// Having `SimpleType` and `SimpleValue` in separate enums makes it easier to
@@ -1141,6 +1180,9 @@ pub fn deinit(ip: *InternPool, gpa: Allocator) void {
     ip.structs_free_list.deinit(gpa);
     ip.allocated_structs.deinit(gpa);

+    ip.unions_free_list.deinit(gpa);
+    ip.allocated_unions.deinit(gpa);
+
     ip.* = undefined;
 }
@@ -1233,6 +1275,19 @@ pub fn indexToKey(ip: InternPool, index: Index) Key {
             .namespace = @intToEnum(Module.Namespace.Index, data).toOptional(),
         } },

+        .type_union_untagged => .{ .union_type = .{
+            .index = @intToEnum(Module.Union.Index, data),
+            .runtime_tag = .none,
+        } },
+        .type_union_tagged => .{ .union_type = .{
+            .index = @intToEnum(Module.Union.Index, data),
+            .runtime_tag = .tagged,
+        } },
+        .type_union_safety => .{ .union_type = .{
+            .index = @intToEnum(Module.Union.Index, data),
+            .runtime_tag = .safety,
+        } },
+
         .opt_null => .{ .opt = .{
             .ty = @intToEnum(Index, data),
             .val = .none,
@@ -1303,6 +1358,7 @@ pub fn indexToKey(ip: InternPool, index: Index) Key {
                 else => unreachable,
             };
         },
+        .union_value => .{ .un = ip.extraData(Key.Union, data) },
     };
 }
@@ -1350,7 +1406,6 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
                 return @intToEnum(Index, ip.items.len - 1);
             }

-            // TODO introduce more pointer encodings
             ip.items.appendAssumeCapacity(.{
                 .tag = .type_pointer,
                 .data = try ip.addExtra(gpa, Pointer{
@@ -1450,8 +1505,14 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
         },

         .union_type => |union_type| {
-            _ = union_type;
-            @panic("TODO");
+            ip.items.appendAssumeCapacity(.{
+                .tag = switch (union_type.runtime_tag) {
+                    .none => .type_union_untagged,
+                    .safety => .type_union_safety,
+                    .tagged => .type_union_tagged,
+                },
+                .data = @enumToInt(union_type.index),
+            });
         },

         .opaque_type => |opaque_type| {
@@ -1642,6 +1703,16 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
             }
             @panic("TODO");
         },
+
+        .un => |un| {
+            assert(un.ty != .none);
+            assert(un.tag != .none);
+            assert(un.val != .none);
+            ip.items.appendAssumeCapacity(.{
+                .tag = .union_value,
+                .data = try ip.addExtra(gpa, un),
+            });
+        },
     }
     return @intToEnum(Index, ip.items.len - 1);
 }
@@ -1923,6 +1994,17 @@ pub fn indexToStruct(ip: *InternPool, val: Index) Module.Struct.OptionalIndex {
     return @intToEnum(Module.Struct.Index, datas[@enumToInt(val)]).toOptional();
 }

+pub fn indexToUnion(ip: *InternPool, val: Index) Module.Union.OptionalIndex {
+    const tags = ip.items.items(.tag);
+    if (val == .none) return .none;
+    switch (tags[@enumToInt(val)]) {
+        .type_union_tagged, .type_union_untagged, .type_union_safety => {},
+        else => return .none,
+    }
+    const datas = ip.items.items(.data);
+    return @intToEnum(Module.Union.Index, datas[@enumToInt(val)]).toOptional();
+}
+
 pub fn isOptionalType(ip: InternPool, ty: Index) bool {
     const tags = ip.items.items(.tag);
     if (ty == .none) return false;
@@ -1937,15 +2019,22 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void {
     const items_size = (1 + 4) * ip.items.len;
     const extra_size = 4 * ip.extra.items.len;
     const limbs_size = 8 * ip.limbs.items.len;
+    const structs_size = ip.allocated_structs.len *
+        (@sizeOf(Module.Struct) + @sizeOf(Module.Namespace) + @sizeOf(Module.Decl));
+    const unions_size = ip.allocated_unions.len *
+        (@sizeOf(Module.Union) + @sizeOf(Module.Namespace) + @sizeOf(Module.Decl));

     // TODO: map overhead size is not taken into account
-    const total_size = @sizeOf(InternPool) + items_size + extra_size + limbs_size;
+    const total_size = @sizeOf(InternPool) + items_size + extra_size + limbs_size +
+        structs_size + unions_size;

     std.debug.print(
         \\InternPool size: {d} bytes
         \\  {d} items: {d} bytes
         \\  {d} extra: {d} bytes
         \\  {d} limbs: {d} bytes
+        \\  {d} structs: {d} bytes
+        \\  {d} unions: {d} bytes
         \\
     , .{
         total_size,
@@ -1955,6 +2044,10 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void {
         extra_size,
         ip.limbs.items.len,
         limbs_size,
+        ip.allocated_structs.len,
+        structs_size,
+        ip.allocated_unions.len,
+        unions_size,
     });

     const tags = ip.items.items(.tag);
@@ -1980,8 +2073,14 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void {
             .type_error_union => @sizeOf(ErrorUnion),
             .type_enum_simple => @sizeOf(EnumSimple),
             .type_opaque => @sizeOf(Key.OpaqueType),
-            .type_struct => 0,
-            .type_struct_ns => 0,
+            .type_struct => @sizeOf(Module.Struct) + @sizeOf(Module.Namespace) + @sizeOf(Module.Decl),
+            .type_struct_ns => @sizeOf(Module.Namespace),
+
+            .type_union_tagged,
+            .type_union_untagged,
+            .type_union_safety,
+            => @sizeOf(Module.Union) + @sizeOf(Module.Namespace) + @sizeOf(Module.Decl),
+
             .simple_type => 0,
             .simple_value => 0,
             .ptr_int => @sizeOf(PtrInt),
@@ -2010,6 +2109,7 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void {
             .extern_func => @panic("TODO"),
             .func => @panic("TODO"),
             .only_possible_value => 0,
+            .union_value => @sizeOf(Key.Union),
         });
     }
     const SortContext = struct {
@@ -2041,6 +2141,10 @@ pub fn structPtrUnwrapConst(ip: InternPool, index: Module.Struct.OptionalIndex)
     return structPtrConst(ip, index.unwrap() orelse return null);
 }

+pub fn unionPtr(ip: *InternPool, index: Module.Union.Index) *Module.Union {
+    return ip.allocated_unions.at(@enumToInt(index));
+}
+
 pub fn createStruct(
     ip: *InternPool,
     gpa: Allocator,
@@ -2059,3 +2163,22 @@ pub fn destroyStruct(ip: *InternPool, gpa: Allocator, index: Module.Struct.Index
         // allocation failures here, instead leaking the Struct until garbage collection.
     };
 }
+
+pub fn createUnion(
+    ip: *InternPool,
+    gpa: Allocator,
+    initialization: Module.Union,
+) Allocator.Error!Module.Union.Index {
+    if (ip.unions_free_list.popOrNull()) |index| return index;
+    const ptr = try ip.allocated_unions.addOne(gpa);
+    ptr.* = initialization;
+    return @intToEnum(Module.Union.Index, ip.allocated_unions.len - 1);
+}
+
+pub fn destroyUnion(ip: *InternPool, gpa: Allocator, index: Module.Union.Index) void {
+    ip.unionPtr(index).* = undefined;
+    ip.unions_free_list.append(gpa, index) catch {
+        // In order to keep `destroyUnion` a non-fallible function, we ignore memory
+        // allocation failures here, instead leaking the Union until garbage collection.
+    };
+}
diff --git a/src/Module.zig b/src/Module.zig
index ada69537f6..6478f7ce4f 100644
--- a/src/Module.zig
+++ b/src/Module.zig
@@ -851,11 +851,10 @@ pub const Decl = struct {

     /// If the Decl has a value and it is a union, return it,
     /// otherwise null.
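     /// The union data itself lives in the InternPool; the lookup goes
     /// through `Module.typeToUnion`.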
-    pub fn getUnion(decl: *Decl) ?*Union {
+    pub fn getUnion(decl: *Decl, mod: *Module) ?*Union {
         if (!decl.owns_tv) return null;
         const ty = (decl.val.castTag(.ty) orelse return null).data;
-        const union_obj = (ty.cast(Type.Payload.Union) orelse return null).data;
-        return union_obj;
+        return mod.typeToUnion(ty);
     }

     /// If the Decl has a value and it is a function, return it,
@@ -896,10 +895,6 @@ pub const Decl = struct {
                     const enum_obj = ty.cast(Type.Payload.EnumFull).?.data;
                     return enum_obj.namespace.toOptional();
                 },
-                .@"union", .union_safety_tagged, .union_tagged => {
-                    const union_obj = ty.cast(Type.Payload.Union).?.data;
-                    return union_obj.namespace.toOptional();
-                },

                 else => return .none,
             }
@@ -907,6 +902,10 @@ pub const Decl = struct {
             else => return switch (mod.intern_pool.indexToKey(decl.val.ip_index)) {
                 .opaque_type => |opaque_type| opaque_type.namespace.toOptional(),
                 .struct_type => |struct_type| struct_type.namespace,
+                .union_type => |union_type| {
+                    const union_obj = mod.unionPtr(union_type.index);
+                    return union_obj.namespace.toOptional();
+                },
                 else => .none,
             },
         }
@@ -1373,6 +1372,28 @@ pub const Union = struct {
     requires_comptime: PropertyBoolean = .unknown,
     assumed_runtime_bits: bool = false,

+    pub const Index = enum(u32) {
+        _,
+
+        pub fn toOptional(i: Index) OptionalIndex {
+            return @intToEnum(OptionalIndex, @enumToInt(i));
+        }
+    };
+
+    pub const OptionalIndex = enum(u32) {
+        none = std.math.maxInt(u32),
+        _,
+
+        pub fn init(oi: ?Index) OptionalIndex {
+            return @intToEnum(OptionalIndex, @enumToInt(oi orelse return .none));
+        }
+
+        pub fn unwrap(oi: OptionalIndex) ?Index {
+            if (oi == .none) return null;
+            return @intToEnum(Index, @enumToInt(oi));
+        }
+    };
+
     pub const Field = struct {
         /// undefined until `status` is `have_field_types` or `have_layout`.
         ty: Type,
@@ -3639,6 +3660,10 @@ pub fn namespacePtr(mod: *Module, index: Namespace.Index) *Namespace {
     return mod.allocated_namespaces.at(@enumToInt(index));
 }

+pub fn unionPtr(mod: *Module, index: Union.Index) *Union {
+    return mod.intern_pool.unionPtr(index);
+}
+
 pub fn structPtr(mod: *Module, index: Struct.Index) *Struct {
     return mod.intern_pool.structPtr(index);
 }
@@ -4112,7 +4137,7 @@ fn updateZirRefs(mod: *Module, file: *File, old_zir: Zir) !void {
             };
         }

-        if (decl.getUnion()) |union_obj| {
+        if (decl.getUnion(mod)) |union_obj| {
             union_obj.zir_index = inst_map.get(union_obj.zir_index) orelse {
                 try file.deleted_decls.append(gpa, decl_index);
                 continue;
@@ -5988,20 +6013,10 @@ fn markOutdatedDecl(mod: *Module, decl_index: Decl.Index) !void {
     decl.analysis = .outdated;
 }

-pub const CreateNamespaceOptions = struct {
-    parent: Namespace.OptionalIndex,
-    file_scope: *File,
-    ty: Type,
-};
-
-pub fn createNamespace(mod: *Module, options: CreateNamespaceOptions) !Namespace.Index {
+pub fn createNamespace(mod: *Module, initialization: Namespace) !Namespace.Index {
     if (mod.namespaces_free_list.popOrNull()) |index| return index;
     const ptr = try mod.allocated_namespaces.addOne(mod.gpa);
-    ptr.* = .{
-        .parent = options.parent,
-        .file_scope = options.file_scope,
-        .ty = options.ty,
-    };
+    ptr.* = initialization;
     return @intToEnum(Namespace.Index, mod.allocated_namespaces.len - 1);
 }
@@ -6021,6 +6036,14 @@ pub fn destroyStruct(mod: *Module, index: Struct.Index) void {
     return mod.intern_pool.destroyStruct(mod.gpa, index);
 }

+pub fn createUnion(mod: *Module, initialization: Union) Allocator.Error!Union.Index {
+    return mod.intern_pool.createUnion(mod.gpa, initialization);
+}
+
+pub fn destroyUnion(mod: *Module, index: Union.Index) void {
+    return mod.intern_pool.destroyUnion(mod.gpa, index);
+}
+
 pub fn allocateNewDecl(
     mod: *Module,
     namespace: Namespace.Index,
@@ -7068,6 +7091,15 @@ pub fn intValue_i64(mod: *Module, ty: Type, x: i64) Allocator.Error!Value {
     return i.toValue();
 }

+pub fn unionValue(mod: *Module, union_ty: Type, tag: Value, val: Value) Allocator.Error!Value {
+    const i = try intern(mod, .{ .un = .{
+        .ty = union_ty.ip_index,
+        .tag = tag.ip_index,
+        .val = val.ip_index,
+    } });
+    return i.toValue();
+}
+
 pub fn smallestUnsignedInt(mod: *Module, max: u64) Allocator.Error!Type {
     return intType(mod, .unsigned, Type.smallestUnsignedBits(max));
 }
@@ -7276,3 +7308,8 @@ pub fn typeToStruct(mod: *Module, ty: Type) ?*Struct {
     const struct_index = mod.intern_pool.indexToStruct(ty.ip_index).unwrap() orelse return null;
     return mod.structPtr(struct_index);
 }
+
+pub fn typeToUnion(mod: *Module, ty: Type) ?*Union {
+    const union_index = mod.intern_pool.indexToUnion(ty.ip_index).unwrap() orelse return null;
+    return mod.unionPtr(union_index);
+}
diff --git a/src/Sema.zig b/src/Sema.zig
index 1f72470f9e..76ac887c06 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -3123,6 +3123,8 @@ fn zirUnionDecl(
     const tracy = trace(@src());
     defer tracy.end();

+    const mod = sema.mod;
+    const gpa = sema.gpa;
     const small = @bitCast(Zir.Inst.UnionDecl.Small, extended.small);
     var extra_index: usize = extended.operand;
@@ -3142,49 +3144,57 @@ fn zirUnionDecl(
         break :blk decls_len;
     } else 0;

-    var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa);
+    var new_decl_arena = std.heap.ArenaAllocator.init(gpa);
     errdefer new_decl_arena.deinit();
-    const new_decl_arena_allocator = new_decl_arena.allocator();

-    const union_obj = try new_decl_arena_allocator.create(Module.Union);
-    const type_tag = if (small.has_tag_type or small.auto_enum_tag)
-        Type.Tag.union_tagged
-    else if (small.layout != .Auto)
-        Type.Tag.@"union"
-    else switch (block.sema.mod.optimizeMode()) {
-        .Debug, .ReleaseSafe => Type.Tag.union_safety_tagged,
-        .ReleaseFast, .ReleaseSmall => Type.Tag.@"union",
-    };
-    const union_payload = try new_decl_arena_allocator.create(Type.Payload.Union);
-    union_payload.* = .{
-        .base = .{ .tag = type_tag },
-        .data = union_obj,
-    };
-    const union_ty = Type.initPayload(&union_payload.base);
-    const union_val = try Value.Tag.ty.create(new_decl_arena_allocator, union_ty);
-    const mod = sema.mod;
+    // Because these three things each reference each other, `undefined`
+    // placeholders are used before being set after the union type gains an
+    // InternPool index.
+
     const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
         .ty = Type.type,
-        .val = union_val,
+        .val = undefined,
     }, small.name_strategy, "union", inst);
     const new_decl = mod.declPtr(new_decl_index);
     new_decl.owns_tv = true;
     errdefer mod.abortAnonDecl(new_decl_index);
-    union_obj.* = .{
+
+    const new_namespace_index = try mod.createNamespace(.{
+        .parent = block.namespace.toOptional(),
+        .ty = undefined,
+        .file_scope = block.getFileScope(mod),
+    });
+    const new_namespace = mod.namespacePtr(new_namespace_index);
+    errdefer mod.destroyNamespace(new_namespace_index);
+
+    const union_index = try mod.createUnion(.{
         .owner_decl = new_decl_index,
         .tag_ty = Type.null,
         .fields = .{},
         .zir_index = inst,
         .layout = small.layout,
         .status = .none,
-        .namespace = try mod.createNamespace(.{
-            .parent = block.namespace.toOptional(),
-            .ty = union_ty,
-            .file_scope = block.getFileScope(mod),
-        }),
-    };
+        .namespace = new_namespace_index,
+    });
+    errdefer mod.destroyUnion(union_index);
+
+    const union_ty = try mod.intern_pool.get(gpa, .{ .union_type = .{
+        .index = union_index,
+        .runtime_tag = if (small.has_tag_type or small.auto_enum_tag)
+            .tagged
+        else if (small.layout != .Auto)
+            .none
+        else switch (block.sema.mod.optimizeMode()) {
+            .Debug, .ReleaseSafe => .safety,
+            .ReleaseFast, .ReleaseSmall => .none,
+        },
+    } });
+    errdefer mod.intern_pool.remove(union_ty);
+
+    new_decl.val = union_ty.toValue();
+    new_namespace.ty = union_ty.toType();

-    _ = try mod.scanNamespace(union_obj.namespace, extra_index, decls_len, new_decl);
+    _ = try mod.scanNamespace(new_namespace_index, extra_index, decls_len, new_decl);

     try new_decl.finalizeNewArena(&new_decl_arena);
     return sema.analyzeDeclVal(block, src, new_decl_index);
@@ -4246,6 +4256,8 @@ fn validateUnionInit(
     instrs: []const Zir.Inst.Index,
     union_ptr: Air.Inst.Ref,
 ) CompileError!void {
+    const mod = sema.mod;
+
     if (instrs.len != 1) {
         const msg = msg: {
             const msg = try sema.errMsg(
@@ -4343,7 +4355,7 @@ fn validateUnionInit(
         break;
     }

-    const tag_ty = union_ty.unionTagTypeHypothetical();
+    const tag_ty = union_ty.unionTagTypeHypothetical(mod);
     const enum_field_index = @intCast(u32, tag_ty.enumFieldIndex(field_name).?);
     const tag_val = try Value.Tag.enum_field_index.create(sema.arena, enum_field_index);
@@ -8273,7 +8285,7 @@ fn zirEnumToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
         .Enum => operand,
         .Union => blk: {
             const union_ty = try sema.resolveTypeFields(operand_ty);
-            const tag_ty = union_ty.unionTagType() orelse {
+            const tag_ty = union_ty.unionTagType(mod) orelse {
                 return sema.fail(
                     block,
                     operand_src,
@@ -10158,7 +10170,7 @@ fn zirSwitchCapture(
         const item_val = sema.resolveConstValue(block, .unneeded, block.inline_case_capture, undefined) catch unreachable;
         if (operand_ty.zigTypeTag(mod) == .Union) {
             const field_index = @intCast(u32, operand_ty.unionTagFieldIndex(item_val, sema.mod).?);
-            const union_obj = operand_ty.cast(Type.Payload.Union).?.data;
+            const union_obj = mod.typeToUnion(operand_ty).?;
             const field_ty = union_obj.fields.values()[field_index].ty;
             if (try sema.resolveDefinedValue(block, sema.src, operand_ptr)) |union_val| {
                 if (is_ref) {
@@ -10229,7 +10241,7 @@ fn zirSwitchCapture(

     switch (operand_ty.zigTypeTag(mod)) {
         .Union => {
-            const union_obj = operand_ty.cast(Type.Payload.Union).?.data;
+            const union_obj = mod.typeToUnion(operand_ty).?;
             const first_item = try sema.resolveInst(items[0]);
             // Previous switch validation ensured this will succeed
             const first_item_val = sema.resolveConstValue(block, .unneeded, first_item, "") catch unreachable;
@@ -10403,7 +10415,7 @@ fn zirSwitchCond(
         .Union => {
             const union_ty = try sema.resolveTypeFields(operand_ty);
-            const enum_ty = union_ty.unionTagType() orelse {
+            const enum_ty = union_ty.unionTagType(mod) orelse {
                 const msg = msg: {
                     const msg = try sema.errMsg(block, src, "switch on union with no attached enum", .{});
                     errdefer msg.destroy(sema.gpa);
@@ -11627,7 +11639,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
     const analyze_body = if (union_originally and !special.is_inline)
         for (seen_enum_fields, 0..) |seen_field, index| {
             if (seen_field != null) continue;
-            const union_obj = maybe_union_ty.cast(Type.Payload.Union).?.data;
+            const union_obj = mod.typeToUnion(maybe_union_ty).?;
             const field_ty = union_obj.fields.values()[index].ty;
             if (field_ty.zigTypeTag(mod) != .NoReturn) break true;
         } else false
@@ -12068,7 +12080,7 @@ fn zirHasField(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     }
     break :hf switch (ty.zigTypeTag(mod)) {
         .Struct => ty.structFields(mod).contains(field_name),
-        .Union => ty.unionFields().contains(field_name),
+        .Union => ty.unionFields(mod).contains(field_name),
         .Enum => ty.enumFields().contains(field_name),
         .Array => mem.eql(u8, field_name, "len"),
         else => return sema.fail(block, ty_src, "type '{}' does not support '@hasField'", .{
@@ -15415,7 +15427,7 @@ fn analyzeCmpUnionTag(
 ) CompileError!Air.Inst.Ref {
     const mod = sema.mod;
     const union_ty = try sema.resolveTypeFields(sema.typeOf(un));
-    const union_tag_ty = union_ty.unionTagType() orelse {
+    const union_tag_ty = union_ty.unionTagType(mod) orelse {
         const msg = msg: {
             const msg = try sema.errMsg(block, un_src, "comparison of union and enum literal is only valid for tagged union types", .{});
             errdefer msg.destroy(sema.gpa);
@@ -16403,7 +16415,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
             try sema.resolveTypeLayout(ty); // Getting alignment requires type layout
             const layout = union_ty.containerLayout(mod);

-            const union_fields = union_ty.unionFields();
+            const union_fields = union_ty.unionFields(mod);
             const union_field_vals = try fields_anon_decl.arena().alloc(Value, union_fields.count());

             for (union_field_vals, 0..) |*field_val, i| {
@@ -16458,7 +16470,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai

             const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, union_ty.getNamespace(mod));

-            const enum_tag_ty_val = if (union_ty.unionTagType()) |tag_ty| v: {
+            const enum_tag_ty_val = if (union_ty.unionTagType(mod)) |tag_ty| v: {
                 const ty_val = try Value.Tag.ty.create(sema.arena, tag_ty);
                 break :v try Value.Tag.opt_payload.create(sema.arena, ty_val);
             } else Value.null;
@@ -17877,12 +17889,13 @@ fn unionInit(
     field_name: []const u8,
     field_src: LazySrcLoc,
 ) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const field_index = try sema.unionFieldIndex(block, union_ty, field_name, field_src);
-    const field = union_ty.unionFields().values()[field_index];
+    const field = union_ty.unionFields(mod).values()[field_index];
     const init = try sema.coerce(block, field.ty, uncasted_init, init_src);

     if (try sema.resolveMaybeUndefVal(init)) |init_val| {
-        const tag_ty = union_ty.unionTagTypeHypothetical();
+        const tag_ty = union_ty.unionTagTypeHypothetical(mod);
         const enum_field_index = @intCast(u32, tag_ty.enumFieldIndex(field_name).?);
         const tag_val = try Value.Tag.enum_field_index.create(sema.arena, enum_field_index);
         return sema.addConstant(union_ty, try Value.Tag.@"union".create(sema.arena, .{
@@ -17983,7 +17996,7 @@ fn zirStructInit(
         const field_type_extra = sema.code.extraData(Zir.Inst.FieldType, field_type_data.payload_index).data;
         const field_name = sema.code.nullTerminatedString(field_type_extra.name_start);
         const field_index = try sema.unionFieldIndex(block, resolved_ty, field_name, field_src);
-        const tag_ty = resolved_ty.unionTagTypeHypothetical();
+        const tag_ty = resolved_ty.unionTagTypeHypothetical(mod);
         const enum_field_index = @intCast(u32, tag_ty.enumFieldIndex(field_name).?);
         const tag_val = try Value.Tag.enum_field_index.create(sema.arena, enum_field_index);
@@ -18006,7 +18019,7 @@ fn zirStructInit(
             const alloc = try block.addTy(.alloc, alloc_ty);
             const field_ptr = try sema.unionFieldPtr(block, field_src, alloc, field_name, field_src, resolved_ty, true);
             try sema.storePtr(block, src, field_ptr, init_inst);
-            const new_tag = try sema.addConstant(resolved_ty.unionTagTypeHypothetical(), tag_val);
+            const new_tag = try sema.addConstant(resolved_ty.unionTagTypeHypothetical(mod), tag_val);
             _ = try block.addBinOp(.set_union_tag, alloc, new_tag);
             return sema.makePtrConst(block, alloc);
         }
@@ -18544,7 +18557,7 @@ fn fieldType(
                     return sema.addType(field.ty);
                 },
                 .Union => {
-                    const union_obj = cur_ty.cast(Type.Payload.Union).?.data;
+                    const union_obj = mod.typeToUnion(cur_ty).?;
                     const field = union_obj.fields.get(field_name) orelse
                         return sema.failWithBadUnionFieldAccess(block, union_obj, field_src, field_name);
                     return sema.addType(field.ty);
@@ -18726,7 +18739,7 @@ fn zirTagName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
             return sema.addStrLit(block, bytes);
         },
         .Enum => operand_ty,
-        .Union => operand_ty.unionTagType() orelse {
+        .Union => operand_ty.unionTagType(mod) orelse {
             const msg = msg: {
                 const msg = try sema.errMsg(block, src, "union '{}' is untagged", .{
                     operand_ty.fmt(sema.mod),
@@ -19245,42 +19258,53 @@ fn zirReify(
     errdefer new_decl_arena.deinit();
     const new_decl_arena_allocator = new_decl_arena.allocator();

-    const union_obj = try new_decl_arena_allocator.create(Module.Union);
-    const type_tag = if (!tag_type_val.isNull(mod))
-        Type.Tag.union_tagged
-    else if (layout != .Auto)
-        Type.Tag.@"union"
-    else switch (mod.optimizeMode()) {
-        .Debug, .ReleaseSafe => Type.Tag.union_safety_tagged,
-        .ReleaseFast, .ReleaseSmall => Type.Tag.@"union",
-    };
-    const union_payload = try new_decl_arena_allocator.create(Type.Payload.Union);
-    union_payload.* = .{
-        .base = .{ .tag = type_tag },
-        .data = union_obj,
-    };
-    const union_ty = Type.initPayload(&union_payload.base);
-    const new_union_val = try Value.Tag.ty.create(new_decl_arena_allocator, union_ty);
+    // Because these three things each reference each other, `undefined`
+    // placeholders are used before being set after the union type gains an
+    // InternPool index.
+
     const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
         .ty = Type.type,
-        .val = new_union_val,
+        .val = undefined,
     }, name_strategy, "union", inst);
     const new_decl = mod.declPtr(new_decl_index);
     new_decl.owns_tv = true;
     errdefer mod.abortAnonDecl(new_decl_index);
-    union_obj.* = .{
+
+    const new_namespace_index = try mod.createNamespace(.{
+        .parent = block.namespace.toOptional(),
+        .ty = undefined,
+        .file_scope = block.getFileScope(mod),
+    });
+    const new_namespace = mod.namespacePtr(new_namespace_index);
+    errdefer mod.destroyNamespace(new_namespace_index);
+
+    const union_index = try mod.createUnion(.{
         .owner_decl = new_decl_index,
         .tag_ty = Type.null,
         .fields = .{},
         .zir_index = inst,
         .layout = layout,
         .status = .have_field_types,
-        .namespace = try mod.createNamespace(.{
-            .parent = block.namespace.toOptional(),
-            .ty = union_ty,
-            .file_scope = block.getFileScope(mod),
-        }),
-    };
+        .namespace = new_namespace_index,
+    });
+    const union_obj = mod.unionPtr(union_index);
+    errdefer mod.destroyUnion(union_index);
+
+    const union_ty = try mod.intern_pool.get(gpa, .{ .union_type = .{
+        .index = union_index,
+        .runtime_tag = if (!tag_type_val.isNull(mod))
+            .tagged
+        else if (layout != .Auto)
+            .none
+        else switch (mod.optimizeMode()) {
+            .Debug, .ReleaseSafe => .safety,
+            .ReleaseFast, .ReleaseSmall => .none,
+        },
+    } });
+    errdefer mod.intern_pool.remove(union_ty);
+
+    new_decl.val = union_ty.toValue();
+    new_namespace.ty = union_ty.toType();

     // Tag type
     var tag_ty_field_names: ?Module.EnumFull.NameMap = null;
@@ -21981,8 +22005,8 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr
     ptr_ty_data.@"align" = blk: {
         if (mod.typeToStruct(parent_ty)) |struct_obj| {
             break :blk struct_obj.fields.values()[field_index].abi_align;
-        } else if (parent_ty.cast(Type.Payload.Union)) |union_obj| {
-            break :blk union_obj.data.fields.values()[field_index].abi_align;
+        } else if (mod.typeToUnion(parent_ty)) |union_obj| {
+            break :blk union_obj.fields.values()[field_index].abi_align;
         } else {
             break :blk 0;
         }
@@ -23443,8 +23467,7 @@ fn explainWhyTypeIsComptimeInner(
         .Union => {
             if ((try type_set.getOrPutContext(sema.gpa, ty, .{ .mod = mod })).found_existing) return;

-            if (ty.cast(Type.Payload.Union)) |payload| {
-                const union_obj = payload.data;
+            if (mod.typeToUnion(ty)) |union_obj| {
                 for (union_obj.fields.values(), 0..) |field, i| {
                     const field_src_loc = union_obj.fieldSrcLoc(sema.mod, .{
                         .index = i,
@@ -24144,7 +24167,7 @@ fn fieldVal(
                 }
             }
             const union_ty = try sema.resolveTypeFields(child_type);
-            if (union_ty.unionTagType()) |enum_ty| {
+            if (union_ty.unionTagType(mod)) |enum_ty| {
                 if (enum_ty.enumFieldIndex(field_name)) |field_index_usize| {
                     const field_index = @intCast(u32, field_index_usize);
                     return sema.addConstant(
@@ -24358,7 +24381,7 @@ fn fieldPtr(
                 }
             }
             const union_ty = try sema.resolveTypeFields(child_type);
-            if (union_ty.unionTagType()) |enum_ty| {
+            if (union_ty.unionTagType(mod)) |enum_ty| {
                 if (enum_ty.enumFieldIndex(field_name)) |field_index| {
                     const field_index_u32 = @intCast(u32, field_index);
                     var anon_decl = try block.startAnonDecl();
@@ -24489,7 +24512,7 @@ fn fieldCallBind(
         },
         .Union => {
             const union_ty = try sema.resolveTypeFields(concrete_ty);
-            const fields = union_ty.unionFields();
+            const fields = union_ty.unionFields(mod);
             const field_index_usize = fields.getIndex(field_name) orelse break :find_field;
             const field_index = @intCast(u32, field_index_usize);
             const field = fields.values()[field_index];
@@ -24964,7 +24987,7 @@ fn unionFieldPtr(
     const union_ptr_ty = sema.typeOf(union_ptr);
     const union_ty = try sema.resolveTypeFields(unresolved_union_ty);
-    const union_obj = union_ty.cast(Type.Payload.Union).?.data;
+    const union_obj = mod.typeToUnion(union_ty).?;
     const field_index = try sema.unionFieldIndex(block, union_ty, field_name, field_name_src);
     const field = union_obj.fields.values()[field_index];
     const ptr_field_ty = try Type.ptr(arena, mod, .{
@@ -25028,7 +25051,7 @@ fn unionFieldPtr(
     try sema.requireRuntimeBlock(block, src, null);
     if (!initializing and union_obj.layout == .Auto and block.wantSafety() and
-        union_ty.unionTagTypeSafety() != null and union_obj.fields.count() > 1)
+        union_ty.unionTagTypeSafety(mod) != null and union_obj.fields.count() > 1)
     {
         const wanted_tag_val = try Value.Tag.enum_field_index.create(sema.arena, enum_field_index);
         const wanted_tag = try sema.addConstant(union_obj.tag_ty, wanted_tag_val);
@@ -25057,7 +25080,7 @@ fn unionFieldVal(
     assert(unresolved_union_ty.zigTypeTag(mod) == .Union);

     const union_ty = try sema.resolveTypeFields(unresolved_union_ty);
-    const union_obj = union_ty.cast(Type.Payload.Union).?.data;
+    const union_obj = mod.typeToUnion(union_ty).?;
     const field_index = try sema.unionFieldIndex(block, union_ty, field_name, field_name_src);
     const field = union_obj.fields.values()[field_index];
     const enum_field_index = @intCast(u32, union_obj.tag_ty.enumFieldIndex(field_name).?);
@@ -25103,7 +25126,7 @@ fn unionFieldVal(
     try sema.requireRuntimeBlock(block, src, null);
     if (union_obj.layout == .Auto and block.wantSafety() and
-        union_ty.unionTagTypeSafety() != null and union_obj.fields.count() > 1)
+        union_ty.unionTagTypeSafety(mod) != null and union_obj.fields.count() > 1)
     {
         const wanted_tag_val = try Value.Tag.enum_field_index.create(sema.arena, enum_field_index);
         const wanted_tag = try sema.addConstant(union_obj.tag_ty, wanted_tag_val);
@@ -26189,7 +26212,7 @@ fn coerceExtra(
             },
             .Union => blk: {
                 // union to its own tag type
-                const union_tag_ty = inst_ty.unionTagType() orelse break :blk;
+                const union_tag_ty = inst_ty.unionTagType(mod) orelse break :blk;
                 if (union_tag_ty.eql(dest_ty, sema.mod)) {
                     return sema.unionToTag(block, dest_ty, inst, inst_src);
                 }
@@ -28622,7 +28645,7 @@ fn coerceEnumToUnion(
     const mod = sema.mod;

     const inst_ty = sema.typeOf(inst);
-    const tag_ty = union_ty.unionTagType() orelse {
+    const tag_ty = union_ty.unionTagType(mod) orelse {
         const msg = msg: {
             const msg = try sema.errMsg(block, inst_src, "expected type '{}', found '{}'", .{
                 union_ty.fmt(sema.mod), inst_ty.fmt(sema.mod),
@@ -28649,7 +28672,7 @@ fn coerceEnumToUnion(
             return sema.failWithOwnedErrorMsg(msg);
         };

-        const union_obj = union_ty.cast(Type.Payload.Union).?.data;
+        const union_obj = mod.typeToUnion(union_ty).?;
         const field = union_obj.fields.values()[field_index];
         const field_ty = try sema.resolveTypeFields(field.ty);
         if (field_ty.zigTypeTag(mod) == .NoReturn) {
@@ -28679,10 +28702,7 @@ fn coerceEnumToUnion(
             return sema.failWithOwnedErrorMsg(msg);
         };

-        return sema.addConstant(union_ty, try Value.Tag.@"union".create(sema.arena, .{
-            .tag = val,
-            .val = opv,
-        }));
+        return sema.addConstant(union_ty, try mod.unionValue(union_ty, val, opv));
     }

     try sema.requireRuntimeBlock(block, inst_src, null);
@@ -28699,7 +28719,7 @@ fn coerceEnumToUnion(
         return sema.failWithOwnedErrorMsg(msg);
     }

-    const union_obj = union_ty.cast(Type.Payload.Union).?.data;
+    const union_obj = mod.typeToUnion(union_ty).?;
     {
         var msg: ?*Module.ErrorMsg = null;
         errdefer if (msg) |some| some.destroy(sema.gpa);
@@ -29350,10 +29370,13 @@ fn analyzeRef(
     const operand_ty = sema.typeOf(operand);

     if (try sema.resolveMaybeUndefVal(operand)) |val| {
-        switch (val.tag()) {
-            .extern_fn, .function => {
-                const decl_index = val.pointerDecl().?;
-                return sema.analyzeDeclRef(decl_index);
+        switch (val.ip_index) {
+            .none => switch (val.tag()) {
+                .extern_fn, .function => {
+                    const decl_index = val.pointerDecl().?;
+                    return sema.analyzeDeclRef(decl_index);
+                },
+                else => {},
             },
             else => {},
         }
@@ -31523,8 +31546,9 @@ fn checkMemOperand(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void
 }

 fn resolveUnionLayout(sema: *Sema, ty: Type) CompileError!void {
+    const mod = sema.mod;
     const resolved_ty = try sema.resolveTypeFields(ty);
-    const union_obj = resolved_ty.cast(Type.Payload.Union).?.data;
+    const union_obj = mod.typeToUnion(resolved_ty).?;
     switch (union_obj.status) {
         .none, .have_field_types => {},
         .field_types_wip, .layout_wip => {
@@ -31617,27 +31641,6 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
                 return false;
             },

-            .@"union", .union_safety_tagged, .union_tagged => {
-                const union_obj = ty.cast(Type.Payload.Union).?.data;
-                switch (union_obj.requires_comptime) {
-                    .no, .wip => return false,
-                    .yes => return true,
-                    .unknown => {
-                        var requires_comptime = false;
-                        union_obj.requires_comptime = .wip;
-                        for (union_obj.fields.values()) |field| {
-                            if (try sema.resolveTypeRequiresComptime(field.ty)) requires_comptime = true;
-                        }
-                        if (requires_comptime) {
-                            union_obj.requires_comptime = .yes;
-                        } else {
-                            union_obj.requires_comptime = .no;
-                        }
-                        return requires_comptime;
-                    },
-                }
-            },
-
             .error_union => return sema.resolveTypeRequiresComptime(ty.errorUnionPayload()),
             .anyframe_T => {
                 const child_ty = ty.castTag(.anyframe_T).?.data;
@@ -31734,10 +31737,31 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
                 }
             },

-            .union_type => @panic("TODO"),
+            .union_type => |union_type| {
+                const union_obj = mod.unionPtr(union_type.index);
+                switch (union_obj.requires_comptime) {
+                    .no, .wip => return false,
+                    .yes => return true,
+                    .unknown => {
+                        var requires_comptime = false;
+                        union_obj.requires_comptime = .wip;
+                        for (union_obj.fields.values()) |field| {
+                            if (try sema.resolveTypeRequiresComptime(field.ty)) requires_comptime = true;
+                        }
+                        if (requires_comptime) {
+                            union_obj.requires_comptime = .yes;
+                        } else {
union_obj.requires_comptime = .no; + } + return requires_comptime; + }, + } + }, + .opaque_type => false, // values, not types + .un => unreachable, .simple_value => unreachable, .extern_func => unreachable, .int => unreachable, @@ -31829,8 +31853,9 @@ fn resolveStructFully(sema: *Sema, ty: Type) CompileError!void { fn resolveUnionFully(sema: *Sema, ty: Type) CompileError!void { try sema.resolveUnionLayout(ty); + const mod = sema.mod; const resolved_ty = try sema.resolveTypeFields(ty); - const union_obj = resolved_ty.cast(Type.Payload.Union).?.data; + const union_obj = mod.typeToUnion(resolved_ty).?; switch (union_obj.status) { .none, .have_field_types, .field_types_wip, .layout_wip, .have_layout => {}, .fully_resolved_wip, .fully_resolved => return, @@ -31858,15 +31883,8 @@ pub fn resolveTypeFields(sema: *Sema, ty: Type) CompileError!Type { const mod = sema.mod; switch (ty.ip_index) { - .none => switch (ty.tag()) { - .@"union", .union_safety_tagged, .union_tagged => { - const union_obj = ty.cast(Type.Payload.Union).?.data; - try sema.resolveTypeFieldsUnion(ty, union_obj); - return ty; - }, - - else => return ty, - }, + // TODO: After the InternPool transition is complete, change this to `unreachable`. + .none => return ty, .u1_type, .u8_type, @@ -31957,7 +31975,12 @@ pub fn resolveTypeFields(sema: *Sema, ty: Type) CompileError!Type { try sema.resolveTypeFieldsStruct(ty, struct_obj); return ty; }, - .union_type => @panic("TODO"), + .union_type => |union_type| { + const union_obj = mod.unionPtr(union_type.index); + try sema.resolveTypeFieldsUnion(ty, union_obj); + return ty; + }, + else => return ty, }, } @@ -33123,32 +33146,6 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { return null; } }, - .@"union", .union_safety_tagged, .union_tagged => { - const resolved_ty = try sema.resolveTypeFields(ty); - const union_obj = resolved_ty.cast(Type.Payload.Union).?.data; - const tag_val = (try sema.typeHasOnePossibleValue(union_obj.tag_ty)) orelse - return null; - const fields = union_obj.fields.values(); - if (fields.len == 0) return Value.@"unreachable"; - const only_field = fields[0]; - if (only_field.ty.eql(resolved_ty, sema.mod)) { - const msg = try Module.ErrorMsg.create( - sema.gpa, - union_obj.srcLoc(sema.mod), - "union '{}' depends on itself", - .{ty.fmt(sema.mod)}, - ); - try sema.addFieldErrNote(resolved_ty, 0, msg, "while checking this field", .{}); - return sema.failWithOwnedErrorMsg(msg); - } - const val_val = (try sema.typeHasOnePossibleValue(only_field.ty)) orelse - return null; - // TODO make this not allocate. 
-                return try Value.Tag.@"union".create(sema.arena, .{
-                    .tag = tag_val,
-                    .val = val_val,
-                });
-            },

             .array => {
                 if (ty.arrayLen(mod) == 0)
@@ -33268,10 +33265,37 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
                 return empty.toValue();
             },
-            .union_type => @panic("TODO"),
+            .union_type => |union_type| {
+                const resolved_ty = try sema.resolveTypeFields(ty);
+                const union_obj = mod.unionPtr(union_type.index);
+                const tag_val = (try sema.typeHasOnePossibleValue(union_obj.tag_ty)) orelse
+                    return null;
+                const fields = union_obj.fields.values();
+                if (fields.len == 0) return Value.@"unreachable";
+                const only_field = fields[0];
+                if (only_field.ty.eql(resolved_ty, sema.mod)) {
+                    const msg = try Module.ErrorMsg.create(
+                        sema.gpa,
+                        union_obj.srcLoc(sema.mod),
+                        "union '{}' depends on itself",
+                        .{ty.fmt(sema.mod)},
+                    );
+                    try sema.addFieldErrNote(resolved_ty, 0, msg, "while checking this field", .{});
+                    return sema.failWithOwnedErrorMsg(msg);
+                }
+                const val_val = (try sema.typeHasOnePossibleValue(only_field.ty)) orelse
+                    return null;
+                const only = try mod.intern(.{ .un = .{
+                    .ty = resolved_ty.ip_index,
+                    .tag = tag_val.ip_index,
+                    .val = val_val.ip_index,
+                } });
+                return only.toValue();
+            },
             .opaque_type => null,

             // values, not types
+            .un => unreachable,
             .simple_value => unreachable,
             .extern_func => unreachable,
             .int => unreachable,
@@ -33710,30 +33734,6 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
             return false;
         },

-        .@"union", .union_safety_tagged, .union_tagged => {
-            const union_obj = ty.cast(Type.Payload.Union).?.data;
-            switch (union_obj.requires_comptime) {
-                .no, .wip => return false,
-                .yes => return true,
-                .unknown => {
-                    if (union_obj.status == .field_types_wip)
-                        return false;
-
-                    try sema.resolveTypeFieldsUnion(ty, union_obj);
-
-                    union_obj.requires_comptime = .wip;
-                    for (union_obj.fields.values()) |field| {
-                        if (try sema.typeRequiresComptime(field.ty)) {
-                            union_obj.requires_comptime = .yes;
-                            return true;
-                        }
-                    }
-                    union_obj.requires_comptime = .no;
-                    return false;
-                },
-            }
-        },
-
         .error_union => return sema.typeRequiresComptime(ty.errorUnionPayload()),
         .anyframe_T => {
             const child_ty = ty.castTag(.anyframe_T).?.data;
@@ -33837,10 +33837,34 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
                 }
             },

-            .union_type => @panic("TODO"),
+            .union_type => |union_type| {
+                const union_obj = mod.unionPtr(union_type.index);
+                switch (union_obj.requires_comptime) {
+                    .no, .wip => return false,
+                    .yes => return true,
+                    .unknown => {
+                        if (union_obj.status == .field_types_wip)
+                            return false;
+
+                        try sema.resolveTypeFieldsUnion(ty, union_obj);
+
+                        union_obj.requires_comptime = .wip;
+                        for (union_obj.fields.values()) |field| {
+                            if (try sema.typeRequiresComptime(field.ty)) {
+                                union_obj.requires_comptime = .yes;
+                                return true;
+                            }
+                        }
+                        union_obj.requires_comptime = .no;
+                        return false;
+                    },
+                }
+            },
+
+            .opaque_type => false,

             // values, not types
+            .un => unreachable,
             .simple_value => unreachable,
             .extern_func => unreachable,
             .int => unreachable,
@@ -33905,8 +33929,9 @@ fn unionFieldIndex(
     field_name: []const u8,
     field_src: LazySrcLoc,
 ) !u32 {
+    const mod = sema.mod;
     const union_ty = try sema.resolveTypeFields(unresolved_union_ty);
-    const union_obj = union_ty.cast(Type.Payload.Union).?.data;
+    const union_obj = mod.typeToUnion(union_ty).?;
     const field_index_usize = union_obj.fields.getIndex(field_name) orelse
         return sema.failWithBadUnionFieldAccess(block, union_obj, field_src, field_name);
     return @intCast(u32, field_index_usize);
diff --git a/src/TypedValue.zig b/src/TypedValue.zig
index 2105d3108f..cf9888f357 100644
--- a/src/TypedValue.zig
+++ b/src/TypedValue.zig
@@ -91,7 +91,7 @@ pub fn print(
             try writer.writeAll(".{ ");

             try print(.{
-                .ty = ty.cast(Type.Payload.Union).?.data.tag_ty,
+                .ty = mod.unionPtr(mod.intern_pool.indexToKey(ty.ip_index).union_type.index).tag_ty,
                 .val = union_val.tag,
             }, writer, level - 1, mod);
             try writer.writeAll(" = ");
@@ -185,7 +185,7 @@ pub fn print(
                     },
                 }
             } else if (field_ptr.container_ty.zigTypeTag(mod) == .Union) {
-                const field_name = field_ptr.container_ty.unionFields().keys()[field_ptr.field_index];
+                const field_name = field_ptr.container_ty.unionFields(mod).keys()[field_ptr.field_index];
                 return writer.print(".{s}", .{field_name});
             } else if (field_ptr.container_ty.isSlice(mod)) {
                 switch (field_ptr.field_index) {
diff --git a/src/arch/aarch64/abi.zig b/src/arch/aarch64/abi.zig
index 6589425fc2..72a6172895 100644
--- a/src/arch/aarch64/abi.zig
+++ b/src/arch/aarch64/abi.zig
@@ -79,7 +79,7 @@ fn countFloats(ty: Type, mod: *Module, maybe_float_bits: *?u16) u8 {
     const invalid = std.math.maxInt(u8);
     switch (ty.zigTypeTag(mod)) {
         .Union => {
-            const fields = ty.unionFields();
+            const fields = ty.unionFields(mod);
             var max_count: u8 = 0;
             for (fields.values()) |field| {
                 const field_count = countFloats(field.ty, mod, maybe_float_bits);
@@ -118,7 +118,7 @@ pub fn getFloatArrayType(ty: Type, mod: *Module) ?Type {
     switch (ty.zigTypeTag(mod)) {
         .Union => {
-            const fields = ty.unionFields();
+            const fields = ty.unionFields(mod);
             for (fields.values()) |field| {
                 if (getFloatArrayType(field.ty, mod)) |some| return some;
             }
diff --git a/src/arch/arm/abi.zig b/src/arch/arm/abi.zig
index 7a7d632837..e4a07f22bf 100644
--- a/src/arch/arm/abi.zig
+++ b/src/arch/arm/abi.zig
@@ -62,7 +62,7 @@ pub fn classifyType(ty: Type, mod: *Module, ctx: Context) Class {
             const float_count = countFloats(ty, mod, &maybe_float_bits);
             if (float_count <= byval_float_count) return .byval;

-            for (ty.unionFields().values()) |field| {
+            for (ty.unionFields(mod).values()) |field| {
                 if (field.ty.bitSize(mod) > 32 or field.normalAlignment(mod) > 32) {
                     return Class.arrSize(bit_size, 64);
                 }
@@ -121,7 +121,7 @@ fn countFloats(ty: Type, mod: *Module, maybe_float_bits: *?u16) u32 {
     const invalid = std.math.maxInt(u32);
     switch (ty.zigTypeTag(mod)) {
         .Union => {
-            const fields = ty.unionFields();
+            const fields = ty.unionFields(mod);
             var max_count: u32 = 0;
             for (fields.values()) |field| {
                 const field_count = countFloats(field.ty, mod, maybe_float_bits);
diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig
index 90c26d5d84..c1409e4977 100644
--- a/src/arch/wasm/CodeGen.zig
+++ b/src/arch/wasm/CodeGen.zig
@@ -1739,8 +1739,8 @@ fn isByRef(ty: Type, mod: *Module) bool {
         .Frame,
         => return ty.hasRuntimeBitsIgnoreComptime(mod),
         .Union => {
-            if (ty.castTag(.@"union")) |union_ty| {
-                if (union_ty.data.layout == .Packed) {
+            if (mod.typeToUnion(ty)) |union_obj| {
+                if (union_obj.layout == .Packed) {
                     return ty.abiSize(mod) > 8;
                 }
             }
@@ -3175,7 +3175,7 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue {
         },
         .Union => {
             // in this case we have a packed union which will not be passed by reference.
-            const union_ty = ty.cast(Type.Payload.Union).?.data;
+            const union_ty = mod.typeToUnion(ty).?;
             const union_obj = val.castTag(.@"union").?.data;
             const field_index = ty.unionTagFieldIndex(union_obj.tag, func.bin_file.base.options.module.?).?;
             const field_ty = union_ty.fields.values()[field_index].ty;
@@ -5086,12 +5086,12 @@ fn airUnionInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const result = result: {
         const union_ty = func.typeOfIndex(inst);
         const layout = union_ty.unionGetLayout(mod);
-        const union_obj = union_ty.cast(Type.Payload.Union).?.data;
+        const union_obj = mod.typeToUnion(union_ty).?;
         const field = union_obj.fields.values()[extra.field_index];
         const field_name = union_obj.fields.keys()[extra.field_index];

         const tag_int = blk: {
-            const tag_ty = union_ty.unionTagTypeHypothetical();
+            const tag_ty = union_ty.unionTagTypeHypothetical(mod);
             const enum_field_index = tag_ty.enumFieldIndex(field_name).?;
             var tag_val_payload: Value.Payload.U32 = .{
                 .base = .{ .tag = .enum_field_index },
diff --git a/src/arch/wasm/abi.zig b/src/arch/wasm/abi.zig
index ee836bebdb..92b0f4dc40 100644
--- a/src/arch/wasm/abi.zig
+++ b/src/arch/wasm/abi.zig
@@ -70,8 +70,8 @@ pub fn classifyType(ty: Type, mod: *Module) [2]Class {
             }
             const layout = ty.unionGetLayout(mod);
             std.debug.assert(layout.tag_size == 0);
-            if (ty.unionFields().count() > 1) return memory;
-            return classifyType(ty.unionFields().values()[0].ty, mod);
+            if (ty.unionFields(mod).count() > 1) return memory;
+            return classifyType(ty.unionFields(mod).values()[0].ty, mod);
         },
         .ErrorUnion,
         .Frame,
@@ -111,11 +111,11 @@ pub fn scalarType(ty: Type, mod: *Module) Type {
             if (ty.containerLayout(mod) != .Packed) {
                 const layout = ty.unionGetLayout(mod);
                 if (layout.payload_size == 0 and layout.tag_size != 0) {
-                    return scalarType(ty.unionTagTypeSafety().?, mod);
+                    return scalarType(ty.unionTagTypeSafety(mod).?, mod);
                 }
-                std.debug.assert(ty.unionFields().count() == 1);
+                std.debug.assert(ty.unionFields(mod).count() == 1);
             }
-            return scalarType(ty.unionFields().values()[0].ty, mod);
+            return scalarType(ty.unionFields(mod).values()[0].ty, mod);
         },
         else => return ty,
     }
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index 77661b2a14..7b93ff2059 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -11410,9 +11410,9 @@ fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void {
     const dst_mcv = try self.allocRegOrMem(inst, false);

-    const union_obj = union_ty.cast(Type.Payload.Union).?.data;
+    const union_obj = mod.typeToUnion(union_ty).?;
     const field_name = union_obj.fields.keys()[extra.field_index];
-    const tag_ty = union_ty.unionTagTypeSafety().?;
+    const tag_ty = union_obj.tag_ty;
     const field_index = @intCast(u32, tag_ty.enumFieldIndex(field_name).?);
     var tag_pl = Value.Payload.U32{ .base = .{ .tag = .enum_field_index }, .data = field_index };
     const tag_val = Value.initPayload(&tag_pl.base);
diff --git a/src/arch/x86_64/abi.zig b/src/arch/x86_64/abi.zig
index 45ce64a98e..69df5dbf4c 100644
--- a/src/arch/x86_64/abi.zig
+++ b/src/arch/x86_64/abi.zig
@@ -338,7 +338,7 @@ pub fn classifySystemV(ty: Type, mod: *Module, ctx: Context) [8]Class {
             if (ty_size > 64)
                 return memory_class;

-            const fields = ty.unionFields();
+            const fields = ty.unionFields(mod);
             for (fields.values()) |field| {
                 if (field.abi_align != 0) {
                     if (field.abi_align < field.ty.abiAlignment(mod)) {
diff --git a/src/codegen.zig b/src/codegen.zig
index b29af1ff93..5c022392bf 100644
--- a/src/codegen.zig
+++ b/src/codegen.zig
@@ -568,7 +568,7 @@ pub fn generateSymbol(
         if (layout.payload_size == 0) {
             return generateSymbol(bin_file, src_loc, .{
-                .ty = typed_value.ty.unionTagType().?,
+                .ty = typed_value.ty.unionTagType(mod).?,
                 .val = union_obj.tag,
             }, code, debug_output, reloc_info);
         }
@@ -576,7 +576,7 @@ pub fn generateSymbol(
         // Check if we should store the tag first.
         if (layout.tag_align >= layout.payload_align) {
             switch (try generateSymbol(bin_file, src_loc, .{
-                .ty = typed_value.ty.unionTagType().?,
+                .ty = typed_value.ty.unionTagType(mod).?,
                 .val = union_obj.tag,
             }, code, debug_output, reloc_info)) {
                 .ok => {},
@@ -584,7 +584,7 @@ pub fn generateSymbol(
             }
         }

-        const union_ty = typed_value.ty.cast(Type.Payload.Union).?.data;
+        const union_ty = mod.typeToUnion(typed_value.ty).?;
         const field_index = typed_value.ty.unionTagFieldIndex(union_obj.tag, mod).?;
         assert(union_ty.haveFieldTypes());
         const field_ty = union_ty.fields.values()[field_index].ty;
diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index 1c16216504..872bdb94d3 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -853,7 +853,7 @@ pub const DeclGen = struct {
                 }

                 try writer.writeByte('{');
-                if (ty.unionTagTypeSafety()) |tag_ty| {
+                if (ty.unionTagTypeSafety(mod)) |tag_ty| {
                     const layout = ty.unionGetLayout(mod);
                     if (layout.tag_size != 0) {
                         try writer.writeAll(" .tag = ");
@@ -863,12 +863,12 @@ pub const DeclGen = struct {
                     if (layout.tag_size != 0) try writer.writeByte(',');
                     try writer.writeAll(" .payload = {");
                 }
-                for (ty.unionFields().values()) |field| {
+                for (ty.unionFields(mod).values()) |field| {
                     if (!field.ty.hasRuntimeBits(mod)) continue;
                     try dg.renderValue(writer, field.ty, val, initializer_type);
                     break;
                 }
-                if (ty.unionTagTypeSafety()) |_| try writer.writeByte('}');
+                if (ty.unionTagTypeSafety(mod)) |_| try writer.writeByte('}');
                 return writer.writeByte('}');
             },
             .ErrorUnion => {
@@ -1451,8 +1451,8 @@ pub const DeclGen = struct {
                 }

                 const field_i = ty.unionTagFieldIndex(union_obj.tag, mod).?;
-                const field_ty = ty.unionFields().values()[field_i].ty;
-                const field_name = ty.unionFields().keys()[field_i];
+                const field_ty = ty.unionFields(mod).values()[field_i].ty;
+                const field_name = ty.unionFields(mod).keys()[field_i];
                 if (ty.containerLayout(mod) == .Packed) {
                     if (field_ty.hasRuntimeBits(mod)) {
                         if (field_ty.isPtrAtRuntime(mod)) {
@@ -1472,7 +1472,7 @@ pub const DeclGen = struct {
                 }

                 try writer.writeByte('{');
-                if (ty.unionTagTypeSafety()) |tag_ty| {
+                if (ty.unionTagTypeSafety(mod)) |tag_ty| {
                     const layout = ty.unionGetLayout(mod);
                     if (layout.tag_size != 0) {
                         try writer.writeAll(" .tag = ");
@@ -1486,12 +1486,12 @@ pub const DeclGen = struct {
                     try writer.print(" .{ } = ", .{fmtIdent(field_name)});
                     try dg.renderValue(writer, field_ty, union_obj.val, initializer_type);
                     try writer.writeByte(' ');
-                } else for (ty.unionFields().values()) |field| {
+                } else for (ty.unionFields(mod).values()) |field| {
                     if (!field.ty.hasRuntimeBits(mod)) continue;
                     try dg.renderValue(writer, field.ty, Value.undef, initializer_type);
                     break;
                 }
-                if (ty.unionTagTypeSafety()) |_| try writer.writeByte('}');
+                if (ty.unionTagTypeSafety(mod)) |_| try writer.writeByte('}');

                 try writer.writeByte('}');
             },
@@ -5238,13 +5238,13 @@ fn fieldLocation(
         .Auto, .Extern => {
             const field_ty = container_ty.structFieldType(field_index, mod);
             if (!field_ty.hasRuntimeBitsIgnoreComptime(mod))
-                return if (container_ty.unionTagTypeSafety() != null and
+                return if (container_ty.unionTagTypeSafety(mod) != null and
                     !container_ty.unionHasAllZeroBitFieldTypes(mod))
                     .{ .field = .{ .identifier = "payload" } }
                 else
                     .begin;
-            const field_name = container_ty.unionFields().keys()[field_index];
-            return .{ .field = if (container_ty.unionTagTypeSafety()) |_|
+            const field_name = container_ty.unionFields(mod).keys()[field_index];
+            return .{ .field = if (container_ty.unionTagTypeSafety(mod)) |_|
                 .{ .payload_identifier = field_name }
             else
                 .{ .identifier = field_name } };
@@ -5424,37 +5424,6 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
             else
                 .{ .identifier = struct_ty.structFieldName(extra.field_index, mod) },
-            .@"union", .union_safety_tagged, .union_tagged => if (struct_ty.containerLayout(mod) == .Packed) {
-                const operand_lval = if (struct_byval == .constant) blk: {
-                    const operand_local = try f.allocLocal(inst, struct_ty);
-                    try f.writeCValue(writer, operand_local, .Other);
-                    try writer.writeAll(" = ");
-                    try f.writeCValue(writer, struct_byval, .Initializer);
-                    try writer.writeAll(";\n");
-                    break :blk operand_local;
-                } else struct_byval;
-
-                const local = try f.allocLocal(inst, inst_ty);
-                try writer.writeAll("memcpy(&");
-                try f.writeCValue(writer, local, .Other);
-                try writer.writeAll(", &");
-                try f.writeCValue(writer, operand_lval, .Other);
-                try writer.writeAll(", sizeof(");
-                try f.renderType(writer, inst_ty);
-                try writer.writeAll("));\n");
-
-                if (struct_byval == .constant) {
-                    try freeLocal(f, inst, operand_lval.new_local, 0);
-                }
-
-                return local;
-            } else field_name: {
-                const name = struct_ty.unionFields().keys()[extra.field_index];
-                break :field_name if (struct_ty.unionTagTypeSafety()) |_|
-                    .{ .payload_identifier = name }
-                else
-                    .{ .identifier = name };
-            },
             else => unreachable,
         },
         else => switch (mod.intern_pool.indexToKey(struct_ty.ip_index)) {
@@ -5520,6 +5489,41 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
                 return local;
             },
         },
+        .union_type => |union_type| field_name: {
+            const union_obj = mod.unionPtr(union_type.index);
+            if (union_obj.layout == .Packed) {
+                const operand_lval = if (struct_byval == .constant) blk: {
+                    const operand_local = try f.allocLocal(inst, struct_ty);
+                    try f.writeCValue(writer, operand_local, .Other);
+                    try writer.writeAll(" = ");
+                    try f.writeCValue(writer, struct_byval, .Initializer);
+                    try writer.writeAll(";\n");
+                    break :blk operand_local;
+                } else struct_byval;
+
+                const local = try f.allocLocal(inst, inst_ty);
+                try writer.writeAll("memcpy(&");
+                try f.writeCValue(writer, local, .Other);
+                try writer.writeAll(", &");
+                try f.writeCValue(writer, operand_lval, .Other);
+                try writer.writeAll(", sizeof(");
+                try f.renderType(writer, inst_ty);
+                try writer.writeAll("));\n");
+
+                if (struct_byval == .constant) {
+                    try freeLocal(f, inst, operand_lval.new_local, 0);
+                }
+
+                return local;
+            } else {
+                const name = union_obj.fields.keys()[extra.field_index];
+                break :field_name if (union_type.hasTag()) .{
+                    .payload_identifier = name,
+                } else .{
+                    .identifier = name,
+                };
+            }
+        },
         else => unreachable,
     },
     };
@@ -6461,7 +6465,7 @@ fn airSetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue {
     const union_ty = f.typeOf(bin_op.lhs).childType(mod);
     const layout = union_ty.unionGetLayout(mod);
     if (layout.tag_size == 0) return .none;
-    const tag_ty = union_ty.unionTagTypeSafety().?;
+    const tag_ty = union_ty.unionTagTypeSafety(mod).?;

     const writer = f.object.writer();
     const a = try Assignment.start(f, writer, tag_ty);
@@ -6907,7 +6911,7 @@ fn airUnionInit(f: *Function, inst: Air.Inst.Index) !CValue {
     const extra = f.air.extraData(Air.UnionInit, ty_pl.payload).data;

     const union_ty = f.typeOfIndex(inst);
-    const union_obj = union_ty.cast(Type.Payload.Union).?.data;
+    const union_obj = mod.typeToUnion(union_ty).?;
     const field_name = union_obj.fields.keys()[extra.field_index];
     const payload_ty = f.typeOf(extra.init);
     const payload = try f.resolveInst(extra.init);
@@ -6923,7 +6927,7 @@ fn airUnionInit(f: *Function, inst: Air.Inst.Index) !CValue {
         return local;
     }

-    const field: CValue = if (union_ty.unionTagTypeSafety()) |tag_ty| field: {
+    const field: CValue = if (union_ty.unionTagTypeSafety(mod)) |tag_ty| field: {
         const layout = union_ty.unionGetLayout(mod);
         if (layout.tag_size != 0) {
             const field_index = tag_ty.enumFieldIndex(field_name).?;
diff --git a/src/codegen/c/type.zig b/src/codegen/c/type.zig
index 3321df6d49..bcb4b92228 100644
--- a/src/codegen/c/type.zig
+++ b/src/codegen/c/type.zig
@@ -303,7 +303,7 @@ pub const CType = extern union {
             );
         }
         pub fn unionPayloadAlign(union_ty: Type, mod: *Module) AlignAs {
-            const union_obj = union_ty.cast(Type.Payload.Union).?.data;
+            const union_obj = mod.typeToUnion(union_ty).?;
             const union_payload_align = union_obj.abiAlignment(mod, false);
             return init(union_payload_align, union_payload_align);
         }
@@ -1498,7 +1498,7 @@ pub const CType = extern union {
                         if (lookup.isMutable()) {
                             for (0..switch (zig_ty_tag) {
                                 .Struct => ty.structFieldCount(mod),
-                                .Union => ty.unionFields().count(),
+                                .Union => ty.unionFields(mod).count(),
                                 else => unreachable,
                             }) |field_i| {
                                 const field_ty = ty.structFieldType(field_i, mod);
@@ -1531,7 +1531,7 @@ pub const CType = extern union {
                                 .payload => unreachable,
                             });
                         } else {
-                            const tag_ty = ty.unionTagTypeSafety();
+                            const tag_ty = ty.unionTagTypeSafety(mod);
                             const is_tagged_union_wrapper = kind != .payload and tag_ty != null;
                             const is_struct = zig_ty_tag == .Struct or is_tagged_union_wrapper;
                             switch (kind) {
@@ -1580,7 +1580,7 @@ pub const CType = extern union {
                             var is_packed = false;
                             for (0..switch (zig_ty_tag) {
                                 .Struct => ty.structFieldCount(mod),
-                                .Union => ty.unionFields().count(),
+                                .Union => ty.unionFields(mod).count(),
                                 else => unreachable,
                             }) |field_i| {
                                 const field_ty = ty.structFieldType(field_i, mod);
@@ -1930,7 +1930,7 @@ pub const CType = extern union {
                     const zig_ty_tag = ty.zigTypeTag(mod);
                     const fields_len = switch (zig_ty_tag) {
                         .Struct => ty.structFieldCount(mod),
-                        .Union => ty.unionFields().count(),
+                        .Union => ty.unionFields(mod).count(),
                         else => unreachable,
                     };
@@ -1956,7 +1956,7 @@ pub const CType = extern union {
                         else
                             arena.dupeZ(u8, switch (zig_ty_tag) {
                                 .Struct => ty.structFieldName(field_i, mod),
-                                .Union => ty.unionFields().keys()[field_i],
+                                .Union => ty.unionFields(mod).keys()[field_i],
                                 else => unreachable,
                             }),
                         .type = store.set.typeToIndex(field_ty, mod, switch (kind) {
@@ -1986,7 +1986,7 @@ pub const CType = extern union {
                     unnamed_pl.* = .{ .base = .{ .tag = t }, .data = .{
                         .fields = fields_pl,
                         .owner_decl = ty.getOwnerDecl(mod),
-                        .id = if (ty.unionTagTypeSafety()) |_| 0 else unreachable,
+                        .id = if (ty.unionTagTypeSafety(mod)) |_| 0 else unreachable,
                     } };
                     return initPayload(unnamed_pl);
                 },
@@ -2085,7 +2085,7 @@ pub const CType = extern union {
                     var c_field_i: usize = 0;
                     for (0..switch (zig_ty_tag) {
                         .Struct => ty.structFieldCount(mod),
-                        .Union => ty.unionFields().count(),
+                        .Union => ty.unionFields(mod).count(),
                         else => unreachable,
                     }) |field_i| {
                         const field_ty = ty.structFieldType(field_i, mod);
@@ -2106,7 +2106,7 @@ pub const CType = extern union {
                                 std.fmt.bufPrint(&name_buf, "f{}", .{field_i}) catch unreachable
                             else switch (zig_ty_tag) {
                                 .Struct => ty.structFieldName(field_i, mod),
-                                .Union => ty.unionFields().keys()[field_i],
+                                .Union => ty.unionFields(mod).keys()[field_i],
                                 else => unreachable,
                             },
                             mem.span(c_field.name),
@@ -2122,7 +2122,7 @@ pub const CType = extern union {
             .packed_unnamed_union,
             => switch (self.kind) {
                 .forward, .forward_parameter, .complete, .parameter, .global => unreachable,
-                .payload => if (ty.unionTagTypeSafety()) |_| {
+                .payload => if (ty.unionTagTypeSafety(mod)) |_| {
                     const data = cty.cast(Payload.Unnamed).?.data;
                     return ty.getOwnerDecl(mod) == data.owner_decl and data.id == 0;
                 } else unreachable,
@@ -2211,7 +2211,7 @@ pub const CType = extern union {
                 const zig_ty_tag = ty.zigTypeTag(mod);
                 for (0..switch (ty.zigTypeTag(mod)) {
                     .Struct => ty.structFieldCount(mod),
-                    .Union => ty.unionFields().count(),
+                    .Union => ty.unionFields(mod).count(),
                     else => unreachable,
                 }) |field_i| {
                     const field_ty = ty.structFieldType(field_i, mod);
@@ -2228,7 +2228,7 @@ pub const CType = extern union {
                             std.fmt.bufPrint(&name_buf, "f{}", .{field_i}) catch unreachable
                         else switch (zig_ty_tag) {
                             .Struct => ty.structFieldName(field_i, mod),
-                            .Union => ty.unionFields().keys()[field_i],
+                            .Union => ty.unionFields(mod).keys()[field_i],
                             else => unreachable,
                         });
                     autoHash(hasher, AlignAs.fieldAlign(ty, field_i, mod).@"align");
@@ -2241,7 +2241,7 @@ pub const CType = extern union {
             .packed_unnamed_union,
            => switch (self.kind) {
                 .forward, .forward_parameter, .complete, .parameter, .global => unreachable,
-                .payload => if (ty.unionTagTypeSafety()) |_| {
+                .payload => if (ty.unionTagTypeSafety(mod)) |_| {
                     autoHash(hasher, ty.getOwnerDecl(mod));
                     autoHash(hasher, @as(u32, 0));
                 } else unreachable,
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index 6b12c447dc..c299253442 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -2178,7 +2178,7 @@ pub const Object = struct {
                 break :blk fwd_decl;
             };

-            const union_obj = ty.cast(Type.Payload.Union).?.data;
+            const union_obj = mod.typeToUnion(ty).?;
             if (!union_obj.haveFieldTypes() or !ty.hasRuntimeBitsIgnoreComptime(mod)) {
                 const union_di_ty = try o.makeEmptyNamespaceDIType(owner_decl_index);
                 dib.replaceTemporary(fwd_decl, union_di_ty);
@@ -3063,7 +3063,7 @@ pub const DeclGen = struct {
             gop.key_ptr.* = try t.copy(dg.object.type_map_arena.allocator());

             const layout = t.unionGetLayout(mod);
-            const union_obj = t.cast(Type.Payload.Union).?.data;
+            const union_obj = mod.typeToUnion(t).?;

             if (union_obj.layout == .Packed) {
                 const bitsize = @intCast(c_uint, t.bitSize(mod));
@@ -3797,11 +3797,11 @@ pub const DeclGen = struct {

                 if (layout.payload_size == 0) {
                     return lowerValue(dg, .{
-                        .ty = tv.ty.unionTagTypeSafety().?,
+                        .ty = tv.ty.unionTagTypeSafety(mod).?,
                         .val = tag_and_val.tag,
                     });
                 }
-                const union_obj = tv.ty.cast(Type.Payload.Union).?.data;
+                const union_obj = mod.typeToUnion(tv.ty).?;
                 const field_index = tv.ty.unionTagFieldIndex(tag_and_val.tag, dg.module).?;
                 assert(union_obj.haveFieldTypes());
@@ -3851,7 +3851,7 @@ pub const DeclGen = struct {
                     }
                 }
                 const llvm_tag_value = try lowerValue(dg, .{
-                    .ty = tv.ty.unionTagTypeSafety().?,
+                    .ty = tv.ty.unionTagTypeSafety(mod).?,
                     .val = tag_and_val.tag,
                 });
                 var fields: [3]*llvm.Value = undefined;
@@ -9410,7 +9410,7 @@ pub const FuncGen = struct {
         const union_ty = self.typeOfIndex(inst);
         const union_llvm_ty = try self.dg.lowerType(union_ty);
         const layout = union_ty.unionGetLayout(mod);
-        const union_obj = union_ty.cast(Type.Payload.Union).?.data;
+        const union_obj = mod.typeToUnion(union_ty).?;

         if (union_obj.layout == .Packed) {
             const big_bits = union_ty.bitSize(mod);
@@ -9427,7 +9427,7 @@ pub const FuncGen = struct {
         }

         const tag_int = blk: {
-            const tag_ty = union_ty.unionTagTypeHypothetical();
+            const tag_ty = union_ty.unionTagTypeHypothetical(mod);
             const union_field_name = union_obj.fields.keys()[extra.field_index];
             const enum_field_index = tag_ty.enumFieldIndex(union_field_name).?;
             var tag_val_payload: Value.Payload.U32 = .{
diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig
index 41b523b8f4..1176eb746d 100644
--- a/src/codegen/spirv.zig
+++ b/src/codegen/spirv.zig
@@ -755,10 +755,10 @@ pub const DeclGen = struct {
             const layout = ty.unionGetLayout(mod);

             if (layout.payload_size == 0) {
-                return try self.lower(ty.unionTagTypeSafety().?, tag_and_val.tag);
+                return try self.lower(ty.unionTagTypeSafety(mod).?, tag_and_val.tag);
             }

-            const union_ty = ty.cast(Type.Payload.Union).?.data;
+            const union_ty = mod.typeToUnion(ty).?;
             if (union_ty.layout == .Packed) {
                 return dg.todo("packed union constants", .{});
             }
@@ -770,7 +770,7 @@ pub const DeclGen = struct {
             const tag_first = layout.tag_align >= layout.payload_align;

             if (has_tag and tag_first) {
-                try self.lower(ty.unionTagTypeSafety().?, tag_and_val.tag);
+                try self.lower(ty.unionTagTypeSafety(mod).?, tag_and_val.tag);
             }

             const active_field_size = if (active_field_ty.hasRuntimeBitsIgnoreComptime(mod)) blk: {
@@ -782,7 +782,7 @@ pub const DeclGen = struct {
             try self.addUndef(payload_padding_len);

             if (has_tag and !tag_first) {
-                try self.lower(ty.unionTagTypeSafety().?, tag_and_val.tag);
+                try self.lower(ty.unionTagTypeSafety(mod).?, tag_and_val.tag);
             }

             try self.addUndef(layout.padding);
@@ -1121,7 +1121,7 @@ pub const DeclGen = struct {
     fn resolveUnionType(self: *DeclGen, ty: Type, maybe_active_field: ?usize) !CacheRef {
         const mod = self.module;
         const layout = ty.unionGetLayout(mod);
-        const union_ty = ty.cast(Type.Payload.Union).?.data;
+        const union_ty = mod.typeToUnion(ty).?;

         if (union_ty.layout == .Packed) {
             return self.todo("packed union types", .{});
diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig
index 7d033de584..d1e8d9601b 100644
--- a/src/link/Dwarf.zig
+++ b/src/link/Dwarf.zig
@@ -432,7 +432,7 @@ pub const DeclState = struct {
             },
             .Union => {
                 const layout = ty.unionGetLayout(mod);
-                const union_obj = ty.cast(Type.Payload.Union).?.data;
+                const union_obj = mod.typeToUnion(ty).?;
                 const payload_offset = if (layout.tag_align >= layout.payload_align) layout.tag_size else 0;
                 const tag_offset = if (layout.tag_align >= layout.payload_align) 0 else layout.payload_size;
                 const is_tagged = layout.tag_size > 0;
@@ -476,7 +476,7 @@ pub const DeclState = struct {
                     try dbg_info_buffer.writer().print("{s}\x00", .{union_name});
                 }

-                const fields = ty.unionFields();
+                const fields = ty.unionFields(mod);
                 for (fields.keys()) |field_name| {
                     const field = fields.get(field_name).?;
                     if (!field.ty.hasRuntimeBits(mod)) continue;
diff --git a/src/type.zig b/src/type.zig
index 4e374a39d5..0096a96aa2 100644
--- a/src/type.zig
+++ b/src/type.zig
@@ -68,11 +68,6 @@ pub const Type = struct {
                 .enum_simple,
                 .enum_numbered,
                 => return .Enum,
-
-                .@"union",
-                .union_safety_tagged,
-                .union_tagged,
-                => return .Union,
             },
             else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
                 .int_type => return .Int,
@@ -140,6 +135,7 @@ pub const Type = struct {
                 },

                 // values, not types
+                .un => unreachable,
                 .extern_func => unreachable,
                 .int => unreachable,
                 .ptr => unreachable,
@@ -585,12 +581,6 @@ pub const Type = struct {
                 const b_enum_obj = (b.cast(Payload.EnumNumbered) orelse return false).data;
                 return a_enum_obj == b_enum_obj;
             },
-
-            .@"union", .union_safety_tagged, .union_tagged => {
-                const a_union_obj = a.cast(Payload.Union).?.data;
-                const b_union_obj = (b.cast(Payload.Union) orelse return false).data;
-                return a_union_obj == b_union_obj;
-            },
         }
     }
@@ -752,12 +742,6 @@ pub const Type = struct {
                 std.hash.autoHash(hasher, std.builtin.TypeId.Enum);
                 std.hash.autoHash(hasher, enum_obj);
             },
-
-            .@"union", .union_safety_tagged, .union_tagged => {
-                const union_obj: *const Module.Union = ty.cast(Payload.Union).?.data;
-                std.hash.autoHash(hasher, std.builtin.TypeId.Union);
-                std.hash.autoHash(hasher, union_obj);
-            },
         }
     }
@@ -935,7 +919,6 @@ pub const Type = struct {
             .error_set => return self.copyPayloadShallow(allocator, Payload.ErrorSet),
             .error_set_inferred => return self.copyPayloadShallow(allocator, Payload.ErrorSetInferred),
             .error_set_single => return self.copyPayloadShallow(allocator, Payload.Name),
-            .@"union", .union_safety_tagged, .union_tagged => return self.copyPayloadShallow(allocator, Payload.Union),
             .enum_simple => return self.copyPayloadShallow(allocator, Payload.EnumSimple),
             .enum_numbered => return self.copyPayloadShallow(allocator, Payload.EnumNumbered),
             .enum_full, .enum_nonexhaustive => return self.copyPayloadShallow(allocator, Payload.EnumFull),
@@ -1011,12 +994,6 @@ pub const Type = struct {
         while (true) {
             const t = ty.tag();
             switch (t) {
-                .@"union", .union_safety_tagged, .union_tagged => {
-                    const union_obj = ty.cast(Payload.Union).?.data;
-                    return writer.print("({s} decl={d})", .{
-                        @tagName(t), union_obj.owner_decl,
-                    });
-                },
                 .enum_full, .enum_nonexhaustive => {
                     const enum_full = ty.cast(Payload.EnumFull).?.data;
                     return writer.print("({s} decl={d})", .{
@@ -1221,11 +1198,6 @@ pub const Type = struct {
             .inferred_alloc_const => unreachable,
             .inferred_alloc_mut => unreachable,

-            .@"union", .union_safety_tagged, .union_tagged => {
-                const union_obj = ty.cast(Payload.Union).?.data;
-                const decl = mod.declPtr(union_obj.owner_decl);
-                try decl.renderFullyQualifiedName(mod, writer);
-            },
             .enum_full, .enum_nonexhaustive => {
                 const enum_full = ty.cast(Payload.EnumFull).?.data;
                 const decl = mod.declPtr(enum_full.owner_decl);
@@ -1518,13 +1490,18 @@ pub const Type = struct {
                 }
             },
-            .union_type => @panic("TODO"),
+            .union_type => |union_type| {
+                const union_obj = mod.unionPtr(union_type.index);
+                const decl = mod.declPtr(union_obj.owner_decl);
+                try decl.renderFullyQualifiedName(mod, writer);
+            },
             .opaque_type => |opaque_type| {
                 const decl = mod.declPtr(opaque_type.decl);
                 try decl.renderFullyQualifiedName(mod, writer);
             },

             // values, not types
+            .un => unreachable,
             .simple_value => unreachable,
             .extern_func => unreachable,
             .int => unreachable,
@@ -1627,45 +1604,6 @@ pub const Type = struct {
                     return int_tag_ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat);
                 },
-                .@"union" => {
-                    const union_obj = ty.castTag(.@"union").?.data;
-                    if (union_obj.status == .field_types_wip) {
-                        // In this case, we guess that hasRuntimeBits() for this type is true,
-                        // and then later if our guess was incorrect, we emit a compile error.
-                        union_obj.assumed_runtime_bits = true;
-                        return true;
-                    }
-                    switch (strat) {
-                        .sema => |sema| _ = try sema.resolveTypeFields(ty),
-                        .eager => assert(union_obj.haveFieldTypes()),
-                        .lazy => if (!union_obj.haveFieldTypes()) return error.NeedLazy,
-                    }
-                    for (union_obj.fields.values()) |value| {
-                        if (try value.ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat))
-                            return true;
-                    } else {
-                        return false;
-                    }
-                },
-                .union_safety_tagged, .union_tagged => {
-                    const union_obj = ty.cast(Payload.Union).?.data;
-                    if (try union_obj.tag_ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat)) {
-                        return true;
-                    }
-
-                    switch (strat) {
-                        .sema => |sema| _ = try sema.resolveTypeFields(ty),
-                        .eager => assert(union_obj.haveFieldTypes()),
-                        .lazy => if (!union_obj.haveFieldTypes()) return error.NeedLazy,
-                    }
-                    for (union_obj.fields.values()) |value| {
-                        if (try value.ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat))
-                            return true;
-                    } else {
-                        return false;
-                    }
-                },
-
                 .array => return ty.arrayLen(mod) != 0 and
                     try ty.childType(mod).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat),
                 .array_sentinel => return ty.childType(mod).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat),
@@ -1795,10 +1733,40 @@ pub const Type = struct {
                     }
                 },

-                .union_type => @panic("TODO"),
+                .union_type => |union_type| {
+                    const union_obj = mod.unionPtr(union_type.index);
+                    switch (union_type.runtime_tag) {
+                        .none => {
+                            if (union_obj.status == .field_types_wip) {
+                                // In this case, we guess that hasRuntimeBits() for this type is true,
+                                // and then later if our guess was incorrect, we emit a compile error.
+                                union_obj.assumed_runtime_bits = true;
+                                return true;
+                            }
+                        },
+                        .safety, .tagged => {
+                            if (try union_obj.tag_ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat)) {
+                                return true;
+                            }
+                        },
+                    }
+                    switch (strat) {
+                        .sema => |sema| _ = try sema.resolveTypeFields(ty),
+                        .eager => assert(union_obj.haveFieldTypes()),
+                        .lazy => if (!union_obj.haveFieldTypes()) return error.NeedLazy,
+                    }
+                    for (union_obj.fields.values()) |value| {
+                        if (try value.ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat))
+                            return true;
+                    } else {
+                        return false;
+                    }
+                },
+
                 .opaque_type => true,

                 // values, not types
+                .un => unreachable,
                 .simple_value => unreachable,
                 .extern_func => unreachable,
                 .int => unreachable,
@@ -1847,8 +1815,6 @@ pub const Type = struct {
                 => ty.childType(mod).hasWellDefinedLayout(mod),

                 .optional => ty.isPtrLikeOptional(mod),
-                .@"union", .union_safety_tagged => ty.cast(Payload.Union).?.data.layout != .Auto,
-                .union_tagged => false,
             },
             else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
                 .int_type => true,
@@ -1912,10 +1878,14 @@ pub const Type = struct {
                     };
                     return struct_obj.layout != .Auto;
                 },
-                .union_type => @panic("TODO"),
+                .union_type => |union_type| switch (union_type.runtime_tag) {
+                    .none, .safety => mod.unionPtr(union_type.index).layout != .Auto,
+                    .tagged => false,
+                },
                 .opaque_type => false,

                 // values, not types
+                .un => unreachable,
                 .simple_value => unreachable,
                 .extern_func => unreachable,
                 .int => unreachable,
@@ -2146,14 +2116,6 @@ pub const Type = struct {
                 const int_tag_ty = try ty.intTagType(mod);
                 return AbiAlignmentAdvanced{ .scalar = int_tag_ty.abiAlignment(mod) };
             },
-            .@"union" => {
-                const union_obj = ty.castTag(.@"union").?.data;
-                return abiAlignmentAdvancedUnion(ty, mod, strat, union_obj, false);
-            },
-            .union_safety_tagged, .union_tagged => {
-                const union_obj = ty.cast(Payload.Union).?.data;
-                return abiAlignmentAdvancedUnion(ty, mod, strat, union_obj, true);
-            },

             .inferred_alloc_const,
             .inferred_alloc_mut,
@@ -2312,10 +2274,14 @@ pub const Type = struct {
                     }
                     return AbiAlignmentAdvanced{ .scalar = big_align };
                 },
-                .union_type => @panic("TODO"),
+                .union_type => |union_type| {
+                    const union_obj = mod.unionPtr(union_type.index);
+                    return abiAlignmentAdvancedUnion(ty, mod, strat, union_obj, union_type.hasTag());
+                },
                 .opaque_type => return AbiAlignmentAdvanced{ .scalar = 1 },

                 // values, not types
+                .un => unreachable,
                 .simple_value => unreachable,
                 .extern_func => unreachable,
                 .int => unreachable,
@@ -2508,14 +2474,6 @@ pub const Type = struct {
                 const int_tag_ty = try ty.intTagType(mod);
                 return AbiSizeAdvanced{ .scalar = int_tag_ty.abiSize(mod) };
             },
-            .@"union" => {
-                const union_obj = ty.castTag(.@"union").?.data;
-                return abiSizeAdvancedUnion(ty, mod, strat, union_obj, false);
-            },
-            .union_safety_tagged, .union_tagged => {
-                const union_obj = ty.cast(Payload.Union).?.data;
-                return abiSizeAdvancedUnion(ty, mod, strat, union_obj, true);
-            },

             .array => {
                 const payload = ty.castTag(.array).?.data;
@@ -2737,10 +2695,14 @@ pub const Type = struct {
                     return AbiSizeAdvanced{ .scalar = ty.structFieldOffset(field_count, mod) };
                 },
             },
-            .union_type => @panic("TODO"),
+            .union_type => |union_type| {
+                const union_obj = mod.unionPtr(union_type.index);
+                return abiSizeAdvancedUnion(ty, mod, strat, union_obj, union_type.hasTag());
+            },
             .opaque_type => unreachable, // no size available

             // values, not types
+            .un => unreachable,
             .simple_value => unreachable,
             .extern_func => unreachable,
             .int => unreachable,
@@ -2860,21 +2822,6 @@ pub const Type = struct {
                 return try bitSizeAdvanced(int_tag_ty, mod, opt_sema);
             },
-            .@"union", .union_safety_tagged, .union_tagged => {
-                if (opt_sema) |sema| _ = try sema.resolveTypeFields(ty);
-                if (ty.containerLayout(mod) != .Packed) {
-                    return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8;
-                }
-                const union_obj = ty.cast(Payload.Union).?.data;
-                assert(union_obj.haveFieldTypes());
-
-                var size: u64 = 0;
-                for (union_obj.fields.values()) |field| {
-                    size = @max(size, try bitSizeAdvanced(field.ty, mod, opt_sema));
-                }
-                return size;
-            },

             .array => {
                 const payload = ty.castTag(.array).?.data;
                 const elem_size = std.math.max(payload.elem_type.abiAlignment(mod), payload.elem_type.abiSize(mod));
@@ -2996,10 +2943,24 @@ pub const Type = struct {
                     return try struct_obj.backing_int_ty.bitSizeAdvanced(mod, opt_sema);
                 },
-                .union_type => @panic("TODO"),
+                .union_type => |union_type| {
+                    if (opt_sema) |sema| _ = try sema.resolveTypeFields(ty);
+                    if (ty.containerLayout(mod) != .Packed) {
+                        return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8;
+                    }
+                    const union_obj = mod.unionPtr(union_type.index);
+                    assert(union_obj.haveFieldTypes());
+
+                    var size: u64 = 0;
+                    for (union_obj.fields.values()) |field| {
+                        size = @max(size, try bitSizeAdvanced(field.ty, mod, opt_sema));
+                    }
+                    return size;
+                },
                 .opaque_type => unreachable,

                 // values, not types
+                .un => unreachable,
                 .simple_value => unreachable,
                 .extern_func => unreachable,
                 .int => unreachable,
@@ -3022,8 +2983,8 @@ pub const Type = struct {
                     return true;
                 },
                 .Union => {
-                    if (ty.cast(Payload.Union)) |union_ty| {
-                        return union_ty.data.haveLayout();
+                    if (mod.typeToUnion(ty)) |union_obj| {
+                        return union_obj.haveLayout();
                     }
                     return true;
                 },
@@ -3413,76 +3374,71 @@ pub const Type = struct {
     /// Returns the tag type of a union, if the type is a union and it has a tag type.
     /// Otherwise, returns `null`.
-    pub fn unionTagType(ty: Type) ?Type {
-        return switch (ty.tag()) {
-            .union_tagged => {
-                const union_obj = ty.castTag(.union_tagged).?.data;
-                assert(union_obj.haveFieldTypes());
-                return union_obj.tag_ty;
+    pub fn unionTagType(ty: Type, mod: *Module) ?Type {
+        return switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+            .union_type => |union_type| switch (union_type.runtime_tag) {
+                .tagged => {
+                    const union_obj = mod.unionPtr(union_type.index);
+                    assert(union_obj.haveFieldTypes());
+                    return union_obj.tag_ty;
+                },
+                else => null,
             },
-            else => null,
         };
     }

     /// Same as `unionTagType` but includes safety tag.
     /// Codegen should use this version.
-    pub fn unionTagTypeSafety(ty: Type) ?Type {
-        return switch (ty.tag()) {
-            .union_safety_tagged, .union_tagged => {
-                const union_obj = ty.cast(Payload.Union).?.data;
+    pub fn unionTagTypeSafety(ty: Type, mod: *Module) ?Type {
+        return switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+            .union_type => |union_type| {
+                if (!union_type.hasTag()) return null;
+                const union_obj = mod.unionPtr(union_type.index);
                 assert(union_obj.haveFieldTypes());
                 return union_obj.tag_ty;
             },
-            else => null,
         };
     }

     /// Asserts the type is a union; returns the tag type, even if the tag will
     /// not be stored at runtime.
-    pub fn unionTagTypeHypothetical(ty: Type) Type {
-        const union_obj = ty.cast(Payload.Union).?.data;
+    pub fn unionTagTypeHypothetical(ty: Type, mod: *Module) Type {
+        const union_obj = mod.typeToUnion(ty).?;
         assert(union_obj.haveFieldTypes());
         return union_obj.tag_ty;
     }

-    pub fn unionFields(ty: Type) Module.Union.Fields {
-        const union_obj = ty.cast(Payload.Union).?.data;
+    pub fn unionFields(ty: Type, mod: *Module) Module.Union.Fields {
+        const union_obj = mod.typeToUnion(ty).?;
         assert(union_obj.haveFieldTypes());
         return union_obj.fields;
     }

     pub fn unionFieldType(ty: Type, enum_tag: Value, mod: *Module) Type {
-        const union_obj = ty.cast(Payload.Union).?.data;
+        const union_obj = mod.typeToUnion(ty).?;
         const index = ty.unionTagFieldIndex(enum_tag, mod).?;
         assert(union_obj.haveFieldTypes());
         return union_obj.fields.values()[index].ty;
     }

     pub fn unionTagFieldIndex(ty: Type, enum_tag: Value, mod: *Module) ?usize {
-        const union_obj = ty.cast(Payload.Union).?.data;
+        const union_obj = mod.typeToUnion(ty).?;
         const index = union_obj.tag_ty.enumTagFieldIndex(enum_tag, mod) orelse return null;
         const name = union_obj.tag_ty.enumFieldName(index);
         return union_obj.fields.getIndex(name);
     }

     pub fn unionHasAllZeroBitFieldTypes(ty: Type, mod: *Module) bool {
-        return ty.cast(Payload.Union).?.data.hasAllZeroBitFieldTypes(mod);
+        const union_obj = mod.typeToUnion(ty).?;
+        return union_obj.hasAllZeroBitFieldTypes(mod);
     }

     pub fn unionGetLayout(ty: Type, mod: *Module) Module.Union.Layout {
-        switch (ty.tag()) {
-            .@"union" => {
-                const union_obj = ty.castTag(.@"union").?.data;
-                return union_obj.getLayout(mod, false);
-            },
-            .union_safety_tagged, .union_tagged => {
-                const union_obj = ty.cast(Payload.Union).?.data;
-                return union_obj.getLayout(mod, true);
-            },
-            else => unreachable,
-        }
+        const union_type = mod.intern_pool.indexToKey(ty.ip_index).union_type;
+        const union_obj = mod.unionPtr(union_type.index);
+        return union_obj.getLayout(mod, union_type.hasTag());
     }

     pub fn containerLayout(ty: Type, mod: *Module) std.builtin.Type.ContainerLayout {
@@ -3490,9 +3446,6 @@ pub const Type = struct {
             .empty_struct_type => .Auto,
             .none => switch (ty.tag()) {
                 .tuple, .anon_struct => .Auto,
-                .@"union" => ty.castTag(.@"union").?.data.layout,
-                .union_safety_tagged => ty.castTag(.union_safety_tagged).?.data.layout,
-                .union_tagged => ty.castTag(.union_tagged).?.data.layout,
                 else => unreachable,
             },
             else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
                 .struct_type => |struct_type| {
                     const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return .Auto;
                     return struct_obj.layout;
                 },
+                .union_type => |union_type| {
+                    const union_obj = mod.unionPtr(union_type.index);
+                    return union_obj.layout;
+                },
                 else => unreachable,
             },
         };
@@ -3777,6 +3734,7 @@ pub const Type = struct {
             .opaque_type => unreachable,

             // values, not types
+            .un => unreachable,
             .simple_value => unreachable,
             .extern_func => unreachable,
             .int => unreachable,
@@ -4038,16 +3996,6 @@ pub const Type = struct {
                     return null;
                 }
             },
-            .@"union", .union_safety_tagged, .union_tagged => {
-                const union_obj = ty.cast(Payload.Union).?.data;
-                const tag_val = (try union_obj.tag_ty.onePossibleValue(mod)) orelse return null;
-                if (union_obj.fields.count() == 0) return Value.@"unreachable";
-                const only_field = union_obj.fields.values()[0];
-                const val_val = (try only_field.ty.onePossibleValue(mod)) orelse return null;
-                _ = tag_val;
-                _ = val_val;
-                return Value.empty_struct;
-            },

             .array => {
                 if (ty.arrayLen(mod) == 0)
@@ -4153,10 +4101,23 @@ pub const Type = struct {
                     return empty.toValue();
                 },
-                .union_type => @panic("TODO"),
+                .union_type => |union_type| {
+                    const union_obj = mod.unionPtr(union_type.index);
+                    const tag_val = (try union_obj.tag_ty.onePossibleValue(mod)) orelse return null;
+                    if (union_obj.fields.count() == 0) return Value.@"unreachable";
+                    const only_field = union_obj.fields.values()[0];
+                    const val_val = (try only_field.ty.onePossibleValue(mod)) orelse return null;
+                    const only = try mod.intern(.{ .un = .{
+                        .ty = ty.ip_index,
+                        .tag = tag_val.ip_index,
+                        .val = val_val.ip_index,
+                    } });
+                    return only.toValue();
+                },
                 .opaque_type => return null,

                 // values, not types
+                .un => unreachable,
                 .simple_value => unreachable,
                 .extern_func => unreachable,
                 .int => unreachable,
@@ -4216,20 +4177,6 @@ pub const Type = struct {
                 return false;
             },
-            .@"union", .union_safety_tagged, .union_tagged => {
-                const union_obj = ty.cast(Type.Payload.Union).?.data;
-                switch (union_obj.requires_comptime) {
-                    .wip, .unknown => {
-                        // Return false to avoid incorrect dependency loops.
-                        // This will be handled correctly once merged with
-                        // `Sema.typeRequiresComptime`.
-                        return false;
-                    },
-                    .no => return false,
-                    .yes => return true,
-                }
-            },
-
             .error_union => return ty.errorUnionPayload().comptimeOnly(mod),
             .anyframe_T => {
                 const child_ty = ty.castTag(.anyframe_T).?.data;
@@ -4321,10 +4268,24 @@ pub const Type = struct {
                     }
                 },
-                .union_type => @panic("TODO"),
+                .union_type => |union_type| {
+                    const union_obj = mod.unionPtr(union_type.index);
+                    switch (union_obj.requires_comptime) {
+                        .wip, .unknown => {
+                            // Return false to avoid incorrect dependency loops.
+                            // This will be handled correctly once merged with
+                            // `Sema.typeRequiresComptime`.
+                            return false;
+                        },
+                        .no => return false,
+                        .yes => return true,
+                    }
+                },
+
                 .opaque_type => false,

                 // values, not types
+                .un => unreachable,
                 .simple_value => unreachable,
                 .extern_func => unreachable,
                 .int => unreachable,
@@ -4378,15 +4339,13 @@ pub const Type = struct {
             .none => switch (ty.tag()) {
                 .enum_full => ty.castTag(.enum_full).?.data.namespace.toOptional(),
                 .enum_nonexhaustive => ty.castTag(.enum_nonexhaustive).?.data.namespace.toOptional(),
-                .@"union" => ty.castTag(.@"union").?.data.namespace.toOptional(),
-                .union_safety_tagged => ty.castTag(.union_safety_tagged).?.data.namespace.toOptional(),
-                .union_tagged => ty.castTag(.union_tagged).?.data.namespace.toOptional(),
-
                 else => .none,
             },
             else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
                 .opaque_type => |opaque_type| opaque_type.namespace.toOptional(),
                 .struct_type => |struct_type| struct_type.namespace,
+                .union_type => |union_type| mod.unionPtr(union_type.index).namespace.toOptional(),
+
                 else => .none,
             },
         };
@@ -4474,20 +4433,23 @@ pub const Type = struct {
     /// Asserts the type is an enum or a union.
     pub fn intTagType(ty: Type, mod: *Module) !Type {
-        switch (ty.tag()) {
-            .enum_full, .enum_nonexhaustive => return ty.cast(Payload.EnumFull).?.data.tag_ty,
-            .enum_numbered => return ty.castTag(.enum_numbered).?.data.tag_ty,
-            .enum_simple => {
-                const enum_simple = ty.castTag(.enum_simple).?.data;
-                const field_count = enum_simple.fields.count();
-                const bits: u16 = if (field_count == 0) 0 else std.math.log2_int_ceil(usize, field_count);
-                return mod.intType(.unsigned, bits);
+        return switch (ty.ip_index) {
+            .none => switch (ty.tag()) {
+                .enum_full, .enum_nonexhaustive => ty.cast(Payload.EnumFull).?.data.tag_ty,
+                .enum_numbered => ty.castTag(.enum_numbered).?.data.tag_ty,
+                .enum_simple => {
+                    const enum_simple = ty.castTag(.enum_simple).?.data;
+                    const field_count = enum_simple.fields.count();
+                    const bits: u16 = if (field_count == 0) 0 else std.math.log2_int_ceil(usize, field_count);
+                    return mod.intType(.unsigned, bits);
+                },
+                else => unreachable,
             },
-            .union_tagged => {
-                return ty.castTag(.union_tagged).?.data.tag_ty.intTagType(mod);
+            else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+                .union_type => |union_type| mod.unionPtr(union_type.index).tag_ty.intTagType(mod),
+                else => unreachable,
             },
-            else => unreachable,
-        }
+        };
     }

     pub fn isNonexhaustiveEnum(ty: Type) bool {
@@ -4663,10 +4625,6 @@ pub const Type = struct {
     pub fn structFieldType(ty: Type, index: usize, mod: *Module) Type {
         return switch (ty.ip_index) {
             .none => switch (ty.tag()) {
-                .@"union", .union_safety_tagged, .union_tagged => {
-                    const union_obj = ty.cast(Payload.Union).?.data;
-                    return union_obj.fields.values()[index].ty;
-                },
                 .tuple => return ty.castTag(.tuple).?.data.types[index],
                 .anon_struct => return ty.castTag(.anon_struct).?.data.types[index],
                 else => unreachable,
             },
             else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
                 .struct_type => |struct_type| {
                     const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
                     return struct_obj.fields.values()[index].ty;
                 },
+                .union_type => |union_type| {
+                    const union_obj = mod.unionPtr(union_type.index);
+                    return union_obj.fields.values()[index].ty;
+                },
                 else => unreachable,
             },
         };
     }

     pub fn structFieldAlign(ty: Type, index: usize, mod: *Module) u32 {
         switch (ty.ip_index) {
             .none => switch (ty.tag()) {
-                .@"union", .union_safety_tagged, .union_tagged => {
-                    const union_obj = ty.cast(Payload.Union).?.data;
-                    return union_obj.fields.values()[index].normalAlignment(mod);
-                },
                 .tuple => return ty.castTag(.tuple).?.data.types[index].abiAlignment(mod),
                 .anon_struct => return ty.castTag(.anon_struct).?.data.types[index].abiAlignment(mod),
                 else => unreachable,
             },
             else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
                 .struct_type => |struct_type| {
                     const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
                     assert(struct_obj.layout != .Packed);
                     return struct_obj.fields.values()[index].alignment(mod, struct_obj.layout);
                 },
+                .union_type => |union_type| {
+                    const union_obj = mod.unionPtr(union_type.index);
+                    return union_obj.fields.values()[index].normalAlignment(mod);
+                },
                 else => unreachable,
             },
         }
@@ -4889,18 +4851,6 @@ pub const Type = struct {
                 return offset;
             },
-            .@"union" => return 0,
-            .union_safety_tagged, .union_tagged => {
-                const union_obj = ty.cast(Payload.Union).?.data;
-                const layout = union_obj.getLayout(mod, true);
-                if (layout.tag_align >= layout.payload_align) {
-                    // {Tag, Payload}
-                    return std.mem.alignForwardGeneric(u64, layout.tag_size, layout.payload_align);
-                } else {
-                    // {Payload, Tag}
-                    return 0;
-                }
-            },
             else => unreachable,
         },
         else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
@@ -4917,6 +4867,20 @@ pub const Type = struct {
                 return std.mem.alignForwardGeneric(u64, it.offset, @max(it.big_align, 1));
             },
+            .union_type => |union_type| {
+                if (!union_type.hasTag())
+                    return 0;
+                const union_obj = mod.unionPtr(union_type.index);
+                const layout = union_obj.getLayout(mod, true);
+                if (layout.tag_align >= layout.payload_align) {
+                    // {Tag, Payload}
+                    return std.mem.alignForwardGeneric(u64, layout.tag_size, layout.payload_align);
+                } else {
+                    // {Payload, Tag}
+                    return 0;
+                }
+            },
+
             else => unreachable,
         },
     }
@@ -4946,10 +4910,6 @@ pub const Type = struct {
                 const error_set = ty.castTag(.error_set).?.data;
                 return error_set.srcLoc(mod);
             },
-            .@"union", .union_safety_tagged, .union_tagged => {
-                const union_obj = ty.cast(Payload.Union).?.data;
-                return union_obj.srcLoc(mod);
-            },

             else => return null,
         },
        else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
             .struct_type => |struct_type| {
                 const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
                 return struct_obj.srcLoc(mod);
             },
-            .union_type => @panic("TODO"),
+            .union_type => |union_type| {
+                const union_obj = mod.unionPtr(union_type.index);
+                return union_obj.srcLoc(mod);
+            },
             .opaque_type => |opaque_type| mod.opaqueSrcLoc(opaque_type),
             else => null,
         },
@@ -4985,10 +4948,6 @@ pub const Type = struct {
                 const error_set = ty.castTag(.error_set).?.data;
                 return error_set.owner_decl;
             },
-            .@"union", .union_safety_tagged, .union_tagged => {
-                const union_obj = ty.cast(Payload.Union).?.data;
-                return union_obj.owner_decl;
-            },

             else => return null,
         },
        else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
             .struct_type => |struct_type| {
                 const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return null;
                 return struct_obj.owner_decl;
             },
-            .union_type => @panic("TODO"),
+            .union_type => |union_type| {
+                const union_obj = mod.unionPtr(union_type.index);
+                return union_obj.owner_decl;
+            },
             .opaque_type => |opaque_type| opaque_type.decl,
             else => null,
         },
@@ -5039,9 +5001,6 @@ pub const Type = struct {
         /// The type is the inferred error set of a specific function.
         error_set_inferred,
         error_set_merged,
-        @"union",
-        union_safety_tagged,
-        union_tagged,
         enum_simple,
         enum_numbered,
         enum_full,
@@ -5070,7 +5029,6 @@ pub const Type = struct {
                 .function => Payload.Function,
                 .error_union => Payload.ErrorUnion,
                 .error_set_single => Payload.Name,
-                .@"union", .union_safety_tagged, .union_tagged => Payload.Union,
                 .enum_full, .enum_nonexhaustive => Payload.EnumFull,
                 .enum_simple => Payload.EnumSimple,
                 .enum_numbered => Payload.EnumNumbered,
@@ -5373,11 +5331,6 @@ pub const Type = struct {
             };
         };
 
-        pub const Union = struct {
-            base: Payload,
-            data: *Module.Union,
-        };
-
         pub const EnumFull = struct {
             base: Payload,
             data: *Module.EnumFull,
diff --git a/src/value.zig b/src/value.zig
index 3992888b3d..dfeaa44428 100644
--- a/src/value.zig
+++ b/src/value.zig
@@ -715,7 +715,7 @@ pub const Value = struct {
     }
 
     pub fn tagName(val: Value, ty: Type, mod: *Module) []const u8 {
-        if (ty.zigTypeTag(mod) == .Union) return val.unionTag().tagName(ty.unionTagTypeHypothetical(), mod);
+        if (ty.zigTypeTag(mod) == .Union) return val.unionTag().tagName(ty.unionTagTypeHypothetical(mod), mod);
 
         const field_index = switch (val.tag()) {
             .enum_field_index => val.castTag(.enum_field_index).?.data,
@@ -1138,7 +1138,7 @@ pub const Value = struct {
                     .Extern => unreachable, // Handled in non-packed writeToMemory
                     .Packed => {
                         const field_index = ty.unionTagFieldIndex(val.unionTag(), mod);
-                        const field_type = ty.unionFields().values()[field_index.?].ty;
+                        const field_type = ty.unionFields(mod).values()[field_index.?].ty;
                         const field_val = try val.fieldValue(field_type, mod, field_index.?);
 
                         return field_val.writeToPackedMemory(field_type, mod, buffer, bit_offset);
@@ -2021,7 +2021,7 @@ pub const Value = struct {
                 const b_union = b.castTag(.@"union").?.data;
                 switch (ty.containerLayout(mod)) {
                     .Packed, .Extern => {
-                        const tag_ty = ty.unionTagTypeHypothetical();
+                        const tag_ty = ty.unionTagTypeHypothetical(mod);
                         if (!(try eqlAdvanced(a_union.tag, tag_ty, b_union.tag, tag_ty, mod, opt_sema))) {
                             // In this case, we must disregard mismatching tags and compare
                             // based on the in-memory bytes of the payloads.
@@ -2029,7 +2029,7 @@ pub const Value = struct {
                         }
                     },
                     .Auto => {
-                        const tag_ty = ty.unionTagTypeHypothetical();
+                        const tag_ty = ty.unionTagTypeHypothetical(mod);
                         if (!(try eqlAdvanced(a_union.tag, tag_ty, b_union.tag, tag_ty, mod, opt_sema))) {
                             return false;
                         }
@@ -2118,7 +2118,7 @@ pub const Value = struct {
                 return false;
             }
             const field_name = tuple.names[0];
-            const union_obj = ty.cast(Type.Payload.Union).?.data;
+            const union_obj = mod.typeToUnion(ty).?;
             const field_index = union_obj.fields.getIndex(field_name) orelse return false;
             const tag_and_val = b.castTag(.@"union").?.data;
             var field_tag_buf: Value.Payload.U32 = .{
@@ -2297,7 +2297,7 @@ pub const Value = struct {
             },
             .Union => {
                 const union_obj = val.cast(Payload.Union).?.data;
-                if (ty.unionTagType()) |tag_ty| {
+                if (ty.unionTagType(mod)) |tag_ty| {
                     union_obj.tag.hash(tag_ty, hasher, mod);
                 }
                 const active_field_ty = ty.unionFieldType(union_obj.tag, mod);
-- 
cgit v1.2.3


From 5881a2d63771b070107bdc2325aa1bc455b2d926 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Fri, 12 May 2023 00:07:32 -0700
Subject: stage2: move enum types into the InternPool

Unlike unions and structs, enums are actually *encoded* into the
InternPool directly, rather than using the SegmentedList trick. This
makes them quite compact and greatly improves the ergonomics of using
enum types throughout the compiler.

It did, however, require introducing a new concept to the InternPool: an
"incomplete" item - something that is added to gain a permanent Index,
but which is then mutated in place. This is necessary because enum tag
values and tag types may reference the namespace created by the enum
itself; constructing that namespace and decl and calling analyzeDecl on
the decl requires the decl value, which requires the enum type, which in
turn requires an InternPool index to already be assigned and meaningful.

The API for updating enums in place turned out to be quite slick and
efficient - the methods directly populate pre-allocated arrays and
return the information necessary to output the same compilation errors
as before.
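As a rough illustration (not part of this commit), a caller drives the
incomplete-enum API like this, using only functions introduced in the diff
below; `mod`, `gpa`, `decl_index`, and `tag_ty` are assumed to be in scope:

    const incomplete_enum = try mod.intern_pool.getIncompleteEnum(gpa, .{
        .decl = decl_index,
        .namespace = .none,
        .fields_len = 2,
        .has_values = false,
        .tag_mode = .explicit,
    });
    // The integer tag type may be supplied later, once it has been resolved.
    incomplete_enum.setTagType(&mod.intern_pool, tag_ty.ip_index);
    // addFieldName returns the index of a previous field with the same name,
    // which is the information needed for a "duplicate enum field" error.
    const name = try mod.intern_pool.getOrPutString(gpa, "first");
    if (try incomplete_enum.addFieldName(&mod.intern_pool, gpa, name)) |prev| {
        _ = prev; // duplicate: report an error pointing at `prev`
    }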
---
 src/AstGen.zig              |   4 +-
 src/InternPool.zig          | 465 ++++++++++++++++++++++----
 src/Module.zig              | 195 ++---------
 src/Sema.zig                | 774 +++++++++++++++++++-------------------------
 src/TypedValue.zig          |   2 +-
 src/arch/wasm/CodeGen.zig   |  50 +--
 src/arch/x86_64/CodeGen.zig |   9 +-
 src/codegen.zig             |  32 +-
 src/codegen/c.zig           |  32 +-
 src/codegen/llvm.zig        |  64 ++--
 src/link/Dwarf.zig          |  21 +-
 src/type.zig                | 402 +++++------------------
 src/value.zig               |  62 +---
 13 files changed, 934 insertions(+), 1178 deletions(-)

(limited to 'src/arch')

diff --git a/src/AstGen.zig b/src/AstGen.zig
index edd6099127..998e08ba04 100644
--- a/src/AstGen.zig
+++ b/src/AstGen.zig
@@ -10694,8 +10694,8 @@ fn identAsString(astgen: *AstGen, ident_token: Ast.TokenIndex) !u32 {
     const string_bytes = &astgen.string_bytes;
     const str_index = @intCast(u32, string_bytes.items.len);
     try astgen.appendIdentStr(ident_token, string_bytes);
-    const key = string_bytes.items[str_index..];
-    const gop = try astgen.string_table.getOrPutContextAdapted(gpa, @as([]const u8, key), StringIndexAdapter{
+    const key: []const u8 = string_bytes.items[str_index..];
+    const gop = try astgen.string_table.getOrPutContextAdapted(gpa, key, StringIndexAdapter{
         .bytes = string_bytes,
     }, StringIndexContext{
         .bytes = string_bytes,
diff --git a/src/InternPool.zig b/src/InternPool.zig
index 4c7b7016ea..6ff68a7583 100644
--- a/src/InternPool.zig
+++ b/src/InternPool.zig
@@ -40,6 +40,14 @@ unions_free_list: std.ArrayListUnmanaged(Module.Union.Index) = .{},
 /// to provide lookup.
 maps: std.ArrayListUnmanaged(std.AutoArrayHashMapUnmanaged(void, void)) = .{},
 
+/// Used for finding the index inside `string_bytes`.
+string_table: std.HashMapUnmanaged(
+    u32,
+    void,
+    std.hash_map.StringIndexContext,
+    std.hash_map.default_max_load_percentage,
+) = .{},
+
 const std = @import("std");
 const Allocator = std.mem.Allocator;
 const assert = std.debug.assert;
@@ -68,6 +76,11 @@ const KeyAdapter = struct {
 pub const OptionalMapIndex = enum(u32) {
     none = std.math.maxInt(u32),
     _,
+
+    pub fn unwrap(oi: OptionalMapIndex) ?MapIndex {
+        if (oi == .none) return null;
+        return @intToEnum(MapIndex, @enumToInt(oi));
+    }
 };
 
 /// An index into `maps`.
@@ -83,6 +96,10 @@ pub const MapIndex = enum(u32) {
 pub const NullTerminatedString = enum(u32) {
     _,
 
+    pub fn toOptional(self: NullTerminatedString) OptionalNullTerminatedString {
+        return @intToEnum(OptionalNullTerminatedString, @enumToInt(self));
+    }
+
     const Adapter = struct {
         strings: []const NullTerminatedString,
 
@@ -102,6 +119,11 @@ pub const NullTerminatedString = enum(u32) {
 pub const OptionalNullTerminatedString = enum(u32) {
     none = std.math.maxInt(u32),
     _,
+
+    pub fn unwrap(oi: OptionalNullTerminatedString) ?NullTerminatedString {
+        if (oi == .none) return null;
+        return @intToEnum(NullTerminatedString, @enumToInt(oi));
+    }
 };
 
 pub const Key = union(enum) {
@@ -242,13 +264,75 @@ pub const Key = union(enum) {
         /// Entries are in declaration order, same as `fields`.
         /// If this is empty, it means the enum tags are auto-numbered.
         values: []const Index,
-        /// true if zig inferred this tag type, false if user specified it
-        tag_ty_inferred: bool,
+        tag_mode: TagMode,
         /// This is ignored by `get` but will always be provided by `indexToKey`.
         names_map: OptionalMapIndex = .none,
         /// This is ignored by `get` but will be provided by `indexToKey` when
        /// a value map exists.
         values_map: OptionalMapIndex = .none,
+
+        pub const TagMode = enum {
+            /// The integer tag type was auto-numbered by zig.
+            auto,
+            /// The integer tag type was provided by the enum declaration, and the enum
+            /// is exhaustive.
+            explicit,
+            /// The integer tag type was provided by the enum declaration, and the enum
+            /// is non-exhaustive.
+            nonexhaustive,
+        };
+
+        /// Look up field index based on field name.
+        pub fn nameIndex(self: EnumType, ip: InternPool, name: NullTerminatedString) ?usize {
+            const map = &ip.maps.items[@enumToInt(self.names_map.unwrap().?)];
+            const adapter: NullTerminatedString.Adapter = .{ .strings = self.names };
+            return map.getIndexAdapted(name, adapter);
+        }
+
+        /// Look up field index based on tag value.
+        /// Asserts that `values_map` is not `none`.
+        /// This function returns `null` when `tag_val` does not have the
+        /// integer tag type of the enum.
+        pub fn tagValueIndex(self: EnumType, ip: InternPool, tag_val: Index) ?usize {
+            assert(tag_val != .none);
+            const map = &ip.maps.items[@enumToInt(self.values_map.unwrap().?)];
+            const adapter: Index.Adapter = .{ .indexes = self.values };
+            return map.getIndexAdapted(tag_val, adapter);
+        }
+    };
+
+    pub const IncompleteEnumType = struct {
+        /// Same as corresponding `EnumType` field.
+        decl: Module.Decl.Index,
+        /// Same as corresponding `EnumType` field.
+        namespace: Module.Namespace.OptionalIndex,
+        /// The field names and field values are not known yet, but
+        /// the number of fields must be known ahead of time.
+        fields_len: u32,
+        /// This information is needed so that the size does not change
+        /// later when populating field values.
+        has_values: bool,
+        /// Same as corresponding `EnumType` field.
+        tag_mode: EnumType.TagMode,
+        /// This may be updated via `setTagType` later.
+        tag_ty: Index = .none,
+
+        pub fn toEnumType(self: @This()) EnumType {
+            return .{
+                .decl = self.decl,
+                .namespace = self.namespace,
+                .tag_ty = self.tag_ty,
+                .tag_mode = self.tag_mode,
+                .names = &.{},
+                .values = &.{},
+            };
+        }
+
+        /// Only the decl is used for hashing and equality, so we can construct
+        /// this minimal key for use with `map`.
+        pub fn toKey(self: @This()) Key {
+            return .{ .enum_type = self.toEnumType() };
+        }
+    };
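As a rough illustration (not part of this commit) of the lookup helpers
above: given an interned type and an interned field name, a caller can
resolve a field index directly from the key - this mirrors the `nameIndex`
call Sema makes in `zirReify` further down; `ip`, `ty`, and `field_name`
are assumed to be in scope:

    const enum_type = ip.indexToKey(ty.ip_index).enum_type;
    if (enum_type.nameIndex(ip, field_name)) |field_index| {
        // `field_index` is the declaration-order index of the field.
        _ = field_index;
    }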
 
     pub const Int = struct {
@@ -946,12 +1030,18 @@ pub const Tag = enum(u8) {
     /// An error union type.
     /// data is payload to ErrorUnion.
     type_error_union,
-    /// An enum type with an explicitly provided integer tag type.
-    /// data is payload index to `EnumExplicit`.
-    type_enum_explicit,
     /// An enum type with auto-numbered tag values.
+    /// The enum is exhaustive.
     /// data is payload index to `EnumAuto`.
     type_enum_auto,
+    /// An enum type with an explicitly provided integer tag type.
+    /// The enum is exhaustive.
+    /// data is payload index to `EnumExplicit`.
+    type_enum_explicit,
+    /// An enum type with an explicitly provided integer tag type.
+    /// The enum is non-exhaustive.
+    /// data is payload index to `EnumExplicit`.
+    type_enum_nonexhaustive,
     /// A type that can be represented with only an enum tag.
     /// data is SimpleType enum value.
     simple_type,
@@ -1302,9 +1392,11 @@ pub fn deinit(ip: *InternPool, gpa: Allocator) void {
     ip.unions_free_list.deinit(gpa);
     ip.allocated_unions.deinit(gpa);
 
-    for (ip.maps) |*map| map.deinit(gpa);
+    for (ip.maps.items) |*map| map.deinit(gpa);
     ip.maps.deinit(gpa);
 
+    ip.string_table.deinit(gpa);
+
     ip.* = undefined;
 }
 
@@ -1421,33 +1513,13 @@ pub fn indexToKey(ip: InternPool, index: Index) Key {
                 .tag_ty = ip.getEnumIntTagType(enum_auto.data.fields_len),
                 .names = names,
                 .values = &.{},
-                .tag_ty_inferred = true,
+                .tag_mode = .auto,
                 .names_map = enum_auto.data.names_map.toOptional(),
                 .values_map = .none,
             } };
         },
-        .type_enum_explicit => {
-            const enum_explicit = ip.extraDataTrail(EnumExplicit, data);
-            const names = @ptrCast(
-                []const NullTerminatedString,
-                ip.extra.items[enum_explicit.end..][0..enum_explicit.data.fields_len],
-            );
-            const values = if (enum_explicit.data.values_map != .none) @ptrCast(
-                []const Index,
-                ip.extra.items[enum_explicit.end + names.len ..][0..enum_explicit.data.fields_len],
-            ) else &[0]Index{};
-
-            return .{ .enum_type = .{
-                .decl = enum_explicit.data.decl,
-                .namespace = enum_explicit.data.namespace,
-                .tag_ty = enum_explicit.data.int_tag_type,
-                .names = names,
-                .values = values,
-                .tag_ty_inferred = false,
-                .names_map = enum_explicit.data.names_map.toOptional(),
-                .values_map = enum_explicit.data.values_map,
-            } };
-        },
+        .type_enum_explicit => indexToKeyEnum(ip, data, .explicit),
+        .type_enum_nonexhaustive => indexToKeyEnum(ip, data, .nonexhaustive),
 
         .opt_null => .{ .opt = .{
             .ty = @intToEnum(Index, data),
@@ -1531,6 +1603,29 @@ fn getEnumIntTagType(ip: InternPool, fields_len: u32) Index {
     } });
 }
 
+fn indexToKeyEnum(ip: InternPool, data: u32, tag_mode: Key.EnumType.TagMode) Key {
+    const enum_explicit = ip.extraDataTrail(EnumExplicit, data);
+    const names = @ptrCast(
+        []const NullTerminatedString,
+        ip.extra.items[enum_explicit.end..][0..enum_explicit.data.fields_len],
+    );
+    const values = if (enum_explicit.data.values_map != .none) @ptrCast(
+        []const Index,
+        ip.extra.items[enum_explicit.end + names.len ..][0..enum_explicit.data.fields_len],
+    ) else &[0]Index{};
+
+    return .{ .enum_type = .{
+        .decl = enum_explicit.data.decl,
+        .namespace = enum_explicit.data.namespace,
+        .tag_ty = enum_explicit.data.int_tag_type,
+        .names = names,
+        .values = values,
+        .tag_mode = tag_mode,
+        .names_map = enum_explicit.data.names_map.toOptional(),
+        .values_map = enum_explicit.data.values_map,
+    } };
+}
+
 fn indexToKeyBigInt(ip: InternPool, limb_index: u32, positive: bool) Key {
     const int_info = ip.limbData(Int, limb_index);
     return .{ .int = .{
@@ -1696,47 +1791,29 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
             assert(enum_type.names_map == .none);
             assert(enum_type.values_map == .none);
 
-            const names_map = try ip.addMap(gpa);
-            try addStringsToMap(ip, gpa, names_map, enum_type.names);
+            switch (enum_type.tag_mode) {
+                .auto => {
+                    const names_map = try ip.addMap(gpa);
+                    try addStringsToMap(ip, gpa, names_map, enum_type.names);
 
-            const fields_len = @intCast(u32, enum_type.names.len);
-
-            if (enum_type.tag_ty_inferred) {
-                try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(EnumAuto).Struct.fields.len +
-                    fields_len);
-                ip.items.appendAssumeCapacity(.{
-                    .tag = .type_enum_auto,
-                    .data = ip.addExtraAssumeCapacity(EnumAuto{
-                        .decl = enum_type.decl,
-                        .namespace = enum_type.namespace,
-                        .names_map = names_map,
-                        .fields_len = fields_len,
-                    }),
-                });
-                ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, enum_type.names));
-                return @intToEnum(Index, ip.items.len - 1);
+                    const fields_len = @intCast(u32, enum_type.names.len);
+                    try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(EnumAuto).Struct.fields.len +
+                        fields_len);
+                    ip.items.appendAssumeCapacity(.{
+                        .tag = .type_enum_auto,
+                        .data = ip.addExtraAssumeCapacity(EnumAuto{
+                            .decl = enum_type.decl,
+                            .namespace = enum_type.namespace,
+                            .names_map = names_map,
+                            .fields_len = fields_len,
+                        }),
+                    });
+                    ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, enum_type.names));
+                    return @intToEnum(Index, ip.items.len - 1);
+                },
+                .explicit => return finishGetEnum(ip, gpa, enum_type, .type_enum_explicit),
+                .nonexhaustive => return finishGetEnum(ip, gpa, enum_type, .type_enum_nonexhaustive),
             }
-
-            const values_map: OptionalMapIndex = if (enum_type.values.len == 0) .none else m: {
-                const values_map = try ip.addMap(gpa);
-                try addIndexesToMap(ip, gpa, values_map, enum_type.values);
-                break :m values_map.toOptional();
-            };
-            try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(EnumExplicit).Struct.fields.len +
-                fields_len);
-            ip.items.appendAssumeCapacity(.{
-                .tag = .type_enum_auto,
-                .data = ip.addExtraAssumeCapacity(EnumExplicit{
-                    .decl = enum_type.decl,
-                    .namespace = enum_type.namespace,
-                    .int_tag_type = enum_type.tag_ty,
-                    .fields_len = fields_len,
-                    .names_map = names_map,
-                    .values_map = values_map,
-                }),
-            });
-            ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, enum_type.names));
-            ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, enum_type.values));
         },
         .extern_func => @panic("TODO"),
 
@@ -1934,8 +2011,206 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
     return @intToEnum(Index, ip.items.len - 1);
 }
 
-pub fn getAssumeExists(ip: InternPool, key: Key) Index {
-    const adapter: KeyAdapter = .{ .intern_pool = &ip };
+/// Provides API for completing an enum type after calling `getIncompleteEnum`.
+pub const IncompleteEnumType = struct {
+    index: Index,
+    tag_ty_index: u32,
+    names_map: MapIndex,
+    names_start: u32,
+    values_map: OptionalMapIndex,
+    values_start: u32,
+
+    pub fn setTagType(self: @This(), ip: *InternPool, tag_ty: Index) void {
+        assert(tag_ty != .none);
+        ip.extra.items[self.tag_ty_index] = @enumToInt(tag_ty);
+    }
+
+    /// Returns the already-existing field with the same name, if any.
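+    /// Otherwise the name is appended and `null` is returned; callers use a
+    /// non-null result to report a duplicate-field error at that index.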
+    pub fn addFieldName(
+        self: @This(),
+        ip: *InternPool,
+        gpa: Allocator,
+        name: NullTerminatedString,
+    ) Allocator.Error!?u32 {
+        const map = &ip.maps.items[@enumToInt(self.names_map)];
+        const field_index = map.count();
+        const strings = ip.extra.items[self.names_start..][0..field_index];
+        const adapter: NullTerminatedString.Adapter = .{
+            .strings = @ptrCast([]const NullTerminatedString, strings),
+        };
+        const gop = try map.getOrPutAdapted(gpa, name, adapter);
+        if (gop.found_existing) return @intCast(u32, gop.index);
+        ip.extra.items[self.names_start + field_index] = @enumToInt(name);
+        return null;
+    }
+
+    /// Returns the already-existing field with the same value, if any.
+    /// Make sure the type of the value has the integer tag type of the enum.
+    pub fn addFieldValue(
+        self: @This(),
+        ip: *InternPool,
+        gpa: Allocator,
+        value: Index,
+    ) Allocator.Error!?u32 {
+        const map = &ip.maps.items[@enumToInt(self.values_map.unwrap().?)];
+        const field_index = map.count();
+        const indexes = ip.extra.items[self.values_start..][0..field_index];
+        const adapter: Index.Adapter = .{
+            .indexes = @ptrCast([]const Index, indexes),
+        };
+        const gop = try map.getOrPutAdapted(gpa, value, adapter);
+        if (gop.found_existing) return @intCast(u32, gop.index);
+        ip.extra.items[self.values_start + field_index] = @enumToInt(value);
+        return null;
+    }
+};
+
+/// This is used to create an enum type in the `InternPool`, with the ability
+/// to update the tag type, field names, and field values later.
+pub fn getIncompleteEnum(
+    ip: *InternPool,
+    gpa: Allocator,
+    enum_type: Key.IncompleteEnumType,
+) Allocator.Error!InternPool.IncompleteEnumType {
+    switch (enum_type.tag_mode) {
+        .auto => return getIncompleteEnumAuto(ip, gpa, enum_type),
+        .explicit => return getIncompleteEnumExplicit(ip, gpa, enum_type, .type_enum_explicit),
+        .nonexhaustive => return getIncompleteEnumExplicit(ip, gpa, enum_type, .type_enum_nonexhaustive),
+    }
+}
+
+pub fn getIncompleteEnumAuto(
+    ip: *InternPool,
+    gpa: Allocator,
+    enum_type: Key.IncompleteEnumType,
+) Allocator.Error!InternPool.IncompleteEnumType {
+    // Although the integer tag type will not be stored in the `EnumAuto` struct,
+    // `InternPool` logic depends on it being present so that `typeOf` can be infallible.
+    // Ensure it is present here:
+    _ = try ip.get(gpa, .{ .int_type = .{
+        .bits = if (enum_type.fields_len == 0) 0 else std.math.log2_int_ceil(u32, enum_type.fields_len),
+        .signedness = .unsigned,
+    } });
+
+    // We must keep the map in sync with `items`. The hash and equality functions
+    // for enum types only look at the decl field, which is present even in
+    // an `IncompleteEnumType`.
+    const adapter: KeyAdapter = .{ .intern_pool = ip };
+    const gop = try ip.map.getOrPutAdapted(gpa, enum_type.toKey(), adapter);
+    assert(!gop.found_existing);
+
+    const names_map = try ip.addMap(gpa);
+
+    const extra_fields_len: u32 = @typeInfo(EnumAuto).Struct.fields.len;
+    try ip.extra.ensureUnusedCapacity(gpa, extra_fields_len + enum_type.fields_len);
+
+    const extra_index = ip.addExtraAssumeCapacity(EnumAuto{
+        .decl = enum_type.decl,
+        .namespace = enum_type.namespace,
+        .names_map = names_map,
+        .fields_len = enum_type.fields_len,
+    });
+
+    ip.items.appendAssumeCapacity(.{
+        .tag = .type_enum_auto,
+        .data = extra_index,
+    });
+    ip.extra.appendNTimesAssumeCapacity(@enumToInt(Index.none), enum_type.fields_len);
+    return .{
+        .index = @intToEnum(Index, ip.items.len - 1),
+        .tag_ty_index = undefined,
+        .names_map = names_map,
+        .names_start = extra_index + extra_fields_len,
+        .values_map = .none,
+        .values_start = undefined,
+    };
+}
+
+pub fn getIncompleteEnumExplicit(
+    ip: *InternPool,
+    gpa: Allocator,
+    enum_type: Key.IncompleteEnumType,
+    tag: Tag,
+) Allocator.Error!InternPool.IncompleteEnumType {
+    // We must keep the map in sync with `items`. The hash and equality functions
+    // for enum types only look at the decl field, which is present even in
+    // an `IncompleteEnumType`.
+    const adapter: KeyAdapter = .{ .intern_pool = ip };
+    const gop = try ip.map.getOrPutAdapted(gpa, enum_type.toKey(), adapter);
+    assert(!gop.found_existing);
+
+    const names_map = try ip.addMap(gpa);
+    const values_map: OptionalMapIndex = if (!enum_type.has_values) .none else m: {
+        const values_map = try ip.addMap(gpa);
+        break :m values_map.toOptional();
+    };
+
+    const reserved_len = enum_type.fields_len +
+        if (enum_type.has_values) enum_type.fields_len else 0;
+
+    const extra_fields_len: u32 = @typeInfo(EnumExplicit).Struct.fields.len;
+    try ip.extra.ensureUnusedCapacity(gpa, extra_fields_len + reserved_len);
+
+    const extra_index = ip.addExtraAssumeCapacity(EnumExplicit{
+        .decl = enum_type.decl,
+        .namespace = enum_type.namespace,
+        .int_tag_type = enum_type.tag_ty,
+        .fields_len = enum_type.fields_len,
+        .names_map = names_map,
+        .values_map = values_map,
+    });
+
+    ip.items.appendAssumeCapacity(.{
+        .tag = tag,
+        .data = extra_index,
+    });
+    // This is both fields and values (if present).
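+    // The `Index.none` placeholders reserved here are overwritten one at a
+    // time by `addFieldName` and `addFieldValue` as the enum is completed.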
+    ip.extra.appendNTimesAssumeCapacity(@enumToInt(Index.none), reserved_len);
+    return .{
+        .index = @intToEnum(Index, ip.items.len - 1),
+        .tag_ty_index = extra_index + std.meta.fieldIndex(EnumExplicit, "int_tag_type").?,
+        .names_map = names_map,
+        .names_start = extra_index + extra_fields_len,
+        .values_map = values_map,
+        .values_start = extra_index + extra_fields_len + enum_type.fields_len,
+    };
+}
+
+pub fn finishGetEnum(
+    ip: *InternPool,
+    gpa: Allocator,
+    enum_type: Key.EnumType,
+    tag: Tag,
+) Allocator.Error!Index {
+    const names_map = try ip.addMap(gpa);
+    try addStringsToMap(ip, gpa, names_map, enum_type.names);
+
+    const values_map: OptionalMapIndex = if (enum_type.values.len == 0) .none else m: {
+        const values_map = try ip.addMap(gpa);
+        try addIndexesToMap(ip, gpa, values_map, enum_type.values);
+        break :m values_map.toOptional();
+    };
+    const fields_len = @intCast(u32, enum_type.names.len);
+    try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(EnumExplicit).Struct.fields.len +
+        fields_len);
+    ip.items.appendAssumeCapacity(.{
+        .tag = tag,
+        .data = ip.addExtraAssumeCapacity(EnumExplicit{
+            .decl = enum_type.decl,
+            .namespace = enum_type.namespace,
+            .int_tag_type = enum_type.tag_ty,
+            .fields_len = fields_len,
+            .names_map = names_map,
+            .values_map = values_map,
+        }),
+    });
+    ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, enum_type.names));
+    ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, enum_type.values));
+    return @intToEnum(Index, ip.items.len - 1);
+}
+
+pub fn getAssumeExists(ip: *const InternPool, key: Key) Index {
+    const adapter: KeyAdapter = .{ .intern_pool = ip };
     const index = ip.map.getIndexAdapted(key, adapter).?;
     return @intToEnum(Index, index);
 }
@@ -1979,6 +2254,7 @@ fn addMap(ip: *InternPool, gpa: Allocator) Allocator.Error!MapIndex {
 pub fn remove(ip: *InternPool, index: Index) void {
     _ = ip;
     _ = index;
+    @setCold(true);
     @panic("TODO this is a bit problematic to implement, could we maybe just never support a remove() operation on InternPool?");
 }
 
@@ -2336,7 +2612,7 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void {
             .type_slice => 0,
             .type_optional => 0,
             .type_error_union => @sizeOf(ErrorUnion),
-            .type_enum_explicit => @sizeOf(EnumExplicit),
+            .type_enum_explicit, .type_enum_nonexhaustive => @sizeOf(EnumExplicit),
             .type_enum_auto => @sizeOf(EnumAuto),
             .type_opaque => @sizeOf(Key.OpaqueType),
             .type_struct => @sizeOf(Module.Struct) + @sizeOf(Module.Namespace) + @sizeOf(Module.Decl),
@@ -2448,3 +2724,50 @@ pub fn destroyUnion(ip: *InternPool, gpa: Allocator, index: Module.Union.Index) 
         // allocation failures here, instead leaking the Union until garbage collection.
     };
 }
+
+pub fn getOrPutString(
+    ip: *InternPool,
+    gpa: Allocator,
+    s: []const u8,
+) Allocator.Error!NullTerminatedString {
+    const string_bytes = &ip.string_bytes;
+    const str_index = @intCast(u32, string_bytes.items.len);
+    try string_bytes.ensureUnusedCapacity(gpa, s.len + 1);
+    string_bytes.appendSliceAssumeCapacity(s);
+    const key: []const u8 = string_bytes.items[str_index..];
+    const gop = try ip.string_table.getOrPutContextAdapted(gpa, key, std.hash_map.StringIndexAdapter{
+        .bytes = string_bytes,
+    }, std.hash_map.StringIndexContext{
+        .bytes = string_bytes,
+    });
+    if (gop.found_existing) {
+        string_bytes.shrinkRetainingCapacity(str_index);
+        return @intToEnum(NullTerminatedString, gop.key_ptr.*);
+    } else {
+        gop.key_ptr.* = str_index;
+        string_bytes.appendAssumeCapacity(0);
+        return @intToEnum(NullTerminatedString, str_index);
+    }
+}
+
+pub fn getString(ip: *InternPool, s: []const u8) OptionalNullTerminatedString {
+    if (ip.string_table.getKeyAdapted(s, std.hash_map.StringIndexAdapter{
+        .bytes = &ip.string_bytes,
+    })) |index| {
+        return @intToEnum(NullTerminatedString, index).toOptional();
+    } else {
+        return .none;
+    }
+}
+
+pub fn stringToSlice(ip: InternPool, s: NullTerminatedString) [:0]const u8 {
+    const string_bytes = ip.string_bytes.items;
+    const start = @enumToInt(s);
+    var end: usize = start;
+    while (string_bytes[end] != 0) end += 1;
+    return string_bytes[start..end :0];
+}
+
+pub fn typeOf(ip: InternPool, index: Index) Index {
+    return ip.indexToKey(index).typeOf();
+}
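As a rough illustration (not part of this commit) of the round-trip the new
string interner supports; `ip` (an `*InternPool`) and `gpa` are assumed to
be in scope:

    const s = try ip.getOrPutString(gpa, "hello");
    assert(std.mem.eql(u8, ip.stringToSlice(s), "hello"));
    // `getString` never allocates; it returns `.none` for strings that were
    // never interned, and the same interned index otherwise.
    assert(ip.getString("hello").unwrap().? == s);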
diff --git a/src/Module.zig b/src/Module.zig
index 6478f7ce4f..6bcd148e67 100644
--- a/src/Module.zig
+++ b/src/Module.zig
@@ -886,29 +886,17 @@ pub const Decl = struct {
     /// Only returns it if the Decl is the owner.
     pub fn getInnerNamespaceIndex(decl: *Decl, mod: *Module) Namespace.OptionalIndex {
         if (!decl.owns_tv) return .none;
-        switch (decl.val.ip_index) {
-            .empty_struct_type => return .none,
-            .none => {
-                const ty = (decl.val.castTag(.ty) orelse return .none).data;
-                switch (ty.tag()) {
-                    .enum_full, .enum_nonexhaustive => {
-                        const enum_obj = ty.cast(Type.Payload.EnumFull).?.data;
-                        return enum_obj.namespace.toOptional();
-                    },
-
-                    else => return .none,
-                }
-            },
-            else => return switch (mod.intern_pool.indexToKey(decl.val.ip_index)) {
+        return switch (decl.val.ip_index) {
+            .empty_struct_type => .none,
+            .none => .none,
+            else => switch (mod.intern_pool.indexToKey(decl.val.ip_index)) {
                 .opaque_type => |opaque_type| opaque_type.namespace.toOptional(),
                 .struct_type => |struct_type| struct_type.namespace,
-                .union_type => |union_type| {
-                    const union_obj = mod.unionPtr(union_type.index);
-                    return union_obj.namespace.toOptional();
-                },
+                .union_type => |union_type| mod.unionPtr(union_type.index).namespace.toOptional(),
+                .enum_type => |enum_type| enum_type.namespace,
                 else => .none,
             },
-        }
+        };
     }
 
     /// Same as `getInnerNamespaceIndex` but additionally obtains the pointer.
@@ -1135,28 +1123,6 @@ pub const Struct = struct {
         return mod.declPtr(s.owner_decl).srcLoc(mod);
     }
 
-    pub fn fieldSrcLoc(s: Struct, mod: *Module, query: FieldSrcQuery) SrcLoc {
-        @setCold(true);
-        const owner_decl = mod.declPtr(s.owner_decl);
-        const file = owner_decl.getFileScope(mod);
-        const tree = file.getTree(mod.gpa) catch |err| {
-            // In this case we emit a warning + a less precise source location.
-            log.warn("unable to load {s}: {s}", .{
-                file.sub_file_path, @errorName(err),
-            });
-            return s.srcLoc(mod);
-        };
-        const node = owner_decl.relativeToNodeIndex(0);
-
-        var buf: [2]Ast.Node.Index = undefined;
-        if (tree.fullContainerDecl(&buf, node)) |container_decl| {
-            return queryFieldSrc(tree.*, query, file, container_decl);
-        } else {
-            // This struct was generated using @Type
-            return s.srcLoc(mod);
-        }
-    }
-
     pub fn haveFieldTypes(s: Struct) bool {
         return switch (s.status) {
             .none,
@@ -1237,110 +1203,6 @@ pub const Struct = struct {
     }
 };
 
-/// Represents the data that an enum declaration provides, when the fields
-/// are auto-numbered, and there are no declarations. The integer tag type
-/// is inferred to be the smallest power of two unsigned int that fits
-/// the number of fields.
-pub const EnumSimple = struct {
-    /// The Decl that corresponds to the enum itself.
-    owner_decl: Decl.Index,
-    /// Set of field names in declaration order.
-    fields: NameMap,
-
-    pub const NameMap = EnumFull.NameMap;
-
-    pub fn srcLoc(self: EnumSimple, mod: *Module) SrcLoc {
-        const owner_decl = mod.declPtr(self.owner_decl);
-        return .{
-            .file_scope = owner_decl.getFileScope(mod),
-            .parent_decl_node = owner_decl.src_node,
-            .lazy = LazySrcLoc.nodeOffset(0),
-        };
-    }
-};
-
-/// Represents the data that an enum declaration provides, when there are no
-/// declarations. However an integer tag type is provided, and the enum tag values
-/// are explicitly provided.
-pub const EnumNumbered = struct {
-    /// The Decl that corresponds to the enum itself.
-    owner_decl: Decl.Index,
-    /// An integer type which is used for the numerical value of the enum.
-    /// Whether zig chooses this type or the user specifies it, it is stored here.
-    tag_ty: Type,
-    /// Set of field names in declaration order.
-    fields: NameMap,
-    /// Maps integer tag value to field index.
-    /// Entries are in declaration order, same as `fields`.
-    /// If this hash map is empty, it means the enum tags are auto-numbered.
-    values: ValueMap,
-
-    pub const NameMap = EnumFull.NameMap;
-    pub const ValueMap = EnumFull.ValueMap;
-
-    pub fn srcLoc(self: EnumNumbered, mod: *Module) SrcLoc {
-        const owner_decl = mod.declPtr(self.owner_decl);
-        return .{
-            .file_scope = owner_decl.getFileScope(mod),
-            .parent_decl_node = owner_decl.src_node,
-            .lazy = LazySrcLoc.nodeOffset(0),
-        };
-    }
-};
-
-/// Represents the data that an enum declaration provides, when there is
-/// at least one tag value explicitly specified, or at least one declaration.
-pub const EnumFull = struct {
-    /// The Decl that corresponds to the enum itself.
-    owner_decl: Decl.Index,
-    /// An integer type which is used for the numerical value of the enum.
-    /// Whether zig chooses this type or the user specifies it, it is stored here.
-    tag_ty: Type,
-    /// Set of field names in declaration order.
-    fields: NameMap,
-    /// Maps integer tag value to field index.
-    /// Entries are in declaration order, same as `fields`.
-    /// If this hash map is empty, it means the enum tags are auto-numbered.
-    values: ValueMap,
-    /// Represents the declarations inside this enum.
-    namespace: Namespace.Index,
-    /// true if zig inferred this tag type, false if user specified it
-    tag_ty_inferred: bool,
-
-    pub const NameMap = std.StringArrayHashMapUnmanaged(void);
-    pub const ValueMap = std.ArrayHashMapUnmanaged(Value, void, Value.ArrayHashContext, false);
-
-    pub fn srcLoc(self: EnumFull, mod: *Module) SrcLoc {
-        const owner_decl = mod.declPtr(self.owner_decl);
-        return .{
-            .file_scope = owner_decl.getFileScope(mod),
-            .parent_decl_node = owner_decl.src_node,
-            .lazy = LazySrcLoc.nodeOffset(0),
-        };
-    }
-
-    pub fn fieldSrcLoc(e: EnumFull, mod: *Module, query: FieldSrcQuery) SrcLoc {
-        @setCold(true);
-        const owner_decl = mod.declPtr(e.owner_decl);
-        const file = owner_decl.getFileScope(mod);
-        const tree = file.getTree(mod.gpa) catch |err| {
-            // In this case we emit a warning + a less precise source location.
-            log.warn("unable to load {s}: {s}", .{
-                file.sub_file_path, @errorName(err),
-            });
-            return e.srcLoc(mod);
-        };
-        const node = owner_decl.relativeToNodeIndex(0);
-        var buf: [2]Ast.Node.Index = undefined;
-        if (tree.fullContainerDecl(&buf, node)) |container_decl| {
-            return queryFieldSrc(tree.*, query, file, container_decl);
-        } else {
-            // This enum was generated using @Type
-            return e.srcLoc(mod);
-        }
-    }
-};
-
 pub const Union = struct {
     /// An enum type which is used for the tag of the union.
     /// This type is created even for untagged unions, even when the memory
@@ -1427,28 +1289,6 @@ pub const Union = struct {
         };
     }
 
-    pub fn fieldSrcLoc(u: Union, mod: *Module, query: FieldSrcQuery) SrcLoc {
-        @setCold(true);
-        const owner_decl = mod.declPtr(u.owner_decl);
-        const file = owner_decl.getFileScope(mod);
-        const tree = file.getTree(mod.gpa) catch |err| {
-            // In this case we emit a warning + a less precise source location.
-            log.warn("unable to load {s}: {s}", .{
-                file.sub_file_path, @errorName(err),
-            });
-            return u.srcLoc(mod);
-        };
-        const node = owner_decl.relativeToNodeIndex(0);
-
-        var buf: [2]Ast.Node.Index = undefined;
-        if (tree.fullContainerDecl(&buf, node)) |container_decl| {
-            return queryFieldSrc(tree.*, query, file, container_decl);
-        } else {
-            // This union was generated using @Type
-            return u.srcLoc(mod);
-        }
-    }
-
     pub fn haveFieldTypes(u: Union) bool {
         return switch (u.status) {
             .none,
@@ -7313,3 +7153,24 @@ pub fn typeToUnion(mod: *Module, ty: Type) ?*Union {
     const union_index = mod.intern_pool.indexToUnion(ty.ip_index).unwrap() orelse return null;
     return mod.unionPtr(union_index);
 }
+
+pub fn fieldSrcLoc(mod: *Module, owner_decl_index: Decl.Index, query: FieldSrcQuery) SrcLoc {
+    @setCold(true);
+    const owner_decl = mod.declPtr(owner_decl_index);
+    const file = owner_decl.getFileScope(mod);
+    const tree = file.getTree(mod.gpa) catch |err| {
+        // In this case we emit a warning + a less precise source location.
+        log.warn("unable to load {s}: {s}", .{
+            file.sub_file_path, @errorName(err),
+        });
+        return owner_decl.srcLoc(mod);
+    };
+    const node = owner_decl.relativeToNodeIndex(0);
+    var buf: [2]Ast.Node.Index = undefined;
+    if (tree.fullContainerDecl(&buf, node)) |container_decl| {
+        return queryFieldSrc(tree.*, query, file, container_decl);
+    } else {
+        // This type was generated using @Type
+        return owner_decl.srcLoc(mod);
+    }
+}
diff --git a/src/Sema.zig b/src/Sema.zig
index e9bf66565e..b94f995b46 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -2096,7 +2096,7 @@ fn failWithInvalidComptimeFieldStore(sema: *Sema, block: *Block, init_src: LazyS
         errdefer msg.destroy(sema.gpa);
 
         const struct_ty = mod.typeToStruct(container_ty) orelse break :msg msg;
-        const default_value_src = struct_ty.fieldSrcLoc(mod, .{
+        const default_value_src = mod.fieldSrcLoc(struct_ty.owner_decl, .{
             .index = field_index,
             .range = .value,
         });
@@ -2875,50 +2875,28 @@ fn zirEnumDecl(
         break :blk decls_len;
     } else 0;
 
-    var done = false;
-
-    var new_decl_arena = std.heap.ArenaAllocator.init(gpa);
-    errdefer if (!done) new_decl_arena.deinit();
-    const new_decl_arena_allocator = new_decl_arena.allocator();
+    // Because these three things each reference each other, `undefined`
+    // placeholders are used before being set after the enum type gains an
+    // InternPool index.
 
-    const enum_obj = try new_decl_arena_allocator.create(Module.EnumFull);
-    const enum_ty_payload = try new_decl_arena_allocator.create(Type.Payload.EnumFull);
-    enum_ty_payload.* = .{
-        .base = .{ .tag = if (small.nonexhaustive) .enum_nonexhaustive else .enum_full },
-        .data = enum_obj,
-    };
-    const enum_ty = Type.initPayload(&enum_ty_payload.base);
-    const enum_val = try Value.Tag.ty.create(new_decl_arena_allocator, enum_ty);
+    var done = false;
 
     const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
         .ty = Type.type,
-        .val = enum_val,
+        .val = undefined,
     }, small.name_strategy, "enum", inst);
     const new_decl = mod.declPtr(new_decl_index);
     new_decl.owns_tv = true;
     errdefer if (!done) mod.abortAnonDecl(new_decl_index);
 
-    enum_obj.* = .{
-        .owner_decl = new_decl_index,
-        .tag_ty = Type.null,
-        .tag_ty_inferred = true,
-        .fields = .{},
-        .values = .{},
-        .namespace = try mod.createNamespace(.{
-            .parent = block.namespace.toOptional(),
-            .ty = enum_ty,
-            .file_scope = block.getFileScope(mod),
-        }),
-    };
-
-    try new_decl.finalizeNewArena(&new_decl_arena);
-    const decl_val = try sema.analyzeDeclVal(block, src, new_decl_index);
-    done = true;
-
-    var decl_arena: std.heap.ArenaAllocator = undefined;
-    const decl_arena_allocator = new_decl.value_arena.?.acquire(gpa, &decl_arena);
-    defer new_decl.value_arena.?.release(&decl_arena);
+    const new_namespace_index = try mod.createNamespace(.{
+        .parent = block.namespace.toOptional(),
+        .ty = undefined,
+        .file_scope = block.getFileScope(mod),
+    });
+    const new_namespace = mod.namespacePtr(new_namespace_index);
+    errdefer if (!done) mod.destroyNamespace(new_namespace_index);
 
-    extra_index = try mod.scanNamespace(enum_obj.namespace, extra_index, decls_len, new_decl);
+    extra_index = try mod.scanNamespace(new_namespace_index, extra_index, decls_len, new_decl);
 
     const body = sema.code.extra[extra_index..][0..body_len];
     extra_index += body.len;
@@ -2927,7 +2905,31 @@ fn zirEnumDecl(
     const body_end = extra_index;
     extra_index += bit_bags_count;
 
-    {
+    const any_values = for (sema.code.extra[body_end..][0..bit_bags_count]) |bag| {
+        if (bag != 0) break true;
+    } else false;
+
+    const incomplete_enum = try mod.intern_pool.getIncompleteEnum(gpa, .{
+        .decl = new_decl_index,
+        .namespace = new_namespace_index.toOptional(),
+        .fields_len = fields_len,
+        .has_values = any_values,
+        .tag_mode = if (small.nonexhaustive)
+            .nonexhaustive
+        else if (tag_type_ref == .none)
+            .auto
+        else
+            .explicit,
+    });
+    errdefer if (!done) mod.intern_pool.remove(incomplete_enum.index);
+
+    new_decl.val = incomplete_enum.index.toValue();
+    new_namespace.ty = incomplete_enum.index.toType();
+
+    const decl_val = try sema.analyzeDeclVal(block, src, new_decl_index);
+    done = true;
+
+    const int_tag_ty = ty: {
         // We create a block for the field type instructions because they
         // may need to reference Decls from inside the enum namespace.
         // Within the field type, default value, and alignment expressions, the "owner decl"
@@ -2957,7 +2959,7 @@ fn zirEnumDecl(
             .parent = null,
             .sema = sema,
             .src_decl = new_decl_index,
-            .namespace = enum_obj.namespace,
+            .namespace = new_namespace_index,
             .wip_capture_scope = wip_captures.scope,
             .instructions = .{},
             .inlining = null,
@@ -2976,35 +2978,22 @@ fn zirEnumDecl(
             if (ty.zigTypeTag(mod) != .Int and ty.zigTypeTag(mod) != .ComptimeInt) {
                 return sema.fail(block, tag_ty_src, "expected integer tag type, found '{}'", .{ty.fmt(sema.mod)});
             }
-            enum_obj.tag_ty = try ty.copy(decl_arena_allocator);
-            enum_obj.tag_ty_inferred = false;
+            incomplete_enum.setTagType(&mod.intern_pool, ty.ip_index);
+            break :ty ty;
         } else if (fields_len == 0) {
-            enum_obj.tag_ty = try mod.intType(.unsigned, 0);
-            enum_obj.tag_ty_inferred = true;
+            break :ty try mod.intType(.unsigned, 0);
         } else {
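+            // For example, a 5-field enum needs ceil(log2(5)) = 3 bits, i.e. u3.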
             const bits = std.math.log2_int_ceil(usize, fields_len);
-            enum_obj.tag_ty = try mod.intType(.unsigned, bits);
-            enum_obj.tag_ty_inferred = true;
+            break :ty try mod.intType(.unsigned, bits);
         }
-    }
+    };
 
-    if (small.nonexhaustive and enum_obj.tag_ty.zigTypeTag(mod) != .ComptimeInt) {
-        if (fields_len > 1 and std.math.log2_int(u64, fields_len) == enum_obj.tag_ty.bitSize(mod)) {
+    if (small.nonexhaustive and int_tag_ty.ip_index != .comptime_int_type) {
+        if (fields_len > 1 and std.math.log2_int(u64, fields_len) == int_tag_ty.bitSize(mod)) {
             return sema.fail(block, src, "non-exhaustive enum specifies every value", .{});
         }
     }
 
-    try enum_obj.fields.ensureTotalCapacity(decl_arena_allocator, fields_len);
-    const any_values = for (sema.code.extra[body_end..][0..bit_bags_count]) |bag| {
-        if (bag != 0) break true;
-    } else false;
-    if (any_values) {
-        try enum_obj.values.ensureTotalCapacityContext(decl_arena_allocator, fields_len, .{
-            .ty = enum_obj.tag_ty,
-            .mod = mod,
-        });
-    }
-
     var bit_bag_index: usize = body_end;
     var cur_bit_bag: u32 = undefined;
     var field_i: u32 = 0;
@@ -3023,15 +3012,12 @@ fn zirEnumDecl(
         // doc comment
         extra_index += 1;
 
-        // This string needs to outlive the ZIR code.
-        const field_name = try decl_arena_allocator.dupe(u8, field_name_zir);
-
-        const gop_field = enum_obj.fields.getOrPutAssumeCapacity(field_name);
-        if (gop_field.found_existing) {
-            const field_src = enum_obj.fieldSrcLoc(sema.mod, .{ .index = field_i }).lazy;
-            const other_field_src = enum_obj.fieldSrcLoc(sema.mod, .{ .index = gop_field.index }).lazy;
+        const field_name = try mod.intern_pool.getOrPutString(gpa, field_name_zir);
+        if (try incomplete_enum.addFieldName(&mod.intern_pool, gpa, field_name)) |other_index| {
+            const field_src = mod.fieldSrcLoc(new_decl_index, .{ .index = field_i }).lazy;
+            const other_field_src = mod.fieldSrcLoc(new_decl_index, .{ .index = other_index }).lazy;
             const msg = msg: {
-                const msg = try sema.errMsg(block, field_src, "duplicate enum field '{s}'", .{field_name});
+                const msg = try sema.errMsg(block, field_src, "duplicate enum field '{s}'", .{field_name_zir});
                 errdefer msg.destroy(gpa);
                 try sema.errNote(block, other_field_src, msg, "other field here", .{});
                 break :msg msg;
@@ -3045,7 +3031,7 @@ fn zirEnumDecl(
             const tag_inst = try sema.resolveInst(tag_val_ref);
             const tag_val = sema.resolveConstValue(block, .unneeded, tag_inst, "") catch |err| switch (err) {
                 error.NeededSourceLocation => {
-                    const value_src = enum_obj.fieldSrcLoc(sema.mod, .{
+                    const value_src = mod.fieldSrcLoc(new_decl_index, .{
                         .index = field_i,
                         .range = .value,
                     }).lazy;
@@ -3055,19 +3041,14 @@ fn zirEnumDecl(
                 else => |e| return e,
             };
             last_tag_val = tag_val;
-            const copied_tag_val = try tag_val.copy(decl_arena_allocator);
-            const gop_val = enum_obj.values.getOrPutAssumeCapacityContext(copied_tag_val, .{
-                .ty = enum_obj.tag_ty,
-                .mod = mod,
-            });
-            if (gop_val.found_existing) {
-                const value_src = enum_obj.fieldSrcLoc(sema.mod, .{
+            if (try incomplete_enum.addFieldValue(&mod.intern_pool, gpa, tag_val.ip_index)) |other_index| {
+                const value_src = mod.fieldSrcLoc(new_decl_index, .{
                     .index = field_i,
                     .range = .value,
                 }).lazy;
-                const other_field_src = enum_obj.fieldSrcLoc(sema.mod, .{ .index = gop_val.index }).lazy;
+                const other_field_src = mod.fieldSrcLoc(new_decl_index, .{ .index = other_index }).lazy;
                 const msg = msg: {
-                    const msg = try sema.errMsg(block, value_src, "enum tag value {} already taken", .{tag_val.fmtValue(enum_obj.tag_ty, sema.mod)});
+                    const msg = try sema.errMsg(block, value_src, "enum tag value {} already taken", .{tag_val.fmtValue(int_tag_ty, sema.mod)});
                     errdefer msg.destroy(gpa);
                     try sema.errNote(block, other_field_src, msg, "other occurrence here", .{});
                     break :msg msg;
@@ -3076,20 +3057,15 @@ fn zirEnumDecl(
             }
         } else if (any_values) {
             const tag_val = if (last_tag_val) |val|
-                try sema.intAdd(val, try mod.intValue(enum_obj.tag_ty, 1), enum_obj.tag_ty)
+                try sema.intAdd(val, try mod.intValue(int_tag_ty, 1), int_tag_ty)
             else
-                try mod.intValue(enum_obj.tag_ty, 0);
+                try mod.intValue(int_tag_ty, 0);
             last_tag_val = tag_val;
-            const copied_tag_val = try tag_val.copy(decl_arena_allocator);
-            const gop_val = enum_obj.values.getOrPutAssumeCapacityContext(copied_tag_val, .{
-                .ty = enum_obj.tag_ty,
-                .mod = mod,
-            });
-            if (gop_val.found_existing) {
-                const field_src = enum_obj.fieldSrcLoc(sema.mod, .{ .index = field_i }).lazy;
-                const other_field_src = enum_obj.fieldSrcLoc(sema.mod, .{ .index = gop_val.index }).lazy;
+            if (try incomplete_enum.addFieldValue(&mod.intern_pool, gpa, tag_val.ip_index)) |other_index| {
+                const field_src = mod.fieldSrcLoc(new_decl_index, .{ .index = field_i }).lazy;
+                const other_field_src = mod.fieldSrcLoc(new_decl_index, .{ .index = other_index }).lazy;
                 const msg = msg: {
-                    const msg = try sema.errMsg(block, field_src, "enum tag value {} already taken", .{tag_val.fmtValue(enum_obj.tag_ty, sema.mod)});
+                    const msg = try sema.errMsg(block, field_src, "enum tag value {} already taken", .{tag_val.fmtValue(int_tag_ty, sema.mod)});
                     errdefer msg.destroy(gpa);
                     try sema.errNote(block, other_field_src, msg, "other occurrence here", .{});
                     break :msg msg;
@@ -3097,16 +3073,16 @@ fn zirEnumDecl(
                 return sema.failWithOwnedErrorMsg(msg);
             }
         } else {
-            last_tag_val = try mod.intValue(enum_obj.tag_ty, field_i);
+            last_tag_val = try mod.intValue(int_tag_ty, field_i);
         }
 
-        if (!(try sema.intFitsInType(last_tag_val.?, enum_obj.tag_ty, null))) {
-            const value_src = enum_obj.fieldSrcLoc(sema.mod, .{
+        if (!(try sema.intFitsInType(last_tag_val.?, int_tag_ty, null))) {
+            const value_src = mod.fieldSrcLoc(new_decl_index, .{
                 .index = field_i,
                 .range = if (has_tag_value) .value else .name,
             }).lazy;
             const msg = try sema.errMsg(block, value_src, "enumeration value '{}' too large for type '{}'", .{
-                last_tag_val.?.fmtValue(enum_obj.tag_ty, mod), enum_obj.tag_ty.fmt(mod),
+                last_tag_val.?.fmtValue(int_tag_ty, mod), int_tag_ty.fmt(mod),
             });
             return sema.failWithOwnedErrorMsg(msg);
         }
@@ -4356,7 +4332,7 @@ fn validateUnionInit(
     }
 
     const tag_ty = union_ty.unionTagTypeHypothetical(mod);
-    const enum_field_index = @intCast(u32, tag_ty.enumFieldIndex(field_name).?);
+    const enum_field_index = @intCast(u32, tag_ty.enumFieldIndex(field_name, mod).?);
     const tag_val = try Value.Tag.enum_field_index.create(sema.arena, enum_field_index);
 
     if (init_val) |val| {
@@ -8334,7 +8310,7 @@ fn zirIntToEnum(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
     _ = try sema.checkIntType(block, operand_src, sema.typeOf(operand));
 
     if (try sema.resolveMaybeUndefVal(operand)) |int_val| {
-        if (dest_ty.isNonexhaustiveEnum()) {
+        if (dest_ty.isNonexhaustiveEnum(mod)) {
             const int_tag_ty = try dest_ty.intTagType(mod);
             if (try sema.intFitsInType(int_val, int_tag_ty, null)) {
                 return sema.addConstant(dest_ty, int_val);
@@ -8383,7 +8359,7 @@ fn zirIntToEnum(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
     try sema.requireRuntimeBlock(block, src, operand_src);
     const result = try block.addTyOp(.intcast, dest_ty, operand);
-    if (block.wantSafety() and !dest_ty.isNonexhaustiveEnum() and
+    if (block.wantSafety() and !dest_ty.isNonexhaustiveEnum(mod) and
         sema.mod.backendSupportsFeature(.is_named_enum_value))
     {
         const ok = try block.addUnOp(.is_named_enum_value, result);
@@ -10518,7 +10494,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
     var else_error_ty: ?Type = null;
 
     // Validate usage of '_' prongs.
-    if (special_prong == .under and (!operand_ty.isNonexhaustiveEnum() or union_originally)) {
+    if (special_prong == .under and (!operand_ty.isNonexhaustiveEnum(mod) or union_originally)) {
         const msg = msg: {
             const msg = try sema.errMsg(
                 block,
@@ -10543,8 +10519,8 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
     switch (operand_ty.zigTypeTag(mod)) {
         .Union => unreachable, // handled in zirSwitchCond
         .Enum => {
-            seen_enum_fields = try gpa.alloc(?Module.SwitchProngSrc, operand_ty.enumFieldCount());
-            empty_enum = seen_enum_fields.len == 0 and !operand_ty.isNonexhaustiveEnum();
+            seen_enum_fields = try gpa.alloc(?Module.SwitchProngSrc, operand_ty.enumFieldCount(mod));
+            empty_enum = seen_enum_fields.len == 0 and !operand_ty.isNonexhaustiveEnum(mod);
             @memset(seen_enum_fields, null);
             // `range_set` is used for non-exhaustive enum values that do not correspond to any tags.
@@ -10599,7 +10575,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
             } else true;
 
             if (special_prong == .@"else") {
-                if (all_tags_handled and !operand_ty.isNonexhaustiveEnum()) return sema.fail(
+                if (all_tags_handled and !operand_ty.isNonexhaustiveEnum(mod)) return sema.fail(
                     block,
                     special_prong_src,
                     "unreachable else prong; all cases already handled",
@@ -10617,7 +10593,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
                 for (seen_enum_fields, 0..) |seen_src, i| {
                     if (seen_src != null) continue;
 
-                    const field_name = operand_ty.enumFieldName(i);
+                    const field_name = operand_ty.enumFieldName(i, mod);
                     try sema.addFieldErrNote(
                         operand_ty,
                         i,
@@ -10635,7 +10611,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
                     break :msg msg;
                 };
                 return sema.failWithOwnedErrorMsg(msg);
-            } else if (special_prong == .none and operand_ty.isNonexhaustiveEnum() and !union_originally) {
+            } else if (special_prong == .none and operand_ty.isNonexhaustiveEnum(mod) and !union_originally) {
                 return sema.fail(
                     block,
                     src,
@@ -11159,7 +11135,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
         return Air.Inst.Ref.unreachable_value;
     }
     if (mod.backendSupportsFeature(.is_named_enum_value) and block.wantSafety() and operand_ty.zigTypeTag(mod) == .Enum and
-        (!operand_ty.isNonexhaustiveEnum() or union_originally))
+        (!operand_ty.isNonexhaustiveEnum(mod) or union_originally))
     {
         try sema.zirDbgStmt(block, cond_dbg_node_index);
         const ok = try block.addUnOp(.is_named_enum_value, operand);
@@ -11489,7 +11465,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
     var emit_bb = false;
 
     if (special.is_inline) switch (operand_ty.zigTypeTag(mod)) {
         .Enum => {
-            if (operand_ty.isNonexhaustiveEnum() and !union_originally) {
+            if (operand_ty.isNonexhaustiveEnum(mod) and !union_originally) {
                 return sema.fail(block, special_prong_src, "cannot enumerate values of type '{}' for 'inline else'", .{
                     operand_ty.fmt(mod),
                 });
@@ -11629,7 +11605,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
         case_block.inline_case_capture = .none;
 
         if (mod.backendSupportsFeature(.is_named_enum_value) and special.body.len != 0 and block.wantSafety() and
-            operand_ty.zigTypeTag(mod) == .Enum and (!operand_ty.isNonexhaustiveEnum() or union_originally))
+            operand_ty.zigTypeTag(mod) == .Enum and (!operand_ty.isNonexhaustiveEnum(mod) or union_originally))
         {
             try sema.zirDbgStmt(&case_block, cond_dbg_node_index);
             const ok = try case_block.addUnOp(.is_named_enum_value, operand);
@@ -12081,7 +12057,7 @@ fn zirHasField(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     break :hf switch (ty.zigTypeTag(mod)) {
         .Struct => ty.structFields(mod).contains(field_name),
         .Union => ty.unionFields(mod).contains(field_name),
-        .Enum => ty.enumFields().contains(field_name),
+        .Enum => ty.enumFieldIndex(field_name, mod) != null,
         .Array => mem.eql(u8, field_name, "len"),
         else => return sema.fail(block, ty_src, "type '{}' does not support '@hasField'", .{
             ty.fmt(sema.mod),
@@ -16300,9 +16276,9 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
         },
         .Enum => {
             // TODO: look into memoizing this result.
-            const int_tag_ty = try ty.intTagType(mod);
+            const enum_type = mod.intern_pool.indexToKey(ty.ip_index).enum_type;
 
-            const is_exhaustive = Value.makeBool(!ty.isNonexhaustiveEnum());
+            const is_exhaustive = Value.makeBool(enum_type.tag_mode != .nonexhaustive);
 
             var fields_anon_decl = try block.startAnonDecl();
             defer fields_anon_decl.deinit();
@@ -16320,25 +16296,17 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 break :t try enum_field_ty_decl.val.toType().copy(fields_anon_decl.arena());
             };
 
-            const enum_fields = ty.enumFields();
-            const enum_field_vals = try fields_anon_decl.arena().alloc(Value, enum_fields.count());
+            const enum_field_vals = try fields_anon_decl.arena().alloc(Value, enum_type.names.len);
 
             for (enum_field_vals, 0..) |*field_val, i| {
-                var tag_val_payload: Value.Payload.U32 = .{
-                    .base = .{ .tag = .enum_field_index },
-                    .data = @intCast(u32, i),
-                };
-                const tag_val = Value.initPayload(&tag_val_payload.base);
-
-                const int_val = try tag_val.enumToInt(ty, mod);
-
-                const name = enum_fields.keys()[i];
+                const name_ip = enum_type.names[i];
+                const name = mod.intern_pool.stringToSlice(name_ip);
                 const name_val = v: {
                     var anon_decl = try block.startAnonDecl();
                     defer anon_decl.deinit();
                     const bytes = try anon_decl.arena().dupeZ(u8, name);
                     const new_decl = try anon_decl.finish(
-                        try Type.array(anon_decl.arena(), bytes.len, try mod.intValue(Type.u8, 0), Type.u8, mod),
+                        try Type.array(anon_decl.arena(), bytes.len, Value.zero_u8, Type.u8, mod),
                         try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]),
                         0, // default alignment
                     );
                     break :v try Value.Tag.decl_ref.create(sema.arena, new_decl);
                 };
 
@@ -16350,7 +16318,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                     // name: []const u8,
                     name_val,
                     // value: comptime_int,
-                    int_val,
+                    try mod.intValue(Type.comptime_int, i),
                 };
                 field_val.* = try Value.Tag.aggregate.create(fields_anon_decl.arena(), enum_field_fields);
             }
@@ -16370,12 +16338,12 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 break :v try Value.Tag.decl_ref.create(sema.arena, new_decl);
             };
 
-            const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, ty.getNamespace(mod));
+            const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, enum_type.namespace);
 
             const field_values = try sema.arena.create([4]Value);
             field_values.* = .{
                 // tag_type: type,
-                try Value.Tag.ty.create(sema.arena, int_tag_ty),
+                enum_type.tag_ty.toValue(),
                 // fields: []const EnumField,
                 fields_val,
                 // decls: []const Declaration,
@@ -16468,7 +16436,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 });
             };
 
-            const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, union_ty.getNamespace(mod));
+            const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, union_ty.getNamespaceIndex(mod));
 
             const enum_tag_ty_val = if (union_ty.unionTagType(mod)) |tag_ty| v: {
                 const ty_val = try Value.Tag.ty.create(sema.arena, tag_ty);
@@ -16631,7 +16599,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 });
             };
 
-            const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, struct_ty.getNamespace(mod));
+            const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, struct_ty.getNamespaceIndex(mod));
 
             const backing_integer_val = blk: {
                 if (layout == .Packed) {
@@ -16674,7 +16642,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
             // TODO: look into memoizing this result.
             const opaque_ty = try sema.resolveTypeFields(ty);
 
-            const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, opaque_ty.getNamespace(mod));
+            const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, opaque_ty.getNamespaceIndex(mod));
 
             const field_values = try sema.arena.create([1]Value);
             field_values.* = .{
@@ -16700,7 +16668,7 @@ fn typeInfoDecls(
     block: *Block,
     src: LazySrcLoc,
     type_info_ty: Type,
-    opt_namespace: ?*Module.Namespace,
+    opt_namespace: Module.Namespace.OptionalIndex,
 ) CompileError!Value {
     const mod = sema.mod;
     var decls_anon_decl = try block.startAnonDecl();
@@ -16726,8 +16694,9 @@ fn typeInfoDecls(
     var seen_namespaces = std.AutoHashMap(*Namespace, void).init(sema.gpa);
     defer seen_namespaces.deinit();
 
-    if (opt_namespace) |some| {
-        try sema.typeInfoNamespaceDecls(block, decls_anon_decl.arena(), some, &decl_vals, &seen_namespaces);
+    if (opt_namespace.unwrap()) |namespace_index| {
+        const namespace = mod.namespacePtr(namespace_index);
+        try sema.typeInfoNamespaceDecls(block, decls_anon_decl.arena(), namespace, &decl_vals, &seen_namespaces);
     }
 
     const new_decl = try decls_anon_decl.finish(
@@ -17896,7 +17865,7 @@ fn unionInit(
     if (try sema.resolveMaybeUndefVal(init)) |init_val| {
         const tag_ty = union_ty.unionTagTypeHypothetical(mod);
-        const enum_field_index = @intCast(u32, tag_ty.enumFieldIndex(field_name).?);
+        const enum_field_index = @intCast(u32, tag_ty.enumFieldIndex(field_name, mod).?);
         const tag_val = try Value.Tag.enum_field_index.create(sema.arena, enum_field_index);
         return sema.addConstant(union_ty, try Value.Tag.@"union".create(sema.arena, .{
             .tag = tag_val,
@@ -17997,7 +17966,7 @@ fn zirStructInit(
             const field_name = sema.code.nullTerminatedString(field_type_extra.name_start);
             const field_index = try sema.unionFieldIndex(block, resolved_ty, field_name, field_src);
             const tag_ty = resolved_ty.unionTagTypeHypothetical(mod);
-            const enum_field_index = @intCast(u32, tag_ty.enumFieldIndex(field_name).?);
+            const enum_field_index = @intCast(u32, tag_ty.enumFieldIndex(field_name, mod).?);
             const tag_val = try Value.Tag.enum_field_index.create(sema.arena, enum_field_index);
 
             const init_inst = try sema.resolveInst(item.data.init);
@@ -18754,7 +18723,7 @@ fn zirTagName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
             operand_ty.fmt(mod),
         }),
     };
-    if (enum_ty.enumFieldCount() == 0) {
+    if (enum_ty.enumFieldCount(mod) == 0) {
         // TODO I don't think this is the correct way to handle this but
         // it prevents a crash.
         return sema.fail(block, operand_src, "cannot get @tagName of empty enum '{}'", .{
@@ -18776,7 +18745,7 @@ fn zirTagName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
             };
             return sema.failWithOwnedErrorMsg(msg);
         };
-        const field_name = enum_ty.enumFieldName(field_index);
+        const field_name = enum_ty.enumFieldName(field_index, mod);
         return sema.addStrLit(block, field_name);
     }
     try sema.requireRuntimeBlock(block, src, operand_src);
@@ -19081,63 +19050,41 @@ fn zirReify(
             return sema.fail(block, src, "reified enums must have no decls", .{});
         }
 
-        var new_decl_arena = std.heap.ArenaAllocator.init(gpa);
-        errdefer new_decl_arena.deinit();
-        const new_decl_arena_allocator = new_decl_arena.allocator();
+        const int_tag_ty = tag_type_val.toType();
+        if (int_tag_ty.zigTypeTag(mod) != .Int) {
+            return sema.fail(block, src, "Type.Enum.tag_type must be an integer type", .{});
+        }
+
+        // Because these things each reference each other, `undefined`
+        // placeholders are used before being set after the enum type gains
+        // an InternPool index.
 
-        // Define our empty enum decl
-        const enum_obj = try new_decl_arena_allocator.create(Module.EnumFull);
-        const enum_ty_payload = try new_decl_arena_allocator.create(Type.Payload.EnumFull);
-        enum_ty_payload.* = .{
-            .base = .{
-                .tag = if (!is_exhaustive_val.toBool(mod))
-                    .enum_nonexhaustive
-                else
-                    .enum_full,
-            },
-            .data = enum_obj,
-        };
-        const enum_ty = Type.initPayload(&enum_ty_payload.base);
-        const enum_val = try Value.Tag.ty.create(new_decl_arena_allocator, enum_ty);
         const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
             .ty = Type.type,
-            .val = enum_val,
+            .val = undefined,
         }, name_strategy, "enum", inst);
         const new_decl = mod.declPtr(new_decl_index);
         new_decl.owns_tv = true;
         errdefer mod.abortAnonDecl(new_decl_index);
 
-        enum_obj.* = .{
-            .owner_decl = new_decl_index,
-            .tag_ty = Type.null,
-            .tag_ty_inferred = false,
-            .fields = .{},
-            .values = .{},
-            .namespace = try mod.createNamespace(.{
-                .parent = block.namespace.toOptional(),
-                .ty = enum_ty,
-                .file_scope = block.getFileScope(mod),
-            }),
-        };
-
-        // Enum tag type
-        const int_tag_ty = try tag_type_val.toType().copy(new_decl_arena_allocator);
-
-        if (int_tag_ty.zigTypeTag(mod) != .Int) {
-            return sema.fail(block, src, "Type.Enum.tag_type must be an integer type", .{});
-        }
-        enum_obj.tag_ty = int_tag_ty;
-
-        // Fields
-        const fields_len = try sema.usizeCast(block, src, fields_val.sliceLen(mod));
-        try enum_obj.fields.ensureTotalCapacity(new_decl_arena_allocator, fields_len);
-        try enum_obj.values.ensureTotalCapacityContext(new_decl_arena_allocator, fields_len, .{
-            .ty = enum_obj.tag_ty,
-            .mod = mod,
+        // Define our empty enum decl
+        const fields_len = @intCast(u32, try sema.usizeCast(block, src, fields_val.sliceLen(mod)));
+        const incomplete_enum = try mod.intern_pool.getIncompleteEnum(gpa, .{
+            .decl = new_decl_index,
+            .namespace = .none,
+            .fields_len = fields_len,
+            .has_values = true,
+            .tag_mode = if (!is_exhaustive_val.toBool(mod))
+                .nonexhaustive
+            else
+                .explicit,
+            .tag_ty = int_tag_ty.ip_index,
         });
+        errdefer mod.intern_pool.remove(incomplete_enum.index);
 
-        var field_i: usize = 0;
-        while (field_i < fields_len) : (field_i += 1) {
+        new_decl.val = incomplete_enum.index.toValue();
+
+        for (0..fields_len) |field_i| {
             const elem_val = try fields_val.elemValue(mod, field_i);
             const field_struct_val: []const Value = elem_val.castTag(.aggregate).?.data;
             // TODO use reflection instead of magic numbers here
             const field_name = try name_val.toAllocatedBytes(
                 Type.const_slice_u8,
-                new_decl_arena_allocator,
+                sema.arena,
                 mod,
             );
+            const field_name_ip = try mod.intern_pool.getOrPutString(gpa, field_name);
 
-            if (!try sema.intFitsInType(value_val, enum_obj.tag_ty, null)) {
+            if (!try sema.intFitsInType(value_val, int_tag_ty, null)) {
                 // TODO: better source location
                 return sema.fail(block, src, "field '{s}' with enumeration value '{}' is too large for backing int type '{}'", .{
                     field_name,
                     value_val.fmtValue(Type.comptime_int, mod),
-                    enum_obj.tag_ty.fmt(mod),
+                    int_tag_ty.fmt(mod),
                 });
             }
 
-            const gop_field = enum_obj.fields.getOrPutAssumeCapacity(field_name);
-            if (gop_field.found_existing) {
+            if (try incomplete_enum.addFieldName(&mod.intern_pool, gpa, field_name_ip)) |other_index| {
                 const msg = msg: {
                     const msg = try sema.errMsg(block, src, "duplicate enum field '{s}'", .{field_name});
                     errdefer msg.destroy(gpa);
+                    _ = other_index;
                     // TODO: this note is incorrect
                     try sema.errNote(block, src, msg, "other field here", .{});
                     break :msg msg;
                 };
                 return sema.failWithOwnedErrorMsg(msg);
             }
 
-            const copied_tag_val = try value_val.copy(new_decl_arena_allocator);
-            const gop_val = enum_obj.values.getOrPutAssumeCapacityContext(copied_tag_val, .{
-                .ty = enum_obj.tag_ty,
-                .mod = mod,
-            });
-            if (gop_val.found_existing) {
+            if (try incomplete_enum.addFieldValue(&mod.intern_pool, gpa, value_val.ip_index)) |other| {
                 const msg = msg: {
                     const msg = try sema.errMsg(block, src, "enum tag value {} already taken", .{value_val.fmtValue(Type.comptime_int, mod)});
                     errdefer msg.destroy(gpa);
+                    _ = other;
                     // TODO: this note is incorrect
                     try sema.errNote(block, src, msg, "other enum tag value here", .{});
                     break :msg msg;
                 };
@@ -19188,7 +19132,6 @@ fn zirReify(
             }
         }
 
-        try new_decl.finalizeNewArena(&new_decl_arena);
         return sema.analyzeDeclVal(block, src, new_decl_index);
     },
     .Opaque => {
@@ -19307,26 +19250,29 @@ fn zirReify(
         new_namespace.ty = union_ty.toType();
 
         // Tag type
-        var tag_ty_field_names: ?Module.EnumFull.NameMap = null;
-        var enum_field_names: ?*Module.EnumNumbered.NameMap = null;
         const fields_len = try sema.usizeCast(block, src, fields_val.sliceLen(mod));
+        var explicit_tags_seen: []bool = &.{};
+        var explicit_enum_info: ?InternPool.Key.EnumType = null;
+        var enum_field_names: []InternPool.NullTerminatedString = &.{};
         if (tag_type_val.optionalValue(mod)) |payload_val| {
-            union_obj.tag_ty = try payload_val.toType().copy(new_decl_arena_allocator);
+            union_obj.tag_ty = payload_val.toType();
 
-            if (union_obj.tag_ty.zigTypeTag(mod) != .Enum) {
-                return sema.fail(block, src, "Type.Union.tag_type must be an enum type", .{});
-            }
-            tag_ty_field_names = try union_obj.tag_ty.enumFields().clone(sema.arena);
+            const enum_type = switch (mod.intern_pool.indexToKey(union_obj.tag_ty.ip_index)) {
+                .enum_type => |x| x,
+                else => return sema.fail(block, src, "Type.Union.tag_type must be an enum type", .{}),
+            };
+
+            explicit_enum_info = enum_type;
+            explicit_tags_seen = try sema.arena.alloc(bool, enum_type.names.len);
+            @memset(explicit_tags_seen, false);
         } else {
-            union_obj.tag_ty = try sema.generateUnionTagTypeSimple(block, fields_len, null);
-            enum_field_names = &union_obj.tag_ty.castTag(.enum_simple).?.data.fields;
+            enum_field_names = try sema.arena.alloc(InternPool.NullTerminatedString, fields_len);
         }
 
         // Fields
         try union_obj.fields.ensureTotalCapacity(new_decl_arena_allocator, fields_len);
 
-        var i: usize = 0;
-        while (i < fields_len) : (i += 1) {
+        for (0..fields_len) |i| {
            const elem_val = try fields_val.elemValue(mod, i);
             const field_struct_val = elem_val.castTag(.aggregate).?.data;
             // TODO use reflection instead of magic numbers here
@@ -19343,13 +19289,14 @@ fn zirReify(
                 mod,
             );
 
-            if (enum_field_names) |set| {
-                set.putAssumeCapacity(field_name, {});
+            const field_name_ip = try mod.intern_pool.getOrPutString(gpa, field_name);
+
+            if (enum_field_names.len != 0) {
+                enum_field_names[i] = field_name_ip;
             }
 
-            if (tag_ty_field_names) |*names| {
-                const enum_has_field = names.orderedRemove(field_name);
-                if (!enum_has_field) {
+            if (explicit_enum_info) |tag_info| {
+                const enum_index = tag_info.nameIndex(mod.intern_pool, field_name_ip) orelse {
                     const msg = msg: {
                         const msg = try sema.errMsg(block, src, "no field named '{s}' in enum '{}'", .{ field_name, union_obj.tag_ty.fmt(mod) });
                         errdefer msg.destroy(gpa);
@@ -19357,7 +19304,11 @@ fn zirReify(
                         break :msg msg;
                     };
                     return sema.failWithOwnedErrorMsg(msg);
-                }
+                };
+                // No check for duplicate because the check already happened in order
+                // to create the enum type in the first place.
+                assert(!explicit_tags_seen[enum_index]);
+                explicit_tags_seen[enum_index] = true;
             }
 
             const gop = union_obj.fields.getOrPutAssumeCapacity(field_name);
@@ -19409,22 +19360,26 @@ fn zirReify(
             }
         }
 
-        if (tag_ty_field_names) |names| {
-            if (names.count() > 0) {
+        if (explicit_enum_info) |tag_info| {
+            if (tag_info.names.len > fields_len) {
                 const msg = msg: {
                     const msg = try sema.errMsg(block, src, "enum field(s) missing in union", .{});
                     errdefer msg.destroy(gpa);
 
                     const enum_ty = union_obj.tag_ty;
-                    for (names.keys()) |field_name| {
-                        const field_index = enum_ty.enumFieldIndex(field_name).?;
-                        try sema.addFieldErrNote(enum_ty, field_index, msg, "field '{s}' missing, declared here", .{field_name});
+                    for (tag_info.names, 0..) |field_name, field_index| {
+                        if (explicit_tags_seen[field_index]) continue;
+                        try sema.addFieldErrNote(enum_ty, field_index, msg, "field '{s}' missing, declared here", .{
+                            mod.intern_pool.stringToSlice(field_name),
+                        });
                     }
                     try sema.addDeclaredHereNote(msg, union_obj.tag_ty);
                     break :msg msg;
                 };
                 return sema.failWithOwnedErrorMsg(msg);
             }
+        } else {
+            union_obj.tag_ty = try sema.generateUnionTagTypeSimple(block, enum_field_names, null);
         }
 
         try new_decl.finalizeNewArena(&new_decl_arena);
@@ -23450,7 +23405,7 @@ fn explainWhyTypeIsComptimeInner(
 
     if (mod.typeToStruct(ty)) |struct_obj| {
         for (struct_obj.fields.values(), 0..) |field, i| {
-            const field_src_loc = struct_obj.fieldSrcLoc(sema.mod, .{
+            const field_src_loc = mod.fieldSrcLoc(struct_obj.owner_decl, .{
                 .index = i,
                 .range = .type,
             });
@@ -23469,7 +23424,7 @@ fn explainWhyTypeIsComptimeInner(
 
     if (mod.typeToUnion(ty)) |union_obj| {
         for (union_obj.fields.values(), 0..) 
|field, i| { - const field_src_loc = union_obj.fieldSrcLoc(sema.mod, .{ + const field_src_loc = mod.fieldSrcLoc(union_obj.owner_decl, .{ .index = i, .range = .type, }); @@ -24168,7 +24123,7 @@ fn fieldVal( } const union_ty = try sema.resolveTypeFields(child_type); if (union_ty.unionTagType(mod)) |enum_ty| { - if (enum_ty.enumFieldIndex(field_name)) |field_index_usize| { + if (enum_ty.enumFieldIndex(field_name, mod)) |field_index_usize| { const field_index = @intCast(u32, field_index_usize); return sema.addConstant( enum_ty, @@ -24184,7 +24139,7 @@ fn fieldVal( return inst; } } - const field_index_usize = child_type.enumFieldIndex(field_name) orelse + const field_index_usize = child_type.enumFieldIndex(field_name, mod) orelse return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name); const field_index = @intCast(u32, field_index_usize); const enum_val = try Value.Tag.enum_field_index.create(arena, field_index); @@ -24382,7 +24337,7 @@ fn fieldPtr( } const union_ty = try sema.resolveTypeFields(child_type); if (union_ty.unionTagType(mod)) |enum_ty| { - if (enum_ty.enumFieldIndex(field_name)) |field_index| { + if (enum_ty.enumFieldIndex(field_name, mod)) |field_index| { const field_index_u32 = @intCast(u32, field_index); var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); @@ -24401,7 +24356,7 @@ fn fieldPtr( return inst; } } - const field_index = child_type.enumFieldIndex(field_name) orelse { + const field_index = child_type.enumFieldIndex(field_name, mod) orelse { return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name); }; const field_index_u32 = @intCast(u32, field_index); @@ -24996,7 +24951,7 @@ fn unionFieldPtr( .@"volatile" = union_ptr_ty.isVolatilePtr(mod), .@"addrspace" = union_ptr_ty.ptrAddressSpace(mod), }); - const enum_field_index = @intCast(u32, union_obj.tag_ty.enumFieldIndex(field_name).?); + const enum_field_index = @intCast(u32, union_obj.tag_ty.enumFieldIndex(field_name, mod).?); if (initializing and field.ty.zigTypeTag(mod) == .NoReturn) { const msg = msg: { @@ -25028,7 +24983,7 @@ fn unionFieldPtr( if (!tag_matches) { const msg = msg: { const active_index = tag_and_val.tag.castTag(.enum_field_index).?.data; - const active_field_name = union_obj.tag_ty.enumFieldName(active_index); + const active_field_name = union_obj.tag_ty.enumFieldName(active_index, mod); const msg = try sema.errMsg(block, src, "access of union field '{s}' while field '{s}' is active", .{ field_name, active_field_name }); errdefer msg.destroy(sema.gpa); try sema.addDeclaredHereNote(msg, union_ty); @@ -25083,7 +25038,7 @@ fn unionFieldVal( const union_obj = mod.typeToUnion(union_ty).?; const field_index = try sema.unionFieldIndex(block, union_ty, field_name, field_name_src); const field = union_obj.fields.values()[field_index]; - const enum_field_index = @intCast(u32, union_obj.tag_ty.enumFieldIndex(field_name).?); + const enum_field_index = @intCast(u32, union_obj.tag_ty.enumFieldIndex(field_name, mod).?); if (try sema.resolveMaybeUndefVal(union_byval)) |union_val| { if (union_val.isUndef()) return sema.addConstUndef(field.ty); @@ -25102,7 +25057,7 @@ fn unionFieldVal( } else { const msg = msg: { const active_index = tag_and_val.tag.castTag(.enum_field_index).?.data; - const active_field_name = union_obj.tag_ty.enumFieldName(active_index); + const active_field_name = union_obj.tag_ty.enumFieldName(active_index, mod); const msg = try sema.errMsg(block, src, "access of union field '{s}' while field '{s}' is active", .{ field_name, 
active_field_name }); errdefer msg.destroy(sema.gpa); try sema.addDeclaredHereNote(msg, union_ty); @@ -26191,7 +26146,7 @@ fn coerceExtra( // enum literal to enum const val = try sema.resolveConstValue(block, .unneeded, inst, ""); const bytes = val.castTag(.enum_literal).?.data; - const field_index = dest_ty.enumFieldIndex(bytes) orelse { + const field_index = dest_ty.enumFieldIndex(bytes, mod) orelse { const msg = msg: { const msg = try sema.errMsg( block, @@ -28707,7 +28662,7 @@ fn coerceEnumToUnion( try sema.requireRuntimeBlock(block, inst_src, null); - if (tag_ty.isNonexhaustiveEnum()) { + if (tag_ty.isNonexhaustiveEnum(mod)) { const msg = msg: { const msg = try sema.errMsg(block, inst_src, "runtime coercion to union '{}' from non-exhaustive enum", .{ union_ty.fmt(sema.mod), @@ -31605,7 +31560,6 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .error_set_single, .error_set_inferred, .error_set_merged, - .enum_simple, => false, .function => true, @@ -31646,14 +31600,6 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { const child_ty = ty.castTag(.anyframe_T).?.data; return sema.resolveTypeRequiresComptime(child_ty); }, - .enum_numbered => { - const tag_ty = ty.castTag(.enum_numbered).?.data.tag_ty; - return sema.resolveTypeRequiresComptime(tag_ty); - }, - .enum_full, .enum_nonexhaustive => { - const tag_ty = ty.cast(Type.Payload.EnumFull).?.data.tag_ty; - return sema.resolveTypeRequiresComptime(tag_ty); - }, }, else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { .int_type => false, @@ -31760,7 +31706,7 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .opaque_type => false, - .enum_type => @panic("TODO"), + .enum_type => |enum_type| try sema.resolveTypeRequiresComptime(enum_type.tag_ty.toType()), // values, not types .un => unreachable, @@ -32284,12 +32230,12 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void const gop = struct_obj.fields.getOrPutAssumeCapacity(field_name); if (gop.found_existing) { const msg = msg: { - const field_src = struct_obj.fieldSrcLoc(sema.mod, .{ .index = field_i }).lazy; + const field_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{ .index = field_i }).lazy; const msg = try sema.errMsg(&block_scope, field_src, "duplicate struct field: '{s}'", .{field_name}); errdefer msg.destroy(gpa); const prev_field_index = struct_obj.fields.getIndex(field_name).?; - const prev_field_src = struct_obj.fieldSrcLoc(sema.mod, .{ .index = prev_field_index }); + const prev_field_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{ .index = prev_field_index }); try sema.mod.errNoteNonLazy(prev_field_src, msg, "other field here", .{}); try sema.errNote(&block_scope, src, msg, "struct declared here", .{}); break :msg msg; @@ -32325,7 +32271,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void if (zir_field.type_ref != .none) { break :ty sema.resolveType(&block_scope, .unneeded, zir_field.type_ref) catch |err| switch (err) { error.NeededSourceLocation => { - const ty_src = struct_obj.fieldSrcLoc(sema.mod, .{ + const ty_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{ .index = field_i, .range = .type, }).lazy; @@ -32341,7 +32287,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void const ty_ref = try sema.resolveBody(&block_scope, body, struct_obj.zir_index); break :ty sema.analyzeAsType(&block_scope, .unneeded, ty_ref) catch |err| switch (err) { error.NeededSourceLocation => { - const ty_src = 
struct_obj.fieldSrcLoc(sema.mod, .{ + const ty_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{ .index = field_i, .range = .type, }).lazy; @@ -32360,7 +32306,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void if (field_ty.zigTypeTag(mod) == .Opaque) { const msg = msg: { - const ty_src = struct_obj.fieldSrcLoc(sema.mod, .{ + const ty_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{ .index = field_i, .range = .type, }).lazy; @@ -32374,7 +32320,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void } if (field_ty.zigTypeTag(mod) == .NoReturn) { const msg = msg: { - const ty_src = struct_obj.fieldSrcLoc(sema.mod, .{ + const ty_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{ .index = field_i, .range = .type, }).lazy; @@ -32388,7 +32334,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void } if (struct_obj.layout == .Extern and !try sema.validateExternType(field.ty, .struct_field)) { const msg = msg: { - const ty_src = struct_obj.fieldSrcLoc(sema.mod, .{ + const ty_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{ .index = field_i, .range = .type, }); @@ -32403,7 +32349,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void return sema.failWithOwnedErrorMsg(msg); } else if (struct_obj.layout == .Packed and !(validatePackedType(field.ty, mod))) { const msg = msg: { - const ty_src = struct_obj.fieldSrcLoc(sema.mod, .{ + const ty_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{ .index = field_i, .range = .type, }); @@ -32424,7 +32370,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void const align_ref = try sema.resolveBody(&block_scope, body, struct_obj.zir_index); field.abi_align = sema.analyzeAsAlign(&block_scope, .unneeded, align_ref) catch |err| switch (err) { error.NeededSourceLocation => { - const align_src = struct_obj.fieldSrcLoc(sema.mod, .{ + const align_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{ .index = field_i, .range = .alignment, }).lazy; @@ -32452,7 +32398,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void const field = &struct_obj.fields.values()[field_i]; const coerced = sema.coerce(&block_scope, field.ty, init, .unneeded) catch |err| switch (err) { error.NeededSourceLocation => { - const init_src = struct_obj.fieldSrcLoc(sema.mod, .{ + const init_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{ .index = field_i, .range = .value, }).lazy; @@ -32462,7 +32408,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void else => |e| return e, }; const default_val = (try sema.resolveMaybeUndefVal(coerced)) orelse { - const init_src = struct_obj.fieldSrcLoc(sema.mod, .{ + const init_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{ .index = field_i, .range = .value, }).lazy; @@ -32573,9 +32519,11 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { try union_obj.fields.ensureTotalCapacity(decl_arena_allocator, fields_len); var int_tag_ty: Type = undefined; - var enum_field_names: ?*Module.EnumNumbered.NameMap = null; - var enum_value_map: ?*Module.EnumNumbered.ValueMap = null; - var tag_ty_field_names: ?Module.EnumFull.NameMap = null; + var enum_field_names: []InternPool.NullTerminatedString = &.{}; + var enum_field_vals: []InternPool.Index = &.{}; + var enum_field_vals_map: std.ArrayHashMapUnmanaged(Value, void, Value.ArrayHashContext, false) = .{}; + var explicit_tags_seen: []bool = &.{}; + var explicit_enum_info: ?InternPool.Key.EnumType = null; 
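
As a rough standalone model of the flag-array matching declared above and used further down (the function name, signature, and error set here are assumptions for illustration, not code from this patch):

    const std = @import("std");

    fn checkUnionAgainstEnum(
        gpa: std.mem.Allocator,
        enum_names: []const []const u8,
        union_names: []const []const u8,
    ) !void {
        // One flag per enum tag; set when a union field claims that tag.
        const seen = try gpa.alloc(bool, enum_names.len);
        defer gpa.free(seen);
        @memset(seen, false);

        outer: for (union_names) |union_name| {
            for (enum_names, 0..) |enum_name, i| {
                if (std.mem.eql(u8, union_name, enum_name)) {
                    // Duplicate union fields are rejected separately, so
                    // each tag index is claimed at most once.
                    std.debug.assert(!seen[i]);
                    seen[i] = true;
                    continue :outer;
                }
            }
            return error.NoFieldNamedInEnum;
        }

        // Any tag left unseen is an enum field missing from the union.
        for (seen) |tag_seen| {
            if (!tag_seen) return error.EnumFieldMissingInUnion;
        }
    }

    test "union fields must cover the enum exactly" {
        try checkUnionAgainstEnum(
            std.testing.allocator,
            &.{ "a", "b", "c" },
            &.{ "c", "a", "b" },
        );
    }

The assert mirrors the invariant noted in this patch: duplicates were already rejected when the enum type itself was created, so a tag can never be seen twice.
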
if (tag_type_ref != .none) { const tag_ty_src: LazySrcLoc = .{ .node_offset_container_tag = src.node_offset.x }; const provided_ty = try sema.resolveType(&block_scope, tag_ty_src, tag_type_ref); @@ -32601,27 +32549,26 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { return sema.failWithOwnedErrorMsg(msg); } } - union_obj.tag_ty = try sema.generateUnionTagTypeNumbered(&block_scope, fields_len, provided_ty, union_obj); - const enum_obj = union_obj.tag_ty.castTag(.enum_numbered).?.data; - enum_field_names = &enum_obj.fields; - enum_value_map = &enum_obj.values; + enum_field_names = try sema.arena.alloc(InternPool.NullTerminatedString, fields_len); + enum_field_vals = try sema.arena.alloc(InternPool.Index, fields_len); } else { // The provided type is the enum tag type. - union_obj.tag_ty = try provided_ty.copy(decl_arena_allocator); - if (union_obj.tag_ty.zigTypeTag(mod) != .Enum) { - return sema.fail(&block_scope, tag_ty_src, "expected enum tag type, found '{}'", .{union_obj.tag_ty.fmt(sema.mod)}); - } + union_obj.tag_ty = provided_ty; + const enum_type = switch (mod.intern_pool.indexToKey(union_obj.tag_ty.ip_index)) { + .enum_type => |x| x, + else => return sema.fail(&block_scope, tag_ty_src, "expected enum tag type, found '{}'", .{union_obj.tag_ty.fmt(sema.mod)}), + }; // The fields of the union must match the enum exactly. - // Store a copy of the enum field names so we can check for - // missing or extraneous fields later. - tag_ty_field_names = try union_obj.tag_ty.enumFields().clone(sema.arena); + // A flag per field is used to check for missing and extraneous fields. + explicit_enum_info = enum_type; + explicit_tags_seen = try sema.arena.alloc(bool, enum_type.names.len); + @memset(explicit_tags_seen, false); } } else { // If auto_enum_tag is false, this is an untagged union. However, for semantic analysis // purposes, we still auto-generate an enum tag type the same way. That the union is // untagged is represented by the Type tag (union vs union_tagged). - union_obj.tag_ty = try sema.generateUnionTagTypeSimple(&block_scope, fields_len, union_obj); - enum_field_names = &union_obj.tag_ty.castTag(.enum_simple).?.data.fields; + enum_field_names = try sema.arena.alloc(InternPool.NullTerminatedString, fields_len); } if (fields_len == 0) { @@ -32675,11 +32622,11 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { break :blk try sema.resolveInst(tag_ref); } else .none; - if (enum_value_map) |map| { + if (enum_field_vals.len != 0) { const copied_val = if (tag_ref != .none) blk: { const val = sema.semaUnionFieldVal(&block_scope, .unneeded, int_tag_ty, tag_ref) catch |err| switch (err) { error.NeededSourceLocation => { - const val_src = union_obj.fieldSrcLoc(sema.mod, .{ + const val_src = mod.fieldSrcLoc(union_obj.owner_decl, .{ .index = field_i, .range = .value, }).lazy; @@ -32690,25 +32637,24 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { }; last_tag_val = val; - // This puts the memory into the union arena, not the enum arena, but - // it is OK since they share the same lifetime. 
- break :blk try val.copy(decl_arena_allocator); + break :blk val; } else blk: { const val = if (last_tag_val) |val| - try sema.intAdd(val, try mod.intValue(int_tag_ty, 1), int_tag_ty) + try sema.intAdd(val, Value.one_comptime_int, int_tag_ty) else try mod.intValue(int_tag_ty, 0); last_tag_val = val; - break :blk try val.copy(decl_arena_allocator); + break :blk val; }; - const gop = map.getOrPutAssumeCapacityContext(copied_val, .{ + enum_field_vals[field_i] = copied_val.ip_index; + const gop = enum_field_vals_map.getOrPutAssumeCapacityContext(copied_val, .{ .ty = int_tag_ty, .mod = mod, }); if (gop.found_existing) { - const field_src = union_obj.fieldSrcLoc(sema.mod, .{ .index = field_i }).lazy; - const other_field_src = union_obj.fieldSrcLoc(sema.mod, .{ .index = gop.index }).lazy; + const field_src = mod.fieldSrcLoc(union_obj.owner_decl, .{ .index = field_i }).lazy; + const other_field_src = mod.fieldSrcLoc(union_obj.owner_decl, .{ .index = gop.index }).lazy; const msg = msg: { const msg = try sema.errMsg(&block_scope, field_src, "enum tag value {} already taken", .{copied_val.fmtValue(int_tag_ty, sema.mod)}); errdefer msg.destroy(gpa); @@ -32721,8 +32667,9 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { // This string needs to outlive the ZIR code. const field_name = try decl_arena_allocator.dupe(u8, field_name_zir); - if (enum_field_names) |set| { - set.putAssumeCapacity(field_name, {}); + const field_name_ip = try mod.intern_pool.getOrPutString(gpa, field_name); + if (enum_field_names.len != 0) { + enum_field_names[field_i] = field_name_ip; } const field_ty: Type = if (!has_type) @@ -32732,7 +32679,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { else sema.resolveType(&block_scope, .unneeded, field_type_ref) catch |err| switch (err) { error.NeededSourceLocation => { - const ty_src = union_obj.fieldSrcLoc(sema.mod, .{ + const ty_src = mod.fieldSrcLoc(union_obj.owner_decl, .{ .index = field_i, .range = .type, }).lazy; @@ -32749,12 +32696,12 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { const gop = union_obj.fields.getOrPutAssumeCapacity(field_name); if (gop.found_existing) { const msg = msg: { - const field_src = union_obj.fieldSrcLoc(sema.mod, .{ .index = field_i }).lazy; + const field_src = mod.fieldSrcLoc(union_obj.owner_decl, .{ .index = field_i }).lazy; const msg = try sema.errMsg(&block_scope, field_src, "duplicate union field: '{s}'", .{field_name}); errdefer msg.destroy(gpa); const prev_field_index = union_obj.fields.getIndex(field_name).?; - const prev_field_src = union_obj.fieldSrcLoc(sema.mod, .{ .index = prev_field_index }).lazy; + const prev_field_src = mod.fieldSrcLoc(union_obj.owner_decl, .{ .index = prev_field_index }).lazy; try sema.mod.errNoteNonLazy(prev_field_src.toSrcLoc(decl, mod), msg, "other field here", .{}); try sema.errNote(&block_scope, src, msg, "union declared here", .{}); break :msg msg; @@ -32762,26 +32709,31 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { return sema.failWithOwnedErrorMsg(msg); } - if (tag_ty_field_names) |*names| { - const enum_has_field = names.orderedRemove(field_name); - if (!enum_has_field) { + if (explicit_enum_info) |tag_info| { + const enum_index = tag_info.nameIndex(mod.intern_pool, field_name_ip) orelse { const msg = msg: { - const ty_src = union_obj.fieldSrcLoc(sema.mod, .{ + const ty_src = mod.fieldSrcLoc(union_obj.owner_decl, .{ .index = field_i, .range = .type, }).lazy; - const msg = try 
sema.errMsg(&block_scope, ty_src, "no field named '{s}' in enum '{}'", .{ field_name, union_obj.tag_ty.fmt(sema.mod) }); + const msg = try sema.errMsg(&block_scope, ty_src, "no field named '{s}' in enum '{}'", .{ + field_name, union_obj.tag_ty.fmt(sema.mod), + }); errdefer msg.destroy(sema.gpa); try sema.addDeclaredHereNote(msg, union_obj.tag_ty); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); - } + }; + // No check for duplicate because the check already happened in order + // to create the enum type in the first place. + assert(!explicit_tags_seen[enum_index]); + explicit_tags_seen[enum_index] = true; } if (field_ty.zigTypeTag(mod) == .Opaque) { const msg = msg: { - const ty_src = union_obj.fieldSrcLoc(sema.mod, .{ + const ty_src = mod.fieldSrcLoc(union_obj.owner_decl, .{ .index = field_i, .range = .type, }).lazy; @@ -32795,7 +32747,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { } if (union_obj.layout == .Extern and !try sema.validateExternType(field_ty, .union_field)) { const msg = msg: { - const ty_src = union_obj.fieldSrcLoc(sema.mod, .{ + const ty_src = mod.fieldSrcLoc(union_obj.owner_decl, .{ .index = field_i, .range = .type, }); @@ -32810,7 +32762,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { return sema.failWithOwnedErrorMsg(msg); } else if (union_obj.layout == .Packed and !(validatePackedType(field_ty, mod))) { const msg = msg: { - const ty_src = union_obj.fieldSrcLoc(sema.mod, .{ + const ty_src = mod.fieldSrcLoc(union_obj.owner_decl, .{ .index = field_i, .range = .type, }); @@ -32833,7 +32785,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { if (align_ref != .none) { gop.value_ptr.abi_align = sema.resolveAlign(&block_scope, .unneeded, align_ref) catch |err| switch (err) { error.NeededSourceLocation => { - const align_src = union_obj.fieldSrcLoc(sema.mod, .{ + const align_src = mod.fieldSrcLoc(union_obj.owner_decl, .{ .index = field_i, .range = .alignment, }).lazy; @@ -32847,22 +32799,28 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { } } - if (tag_ty_field_names) |names| { - if (names.count() > 0) { + if (explicit_enum_info) |tag_info| { + if (tag_info.names.len > fields_len) { const msg = msg: { const msg = try sema.errMsg(&block_scope, src, "enum field(s) missing in union", .{}); errdefer msg.destroy(sema.gpa); const enum_ty = union_obj.tag_ty; - for (names.keys()) |field_name| { - const field_index = enum_ty.enumFieldIndex(field_name).?; - try sema.addFieldErrNote(enum_ty, field_index, msg, "field '{s}' missing, declared here", .{field_name}); + for (tag_info.names, 0..) 
|field_name, field_index| { + if (explicit_tags_seen[field_index]) continue; + try sema.addFieldErrNote(enum_ty, field_index, msg, "field '{s}' missing, declared here", .{ + mod.intern_pool.stringToSlice(field_name), + }); } try sema.addDeclaredHereNote(msg, union_obj.tag_ty); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); } + } else if (enum_field_vals.len != 0) { + union_obj.tag_ty = try sema.generateUnionTagTypeNumbered(&block_scope, enum_field_names, enum_field_vals, union_obj); + } else { + union_obj.tag_ty = try sema.generateUnionTagTypeSimple(&block_scope, enum_field_names, union_obj); } } @@ -32874,25 +32832,12 @@ fn semaUnionFieldVal(sema: *Sema, block: *Block, src: LazySrcLoc, int_tag_ty: Ty fn generateUnionTagTypeNumbered( sema: *Sema, block: *Block, - fields_len: u32, - int_ty: Type, + enum_field_names: []const InternPool.NullTerminatedString, + enum_field_vals: []const InternPool.Index, union_obj: *Module.Union, ) !Type { const mod = sema.mod; - var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa); - errdefer new_decl_arena.deinit(); - const new_decl_arena_allocator = new_decl_arena.allocator(); - - const enum_obj = try new_decl_arena_allocator.create(Module.EnumNumbered); - const enum_ty_payload = try new_decl_arena_allocator.create(Type.Payload.EnumNumbered); - enum_ty_payload.* = .{ - .base = .{ .tag = .enum_numbered }, - .data = enum_obj, - }; - const enum_ty = Type.initPayload(&enum_ty_payload.base); - const enum_val = try Value.Tag.ty.create(new_decl_arena_allocator, enum_ty); - const src_decl = mod.declPtr(block.src_decl); const new_decl_index = try mod.allocateNewDecl(block.namespace, src_decl.src_node, block.wip_capture_scope); errdefer mod.destroyDecl(new_decl_index); @@ -32903,53 +32848,45 @@ fn generateUnionTagTypeNumbered( }; try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, block.namespace, .{ .ty = Type.type, - .val = enum_val, + .val = undefined, }, name); - sema.mod.declPtr(new_decl_index).name_fully_qualified = true; - const new_decl = mod.declPtr(new_decl_index); + new_decl.name_fully_qualified = true; new_decl.owns_tv = true; new_decl.name_fully_qualified = true; errdefer mod.abortAnonDecl(new_decl_index); - const copied_int_ty = try int_ty.copy(new_decl_arena_allocator); - enum_obj.* = .{ - .owner_decl = new_decl_index, - .tag_ty = copied_int_ty, - .fields = .{}, - .values = .{}, - }; - // Here we pre-allocate the maps using the decl arena. 
- try enum_obj.fields.ensureTotalCapacity(new_decl_arena_allocator, fields_len); - try enum_obj.values.ensureTotalCapacityContext(new_decl_arena_allocator, fields_len, .{ - .ty = copied_int_ty, - .mod = mod, - }); - try new_decl.finalizeNewArena(&new_decl_arena); - return enum_ty; -} + const enum_ty = try mod.intern(.{ .enum_type = .{ + .decl = new_decl_index, + .namespace = .none, + .tag_ty = if (enum_field_vals.len == 0) + .noreturn_type + else + mod.intern_pool.typeOf(enum_field_vals[0]), + .names = enum_field_names, + .values = enum_field_vals, + .tag_mode = .explicit, + } }); + errdefer mod.intern_pool.remove(enum_ty); -fn generateUnionTagTypeSimple(sema: *Sema, block: *Block, fields_len: usize, maybe_union_obj: ?*Module.Union) !Type { - const mod = sema.mod; + new_decl.val = enum_ty.toValue(); - var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa); - errdefer new_decl_arena.deinit(); - const new_decl_arena_allocator = new_decl_arena.allocator(); + return enum_ty.toType(); +} - const enum_obj = try new_decl_arena_allocator.create(Module.EnumSimple); - const enum_ty_payload = try new_decl_arena_allocator.create(Type.Payload.EnumSimple); - enum_ty_payload.* = .{ - .base = .{ .tag = .enum_simple }, - .data = enum_obj, - }; - const enum_ty = Type.initPayload(&enum_ty_payload.base); - const enum_val = try Value.Tag.ty.create(new_decl_arena_allocator, enum_ty); +fn generateUnionTagTypeSimple( + sema: *Sema, + block: *Block, + enum_field_names: []const InternPool.NullTerminatedString, + maybe_union_obj: ?*Module.Union, +) !Type { + const mod = sema.mod; const new_decl_index = new_decl_index: { const union_obj = maybe_union_obj orelse { break :new_decl_index try mod.createAnonymousDecl(block, .{ .ty = Type.type, - .val = enum_val, + .val = undefined, }); }; const src_decl = mod.declPtr(block.src_decl); @@ -32962,24 +32899,31 @@ fn generateUnionTagTypeSimple(sema: *Sema, block: *Block, fields_len: usize, may }; try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, block.namespace, .{ .ty = Type.type, - .val = enum_val, + .val = undefined, }, name); - sema.mod.declPtr(new_decl_index).name_fully_qualified = true; + mod.declPtr(new_decl_index).name_fully_qualified = true; break :new_decl_index new_decl_index; }; + const enum_ty = try mod.intern(.{ .enum_type = .{ + .decl = new_decl_index, + .namespace = .none, + .tag_ty = if (enum_field_names.len == 0) + .noreturn_type + else + (try mod.smallestUnsignedInt(enum_field_names.len - 1)).ip_index, + .names = enum_field_names, + .values = &.{}, + .tag_mode = .auto, + } }); + errdefer mod.intern_pool.remove(enum_ty); + const new_decl = mod.declPtr(new_decl_index); new_decl.owns_tv = true; + new_decl.val = enum_ty.toValue(); errdefer mod.abortAnonDecl(new_decl_index); - enum_obj.* = .{ - .owner_decl = new_decl_index, - .fields = .{}, - }; - // Here we pre-allocate the maps using the decl arena. - try enum_obj.fields.ensureTotalCapacity(new_decl_arena_allocator, fields_len); - try new_decl.finalizeNewArena(&new_decl_arena); - return enum_ty; + return enum_ty.toType(); } fn getBuiltin(sema: *Sema, name: []const u8) CompileError!Air.Inst.Ref { @@ -33098,57 +33042,6 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { return Value.empty_struct; }, - .enum_numbered => { - const resolved_ty = try sema.resolveTypeFields(ty); - const enum_obj = resolved_ty.castTag(.enum_numbered).?.data; - // An explicit tag type is always provided for enum_numbered. 
- if (!(try sema.typeHasRuntimeBits(enum_obj.tag_ty))) { - return null; - } - if (enum_obj.fields.count() == 1) { - if (enum_obj.values.count() == 0) { - return Value.enum_field_0; // auto-numbered - } else { - return enum_obj.values.keys()[0]; - } - } else { - return null; - } - }, - .enum_full => { - const resolved_ty = try sema.resolveTypeFields(ty); - const enum_obj = resolved_ty.castTag(.enum_full).?.data; - if (!(try sema.typeHasRuntimeBits(enum_obj.tag_ty))) { - return null; - } - switch (enum_obj.fields.count()) { - 0 => return Value.@"unreachable", - 1 => if (enum_obj.values.count() == 0) { - return Value.enum_field_0; // auto-numbered - } else { - return enum_obj.values.keys()[0]; - }, - else => return null, - } - }, - .enum_simple => { - const resolved_ty = try sema.resolveTypeFields(ty); - const enum_simple = resolved_ty.castTag(.enum_simple).?.data; - switch (enum_simple.fields.count()) { - 0 => return Value.@"unreachable", - 1 => return Value.enum_field_0, - else => return null, - } - }, - .enum_nonexhaustive => { - const tag_ty = ty.castTag(.enum_nonexhaustive).?.data.tag_ty; - if (tag_ty.zigTypeTag(mod) != .ComptimeInt and !(try sema.typeHasRuntimeBits(tag_ty))) { - return Value.enum_field_0; - } else { - return null; - } - }, - .array => { if (ty.arrayLen(mod) == 0) return Value.initTag(.empty_array); @@ -33295,7 +33188,28 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { return only.toValue(); }, .opaque_type => null, - .enum_type => @panic("TODO"), + .enum_type => |enum_type| switch (enum_type.tag_mode) { + .nonexhaustive => { + if (enum_type.tag_ty != .comptime_int_type and + !(try sema.typeHasRuntimeBits(enum_type.tag_ty.toType()))) + { + return Value.enum_field_0; + } else { + return null; + } + }, + .auto, .explicit => switch (enum_type.names.len) { + 0 => return Value.@"unreachable", + 1 => { + if (enum_type.values.len == 0) { + return Value.enum_field_0; // auto-numbered + } else { + return enum_type.values[0].toValue(); + } + }, + else => return null, + }, + }, // values, not types .un => unreachable, @@ -33701,7 +33615,6 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .error_set_single, .error_set_inferred, .error_set_merged, - .enum_simple, => false, .function => true, @@ -33742,14 +33655,6 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { const child_ty = ty.castTag(.anyframe_T).?.data; return sema.typeRequiresComptime(child_ty); }, - .enum_numbered => { - const tag_ty = ty.castTag(.enum_numbered).?.data.tag_ty; - return sema.typeRequiresComptime(tag_ty); - }, - .enum_full, .enum_nonexhaustive => { - const tag_ty = ty.cast(Type.Payload.EnumFull).?.data.tag_ty; - return sema.typeRequiresComptime(tag_ty); - }, }, else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { .int_type => return false, @@ -33865,7 +33770,7 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { }, .opaque_type => false, - .enum_type => @panic("TODO"), + .enum_type => |enum_type| try sema.typeRequiresComptime(enum_type.tag_ty.toType()), // values, not types .un => unreachable, @@ -34435,42 +34340,19 @@ fn intInRange(sema: *Sema, tag_ty: Type, int_val: Value, end: usize) !bool { /// Asserts the type is an enum. 
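/// Auto-numbered enums (no explicit values) reduce to a range check against
/// the field count; otherwise the integer is first coerced into the tag type
/// and then looked up among the interned values via `tagValueIndex`.
/// Non-exhaustive enums are rejected by the assert below.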
fn enumHasInt(sema: *Sema, ty: Type, int: Value) CompileError!bool { const mod = sema.mod; - switch (ty.tag()) { - .enum_nonexhaustive => unreachable, - .enum_full => { - const enum_full = ty.castTag(.enum_full).?.data; - const tag_ty = enum_full.tag_ty; - if (enum_full.values.count() == 0) { - return sema.intInRange(tag_ty, int, enum_full.fields.count()); - } else { - return enum_full.values.containsContext(int, .{ - .ty = tag_ty, - .mod = sema.mod, - }); - } - }, - .enum_numbered => { - const enum_obj = ty.castTag(.enum_numbered).?.data; - const tag_ty = enum_obj.tag_ty; - if (enum_obj.values.count() == 0) { - return sema.intInRange(tag_ty, int, enum_obj.fields.count()); - } else { - return enum_obj.values.containsContext(int, .{ - .ty = tag_ty, - .mod = sema.mod, - }); - } - }, - .enum_simple => { - const enum_simple = ty.castTag(.enum_simple).?.data; - const fields_len = enum_simple.fields.count(); - const bits = std.math.log2_int_ceil(usize, fields_len); - const tag_ty = try mod.intType(.unsigned, bits); - return sema.intInRange(tag_ty, int, fields_len); - }, - - else => unreachable, + const enum_type = mod.intern_pool.indexToKey(ty.ip_index).enum_type; + assert(enum_type.tag_mode != .nonexhaustive); + if (enum_type.values.len == 0) { + // auto-numbered + return sema.intInRange(enum_type.tag_ty.toType(), int, enum_type.names.len); } + + // The `tagValueIndex` function call below relies on the type being the integer tag type. + // `getCoerced` assumes the value will fit the new type. + if (!(try sema.intFitsInType(int, enum_type.tag_ty.toType(), null))) return false; + const int_coerced = try mod.intern_pool.getCoerced(sema.gpa, int.ip_index, enum_type.tag_ty); + + return enum_type.tagValueIndex(mod.intern_pool, int_coerced) != null; } fn intAddWithOverflow( diff --git a/src/TypedValue.zig b/src/TypedValue.zig index cf9888f357..5f295e42f3 100644 --- a/src/TypedValue.zig +++ b/src/TypedValue.zig @@ -198,7 +198,7 @@ pub fn print( .empty_array => return writer.writeAll(".{}"), .enum_literal => return writer.print(".{}", .{std.zig.fmtId(val.castTag(.enum_literal).?.data)}), .enum_field_index => { - return writer.print(".{s}", .{ty.enumFieldName(val.castTag(.enum_field_index).?.data)}); + return writer.print(".{s}", .{ty.enumFieldName(val.castTag(.enum_field_index).?.data, mod)}); }, .bytes => return writer.print("\"{}\"", .{std.zig.fmtEscapes(val.castTag(.bytes).?.data)}), .str_lit => { diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index c1409e4977..a2f4f81053 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -3101,24 +3101,12 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue { }, .Enum => { if (val.castTag(.enum_field_index)) |field_index| { - switch (ty.tag()) { - .enum_simple => return WValue{ .imm32 = field_index.data }, - .enum_full, .enum_nonexhaustive => { - const enum_full = ty.cast(Type.Payload.EnumFull).?.data; - if (enum_full.values.count() != 0) { - const tag_val = enum_full.values.keys()[field_index.data]; - return func.lowerConstant(tag_val, enum_full.tag_ty); - } else { - return WValue{ .imm32 = field_index.data }; - } - }, - .enum_numbered => { - const index = field_index.data; - const enum_data = ty.castTag(.enum_numbered).?.data; - const enum_val = enum_data.values.keys()[index]; - return func.lowerConstant(enum_val, enum_data.tag_ty); - }, - else => return func.fail("TODO: lowerConstant for enum tag: {}", .{ty.tag()}), + const enum_type = mod.intern_pool.indexToKey(ty.ip_index).enum_type; + if 
(enum_type.values.len != 0) { + const tag_val = enum_type.values[field_index.data]; + return func.lowerConstant(tag_val.toValue(), enum_type.tag_ty.toType()); + } else { + return WValue{ .imm32 = field_index.data }; } } else { const int_tag_ty = try ty.intTagType(mod); @@ -3240,21 +3228,12 @@ fn valueAsI32(func: *const CodeGen, val: Value, ty: Type) !i32 { switch (ty.zigTypeTag(mod)) { .Enum => { if (val.castTag(.enum_field_index)) |field_index| { - switch (ty.tag()) { - .enum_simple => return @bitCast(i32, field_index.data), - .enum_full, .enum_nonexhaustive => { - const enum_full = ty.cast(Type.Payload.EnumFull).?.data; - if (enum_full.values.count() != 0) { - const tag_val = enum_full.values.keys()[field_index.data]; - return func.valueAsI32(tag_val, enum_full.tag_ty); - } else return @bitCast(i32, field_index.data); - }, - .enum_numbered => { - const index = field_index.data; - const enum_data = ty.castTag(.enum_numbered).?.data; - return func.valueAsI32(enum_data.values.keys()[index], enum_data.tag_ty); - }, - else => unreachable, + const enum_type = mod.intern_pool.indexToKey(ty.ip_index).enum_type; + if (enum_type.values.len != 0) { + const tag_val = enum_type.values[field_index.data]; + return func.valueAsI32(tag_val.toValue(), enum_type.tag_ty.toType()); + } else { + return @bitCast(i32, field_index.data); } } else { const int_tag_ty = try ty.intTagType(mod); @@ -6836,7 +6815,8 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 { // TODO: Make switch implementation generic so we can use a jump table for this when the tags are not sparse. // generate an if-else chain for each tag value as well as constant. - for (enum_ty.enumFields().keys(), 0..) |tag_name, field_index| { + for (enum_ty.enumFields(mod), 0..) |tag_name_ip, field_index| { + const tag_name = mod.intern_pool.stringToSlice(tag_name_ip); // for each tag name, create an unnamed const, // and then get a pointer to its value. 
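// The name bytes are deduplicated through `mod.string_literal_table` below,
// so repeated lowerings of the same tag name share a single constant.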
const name_ty = try mod.arrayType(.{ @@ -6846,7 +6826,7 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 { }); const string_bytes = &mod.string_literal_bytes; try string_bytes.ensureUnusedCapacity(mod.gpa, tag_name.len); - const gop = try mod.string_literal_table.getOrPutContextAdapted(mod.gpa, tag_name, Module.StringLiteralAdapter{ + const gop = try mod.string_literal_table.getOrPutContextAdapted(mod.gpa, @as([]const u8, tag_name), Module.StringLiteralAdapter{ .bytes = string_bytes, }, Module.StringLiteralContext{ .bytes = string_bytes, diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 7b93ff2059..72f416ca87 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -2016,7 +2016,7 @@ fn genLazy(self: *Self, lazy_sym: link.File.LazySymbol) InnerError!void { const ret_reg = param_regs[0]; const enum_mcv = MCValue{ .register = param_regs[1] }; - var exitlude_jump_relocs = try self.gpa.alloc(u32, enum_ty.enumFieldCount()); + var exitlude_jump_relocs = try self.gpa.alloc(u32, enum_ty.enumFieldCount(mod)); defer self.gpa.free(exitlude_jump_relocs); const data_reg = try self.register_manager.allocReg(null, gp); @@ -2027,9 +2027,10 @@ fn genLazy(self: *Self, lazy_sym: link.File.LazySymbol) InnerError!void { var data_off: i32 = 0; for ( exitlude_jump_relocs, - enum_ty.enumFields().keys(), + enum_ty.enumFields(mod), 0.., - ) |*exitlude_jump_reloc, tag_name, index| { + ) |*exitlude_jump_reloc, tag_name_ip, index| { + const tag_name = mod.intern_pool.stringToSlice(tag_name_ip); var tag_pl = Value.Payload.U32{ .base = .{ .tag = .enum_field_index }, .data = @intCast(u32, index), @@ -11413,7 +11414,7 @@ fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void { const union_obj = mod.typeToUnion(union_ty).?; const field_name = union_obj.fields.keys()[extra.field_index]; const tag_ty = union_obj.tag_ty; - const field_index = @intCast(u32, tag_ty.enumFieldIndex(field_name).?); + const field_index = @intCast(u32, tag_ty.enumFieldIndex(field_name, mod).?); var tag_pl = Value.Payload.U32{ .base = .{ .tag = .enum_field_index }, .data = field_index }; const tag_val = Value.initPayload(&tag_pl.base); const tag_int_val = try tag_val.enumToInt(tag_ty, mod); diff --git a/src/codegen.zig b/src/codegen.zig index 5c022392bf..148a69016a 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -156,7 +156,8 @@ pub fn generateLazySymbol( return Result.ok; } else if (lazy_sym.ty.zigTypeTag(mod) == .Enum) { alignment.* = 1; - for (lazy_sym.ty.enumFields().keys()) |tag_name| { + for (lazy_sym.ty.enumFields(mod)) |tag_name_ip| { + const tag_name = mod.intern_pool.stringToSlice(tag_name_ip); try code.ensureUnusedCapacity(tag_name.len + 1); code.appendSliceAssumeCapacity(tag_name); code.appendAssumeCapacity(0); @@ -1229,26 +1230,15 @@ pub fn genTypedValue( }, .Enum => { if (typed_value.val.castTag(.enum_field_index)) |field_index| { - switch (typed_value.ty.tag()) { - .enum_simple => { - return GenResult.mcv(.{ .immediate = field_index.data }); - }, - .enum_numbered, .enum_full, .enum_nonexhaustive => { - const enum_values = if (typed_value.ty.castTag(.enum_numbered)) |pl| - pl.data.values - else - typed_value.ty.cast(Type.Payload.EnumFull).?.data.values; - if (enum_values.count() != 0) { - const tag_val = enum_values.keys()[field_index.data]; - return genTypedValue(bin_file, src_loc, .{ - .ty = try typed_value.ty.intTagType(mod), - .val = tag_val, - }, owner_decl_index); - } else { - return GenResult.mcv(.{ .immediate = field_index.data }); - } - }, - else => 
unreachable, + const enum_type = mod.intern_pool.indexToKey(typed_value.ty.ip_index).enum_type; + if (enum_type.values.len != 0) { + const tag_val = enum_type.values[field_index.data]; + return genTypedValue(bin_file, src_loc, .{ + .ty = enum_type.tag_ty.toType(), + .val = tag_val.toValue(), + }, owner_decl_index); + } else { + return GenResult.mcv(.{ .immediate = field_index.data }); } } else { const int_tag_ty = try typed_value.ty.intTagType(mod); diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 872bdb94d3..a67d39471a 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -1288,27 +1288,12 @@ pub const DeclGen = struct { switch (val.tag()) { .enum_field_index => { const field_index = val.castTag(.enum_field_index).?.data; - switch (ty.tag()) { - .enum_simple => return writer.print("{d}", .{field_index}), - .enum_full, .enum_nonexhaustive => { - const enum_full = ty.cast(Type.Payload.EnumFull).?.data; - if (enum_full.values.count() != 0) { - const tag_val = enum_full.values.keys()[field_index]; - return dg.renderValue(writer, enum_full.tag_ty, tag_val, location); - } else { - return writer.print("{d}", .{field_index}); - } - }, - .enum_numbered => { - const enum_obj = ty.castTag(.enum_numbered).?.data; - if (enum_obj.values.count() != 0) { - const tag_val = enum_obj.values.keys()[field_index]; - return dg.renderValue(writer, enum_obj.tag_ty, tag_val, location); - } else { - return writer.print("{d}", .{field_index}); - } - }, - else => unreachable, + const enum_type = mod.intern_pool.indexToKey(ty.ip_index).enum_type; + if (enum_type.values.len != 0) { + const tag_val = enum_type.values[field_index]; + return dg.renderValue(writer, enum_type.tag_ty.toType(), tag_val.toValue(), location); + } else { + return writer.print("{d}", .{field_index}); } }, else => { @@ -2539,7 +2524,8 @@ pub fn genLazyFn(o: *Object, lazy_fn: LazyFnMap.Entry) !void { try w.writeByte('('); try o.dg.renderTypeAndName(w, enum_ty, .{ .identifier = "tag" }, Const, 0, .complete); try w.writeAll(") {\n switch (tag) {\n"); - for (enum_ty.enumFields().keys(), 0..) |name, index| { + for (enum_ty.enumFields(mod), 0..) 
|name_ip, index| { + const name = mod.intern_pool.stringToSlice(name_ip); var tag_pl: Value.Payload.U32 = .{ .base = .{ .tag = .enum_field_index }, .data = @intCast(u32, index), @@ -6930,7 +6916,7 @@ fn airUnionInit(f: *Function, inst: Air.Inst.Index) !CValue { const field: CValue = if (union_ty.unionTagTypeSafety(mod)) |tag_ty| field: { const layout = union_ty.unionGetLayout(mod); if (layout.tag_size != 0) { - const field_index = tag_ty.enumFieldIndex(field_name).?; + const field_index = tag_ty.enumFieldIndex(field_name, mod).?; var tag_pl: Value.Payload.U32 = .{ .base = .{ .tag = .enum_field_index }, diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index c299253442..583c08583c 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -1516,30 +1516,25 @@ pub const Object = struct { return enum_di_ty; } - const field_names = ty.enumFields().keys(); + const ip = &mod.intern_pool; + const enum_type = ip.indexToKey(ty.ip_index).enum_type; - const enumerators = try gpa.alloc(*llvm.DIEnumerator, field_names.len); + const enumerators = try gpa.alloc(*llvm.DIEnumerator, enum_type.names.len); defer gpa.free(enumerators); - var buf_field_index: Value.Payload.U32 = .{ - .base = .{ .tag = .enum_field_index }, - .data = undefined, - }; - const field_index_val = Value.initPayload(&buf_field_index.base); - - const int_ty = try ty.intTagType(mod); + const int_ty = enum_type.tag_ty.toType(); const int_info = ty.intInfo(mod); assert(int_info.bits != 0); - for (field_names, 0..) |field_name, i| { - const field_name_z = try gpa.dupeZ(u8, field_name); - defer gpa.free(field_name_z); + for (enum_type.names, 0..) |field_name_ip, i| { + const field_name_z = ip.stringToSlice(field_name_ip); - buf_field_index.data = @intCast(u32, i); - const field_int_val = try field_index_val.enumToInt(ty, mod); - - var bigint_space: Value.BigIntSpace = undefined; - const bigint = field_int_val.toBigInt(&bigint_space, mod); + var bigint_space: InternPool.Key.Int.Storage.BigIntSpace = undefined; + const storage = if (enum_type.values.len != 0) + ip.indexToKey(enum_type.values[i]).int.storage + else + InternPool.Key.Int.Storage{ .u64 = i }; + const bigint = storage.toBigInt(&bigint_space); if (bigint.limbs.len == 1) { enumerators[i] = dib.createEnumerator(field_name_z, bigint.limbs[0], int_info.signedness == .unsigned); @@ -8852,23 +8847,22 @@ pub const FuncGen = struct { fn getIsNamedEnumValueFunction(self: *FuncGen, enum_ty: Type) !*llvm.Value { const mod = self.dg.module; - const enum_decl = enum_ty.getOwnerDecl(mod); + const enum_type = mod.intern_pool.indexToKey(enum_ty.ip_index).enum_type; // TODO: detect when the type changes and re-emit this function. 
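// The emitted helper switches on the integer tag value, routing every known
// value to the "Named" block and anything else to "Unnamed", so it answers
// the is-named question without materializing the name itself.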
- const gop = try self.dg.object.named_enum_map.getOrPut(self.dg.gpa, enum_decl);
+ const gop = try self.dg.object.named_enum_map.getOrPut(self.dg.gpa, enum_type.decl);
 if (gop.found_existing) return gop.value_ptr.*;
- errdefer assert(self.dg.object.named_enum_map.remove(enum_decl));
+ errdefer assert(self.dg.object.named_enum_map.remove(enum_type.decl));
 var arena_allocator = std.heap.ArenaAllocator.init(self.gpa);
 defer arena_allocator.deinit();
 const arena = arena_allocator.allocator();
- const fqn = try mod.declPtr(enum_decl).getFullyQualifiedName(mod);
+ const fqn = try mod.declPtr(enum_type.decl).getFullyQualifiedName(mod);
 defer self.gpa.free(fqn);
 const llvm_fn_name = try std.fmt.allocPrintZ(arena, "__zig_is_named_enum_value_{s}", .{fqn});
- const int_tag_ty = try enum_ty.intTagType(mod);
- const param_types = [_]*llvm.Type{try self.dg.lowerType(int_tag_ty)};
+ const param_types = [_]*llvm.Type{try self.dg.lowerType(enum_type.tag_ty.toType())};
 const llvm_ret_ty = try self.dg.lowerType(Type.bool);
 const fn_type = llvm.functionType(llvm_ret_ty, &param_types, param_types.len, .False);
@@ -8891,13 +8885,12 @@ pub const FuncGen = struct {
 self.builder.positionBuilderAtEnd(entry_block);
 self.builder.clearCurrentDebugLocation();
- const fields = enum_ty.enumFields();
 const named_block = self.context.appendBasicBlock(fn_val, "Named");
 const unnamed_block = self.context.appendBasicBlock(fn_val, "Unnamed");
 const tag_int_value = fn_val.getParam(0);
- const switch_instr = self.builder.buildSwitch(tag_int_value, unnamed_block, @intCast(c_uint, fields.count()));
+ const switch_instr = self.builder.buildSwitch(tag_int_value, unnamed_block, @intCast(c_uint, enum_type.names.len));
- for (fields.keys(), 0..) |_, field_index| {
+ for (enum_type.names, 0..) |_, field_index| {
 const this_tag_int_value = int: {
 var tag_val_payload: Value.Payload.U32 = .{
 .base = .{ .tag = .enum_field_index },
@@ -8930,18 +8923,18 @@ pub const FuncGen = struct {
 fn getEnumTagNameFunction(self: *FuncGen, enum_ty: Type) !*llvm.Value {
 const mod = self.dg.module;
- const enum_decl = enum_ty.getOwnerDecl(mod);
+ const enum_type = mod.intern_pool.indexToKey(enum_ty.ip_index).enum_type;
 // TODO: detect when the type changes and re-emit this function.
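
A rough Zig model of what the emitted `__zig_tag_name_*` helper computes (the example enum and the null-return convention are assumptions for illustration; the real helper returns a name slice and branches to a "BadValue" block for unknown values):

    const std = @import("std");

    const E = enum(u8) { alpha = 1, beta = 3 };

    fn tagNameFromInt(int_value: u8) ?[]const u8 {
        // One switch case per field, keyed by the field's integer tag
        // value; the else branch stands in for the "BadValue" block.
        return switch (int_value) {
            1 => "alpha",
            3 => "beta",
            else => null,
        };
    }

    test "tag-name lookup by integer tag value" {
        try std.testing.expectEqualStrings("beta", tagNameFromInt(@enumToInt(E.beta)).?);
        try std.testing.expect(tagNameFromInt(2) == null);
    }
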
- const gop = try self.dg.object.decl_map.getOrPut(self.dg.gpa, enum_decl);
+ const gop = try self.dg.object.decl_map.getOrPut(self.dg.gpa, enum_type.decl);
 if (gop.found_existing) return gop.value_ptr.*;
- errdefer assert(self.dg.object.decl_map.remove(enum_decl));
+ errdefer assert(self.dg.object.decl_map.remove(enum_type.decl));
 var arena_allocator = std.heap.ArenaAllocator.init(self.gpa);
 defer arena_allocator.deinit();
 const arena = arena_allocator.allocator();
- const fqn = try mod.declPtr(enum_decl).getFullyQualifiedName(mod);
+ const fqn = try mod.declPtr(enum_type.decl).getFullyQualifiedName(mod);
 defer self.gpa.free(fqn);
 const llvm_fn_name = try std.fmt.allocPrintZ(arena, "__zig_tag_name_{s}", .{fqn});
@@ -8950,8 +8943,7 @@ pub const FuncGen = struct {
 const usize_llvm_ty = try self.dg.lowerType(Type.usize);
 const slice_alignment = slice_ty.abiAlignment(mod);
- const int_tag_ty = try enum_ty.intTagType(mod);
- const param_types = [_]*llvm.Type{try self.dg.lowerType(int_tag_ty)};
+ const param_types = [_]*llvm.Type{try self.dg.lowerType(enum_type.tag_ty.toType())};
 const fn_type = llvm.functionType(llvm_ret_ty, &param_types, param_types.len, .False);
 const fn_val = self.dg.object.llvm_module.addFunction(llvm_fn_name, fn_type);
@@ -8973,16 +8965,16 @@ pub const FuncGen = struct {
 self.builder.positionBuilderAtEnd(entry_block);
 self.builder.clearCurrentDebugLocation();
- const fields = enum_ty.enumFields();
 const bad_value_block = self.context.appendBasicBlock(fn_val, "BadValue");
 const tag_int_value = fn_val.getParam(0);
- const switch_instr = self.builder.buildSwitch(tag_int_value, bad_value_block, @intCast(c_uint, fields.count()));
+ const switch_instr = self.builder.buildSwitch(tag_int_value, bad_value_block, @intCast(c_uint, enum_type.names.len));
 const array_ptr_indices = [_]*llvm.Value{
 usize_llvm_ty.constNull(),
 usize_llvm_ty.constNull(),
 };
- for (fields.keys(), 0..) |name, field_index| {
+ for (enum_type.names, 0..) |name_ip, field_index| {
+ const name = mod.intern_pool.stringToSlice(name_ip);
 const str_init = self.context.constString(name.ptr, @intCast(c_uint, name.len), .False);
 const str_init_llvm_ty = str_init.typeOf();
 const str_global = self.dg.object.llvm_module.addGlobal(str_init_llvm_ty, "");
@@ -9429,7 +9421,7 @@ pub const FuncGen = struct {
 const tag_int = blk: {
 const tag_ty = union_ty.unionTagTypeHypothetical(mod);
 const union_field_name = union_obj.fields.keys()[extra.field_index];
- const enum_field_index = tag_ty.enumFieldIndex(union_field_name).?;
+ const enum_field_index = tag_ty.enumFieldIndex(union_field_name, mod).?;
 var tag_val_payload: Value.Payload.U32 = .{
 .base = .{ .tag = .enum_field_index },
 .data = @intCast(u32, enum_field_index),
diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig
index d1e8d9601b..e20e127800 100644
--- a/src/link/Dwarf.zig
+++ b/src/link/Dwarf.zig
@@ -401,14 +401,9 @@ pub const DeclState = struct {
 dbg_info_buffer.appendSliceAssumeCapacity(enum_name);
 dbg_info_buffer.appendAssumeCapacity(0);
- const fields = ty.enumFields();
- const values: ?Module.EnumFull.ValueMap = switch (ty.tag()) {
- .enum_full, .enum_nonexhaustive => ty.cast(Type.Payload.EnumFull).?.data.values,
- .enum_simple => null,
- .enum_numbered => ty.castTag(.enum_numbered).?.data.values,
- else => unreachable,
- };
- for (fields.keys(), 0..) |field_name, field_i| {
+ const enum_type = mod.intern_pool.indexToKey(ty.ip_index).enum_type;
+ for (enum_type.names, 0..) 
|field_name_index, field_i| { + const field_name = mod.intern_pool.stringToSlice(field_name_index); // DW.AT.enumerator try dbg_info_buffer.ensureUnusedCapacity(field_name.len + 2 + @sizeOf(u64)); dbg_info_buffer.appendAssumeCapacity(@enumToInt(AbbrevKind.enum_variant)); @@ -416,14 +411,14 @@ pub const DeclState = struct { dbg_info_buffer.appendSliceAssumeCapacity(field_name); dbg_info_buffer.appendAssumeCapacity(0); // DW.AT.const_value, DW.FORM.data8 - const value: u64 = if (values) |vals| value: { - if (vals.count() == 0) break :value @intCast(u64, field_i); // auto-numbered - const value = vals.keys()[field_i]; + const value: u64 = value: { + if (enum_type.values.len == 0) break :value field_i; // auto-numbered + const value = enum_type.values[field_i]; // TODO do not assume a 64bit enum value - could be bigger. // See https://github.com/ziglang/zig/issues/645 - const field_int_val = try value.enumToInt(ty, mod); + const field_int_val = try value.toValue().enumToInt(ty, mod); break :value @bitCast(u64, field_int_val.toSignedInt(mod)); - } else @intCast(u64, field_i); + }; mem.writeInt(u64, dbg_info_buffer.addManyAsArrayAssumeCapacity(8), value, target_endian); } diff --git a/src/type.zig b/src/type.zig index ab02b29d49..a2644ebff4 100644 --- a/src/type.zig +++ b/src/type.zig @@ -62,12 +62,6 @@ pub const Type = struct { .tuple, .anon_struct, => return .Struct, - - .enum_full, - .enum_nonexhaustive, - .enum_simple, - .enum_numbered, - => return .Enum, }, else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { .int_type => return .Int, @@ -566,22 +560,6 @@ pub const Type = struct { return true; }, - - .enum_full, .enum_nonexhaustive => { - const a_enum_obj = a.cast(Payload.EnumFull).?.data; - const b_enum_obj = (b.cast(Payload.EnumFull) orelse return false).data; - return a_enum_obj == b_enum_obj; - }, - .enum_simple => { - const a_enum_obj = a.cast(Payload.EnumSimple).?.data; - const b_enum_obj = (b.cast(Payload.EnumSimple) orelse return false).data; - return a_enum_obj == b_enum_obj; - }, - .enum_numbered => { - const a_enum_obj = a.cast(Payload.EnumNumbered).?.data; - const b_enum_obj = (b.cast(Payload.EnumNumbered) orelse return false).data; - return a_enum_obj == b_enum_obj; - }, } } @@ -727,22 +705,6 @@ pub const Type = struct { field_val.hash(field_ty, hasher, mod); } }, - - .enum_full, .enum_nonexhaustive => { - const enum_obj: *const Module.EnumFull = ty.cast(Payload.EnumFull).?.data; - std.hash.autoHash(hasher, std.builtin.TypeId.Enum); - std.hash.autoHash(hasher, enum_obj); - }, - .enum_simple => { - const enum_obj: *const Module.EnumSimple = ty.cast(Payload.EnumSimple).?.data; - std.hash.autoHash(hasher, std.builtin.TypeId.Enum); - std.hash.autoHash(hasher, enum_obj); - }, - .enum_numbered => { - const enum_obj: *const Module.EnumNumbered = ty.cast(Payload.EnumNumbered).?.data; - std.hash.autoHash(hasher, std.builtin.TypeId.Enum); - std.hash.autoHash(hasher, enum_obj); - }, } } @@ -920,9 +882,6 @@ pub const Type = struct { .error_set => return self.copyPayloadShallow(allocator, Payload.ErrorSet), .error_set_inferred => return self.copyPayloadShallow(allocator, Payload.ErrorSetInferred), .error_set_single => return self.copyPayloadShallow(allocator, Payload.Name), - .enum_simple => return self.copyPayloadShallow(allocator, Payload.EnumSimple), - .enum_numbered => return self.copyPayloadShallow(allocator, Payload.EnumNumbered), - .enum_full, .enum_nonexhaustive => return self.copyPayloadShallow(allocator, Payload.EnumFull), } } @@ -995,25 +954,6 @@ pub const Type = struct { 
while (true) { const t = ty.tag(); switch (t) { - .enum_full, .enum_nonexhaustive => { - const enum_full = ty.cast(Payload.EnumFull).?.data; - return writer.print("({s} decl={d})", .{ - @tagName(t), enum_full.owner_decl, - }); - }, - .enum_simple => { - const enum_simple = ty.castTag(.enum_simple).?.data; - return writer.print("({s} decl={d})", .{ - @tagName(t), enum_simple.owner_decl, - }); - }, - .enum_numbered => { - const enum_numbered = ty.castTag(.enum_numbered).?.data; - return writer.print("({s} decl={d})", .{ - @tagName(t), enum_numbered.owner_decl, - }); - }, - .function => { const payload = ty.castTag(.function).?.data; try writer.writeAll("fn("); @@ -1199,22 +1139,6 @@ pub const Type = struct { .inferred_alloc_const => unreachable, .inferred_alloc_mut => unreachable, - .enum_full, .enum_nonexhaustive => { - const enum_full = ty.cast(Payload.EnumFull).?.data; - const decl = mod.declPtr(enum_full.owner_decl); - try decl.renderFullyQualifiedName(mod, writer); - }, - .enum_simple => { - const enum_simple = ty.castTag(.enum_simple).?.data; - const decl = mod.declPtr(enum_simple.owner_decl); - try decl.renderFullyQualifiedName(mod, writer); - }, - .enum_numbered => { - const enum_numbered = ty.castTag(.enum_numbered).?.data; - const decl = mod.declPtr(enum_numbered.owner_decl); - try decl.renderFullyQualifiedName(mod, writer); - }, - .error_set_inferred => { const func = ty.castTag(.error_set_inferred).?.data.func; @@ -1500,7 +1424,10 @@ pub const Type = struct { const decl = mod.declPtr(opaque_type.decl); try decl.renderFullyQualifiedName(mod, writer); }, - .enum_type => @panic("TODO"), + .enum_type => |enum_type| { + const decl = mod.declPtr(enum_type.decl); + try decl.renderFullyQualifiedName(mod, writer); + }, // values, not types .un => unreachable, @@ -1593,19 +1520,6 @@ pub const Type = struct { } }, - .enum_full => { - const enum_full = ty.castTag(.enum_full).?.data; - return enum_full.tag_ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat); - }, - .enum_simple => { - const enum_simple = ty.castTag(.enum_simple).?.data; - return enum_simple.fields.count() >= 2; - }, - .enum_numbered, .enum_nonexhaustive => { - const int_tag_ty = try ty.intTagType(mod); - return int_tag_ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat); - }, - .array => return ty.arrayLen(mod) != 0 and try ty.childType(mod).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat), .array_sentinel => return ty.childType(mod).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat), @@ -1766,7 +1680,7 @@ pub const Type = struct { }, .opaque_type => true, - .enum_type => @panic("TODO"), + .enum_type => |enum_type| enum_type.tag_ty.toType().hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat), // values, not types .un => unreachable, @@ -1789,9 +1703,7 @@ pub const Type = struct { .empty_struct_type => false, .none => switch (ty.tag()) { - .pointer, - .enum_numbered, - => true, + .pointer => true, .error_set, .error_set_single, @@ -1799,17 +1711,12 @@ pub const Type = struct { .error_set_merged, // These are function bodies, not function pointers. 
.function, - .enum_simple, .error_union, .anyframe_T, .tuple, .anon_struct, => false, - .enum_full, - .enum_nonexhaustive, - => !ty.cast(Payload.EnumFull).?.data.tag_ty_inferred, - .inferred_alloc_mut => unreachable, .inferred_alloc_const => unreachable, @@ -1886,7 +1793,10 @@ pub const Type = struct { .tagged => false, }, .opaque_type => false, - .enum_type => @panic("TODO"), + .enum_type => |enum_type| switch (enum_type.tag_mode) { + .auto => false, + .explicit, .nonexhaustive => true, + }, // values, not types .un => unreachable, @@ -2116,11 +2026,6 @@ pub const Type = struct { return AbiAlignmentAdvanced{ .scalar = big_align }; }, - .enum_full, .enum_nonexhaustive, .enum_simple, .enum_numbered => { - const int_tag_ty = try ty.intTagType(mod); - return AbiAlignmentAdvanced{ .scalar = int_tag_ty.abiAlignment(mod) }; - }, - .inferred_alloc_const, .inferred_alloc_mut, => unreachable, @@ -2283,7 +2188,7 @@ pub const Type = struct { return abiAlignmentAdvancedUnion(ty, mod, strat, union_obj, union_type.hasTag()); }, .opaque_type => return AbiAlignmentAdvanced{ .scalar = 1 }, - .enum_type => @panic("TODO"), + .enum_type => |enum_type| return AbiAlignmentAdvanced{ .scalar = enum_type.tag_ty.toType().abiAlignment(mod) }, // values, not types .un => unreachable, @@ -2475,11 +2380,6 @@ pub const Type = struct { return AbiSizeAdvanced{ .scalar = ty.structFieldOffset(field_count, mod) }; }, - .enum_simple, .enum_full, .enum_nonexhaustive, .enum_numbered => { - const int_tag_ty = try ty.intTagType(mod); - return AbiSizeAdvanced{ .scalar = int_tag_ty.abiSize(mod) }; - }, - .array => { const payload = ty.castTag(.array).?.data; switch (try payload.elem_type.abiSizeAdvanced(mod, strat)) { @@ -2705,7 +2605,7 @@ pub const Type = struct { return abiSizeAdvancedUnion(ty, mod, strat, union_obj, union_type.hasTag()); }, .opaque_type => unreachable, // no size available - .enum_type => @panic("TODO"), + .enum_type => |enum_type| return AbiSizeAdvanced{ .scalar = enum_type.tag_ty.toType().abiSize(mod) }, // values, not types .un => unreachable, @@ -2823,11 +2723,6 @@ pub const Type = struct { return total; }, - .enum_simple, .enum_full, .enum_nonexhaustive, .enum_numbered => { - const int_tag_ty = try ty.intTagType(mod); - return try bitSizeAdvanced(int_tag_ty, mod, opt_sema); - }, - .array => { const payload = ty.castTag(.array).?.data; const elem_size = std.math.max(payload.elem_type.abiAlignment(mod), payload.elem_type.abiSize(mod)); @@ -2964,7 +2859,7 @@ pub const Type = struct { return size; }, .opaque_type => unreachable, - .enum_type => @panic("TODO"), + .enum_type => |enum_type| return bitSizeAdvanced(enum_type.tag_ty.toType(), mod, opt_sema), // values, not types .un => unreachable, @@ -3433,7 +3328,7 @@ pub const Type = struct { pub fn unionTagFieldIndex(ty: Type, enum_tag: Value, mod: *Module) ?usize { const union_obj = mod.typeToUnion(ty).?; const index = union_obj.tag_ty.enumTagFieldIndex(enum_tag, mod) orelse return null; - const name = union_obj.tag_ty.enumFieldName(index); + const name = union_obj.tag_ty.enumFieldName(index, mod); return union_obj.fields.getIndex(name); } @@ -3690,15 +3585,6 @@ pub const Type = struct { while (true) switch (ty.ip_index) { .none => switch (ty.tag()) { - .enum_full, .enum_nonexhaustive => ty = ty.cast(Payload.EnumFull).?.data.tag_ty, - .enum_numbered => ty = ty.castTag(.enum_numbered).?.data.tag_ty, - .enum_simple => { - const enum_obj = ty.castTag(.enum_simple).?.data; - const field_count = enum_obj.fields.count(); - if (field_count == 0) return .{ .signedness = 
.unsigned, .bits = 0 }; - return .{ .signedness = .unsigned, .bits = smallestUnsignedBits(field_count - 1) }; - }, - .error_set, .error_set_single, .error_set_inferred, .error_set_merged => { // TODO revisit this when error sets support custom int types return .{ .signedness = .unsigned, .bits = 16 }; @@ -3728,7 +3614,7 @@ pub const Type = struct { assert(struct_obj.layout == .Packed); ty = struct_obj.backing_int_ty; }, - .enum_type => @panic("TODO"), + .enum_type => |enum_type| ty = enum_type.tag_ty.toType(), .ptr_type => unreachable, .array_type => unreachable, @@ -3964,47 +3850,6 @@ pub const Type = struct { return Value.empty_struct; }, - .enum_numbered => { - const enum_numbered = ty.castTag(.enum_numbered).?.data; - // An explicit tag type is always provided for enum_numbered. - if (enum_numbered.tag_ty.hasRuntimeBits(mod)) { - return null; - } - assert(enum_numbered.fields.count() == 1); - return enum_numbered.values.keys()[0]; - }, - .enum_full => { - const enum_full = ty.castTag(.enum_full).?.data; - if (enum_full.tag_ty.hasRuntimeBits(mod)) { - return null; - } - switch (enum_full.fields.count()) { - 0 => return Value.@"unreachable", - 1 => if (enum_full.values.count() == 0) { - return Value.enum_field_0; // auto-numbered - } else { - return enum_full.values.keys()[0]; - }, - else => return null, - } - }, - .enum_simple => { - const enum_simple = ty.castTag(.enum_simple).?.data; - switch (enum_simple.fields.count()) { - 0 => return Value.@"unreachable", - 1 => return Value.enum_field_0, - else => return null, - } - }, - .enum_nonexhaustive => { - const tag_ty = ty.castTag(.enum_nonexhaustive).?.data.tag_ty; - if (!tag_ty.hasRuntimeBits(mod)) { - return Value.enum_field_0; - } else { - return null; - } - }, - .array => { if (ty.arrayLen(mod) == 0) return Value.initTag(.empty_array); @@ -4123,7 +3968,28 @@ pub const Type = struct { return only.toValue(); }, .opaque_type => return null, - .enum_type => @panic("TODO"), + .enum_type => |enum_type| switch (enum_type.tag_mode) { + .nonexhaustive => { + if (enum_type.tag_ty != .comptime_int_type and + !enum_type.tag_ty.toType().hasRuntimeBits(mod)) + { + return Value.enum_field_0; + } else { + return null; + } + }, + .auto, .explicit => switch (enum_type.names.len) { + 0 => return Value.@"unreachable", + 1 => { + if (enum_type.values.len == 0) { + return Value.enum_field_0; // auto-numbered + } else { + return enum_type.values[0].toValue(); + } + }, + else => return null, + }, + }, // values, not types .un => unreachable, @@ -4151,7 +4017,6 @@ pub const Type = struct { .error_set_single, .error_set_inferred, .error_set_merged, - .enum_simple, => false, // These are function bodies, not function pointers. @@ -4191,14 +4056,6 @@ pub const Type = struct { const child_ty = ty.castTag(.anyframe_T).?.data; return child_ty.comptimeOnly(mod); }, - .enum_numbered => { - const tag_ty = ty.castTag(.enum_numbered).?.data.tag_ty; - return tag_ty.comptimeOnly(mod); - }, - .enum_full, .enum_nonexhaustive => { - const tag_ty = ty.cast(Type.Payload.EnumFull).?.data.tag_ty; - return tag_ty.comptimeOnly(mod); - }, }, else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { .int_type => false, @@ -4293,7 +4150,7 @@ pub const Type = struct { .opaque_type => false, - .enum_type => @panic("TODO"), + .enum_type => |enum_type| enum_type.tag_ty.toType().comptimeOnly(mod), // values, not types .un => unreachable, @@ -4346,19 +4203,14 @@ pub const Type = struct { /// Returns null if the type has no namespace. 
pub fn getNamespaceIndex(ty: Type, mod: *Module) Module.Namespace.OptionalIndex { - return switch (ty.ip_index) { - .none => switch (ty.tag()) { - .enum_full => ty.castTag(.enum_full).?.data.namespace.toOptional(), - .enum_nonexhaustive => ty.castTag(.enum_nonexhaustive).?.data.namespace.toOptional(), - else => .none, - }, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .opaque_type => |opaque_type| opaque_type.namespace.toOptional(), - .struct_type => |struct_type| struct_type.namespace, - .union_type => |union_type| mod.unionPtr(union_type.index).namespace.toOptional(), + if (ty.ip_index == .none) return .none; + return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .opaque_type => |opaque_type| opaque_type.namespace.toOptional(), + .struct_type => |struct_type| struct_type.namespace, + .union_type => |union_type| mod.unionPtr(union_type.index).namespace.toOptional(), + .enum_type => |enum_type| enum_type.namespace, - else => .none, - }, + else => .none, }; } @@ -4444,29 +4296,23 @@ pub const Type = struct { /// Asserts the type is an enum or a union. pub fn intTagType(ty: Type, mod: *Module) !Type { - return switch (ty.ip_index) { - .none => switch (ty.tag()) { - .enum_full, .enum_nonexhaustive => ty.cast(Payload.EnumFull).?.data.tag_ty, - .enum_numbered => ty.castTag(.enum_numbered).?.data.tag_ty, - .enum_simple => { - const enum_simple = ty.castTag(.enum_simple).?.data; - const field_count = enum_simple.fields.count(); - const bits: u16 = if (field_count == 0) 0 else std.math.log2_int_ceil(usize, field_count); - return mod.intType(.unsigned, bits); - }, - else => unreachable, - }, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .union_type => |union_type| mod.unionPtr(union_type.index).tag_ty.intTagType(mod), - else => unreachable, - }, + return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .union_type => |union_type| mod.unionPtr(union_type.index).tag_ty.intTagType(mod), + .enum_type => |enum_type| enum_type.tag_ty.toType(), + else => unreachable, }; } - pub fn isNonexhaustiveEnum(ty: Type) bool { - return switch (ty.tag()) { - .enum_nonexhaustive => true, - else => false, + pub fn isNonexhaustiveEnum(ty: Type, mod: *Module) bool { + return switch (ty.ip_index) { + .none => false, + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .enum_type => |enum_type| switch (enum_type.tag_mode) { + .nonexhaustive => true, + .auto, .explicit => false, + }, + else => false, + }, }; } @@ -4510,25 +4356,26 @@ pub const Type = struct { return try Tag.error_set_merged.create(arena, names); } - pub fn enumFields(ty: Type) Module.EnumFull.NameMap { - return switch (ty.tag()) { - .enum_full, .enum_nonexhaustive => ty.cast(Payload.EnumFull).?.data.fields, - .enum_simple => ty.castTag(.enum_simple).?.data.fields, - .enum_numbered => ty.castTag(.enum_numbered).?.data.fields, - else => unreachable, - }; + pub fn enumFields(ty: Type, mod: *Module) []const InternPool.NullTerminatedString { + return mod.intern_pool.indexToKey(ty.ip_index).enum_type.names; } - pub fn enumFieldCount(ty: Type) usize { - return ty.enumFields().count(); + pub fn enumFieldCount(ty: Type, mod: *Module) usize { + return mod.intern_pool.indexToKey(ty.ip_index).enum_type.names.len; } - pub fn enumFieldName(ty: Type, field_index: usize) []const u8 { - return ty.enumFields().keys()[field_index]; + pub fn enumFieldName(ty: Type, field_index: usize, mod: *Module) [:0]const u8 { + const ip = &mod.intern_pool; + const field_name = ip.indexToKey(ty.ip_index).enum_type.names[field_index]; + 
return ip.stringToSlice(field_name); } - pub fn enumFieldIndex(ty: Type, field_name: []const u8) ?usize { - return ty.enumFields().getIndex(field_name); + pub fn enumFieldIndex(ty: Type, field_name: []const u8, mod: *Module) ?usize { + const ip = &mod.intern_pool; + const enum_type = ip.indexToKey(ty.ip_index).enum_type; + // If the string is not interned, then the field certainly is not present. + const field_name_interned = ip.getString(field_name).unwrap() orelse return null; + return enum_type.nameIndex(ip.*, field_name_interned); } /// Asserts `ty` is an enum. `enum_tag` can either be `enum_field_index` or @@ -4538,50 +4385,20 @@ pub const Type = struct { if (enum_tag.castTag(.enum_field_index)) |payload| { return @as(usize, payload.data); } - const S = struct { - fn fieldWithRange(int_ty: Type, int_val: Value, end: usize, m: *Module) ?usize { - if (int_val.compareAllWithZero(.lt, m)) return null; - const end_val = m.intValue(int_ty, end) catch |err| switch (err) { - // TODO: eliminate this failure condition - error.OutOfMemory => @panic("OOM"), - }; - if (int_val.compareScalar(.gte, end_val, int_ty, m)) return null; - return @intCast(usize, int_val.toUnsignedInt(m)); - } - }; - switch (ty.tag()) { - .enum_full, .enum_nonexhaustive => { - const enum_full = ty.cast(Payload.EnumFull).?.data; - const tag_ty = enum_full.tag_ty; - if (enum_full.values.count() == 0) { - return S.fieldWithRange(tag_ty, enum_tag, enum_full.fields.count(), mod); - } else { - return enum_full.values.getIndexContext(enum_tag, .{ - .ty = tag_ty, - .mod = mod, - }); - } - }, - .enum_numbered => { - const enum_obj = ty.castTag(.enum_numbered).?.data; - const tag_ty = enum_obj.tag_ty; - if (enum_obj.values.count() == 0) { - return S.fieldWithRange(tag_ty, enum_tag, enum_obj.fields.count(), mod); - } else { - return enum_obj.values.getIndexContext(enum_tag, .{ - .ty = tag_ty, - .mod = mod, - }); - } - }, - .enum_simple => { - const enum_simple = ty.castTag(.enum_simple).?.data; - const fields_len = enum_simple.fields.count(); - const bits = std.math.log2_int_ceil(usize, fields_len); - const tag_ty = mod.intType(.unsigned, bits) catch @panic("TODO: handle OOM here"); - return S.fieldWithRange(tag_ty, enum_tag, fields_len, mod); - }, - else => unreachable, + const ip = &mod.intern_pool; + const enum_type = ip.indexToKey(ty.ip_index).enum_type; + const tag_ty = enum_type.tag_ty.toType(); + if (enum_type.values.len == 0) { + if (enum_tag.compareAllWithZero(.lt, mod)) return null; + const end_val = mod.intValue(tag_ty, enum_type.names.len) catch |err| switch (err) { + // TODO: eliminate this failure condition + error.OutOfMemory => @panic("OOM"), + }; + if (enum_tag.compareScalar(.gte, end_val, tag_ty, mod)) return null; + return @intCast(usize, enum_tag.toUnsignedInt(mod)); + } else { + assert(ip.typeOf(enum_tag.ip_index) == enum_type.tag_ty); + return enum_type.tagValueIndex(ip.*, enum_tag.ip_index); } } @@ -4905,18 +4722,6 @@ pub const Type = struct { switch (ty.ip_index) { .empty_struct_type => return null, .none => switch (ty.tag()) { - .enum_full, .enum_nonexhaustive => { - const enum_full = ty.cast(Payload.EnumFull).?.data; - return enum_full.srcLoc(mod); - }, - .enum_numbered => { - const enum_numbered = ty.castTag(.enum_numbered).?.data; - return enum_numbered.srcLoc(mod); - }, - .enum_simple => { - const enum_simple = ty.castTag(.enum_simple).?.data; - return enum_simple.srcLoc(mod); - }, .error_set => { const error_set = ty.castTag(.error_set).?.data; return error_set.srcLoc(mod); @@ -4934,6 +4739,7 @@ pub 
const Type = struct { return union_obj.srcLoc(mod); }, .opaque_type => |opaque_type| mod.opaqueSrcLoc(opaque_type), + .enum_type => |enum_type| mod.declPtr(enum_type.decl).srcLoc(mod), else => null, }, } @@ -4946,15 +4752,6 @@ pub const Type = struct { pub fn getOwnerDeclOrNull(ty: Type, mod: *Module) ?Module.Decl.Index { switch (ty.ip_index) { .none => switch (ty.tag()) { - .enum_full, .enum_nonexhaustive => { - const enum_full = ty.cast(Payload.EnumFull).?.data; - return enum_full.owner_decl; - }, - .enum_numbered => return ty.castTag(.enum_numbered).?.data.owner_decl, - .enum_simple => { - const enum_simple = ty.castTag(.enum_simple).?.data; - return enum_simple.owner_decl; - }, .error_set => { const error_set = ty.castTag(.error_set).?.data; return error_set.owner_decl; @@ -4972,6 +4769,7 @@ pub const Type = struct { return union_obj.owner_decl; }, .opaque_type => |opaque_type| opaque_type.decl, + .enum_type => |enum_type| enum_type.decl, else => null, }, } @@ -5012,10 +4810,6 @@ pub const Type = struct { /// The type is the inferred error set of a specific function. error_set_inferred, error_set_merged, - enum_simple, - enum_numbered, - enum_full, - enum_nonexhaustive, pub const last_no_payload_tag = Tag.inferred_alloc_const; pub const no_payload_count = @enumToInt(last_no_payload_tag) + 1; @@ -5040,9 +4834,6 @@ pub const Type = struct { .function => Payload.Function, .error_union => Payload.ErrorUnion, .error_set_single => Payload.Name, - .enum_full, .enum_nonexhaustive => Payload.EnumFull, - .enum_simple => Payload.EnumSimple, - .enum_numbered => Payload.EnumNumbered, .tuple => Payload.Tuple, .anon_struct => Payload.AnonStruct, }; @@ -5341,21 +5132,6 @@ pub const Type = struct { values: []Value, }; }; - - pub const EnumFull = struct { - base: Payload, - data: *Module.EnumFull, - }; - - pub const EnumSimple = struct { - base: Payload = .{ .tag = .enum_simple }, - data: *Module.EnumSimple, - }; - - pub const EnumNumbered = struct { - base: Payload = .{ .tag = .enum_numbered }, - data: *Module.EnumNumbered, - }; }; pub const @"u1": Type = .{ .ip_index = .u1_type, .legacy = undefined }; diff --git a/src/value.zig b/src/value.zig index dfeaa44428..3f7e8050a4 100644 --- a/src/value.zig +++ b/src/value.zig @@ -675,80 +675,50 @@ pub const Value = struct { const field_index = switch (val.tag()) { .enum_field_index => val.castTag(.enum_field_index).?.data, .the_only_possible_value => blk: { - assert(ty.enumFieldCount() == 1); + assert(ty.enumFieldCount(mod) == 1); break :blk 0; }, .enum_literal => i: { const name = val.castTag(.enum_literal).?.data; - break :i ty.enumFieldIndex(name).?; + break :i ty.enumFieldIndex(name, mod).?; }, // Assume it is already an integer and return it directly. else => return val, }; - switch (ty.tag()) { - .enum_full, .enum_nonexhaustive => { - const enum_full = ty.cast(Type.Payload.EnumFull).?.data; - if (enum_full.values.count() != 0) { - return enum_full.values.keys()[field_index]; - } else { - // Field index and integer values are the same. - return mod.intValue(enum_full.tag_ty, field_index); - } - }, - .enum_numbered => { - const enum_obj = ty.castTag(.enum_numbered).?.data; - if (enum_obj.values.count() != 0) { - return enum_obj.values.keys()[field_index]; - } else { - // Field index and integer values are the same. - return mod.intValue(enum_obj.tag_ty, field_index); - } - }, - .enum_simple => { - // Field index and integer values are the same. 
-                const tag_ty = try ty.intTagType(mod);
-                return mod.intValue(tag_ty, field_index);
-            },
-            else => unreachable,
+        const enum_type = mod.intern_pool.indexToKey(ty.ip_index).enum_type;
+        if (enum_type.values.len != 0) {
+            return enum_type.values[field_index].toValue();
+        } else {
+            // Field index and integer values are the same.
+            return mod.intValue(enum_type.tag_ty.toType(), field_index);
         }
     }
 
     pub fn tagName(val: Value, ty: Type, mod: *Module) []const u8 {
         if (ty.zigTypeTag(mod) == .Union) return val.unionTag().tagName(ty.unionTagTypeHypothetical(mod), mod);
 
+        const enum_type = mod.intern_pool.indexToKey(ty.ip_index).enum_type;
+
         const field_index = switch (val.tag()) {
             .enum_field_index => val.castTag(.enum_field_index).?.data,
             .the_only_possible_value => blk: {
-                assert(ty.enumFieldCount() == 1);
+                assert(ty.enumFieldCount(mod) == 1);
                 break :blk 0;
             },
             .enum_literal => return val.castTag(.enum_literal).?.data,
             else => field_index: {
-                const values = switch (ty.tag()) {
-                    .enum_full, .enum_nonexhaustive => ty.cast(Type.Payload.EnumFull).?.data.values,
-                    .enum_numbered => ty.castTag(.enum_numbered).?.data.values,
-                    .enum_simple => Module.EnumFull.ValueMap{},
-                    else => unreachable,
-                };
-                if (values.entries.len == 0) {
+                if (enum_type.values.len == 0) {
                     // auto-numbered enum
                     break :field_index @intCast(u32, val.toUnsignedInt(mod));
                 }
-                const int_tag_ty = ty.intTagType(mod) catch |err| switch (err) {
-                    error.OutOfMemory => @panic("OOM"), // TODO handle this failure
-                };
-                break :field_index @intCast(u32, values.getIndexContext(val, .{ .ty = int_tag_ty, .mod = mod }).?);
+                const field_index = enum_type.tagValueIndex(mod.intern_pool, val.ip_index).?;
+                break :field_index @intCast(u32, field_index);
             },
         };
 
-        const fields = switch (ty.tag()) {
-            .enum_full, .enum_nonexhaustive => ty.cast(Type.Payload.EnumFull).?.data.fields,
-            .enum_numbered => ty.castTag(.enum_numbered).?.data.fields,
-            .enum_simple => ty.castTag(.enum_simple).?.data.fields,
-            else => unreachable,
-        };
-        return fields.keys()[field_index];
+        const field_name = enum_type.names[field_index];
+        return mod.intern_pool.stringToSlice(field_name);
     }
 
     /// Asserts the value is an integer.
--
cgit v1.2.3


From 88dbd62bcbac24c09791a7838d2f08c2f540967a Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Fri, 12 May 2023 16:22:37 -0700
Subject: stage2: move enum tag values into the InternPool

I'm seeing a new assertion trip: the call to `enumTagFieldIndex` in the
implementation of `@Type` is attempting to query the field index of a
union's enum tag, but the type of the enum tag value provided is not the
same as the union's tag type. Most likely this is a problem with type
coercion, since values are now typed.

Another problem is that I added some hacks in std.builtin because I
didn't see any convenient way to access inner declarations from Sema.
That should definitely be cleaned up before merging this branch.
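For reference, a minimal sketch of how the new representation is meant to be
used (a hypothetical call site, not part of this patch; assumes a `*Module`
named `mod` and an enum `Type` `ty` are in scope):

    // An enum tag is now interned as a (type, integer) pair, where the
    // integer is itself interned with the enum's integer tag type.
    // Create the tag value for declaration-order field 1:
    const tag = try mod.enumValueFieldIndex(ty, 1);

    // Mapping a tag value back to its field index goes through the
    // interned integer rather than a big-int comparison:
    const enum_type = mod.intern_pool.indexToKey(ty.ip_index).enum_type;
    const field_index = enum_type.tagValueIndex(&mod.intern_pool, tag.ip_index);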
--- lib/std/builtin.zig | 7 + src/Air.zig | 3 + src/InternPool.zig | 315 +++++++++++++++++++--------- src/Module.zig | 47 ++++- src/Sema.zig | 487 ++++++++++++++++++++++---------------------- src/TypedValue.zig | 21 +- src/Zir.zig | 3 + src/arch/wasm/CodeGen.zig | 87 ++++---- src/arch/x86_64/CodeGen.zig | 12 +- src/codegen.zig | 28 +-- src/codegen/c.zig | 56 ++--- src/codegen/llvm.zig | 44 ++-- src/codegen/spirv.zig | 8 +- src/link/Coff.zig | 2 +- src/link/Elf.zig | 2 +- src/link/MachO.zig | 2 +- src/link/Wasm.zig | 2 +- src/type.zig | 59 +++--- src/value.zig | 254 +++++++++-------------- 19 files changed, 768 insertions(+), 671 deletions(-) (limited to 'src/arch') diff --git a/lib/std/builtin.zig b/lib/std/builtin.zig index b1f1406684..429654bd4a 100644 --- a/lib/std/builtin.zig +++ b/lib/std/builtin.zig @@ -223,6 +223,13 @@ pub const SourceLocation = struct { pub const TypeId = std.meta.Tag(Type); pub const TypeInfo = @compileError("deprecated; use Type"); +/// TODO this is a temporary alias because I don't see any handy methods in +/// Sema for accessing inner declarations. +pub const PtrSize = Type.Pointer.Size; +/// TODO this is a temporary alias because I don't see any handy methods in +/// Sema for accessing inner declarations. +pub const TmpContainerLayoutAlias = Type.ContainerLayout; + /// This data structure is used by the Zig language code generation and /// therefore must be kept in sync with the compiler implementation. pub const Type = union(enum) { diff --git a/src/Air.zig b/src/Air.zig index 8059b9e57f..e82a70100f 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -845,6 +845,7 @@ pub const Inst = struct { pub const Ref = enum(u32) { u1_type = @enumToInt(InternPool.Index.u1_type), + u5_type = @enumToInt(InternPool.Index.u5_type), u8_type = @enumToInt(InternPool.Index.u8_type), i8_type = @enumToInt(InternPool.Index.i8_type), u16_type = @enumToInt(InternPool.Index.u16_type), @@ -913,6 +914,8 @@ pub const Inst = struct { zero_u8 = @enumToInt(InternPool.Index.zero_u8), one = @enumToInt(InternPool.Index.one), one_usize = @enumToInt(InternPool.Index.one_usize), + one_u5 = @enumToInt(InternPool.Index.one_u5), + four_u5 = @enumToInt(InternPool.Index.four_u5), negative_one = @enumToInt(InternPool.Index.negative_one), calling_convention_c = @enumToInt(InternPool.Index.calling_convention_c), calling_convention_inline = @enumToInt(InternPool.Index.calling_convention_inline), diff --git a/src/InternPool.zig b/src/InternPool.zig index 2677fba45d..eace006d4c 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -144,6 +144,9 @@ pub const Key = union(enum) { opaque_type: OpaqueType, enum_type: EnumType, + /// Typed `undefined`. This will never be `none`; untyped `undefined` is represented + /// via `simple_value` and has a named `Index` tag for it. + undef: Index, simple_value: SimpleValue, extern_func: struct { ty: Index, @@ -155,13 +158,12 @@ pub const Key = union(enum) { lib_name: u32, }, int: Key.Int, + /// A specific enum tag, indicated by the integer tag value. + enum_tag: Key.EnumTag, float: Key.Float, ptr: Ptr, opt: Opt, - enum_tag: struct { - ty: Index, - tag: BigIntConst, - }, + /// An instance of a struct, array, or vector. /// Each element/field stored as an `Index`. /// In the case of sentinel-terminated arrays, the sentinel value *is* stored, @@ -284,21 +286,33 @@ pub const Key = union(enum) { }; /// Look up field index based on field name. 
- pub fn nameIndex(self: EnumType, ip: InternPool, name: NullTerminatedString) ?usize { + pub fn nameIndex(self: EnumType, ip: *const InternPool, name: NullTerminatedString) ?u32 { const map = &ip.maps.items[@enumToInt(self.names_map.unwrap().?)]; const adapter: NullTerminatedString.Adapter = .{ .strings = self.names }; - return map.getIndexAdapted(name, adapter); + const field_index = map.getIndexAdapted(name, adapter) orelse return null; + return @intCast(u32, field_index); } /// Look up field index based on tag value. /// Asserts that `values_map` is not `none`. /// This function returns `null` when `tag_val` does not have the /// integer tag type of the enum. - pub fn tagValueIndex(self: EnumType, ip: InternPool, tag_val: Index) ?usize { + pub fn tagValueIndex(self: EnumType, ip: *const InternPool, tag_val: Index) ?u32 { assert(tag_val != .none); - const map = &ip.maps.items[@enumToInt(self.values_map.unwrap().?)]; - const adapter: Index.Adapter = .{ .indexes = self.values }; - return map.getIndexAdapted(tag_val, adapter); + if (self.values_map.unwrap()) |values_map| { + const map = &ip.maps.items[@enumToInt(values_map)]; + const adapter: Index.Adapter = .{ .indexes = self.values }; + const field_index = map.getIndexAdapted(tag_val, adapter) orelse return null; + return @intCast(u32, field_index); + } + // Auto-numbered enum. Convert `tag_val` to field index. + switch (ip.indexToKey(tag_val).int.storage) { + .u64 => |x| { + if (x >= self.names.len) return null; + return @intCast(u32, x); + }, + .i64, .big_int => return null, // out of range + } } }; @@ -362,6 +376,13 @@ pub const Key = union(enum) { }; }; + pub const EnumTag = struct { + /// The enum type. + ty: Index, + /// The integer tag value which has the integer tag type of the enum. + int: Index, + }; + pub const Float = struct { ty: Index, /// The storage used must match the size of the float type being represented. 
@@ -436,6 +457,8 @@ pub const Key = union(enum) { .struct_type, .union_type, .un, + .undef, + .enum_tag, => |info| std.hash.autoHash(hasher, info), .opaque_type => |opaque_type| std.hash.autoHash(hasher, opaque_type.decl), @@ -471,12 +494,6 @@ pub const Key = union(enum) { } }, - .enum_tag => |enum_tag| { - std.hash.autoHash(hasher, enum_tag.ty); - std.hash.autoHash(hasher, enum_tag.tag.positive); - for (enum_tag.tag.limbs) |limb| std.hash.autoHash(hasher, limb); - }, - .aggregate => |aggregate| { std.hash.autoHash(hasher, aggregate.ty); for (aggregate.fields) |field| std.hash.autoHash(hasher, field); @@ -522,6 +539,10 @@ pub const Key = union(enum) { const b_info = b.simple_value; return a_info == b_info; }, + .undef => |a_info| { + const b_info = b.undef; + return a_info == b_info; + }, .extern_func => |a_info| { const b_info = b.extern_func; return std.meta.eql(a_info, b_info); @@ -542,6 +563,10 @@ pub const Key = union(enum) { const b_info = b.un; return std.meta.eql(a_info, b_info); }, + .enum_tag => |a_info| { + const b_info = b.enum_tag; + return std.meta.eql(a_info, b_info); + }, .ptr => |a_info| { const b_info = b.ptr; @@ -612,13 +637,6 @@ pub const Key = union(enum) { }; }, - .enum_tag => |a_info| { - const b_info = b.enum_tag; - _ = a_info; - _ = b_info; - @panic("TODO"); - }, - .opaque_type => |a_info| { const b_info = b.opaque_type; return a_info.decl == b_info.decl; @@ -636,7 +654,7 @@ pub const Key = union(enum) { } pub fn typeOf(key: Key) Index { - switch (key) { + return switch (key) { .int_type, .ptr_type, .array_type, @@ -648,7 +666,7 @@ pub const Key = union(enum) { .union_type, .opaque_type, .enum_type, - => return .type_type, + => .type_type, inline .ptr, .int, @@ -658,18 +676,20 @@ pub const Key = union(enum) { .enum_tag, .aggregate, .un, - => |x| return x.ty, + => |x| x.ty, + + .undef => |x| x, .simple_value => |s| switch (s) { - .undefined => return .undefined_type, - .void => return .void_type, - .null => return .null_type, - .false, .true => return .bool_type, - .empty_struct => return .empty_struct_type, - .@"unreachable" => return .noreturn_type, + .undefined => .undefined_type, + .void => .void_type, + .null => .null_type, + .false, .true => .bool_type, + .empty_struct => .empty_struct_type, + .@"unreachable" => .noreturn_type, .generic_poison => unreachable, }, - } + }; } }; @@ -693,6 +713,7 @@ pub const Index = enum(u32) { pub const last_value: Index = .empty_struct; u1_type, + u5_type, u8_type, i8_type, u16_type, @@ -769,6 +790,10 @@ pub const Index = enum(u32) { one, /// `1` (usize) one_usize, + /// `1` (u5) + one_u5, + /// `4` (u5) + four_u5, /// `-1` (comptime_int) negative_one, /// `std.builtin.CallingConvention.C` @@ -834,6 +859,12 @@ pub const static_keys = [_]Key{ .bits = 1, } }, + // u5_type + .{ .int_type = .{ + .signedness = .unsigned, + .bits = 5, + } }, + .{ .int_type = .{ .signedness = .unsigned, .bits = 8, @@ -1021,25 +1052,30 @@ pub const static_keys = [_]Key{ .storage = .{ .u64 = 1 }, } }, + // one_u5 + .{ .int = .{ + .ty = .u5_type, + .storage = .{ .u64 = 1 }, + } }, + // four_u5 + .{ .int = .{ + .ty = .u5_type, + .storage = .{ .u64 = 4 }, + } }, + // negative_one .{ .int = .{ .ty = .comptime_int_type, .storage = .{ .i64 = -1 }, } }, - + // calling_convention_c .{ .enum_tag = .{ .ty = .calling_convention_type, - .tag = .{ - .limbs = &.{@enumToInt(std.builtin.CallingConvention.C)}, - .positive = true, - }, + .int = .one_u5, } }, - + // calling_convention_inline .{ .enum_tag = .{ .ty = .calling_convention_type, - .tag = .{ - .limbs = 
&.{@enumToInt(std.builtin.CallingConvention.Inline)}, - .positive = true, - }, + .int = .four_u5, } }, .{ .simple_value = .void }, @@ -1118,6 +1154,10 @@ pub const Tag = enum(u8) { /// `data` is `Module.Union.Index`. type_union_safety, + /// Typed `undefined`. + /// `data` is `Index` of the type. + /// Untyped `undefined` is stored instead via `simple_value`. + undef, /// A value that can be represented with only an enum tag. /// data is SimpleValue enum value. simple_value, @@ -1132,7 +1172,7 @@ pub const Tag = enum(u8) { /// already contains the optional type corresponding to this payload. opt_payload, /// An optional value that is null. - /// data is Index of the payload type. + /// data is Index of the optional type. opt_null, /// Type: u8 /// data is integer value @@ -1155,18 +1195,18 @@ pub const Tag = enum(u8) { /// A comptime_int that fits in an i32. /// data is integer value bitcasted to u32. int_comptime_int_i32, + /// An integer value that fits in 32 bits with an explicitly provided type. + /// data is extra index of `IntSmall`. + int_small, /// A positive integer value. - /// data is a limbs index to Int. + /// data is a limbs index to `Int`. int_positive, /// A negative integer value. - /// data is a limbs index to Int. + /// data is a limbs index to `Int`. int_negative, - /// An enum tag identified by a positive integer value. - /// data is a limbs index to Int. - enum_tag_positive, - /// An enum tag identified by a negative integer value. - /// data is a limbs index to Int. - enum_tag_negative, + /// An enum tag value. + /// data is extra index of `Key.EnumTag`. + enum_tag, /// An f16 value. /// data is float value bitcasted to u16 and zero-extended. float_f16, @@ -1404,6 +1444,11 @@ pub const Int = struct { limbs_len: u32, }; +pub const IntSmall = struct { + ty: Index, + value: u32, +}; + /// A f64 value, broken up into 2 u32 parts. pub const Float64 = struct { piece0: u32, @@ -1479,15 +1524,28 @@ pub fn init(ip: *InternPool, gpa: Allocator) !void { try ip.items.ensureUnusedCapacity(gpa, static_keys.len); try ip.map.ensureUnusedCapacity(gpa, static_keys.len); try ip.extra.ensureUnusedCapacity(gpa, static_keys.len); - try ip.limbs.ensureUnusedCapacity(gpa, 2); // This inserts all the statically-known values into the intern pool in the // order expected. for (static_keys) |key| _ = ip.get(gpa, key) catch unreachable; - // Sanity check. - assert(ip.indexToKey(.bool_true).simple_value == .true); - assert(ip.indexToKey(.bool_false).simple_value == .false); + if (std.debug.runtime_safety) { + // Sanity check. 
+ assert(ip.indexToKey(.bool_true).simple_value == .true); + assert(ip.indexToKey(.bool_false).simple_value == .false); + + const cc_inline = ip.indexToKey(.calling_convention_inline).enum_tag.int; + const cc_c = ip.indexToKey(.calling_convention_c).enum_tag.int; + + assert(ip.indexToKey(cc_inline).int.storage.u64 == + @enumToInt(std.builtin.CallingConvention.Inline)); + + assert(ip.indexToKey(cc_c).int.storage.u64 == + @enumToInt(std.builtin.CallingConvention.C)); + + assert(ip.indexToKey(ip.typeOf(cc_inline)).int_type.bits == + @typeInfo(@typeInfo(std.builtin.CallingConvention).Enum.tag_type).Int.bits); + } assert(ip.items.len == static_keys.len); } @@ -1634,6 +1692,7 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { .type_enum_explicit => indexToKeyEnum(ip, data, .explicit), .type_enum_nonexhaustive => indexToKeyEnum(ip, data, .nonexhaustive), + .undef => .{ .undef = @intToEnum(Index, data) }, .opt_null => .{ .opt = .{ .ty = @intToEnum(Index, data), .val = .none, @@ -1687,8 +1746,13 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { } }, .int_positive => indexToKeyBigInt(ip, data, true), .int_negative => indexToKeyBigInt(ip, data, false), - .enum_tag_positive => @panic("TODO"), - .enum_tag_negative => @panic("TODO"), + .int_small => { + const info = ip.extraData(IntSmall, data); + return .{ .int = .{ + .ty = info.ty, + .storage = .{ .u64 = info.value }, + } }; + }, .float_f16 => .{ .float = .{ .ty = .f16_type, .storage = .{ .f16 = @bitCast(f16, @intCast(u16, data)) }, @@ -1734,6 +1798,7 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { }; }, .union_value => .{ .un = ip.extraData(Key.Union, data) }, + .enum_tag => .{ .enum_tag = ip.extraData(Key.EnumTag, data) }, }; } @@ -1896,6 +1961,13 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .data = @enumToInt(simple_value), }); }, + .undef => |ty| { + assert(ty != .none); + ip.items.appendAssumeCapacity(.{ + .tag = .undef, + .data = @enumToInt(ty), + }); + }, .struct_type => |struct_type| { ip.items.appendAssumeCapacity(if (struct_type.index.unwrap()) |i| .{ @@ -2112,10 +2184,32 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { } switch (int.storage) { .big_int => |big_int| { + if (big_int.to(u32)) |casted| { + ip.items.appendAssumeCapacity(.{ + .tag = .int_small, + .data = try ip.addExtra(gpa, IntSmall{ + .ty = int.ty, + .value = casted, + }), + }); + return @intToEnum(Index, ip.items.len - 1); + } else |_| {} + const tag: Tag = if (big_int.positive) .int_positive else .int_negative; try addInt(ip, gpa, int.ty, tag, big_int.limbs); }, - inline .i64, .u64 => |x| { + inline .u64, .i64 => |x| { + if (std.math.cast(u32, x)) |casted| { + ip.items.appendAssumeCapacity(.{ + .tag = .int_small, + .data = try ip.addExtra(gpa, IntSmall{ + .ty = int.ty, + .value = casted, + }), + }); + return @intToEnum(Index, ip.items.len - 1); + } + var buf: [2]Limb = undefined; const big_int = BigIntMutable.init(&buf, x).toConst(); const tag: Tag = if (big_int.positive) .int_positive else .int_negative; @@ -2124,6 +2218,16 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { } }, + .enum_tag => |enum_tag| { + assert(enum_tag.ty != .none); + assert(enum_tag.int != .none); + + ip.items.appendAssumeCapacity(.{ + .tag = .enum_tag, + .data = try ip.addExtra(gpa, enum_tag), + }); + }, + .float => |float| { switch (float.ty) { .f16_type => ip.items.appendAssumeCapacity(.{ @@ -2164,11 +2268,6 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) 
Allocator.Error!Index { } }, - .enum_tag => |enum_tag| { - const tag: Tag = if (enum_tag.tag.positive) .enum_tag_positive else .enum_tag_negative; - try addInt(ip, gpa, enum_tag.ty, tag, enum_tag.tag.limbs); - }, - .aggregate => |aggregate| { if (aggregate.fields.len == 0) { ip.items.appendAssumeCapacity(.{ @@ -2671,44 +2770,59 @@ pub fn slicePtrType(ip: InternPool, i: Index) Index { /// Given an existing value, returns the same value but with the supplied type. /// Only some combinations are allowed: -/// * int to int +/// * int <=> int +/// * int <=> enum pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Allocator.Error!Index { switch (ip.indexToKey(val)) { - .int => |int| { - // The key cannot be passed directly to `get`, otherwise in the case of - // big_int storage, the limbs would be invalidated before they are read. - // Here we pre-reserve the limbs to ensure that the logic in `addInt` will - // not use an invalidated limbs pointer. - switch (int.storage) { - .u64 => |x| return ip.get(gpa, .{ .int = .{ - .ty = new_ty, - .storage = .{ .u64 = x }, - } }), - .i64 => |x| return ip.get(gpa, .{ .int = .{ - .ty = new_ty, - .storage = .{ .i64 = x }, - } }), - - .big_int => |big_int| { - const positive = big_int.positive; - const limbs = ip.limbsSliceToIndex(big_int.limbs); - // This line invalidates the limbs slice, but the indexes computed in the - // previous line are still correct. - try reserveLimbs(ip, gpa, @typeInfo(Int).Struct.fields.len + big_int.limbs.len); - return ip.get(gpa, .{ .int = .{ - .ty = new_ty, - .storage = .{ .big_int = .{ - .limbs = ip.limbsIndexToSlice(limbs), - .positive = positive, - } }, - } }); - }, - } + .int => |int| switch (ip.indexToKey(new_ty)) { + .enum_type => return ip.get(gpa, .{ .enum_tag = .{ + .ty = new_ty, + .int = val, + } }), + else => return getCoercedInts(ip, gpa, int, new_ty), + }, + .enum_tag => |enum_tag| { + // Assume new_ty is an integer type. + return getCoercedInts(ip, gpa, ip.indexToKey(enum_tag.int).int, new_ty); }, else => unreachable, } } +/// Asserts `val` has an integer type. +/// Assumes `new_ty` is an integer type. +pub fn getCoercedInts(ip: *InternPool, gpa: Allocator, int: Key.Int, new_ty: Index) Allocator.Error!Index { + // The key cannot be passed directly to `get`, otherwise in the case of + // big_int storage, the limbs would be invalidated before they are read. + // Here we pre-reserve the limbs to ensure that the logic in `addInt` will + // not use an invalidated limbs pointer. + switch (int.storage) { + .u64 => |x| return ip.get(gpa, .{ .int = .{ + .ty = new_ty, + .storage = .{ .u64 = x }, + } }), + .i64 => |x| return ip.get(gpa, .{ .int = .{ + .ty = new_ty, + .storage = .{ .i64 = x }, + } }), + + .big_int => |big_int| { + const positive = big_int.positive; + const limbs = ip.limbsSliceToIndex(big_int.limbs); + // This line invalidates the limbs slice, but the indexes computed in the + // previous line are still correct. 
+ try reserveLimbs(ip, gpa, @typeInfo(Int).Struct.fields.len + big_int.limbs.len); + return ip.get(gpa, .{ .int = .{ + .ty = new_ty, + .storage = .{ .big_int = .{ + .limbs = ip.limbsIndexToSlice(limbs), + .positive = positive, + } }, + } }); + }, + } +} + pub fn indexToStruct(ip: *InternPool, val: Index) Module.Struct.OptionalIndex { const tags = ip.items.items(.tag); if (val == .none) return .none; @@ -2805,6 +2919,7 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void { .type_union_safety, => @sizeOf(Module.Union) + @sizeOf(Module.Namespace) + @sizeOf(Module.Decl), + .undef => 0, .simple_type => 0, .simple_value => 0, .ptr_int => @sizeOf(PtrInt), @@ -2817,15 +2932,15 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void { .int_usize => 0, .int_comptime_int_u32 => 0, .int_comptime_int_i32 => 0, + .int_small => @sizeOf(IntSmall), .int_positive, .int_negative, - .enum_tag_positive, - .enum_tag_negative, => b: { const int = ip.limbData(Int, data); break :b @sizeOf(Int) + int.limbs_len * 8; }, + .enum_tag => @sizeOf(Key.EnumTag), .float_f16 => 0, .float_f32 => 0, @@ -2958,3 +3073,9 @@ pub fn stringToSlice(ip: InternPool, s: NullTerminatedString) [:0]const u8 { pub fn typeOf(ip: InternPool, index: Index) Index { return ip.indexToKey(index).typeOf(); } + +/// Assumes that the enum's field indexes equal its value tags. +pub fn toEnum(ip: InternPool, comptime E: type, i: Index) E { + const int = ip.indexToKey(i).enum_tag.int; + return @intToEnum(E, ip.indexToKey(int).int.storage.u64); +} diff --git a/src/Module.zig b/src/Module.zig index 426d274011..cf1fea3444 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -6896,6 +6896,43 @@ pub fn ptrIntValue_ptronly(mod: *Module, ty: Type, x: u64) Allocator.Error!Value return i.toValue(); } +/// Creates an enum tag value based on the integer tag value. +pub fn enumValue(mod: *Module, ty: Type, tag_int: InternPool.Index) Allocator.Error!Value { + if (std.debug.runtime_safety) { + const tag = ty.zigTypeTag(mod); + assert(tag == .Enum); + } + const i = try intern(mod, .{ .enum_tag = .{ + .ty = ty.ip_index, + .int = tag_int, + } }); + return i.toValue(); +} + +/// Creates an enum tag value based on the field index according to source code +/// declaration order. +pub fn enumValueFieldIndex(mod: *Module, ty: Type, field_index: u32) Allocator.Error!Value { + const ip = &mod.intern_pool; + const gpa = mod.gpa; + const enum_type = ip.indexToKey(ty.ip_index).enum_type; + + if (enum_type.values.len == 0) { + // Auto-numbered fields. + return (try ip.get(gpa, .{ .enum_tag = .{ + .ty = ty.ip_index, + .int = try ip.get(gpa, .{ .int = .{ + .ty = enum_type.tag_ty, + .storage = .{ .u64 = field_index }, + } }), + } })).toValue(); + } + + return (try ip.get(gpa, .{ .enum_tag = .{ + .ty = ty.ip_index, + .int = enum_type.values[field_index], + } })).toValue(); +} + pub fn intValue(mod: *Module, ty: Type, x: anytype) Allocator.Error!Value { if (std.debug.runtime_safety) { const tag = ty.zigTypeTag(mod); @@ -6967,8 +7004,8 @@ pub fn smallestUnsignedInt(mod: *Module, max: u64) Allocator.Error!Type { /// `max`. Asserts that neither value is undef. 
/// TODO: if #3806 is implemented, this becomes trivial pub fn intFittingRange(mod: *Module, min: Value, max: Value) !Type { - assert(!min.isUndef()); - assert(!max.isUndef()); + assert(!min.isUndef(mod)); + assert(!max.isUndef(mod)); if (std.debug.runtime_safety) { assert(Value.order(min, max, mod).compare(.lte)); @@ -6990,7 +7027,7 @@ pub fn intFittingRange(mod: *Module, min: Value, max: Value) !Type { /// twos-complement integer; otherwise in an unsigned integer. /// Asserts that `val` is not undef. If `val` is negative, asserts that `sign` is true. pub fn intBitsForValue(mod: *Module, val: Value, sign: bool) u16 { - assert(!val.isUndef()); + assert(!val.isUndef(mod)); const key = mod.intern_pool.indexToKey(val.ip_index); switch (key.int.storage) { @@ -7193,3 +7230,7 @@ pub fn fieldSrcLoc(mod: *Module, owner_decl_index: Decl.Index, query: FieldSrcQu return owner_decl.srcLoc(mod); } } + +pub fn toEnum(mod: *Module, comptime E: type, val: Value) E { + return mod.intern_pool.toEnum(E, val.ip_index); +} diff --git a/src/Sema.zig b/src/Sema.zig index 9e89ca89ef..2fc364ebd7 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -1904,8 +1904,9 @@ fn resolveDefinedValue( src: LazySrcLoc, air_ref: Air.Inst.Ref, ) CompileError!?Value { + const mod = sema.mod; if (try sema.resolveMaybeUndefVal(air_ref)) |val| { - if (val.isUndef()) { + if (val.isUndef(mod)) { if (block.is_typeof) return null; return sema.failWithUseOfUndef(block, src); } @@ -4333,7 +4334,7 @@ fn validateUnionInit( const tag_ty = union_ty.unionTagTypeHypothetical(mod); const enum_field_index = @intCast(u32, tag_ty.enumFieldIndex(field_name, mod).?); - const tag_val = try Value.Tag.enum_field_index.create(sema.arena, enum_field_index); + const tag_val = try mod.enumValueFieldIndex(tag_ty, enum_field_index); if (init_val) |val| { // Our task is to delete all the `field_ptr` and `store` instructions, and insert @@ -4832,7 +4833,7 @@ fn zirValidateDeref(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErr const elem_ty = operand_ty.elemType2(mod); if (try sema.resolveMaybeUndefVal(operand)) |val| { - if (val.isUndef()) { + if (val.isUndef(mod)) { return sema.fail(block, src, "cannot dereference undefined value", .{}); } } else if (!(try sema.validateRunTimeType(elem_ty, false))) { @@ -6194,15 +6195,16 @@ fn lookupInNamespace( } fn funcDeclSrc(sema: *Sema, func_inst: Air.Inst.Ref) !?*Decl { + const mod = sema.mod; const func_val = (try sema.resolveMaybeUndefVal(func_inst)) orelse return null; - if (func_val.isUndef()) return null; + if (func_val.isUndef(mod)) return null; const owner_decl_index = switch (func_val.tag()) { .extern_fn => func_val.castTag(.extern_fn).?.data.owner_decl, .function => func_val.castTag(.function).?.data.owner_decl, - .decl_ref => sema.mod.declPtr(func_val.castTag(.decl_ref).?.data).val.castTag(.function).?.data.owner_decl, + .decl_ref => mod.declPtr(func_val.castTag(.decl_ref).?.data).val.castTag(.function).?.data.owner_decl, else => return null, }; - return sema.mod.declPtr(owner_decl_index); + return mod.declPtr(owner_decl_index); } pub fn analyzeSaveErrRetIndex(sema: *Sema, block: *Block) SemaError!Air.Inst.Ref { @@ -8106,7 +8108,7 @@ fn zirErrorToInt(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat const operand = try sema.coerce(block, Type.anyerror, uncasted_operand, operand_src); if (try sema.resolveMaybeUndefVal(operand)) |val| { - if (val.isUndef()) { + if (val.isUndef(mod)) { return sema.addConstUndef(Type.err_int); } switch (val.tag()) { @@ -8326,7 +8328,7 @@ fn zirIntToEnum(sema: 
*Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A }; return sema.failWithOwnedErrorMsg(msg); } - if (int_val.isUndef()) { + if (int_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, operand_src); } if (!(try sema.enumHasInt(dest_ty, int_val))) { @@ -11472,7 +11474,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError if (f != null) continue; cases_len += 1; - const item_val = try Value.Tag.enum_field_index.create(sema.arena, @intCast(u32, i)); + const item_val = try mod.enumValueFieldIndex(operand_ty, @intCast(u32, i)); const item_ref = try sema.addConstant(operand_ty, item_val); case_block.inline_case_capture = item_ref; @@ -12208,7 +12210,7 @@ fn zirShl( const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(rhs); if (maybe_rhs_val) |rhs_val| { - if (rhs_val.isUndef()) { + if (rhs_val.isUndef(mod)) { return sema.addConstUndef(sema.typeOf(lhs)); } // If rhs is 0, return lhs without doing any calculations. @@ -12255,7 +12257,7 @@ fn zirShl( } const runtime_src = if (maybe_lhs_val) |lhs_val| rs: { - if (lhs_val.isUndef()) return sema.addConstUndef(lhs_ty); + if (lhs_val.isUndef(mod)) return sema.addConstUndef(lhs_ty); const rhs_val = maybe_rhs_val orelse { if (scalar_ty.zigTypeTag(mod) == .ComptimeInt) { return sema.fail(block, src, "LHS of shift must be a fixed-width integer type, or RHS must be comptime-known", .{}); @@ -12389,7 +12391,7 @@ fn zirShr( const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(rhs); const runtime_src = if (maybe_rhs_val) |rhs_val| rs: { - if (rhs_val.isUndef()) { + if (rhs_val.isUndef(mod)) { return sema.addConstUndef(lhs_ty); } // If rhs is 0, return lhs without doing any calculations. @@ -12434,7 +12436,7 @@ fn zirShr( }); } if (maybe_lhs_val) |lhs_val| { - if (lhs_val.isUndef()) { + if (lhs_val.isUndef(mod)) { return sema.addConstUndef(lhs_ty); } if (air_tag == .shr_exact) { @@ -12578,7 +12580,7 @@ fn zirBitNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. } if (try sema.resolveMaybeUndefVal(operand)) |val| { - if (val.isUndef()) { + if (val.isUndef(mod)) { return sema.addConstUndef(operand_type); } else if (operand_type.zigTypeTag(mod) == .Vector) { const vec_len = try sema.usizeCast(block, operand_src, operand_type.vectorLen(mod)); @@ -13154,7 +13156,7 @@ fn zirNegate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. if (rhs_scalar_ty.isAnyFloat()) { // We handle float negation here to ensure negative zero is represented in the bits. 
if (try sema.resolveMaybeUndefVal(rhs)) |rhs_val| { - if (rhs_val.isUndef()) return sema.addConstUndef(rhs_ty); + if (rhs_val.isUndef(mod)) return sema.addConstUndef(rhs_ty); return sema.addConstant(rhs_ty, try rhs_val.floatNeg(rhs_ty, sema.arena, sema.mod)); } try sema.requireRuntimeBlock(block, src, null); @@ -13297,7 +13299,7 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins switch (scalar_tag) { .Int, .ComptimeInt, .ComptimeFloat => { if (maybe_lhs_val) |lhs_val| { - if (!lhs_val.isUndef()) { + if (!lhs_val.isUndef(mod)) { if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { const scalar_zero = switch (scalar_tag) { .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0), @@ -13312,7 +13314,7 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins } } if (maybe_rhs_val) |rhs_val| { - if (rhs_val.isUndef()) { + if (rhs_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, rhs_src); } if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) { @@ -13326,7 +13328,7 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins const runtime_src = rs: { if (maybe_lhs_val) |lhs_val| { - if (lhs_val.isUndef()) { + if (lhs_val.isUndef(mod)) { if (lhs_scalar_ty.isSignedInt(mod) and rhs_scalar_ty.isSignedInt(mod)) { if (maybe_rhs_val) |rhs_val| { if (try sema.compareAll(rhs_val, .neq, try mod.intValue(resolved_type, -1), resolved_type)) { @@ -13434,7 +13436,7 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // If the lhs is undefined, compile error because there is a possible // value for which the division would result in a remainder. if (maybe_lhs_val) |lhs_val| { - if (lhs_val.isUndef()) { + if (lhs_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, rhs_src); } else { if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { @@ -13451,7 +13453,7 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } } if (maybe_rhs_val) |rhs_val| { - if (rhs_val.isUndef()) { + if (rhs_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, rhs_src); } if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) { @@ -13611,7 +13613,7 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // value (zero) for which the division would be illegal behavior. // If the lhs is undefined, result is undefined. 
if (maybe_lhs_val) |lhs_val| { - if (!lhs_val.isUndef()) { + if (!lhs_val.isUndef(mod)) { if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { const scalar_zero = switch (scalar_tag) { .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0), @@ -13626,7 +13628,7 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } } if (maybe_rhs_val) |rhs_val| { - if (rhs_val.isUndef()) { + if (rhs_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, rhs_src); } if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) { @@ -13635,7 +13637,7 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // TODO: if the RHS is one, return the LHS directly } if (maybe_lhs_val) |lhs_val| { - if (lhs_val.isUndef()) { + if (lhs_val.isUndef(mod)) { if (lhs_scalar_ty.isSignedInt(mod) and rhs_scalar_ty.isSignedInt(mod)) { if (maybe_rhs_val) |rhs_val| { if (try sema.compareAll(rhs_val, .neq, try mod.intValue(resolved_type, -1), resolved_type)) { @@ -13732,7 +13734,7 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // value (zero) for which the division would be illegal behavior. // If the lhs is undefined, result is undefined. if (maybe_lhs_val) |lhs_val| { - if (!lhs_val.isUndef()) { + if (!lhs_val.isUndef(mod)) { if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { const scalar_zero = switch (scalar_tag) { .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0), @@ -13747,7 +13749,7 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } } if (maybe_rhs_val) |rhs_val| { - if (rhs_val.isUndef()) { + if (rhs_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, rhs_src); } if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) { @@ -13755,7 +13757,7 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } } if (maybe_lhs_val) |lhs_val| { - if (lhs_val.isUndef()) { + if (lhs_val.isUndef(mod)) { if (lhs_scalar_ty.isSignedInt(mod) and rhs_scalar_ty.isSignedInt(mod)) { if (maybe_rhs_val) |rhs_val| { if (try sema.compareAll(rhs_val, .neq, try mod.intValue(resolved_type, -1), resolved_type)) { @@ -13977,7 +13979,7 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. // then emit a compile error saying you have to pick one. if (is_int) { if (maybe_lhs_val) |lhs_val| { - if (lhs_val.isUndef()) { + if (lhs_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, lhs_src); } if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { @@ -13995,7 +13997,7 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty); } if (maybe_rhs_val) |rhs_val| { - if (rhs_val.isUndef()) { + if (rhs_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, rhs_src); } if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) { @@ -14024,7 +14026,7 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. } // float operands if (maybe_rhs_val) |rhs_val| { - if (rhs_val.isUndef()) { + if (rhs_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, rhs_src); } if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) { @@ -14034,7 +14036,7 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. 
 return sema.failWithModRemNegative(block, rhs_src, lhs_ty, rhs_ty);
             }
             if (maybe_lhs_val) |lhs_val| {
-                if (lhs_val.isUndef() or !(try lhs_val.compareAllWithZeroAdvanced(.gte, sema))) {
+                if (lhs_val.isUndef(mod) or !(try lhs_val.compareAllWithZeroAdvanced(.gte, sema))) {
                     return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty);
                 }
                 return sema.addConstant(
@@ -14155,12 +14157,12 @@ fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
         // If the lhs is undefined, result is undefined.
         if (is_int) {
             if (maybe_lhs_val) |lhs_val| {
-                if (lhs_val.isUndef()) {
+                if (lhs_val.isUndef(mod)) {
                     return sema.failWithUseOfUndef(block, lhs_src);
                 }
             }
             if (maybe_rhs_val) |rhs_val| {
-                if (rhs_val.isUndef()) {
+                if (rhs_val.isUndef(mod)) {
                     return sema.failWithUseOfUndef(block, rhs_src);
                 }
                 if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
@@ -14179,7 +14181,7 @@ fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
         }
         // float operands
         if (maybe_rhs_val) |rhs_val| {
-            if (rhs_val.isUndef()) {
+            if (rhs_val.isUndef(mod)) {
                 return sema.failWithUseOfUndef(block, rhs_src);
             }
             if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
@@ -14187,7 +14189,7 @@ fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
             }
         }
         if (maybe_lhs_val) |lhs_val| {
-            if (lhs_val.isUndef()) {
+            if (lhs_val.isUndef(mod)) {
                 return sema.addConstUndef(resolved_type);
             }
             if (maybe_rhs_val) |rhs_val| {
@@ -14257,12 +14259,12 @@ fn zirRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
         // If the lhs is undefined, result is undefined.
         if (is_int) {
             if (maybe_lhs_val) |lhs_val| {
-                if (lhs_val.isUndef()) {
+                if (lhs_val.isUndef(mod)) {
                     return sema.failWithUseOfUndef(block, lhs_src);
                 }
             }
             if (maybe_rhs_val) |rhs_val| {
-                if (rhs_val.isUndef()) {
+                if (rhs_val.isUndef(mod)) {
                     return sema.failWithUseOfUndef(block, rhs_src);
                 }
                 if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
@@ -14281,7 +14283,7 @@ fn zirRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
             }
         }
         // float operands
         if (maybe_rhs_val) |rhs_val| {
-            if (rhs_val.isUndef()) {
+            if (rhs_val.isUndef(mod)) {
                 return sema.failWithUseOfUndef(block, rhs_src);
             }
             if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
@@ -14289,7 +14291,7 @@ fn zirRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
             }
         }
         if (maybe_lhs_val) |lhs_val| {
-            if (lhs_val.isUndef()) {
+            if (lhs_val.isUndef(mod)) {
                 return sema.addConstUndef(resolved_type);
             }
             if (maybe_rhs_val) |rhs_val| {
@@ -14372,18 +14374,18 @@ fn zirOverflowArithmetic(
                 // to the result, even if it is undefined.
                 // Otherwise, if either of the arguments is undefined, undefined is returned.
if (maybe_lhs_val) |lhs_val| { - if (!lhs_val.isUndef() and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) { + if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) { break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = rhs }; } } if (maybe_rhs_val) |rhs_val| { - if (!rhs_val.isUndef() and (try rhs_val.compareAllWithZeroAdvanced(.eq, sema))) { + if (!rhs_val.isUndef(mod) and (try rhs_val.compareAllWithZeroAdvanced(.eq, sema))) { break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = lhs }; } } if (maybe_lhs_val) |lhs_val| { if (maybe_rhs_val) |rhs_val| { - if (lhs_val.isUndef() or rhs_val.isUndef()) { + if (lhs_val.isUndef(mod) or rhs_val.isUndef(mod)) { break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef }; } @@ -14396,12 +14398,12 @@ fn zirOverflowArithmetic( // If the rhs is zero, then the result is lhs and no overflow occurred. // Otherwise, if either operand is undefined, both results are undefined. if (maybe_rhs_val) |rhs_val| { - if (rhs_val.isUndef()) { + if (rhs_val.isUndef(mod)) { break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef }; } else if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = lhs }; } else if (maybe_lhs_val) |lhs_val| { - if (lhs_val.isUndef()) { + if (lhs_val.isUndef(mod)) { break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef }; } @@ -14416,7 +14418,7 @@ fn zirOverflowArithmetic( // Otherwise, if either of the arguments is undefined, both results are undefined. const scalar_one = try mod.intValue(dest_ty.scalarType(mod), 1); if (maybe_lhs_val) |lhs_val| { - if (!lhs_val.isUndef()) { + if (!lhs_val.isUndef(mod)) { if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = lhs }; } else if (try sema.compareAll(lhs_val, .eq, try maybeRepeated(sema, dest_ty, scalar_one), dest_ty)) { @@ -14426,7 +14428,7 @@ fn zirOverflowArithmetic( } if (maybe_rhs_val) |rhs_val| { - if (!rhs_val.isUndef()) { + if (!rhs_val.isUndef(mod)) { if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = rhs }; } else if (try sema.compareAll(rhs_val, .eq, try maybeRepeated(sema, dest_ty, scalar_one), dest_ty)) { @@ -14437,7 +14439,7 @@ fn zirOverflowArithmetic( if (maybe_lhs_val) |lhs_val| { if (maybe_rhs_val) |rhs_val| { - if (lhs_val.isUndef() or rhs_val.isUndef()) { + if (lhs_val.isUndef(mod) or rhs_val.isUndef(mod)) { break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef }; } @@ -14451,18 +14453,18 @@ fn zirOverflowArithmetic( // If rhs is zero, the result is lhs (even if undefined) and no overflow occurred. // Otherwise, if either of the arguments is undefined, both results are undefined.
if (maybe_lhs_val) |lhs_val| { - if (!lhs_val.isUndef() and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) { + if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) { break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = lhs }; } } if (maybe_rhs_val) |rhs_val| { - if (!rhs_val.isUndef() and (try rhs_val.compareAllWithZeroAdvanced(.eq, sema))) { + if (!rhs_val.isUndef(mod) and (try rhs_val.compareAllWithZeroAdvanced(.eq, sema))) { break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = lhs }; } } if (maybe_lhs_val) |lhs_val| { if (maybe_rhs_val) |rhs_val| { - if (lhs_val.isUndef() or rhs_val.isUndef()) { + if (lhs_val.isUndef(mod) or rhs_val.isUndef(mod)) { break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef }; } @@ -14606,12 +14608,12 @@ fn analyzeArithmetic( // overflow (max_int), causing illegal behavior. // For floats: either operand being undef makes the result undef. if (maybe_lhs_val) |lhs_val| { - if (!lhs_val.isUndef() and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) { + if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) { return casted_rhs; } } if (maybe_rhs_val) |rhs_val| { - if (rhs_val.isUndef()) { + if (rhs_val.isUndef(mod)) { if (is_int) { return sema.failWithUseOfUndef(block, rhs_src); } else { @@ -14624,7 +14626,7 @@ fn analyzeArithmetic( } const air_tag: Air.Inst.Tag = if (block.float_mode == .Optimized) .add_optimized else .add; if (maybe_lhs_val) |lhs_val| { - if (lhs_val.isUndef()) { + if (lhs_val.isUndef(mod)) { if (is_int) { return sema.failWithUseOfUndef(block, lhs_src); } else { @@ -14653,13 +14655,13 @@ fn analyzeArithmetic( // If either of the operands are zero, the other operand is returned. // If either of the operands are undefined, the result is undefined. if (maybe_lhs_val) |lhs_val| { - if (!lhs_val.isUndef() and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) { + if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) { return casted_rhs; } } const air_tag: Air.Inst.Tag = if (block.float_mode == .Optimized) .addwrap_optimized else .addwrap; if (maybe_rhs_val) |rhs_val| { - if (rhs_val.isUndef()) { + if (rhs_val.isUndef(mod)) { return sema.addConstUndef(resolved_type); } if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { @@ -14678,12 +14680,12 @@ fn analyzeArithmetic( // If either of the operands are zero, then the other operand is returned. // If either of the operands are undefined, the result is undefined. if (maybe_lhs_val) |lhs_val| { - if (!lhs_val.isUndef() and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) { + if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) { return casted_rhs; } } if (maybe_rhs_val) |rhs_val| { - if (rhs_val.isUndef()) { + if (rhs_val.isUndef(mod)) { return sema.addConstUndef(resolved_type); } if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { @@ -14708,7 +14710,7 @@ fn analyzeArithmetic( // overflow, causing illegal behavior. // For floats: either operand being undef makes the result undef. 
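// Editor's sketch (hedged): the recurring shape this patch migrates every
// call site to. `isUndef` now takes the Module because undef is an interned
// value in the InternPool rather than a Value payload tag, so the check is a
// pool lookup. The helper name and exact signature below are illustrative
// only; the surrounding types (Sema, Block, Value, LazySrcLoc, Air) are the
// ones already in scope in this file.
fn checkUndefOperand(sema: *Sema, block: *Block, val: Value, src: LazySrcLoc, is_int: bool, ty: Type) !?Air.Inst.Ref {
    const mod = sema.mod;
    if (!val.isUndef(mod)) return null; // defined: caller keeps folding
    // Integer arithmetic on undef is a compile error; float arithmetic
    // propagates undef, matching the branches above and below.
    if (is_int) return sema.failWithUseOfUndef(block, src);
    return try sema.addConstUndef(ty);
}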
if (maybe_rhs_val) |rhs_val| { - if (rhs_val.isUndef()) { + if (rhs_val.isUndef(mod)) { if (is_int) { return sema.failWithUseOfUndef(block, rhs_src); } else { @@ -14721,7 +14723,7 @@ fn analyzeArithmetic( } const air_tag: Air.Inst.Tag = if (block.float_mode == .Optimized) .sub_optimized else .sub; if (maybe_lhs_val) |lhs_val| { - if (lhs_val.isUndef()) { + if (lhs_val.isUndef(mod)) { if (is_int) { return sema.failWithUseOfUndef(block, lhs_src); } else { @@ -14750,7 +14752,7 @@ fn analyzeArithmetic( // If the RHS is zero, then the other operand is returned, even if it is undefined. // If either of the operands are undefined, the result is undefined. if (maybe_rhs_val) |rhs_val| { - if (rhs_val.isUndef()) { + if (rhs_val.isUndef(mod)) { return sema.addConstUndef(resolved_type); } if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { @@ -14759,7 +14761,7 @@ fn analyzeArithmetic( } const air_tag: Air.Inst.Tag = if (block.float_mode == .Optimized) .subwrap_optimized else .subwrap; if (maybe_lhs_val) |lhs_val| { - if (lhs_val.isUndef()) { + if (lhs_val.isUndef(mod)) { return sema.addConstUndef(resolved_type); } if (maybe_rhs_val) |rhs_val| { @@ -14775,7 +14777,7 @@ fn analyzeArithmetic( // If the RHS is zero, result is LHS. // If either of the operands are undefined, result is undefined. if (maybe_rhs_val) |rhs_val| { - if (rhs_val.isUndef()) { + if (rhs_val.isUndef(mod)) { return sema.addConstUndef(resolved_type); } if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { @@ -14783,7 +14785,7 @@ fn analyzeArithmetic( } } if (maybe_lhs_val) |lhs_val| { - if (lhs_val.isUndef()) { + if (lhs_val.isUndef(mod)) { return sema.addConstUndef(resolved_type); } if (maybe_rhs_val) |rhs_val| { @@ -14814,7 +14816,7 @@ fn analyzeArithmetic( else => unreachable, }; if (maybe_lhs_val) |lhs_val| { - if (!lhs_val.isUndef()) { + if (!lhs_val.isUndef(mod)) { if (lhs_val.isNan(mod)) { return sema.addConstant(resolved_type, lhs_val); } @@ -14844,7 +14846,7 @@ fn analyzeArithmetic( } const air_tag: Air.Inst.Tag = if (block.float_mode == .Optimized) .mul_optimized else .mul; if (maybe_rhs_val) |rhs_val| { - if (rhs_val.isUndef()) { + if (rhs_val.isUndef(mod)) { if (is_int) { return sema.failWithUseOfUndef(block, rhs_src); } else { @@ -14874,7 +14876,7 @@ fn analyzeArithmetic( return casted_lhs; } if (maybe_lhs_val) |lhs_val| { - if (lhs_val.isUndef()) { + if (lhs_val.isUndef(mod)) { if (is_int) { return sema.failWithUseOfUndef(block, lhs_src); } else { @@ -14908,7 +14910,7 @@ fn analyzeArithmetic( else => unreachable, }; if (maybe_lhs_val) |lhs_val| { - if (!lhs_val.isUndef()) { + if (!lhs_val.isUndef(mod)) { if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { const zero_val = if (is_vector) b: { break :b try Value.Tag.repeated.create(sema.arena, scalar_zero); @@ -14922,7 +14924,7 @@ fn analyzeArithmetic( } const air_tag: Air.Inst.Tag = if (block.float_mode == .Optimized) .mulwrap_optimized else .mulwrap; if (maybe_rhs_val) |rhs_val| { - if (rhs_val.isUndef()) { + if (rhs_val.isUndef(mod)) { return sema.addConstUndef(resolved_type); } if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { @@ -14935,7 +14937,7 @@ fn analyzeArithmetic( return casted_lhs; } if (maybe_lhs_val) |lhs_val| { - if (lhs_val.isUndef()) { + if (lhs_val.isUndef(mod)) { return sema.addConstUndef(resolved_type); } return sema.addConstant( @@ -14956,7 +14958,7 @@ fn analyzeArithmetic( else => unreachable, }; if (maybe_lhs_val) |lhs_val| { - if (!lhs_val.isUndef()) { + if (!lhs_val.isUndef(mod)) { if (try 
lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { const zero_val = if (is_vector) b: { break :b try Value.Tag.repeated.create(sema.arena, scalar_zero); @@ -14969,7 +14971,7 @@ fn analyzeArithmetic( } } if (maybe_rhs_val) |rhs_val| { - if (rhs_val.isUndef()) { + if (rhs_val.isUndef(mod)) { return sema.addConstUndef(resolved_type); } if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { @@ -14982,7 +14984,7 @@ fn analyzeArithmetic( return casted_lhs; } if (maybe_lhs_val) |lhs_val| { - if (lhs_val.isUndef()) { + if (lhs_val.isUndef(mod)) { return sema.addConstUndef(resolved_type); } @@ -15100,7 +15102,7 @@ fn analyzePtrArithmetic( const runtime_src = rs: { if (opt_ptr_val) |ptr_val| { if (opt_off_val) |offset_val| { - if (ptr_val.isUndef()) return sema.addConstUndef(new_ptr_ty); + if (ptr_val.isUndef(mod)) return sema.addConstUndef(new_ptr_ty); const offset_int = try sema.usizeCast(block, offset_src, offset_val.toUnsignedInt(mod)); if (offset_int == 0) return ptr; @@ -15363,7 +15365,7 @@ fn zirCmpEq( const runtime_src: LazySrcLoc = src: { if (try sema.resolveMaybeUndefVal(lhs)) |lval| { if (try sema.resolveMaybeUndefVal(rhs)) |rval| { - if (lval.isUndef() or rval.isUndef()) { + if (lval.isUndef(mod) or rval.isUndef(mod)) { return sema.addConstUndef(Type.bool); } // TODO optimisation opportunity: evaluate if mem.eql is faster with the names, @@ -15425,7 +15427,7 @@ fn analyzeCmpUnionTag( const coerced_union = try sema.coerce(block, union_tag_ty, un, un_src); if (try sema.resolveMaybeUndefVal(coerced_tag)) |enum_val| { - if (enum_val.isUndef()) return sema.addConstUndef(Type.bool); + if (enum_val.isUndef(mod)) return sema.addConstUndef(Type.bool); const field_ty = union_ty.unionFieldType(enum_val, sema.mod); if (field_ty.zigTypeTag(mod) == .NoReturn) { return Air.Inst.Ref.bool_false; @@ -15527,9 +15529,9 @@ fn cmpSelf( const resolved_type = sema.typeOf(casted_lhs); const runtime_src: LazySrcLoc = src: { if (try sema.resolveMaybeUndefVal(casted_lhs)) |lhs_val| { - if (lhs_val.isUndef()) return sema.addConstUndef(Type.bool); + if (lhs_val.isUndef(mod)) return sema.addConstUndef(Type.bool); if (try sema.resolveMaybeUndefVal(casted_rhs)) |rhs_val| { - if (rhs_val.isUndef()) return sema.addConstUndef(Type.bool); + if (rhs_val.isUndef(mod)) return sema.addConstUndef(Type.bool); if (resolved_type.zigTypeTag(mod) == .Vector) { const result_ty = try mod.vectorType(.{ @@ -15557,7 +15559,7 @@ fn cmpSelf( // bool eq/neq more efficiently. 
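// Editor's sketch (hedged): the bool-comparison folding that the branch just
// below performs via runtimeBoolCmp. Comparing a runtime bool `x` against a
// comptime-known bool reduces to `x` or `!x`, so no compare instruction is
// needed. Function and parameter names are illustrative.
fn boolCmpWithConst(x: bool, op_is_eq: bool, known: bool) bool {
    // x == true -> x,  x == false -> !x,  x != true -> !x,  x != false -> x
    return if (op_is_eq == known) x else !x;
}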
if (resolved_type.zigTypeTag(mod) == .Bool) { if (try sema.resolveMaybeUndefVal(casted_rhs)) |rhs_val| { - if (rhs_val.isUndef()) return sema.addConstUndef(Type.bool); + if (rhs_val.isUndef(mod)) return sema.addConstUndef(Type.bool); return sema.runtimeBoolCmp(block, src, op, casted_lhs, rhs_val.toBool(mod), lhs_src); } } @@ -15892,68 +15894,69 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const src = inst_data.src(); const ty = try sema.resolveType(block, src, inst_data.operand); const type_info_ty = try sema.getBuiltinType("Type"); + const type_info_tag_ty = type_info_ty.unionTagType(mod).?; switch (ty.zigTypeTag(mod)) { .Type => return sema.addConstant( type_info_ty, try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Type)), + .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Type)), .val = Value.void, }), ), .Void => return sema.addConstant( type_info_ty, try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Void)), + .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Void)), .val = Value.void, }), ), .Bool => return sema.addConstant( type_info_ty, try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Bool)), + .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Bool)), .val = Value.void, }), ), .NoReturn => return sema.addConstant( type_info_ty, try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.NoReturn)), + .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.NoReturn)), .val = Value.void, }), ), .ComptimeFloat => return sema.addConstant( type_info_ty, try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.ComptimeFloat)), + .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.ComptimeFloat)), .val = Value.void, }), ), .ComptimeInt => return sema.addConstant( type_info_ty, try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.ComptimeInt)), + .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.ComptimeInt)), .val = Value.void, }), ), .Undefined => return sema.addConstant( type_info_ty, try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Undefined)), + .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Undefined)), .val = Value.void, }), ), .Null => return sema.addConstant( type_info_ty, try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Null)), + .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Null)), .val = Value.void, }), ), .EnumLiteral => return sema.addConstant( type_info_ty, try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.EnumLiteral)), + .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.EnumLiteral)), .val = Value.void, }), ), @@ 
-16040,10 +16043,12 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai else Value.null; + const callconv_ty = try sema.getBuiltinType("CallingConvention"); + const field_values = try sema.arena.create([6]Value); field_values.* = .{ // calling_convention: CallingConvention, - try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(info.cc)), + try mod.enumValueFieldIndex(callconv_ty, @enumToInt(info.cc)), // alignment: comptime_int, try mod.intValue(Type.comptime_int, ty.abiAlignment(mod)), // is_generic: bool, @@ -16059,26 +16064,24 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai return sema.addConstant( type_info_ty, try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Fn)), + .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Fn)), .val = try Value.Tag.aggregate.create(sema.arena, field_values), }), ); }, .Int => { + const signedness_ty = try sema.getBuiltinType("Signedness"); const info = ty.intInfo(mod); const field_values = try sema.arena.alloc(Value, 2); // signedness: Signedness, - field_values[0] = try Value.Tag.enum_field_index.create( - sema.arena, - @enumToInt(info.signedness), - ); + field_values[0] = try mod.enumValueFieldIndex(signedness_ty, @enumToInt(info.signedness)); // bits: u16, field_values[1] = try mod.intValue(Type.u16, info.bits); return sema.addConstant( type_info_ty, try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Int)), + .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Int)), .val = try Value.Tag.aggregate.create(sema.arena, field_values), }), ); @@ -16091,7 +16094,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai return sema.addConstant( type_info_ty, try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Float)), + .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Float)), .val = try Value.Tag.aggregate.create(sema.arena, field_values), }), ); @@ -16103,10 +16106,13 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai else try info.pointee_type.lazyAbiAlignment(mod, sema.arena); + const addrspace_ty = try sema.getBuiltinType("AddressSpace"); + const ptr_size_ty = try sema.getBuiltinType("PtrSize"); + const field_values = try sema.arena.create([8]Value); field_values.* = .{ // size: Size, - try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(info.size)), + try mod.enumValueFieldIndex(ptr_size_ty, @enumToInt(info.size)), // is_const: bool, Value.makeBool(!info.mutable), // is_volatile: bool, @@ -16114,7 +16120,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // alignment: comptime_int, alignment, // address_space: AddressSpace - try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(info.@"addrspace")), + try mod.enumValueFieldIndex(addrspace_ty, @enumToInt(info.@"addrspace")), // child: type, try Value.Tag.ty.create(sema.arena, info.pointee_type), // is_allowzero: bool, @@ -16126,7 +16132,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai return sema.addConstant( type_info_ty, try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, 
@enumToInt(std.builtin.TypeId.Pointer)), + .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Pointer)), .val = try Value.Tag.aggregate.create(sema.arena, field_values), }), ); @@ -16144,7 +16150,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai return sema.addConstant( type_info_ty, try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Array)), + .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Array)), .val = try Value.Tag.aggregate.create(sema.arena, field_values), }), ); @@ -16160,7 +16166,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai return sema.addConstant( type_info_ty, try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Vector)), + .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Vector)), .val = try Value.Tag.aggregate.create(sema.arena, field_values), }), ); @@ -16173,7 +16179,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai return sema.addConstant( type_info_ty, try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Optional)), + .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Optional)), .val = try Value.Tag.aggregate.create(sema.arena, field_values), }), ); @@ -16263,7 +16269,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai return sema.addConstant( type_info_ty, try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.ErrorSet)), + .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.ErrorSet)), .val = errors_val, }), ); @@ -16278,7 +16284,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai return sema.addConstant( type_info_ty, try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.ErrorUnion)), + .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.ErrorUnion)), .val = try Value.Tag.aggregate.create(sema.arena, field_values), }), ); @@ -16365,7 +16371,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai return sema.addConstant( type_info_ty, try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Enum)), + .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Enum)), .val = try Value.Tag.aggregate.create(sema.arena, field_values), }), ); @@ -16454,13 +16460,12 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai break :v try Value.Tag.opt_payload.create(sema.arena, ty_val); } else Value.null; + const container_layout_ty = try sema.getBuiltinType("TmpContainerLayoutAlias"); + const field_values = try sema.arena.create([4]Value); field_values.* = .{ // layout: ContainerLayout, - try Value.Tag.enum_field_index.create( - sema.arena, - @enumToInt(layout), - ), + try mod.enumValueFieldIndex(container_layout_ty, @enumToInt(layout)), // tag_type: ?type, enum_tag_ty_val, @@ -16473,7 +16478,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: 
Zir.Inst.Index) CompileError!Ai return sema.addConstant( type_info_ty, try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Union)), + .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Union)), .val = try Value.Tag.aggregate.create(sema.arena, field_values), }), ); @@ -16625,13 +16630,12 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } }; + const container_layout_ty = try sema.getBuiltinType("TmpContainerLayoutAlias"); + const field_values = try sema.arena.create([5]Value); field_values.* = .{ // layout: ContainerLayout, - try Value.Tag.enum_field_index.create( - sema.arena, - @enumToInt(layout), - ), + try mod.enumValueFieldIndex(container_layout_ty, @enumToInt(layout)), // backing_integer: ?type, backing_integer_val, // fields: []const StructField, @@ -16645,7 +16649,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai return sema.addConstant( type_info_ty, try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Struct)), + .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Struct)), .val = try Value.Tag.aggregate.create(sema.arena, field_values), }), ); @@ -16665,7 +16669,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai return sema.addConstant( type_info_ty, try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Opaque)), + .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Opaque)), .val = try Value.Tag.aggregate.create(sema.arena, field_values), }), ); @@ -16912,7 +16916,7 @@ fn zirBoolNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const operand = try sema.coerce(block, Type.bool, uncasted_operand, operand_src); if (try sema.resolveMaybeUndefVal(operand)) |val| { - return if (val.isUndef()) + return if (val.isUndef(mod)) sema.addConstUndef(Type.bool) else if (val.toBool(mod)) Air.Inst.Ref.bool_false @@ -17879,7 +17883,7 @@ fn unionInit( if (try sema.resolveMaybeUndefVal(init)) |init_val| { const tag_ty = union_ty.unionTagTypeHypothetical(mod); const enum_field_index = @intCast(u32, tag_ty.enumFieldIndex(field_name, mod).?); - const tag_val = try Value.Tag.enum_field_index.create(sema.arena, enum_field_index); + const tag_val = try mod.enumValueFieldIndex(tag_ty, enum_field_index); return sema.addConstant(union_ty, try Value.Tag.@"union".create(sema.arena, .{ .tag = tag_val, .val = init_val, @@ -17980,7 +17984,7 @@ fn zirStructInit( const field_index = try sema.unionFieldIndex(block, resolved_ty, field_name, field_src); const tag_ty = resolved_ty.unionTagTypeHypothetical(mod); const enum_field_index = @intCast(u32, tag_ty.enumFieldIndex(field_name, mod).?); - const tag_val = try Value.Tag.enum_field_index.create(sema.arena, enum_field_index); + const tag_val = try mod.enumValueFieldIndex(tag_ty, enum_field_index); const init_inst = try sema.resolveInst(item.data.init); if (try sema.resolveMaybeUndefVal(init_inst)) |val| { @@ -18614,7 +18618,7 @@ fn zirBoolToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A const inst_data = sema.code.instructions.items(.data)[inst].un_node; const operand = try sema.resolveInst(inst_data.operand); if (try sema.resolveMaybeUndefVal(operand)) |val| { - if (val.isUndef()) return 
sema.addConstUndef(Type.u1); + if (val.isUndef(mod)) return sema.addConstUndef(Type.u1); if (val.toBool(mod)) return sema.addConstant(Type.u1, try mod.intValue(Type.u1, 1)); return sema.addConstant(Type.u1, try mod.intValue(Type.u1, 0)); } @@ -18673,7 +18677,7 @@ fn zirUnaryMath( .child = scalar_ty.ip_index, }); if (try sema.resolveMaybeUndefVal(operand)) |val| { - if (val.isUndef()) + if (val.isUndef(mod)) return sema.addConstUndef(result_ty); const elems = try sema.arena.alloc(Value, vec_len); @@ -18692,7 +18696,7 @@ fn zirUnaryMath( }, .ComptimeFloat, .Float => { if (try sema.resolveMaybeUndefVal(operand)) |operand_val| { - if (operand_val.isUndef()) + if (operand_val.isUndef(mod)) return sema.addConstUndef(operand_ty); const result_val = try eval(operand_val, operand_ty, sema.arena, sema.mod); return sema.addConstant(operand_ty, result_val); @@ -18809,7 +18813,7 @@ fn zirReify( const signedness_val = struct_val[0]; const bits_val = struct_val[1]; - const signedness = signedness_val.toEnum(std.builtin.Signedness); + const signedness = mod.toEnum(std.builtin.Signedness, signedness_val); const bits = @intCast(u16, bits_val.toUnsignedInt(mod)); const ty = try mod.intType(signedness, bits); return sema.addType(ty); @@ -18874,7 +18878,7 @@ fn zirReify( break :t elem_ty; }; - const ptr_size = size_val.toEnum(std.builtin.Type.Pointer.Size); + const ptr_size = mod.toEnum(std.builtin.Type.Pointer.Size, size_val); var actual_sentinel: ?Value = null; if (!sentinel_val.isNull(mod)) { @@ -18927,7 +18931,7 @@ fn zirReify( .mutable = !is_const_val.toBool(mod), .@"volatile" = is_volatile_val.toBool(mod), .@"align" = abi_align, - .@"addrspace" = address_space_val.toEnum(std.builtin.AddressSpace), + .@"addrspace" = mod.toEnum(std.builtin.AddressSpace, address_space_val), .pointee_type = try elem_ty.copy(sema.arena), .@"allowzero" = is_allowzero_val.toBool(mod), .sentinel = actual_sentinel, @@ -19033,7 +19037,7 @@ fn zirReify( const is_tuple_val = struct_val[4]; assert(struct_val.len == 5); - const layout = layout_val.toEnum(std.builtin.Type.ContainerLayout); + const layout = mod.toEnum(std.builtin.Type.ContainerLayout, layout_val); // Decls if (decls_val.sliceLen(mod) > 0) { @@ -19208,7 +19212,7 @@ fn zirReify( if (decls_val.sliceLen(mod) > 0) { return sema.fail(block, src, "reified unions must have no decls", .{}); } - const layout = layout_val.toEnum(std.builtin.Type.ContainerLayout); + const layout = mod.toEnum(std.builtin.Type.ContainerLayout, layout_val); var new_decl_arena = std.heap.ArenaAllocator.init(gpa); errdefer new_decl_arena.deinit(); @@ -19309,7 +19313,7 @@ fn zirReify( } if (explicit_enum_info) |tag_info| { - const enum_index = tag_info.nameIndex(mod.intern_pool, field_name_ip) orelse { + const enum_index = tag_info.nameIndex(&mod.intern_pool, field_name_ip) orelse { const msg = msg: { const msg = try sema.errMsg(block, src, "no field named '{s}' in enum '{}'", .{ field_name, union_obj.tag_ty.fmt(mod) }); errdefer msg.destroy(gpa); @@ -19402,7 +19406,7 @@ fn zirReify( const struct_val: []const Value = union_val.val.castTag(.aggregate).?.data; // TODO use reflection instead of magic numbers here // calling_convention: CallingConvention, - const cc = struct_val[0].toEnum(std.builtin.CallingConvention); + const cc = mod.toEnum(std.builtin.CallingConvention, struct_val[0]); // alignment: comptime_int, const alignment_val = struct_val[1]; // is_generic: bool, @@ -20180,7 +20184,7 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air } if (try 
sema.resolveMaybeUndefVal(ptr)) |operand_val| { - if (!dest_ty.ptrAllowsZero(mod) and operand_val.isUndef()) { + if (!dest_ty.ptrAllowsZero(mod) and operand_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, operand_src); } if (!dest_ty.ptrAllowsZero(mod) and operand_val.isNull(mod)) { @@ -20315,7 +20319,7 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } if (try sema.resolveMaybeUndefValIntable(operand)) |val| { - if (val.isUndef()) return sema.addConstUndef(dest_ty); + if (val.isUndef(mod)) return sema.addConstUndef(dest_ty); if (!is_vector) { return sema.addConstant( dest_ty, @@ -20419,7 +20423,7 @@ fn zirBitCount( .child = result_scalar_ty.ip_index, }); if (try sema.resolveMaybeUndefVal(operand)) |val| { - if (val.isUndef()) return sema.addConstUndef(result_ty); + if (val.isUndef(mod)) return sema.addConstUndef(result_ty); const elems = try sema.arena.alloc(Value, vec_len); const scalar_ty = operand_ty.scalarType(mod); @@ -20439,7 +20443,7 @@ fn zirBitCount( }, .Int => { if (try sema.resolveMaybeUndefVal(operand)) |val| { - if (val.isUndef()) return sema.addConstUndef(result_scalar_ty); + if (val.isUndef(mod)) return sema.addConstUndef(result_scalar_ty); try sema.resolveLazyValue(val); return sema.addIntUnsigned(result_scalar_ty, comptimeOp(val, operand_ty, mod)); } else { @@ -20476,7 +20480,7 @@ fn zirByteSwap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai switch (operand_ty.zigTypeTag(mod)) { .Int => { const runtime_src = if (try sema.resolveMaybeUndefVal(operand)) |val| { - if (val.isUndef()) return sema.addConstUndef(operand_ty); + if (val.isUndef(mod)) return sema.addConstUndef(operand_ty); const result_val = try val.byteSwap(operand_ty, mod, sema.arena); return sema.addConstant(operand_ty, result_val); } else operand_src; @@ -20486,7 +20490,7 @@ fn zirByteSwap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai }, .Vector => { const runtime_src = if (try sema.resolveMaybeUndefVal(operand)) |val| { - if (val.isUndef()) + if (val.isUndef(mod)) return sema.addConstUndef(operand_ty); const vec_len = operand_ty.vectorLen(mod); @@ -20524,7 +20528,7 @@ fn zirBitReverse(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! switch (operand_ty.zigTypeTag(mod)) { .Int => { const runtime_src = if (try sema.resolveMaybeUndefVal(operand)) |val| { - if (val.isUndef()) return sema.addConstUndef(operand_ty); + if (val.isUndef(mod)) return sema.addConstUndef(operand_ty); const result_val = try val.bitReverse(operand_ty, mod, sema.arena); return sema.addConstant(operand_ty, result_val); } else operand_src; @@ -20534,7 +20538,7 @@ fn zirBitReverse(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! 
}, .Vector => { const runtime_src = if (try sema.resolveMaybeUndefVal(operand)) |val| { - if (val.isUndef()) + if (val.isUndef(mod)) return sema.addConstUndef(operand_ty); const vec_len = operand_ty.vectorLen(mod); @@ -21072,7 +21076,7 @@ fn resolveExportOptions( const linkage_operand = try sema.fieldVal(block, src, options, "linkage", linkage_src); const linkage_val = try sema.resolveConstValue(block, linkage_src, linkage_operand, "linkage of exported value must be comptime-known"); - const linkage = linkage_val.toEnum(std.builtin.GlobalLinkage); + const linkage = mod.toEnum(std.builtin.GlobalLinkage, linkage_val); const section_operand = try sema.fieldVal(block, src, options, "section", section_src); const section_opt_val = try sema.resolveConstValue(block, section_src, section_operand, "linksection of exported value must be comptime-known"); @@ -21084,7 +21088,7 @@ fn resolveExportOptions( const visibility_operand = try sema.fieldVal(block, src, options, "visibility", visibility_src); const visibility_val = try sema.resolveConstValue(block, visibility_src, visibility_operand, "visibility of exported value must be comptime-known"); - const visibility = visibility_val.toEnum(std.builtin.SymbolVisibility); + const visibility = mod.toEnum(std.builtin.SymbolVisibility, visibility_val); if (name.len < 1) { return sema.fail(block, name_src, "exported symbol name cannot be empty", .{}); @@ -21112,11 +21116,12 @@ fn resolveBuiltinEnum( comptime name: []const u8, reason: []const u8, ) CompileError!@field(std.builtin, name) { + const mod = sema.mod; const ty = try sema.getBuiltinType(name); const air_ref = try sema.resolveInst(zir_ref); const coerced = try sema.coerce(block, ty, air_ref, src); const val = try sema.resolveConstValue(block, src, coerced, reason); - return val.toEnum(@field(std.builtin, name)); + return mod.toEnum(@field(std.builtin, name), val); } fn resolveAtomicOrder( @@ -21198,7 +21203,7 @@ fn zirCmpxchg( const runtime_src = if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| rs: { if (try sema.resolveMaybeUndefVal(expected_value)) |expected_val| { if (try sema.resolveMaybeUndefVal(new_value)) |new_val| { - if (expected_val.isUndef() or new_val.isUndef()) { + if (expected_val.isUndef(mod) or new_val.isUndef(mod)) { // TODO: this should probably cause the memory stored at the pointer // to become undef as well return sema.addConstUndef(result_ty); @@ -21248,7 +21253,7 @@ fn zirSplat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I .child = scalar_ty.ip_index, }); if (try sema.resolveMaybeUndefVal(scalar)) |scalar_val| { - if (scalar_val.isUndef()) return sema.addConstUndef(vector_ty); + if (scalar_val.isUndef(mod)) return sema.addConstUndef(vector_ty); return sema.addConstant( vector_ty, @@ -21300,7 +21305,7 @@ fn zirReduce(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. 
} if (try sema.resolveMaybeUndefVal(operand)) |operand_val| { - if (operand_val.isUndef()) return sema.addConstUndef(scalar_ty); + if (operand_val.isUndef(mod)) return sema.addConstUndef(scalar_ty); var accum: Value = try operand_val.elemValue(mod, 0); var i: u32 = 1; @@ -21420,7 +21425,7 @@ fn analyzeShuffle( var i: usize = 0; while (i < mask_len) : (i += 1) { const elem = try mask.elemValue(sema.mod, i); - if (elem.isUndef()) continue; + if (elem.isUndef(mod)) continue; const int = elem.toSignedInt(mod); var unsigned: u32 = undefined; var chosen: u32 = undefined; @@ -21458,7 +21463,7 @@ fn analyzeShuffle( i = 0; while (i < mask_len) : (i += 1) { const mask_elem_val = try mask.elemValue(sema.mod, i); - if (mask_elem_val.isUndef()) { + if (mask_elem_val.isUndef(mod)) { values[i] = Value.undef; continue; } @@ -21559,13 +21564,13 @@ fn zirSelect(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) C const maybe_b = try sema.resolveMaybeUndefVal(b); const runtime_src = if (maybe_pred) |pred_val| rs: { - if (pred_val.isUndef()) return sema.addConstUndef(vec_ty); + if (pred_val.isUndef(mod)) return sema.addConstUndef(vec_ty); if (maybe_a) |a_val| { - if (a_val.isUndef()) return sema.addConstUndef(vec_ty); + if (a_val.isUndef(mod)) return sema.addConstUndef(vec_ty); if (maybe_b) |b_val| { - if (b_val.isUndef()) return sema.addConstUndef(vec_ty); + if (b_val.isUndef(mod)) return sema.addConstUndef(vec_ty); const elems = try sema.gpa.alloc(Value, vec_len); for (elems, 0..) |*elem, i| { @@ -21587,16 +21592,16 @@ fn zirSelect(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) C } } else { if (maybe_b) |b_val| { - if (b_val.isUndef()) return sema.addConstUndef(vec_ty); + if (b_val.isUndef(mod)) return sema.addConstUndef(vec_ty); } break :rs a_src; } } else rs: { if (maybe_a) |a_val| { - if (a_val.isUndef()) return sema.addConstUndef(vec_ty); + if (a_val.isUndef(mod)) return sema.addConstUndef(vec_ty); } if (maybe_b) |b_val| { - if (b_val.isUndef()) return sema.addConstUndef(vec_ty); + if (b_val.isUndef(mod)) return sema.addConstUndef(vec_ty); } break :rs pred_src; }; @@ -21803,10 +21808,10 @@ fn zirMulAdd(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. const runtime_src = if (maybe_mulend1) |mulend1_val| rs: { if (maybe_mulend2) |mulend2_val| { - if (mulend2_val.isUndef()) return sema.addConstUndef(ty); + if (mulend2_val.isUndef(mod)) return sema.addConstUndef(ty); if (maybe_addend) |addend_val| { - if (addend_val.isUndef()) return sema.addConstUndef(ty); + if (addend_val.isUndef(mod)) return sema.addConstUndef(ty); const result_val = try Value.mulAdd(ty, mulend1_val, mulend2_val, addend_val, sema.arena, sema.mod); return sema.addConstant(ty, result_val); } else { @@ -21814,16 +21819,16 @@ fn zirMulAdd(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. 
} } else { if (maybe_addend) |addend_val| { - if (addend_val.isUndef()) return sema.addConstUndef(ty); + if (addend_val.isUndef(mod)) return sema.addConstUndef(ty); } break :rs mulend2_src; } } else rs: { if (maybe_mulend2) |mulend2_val| { - if (mulend2_val.isUndef()) return sema.addConstUndef(ty); + if (mulend2_val.isUndef(mod)) return sema.addConstUndef(ty); } if (maybe_addend) |addend_val| { - if (addend_val.isUndef()) return sema.addConstUndef(ty); + if (addend_val.isUndef(mod)) return sema.addConstUndef(ty); } break :rs mulend1_src; }; @@ -21859,7 +21864,7 @@ fn zirBuiltinCall(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const air_ref = try sema.resolveInst(extra.modifier); const modifier_ref = try sema.coerce(block, modifier_ty, air_ref, modifier_src); const modifier_val = try sema.resolveConstValue(block, modifier_src, modifier_ref, "call modifier must be comptime-known"); - var modifier = modifier_val.toEnum(std.builtin.CallModifier); + var modifier = mod.toEnum(std.builtin.CallModifier, modifier_val); switch (modifier) { // These can be upgraded to comptime or nosuspend calls. .auto, .never_tail, .no_async => { @@ -22111,8 +22116,8 @@ fn analyzeMinMax( runtime_known.unset(operand_idx); - if (cur_val.isUndef()) continue; // result is also undef - if (operand_val.isUndef()) { + if (cur_val.isUndef(mod)) continue; // result is also undef + if (operand_val.isUndef(mod)) { cur_minmax = try sema.addConstUndef(simd_op.result_ty); continue; } @@ -22165,7 +22170,7 @@ fn analyzeMinMax( var cur_max: Value = cur_min; for (1..len) |idx| { const elem_val = try val.elemValue(mod, idx); - if (elem_val.isUndef()) break :blk orig_ty; // can't refine undef + if (elem_val.isUndef(mod)) break :blk orig_ty; // can't refine undef if (Value.order(elem_val, cur_min, mod).compare(.lt)) cur_min = elem_val; if (Value.order(elem_val, cur_max, mod).compare(.gt)) cur_max = elem_val; } @@ -22177,7 +22182,7 @@ fn analyzeMinMax( }); } else blk: { if (orig_ty.isAnyFloat()) break :blk orig_ty; // can't refine floats - if (val.isUndef()) break :blk orig_ty; // can't refine undef + if (val.isUndef(mod)) break :blk orig_ty; // can't refine undef break :blk try mod.intFittingRange(val, val); }; @@ -22205,7 +22210,7 @@ fn analyzeMinMax( // If the comptime-known part is undef we can avoid emitting actual instructions later const known_undef = if (cur_minmax) |operand| blk: { const val = (try sema.resolveMaybeUndefVal(operand)).?; - break :blk val.isUndef(); + break :blk val.isUndef(mod); } else false; if (cur_minmax == null) { @@ -22749,7 +22754,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A if (val.isGenericPoison()) { break :blk null; } - break :blk val.toEnum(std.builtin.AddressSpace); + break :blk mod.toEnum(std.builtin.AddressSpace, val); } else if (extra.data.bits.has_addrspace_ref) blk: { const addrspace_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]); extra_index += 1; @@ -22759,7 +22764,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A }, else => |e| return e, }; - break :blk addrspace_tv.val.toEnum(std.builtin.AddressSpace); + break :blk mod.toEnum(std.builtin.AddressSpace, addrspace_tv.val); } else target_util.defaultAddressSpace(target, .function); const @"linksection": FuncLinkSection = if (extra.data.bits.has_section_body) blk: { @@ -22797,7 +22802,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A if (val.isGenericPoison()) { break :blk null; } - break :blk 
val.toEnum(std.builtin.CallingConvention); + break :blk mod.toEnum(std.builtin.CallingConvention, val); } else if (extra.data.bits.has_cc_ref) blk: { const cc_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]); extra_index += 1; @@ -22807,7 +22812,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A }, else => |e| return e, }; - break :blk cc_tv.val.toEnum(std.builtin.CallingConvention); + break :blk mod.toEnum(std.builtin.CallingConvention, cc_tv.val); } else if (sema.owner_decl.is_exported and has_body) .C else @@ -22994,9 +22999,9 @@ fn resolvePrefetchOptions( const cache_val = try sema.resolveConstValue(block, cache_src, cache, "prefetch cache must be comptime-known"); return std.builtin.PrefetchOptions{ - .rw = rw_val.toEnum(std.builtin.PrefetchOptions.Rw), + .rw = mod.toEnum(std.builtin.PrefetchOptions.Rw, rw_val), .locality = @intCast(u2, locality_val.toUnsignedInt(mod)), - .cache = cache_val.toEnum(std.builtin.PrefetchOptions.Cache), + .cache = mod.toEnum(std.builtin.PrefetchOptions.Cache, cache_val), }; } @@ -23059,7 +23064,7 @@ fn resolveExternOptions( const linkage_ref = try sema.fieldVal(block, src, options, "linkage", linkage_src); const linkage_val = try sema.resolveConstValue(block, linkage_src, linkage_ref, "linkage of the extern symbol must be comptime-known"); - const linkage = linkage_val.toEnum(std.builtin.GlobalLinkage); + const linkage = mod.toEnum(std.builtin.GlobalLinkage, linkage_val); const is_thread_local = try sema.fieldVal(block, src, options, "is_thread_local", thread_local_src); const is_thread_local_val = try sema.resolveConstValue(block, thread_local_src, is_thread_local, "threadlocality of the extern symbol must be comptime-known"); @@ -24140,7 +24145,7 @@ fn fieldVal( const field_index = @intCast(u32, field_index_usize); return sema.addConstant( enum_ty, - try Value.Tag.enum_field_index.create(sema.arena, field_index), + try mod.enumValueFieldIndex(enum_ty, field_index), ); } } @@ -24155,8 +24160,8 @@ fn fieldVal( const field_index_usize = child_type.enumFieldIndex(field_name, mod) orelse return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name); const field_index = @intCast(u32, field_index_usize); - const enum_val = try Value.Tag.enum_field_index.create(arena, field_index); - return sema.addConstant(try child_type.copy(arena), enum_val); + const enum_val = try mod.enumValueFieldIndex(child_type, field_index); + return sema.addConstant(child_type, enum_val); }, .Struct, .Opaque => { if (child_type.getNamespaceIndex(mod).unwrap()) |namespace| { @@ -24355,8 +24360,8 @@ fn fieldPtr( var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); return sema.analyzeDeclRef(try anon_decl.finish( - try enum_ty.copy(anon_decl.arena()), - try Value.Tag.enum_field_index.create(anon_decl.arena(), field_index_u32), + enum_ty, + try mod.enumValueFieldIndex(enum_ty, field_index_u32), 0, // default alignment )); } @@ -24376,8 +24381,8 @@ fn fieldPtr( var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); return sema.analyzeDeclRef(try anon_decl.finish( - try child_type.copy(anon_decl.arena()), - try Value.Tag.enum_field_index.create(anon_decl.arena(), field_index_u32), + child_type, + try mod.enumValueFieldIndex(child_type, field_index_u32), 0, // default alignment )); }, @@ -24850,7 +24855,7 @@ fn structFieldVal( } if (try sema.resolveMaybeUndefVal(struct_byval)) |struct_val| { - if (struct_val.isUndef()) return sema.addConstUndef(field.ty); + if (struct_val.isUndef(mod)) return 
sema.addConstUndef(field.ty); if ((try sema.typeHasOnePossibleValue(field.ty))) |opv| { return sema.addConstant(field.ty, opv); } @@ -24922,7 +24927,7 @@ fn tupleFieldValByIndex( } if (try sema.resolveMaybeUndefVal(tuple_byval)) |tuple_val| { - if (tuple_val.isUndef()) return sema.addConstUndef(field_ty); + if (tuple_val.isUndef(mod)) return sema.addConstUndef(field_ty); if ((try sema.typeHasOnePossibleValue(field_ty))) |opv| { return sema.addConstant(field_ty, opv); } @@ -24983,19 +24988,15 @@ fn unionFieldPtr( .Auto => if (!initializing) { const union_val = (try sema.pointerDeref(block, src, union_ptr_val, union_ptr_ty)) orelse break :ct; - if (union_val.isUndef()) { + if (union_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, src); } const tag_and_val = union_val.castTag(.@"union").?.data; - var field_tag_buf: Value.Payload.U32 = .{ - .base = .{ .tag = .enum_field_index }, - .data = enum_field_index, - }; - const field_tag = Value.initPayload(&field_tag_buf.base); + const field_tag = try mod.enumValueFieldIndex(union_obj.tag_ty, enum_field_index); const tag_matches = tag_and_val.tag.eql(field_tag, union_obj.tag_ty, mod); if (!tag_matches) { const msg = msg: { - const active_index = tag_and_val.tag.castTag(.enum_field_index).?.data; + const active_index = union_obj.tag_ty.enumTagFieldIndex(tag_and_val.tag, mod).?; const active_field_name = union_obj.tag_ty.enumFieldName(active_index, mod); const msg = try sema.errMsg(block, src, "access of union field '{s}' while field '{s}' is active", .{ field_name, active_field_name }); errdefer msg.destroy(sema.gpa); @@ -25021,7 +25022,7 @@ fn unionFieldPtr( if (!initializing and union_obj.layout == .Auto and block.wantSafety() and union_ty.unionTagTypeSafety(mod) != null and union_obj.fields.count() > 1) { - const wanted_tag_val = try Value.Tag.enum_field_index.create(sema.arena, enum_field_index); + const wanted_tag_val = try mod.enumValueFieldIndex(union_obj.tag_ty, enum_field_index); const wanted_tag = try sema.addConstant(union_obj.tag_ty, wanted_tag_val); // TODO would it be better if get_union_tag supported pointers to unions? 
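// Editor's sketch (hedged): the tag compared against below is now built with
// mod.enumValueFieldIndex, i.e. an enum value interned together with its tag
// type rather than a bare field index. At the language level, this comptime
// diagnostic and runtime check guard the same illegal behavior:
test "inactive union field access is checked" {
    const U = union(enum) { a: u32, b: f32 };
    var u: U = .{ .a = 1 };
    u = .{ .b = 2.0 };
    _ = u.b; // ok: 'b' is active
    // Reading `u.a` here would panic: "access of union field 'a' while
    // field 'b' is active" (or fail at comptime, as in the branch above).
}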
const union_val = try block.addTyOp(.load, union_ty, union_ptr); @@ -25054,14 +25055,10 @@ fn unionFieldVal( const enum_field_index = @intCast(u32, union_obj.tag_ty.enumFieldIndex(field_name, mod).?); if (try sema.resolveMaybeUndefVal(union_byval)) |union_val| { - if (union_val.isUndef()) return sema.addConstUndef(field.ty); + if (union_val.isUndef(mod)) return sema.addConstUndef(field.ty); const tag_and_val = union_val.castTag(.@"union").?.data; - var field_tag_buf: Value.Payload.U32 = .{ - .base = .{ .tag = .enum_field_index }, - .data = enum_field_index, - }; - const field_tag = Value.initPayload(&field_tag_buf.base); + const field_tag = try mod.enumValueFieldIndex(union_obj.tag_ty, enum_field_index); const tag_matches = tag_and_val.tag.eql(field_tag, union_obj.tag_ty, mod); switch (union_obj.layout) { .Auto => { @@ -25069,7 +25066,7 @@ fn unionFieldVal( return sema.addConstant(field.ty, tag_and_val.val); } else { const msg = msg: { - const active_index = tag_and_val.tag.castTag(.enum_field_index).?.data; + const active_index = union_obj.tag_ty.enumTagFieldIndex(tag_and_val.tag, mod).?; const active_field_name = union_obj.tag_ty.enumFieldName(active_index, mod); const msg = try sema.errMsg(block, src, "access of union field '{s}' while field '{s}' is active", .{ field_name, active_field_name }); errdefer msg.destroy(sema.gpa); @@ -25096,7 +25093,7 @@ fn unionFieldVal( if (union_obj.layout == .Auto and block.wantSafety() and union_ty.unionTagTypeSafety(mod) != null and union_obj.fields.count() > 1) { - const wanted_tag_val = try Value.Tag.enum_field_index.create(sema.arena, enum_field_index); + const wanted_tag_val = try mod.enumValueFieldIndex(union_obj.tag_ty, enum_field_index); const wanted_tag = try sema.addConstant(union_obj.tag_ty, wanted_tag_val); const active_tag = try block.addTyOp(.get_union_tag, union_obj.tag_ty, union_byval); try sema.panicInactiveUnionField(block, active_tag, wanted_tag); @@ -25364,7 +25361,7 @@ fn tupleField( } if (try sema.resolveMaybeUndefVal(tuple)) |tuple_val| { - if (tuple_val.isUndef()) return sema.addConstUndef(field_ty); + if (tuple_val.isUndef(mod)) return sema.addConstUndef(field_ty); return sema.addConstant(field_ty, try tuple_val.fieldValue(tuple_ty, mod, field_index)); } @@ -25412,7 +25409,7 @@ fn elemValArray( } } if (maybe_undef_array_val) |array_val| { - if (array_val.isUndef()) { + if (array_val.isUndef(mod)) { return sema.addConstUndef(elem_ty); } if (maybe_index_val) |index_val| { @@ -25473,7 +25470,7 @@ fn elemPtrArray( const elem_ptr_ty = try sema.elemPtrType(array_ptr_ty, offset); if (maybe_undef_array_ptr_val) |array_ptr_val| { - if (array_ptr_val.isUndef()) { + if (array_ptr_val.isUndef(mod)) { return sema.addConstUndef(elem_ptr_ty); } if (offset) |index| { @@ -25580,7 +25577,7 @@ fn elemPtrSlice( const elem_ptr_ty = try sema.elemPtrType(slice_ty, offset); if (maybe_undef_slice_val) |slice_val| { - if (slice_val.isUndef()) { + if (slice_val.isUndef(mod)) { return sema.addConstUndef(elem_ptr_ty); } const slice_len = slice_val.sliceLen(mod); @@ -25605,7 +25602,7 @@ fn elemPtrSlice( if (oob_safety and block.wantSafety()) { const len_inst = len: { if (maybe_undef_slice_val) |slice_val| - if (!slice_val.isUndef()) + if (!slice_val.isUndef(mod)) break :len try sema.addIntUnsigned(Type.usize, slice_val.sliceLen(mod)); break :len try block.addTyOp(.slice_len, Type.usize, slice); }; @@ -25681,7 +25678,6 @@ fn coerceExtra( if (dest_ty.eql(inst_ty, mod)) return inst; - const arena = sema.arena; const maybe_inst_val = try 
sema.resolveMaybeUndefVal(inst); var in_memory_result = try sema.coerceInMemoryAllowed(block, dest_ty, inst_ty, false, target, dest_ty_src, inst_src); @@ -26175,7 +26171,7 @@ fn coerceExtra( }; return sema.addConstant( dest_ty, - try Value.Tag.enum_field_index.create(arena, @intCast(u32, field_index)), + try mod.enumValueFieldIndex(dest_ty, @intCast(u32, field_index)), ); }, .Union => blk: { @@ -27858,8 +27854,9 @@ fn beginComptimePtrMutation( }, .Union => { const payload = try arena.create(Value.Payload.Union); + const tag_ty = parent.ty.unionTagTypeHypothetical(mod); payload.* = .{ .data = .{ - .tag = try Value.Tag.enum_field_index.create(arena, field_index), + .tag = try mod.enumValueFieldIndex(tag_ty, field_index), .val = Value.undef, } }; @@ -27934,11 +27931,10 @@ fn beginComptimePtrMutation( .@"union" => { // We need to set the active field of the union. - const arena = parent.beginArena(sema.mod); - defer parent.finishArena(sema.mod); + const union_tag_ty = field_ptr.container_ty.unionTagTypeHypothetical(mod); const payload = &val_ptr.castTag(.@"union").?.data; - payload.tag = try Value.Tag.enum_field_index.create(arena, field_index); + payload.tag = try mod.enumValueFieldIndex(union_tag_ty, field_index); return beginComptimePtrMutationInner( sema, @@ -28575,7 +28571,7 @@ fn coerceCompatiblePtrs( const mod = sema.mod; const inst_ty = sema.typeOf(inst); if (try sema.resolveMaybeUndefVal(inst)) |val| { - if (!val.isUndef() and val.isNull(mod) and !dest_ty.isAllowzeroPtr(mod)) { + if (!val.isUndef(mod) and val.isNull(mod) and !dest_ty.isAllowzeroPtr(mod)) { return sema.fail(block, inst_src, "null pointer casted to type '{}'", .{dest_ty.fmt(sema.mod)}); } // The comptime Value representation is compatible with both types. @@ -29426,7 +29422,7 @@ fn analyzeSlicePtr( const buf = try sema.arena.create(Type.SlicePtrFieldTypeBuffer); const result_ty = slice_ty.slicePtrFieldType(buf, mod); if (try sema.resolveMaybeUndefVal(slice)) |val| { - if (val.isUndef()) return sema.addConstUndef(result_ty); + if (val.isUndef(mod)) return sema.addConstUndef(result_ty); return sema.addConstant(result_ty, val.slicePtr()); } try sema.requireRuntimeBlock(block, slice_src, null); @@ -29439,8 +29435,9 @@ fn analyzeSliceLen( src: LazySrcLoc, slice_inst: Air.Inst.Ref, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; if (try sema.resolveMaybeUndefVal(slice_inst)) |slice_val| { - if (slice_val.isUndef()) { + if (slice_val.isUndef(mod)) { return sema.addConstUndef(Type.usize); } return sema.addIntUnsigned(Type.usize, slice_val.sliceLen(sema.mod)); @@ -29459,7 +29456,7 @@ fn analyzeIsNull( const mod = sema.mod; const result_ty = Type.bool; if (try sema.resolveMaybeUndefVal(operand)) |opt_val| { - if (opt_val.isUndef()) { + if (opt_val.isUndef(mod)) { return sema.addConstUndef(result_ty); } const is_null = opt_val.isNull(mod); @@ -29588,7 +29585,7 @@ fn analyzeIsNonErrComptimeOnly( } if (maybe_operand_val) |err_union| { - if (err_union.isUndef()) { + if (err_union.isUndef(mod)) { return sema.addConstUndef(Type.bool); } if (err_union.getError() == null) { @@ -29768,7 +29765,7 @@ fn analyzeSlice( } else try sema.coerce(block, Type.usize, uncasted_end_opt, end_src); if (try sema.resolveDefinedValue(block, end_src, end)) |end_val| { if (try sema.resolveMaybeUndefVal(ptr_or_slice)) |slice_val| { - if (slice_val.isUndef()) { + if (slice_val.isUndef(mod)) { return sema.fail(block, src, "slice of undefined", .{}); } const has_sentinel = slice_ty.sentinel(mod) != null; @@ -29948,7 +29945,7 @@ fn analyzeSlice( return 
result; }; - if (!new_ptr_val.isUndef()) { + if (!new_ptr_val.isUndef(mod)) { return sema.addConstant(return_ty, new_ptr_val); } @@ -30069,19 +30066,19 @@ fn cmpNumeric( if (try sema.resolveMaybeUndefVal(lhs)) |lhs_val| { if (try sema.resolveMaybeUndefVal(rhs)) |rhs_val| { // Compare ints: const vs. undefined (or vice versa) - if (!lhs_val.isUndef() and (lhs_ty.isInt(mod) or lhs_ty_tag == .ComptimeInt) and rhs_ty.isInt(mod) and rhs_val.isUndef()) { + if (!lhs_val.isUndef(mod) and (lhs_ty.isInt(mod) or lhs_ty_tag == .ComptimeInt) and rhs_ty.isInt(mod) and rhs_val.isUndef(mod)) { try sema.resolveLazyValue(lhs_val); if (try sema.compareIntsOnlyPossibleResult(lhs_val, op, rhs_ty)) |res| { return if (res) Air.Inst.Ref.bool_true else Air.Inst.Ref.bool_false; } - } else if (!rhs_val.isUndef() and (rhs_ty.isInt(mod) or rhs_ty_tag == .ComptimeInt) and lhs_ty.isInt(mod) and lhs_val.isUndef()) { + } else if (!rhs_val.isUndef(mod) and (rhs_ty.isInt(mod) or rhs_ty_tag == .ComptimeInt) and lhs_ty.isInt(mod) and lhs_val.isUndef(mod)) { try sema.resolveLazyValue(rhs_val); if (try sema.compareIntsOnlyPossibleResult(rhs_val, op.reverse(), lhs_ty)) |res| { return if (res) Air.Inst.Ref.bool_true else Air.Inst.Ref.bool_false; } } - if (lhs_val.isUndef() or rhs_val.isUndef()) { + if (lhs_val.isUndef(mod) or rhs_val.isUndef(mod)) { return sema.addConstUndef(Type.bool); } if (lhs_val.isNan(mod) or rhs_val.isNan(mod)) { @@ -30097,7 +30094,7 @@ fn cmpNumeric( return Air.Inst.Ref.bool_false; } } else { - if (!lhs_val.isUndef() and (lhs_ty.isInt(mod) or lhs_ty_tag == .ComptimeInt) and rhs_ty.isInt(mod)) { + if (!lhs_val.isUndef(mod) and (lhs_ty.isInt(mod) or lhs_ty_tag == .ComptimeInt) and rhs_ty.isInt(mod)) { // Compare ints: const vs. var try sema.resolveLazyValue(lhs_val); if (try sema.compareIntsOnlyPossibleResult(lhs_val, op, rhs_ty)) |res| { @@ -30108,7 +30105,7 @@ fn cmpNumeric( } } else { if (try sema.resolveMaybeUndefVal(rhs)) |rhs_val| { - if (!rhs_val.isUndef() and (rhs_ty.isInt(mod) or rhs_ty_tag == .ComptimeInt) and lhs_ty.isInt(mod)) { + if (!rhs_val.isUndef(mod) and (rhs_ty.isInt(mod) or rhs_ty_tag == .ComptimeInt) and lhs_ty.isInt(mod)) { // Compare ints: var vs. 
const try sema.resolveLazyValue(rhs_val); if (try sema.compareIntsOnlyPossibleResult(rhs_val, op.reverse(), lhs_ty)) |res| { @@ -30177,7 +30174,7 @@ fn cmpNumeric( var lhs_bits: usize = undefined; if (try sema.resolveMaybeUndefVal(lhs)) |lhs_val| { try sema.resolveLazyValue(lhs_val); - if (lhs_val.isUndef()) + if (lhs_val.isUndef(mod)) return sema.addConstUndef(Type.bool); if (lhs_val.isNan(mod)) switch (op) { .neq => return Air.Inst.Ref.bool_true, @@ -30236,7 +30233,7 @@ fn cmpNumeric( var rhs_bits: usize = undefined; if (try sema.resolveMaybeUndefVal(rhs)) |rhs_val| { try sema.resolveLazyValue(rhs_val); - if (rhs_val.isUndef()) + if (rhs_val.isUndef(mod)) return sema.addConstUndef(Type.bool); if (rhs_val.isNan(mod)) switch (op) { .neq => return Air.Inst.Ref.bool_true, @@ -30441,7 +30438,7 @@ fn cmpVector( const runtime_src: LazySrcLoc = src: { if (try sema.resolveMaybeUndefVal(casted_lhs)) |lhs_val| { if (try sema.resolveMaybeUndefVal(casted_rhs)) |rhs_val| { - if (lhs_val.isUndef() or rhs_val.isUndef()) { + if (lhs_val.isUndef(mod) or rhs_val.isUndef(mod)) { return sema.addConstUndef(result_ty); } const cmp_val = try sema.compareVector(lhs_val, op, rhs_val, resolved_ty); @@ -30558,11 +30555,12 @@ fn unionToTag( un: Air.Inst.Ref, un_src: LazySrcLoc, ) !Air.Inst.Ref { + const mod = sema.mod; if ((try sema.typeHasOnePossibleValue(enum_ty))) |opv| { return sema.addConstant(enum_ty, opv); } if (try sema.resolveMaybeUndefVal(un)) |un_val| { - return sema.addConstant(enum_ty, un_val.unionTag()); + return sema.addConstant(enum_ty, un_val.unionTag(mod)); } try sema.requireRuntimeBlock(block, un_src, null); return block.addTyOp(.get_union_tag, enum_ty, un); @@ -31718,6 +31716,7 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .enum_type => |enum_type| try sema.resolveTypeRequiresComptime(enum_type.tag_ty.toType()), // values, not types + .undef => unreachable, .un => unreachable, .simple_value => unreachable, .extern_func => unreachable, @@ -31845,6 +31844,7 @@ pub fn resolveTypeFields(sema: *Sema, ty: Type) CompileError!Type { .none => return ty, .u1_type, + .u5_type, .u8_type, .i8_type, .u16_type, @@ -31904,6 +31904,8 @@ pub fn resolveTypeFields(sema: *Sema, ty: Type) CompileError!Type { .zero_u8 => unreachable, .one => unreachable, .one_usize => unreachable, + .one_u5 => unreachable, + .four_u5 => unreachable, .negative_one => unreachable, .calling_convention_c => unreachable, .calling_convention_inline => unreachable, @@ -32720,7 +32722,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { } if (explicit_enum_info) |tag_info| { - const enum_index = tag_info.nameIndex(mod.intern_pool, field_name_ip) orelse { + const enum_index = tag_info.nameIndex(&mod.intern_pool, field_name_ip) orelse { const msg = msg: { const ty_src = mod.fieldSrcLoc(union_obj.owner_decl, .{ .index = field_i, @@ -33186,19 +33188,30 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .opaque_type => null, .enum_type => |enum_type| switch (enum_type.tag_mode) { .nonexhaustive => { - if (enum_type.tag_ty != .comptime_int_type and - !(try sema.typeHasRuntimeBits(enum_type.tag_ty.toType()))) - { - return Value.enum_field_0; - } else { - return null; + if (enum_type.tag_ty == .comptime_int_type) return null; + + if (try sema.typeHasOnePossibleValue(enum_type.tag_ty.toType())) |int_opv| { + const only = try mod.intern(.{ .enum_tag = .{ + .ty = ty.ip_index, + .int = int_opv.ip_index, + } }); + return only.toValue(); } + + return null; }, 
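// Editor's sketch (hedged): what these switch arms decide at the language
// level. An enum whose tag type admits only one value (or a single-field
// auto-numbered enum, handled just below) needs no runtime bits; its only
// value is now produced as an interned `enum_tag` pairing the enum type with
// the tag integer, replacing the old untyped `enum_field_0` sentinel.
comptime {
    const Only = enum { just_this }; // one field, auto-numbered
    const v: Only = .just_this; // fully determined by the type alone
    _ = v;
}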
.auto, .explicit => switch (enum_type.names.len) { 0 => return Value.@"unreachable", 1 => { if (enum_type.values.len == 0) { - return Value.enum_field_0; // auto-numbered + const only = try mod.intern(.{ .enum_tag = .{ + .ty = ty.ip_index, + .int = try mod.intern(.{ .int = .{ + .ty = enum_type.tag_ty, + .storage = .{ .u64 = 0 }, + } }), + } }); + return only.toValue(); } else { return enum_type.values[0].toValue(); } @@ -33208,6 +33221,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { }, // values, not types + .undef => unreachable, .un => unreachable, .simple_value => unreachable, .extern_func => unreachable, @@ -33397,8 +33411,9 @@ pub fn analyzeAddressSpace( zir_ref: Zir.Inst.Ref, ctx: AddressSpaceContext, ) !std.builtin.AddressSpace { + const mod = sema.mod; const addrspace_tv = try sema.resolveInstConst(block, src, zir_ref, "addresspace must be comptime-known"); - const address_space = addrspace_tv.val.toEnum(std.builtin.AddressSpace); + const address_space = mod.toEnum(std.builtin.AddressSpace, addrspace_tv.val); const target = sema.mod.getTarget(); const arch = target.cpu.arch; @@ -33766,6 +33781,7 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .enum_type => |enum_type| try sema.typeRequiresComptime(enum_type.tag_ty.toType()), // values, not types + .undef => unreachable, .un => unreachable, .simple_value => unreachable, .extern_func => unreachable, @@ -33921,9 +33937,9 @@ fn numberAddWrapScalar( rhs: Value, ty: Type, ) !Value { - if (lhs.isUndef() or rhs.isUndef()) return Value.undef; - const mod = sema.mod; + if (lhs.isUndef(mod) or rhs.isUndef(mod)) return Value.undef; + if (ty.zigTypeTag(mod) == .ComptimeInt) { return sema.intAdd(lhs, rhs, ty); } @@ -33975,9 +33991,9 @@ fn numberSubWrapScalar( rhs: Value, ty: Type, ) !Value { - if (lhs.isUndef() or rhs.isUndef()) return Value.undef; - const mod = sema.mod; + if (lhs.isUndef(mod) or rhs.isUndef(mod)) return Value.undef; + if (ty.zigTypeTag(mod) == .ComptimeInt) { return sema.intSub(lhs, rhs, ty); } @@ -34222,17 +34238,12 @@ fn enumHasInt(sema: *Sema, ty: Type, int: Value) CompileError!bool { const mod = sema.mod; const enum_type = mod.intern_pool.indexToKey(ty.ip_index).enum_type; assert(enum_type.tag_mode != .nonexhaustive); - if (enum_type.values.len == 0) { - // auto-numbered - return sema.intInRange(enum_type.tag_ty.toType(), int, enum_type.names.len); - } - // The `tagValueIndex` function call below relies on the type being the integer tag type. // `getCoerced` assumes the value will fit the new type. 
if (!(try sema.intFitsInType(int, enum_type.tag_ty.toType(), null))) return false; const int_coerced = try mod.intern_pool.getCoerced(sema.gpa, int.ip_index, enum_type.tag_ty); - return enum_type.tagValueIndex(mod.intern_pool, int_coerced) != null; + return enum_type.tagValueIndex(&mod.intern_pool, int_coerced) != null; } fn intAddWithOverflow( diff --git a/src/TypedValue.zig b/src/TypedValue.zig index 57ef662a9e..a18f49b96f 100644 --- a/src/TypedValue.zig +++ b/src/TypedValue.zig @@ -197,9 +197,6 @@ pub fn print( }, .empty_array => return writer.writeAll(".{}"), .enum_literal => return writer.print(".{}", .{std.zig.fmtId(val.castTag(.enum_literal).?.data)}), - .enum_field_index => { - return writer.print(".{s}", .{ty.enumFieldName(val.castTag(.enum_field_index).?.data, mod)}); - }, .bytes => return writer.print("\"{}\"", .{std.zig.fmtEscapes(val.castTag(.bytes).?.data)}), .str_lit => { const str_lit = val.castTag(.str_lit).?.data; @@ -255,7 +252,7 @@ pub fn print( const elem_val = payload.ptr.elemValue(mod, i) catch |err| switch (err) { error.OutOfMemory => @panic("OOM"), // TODO: eliminate this panic }; - if (elem_val.isUndef()) break :str; + if (elem_val.isUndef(mod)) break :str; buf[i] = std.math.cast(u8, elem_val.toUnsignedInt(mod)) orelse break :str; } @@ -358,6 +355,20 @@ pub fn print( .int => |int| switch (int.storage) { inline .u64, .i64, .big_int => |x| return writer.print("{}", .{x}), }, + .enum_tag => |enum_tag| { + try writer.writeAll("@intToEnum("); + try print(.{ + .ty = Type.type, + .val = enum_tag.ty.toValue(), + }, writer, level - 1, mod); + try writer.writeAll(", "); + try print(.{ + .ty = mod.intern_pool.typeOf(enum_tag.int).toType(), + .val = enum_tag.int.toValue(), + }, writer, level - 1, mod); + try writer.writeAll(")"); + return; + }, .float => |float| switch (float.storage) { inline else => |x| return writer.print("{}", .{x}), }, @@ -414,7 +425,7 @@ fn printAggregate( var i: u32 = 0; while (i < max_len) : (i += 1) { const elem = try val.fieldValue(ty, mod, i); - if (elem.isUndef()) break :str; + if (elem.isUndef(mod)) break :str; buf[i] = std.math.cast(u8, elem.toUnsignedInt(mod)) orelse break :str; } diff --git a/src/Zir.zig b/src/Zir.zig index 34479cce5e..136920d75d 100644 --- a/src/Zir.zig +++ b/src/Zir.zig @@ -2052,6 +2052,7 @@ pub const Inst = struct { /// and `[]Ref`. 
pub const Ref = enum(u32) { u1_type = @enumToInt(InternPool.Index.u1_type), + u5_type = @enumToInt(InternPool.Index.u5_type), u8_type = @enumToInt(InternPool.Index.u8_type), i8_type = @enumToInt(InternPool.Index.i8_type), u16_type = @enumToInt(InternPool.Index.u16_type), @@ -2120,6 +2121,8 @@ pub const Inst = struct { zero_u8 = @enumToInt(InternPool.Index.zero_u8), one = @enumToInt(InternPool.Index.one), one_usize = @enumToInt(InternPool.Index.one_usize), + one_u5 = @enumToInt(InternPool.Index.one_u5), + four_u5 = @enumToInt(InternPool.Index.four_u5), negative_one = @enumToInt(InternPool.Index.negative_one), calling_convention_c = @enumToInt(InternPool.Index.calling_convention_c), calling_convention_inline = @enumToInt(InternPool.Index.calling_convention_inline), diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index a2f4f81053..6ae5163714 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -11,6 +11,7 @@ const log = std.log.scoped(.codegen); const codegen = @import("../../codegen.zig"); const Module = @import("../../Module.zig"); +const InternPool = @import("../../InternPool.zig"); const Decl = Module.Decl; const Type = @import("../../type.zig").Type; const Value = @import("../../value.zig").Value; @@ -3044,11 +3045,12 @@ fn toTwosComplement(value: anytype, bits: u7) std.meta.Int(.unsigned, @typeInfo( } fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue { + const mod = func.bin_file.base.options.module.?; var val = arg_val; if (val.castTag(.runtime_value)) |rt| { val = rt.data; } - if (val.isUndefDeep()) return func.emitUndefined(ty); + if (val.isUndefDeep(mod)) return func.emitUndefined(ty); if (val.castTag(.decl_ref)) |decl_ref| { const decl_index = decl_ref.data; return func.lowerDeclRefValue(.{ .ty = ty, .val = val }, decl_index, 0); @@ -3057,7 +3059,6 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue { const decl_index = decl_ref_mut.data.decl_index; return func.lowerDeclRefValue(.{ .ty = ty, .val = val }, decl_index, 0); } - const mod = func.bin_file.base.options.module.?; switch (ty.zigTypeTag(mod)) { .Void => return WValue{ .none = {} }, .Int => { @@ -3100,18 +3101,9 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue { }, }, .Enum => { - if (val.castTag(.enum_field_index)) |field_index| { - const enum_type = mod.intern_pool.indexToKey(ty.ip_index).enum_type; - if (enum_type.values.len != 0) { - const tag_val = enum_type.values[field_index.data]; - return func.lowerConstant(tag_val.toValue(), enum_type.tag_ty.toType()); - } else { - return WValue{ .imm32 = field_index.data }; - } - } else { - const int_tag_ty = try ty.intTagType(mod); - return func.lowerConstant(val, int_tag_ty); - } + const enum_tag = mod.intern_pool.indexToKey(val.ip_index).enum_tag; + const int_tag_ty = mod.intern_pool.typeOf(enum_tag.int); + return func.lowerConstant(enum_tag.int.toValue(), int_tag_ty.toType()); }, .ErrorSet => switch (val.tag()) { .@"error" => { @@ -3223,37 +3215,42 @@ fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue { /// Returns a `Value` as a signed 32 bit value. /// It's illegal to provide a value with a type that cannot be represented /// as an integer value. 
-fn valueAsI32(func: *const CodeGen, val: Value, ty: Type) !i32 { +fn valueAsI32(func: *const CodeGen, val: Value, ty: Type) i32 { const mod = func.bin_file.base.options.module.?; - switch (ty.zigTypeTag(mod)) { - .Enum => { - if (val.castTag(.enum_field_index)) |field_index| { - const enum_type = mod.intern_pool.indexToKey(ty.ip_index).enum_type; - if (enum_type.values.len != 0) { - const tag_val = enum_type.values[field_index.data]; - return func.valueAsI32(tag_val.toValue(), enum_type.tag_ty.toType()); - } else { - return @bitCast(i32, field_index.data); - } - } else { - const int_tag_ty = try ty.intTagType(mod); - return func.valueAsI32(val, int_tag_ty); - } - }, - .Int => switch (ty.intInfo(mod).signedness) { - .signed => return @truncate(i32, val.toSignedInt(mod)), - .unsigned => return @bitCast(i32, @truncate(u32, val.toUnsignedInt(mod))), + + switch (val.ip_index) { + .none => {}, + .bool_true => return 1, + .bool_false => return 0, + else => return switch (mod.intern_pool.indexToKey(val.ip_index)) { + .enum_tag => |enum_tag| intIndexAsI32(&mod.intern_pool, enum_tag.int), + .int => |int| intStorageAsI32(int.storage), + .ptr => |ptr| intIndexAsI32(&mod.intern_pool, ptr.addr.int), + else => unreachable, }, + } + + switch (ty.zigTypeTag(mod)) { .ErrorSet => { const kv = func.bin_file.base.options.module.?.getErrorValue(val.getError().?) catch unreachable; // passed invalid `Value` to function return @bitCast(i32, kv.value); }, - .Bool => return @intCast(i32, val.toSignedInt(mod)), - .Pointer => return @intCast(i32, val.toSignedInt(mod)), else => unreachable, // Programmer called this function for an illegal type } } +fn intIndexAsI32(ip: *const InternPool, int: InternPool.Index) i32 { + return intStorageAsI32(ip.indexToKey(int).int.storage); +} + +fn intStorageAsI32(storage: InternPool.Key.Int.Storage) i32 { + return switch (storage) { + .i64 => |x| @intCast(i32, x), + .u64 => |x| @bitCast(i32, @intCast(u32, x)), + .big_int => unreachable, + }; +} + fn airBlock(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const mod = func.bin_file.base.options.module.?; const ty_pl = func.air.instructions.items(.data)[inst].ty_pl; @@ -3772,7 +3769,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { for (items, 0..) |ref, i| { const item_val = (try func.air.value(ref, mod)).?; - const int_val = try func.valueAsI32(item_val, target_ty); + const int_val = func.valueAsI32(item_val, target_ty); if (lowest_maybe == null or int_val < lowest_maybe.?) { lowest_maybe = int_val; } @@ -5071,12 +5068,8 @@ fn airUnionInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const tag_int = blk: { const tag_ty = union_ty.unionTagTypeHypothetical(mod); - const enum_field_index = tag_ty.enumFieldIndex(field_name).?; - var tag_val_payload: Value.Payload.U32 = .{ - .base = .{ .tag = .enum_field_index }, - .data = @intCast(u32, enum_field_index), - }; - const tag_val = Value.initPayload(&tag_val_payload.base); + const enum_field_index = tag_ty.enumFieldIndex(field_name, mod).?; + const tag_val = try mod.enumValueFieldIndex(tag_ty, enum_field_index); break :blk try func.lowerConstant(tag_val, tag_ty); }; if (layout.payload_size == 0) { @@ -6815,7 +6808,8 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 { // TODO: Make switch implementation generic so we can use a jump table for this when the tags are not sparse. // generate an if-else chain for each tag value as well as constant. - for (enum_ty.enumFields(mod), 0..) 
|tag_name_ip, field_index| { + for (enum_ty.enumFields(mod), 0..) |tag_name_ip, field_index_usize| { + const field_index = @intCast(u32, field_index_usize); const tag_name = mod.intern_pool.stringToSlice(tag_name_ip); // for each tag name, create an unnamed const, // and then get a pointer to its value. @@ -6857,11 +6851,8 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 { try writer.writeByte(std.wasm.opcode(.local_get)); try leb.writeULEB128(writer, @as(u32, 1)); - var tag_val_payload: Value.Payload.U32 = .{ - .base = .{ .tag = .enum_field_index }, - .data = @intCast(u32, field_index), - }; - const tag_value = try func.lowerConstant(Value.initPayload(&tag_val_payload.base), enum_ty); + const tag_val = try mod.enumValueFieldIndex(enum_ty, field_index); + const tag_value = try func.lowerConstant(tag_val, enum_ty); switch (tag_value) { .imm32 => |value| { diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 72f416ca87..7e2e37667e 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -2029,13 +2029,10 @@ fn genLazy(self: *Self, lazy_sym: link.File.LazySymbol) InnerError!void { exitlude_jump_relocs, enum_ty.enumFields(mod), 0.., - ) |*exitlude_jump_reloc, tag_name_ip, index| { + ) |*exitlude_jump_reloc, tag_name_ip, index_usize| { + const index = @intCast(u32, index_usize); const tag_name = mod.intern_pool.stringToSlice(tag_name_ip); - var tag_pl = Value.Payload.U32{ - .base = .{ .tag = .enum_field_index }, - .data = @intCast(u32, index), - }; - const tag_val = Value.initPayload(&tag_pl.base); + const tag_val = try mod.enumValueFieldIndex(enum_ty, index); const tag_mcv = try self.genTypedValue(.{ .ty = enum_ty, .val = tag_val }); try self.genBinOpMir(.{ ._, .cmp }, enum_ty, enum_mcv, tag_mcv); const skip_reloc = try self.asmJccReloc(undefined, .ne); @@ -11415,8 +11412,7 @@ fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void { const field_name = union_obj.fields.keys()[extra.field_index]; const tag_ty = union_obj.tag_ty; const field_index = @intCast(u32, tag_ty.enumFieldIndex(field_name, mod).?); - var tag_pl = Value.Payload.U32{ .base = .{ .tag = .enum_field_index }, .data = field_index }; - const tag_val = Value.initPayload(&tag_pl.base); + const tag_val = try mod.enumValueFieldIndex(tag_ty, field_index); const tag_int_val = try tag_val.enumToInt(tag_ty, mod); const tag_int = tag_int_val.toUnsignedInt(mod); const tag_off = if (layout.tag_align < layout.payload_align) diff --git a/src/codegen.zig b/src/codegen.zig index 148a69016a..90b6bfccf2 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -196,7 +196,7 @@ pub fn generateSymbol( typed_value.val.fmtValue(typed_value.ty, mod), }); - if (typed_value.val.isUndefDeep()) { + if (typed_value.val.isUndefDeep(mod)) { const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse return error.Overflow; try code.appendNTimes(0xaa, abi_size); return Result.ok; @@ -1168,7 +1168,7 @@ pub fn genTypedValue( typed_value.val.fmtValue(typed_value.ty, mod), }); - if (typed_value.val.isUndef()) + if (typed_value.val.isUndef(mod)) return GenResult.mcv(.undef); const target = bin_file.options.target; @@ -1229,24 +1229,12 @@ pub fn genTypedValue( } }, .Enum => { - if (typed_value.val.castTag(.enum_field_index)) |field_index| { - const enum_type = mod.intern_pool.indexToKey(typed_value.ty.ip_index).enum_type; - if (enum_type.values.len != 0) { - const tag_val = enum_type.values[field_index.data]; - return genTypedValue(bin_file, src_loc, .{ - .ty = enum_type.tag_ty.toType(), - 
.val = tag_val.toValue(), - }, owner_decl_index); - } else { - return GenResult.mcv(.{ .immediate = field_index.data }); - } - } else { - const int_tag_ty = try typed_value.ty.intTagType(mod); - return genTypedValue(bin_file, src_loc, .{ - .ty = int_tag_ty, - .val = typed_value.val, - }, owner_decl_index); - } + const enum_tag = mod.intern_pool.indexToKey(typed_value.val.ip_index).enum_tag; + const int_tag_ty = mod.intern_pool.typeOf(enum_tag.int); + return genTypedValue(bin_file, src_loc, .{ + .ty = int_tag_ty.toType(), + .val = enum_tag.int.toValue(), + }, owner_decl_index); }, .ErrorSet => { switch (typed_value.val.tag()) { diff --git a/src/codegen/c.zig b/src/codegen/c.zig index c8e7303545..2ee7dab2fe 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -748,7 +748,7 @@ pub const DeclGen = struct { .ReleaseFast, .ReleaseSmall => false, }; - if (val.isUndefDeep()) { + if (val.isUndefDeep(mod)) { switch (ty.zigTypeTag(mod)) { .Bool => { if (safety_on) { @@ -1183,7 +1183,7 @@ pub const DeclGen = struct { var index: usize = 0; while (index < ai.len) : (index += 1) { const elem_val = try val.elemValue(mod, index); - const elem_val_u8 = if (elem_val.isUndef()) undefPattern(u8) else @intCast(u8, elem_val.toUnsignedInt(mod)); + const elem_val_u8 = if (elem_val.isUndef(mod)) undefPattern(u8) else @intCast(u8, elem_val.toUnsignedInt(mod)); try literal.writeChar(elem_val_u8); } if (ai.sentinel) |s| { @@ -1197,7 +1197,7 @@ pub const DeclGen = struct { while (index < ai.len) : (index += 1) { if (index != 0) try writer.writeByte(','); const elem_val = try val.elemValue(mod, index); - const elem_val_u8 = if (elem_val.isUndef()) undefPattern(u8) else @intCast(u8, elem_val.toUnsignedInt(mod)); + const elem_val_u8 = if (elem_val.isUndef(mod)) undefPattern(u8) else @intCast(u8, elem_val.toUnsignedInt(mod)); try writer.print("'\\x{x}'", .{elem_val_u8}); } if (ai.sentinel) |s| { @@ -1284,23 +1284,16 @@ pub const DeclGen = struct { try dg.renderValue(writer, error_ty, error_val, initializer_type); try writer.writeAll(" }"); }, - .Enum => { - switch (val.tag()) { - .enum_field_index => { - const field_index = val.castTag(.enum_field_index).?.data; - const enum_type = mod.intern_pool.indexToKey(ty.ip_index).enum_type; - if (enum_type.values.len != 0) { - const tag_val = enum_type.values[field_index]; - return dg.renderValue(writer, enum_type.tag_ty.toType(), tag_val.toValue(), location); - } else { - return writer.print("{d}", .{field_index}); - } - }, - else => { - const int_tag_ty = try ty.intTagType(mod); - return dg.renderValue(writer, int_tag_ty, val, location); - }, - } + .Enum => switch (val.ip_index) { + .none => { + const int_tag_ty = try ty.intTagType(mod); + return dg.renderValue(writer, int_tag_ty, val, location); + }, + else => { + const enum_tag = mod.intern_pool.indexToKey(val.ip_index).enum_tag; + const int_tag_ty = mod.intern_pool.typeOf(enum_tag.int); + return dg.renderValue(writer, int_tag_ty.toType(), enum_tag.int.toValue(), location); + }, }, .Fn => switch (val.tag()) { .function => { @@ -2524,13 +2517,10 @@ pub fn genLazyFn(o: *Object, lazy_fn: LazyFnMap.Entry) !void { try w.writeByte('('); try o.dg.renderTypeAndName(w, enum_ty, .{ .identifier = "tag" }, Const, 0, .complete); try w.writeAll(") {\n switch (tag) {\n"); - for (enum_ty.enumFields(mod), 0..) |name_ip, index| { + for (enum_ty.enumFields(mod), 0..) 
|name_ip, index_usize| { + const index = @intCast(u32, index_usize); const name = mod.intern_pool.stringToSlice(name_ip); - var tag_pl: Value.Payload.U32 = .{ - .base = .{ .tag = .enum_field_index }, - .data = @intCast(u32, index), - }; - const tag_val = Value.initPayload(&tag_pl.base); + const tag_val = try mod.enumValueFieldIndex(enum_ty, index); const int_val = try tag_val.enumToInt(enum_ty, mod); @@ -3609,7 +3599,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { const ptr_val = try f.resolveInst(bin_op.lhs); const src_ty = f.typeOf(bin_op.rhs); - const val_is_undef = if (try f.air.value(bin_op.rhs, mod)) |v| v.isUndefDeep() else false; + const val_is_undef = if (try f.air.value(bin_op.rhs, mod)) |v| v.isUndefDeep(mod) else false; if (val_is_undef) { try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); @@ -4267,7 +4257,7 @@ fn airDbgVar(f: *Function, inst: Air.Inst.Index) !CValue { const mod = f.object.dg.module; const pl_op = f.air.instructions.items(.data)[inst].pl_op; const name = f.air.nullTerminatedString(pl_op.payload); - const operand_is_undef = if (try f.air.value(pl_op.operand, mod)) |v| v.isUndefDeep() else false; + const operand_is_undef = if (try f.air.value(pl_op.operand, mod)) |v| v.isUndefDeep(mod) else false; if (!operand_is_undef) _ = try f.resolveInst(pl_op.operand); try reap(f, inst, &.{pl_op.operand}); @@ -6290,7 +6280,7 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { const value = try f.resolveInst(bin_op.rhs); const elem_ty = f.typeOf(bin_op.rhs); const elem_abi_size = elem_ty.abiSize(mod); - const val_is_undef = if (try f.air.value(bin_op.rhs, mod)) |val| val.isUndefDeep() else false; + const val_is_undef = if (try f.air.value(bin_op.rhs, mod)) |val| val.isUndefDeep(mod) else false; const writer = f.object.writer(); if (val_is_undef) { @@ -6907,11 +6897,7 @@ fn airUnionInit(f: *Function, inst: Air.Inst.Index) !CValue { if (layout.tag_size != 0) { const field_index = tag_ty.enumFieldIndex(field_name, mod).?; - var tag_pl: Value.Payload.U32 = .{ - .base = .{ .tag = .enum_field_index }, - .data = @intCast(u32, field_index), - }; - const tag_val = Value.initPayload(&tag_pl.base); + const tag_val = try mod.enumValueFieldIndex(tag_ty, field_index); const int_val = try tag_val.enumToInt(tag_ty, mod); @@ -7438,7 +7424,7 @@ fn formatIntLiteral( defer allocator.free(undef_limbs); var int_buf: Value.BigIntSpace = undefined; - const int = if (data.val.isUndefDeep()) blk: { + const int = if (data.val.isUndefDeep(mod)) blk: { undef_limbs = try allocator.alloc(BigIntLimb, BigInt.calcTwosCompLimbCount(data.int_info.bits)); @memset(undef_limbs, undefPattern(BigIntLimb)); diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index cc766c9562..e485b58c35 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -3233,16 +3233,16 @@ pub const DeclGen = struct { } fn lowerValue(dg: *DeclGen, arg_tv: TypedValue) Error!*llvm.Value { + const mod = dg.module; + const target = mod.getTarget(); var tv = arg_tv; if (tv.val.castTag(.runtime_value)) |rt| { tv.val = rt.data; } - if (tv.val.isUndef()) { + if (tv.val.isUndef(mod)) { const llvm_type = try dg.lowerType(tv.ty); return llvm_type.getUndef(); } - const mod = dg.module; - const target = mod.getTarget(); switch (tv.ty.zigTypeTag(mod)) { .Bool => { const llvm_type = try dg.lowerType(tv.ty); @@ -8204,7 +8204,7 @@ pub const FuncGen = struct { const ptr_ty = self.typeOf(bin_op.lhs); const operand_ty = ptr_ty.childType(mod); - const val_is_undef = if (try self.air.value(bin_op.rhs, 
mod)) |val| val.isUndefDeep() else false; + const val_is_undef = if (try self.air.value(bin_op.rhs, mod)) |val| val.isUndefDeep(mod) else false; if (val_is_undef) { // Even if safety is disabled, we still emit a memset to undefined since it conveys // extra information to LLVM. However, safety makes the difference between using @@ -8496,7 +8496,7 @@ pub const FuncGen = struct { const is_volatile = ptr_ty.isVolatilePtr(mod); if (try self.air.value(bin_op.rhs, mod)) |elem_val| { - if (elem_val.isUndefDeep()) { + if (elem_val.isUndefDeep(mod)) { // Even if safety is disabled, we still emit a memset to undefined since it conveys // extra information to LLVM. However, safety makes the difference between using // 0xaa or actual undefined for the fill byte. @@ -8890,15 +8890,12 @@ pub const FuncGen = struct { const tag_int_value = fn_val.getParam(0); const switch_instr = self.builder.buildSwitch(tag_int_value, unnamed_block, @intCast(c_uint, enum_type.names.len)); - for (enum_type.names, 0..) |_, field_index| { + for (enum_type.names, 0..) |_, field_index_usize| { + const field_index = @intCast(u32, field_index_usize); const this_tag_int_value = int: { - var tag_val_payload: Value.Payload.U32 = .{ - .base = .{ .tag = .enum_field_index }, - .data = @intCast(u32, field_index), - }; break :int try self.dg.lowerValue(.{ .ty = enum_ty, - .val = Value.initPayload(&tag_val_payload.base), + .val = try mod.enumValueFieldIndex(enum_ty, field_index), }); }; switch_instr.addCase(this_tag_int_value, named_block); @@ -8973,7 +8970,8 @@ pub const FuncGen = struct { usize_llvm_ty.constNull(), usize_llvm_ty.constNull(), }; - for (enum_type.names, 0..) |name_ip, field_index| { + for (enum_type.names, 0..) |name_ip, field_index_usize| { + const field_index = @intCast(u32, field_index_usize); const name = mod.intern_pool.stringToSlice(name_ip); const str_init = self.context.constString(name.ptr, @intCast(c_uint, name.len), .False); const str_init_llvm_ty = str_init.typeOf(); @@ -8997,16 +8995,10 @@ pub const FuncGen = struct { slice_global.setAlignment(slice_alignment); const return_block = self.context.appendBasicBlock(fn_val, "Name"); - const this_tag_int_value = int: { - var tag_val_payload: Value.Payload.U32 = .{ - .base = .{ .tag = .enum_field_index }, - .data = @intCast(u32, field_index), - }; - break :int try self.dg.lowerValue(.{ - .ty = enum_ty, - .val = Value.initPayload(&tag_val_payload.base), - }); - }; + const this_tag_int_value = try self.dg.lowerValue(.{ + .ty = enum_ty, + .val = try mod.enumValueFieldIndex(enum_ty, field_index), + }); switch_instr.addCase(this_tag_int_value, return_block); self.builder.positionBuilderAtEnd(return_block); @@ -9094,7 +9086,7 @@ pub const FuncGen = struct { for (values, 0..) 
|*val, i| { const elem = try mask.elemValue(mod, i); - if (elem.isUndef()) { + if (elem.isUndef(mod)) { val.* = llvm_i32.getUndef(); } else { const int = elem.toSignedInt(mod); @@ -9419,11 +9411,7 @@ pub const FuncGen = struct { const tag_ty = union_ty.unionTagTypeHypothetical(mod); const union_field_name = union_obj.fields.keys()[extra.field_index]; const enum_field_index = tag_ty.enumFieldIndex(union_field_name, mod).?; - var tag_val_payload: Value.Payload.U32 = .{ - .base = .{ .tag = .enum_field_index }, - .data = @intCast(u32, enum_field_index), - }; - const tag_val = Value.initPayload(&tag_val_payload.base); + const tag_val = try mod.enumValueFieldIndex(tag_ty, enum_field_index); const tag_int_val = try tag_val.enumToInt(tag_ty, mod); break :blk tag_int_val.toUnsignedInt(mod); }; diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 1176eb746d..a81e36fefa 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -614,7 +614,7 @@ pub const DeclGen = struct { const dg = self.dg; const mod = dg.module; - if (val.isUndef()) { + if (val.isUndef(mod)) { const size = ty.abiSize(mod); return try self.addUndef(size); } @@ -882,7 +882,7 @@ pub const DeclGen = struct { // const target = self.getTarget(); // TODO: Fix the resulting global linking for these paths. - // if (val.isUndef()) { + // if (val.isUndef(mod)) { // // Special case: the entire value is undefined. In this case, we can just // // generate an OpVariable with no initializer. // return try section.emit(self.spv.gpa, .OpVariable, .{ @@ -978,7 +978,7 @@ pub const DeclGen = struct { log.debug("constant: ty = {}, val = {}", .{ ty.fmt(self.module), val.fmtValue(ty, self.module) }); - if (val.isUndef()) { + if (val.isUndef(mod)) { return self.spv.constUndef(result_ty_ref); } @@ -2091,7 +2091,7 @@ pub const DeclGen = struct { var i: usize = 0; while (i < mask_len) : (i += 1) { const elem = try mask.elemValue(self.module, i); - if (elem.isUndef()) { + if (elem.isUndef(mod)) { self.func.body.writeOperand(spec.LiteralInteger, 0xFFFF_FFFF); } else { const int = elem.toSignedInt(mod); diff --git a/src/link/Coff.zig b/src/link/Coff.zig index 4e75cfff97..452356de2c 100644 --- a/src/link/Coff.zig +++ b/src/link/Coff.zig @@ -1304,7 +1304,7 @@ fn getDeclOutputSection(self: *Coff, decl_index: Module.Decl.Index) u16 { const zig_ty = ty.zigTypeTag(mod); const val = decl.val; const index: u16 = blk: { - if (val.isUndefDeep()) { + if (val.isUndefDeep(mod)) { // TODO in release-fast and release-small, we should put undef in .bss break :blk self.data_section_index.?; } diff --git a/src/link/Elf.zig b/src/link/Elf.zig index c80d60d72a..b27967884e 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -2456,7 +2456,7 @@ fn getDeclShdrIndex(self: *Elf, decl_index: Module.Decl.Index) u16 { const zig_ty = ty.zigTypeTag(mod); const val = decl.val; const shdr_index: u16 = blk: { - if (val.isUndefDeep()) { + if (val.isUndefDeep(mod)) { // TODO in release-fast and release-small, we should put undef in .bss break :blk self.data_section_index.?; } diff --git a/src/link/MachO.zig b/src/link/MachO.zig index 06f79cf3fb..e7723595db 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -2270,7 +2270,7 @@ fn getDeclOutputSection(self: *MachO, decl_index: Module.Decl.Index) u8 { const single_threaded = self.base.options.single_threaded; const sect_id: u8 = blk: { // TODO finish and audit this function - if (val.isUndefDeep()) { + if (val.isUndefDeep(mod)) { if (mode == .ReleaseFast or mode == .ReleaseSmall) { @panic("TODO __DATA,__bss"); } else { 
diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig index ddf5130fd2..ef97a7fa7f 100644 --- a/src/link/Wasm.zig +++ b/src/link/Wasm.zig @@ -3374,7 +3374,7 @@ pub fn flushModule(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Nod } else if (decl.getVariable()) |variable| { if (!variable.is_mutable) { try wasm.parseAtom(atom_index, .{ .data = .read_only }); - } else if (variable.init.isUndefDeep()) { + } else if (variable.init.isUndefDeep(mod)) { // for safe build modes, we store the atom in the data segment, // whereas for unsafe build modes we store it in bss. const is_initialized = wasm.base.options.optimize_mode == .Debug or diff --git a/src/type.zig b/src/type.zig index 8358f3678d..d051191bfe 100644 --- a/src/type.zig +++ b/src/type.zig @@ -126,6 +126,7 @@ pub const Type = struct { }, // values, not types + .undef => unreachable, .un => unreachable, .extern_func => unreachable, .int => unreachable, @@ -1350,6 +1351,7 @@ pub const Type = struct { }, // values, not types + .undef => unreachable, .un => unreachable, .simple_value => unreachable, .extern_func => unreachable, @@ -1600,6 +1602,7 @@ pub const Type = struct { .enum_type => |enum_type| enum_type.tag_ty.toType().hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat), // values, not types + .undef => unreachable, .un => unreachable, .simple_value => unreachable, .extern_func => unreachable, @@ -1713,6 +1716,7 @@ pub const Type = struct { }, // values, not types + .undef => unreachable, .un => unreachable, .simple_value => unreachable, .extern_func => unreachable, @@ -2104,6 +2108,7 @@ pub const Type = struct { .enum_type => |enum_type| return AbiAlignmentAdvanced{ .scalar = enum_type.tag_ty.toType().abiAlignment(mod) }, // values, not types + .undef => unreachable, .un => unreachable, .simple_value => unreachable, .extern_func => unreachable, @@ -2499,6 +2504,7 @@ pub const Type = struct { .enum_type => |enum_type| return AbiSizeAdvanced{ .scalar = enum_type.tag_ty.toType().abiSize(mod) }, // values, not types + .undef => unreachable, .un => unreachable, .simple_value => unreachable, .extern_func => unreachable, @@ -2736,6 +2742,7 @@ pub const Type = struct { .enum_type => |enum_type| return bitSizeAdvanced(enum_type.tag_ty.toType(), mod, opt_sema), // values, not types + .undef => unreachable, .un => unreachable, .simple_value => unreachable, .extern_func => unreachable, @@ -3492,6 +3499,7 @@ pub const Type = struct { .opaque_type => unreachable, // values, not types + .undef => unreachable, .un => unreachable, .simple_value => unreachable, .extern_func => unreachable, @@ -3826,19 +3834,30 @@ pub const Type = struct { .opaque_type => return null, .enum_type => |enum_type| switch (enum_type.tag_mode) { .nonexhaustive => { - if (enum_type.tag_ty != .comptime_int_type and - !enum_type.tag_ty.toType().hasRuntimeBits(mod)) - { - return Value.enum_field_0; - } else { - return null; + if (enum_type.tag_ty == .comptime_int_type) return null; + + if (try enum_type.tag_ty.toType().onePossibleValue(mod)) |int_opv| { + const only = try mod.intern(.{ .enum_tag = .{ + .ty = ty.ip_index, + .int = int_opv.ip_index, + } }); + return only.toValue(); } + + return null; }, .auto, .explicit => switch (enum_type.names.len) { 0 => return Value.@"unreachable", 1 => { if (enum_type.values.len == 0) { - return Value.enum_field_0; // auto-numbered + const only = try mod.intern(.{ .enum_tag = .{ + .ty = ty.ip_index, + .int = try mod.intern(.{ .int = .{ + .ty = enum_type.tag_ty, + .storage = .{ .u64 = 0 }, + } }), + } }); + return only.toValue(); 
} else { return enum_type.values[0].toValue(); } @@ -3848,6 +3867,7 @@ pub const Type = struct { }, // values, not types + .undef => unreachable, .un => unreachable, .simple_value => unreachable, .extern_func => unreachable, @@ -4006,6 +4026,7 @@ pub const Type = struct { .enum_type => |enum_type| enum_type.tag_ty.toType().comptimeOnly(mod), // values, not types + .undef => unreachable, .un => unreachable, .simple_value => unreachable, .extern_func => unreachable, @@ -4224,36 +4245,22 @@ pub const Type = struct { return ip.stringToSlice(field_name); } - pub fn enumFieldIndex(ty: Type, field_name: []const u8, mod: *Module) ?usize { + pub fn enumFieldIndex(ty: Type, field_name: []const u8, mod: *Module) ?u32 { const ip = &mod.intern_pool; const enum_type = ip.indexToKey(ty.ip_index).enum_type; // If the string is not interned, then the field certainly is not present. const field_name_interned = ip.getString(field_name).unwrap() orelse return null; - return enum_type.nameIndex(ip.*, field_name_interned); + return enum_type.nameIndex(ip, field_name_interned); } /// Asserts `ty` is an enum. `enum_tag` can either be `enum_field_index` or /// an integer which represents the enum value. Returns the field index in /// declaration order, or `null` if `enum_tag` does not match any field. - pub fn enumTagFieldIndex(ty: Type, enum_tag: Value, mod: *Module) ?usize { - if (enum_tag.castTag(.enum_field_index)) |payload| { - return @as(usize, payload.data); - } + pub fn enumTagFieldIndex(ty: Type, enum_tag: Value, mod: *Module) ?u32 { const ip = &mod.intern_pool; const enum_type = ip.indexToKey(ty.ip_index).enum_type; - const tag_ty = enum_type.tag_ty.toType(); - if (enum_type.values.len == 0) { - if (enum_tag.compareAllWithZero(.lt, mod)) return null; - const end_val = mod.intValue(tag_ty, enum_type.names.len) catch |err| switch (err) { - // TODO: eliminate this failure condition - error.OutOfMemory => @panic("OOM"), - }; - if (enum_tag.compareScalar(.gte, end_val, tag_ty, mod)) return null; - return @intCast(usize, enum_tag.toUnsignedInt(mod)); - } else { - assert(ip.typeOf(enum_tag.ip_index) == enum_type.tag_ty); - return enum_type.tagValueIndex(ip.*, enum_tag.ip_index); - } + assert(ip.typeOf(enum_tag.ip_index) == enum_type.tag_ty); + return enum_type.tagValueIndex(ip, enum_tag.ip_index); } pub fn structFields(ty: Type, mod: *Module) Module.Struct.Fields { diff --git a/src/value.zig b/src/value.zig index bb3716d28e..84408424f0 100644 --- a/src/value.zig +++ b/src/value.zig @@ -73,8 +73,6 @@ pub const Value = struct { /// Pointer and length as sub `Value` objects. slice, enum_literal, - /// A specific enum tag, indicated by the field index (declaration order). - enum_field_index, @"error", /// When the type is error union: /// * If the tag is `.@"error"`, the error union is an error. 
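// With the `enum_field_index` payload tag gone, every call site in this series
// builds a tag value from a field index through the InternPool instead. The
// recurring shape, copied from the codegen hunks above (`enumValueFieldIndex`
// is the Module helper they all rely on):
//
//     const tag_val = try mod.enumValueFieldIndex(tag_ty, field_index);
//     const tag_int_val = try tag_val.enumToInt(tag_ty, mod);
//     const tag_int = tag_int_val.toUnsignedInt(mod);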
@@ -143,8 +141,6 @@ pub const Value = struct { .str_lit => Payload.StrLit, .slice => Payload.Slice, - .enum_field_index => Payload.U32, - .ty, .lazy_align, .lazy_size, @@ -397,7 +393,6 @@ pub const Value = struct { .legacy = .{ .ptr_otherwise = &new_payload.base }, }; }, - .enum_field_index => return self.copyPayloadShallow(arena, Payload.U32), .@"error" => return self.copyPayloadShallow(arena, Payload.Error), .aggregate => { @@ -515,7 +510,6 @@ pub const Value = struct { }, .empty_array => return out_stream.writeAll(".{}"), .enum_literal => return out_stream.print(".{}", .{std.zig.fmtId(val.castTag(.enum_literal).?.data)}), - .enum_field_index => return out_stream.print("(enum field {d})", .{val.castTag(.enum_field_index).?.data}), .bytes => return out_stream.print("\"{}\"", .{std.zig.fmtEscapes(val.castTag(.bytes).?.data)}), .str_lit => { const str_lit = val.castTag(.str_lit).?.data; @@ -618,87 +612,58 @@ pub const Value = struct { }; } - /// Asserts the type is an enum type. - pub fn toEnum(val: Value, comptime E: type) E { + pub fn enumToInt(val: Value, ty: Type, mod: *Module) Allocator.Error!Value { + const ip = &mod.intern_pool; switch (val.ip_index) { - .calling_convention_c => { - if (E == std.builtin.CallingConvention) { - return .C; + .none => { + const field_index = switch (val.tag()) { + .the_only_possible_value => blk: { + assert(ty.enumFieldCount(mod) == 1); + break :blk 0; + }, + .enum_literal => i: { + const name = val.castTag(.enum_literal).?.data; + break :i ty.enumFieldIndex(name, mod).?; + }, + else => unreachable, + }; + const enum_type = ip.indexToKey(ty.ip_index).enum_type; + if (enum_type.values.len != 0) { + return enum_type.values[field_index].toValue(); } else { - unreachable; + // Field index and integer values are the same. + return mod.intValue(enum_type.tag_ty.toType(), field_index); } }, - .calling_convention_inline => { - if (E == std.builtin.CallingConvention) { - return .Inline; - } else { - unreachable; - } + else => { + const enum_type = ip.indexToKey(ip.typeOf(val.ip_index)).enum_type; + const int = try ip.getCoerced(mod.gpa, val.ip_index, enum_type.tag_ty); + return int.toValue(); }, - .none => switch (val.tag()) { - .enum_field_index => { - const field_index = val.castTag(.enum_field_index).?.data; - return @intToEnum(E, field_index); - }, - .the_only_possible_value => { - const fields = std.meta.fields(E); - assert(fields.len == 1); - return @intToEnum(E, fields[0].value); - }, - else => unreachable, - }, - else => unreachable, } } - pub fn enumToInt(val: Value, ty: Type, mod: *Module) Allocator.Error!Value { - const field_index = switch (val.tag()) { - .enum_field_index => val.castTag(.enum_field_index).?.data, - .the_only_possible_value => blk: { - assert(ty.enumFieldCount(mod) == 1); - break :blk 0; - }, - .enum_literal => i: { - const name = val.castTag(.enum_literal).?.data; - break :i ty.enumFieldIndex(name, mod).?; - }, - // Assume it is already an integer and return it directly. - else => return val, - }; + pub fn tagName(val: Value, ty: Type, mod: *Module) []const u8 { + _ = ty; // TODO: remove this parameter now that we use InternPool - const enum_type = mod.intern_pool.indexToKey(ty.ip_index).enum_type; - if (enum_type.values.len != 0) { - return enum_type.values[field_index].toValue(); - } else { - // Field index and integer values are the same. 
- return mod.intValue(enum_type.tag_ty.toType(), field_index); + if (val.castTag(.enum_literal)) |payload| { + return payload.data; } - } - - pub fn tagName(val: Value, ty: Type, mod: *Module) []const u8 { - if (ty.zigTypeTag(mod) == .Union) return val.unionTag().tagName(ty.unionTagTypeHypothetical(mod), mod); - const enum_type = mod.intern_pool.indexToKey(ty.ip_index).enum_type; + const ip = &mod.intern_pool; - const field_index = switch (val.tag()) { - .enum_field_index => val.castTag(.enum_field_index).?.data, - .the_only_possible_value => blk: { - assert(ty.enumFieldCount(mod) == 1); - break :blk 0; - }, - .enum_literal => return val.castTag(.enum_literal).?.data, - else => field_index: { - if (enum_type.values.len == 0) { - // auto-numbered enum - break :field_index @intCast(u32, val.toUnsignedInt(mod)); - } - const field_index = enum_type.tagValueIndex(mod.intern_pool, val.ip_index).?; - break :field_index @intCast(u32, field_index); - }, + const enum_tag = switch (ip.indexToKey(val.ip_index)) { + .un => |un| ip.indexToKey(un.tag).enum_tag, + .enum_tag => |x| x, + else => unreachable, + }; + const enum_type = ip.indexToKey(enum_tag.ty).enum_type; + const field_index = field_index: { + const field_index = enum_type.tagValueIndex(ip, val.ip_index).?; + break :field_index @intCast(u32, field_index); }; - const field_name = enum_type.names[field_index]; - return mod.intern_pool.stringToSlice(field_name); + return ip.stringToSlice(field_name); } /// Asserts the value is an integer. @@ -722,10 +687,6 @@ pub const Value = struct { .the_only_possible_value, // i0, u0 => BigIntMutable.init(&space.limbs, 0).toConst(), - .enum_field_index => { - const index = val.castTag(.enum_field_index).?.data; - return BigIntMutable.init(&space.limbs, index).toConst(); - }, .runtime_value => { const sub_val = val.castTag(.runtime_value).?.data; return sub_val.toBigIntAdvanced(space, mod, opt_sema); @@ -759,6 +720,7 @@ pub const Value = struct { }, else => switch (mod.intern_pool.indexToKey(val.ip_index)) { .int => |int| int.storage.toBigInt(space), + .enum_tag => |enum_tag| mod.intern_pool.indexToKey(enum_tag.int).int.storage.toBigInt(space), else => unreachable, }, }; @@ -886,7 +848,7 @@ pub const Value = struct { }!void { const target = mod.getTarget(); const endian = target.cpu.arch.endian(); - if (val.isUndef()) { + if (val.isUndef(mod)) { const size = @intCast(usize, ty.abiSize(mod)); @memset(buffer[0..size], 0xaa); return; @@ -1007,7 +969,7 @@ pub const Value = struct { ) error{ ReinterpretDeclRef, OutOfMemory }!void { const target = mod.getTarget(); const endian = target.cpu.arch.endian(); - if (val.isUndef()) { + if (val.isUndef(mod)) { const bit_size = @intCast(usize, ty.bitSize(mod)); std.mem.writeVarPackedInt(buffer, bit_offset, bit_size, @as(u1, 0), endian); return; @@ -1087,7 +1049,7 @@ pub const Value = struct { .Auto => unreachable, // Sema is supposed to have emitted a compile error already .Extern => unreachable, // Handled in non-packed writeToMemory .Packed => { - const field_index = ty.unionTagFieldIndex(val.unionTag(), mod); + const field_index = ty.unionTagFieldIndex(val.unionTag(mod), mod); const field_type = ty.unionFields(mod).values()[field_index.?].ty; const field_val = try val.fieldValue(field_type, mod, field_index.?); @@ -1432,7 +1394,7 @@ pub const Value = struct { } pub fn popCount(val: Value, ty: Type, mod: *Module) u64 { - assert(!val.isUndef()); + assert(!val.isUndef(mod)); switch (val.ip_index) { .bool_false => return 0, .bool_true => return 1, @@ -1450,7 +1412,7 @@ pub 
const Value = struct { } pub fn bitReverse(val: Value, ty: Type, mod: *Module, arena: Allocator) !Value { - assert(!val.isUndef()); + assert(!val.isUndef(mod)); const info = ty.intInfo(mod); @@ -1468,7 +1430,7 @@ pub const Value = struct { } pub fn byteSwap(val: Value, ty: Type, mod: *Module, arena: Allocator) !Value { - assert(!val.isUndef()); + assert(!val.isUndef(mod)); const info = ty.intInfo(mod); @@ -1578,7 +1540,6 @@ pub const Value = struct { .variable, => .gt, - .enum_field_index => return std.math.order(lhs.castTag(.enum_field_index).?.data, 0), .runtime_value => { // This is needed to correctly handle hashing the value. // Checks in Sema should prevent direct comparisons from reaching here. @@ -1633,6 +1594,10 @@ pub const Value = struct { .big_int => |big_int| big_int.orderAgainstScalar(0), inline .u64, .i64 => |x| std.math.order(x, 0), }, + .enum_tag => |enum_tag| switch (mod.intern_pool.indexToKey(enum_tag.int).int.storage) { + .big_int => |big_int| big_int.orderAgainstScalar(0), + inline .u64, .i64 => |x| std.math.order(x, 0), + }, .float => |float| switch (float.storage) { inline else => |x| std.math.order(x, 0), }, @@ -1861,11 +1826,6 @@ pub const Value = struct { const b_name = b.castTag(.enum_literal).?.data; return std.mem.eql(u8, a_name, b_name); }, - .enum_field_index => { - const a_field_index = a.castTag(.enum_field_index).?.data; - const b_field_index = b.castTag(.enum_field_index).?.data; - return a_field_index == b_field_index; - }, .opt_payload => { const a_payload = a.castTag(.opt_payload).?.data; const b_payload = b.castTag(.opt_payload).?.data; @@ -2064,13 +2024,9 @@ pub const Value = struct { } const field_name = tuple.names[0]; const union_obj = mod.typeToUnion(ty).?; - const field_index = union_obj.fields.getIndex(field_name) orelse return false; + const field_index = @intCast(u32, union_obj.fields.getIndex(field_name) orelse return false); const tag_and_val = b.castTag(.@"union").?.data; - var field_tag_buf: Value.Payload.U32 = .{ - .base = .{ .tag = .enum_field_index }, - .data = @intCast(u32, field_index), - }; - const field_tag = Value.initPayload(&field_tag_buf.base); + const field_tag = try mod.enumValueFieldIndex(union_obj.tag_ty, field_index); const tag_matches = tag_and_val.tag.eql(field_tag, union_obj.tag_ty, mod); if (!tag_matches) return false; return eqlAdvanced(tag_and_val.val, union_obj.tag_ty, tuple.values[0], tuple.types[0], mod, opt_sema); @@ -2132,7 +2088,7 @@ pub const Value = struct { } const zig_ty_tag = ty.zigTypeTag(mod); std.hash.autoHash(hasher, zig_ty_tag); - if (val.isUndef()) return; + if (val.isUndef(mod)) return; // The value is runtime-known and shouldn't affect the hash. if (val.isRuntimeValue()) return; @@ -2277,7 +2233,7 @@ pub const Value = struct { /// This function is used by hash maps and so treats floating-point NaNs as equal /// to each other, and not equal to other floating-point values. pub fn hashUncoerced(val: Value, ty: Type, hasher: *std.hash.Wyhash, mod: *Module) void { - if (val.isUndef()) return; + if (val.isUndef(mod)) return; // The value is runtime-known and shouldn't affect the hash. 
if (val.isRuntimeValue()) return; @@ -2726,16 +2682,12 @@ pub const Value = struct { } } - pub fn unionTag(val: Value) Value { - switch (val.ip_index) { - .undef => return val, - .none => switch (val.tag()) { - .enum_field_index => return val, - .@"union" => return val.castTag(.@"union").?.data.tag, - else => unreachable, - }, + pub fn unionTag(val: Value, mod: *Module) Value { + return switch (mod.intern_pool.indexToKey(val.ip_index)) { + .undef, .enum_tag => val, + .un => |un| un.tag.toValue(), else => unreachable, - } + }; } /// Returns a pointer to the element value at the index. @@ -2769,27 +2721,30 @@ pub const Value = struct { }); } - pub fn isUndef(val: Value) bool { - return val.ip_index == .undef; + pub fn isUndef(val: Value, mod: *Module) bool { + if (val.ip_index == .none) return false; + return switch (mod.intern_pool.indexToKey(val.ip_index)) { + .undef => true, + .simple_value => |v| v == .undefined, + else => false, + }; } /// TODO: check for cases such as array that is not marked undef but all the element /// values are marked undef, or struct that is not marked undef but all fields are marked /// undef, etc. - pub fn isUndefDeep(val: Value) bool { - return val.isUndef(); + pub fn isUndefDeep(val: Value, mod: *Module) bool { + return val.isUndef(mod); } /// Returns true if any value contained in `self` is undefined. - /// TODO: check for cases such as array that is not marked undef but all the element - /// values are marked undef, or struct that is not marked undef but all fields are marked - /// undef, etc. - pub fn anyUndef(self: Value, mod: *Module) !bool { - switch (self.ip_index) { + pub fn anyUndef(val: Value, mod: *Module) !bool { + if (val.ip_index == .none) return false; + switch (val.ip_index) { .undef => return true, - .none => switch (self.tag()) { + .none => switch (val.tag()) { .slice => { - const payload = self.castTag(.slice).?; + const payload = val.castTag(.slice).?; const len = payload.data.len.toUnsignedInt(mod); for (0..len) |i| { @@ -2799,14 +2754,21 @@ pub const Value = struct { }, .aggregate => { - const payload = self.castTag(.aggregate).?; - for (payload.data) |val| { - if (try val.anyUndef(mod)) return true; + const payload = val.castTag(.aggregate).?; + for (payload.data) |field| { + if (try field.anyUndef(mod)) return true; } }, else => {}, }, - else => {}, + else => switch (mod.intern_pool.indexToKey(val.ip_index)) { + .undef => return true, + .simple_value => |v| if (v == .undefined) return true, + .aggregate => |aggregate| for (aggregate.fields) |field| { + if (try anyUndef(field.toValue(), mod)) return true; + }, + else => {}, + }, } return false; @@ -2819,11 +2781,7 @@ pub const Value = struct { .undef => unreachable, .unreachable_value => unreachable, - .null_value, - .zero, - .zero_usize, - .zero_u8, - => true, + .null_value => true, .none => switch (val.tag()) { .opt_payload => false, @@ -2843,6 +2801,7 @@ pub const Value = struct { .big_int => |big_int| big_int.eqZero(), inline .u64, .i64 => |x| x == 0, }, + .opt => |opt| opt.val == .none, else => unreachable, }, }; @@ -3024,8 +2983,8 @@ pub const Value = struct { arena: Allocator, mod: *Module, ) !Value { - assert(!lhs.isUndef()); - assert(!rhs.isUndef()); + assert(!lhs.isUndef(mod)); + assert(!rhs.isUndef(mod)); const info = ty.intInfo(mod); @@ -3071,8 +3030,8 @@ pub const Value = struct { arena: Allocator, mod: *Module, ) !Value { - assert(!lhs.isUndef()); - assert(!rhs.isUndef()); + assert(!lhs.isUndef(mod)); + assert(!rhs.isUndef(mod)); const info = ty.intInfo(mod); @@ -3178,7 
+3137,7 @@ pub const Value = struct { arena: Allocator, mod: *Module, ) !Value { - if (lhs.isUndef() or rhs.isUndef()) return Value.undef; + if (lhs.isUndef(mod) or rhs.isUndef(mod)) return Value.undef; if (ty.zigTypeTag(mod) == .ComptimeInt) { return intMul(lhs, rhs, ty, arena, mod); @@ -3220,8 +3179,8 @@ pub const Value = struct { arena: Allocator, mod: *Module, ) !Value { - assert(!lhs.isUndef()); - assert(!rhs.isUndef()); + assert(!lhs.isUndef(mod)); + assert(!rhs.isUndef(mod)); const info = ty.intInfo(mod); @@ -3249,7 +3208,7 @@ pub const Value = struct { /// Supports both floats and ints; handles undefined. pub fn numberMax(lhs: Value, rhs: Value, mod: *Module) Value { - if (lhs.isUndef() or rhs.isUndef()) return undef; + if (lhs.isUndef(mod) or rhs.isUndef(mod)) return undef; if (lhs.isNan(mod)) return rhs; if (rhs.isNan(mod)) return lhs; @@ -3261,7 +3220,7 @@ pub const Value = struct { /// Supports both floats and ints; handles undefined. pub fn numberMin(lhs: Value, rhs: Value, mod: *Module) Value { - if (lhs.isUndef() or rhs.isUndef()) return undef; + if (lhs.isUndef(mod) or rhs.isUndef(mod)) return undef; if (lhs.isNan(mod)) return rhs; if (rhs.isNan(mod)) return lhs; @@ -3286,7 +3245,7 @@ pub const Value = struct { /// operands must be integers; handles undefined. pub fn bitwiseNotScalar(val: Value, ty: Type, arena: Allocator, mod: *Module) !Value { - if (val.isUndef()) return Value.undef; + if (val.isUndef(mod)) return Value.undef; const info = ty.intInfo(mod); @@ -3324,7 +3283,7 @@ pub const Value = struct { /// operands must be integers; handles undefined. pub fn bitwiseAndScalar(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value { - if (lhs.isUndef() or rhs.isUndef()) return Value.undef; + if (lhs.isUndef(mod) or rhs.isUndef(mod)) return Value.undef; // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. @@ -3358,7 +3317,7 @@ pub const Value = struct { /// operands must be integers; handles undefined. pub fn bitwiseNandScalar(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value { - if (lhs.isUndef() or rhs.isUndef()) return Value.undef; + if (lhs.isUndef(mod) or rhs.isUndef(mod)) return Value.undef; const anded = try bitwiseAnd(lhs, rhs, ty, arena, mod); const all_ones = if (ty.isSignedInt(mod)) try mod.intValue(ty, -1) else try ty.maxIntScalar(mod, ty); @@ -3381,7 +3340,7 @@ pub const Value = struct { /// operands must be integers; handles undefined. pub fn bitwiseOrScalar(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value { - if (lhs.isUndef() or rhs.isUndef()) return Value.undef; + if (lhs.isUndef(mod) or rhs.isUndef(mod)) return Value.undef; // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. @@ -3415,7 +3374,7 @@ pub const Value = struct { /// operands must be integers; handles undefined. pub fn bitwiseXorScalar(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value { - if (lhs.isUndef() or rhs.isUndef()) return Value.undef; + if (lhs.isUndef(mod) or rhs.isUndef(mod)) return Value.undef; // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. 
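// Each scalar helper in this file now follows the same convention: an undefined
// operand makes the result undefined, and the check consults the intern pool
// through the new `mod` parameter. A hypothetical caller-side sketch of that
// behavior (the operand values and `Type.u8` are chosen here for illustration):
//
//     const lhs = Value.undef;
//     const rhs = try mod.intValue(Type.u8, 0xf0);
//     const res = try Value.bitwiseAndScalar(lhs, rhs, Type.u8, arena, mod);
//     assert(res.isUndef(mod)); // undef is contagious in these comptime ops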
@@ -4697,11 +4656,6 @@ pub const Value = struct { pub const Payload = struct { tag: Tag, - pub const U32 = struct { - base: Payload, - data: u32, - }; - pub const Function = struct { base: Payload, data: *Module.Fn, @@ -4885,16 +4839,6 @@ pub const Value = struct { pub const generic_poison_type: Value = .{ .ip_index = .generic_poison_type, .legacy = undefined }; pub const empty_struct: Value = .{ .ip_index = .empty_struct, .legacy = undefined }; - pub const enum_field_0: Value = .{ - .ip_index = .none, - .legacy = .{ .ptr_otherwise = &enum_field_0_payload.base }, - }; - - var enum_field_0_payload: Payload.U32 = .{ - .base = .{ .tag = .enum_field_index }, - .data = 0, - }; - pub fn makeBool(x: bool) Value { return if (x) Value.true else Value.false; } -- cgit v1.2.3 From d18881de1be811c1dff52590223b92c916c4b773 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sun, 14 May 2023 19:23:41 -0700 Subject: stage2: move anon tuples and anon structs to InternPool --- src/InternPool.zig | 203 ++++++++++- src/Sema.zig | 715 +++++++++++++++++++++------------------ src/TypedValue.zig | 20 +- src/arch/x86_64/CodeGen.zig | 2 +- src/codegen/c.zig | 56 ++- src/codegen/c/type.zig | 6 +- src/codegen/llvm.zig | 518 ++++++++++++++-------------- src/codegen/spirv.zig | 11 +- src/link/Dwarf.zig | 22 +- src/type.zig | 806 +++++++++++++------------------------------- src/value.zig | 52 ++- 11 files changed, 1147 insertions(+), 1264 deletions(-) (limited to 'src/arch') diff --git a/src/InternPool.zig b/src/InternPool.zig index eace006d4c..74cc452176 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -137,9 +137,14 @@ pub const Key = union(enum) { payload_type: Index, }, simple_type: SimpleType, - /// If `empty_struct_type` is handled separately, then this value may be - /// safely assumed to never be `none`. + /// This represents a struct that has been explicitly declared in source code, + /// or was created with `@Type`. It is unique and based on a declaration. + /// It may be a tuple, if declared like this: `struct {A, B, C}`. struct_type: StructType, + /// This is an anonymous struct or tuple type which has no corresponding + /// declaration. It is used for types that have no `struct` keyword in the + /// source code, and were not created via `@Type`. + anon_struct_type: AnonStructType, union_type: UnionType, opaque_type: OpaqueType, enum_type: EnumType, @@ -168,7 +173,7 @@ pub const Key = union(enum) { /// Each element/field stored as an `Index`. /// In the case of sentinel-terminated arrays, the sentinel value *is* stored, /// so the slice length will be one more than the type's array length. - aggregate: Aggregate, + aggregate: Key.Aggregate, /// An instance of a union. un: Union, @@ -222,22 +227,25 @@ pub const Key = union(enum) { namespace: Module.Namespace.Index, }; - /// There are three possibilities here: - /// * `@TypeOf(.{})` (untyped empty struct literal) - /// - namespace == .none, index == .none - /// * A struct which has a namepace, but no fields. - /// - index == .none - /// * A struct which has fields as well as a namepace. pub const StructType = struct { - /// The `none` tag is used to represent two cases: - /// * `@TypeOf(.{})`, in which case `namespace` will also be `none`. - /// * A struct with no fields, in which case `namespace` will be populated. + /// The `none` tag is used to represent a struct with no fields. index: Module.Struct.OptionalIndex, - /// This will be `none` only in the case of `@TypeOf(.{})` - /// (`Index.empty_struct_type`). 
+ /// May be `none` if the struct has no declarations. namespace: Module.Namespace.OptionalIndex, }; + pub const AnonStructType = struct { + types: []const Index, + /// This may be empty, indicating this is a tuple. + names: []const NullTerminatedString, + /// These elements may be `none`, indicating runtime-known. + values: []const Index, + + pub fn isTuple(self: AnonStructType) bool { + return self.names.len == 0; + } + }; + pub const UnionType = struct { index: Module.Union.Index, runtime_tag: RuntimeTag, @@ -498,6 +506,12 @@ pub const Key = union(enum) { std.hash.autoHash(hasher, aggregate.ty); for (aggregate.fields) |field| std.hash.autoHash(hasher, field); }, + + .anon_struct_type => |anon_struct_type| { + for (anon_struct_type.types) |elem| std.hash.autoHash(hasher, elem); + for (anon_struct_type.values) |elem| std.hash.autoHash(hasher, elem); + for (anon_struct_type.names) |elem| std.hash.autoHash(hasher, elem); + }, } } @@ -650,6 +664,12 @@ pub const Key = union(enum) { if (a_info.ty != b_info.ty) return false; return std.mem.eql(Index, a_info.fields, b_info.fields); }, + .anon_struct_type => |a_info| { + const b_info = b.anon_struct_type; + return std.mem.eql(Index, a_info.types, b_info.types) and + std.mem.eql(Index, a_info.values, b_info.values) and + std.mem.eql(NullTerminatedString, a_info.names, b_info.names); + }, } } @@ -666,6 +686,7 @@ pub const Key = union(enum) { .union_type, .opaque_type, .enum_type, + .anon_struct_type, => .type_type, inline .ptr, @@ -1020,9 +1041,10 @@ pub const static_keys = [_]Key{ .{ .simple_type = .var_args_param }, // empty_struct_type - .{ .struct_type = .{ - .namespace = .none, - .index = .none, + .{ .anon_struct_type = .{ + .types = &.{}, + .names = &.{}, + .values = &.{}, } }, .{ .simple_value = .undefined }, @@ -1144,6 +1166,12 @@ pub const Tag = enum(u8) { /// Module.Struct object allocated for it. /// data is Module.Namespace.Index. type_struct_ns, + /// An AnonStructType which stores types, names, and values for each field. + /// data is extra index of `TypeStructAnon`. + type_struct_anon, + /// An AnonStructType which has only types and values for each field. + /// data is extra index of `TypeStructAnon`. + type_tuple_anon, /// A tagged union type. /// `data` is `Module.Union.Index`. type_union_tagged, @@ -1249,6 +1277,26 @@ pub const Tag = enum(u8) { only_possible_value, /// data is extra index to Key.Union. union_value, + /// An instance of a struct, array, or vector. + /// data is extra index to `Aggregate`. + aggregate, +}; + +/// Trailing: +/// 0. element: Index for each len +/// len is determined by the aggregate type. +pub const Aggregate = struct { + /// The type of the aggregate. + ty: Index, +}; + +/// Trailing: +/// 0. type: Index for each fields_len +/// 1. value: Index for each fields_len +/// 2. name: NullTerminatedString for each fields_len +/// The set of field names is omitted when the `Tag` is `type_tuple_anon`. 
+pub const TypeStructAnon = struct { + fields_len: u32, }; /// Having `SimpleType` and `SimpleValue` in separate enums makes it easier to @@ -1572,6 +1620,7 @@ pub fn deinit(ip: *InternPool, gpa: Allocator) void { } pub fn indexToKey(ip: InternPool, index: Index) Key { + assert(index != .none); const item = ip.items.get(@enumToInt(index)); const data = item.data; return switch (item.tag) { @@ -1659,6 +1708,30 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { .namespace = @intToEnum(Module.Namespace.Index, data).toOptional(), } }, + .type_struct_anon => { + const type_struct_anon = ip.extraDataTrail(TypeStructAnon, data); + const fields_len = type_struct_anon.data.fields_len; + const types = ip.extra.items[type_struct_anon.end..][0..fields_len]; + const values = ip.extra.items[type_struct_anon.end + fields_len ..][0..fields_len]; + const names = ip.extra.items[type_struct_anon.end + 2 * fields_len ..][0..fields_len]; + return .{ .anon_struct_type = .{ + .types = @ptrCast([]const Index, types), + .values = @ptrCast([]const Index, values), + .names = @ptrCast([]const NullTerminatedString, names), + } }; + }, + .type_tuple_anon => { + const type_struct_anon = ip.extraDataTrail(TypeStructAnon, data); + const fields_len = type_struct_anon.data.fields_len; + const types = ip.extra.items[type_struct_anon.end..][0..fields_len]; + const values = ip.extra.items[type_struct_anon.end + fields_len ..][0..fields_len]; + return .{ .anon_struct_type = .{ + .types = @ptrCast([]const Index, types), + .values = @ptrCast([]const Index, values), + .names = &.{}, + } }; + }, + .type_union_untagged => .{ .union_type = .{ .index = @intToEnum(Module.Union.Index, data), .runtime_tag = .none, @@ -1797,6 +1870,15 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { else => unreachable, }; }, + .aggregate => { + const extra = ip.extraDataTrail(Aggregate, data); + const len = @intCast(u32, ip.aggregateTypeLen(extra.data.ty)); + const fields = @ptrCast([]const Index, ip.extra.items[extra.end..][0..len]); + return .{ .aggregate = .{ + .ty = extra.data.ty, + .fields = fields, + } }; + }, .union_value => .{ .un = ip.extraData(Key.Union, data) }, .enum_tag => .{ .enum_tag = ip.extraData(Key.EnumTag, data) }, }; @@ -1982,6 +2064,45 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { }); }, + .anon_struct_type => |anon_struct_type| { + assert(anon_struct_type.types.len == anon_struct_type.values.len); + for (anon_struct_type.types) |elem| assert(elem != .none); + + const fields_len = @intCast(u32, anon_struct_type.types.len); + if (anon_struct_type.names.len == 0) { + try ip.extra.ensureUnusedCapacity( + gpa, + @typeInfo(TypeStructAnon).Struct.fields.len + (fields_len * 2), + ); + ip.items.appendAssumeCapacity(.{ + .tag = .type_tuple_anon, + .data = ip.addExtraAssumeCapacity(TypeStructAnon{ + .fields_len = fields_len, + }), + }); + ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, anon_struct_type.types)); + ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, anon_struct_type.values)); + return @intToEnum(Index, ip.items.len - 1); + } + + assert(anon_struct_type.names.len == anon_struct_type.types.len); + + try ip.extra.ensureUnusedCapacity( + gpa, + @typeInfo(TypeStructAnon).Struct.fields.len + (fields_len * 3), + ); + ip.items.appendAssumeCapacity(.{ + .tag = .type_struct_anon, + .data = ip.addExtraAssumeCapacity(TypeStructAnon{ + .fields_len = fields_len, + }), + }); + ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, anon_struct_type.types)); + 
ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, anon_struct_type.values)); + ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, anon_struct_type.names)); + return @intToEnum(Index, ip.items.len - 1); + }, + .union_type => |union_type| { ip.items.appendAssumeCapacity(.{ .tag = switch (union_type.runtime_tag) { @@ -2269,6 +2390,16 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { }, .aggregate => |aggregate| { + assert(aggregate.ty != .none); + for (aggregate.fields) |elem| assert(elem != .none); + if (aggregate.fields.len != ip.aggregateTypeLen(aggregate.ty)) { + std.debug.print("aggregate fields len = {d}, type len = {d}\n", .{ + aggregate.fields.len, + ip.aggregateTypeLen(aggregate.ty), + }); + } + assert(aggregate.fields.len == ip.aggregateTypeLen(aggregate.ty)); + if (aggregate.fields.len == 0) { ip.items.appendAssumeCapacity(.{ .tag = .only_possible_value, @@ -2276,7 +2407,19 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { }); return @intToEnum(Index, ip.items.len - 1); } - @panic("TODO"); + + try ip.extra.ensureUnusedCapacity( + gpa, + @typeInfo(Aggregate).Struct.fields.len + aggregate.fields.len, + ); + + ip.items.appendAssumeCapacity(.{ + .tag = .aggregate, + .data = ip.addExtraAssumeCapacity(Aggregate{ + .ty = aggregate.ty, + }), + }); + ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, aggregate.fields)); }, .un => |un| { @@ -2913,6 +3056,14 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void { .type_opaque => @sizeOf(Key.OpaqueType), .type_struct => @sizeOf(Module.Struct) + @sizeOf(Module.Namespace) + @sizeOf(Module.Decl), .type_struct_ns => @sizeOf(Module.Namespace), + .type_struct_anon => b: { + const info = ip.extraData(TypeStructAnon, data); + break :b @sizeOf(TypeStructAnon) + (@sizeOf(u32) * 3 * info.fields_len); + }, + .type_tuple_anon => b: { + const info = ip.extraData(TypeStructAnon, data); + break :b @sizeOf(TypeStructAnon) + (@sizeOf(u32) * 2 * info.fields_len); + }, .type_union_tagged, .type_union_untagged, @@ -2942,6 +3093,12 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void { }, .enum_tag => @sizeOf(Key.EnumTag), + .aggregate => b: { + const info = ip.extraData(Aggregate, data); + const fields_len = @intCast(u32, ip.aggregateTypeLen(info.ty)); + break :b @sizeOf(Aggregate) + (@sizeOf(u32) * fields_len); + }, + .float_f16 => 0, .float_f32 => 0, .float_f64 => @sizeOf(Float64), @@ -3079,3 +3236,13 @@ pub fn toEnum(ip: InternPool, comptime E: type, i: Index) E { const int = ip.indexToKey(i).enum_tag.int; return @intToEnum(E, ip.indexToKey(int).int.storage.u64); } + +pub fn aggregateTypeLen(ip: InternPool, ty: Index) u64 { + return switch (ip.indexToKey(ty)) { + .struct_type => |struct_type| ip.structPtrConst(struct_type.index.unwrap() orelse return 0).fields.count(), + .anon_struct_type => |anon_struct_type| anon_struct_type.types.len, + .array_type => |array_type| array_type.len, + .vector_type => |vector_type| vector_type.len, + else => unreachable, + }; +} diff --git a/src/Sema.zig b/src/Sema.zig index 2fc364ebd7..31e07bdcdc 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -7896,12 +7896,15 @@ fn resolveGenericInstantiationType( } fn resolveTupleLazyValues(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!void { - if (!ty.isSimpleTupleOrAnonStruct()) return; - const tuple = ty.tupleFields(); - for (tuple.values, 0..) 
|field_val, i| { - try sema.resolveTupleLazyValues(block, src, tuple.types[i]); - if (field_val.ip_index == .unreachable_value) continue; - try sema.resolveLazyValue(field_val); + const mod = sema.mod; + const tuple = switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .anon_struct_type => |tuple| tuple, + else => return, + }; + for (tuple.types, tuple.values) |field_ty, field_val| { + try sema.resolveTupleLazyValues(block, src, field_ty.toType()); + if (field_val == .none) continue; + try sema.resolveLazyValue(field_val.toValue()); } } @@ -12038,31 +12041,49 @@ fn zirHasField(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const unresolved_ty = try sema.resolveType(block, ty_src, extra.lhs); const field_name = try sema.resolveConstString(block, name_src, extra.rhs, "field name must be comptime-known"); const ty = try sema.resolveTypeFields(unresolved_ty); + const ip = &mod.intern_pool; const has_field = hf: { - if (ty.isSlice(mod)) { - if (mem.eql(u8, field_name, "ptr")) break :hf true; - if (mem.eql(u8, field_name, "len")) break :hf true; - break :hf false; - } - if (ty.castTag(.anon_struct)) |pl| { - break :hf for (pl.data.names) |name| { - if (mem.eql(u8, name, field_name)) break true; - } else false; - } - if (ty.isTuple(mod)) { - const field_index = std.fmt.parseUnsigned(u32, field_name, 10) catch break :hf false; - break :hf field_index < ty.structFieldCount(mod); - } - break :hf switch (ty.zigTypeTag(mod)) { - .Struct => ty.structFields(mod).contains(field_name), - .Union => ty.unionFields(mod).contains(field_name), - .Enum => ty.enumFieldIndex(field_name, mod) != null, - .Array => mem.eql(u8, field_name, "len"), - else => return sema.fail(block, ty_src, "type '{}' does not support '@hasField'", .{ - ty.fmt(sema.mod), - }), - }; + switch (ip.indexToKey(ty.ip_index)) { + .ptr_type => |ptr_type| switch (ptr_type.size) { + .Slice => { + if (mem.eql(u8, field_name, "ptr")) break :hf true; + if (mem.eql(u8, field_name, "len")) break :hf true; + break :hf false; + }, + else => {}, + }, + .anon_struct_type => |anon_struct| { + if (anon_struct.names.len != 0) { + // If the string is not interned, then the field certainly is not present. + const name_interned = ip.getString(field_name).unwrap() orelse break :hf false; + break :hf mem.indexOfScalar(InternPool.NullTerminatedString, anon_struct.names, name_interned) != null; + } else { + const field_index = std.fmt.parseUnsigned(u32, field_name, 10) catch break :hf false; + break :hf field_index < ty.structFieldCount(mod); + } + }, + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse break :hf false; + assert(struct_obj.haveFieldTypes()); + break :hf struct_obj.fields.contains(field_name); + }, + .union_type => |union_type| { + const union_obj = mod.unionPtr(union_type.index); + assert(union_obj.haveFieldTypes()); + break :hf union_obj.fields.contains(field_name); + }, + .enum_type => |enum_type| { + // If the string is not interned, then the field certainly is not present. 
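+                // (`getString` is a pure lookup; unlike `getOrPutString` it
+                // never adds a new string to the intern pool.)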
+ const name_interned = ip.getString(field_name).unwrap() orelse break :hf false; + break :hf enum_type.nameIndex(ip, name_interned) != null; + }, + .array_type => break :hf mem.eql(u8, field_name, "len"), + else => {}, + } + return sema.fail(block, ty_src, "type '{}' does not support '@hasField'", .{ + ty.fmt(sema.mod), + }); }; if (has_field) { return Air.Inst.Ref.bool_true; @@ -12632,42 +12653,48 @@ fn analyzeTupleCat( } const final_len = try sema.usizeCast(block, rhs_src, dest_fields); - const types = try sema.arena.alloc(Type, final_len); - const values = try sema.arena.alloc(Value, final_len); + const types = try sema.arena.alloc(InternPool.Index, final_len); + const values = try sema.arena.alloc(InternPool.Index, final_len); const opt_runtime_src = rs: { var runtime_src: ?LazySrcLoc = null; var i: u32 = 0; while (i < lhs_len) : (i += 1) { - types[i] = lhs_ty.structFieldType(i, mod); + types[i] = lhs_ty.structFieldType(i, mod).ip_index; const default_val = lhs_ty.structFieldDefaultValue(i, mod); - values[i] = default_val; + values[i] = default_val.ip_index; const operand_src = lhs_src; // TODO better source location if (default_val.ip_index == .unreachable_value) { runtime_src = operand_src; + values[i] = .none; } } i = 0; while (i < rhs_len) : (i += 1) { - types[i + lhs_len] = rhs_ty.structFieldType(i, mod); + types[i + lhs_len] = rhs_ty.structFieldType(i, mod).ip_index; const default_val = rhs_ty.structFieldDefaultValue(i, mod); - values[i + lhs_len] = default_val; + values[i + lhs_len] = default_val.ip_index; const operand_src = rhs_src; // TODO better source location if (default_val.ip_index == .unreachable_value) { runtime_src = operand_src; + values[i + lhs_len] = .none; } } break :rs runtime_src; }; - const tuple_ty = try Type.Tag.tuple.create(sema.arena, .{ + const tuple_ty = try mod.intern(.{ .anon_struct_type = .{ .types = types, .values = values, - }); + .names = &.{}, + } }); const runtime_src = opt_runtime_src orelse { - const tuple_val = try Value.Tag.aggregate.create(sema.arena, values); - return sema.addConstant(tuple_ty, tuple_val); + const tuple_val = try mod.intern(.{ .aggregate = .{ + .ty = tuple_ty, + .fields = values, + } }); + return sema.addConstant(tuple_ty.toType(), tuple_val.toValue()); }; try sema.requireRuntimeBlock(block, src, runtime_src); @@ -12685,7 +12712,7 @@ fn analyzeTupleCat( try sema.tupleFieldValByIndex(block, operand_src, rhs, i, rhs_ty); } - return block.addAggregateInit(tuple_ty, element_refs); + return block.addAggregateInit(tuple_ty.toType(), element_refs); } fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -12938,7 +12965,7 @@ fn analyzeTupleMul( block: *Block, src_node: i32, operand: Air.Inst.Ref, - factor: u64, + factor: usize, ) CompileError!Air.Inst.Ref { const mod = sema.mod; const operand_ty = sema.typeOf(operand); @@ -12947,44 +12974,45 @@ fn analyzeTupleMul( const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = src_node }; const tuple_len = operand_ty.structFieldCount(mod); - const final_len_u64 = std.math.mul(u64, tuple_len, factor) catch + const final_len = std.math.mul(usize, tuple_len, factor) catch return sema.fail(block, rhs_src, "operation results in overflow", .{}); - if (final_len_u64 == 0) { + if (final_len == 0) { return sema.addConstant(Type.empty_struct_literal, Value.empty_struct); } - const final_len = try sema.usizeCast(block, rhs_src, final_len_u64); - - const types = try sema.arena.alloc(Type, final_len); - const values = try sema.arena.alloc(Value, final_len); + 
const types = try sema.arena.alloc(InternPool.Index, final_len); + const values = try sema.arena.alloc(InternPool.Index, final_len); const opt_runtime_src = rs: { var runtime_src: ?LazySrcLoc = null; - var i: u32 = 0; - while (i < tuple_len) : (i += 1) { - types[i] = operand_ty.structFieldType(i, mod); - values[i] = operand_ty.structFieldDefaultValue(i, mod); + for (0..tuple_len) |i| { + types[i] = operand_ty.structFieldType(i, mod).ip_index; + values[i] = operand_ty.structFieldDefaultValue(i, mod).ip_index; const operand_src = lhs_src; // TODO better source location - if (values[i].ip_index == .unreachable_value) { + if (values[i] == .unreachable_value) { runtime_src = operand_src; + values[i] = .none; // TODO don't treat unreachable_value as special } } - i = 0; - while (i < factor) : (i += 1) { - mem.copyForwards(Type, types[tuple_len * i ..], types[0..tuple_len]); - mem.copyForwards(Value, values[tuple_len * i ..], values[0..tuple_len]); + for (0..factor) |i| { + mem.copyForwards(InternPool.Index, types[tuple_len * i ..], types[0..tuple_len]); + mem.copyForwards(InternPool.Index, values[tuple_len * i ..], values[0..tuple_len]); } break :rs runtime_src; }; - const tuple_ty = try Type.Tag.tuple.create(sema.arena, .{ + const tuple_ty = try mod.intern(.{ .anon_struct_type = .{ .types = types, .values = values, - }); + .names = &.{}, + } }); const runtime_src = opt_runtime_src orelse { - const tuple_val = try Value.Tag.aggregate.create(sema.arena, values); - return sema.addConstant(tuple_ty, tuple_val); + const tuple_val = try mod.intern(.{ .aggregate = .{ + .ty = tuple_ty, + .fields = values, + } }); + return sema.addConstant(tuple_ty.toType(), tuple_val.toValue()); }; try sema.requireRuntimeBlock(block, src, runtime_src); @@ -13000,7 +13028,7 @@ fn analyzeTupleMul( @memcpy(element_refs[tuple_len * i ..][0..tuple_len], element_refs[0..tuple_len]); } - return block.addAggregateInit(tuple_ty, element_refs); + return block.addAggregateInit(tuple_ty.toType(), element_refs); } fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -13020,7 +13048,8 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai if (lhs_ty.isTuple(mod)) { // In `**` rhs must be comptime-known, but lhs can be runtime-known const factor = try sema.resolveInt(block, rhs_src, extra.rhs, Type.usize, "array multiplication factor must be comptime-known"); - return sema.analyzeTupleMul(block, inst_data.src_node, lhs, factor); + const factor_casted = try sema.usizeCast(block, rhs_src, factor); + return sema.analyzeTupleMul(block, inst_data.src_node, lhs, factor_casted); } // Analyze the lhs first, to catch the case that someone tried to do exponentiation @@ -14533,19 +14562,14 @@ fn overflowArithmeticTupleType(sema: *Sema, ty: Type) !Type { .child = .u1_type, }) else Type.u1; - const types = try sema.arena.alloc(Type, 2); - const values = try sema.arena.alloc(Value, 2); - const tuple_ty = try Type.Tag.tuple.create(sema.arena, .{ - .types = types, - .values = values, - }); - - types[0] = ty; - types[1] = ov_ty; - values[0] = Value.@"unreachable"; - values[1] = Value.@"unreachable"; - - return tuple_ty; + const types = [2]InternPool.Index{ ty.ip_index, ov_ty.ip_index }; + const values = [2]InternPool.Index{ .none, .none }; + const tuple_ty = try mod.intern(.{ .anon_struct_type = .{ + .types = &types, + .values = &values, + .names = &.{}, + } }); + return tuple_ty.toType(); } fn analyzeArithmetic( @@ -16506,57 +16530,66 @@ fn zirTypeInfo(sema: *Sema, block: 
*Block, inst: Zir.Inst.Index) CompileError!Ai const layout = struct_ty.containerLayout(mod); const struct_field_vals = fv: { - if (struct_ty.isSimpleTupleOrAnonStruct()) { - const tuple = struct_ty.tupleFields(); - const field_types = tuple.types; - const struct_field_vals = try fields_anon_decl.arena().alloc(Value, field_types.len); - for (struct_field_vals, 0..) |*struct_field_val, i| { - const field_ty = field_types[i]; - const name_val = v: { - var anon_decl = try block.startAnonDecl(); - defer anon_decl.deinit(); - const bytes = if (struct_ty.castTag(.anon_struct)) |payload| - try anon_decl.arena().dupeZ(u8, payload.data.names[i]) - else - try std.fmt.allocPrintZ(anon_decl.arena(), "{d}", .{i}); - const new_decl = try anon_decl.finish( - try Type.array(anon_decl.arena(), bytes.len, Value.zero_u8, Type.u8, mod), - try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]), - 0, // default alignment - ); - break :v try Value.Tag.slice.create(fields_anon_decl.arena(), .{ - .ptr = try Value.Tag.decl_ref.create(fields_anon_decl.arena(), new_decl), - .len = try mod.intValue(Type.usize, bytes.len), - }); - }; - - const struct_field_fields = try fields_anon_decl.arena().create([5]Value); - const field_val = tuple.values[i]; - const is_comptime = field_val.ip_index != .unreachable_value; - const opt_default_val = if (is_comptime) field_val else null; - const default_val_ptr = try sema.optRefValue(block, field_ty, opt_default_val); - struct_field_fields.* = .{ - // name: []const u8, - name_val, - // type: type, - try Value.Tag.ty.create(fields_anon_decl.arena(), field_ty), - // default_value: ?*const anyopaque, - try default_val_ptr.copy(fields_anon_decl.arena()), - // is_comptime: bool, - Value.makeBool(is_comptime), - // alignment: comptime_int, - try field_ty.lazyAbiAlignment(mod, fields_anon_decl.arena()), - }; - struct_field_val.* = try Value.Tag.aggregate.create(fields_anon_decl.arena(), struct_field_fields); - } - break :fv struct_field_vals; - } - const struct_fields = struct_ty.structFields(mod); - const struct_field_vals = try fields_anon_decl.arena().alloc(Value, struct_fields.count()); + const struct_type = switch (mod.intern_pool.indexToKey(struct_ty.ip_index)) { + .anon_struct_type => |tuple| { + const struct_field_vals = try fields_anon_decl.arena().alloc(Value, tuple.types.len); + for ( + tuple.types, + tuple.values, + struct_field_vals, + 0.., + ) |field_ty, field_val, *struct_field_val, i| { + const name_val = v: { + var anon_decl = try block.startAnonDecl(); + defer anon_decl.deinit(); + const bytes = if (tuple.names.len != 0) + // https://github.com/ziglang/zig/issues/15709 + @as([]const u8, mod.intern_pool.stringToSlice(tuple.names[i])) + else + try std.fmt.allocPrintZ(anon_decl.arena(), "{d}", .{i}); + const new_decl = try anon_decl.finish( + try Type.array(anon_decl.arena(), bytes.len, Value.zero_u8, Type.u8, mod), + try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]), + 0, // default alignment + ); + break :v try Value.Tag.slice.create(fields_anon_decl.arena(), .{ + .ptr = try Value.Tag.decl_ref.create(fields_anon_decl.arena(), new_decl), + .len = try mod.intValue(Type.usize, bytes.len), + }); + }; - for (struct_field_vals, 0..) 
|*field_val, i| { - const field = struct_fields.values()[i]; - const name = struct_fields.keys()[i]; + const struct_field_fields = try fields_anon_decl.arena().create([5]Value); + const is_comptime = field_val != .none; + const opt_default_val = if (is_comptime) field_val.toValue() else null; + const default_val_ptr = try sema.optRefValue(block, field_ty.toType(), opt_default_val); + struct_field_fields.* = .{ + // name: []const u8, + name_val, + // type: type, + field_ty.toValue(), + // default_value: ?*const anyopaque, + try default_val_ptr.copy(fields_anon_decl.arena()), + // is_comptime: bool, + Value.makeBool(is_comptime), + // alignment: comptime_int, + try field_ty.toType().lazyAbiAlignment(mod, fields_anon_decl.arena()), + }; + struct_field_val.* = try Value.Tag.aggregate.create(fields_anon_decl.arena(), struct_field_fields); + } + break :fv struct_field_vals; + }, + .struct_type => |s| s, + else => unreachable, + }; + const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse + break :fv &[0]Value{}; + const struct_field_vals = try fields_anon_decl.arena().alloc(Value, struct_obj.fields.count()); + + for ( + struct_field_vals, + struct_obj.fields.keys(), + struct_obj.fields.values(), + ) |*field_val, name, field| { const name_val = v: { var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); @@ -18013,7 +18046,7 @@ fn zirStructInit( try sema.requireRuntimeBlock(block, src, null); try sema.queueFullTypeResolution(resolved_ty); return block.addUnionInit(resolved_ty, field_index, init_inst); - } else if (resolved_ty.isAnonStruct()) { + } else if (resolved_ty.isAnonStruct(mod)) { return sema.fail(block, src, "TODO anon struct init validation", .{}); } unreachable; @@ -18034,60 +18067,54 @@ fn finishStructInit( var root_msg: ?*Module.ErrorMsg = null; errdefer if (root_msg) |msg| msg.destroy(sema.gpa); - if (struct_ty.isAnonStruct()) { - const struct_obj = struct_ty.castTag(.anon_struct).?.data; - for (struct_obj.values, 0..) |default_val, i| { - if (field_inits[i] != .none) continue; - - if (default_val.ip_index == .unreachable_value) { - const field_name = struct_obj.names[i]; - const template = "missing struct field: {s}"; - const args = .{field_name}; - if (root_msg) |msg| { - try sema.errNote(block, init_src, msg, template, args); - } else { - root_msg = try sema.errMsg(block, init_src, template, args); - } - } else { - field_inits[i] = try sema.addConstant(struct_obj.types[i], default_val); - } - } - } else if (struct_ty.isTuple(mod)) { - var i: u32 = 0; - const len = struct_ty.structFieldCount(mod); - while (i < len) : (i += 1) { - if (field_inits[i] != .none) continue; + switch (mod.intern_pool.indexToKey(struct_ty.ip_index)) { + .anon_struct_type => |anon_struct| { + for (anon_struct.types, anon_struct.values, 0..) 
|field_ty, default_val, i| { + if (field_inits[i] != .none) continue; - const default_val = struct_ty.structFieldDefaultValue(i, mod); - if (default_val.ip_index == .unreachable_value) { - const template = "missing tuple field with index {d}"; - if (root_msg) |msg| { - try sema.errNote(block, init_src, msg, template, .{i}); + if (default_val == .none) { + if (anon_struct.names.len == 0) { + const template = "missing tuple field with index {d}"; + if (root_msg) |msg| { + try sema.errNote(block, init_src, msg, template, .{i}); + } else { + root_msg = try sema.errMsg(block, init_src, template, .{i}); + } + } else { + const field_name = mod.intern_pool.stringToSlice(anon_struct.names[i]); + const template = "missing struct field: {s}"; + const args = .{field_name}; + if (root_msg) |msg| { + try sema.errNote(block, init_src, msg, template, args); + } else { + root_msg = try sema.errMsg(block, init_src, template, args); + } + } } else { - root_msg = try sema.errMsg(block, init_src, template, .{i}); + field_inits[i] = try sema.addConstant(field_ty.toType(), default_val.toValue()); } - } else { - field_inits[i] = try sema.addConstant(struct_ty.structFieldType(i, mod), default_val); } - } - } else { - const struct_obj = mod.typeToStruct(struct_ty).?; - for (struct_obj.fields.values(), 0..) |field, i| { - if (field_inits[i] != .none) continue; + }, + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index).?; + for (struct_obj.fields.values(), 0..) |field, i| { + if (field_inits[i] != .none) continue; - if (field.default_val.ip_index == .unreachable_value) { - const field_name = struct_obj.fields.keys()[i]; - const template = "missing struct field: {s}"; - const args = .{field_name}; - if (root_msg) |msg| { - try sema.errNote(block, init_src, msg, template, args); + if (field.default_val.ip_index == .unreachable_value) { + const field_name = struct_obj.fields.keys()[i]; + const template = "missing struct field: {s}"; + const args = .{field_name}; + if (root_msg) |msg| { + try sema.errNote(block, init_src, msg, template, args); + } else { + root_msg = try sema.errMsg(block, init_src, template, args); + } } else { - root_msg = try sema.errMsg(block, init_src, template, args); + field_inits[i] = try sema.addConstant(field.ty, field.default_val); } - } else { - field_inits[i] = try sema.addConstant(field.ty, field.default_val); } - } + }, + else => unreachable, } if (root_msg) |msg| { @@ -18159,31 +18186,33 @@ fn zirStructInitAnon( is_ref: bool, ) CompileError!Air.Inst.Ref { const mod = sema.mod; + const gpa = sema.gpa; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const extra = sema.code.extraData(Zir.Inst.StructInitAnon, inst_data.payload_index); - const types = try sema.arena.alloc(Type, extra.data.fields_len); - const values = try sema.arena.alloc(Value, types.len); - var fields = std.StringArrayHashMapUnmanaged(u32){}; - defer fields.deinit(sema.gpa); - try fields.ensureUnusedCapacity(sema.gpa, types.len); + const types = try sema.arena.alloc(InternPool.Index, extra.data.fields_len); + const values = try sema.arena.alloc(InternPool.Index, types.len); + var fields = std.AutoArrayHashMap(InternPool.NullTerminatedString, u32).init(sema.arena); + try fields.ensureUnusedCapacity(types.len); // Find which field forces the expression to be runtime, if any. const opt_runtime_index = rs: { var runtime_index: ?usize = null; var extra_index = extra.end; - for (types, 0..) |*field_ty, i| { + for (types, 0..) 
|*field_ty, i_usize| { + const i = @intCast(u32, i_usize); const item = sema.code.extraData(Zir.Inst.StructInitAnon.Item, extra_index); extra_index = item.end; const name = sema.code.nullTerminatedString(item.data.field_name); - const gop = fields.getOrPutAssumeCapacity(name); + const name_ip = try mod.intern_pool.getOrPutString(gpa, name); + const gop = fields.getOrPutAssumeCapacity(name_ip); if (gop.found_existing) { const msg = msg: { const decl = sema.mod.declPtr(block.src_decl); const field_src = mod.initSrc(src.node_offset.x, decl, i); const msg = try sema.errMsg(block, field_src, "duplicate field", .{}); - errdefer msg.destroy(sema.gpa); + errdefer msg.destroy(gpa); const prev_source = mod.initSrc(src.node_offset.x, decl, gop.value_ptr.*); try sema.errNote(block, prev_source, msg, "other field here", .{}); @@ -18191,41 +18220,44 @@ fn zirStructInitAnon( }; return sema.failWithOwnedErrorMsg(msg); } - gop.value_ptr.* = @intCast(u32, i); + gop.value_ptr.* = i; const init = try sema.resolveInst(item.data.init); - field_ty.* = sema.typeOf(init); - if (types[i].zigTypeTag(mod) == .Opaque) { + field_ty.* = sema.typeOf(init).ip_index; + if (types[i].toType().zigTypeTag(mod) == .Opaque) { const msg = msg: { const decl = sema.mod.declPtr(block.src_decl); const field_src = mod.initSrc(src.node_offset.x, decl, i); const msg = try sema.errMsg(block, field_src, "opaque types have unknown size and therefore cannot be directly embedded in structs", .{}); errdefer msg.destroy(sema.gpa); - try sema.addDeclaredHereNote(msg, types[i]); + try sema.addDeclaredHereNote(msg, types[i].toType()); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); } if (try sema.resolveMaybeUndefVal(init)) |init_val| { - values[i] = init_val; + values[i] = init_val.ip_index; } else { - values[i] = Value.@"unreachable"; + values[i] = .none; runtime_index = i; } } break :rs runtime_index; }; - const tuple_ty = try Type.Tag.anon_struct.create(sema.arena, .{ - .names = try sema.arena.dupe([]const u8, fields.keys()), + const tuple_ty = try mod.intern(.{ .anon_struct_type = .{ + .names = fields.keys(), .types = types, .values = values, - }); + } }); const runtime_index = opt_runtime_index orelse { - const tuple_val = try Value.Tag.aggregate.create(sema.arena, values); - return sema.addConstantMaybeRef(block, tuple_ty, tuple_val, is_ref); + const tuple_val = try mod.intern(.{ .aggregate = .{ + .ty = tuple_ty, + .fields = values, + } }); + return sema.addConstantMaybeRef(block, tuple_ty.toType(), tuple_val.toValue(), is_ref); }; sema.requireRuntimeBlock(block, .unneeded, null) catch |err| switch (err) { @@ -18241,7 +18273,7 @@ fn zirStructInitAnon( if (is_ref) { const target = sema.mod.getTarget(); const alloc_ty = try Type.ptr(sema.arena, sema.mod, .{ - .pointee_type = tuple_ty, + .pointee_type = tuple_ty.toType(), .@"addrspace" = target_util.defaultAddressSpace(target, .local), }); const alloc = try block.addTy(.alloc, alloc_ty); @@ -18254,9 +18286,9 @@ fn zirStructInitAnon( const field_ptr_ty = try Type.ptr(sema.arena, sema.mod, .{ .mutable = true, .@"addrspace" = target_util.defaultAddressSpace(target, .local), - .pointee_type = field_ty, + .pointee_type = field_ty.toType(), }); - if (values[i].ip_index == .unreachable_value) { + if (values[i] == .none) { const init = try sema.resolveInst(item.data.init); const field_ptr = try block.addStructFieldPtr(alloc, i, field_ptr_ty); _ = try block.addBinOp(.store, field_ptr, init); @@ -18274,7 +18306,7 @@ fn zirStructInitAnon( element_refs[i] = try 
sema.resolveInst(item.data.init); } - return block.addAggregateInit(tuple_ty, element_refs); + return block.addAggregateInit(tuple_ty.toType(), element_refs); } fn zirArrayInit( @@ -18400,43 +18432,47 @@ fn zirArrayInitAnon( const operands = sema.code.refSlice(extra.end, extra.data.operands_len); const mod = sema.mod; - const types = try sema.arena.alloc(Type, operands.len); - const values = try sema.arena.alloc(Value, operands.len); + const types = try sema.arena.alloc(InternPool.Index, operands.len); + const values = try sema.arena.alloc(InternPool.Index, operands.len); const opt_runtime_src = rs: { var runtime_src: ?LazySrcLoc = null; for (operands, 0..) |operand, i| { const operand_src = src; // TODO better source location const elem = try sema.resolveInst(operand); - types[i] = sema.typeOf(elem); - if (types[i].zigTypeTag(mod) == .Opaque) { + types[i] = sema.typeOf(elem).ip_index; + if (types[i].toType().zigTypeTag(mod) == .Opaque) { const msg = msg: { const msg = try sema.errMsg(block, operand_src, "opaque types have unknown size and therefore cannot be directly embedded in structs", .{}); errdefer msg.destroy(sema.gpa); - try sema.addDeclaredHereNote(msg, types[i]); + try sema.addDeclaredHereNote(msg, types[i].toType()); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); } if (try sema.resolveMaybeUndefVal(elem)) |val| { - values[i] = val; + values[i] = val.ip_index; } else { - values[i] = Value.@"unreachable"; + values[i] = .none; runtime_src = operand_src; } } break :rs runtime_src; }; - const tuple_ty = try Type.Tag.tuple.create(sema.arena, .{ + const tuple_ty = try mod.intern(.{ .anon_struct_type = .{ .types = types, .values = values, - }); + .names = &.{}, + } }); const runtime_src = opt_runtime_src orelse { - const tuple_val = try Value.Tag.aggregate.create(sema.arena, values); - return sema.addConstantMaybeRef(block, tuple_ty, tuple_val, is_ref); + const tuple_val = try mod.intern(.{ .aggregate = .{ + .ty = tuple_ty, + .fields = values, + } }); + return sema.addConstantMaybeRef(block, tuple_ty.toType(), tuple_val.toValue(), is_ref); }; try sema.requireRuntimeBlock(block, src, runtime_src); @@ -18444,7 +18480,7 @@ fn zirArrayInitAnon( if (is_ref) { const target = sema.mod.getTarget(); const alloc_ty = try Type.ptr(sema.arena, sema.mod, .{ - .pointee_type = tuple_ty, + .pointee_type = tuple_ty.toType(), .@"addrspace" = target_util.defaultAddressSpace(target, .local), }); const alloc = try block.addTy(.alloc, alloc_ty); @@ -18453,9 +18489,9 @@ fn zirArrayInitAnon( const field_ptr_ty = try Type.ptr(sema.arena, sema.mod, .{ .mutable = true, .@"addrspace" = target_util.defaultAddressSpace(target, .local), - .pointee_type = types[i], + .pointee_type = types[i].toType(), }); - if (values[i].ip_index == .unreachable_value) { + if (values[i] == .none) { const field_ptr = try block.addStructFieldPtr(alloc, i, field_ptr_ty); _ = try block.addBinOp(.store, field_ptr, try sema.resolveInst(operand)); } @@ -18469,7 +18505,7 @@ fn zirArrayInitAnon( element_refs[i] = try sema.resolveInst(operand); } - return block.addAggregateInit(tuple_ty, element_refs); + return block.addAggregateInit(tuple_ty.toType(), element_refs); } fn addConstantMaybeRef( @@ -18532,15 +18568,18 @@ fn fieldType( const resolved_ty = try sema.resolveTypeFields(cur_ty); cur_ty = resolved_ty; switch (cur_ty.zigTypeTag(mod)) { - .Struct => { - if (cur_ty.isAnonStruct()) { + .Struct => switch (mod.intern_pool.indexToKey(cur_ty.ip_index)) { + .anon_struct_type => |anon_struct| { const field_index = try 
sema.anonStructFieldIndex(block, cur_ty, field_name, field_src); - return sema.addType(cur_ty.tupleFields().types[field_index]); - } - const struct_obj = mod.typeToStruct(cur_ty).?; - const field = struct_obj.fields.get(field_name) orelse - return sema.failWithBadStructFieldAccess(block, struct_obj, field_src, field_name); - return sema.addType(field.ty); + return sema.addType(anon_struct.types[field_index].toType()); + }, + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index).?; + const field = struct_obj.fields.get(field_name) orelse + return sema.failWithBadStructFieldAccess(block, struct_obj, field_src, field_name); + return sema.addType(field.ty); + }, + else => unreachable, }, .Union => { const union_obj = mod.typeToUnion(cur_ty).?; @@ -24697,7 +24736,7 @@ fn structFieldPtr( } const field_index = try sema.tupleFieldIndex(block, struct_ty, field_name, field_name_src); return sema.tupleFieldPtr(block, src, struct_ptr, field_name_src, field_index, initializing); - } else if (struct_ty.isAnonStruct()) { + } else if (struct_ty.isAnonStruct(mod)) { const field_index = try sema.anonStructFieldIndex(block, struct_ty, field_name, field_name_src); return sema.tupleFieldPtr(block, src, struct_ptr, field_name_src, field_index, initializing); } @@ -24721,11 +24760,11 @@ fn structFieldPtrByIndex( struct_ty: Type, initializing: bool, ) CompileError!Air.Inst.Ref { - if (struct_ty.isAnonStruct()) { + const mod = sema.mod; + if (struct_ty.isAnonStruct(mod)) { return sema.tupleFieldPtr(block, src, struct_ptr, field_src, field_index, initializing); } - const mod = sema.mod; const struct_obj = mod.typeToStruct(struct_ty).?; const field = struct_obj.fields.values()[field_index]; const struct_ptr_ty = sema.typeOf(struct_ptr); @@ -24830,45 +24869,42 @@ fn structFieldVal( assert(unresolved_struct_ty.zigTypeTag(mod) == .Struct); const struct_ty = try sema.resolveTypeFields(unresolved_struct_ty); - switch (struct_ty.ip_index) { - .empty_struct_type => return sema.tupleFieldVal(block, src, struct_byval, field_name, field_name_src, struct_ty), - .none => switch (struct_ty.tag()) { - .tuple => return sema.tupleFieldVal(block, src, struct_byval, field_name, field_name_src, struct_ty), - .anon_struct => { - const field_index = try sema.anonStructFieldIndex(block, struct_ty, field_name, field_name_src); - return sema.tupleFieldValByIndex(block, src, struct_byval, field_index, struct_ty); - }, - else => unreachable, - }, - else => switch (mod.intern_pool.indexToKey(struct_ty.ip_index)) { - .struct_type => |struct_type| { - const struct_obj = mod.structPtrUnwrap(struct_type.index).?; - if (struct_obj.is_tuple) return sema.tupleFieldVal(block, src, struct_byval, field_name, field_name_src, struct_ty); + switch (mod.intern_pool.indexToKey(struct_ty.ip_index)) { + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index).?; + if (struct_obj.is_tuple) return sema.tupleFieldVal(block, src, struct_byval, field_name, field_name_src, struct_ty); - const field_index_usize = struct_obj.fields.getIndex(field_name) orelse - return sema.failWithBadStructFieldAccess(block, struct_obj, field_name_src, field_name); - const field_index = @intCast(u32, field_index_usize); - const field = struct_obj.fields.values()[field_index]; - - if (field.is_comptime) { - return sema.addConstant(field.ty, field.default_val); - } + const field_index_usize = struct_obj.fields.getIndex(field_name) orelse + return sema.failWithBadStructFieldAccess(block, struct_obj, field_name_src, 
field_name); + const field_index = @intCast(u32, field_index_usize); + const field = struct_obj.fields.values()[field_index]; - if (try sema.resolveMaybeUndefVal(struct_byval)) |struct_val| { - if (struct_val.isUndef(mod)) return sema.addConstUndef(field.ty); - if ((try sema.typeHasOnePossibleValue(field.ty))) |opv| { - return sema.addConstant(field.ty, opv); - } + if (field.is_comptime) { + return sema.addConstant(field.ty, field.default_val); + } - const field_values = struct_val.castTag(.aggregate).?.data; - return sema.addConstant(field.ty, field_values[field_index]); + if (try sema.resolveMaybeUndefVal(struct_byval)) |struct_val| { + if (struct_val.isUndef(mod)) return sema.addConstUndef(field.ty); + if ((try sema.typeHasOnePossibleValue(field.ty))) |opv| { + return sema.addConstant(field.ty, opv); } - try sema.requireRuntimeBlock(block, src, null); - return block.addStructFieldVal(struct_byval, field_index, field.ty); - }, - else => unreachable, + const field_values = struct_val.castTag(.aggregate).?.data; + return sema.addConstant(field.ty, field_values[field_index]); + } + + try sema.requireRuntimeBlock(block, src, null); + return block.addStructFieldVal(struct_byval, field_index, field.ty); + }, + .anon_struct_type => |anon_struct| { + if (anon_struct.names.len == 0) { + return sema.tupleFieldVal(block, src, struct_byval, field_name, field_name_src, struct_ty); + } else { + const field_index = try sema.anonStructFieldIndex(block, struct_ty, field_name, field_name_src); + return sema.tupleFieldValByIndex(block, src, struct_byval, field_index, struct_ty); + } }, + else => unreachable, } } @@ -25931,7 +25967,7 @@ fn coerceExtra( .Union => { // pointer to anonymous struct to pointer to union if (inst_ty.isSinglePointer(mod) and - inst_ty.childType(mod).isAnonStruct() and + inst_ty.childType(mod).isAnonStruct(mod) and sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) { return sema.coerceAnonStructToUnionPtrs(block, dest_ty, dest_ty_src, inst, inst_src); @@ -25940,7 +25976,7 @@ fn coerceExtra( .Struct => { // pointer to anonymous struct to pointer to struct if (inst_ty.isSinglePointer(mod) and - inst_ty.childType(mod).isAnonStruct() and + inst_ty.childType(mod).isAnonStruct(mod) and sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) { return sema.coerceAnonStructToStructPtrs(block, dest_ty, dest_ty_src, inst, inst_src) catch |err| switch (err) { @@ -26231,7 +26267,7 @@ fn coerceExtra( .Union => switch (inst_ty.zigTypeTag(mod)) { .Enum, .EnumLiteral => return sema.coerceEnumToUnion(block, dest_ty, dest_ty_src, inst, inst_src), .Struct => { - if (inst_ty.isAnonStruct()) { + if (inst_ty.isAnonStruct(mod)) { return sema.coerceAnonStructToUnion(block, dest_ty, dest_ty_src, inst, inst_src); } }, @@ -28771,8 +28807,8 @@ fn coerceAnonStructToUnion( return sema.failWithOwnedErrorMsg(msg); } - const anon_struct = inst_ty.castTag(.anon_struct).?.data; - const field_name = anon_struct.names[0]; + const anon_struct = mod.intern_pool.indexToKey(inst_ty.ip_index).anon_struct_type; + const field_name = mod.intern_pool.stringToSlice(anon_struct.names[0]); const init = try sema.structFieldVal(block, inst_src, inst, field_name, inst_src, inst_ty); return sema.unionInit(block, init, inst_src, union_ty, union_ty_src, field_name, inst_src); } @@ -29010,13 +29046,14 @@ fn coerceTupleToStruct( @memset(field_refs, .none); const inst_ty = sema.typeOf(inst); + const anon_struct = mod.intern_pool.indexToKey(inst_ty.ip_index).anon_struct_type; var runtime_src: ?LazySrcLoc = null; - const 
field_count = inst_ty.structFieldCount(mod); - var field_i: u32 = 0; - while (field_i < field_count) : (field_i += 1) { + for (0..anon_struct.types.len) |field_index_usize| { + const field_i = @intCast(u32, field_index_usize); const field_src = inst_src; // TODO better source location - const field_name = if (inst_ty.castTag(.anon_struct)) |payload| - payload.data.names[field_i] + const field_name = if (anon_struct.names.len != 0) + // https://github.com/ziglang/zig/issues/15709 + @as([]const u8, mod.intern_pool.stringToSlice(anon_struct.names[field_i])) else try std.fmt.allocPrint(sema.arena, "{d}", .{field_i}); const field_index = try sema.structFieldIndex(block, struct_ty, field_name, field_src); @@ -29094,21 +29131,22 @@ fn coerceTupleToTuple( inst_src: LazySrcLoc, ) !Air.Inst.Ref { const mod = sema.mod; - const dest_field_count = tuple_ty.structFieldCount(mod); - const field_vals = try sema.arena.alloc(Value, dest_field_count); + const dest_tuple = mod.intern_pool.indexToKey(tuple_ty.ip_index).anon_struct_type; + const field_vals = try sema.arena.alloc(InternPool.Index, dest_tuple.types.len); const field_refs = try sema.arena.alloc(Air.Inst.Ref, field_vals.len); @memset(field_refs, .none); const inst_ty = sema.typeOf(inst); - const inst_field_count = inst_ty.structFieldCount(mod); - if (inst_field_count > dest_field_count) return error.NotCoercible; + const src_tuple = mod.intern_pool.indexToKey(inst_ty.ip_index).anon_struct_type; + if (src_tuple.types.len > dest_tuple.types.len) return error.NotCoercible; var runtime_src: ?LazySrcLoc = null; - var field_i: u32 = 0; - while (field_i < inst_field_count) : (field_i += 1) { + for (dest_tuple.types, dest_tuple.values, 0..) |field_ty, default_val, field_index_usize| { + const field_i = @intCast(u32, field_index_usize); const field_src = inst_src; // TODO better source location - const field_name = if (inst_ty.castTag(.anon_struct)) |payload| - payload.data.names[field_i] + const field_name = if (src_tuple.names.len != 0) + // https://github.com/ziglang/zig/issues/15709 + @as([]const u8, mod.intern_pool.stringToSlice(src_tuple.names[field_i])) else try std.fmt.allocPrint(sema.arena, "{d}", .{field_i}); @@ -29118,23 +29156,21 @@ fn coerceTupleToTuple( const field_index = try sema.tupleFieldIndex(block, tuple_ty, field_name, field_src); - const field_ty = tuple_ty.structFieldType(field_i, mod); - const default_val = tuple_ty.structFieldDefaultValue(field_i, mod); const elem_ref = try sema.tupleField(block, inst_src, inst, field_src, field_i); - const coerced = try sema.coerce(block, field_ty, elem_ref, field_src); + const coerced = try sema.coerce(block, field_ty.toType(), elem_ref, field_src); field_refs[field_index] = coerced; - if (default_val.ip_index != .unreachable_value) { + if (default_val != .none) { const init_val = (try sema.resolveMaybeUndefVal(coerced)) orelse { return sema.failWithNeededComptime(block, field_src, "value stored in comptime field must be comptime-known"); }; - if (!init_val.eql(default_val, field_ty, sema.mod)) { + if (!init_val.eql(default_val.toValue(), field_ty.toType(), sema.mod)) { return sema.failWithInvalidComptimeFieldStore(block, field_src, inst_ty, field_i); } } if (runtime_src == null) { if (try sema.resolveMaybeUndefVal(coerced)) |field_val| { - field_vals[field_index] = field_val; + field_vals[field_index] = field_val.ip_index; } else { runtime_src = field_src; } @@ -29145,14 +29181,16 @@ fn coerceTupleToTuple( var root_msg: ?*Module.ErrorMsg = null; errdefer if (root_msg) |msg| msg.destroy(sema.gpa); 
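+    // Every destination field not filled in from the source tuple must have
+    // a comptime default value; otherwise a missing-field error is emitted
+    // below.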
- for (field_refs, 0..) |*field_ref, i| { + for ( + dest_tuple.types, + dest_tuple.values, + field_refs, + 0.., + ) |field_ty, default_val, *field_ref, i| { if (field_ref.* != .none) continue; - const default_val = tuple_ty.structFieldDefaultValue(i, mod); - const field_ty = tuple_ty.structFieldType(i, mod); - const field_src = inst_src; // TODO better source location - if (default_val.ip_index == .unreachable_value) { + if (default_val == .none) { if (tuple_ty.isTuple(mod)) { const template = "missing tuple field: {d}"; if (root_msg) |msg| { @@ -29174,7 +29212,7 @@ fn coerceTupleToTuple( if (runtime_src == null) { field_vals[i] = default_val; } else { - field_ref.* = try sema.addConstant(field_ty, default_val); + field_ref.* = try sema.addConstant(field_ty.toType(), default_val.toValue()); } } @@ -29191,7 +29229,10 @@ fn coerceTupleToTuple( return sema.addConstant( tuple_ty, - try Value.Tag.aggregate.create(sema.arena, field_vals), + (try mod.intern(.{ .aggregate = .{ + .ty = tuple_ty.ip_index, + .fields = field_vals, + } })).toValue(), ); } @@ -31591,17 +31632,6 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { return sema.resolveTypeRequiresComptime(ty.optionalChild(mod)); }, - .tuple, .anon_struct => { - const tuple = ty.tupleFields(); - for (tuple.types, 0..) |field_ty, i| { - const have_comptime_val = tuple.values[i].ip_index != .unreachable_value; - if (!have_comptime_val and try sema.resolveTypeRequiresComptime(field_ty)) { - return true; - } - } - return false; - }, - .error_union => return sema.resolveTypeRequiresComptime(ty.errorUnionPayload()), .anyframe_T => { const child_ty = ty.castTag(.anyframe_T).?.data; @@ -31690,6 +31720,16 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { } }, + .anon_struct_type => |tuple| { + for (tuple.types, tuple.values) |field_ty, field_val| { + const have_comptime_val = field_val != .none; + if (!have_comptime_val and try sema.resolveTypeRequiresComptime(field_ty.toType())) { + return true; + } + } + return false; + }, + .union_type => |union_type| { const union_obj = mod.unionPtr(union_type.index); switch (union_obj.requires_comptime) { @@ -31740,20 +31780,16 @@ pub fn resolveTypeFully(sema: *Sema, ty: Type) CompileError!void { return sema.resolveTypeFully(child_ty); }, .Struct => switch (ty.ip_index) { - .none => switch (ty.tag()) { - .tuple, .anon_struct => { - const tuple = ty.tupleFields(); - + .none => {}, // TODO make this unreachable when all types are migrated to InternPool + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .struct_type => return sema.resolveStructFully(ty), + .anon_struct_type => |tuple| { for (tuple.types) |field_ty| { - try sema.resolveTypeFully(field_ty); + try sema.resolveTypeFully(field_ty.toType()); } }, else => {}, }, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .struct_type => return sema.resolveStructFully(ty), - else => {}, - }, }, .Union => return sema.resolveUnionFully(ty), .Array => return sema.resolveTypeFully(ty.childType(mod)), @@ -33038,17 +33074,6 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { } }, - .tuple, .anon_struct => { - const tuple = ty.tupleFields(); - for (tuple.values, 0..) 
|val, i| { - const is_comptime = val.ip_index != .unreachable_value; - if (is_comptime) continue; - if ((try sema.typeHasOnePossibleValue(tuple.types[i])) != null) continue; - return null; - } - return Value.empty_struct; - }, - .inferred_alloc_const => unreachable, .inferred_alloc_mut => unreachable, }, @@ -33150,7 +33175,36 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { } } } - // In this case the struct has no fields and therefore has one possible value. + // In this case the struct has no runtime-known fields and + // therefore has one possible value. + + // TODO: this is incorrect for structs with comptime fields, I think + // we should use a temporary allocator to construct an aggregate that + // is populated with the comptime values and then intern that value here. + // This TODO is repeated for anon_struct_type below, as well as + // in the redundant implementation of one-possible-value in type.zig. + const empty = try mod.intern(.{ .aggregate = .{ + .ty = ty.ip_index, + .fields = &.{}, + } }); + return empty.toValue(); + }, + + .anon_struct_type => |tuple| { + for (tuple.types, tuple.values) |field_ty, val| { + const is_comptime = val != .none; + if (is_comptime) continue; + if ((try sema.typeHasOnePossibleValue(field_ty.toType())) != null) continue; + return null; + } + // In this case the struct has no runtime-known fields and + // therefore has one possible value. + + // TODO: this is incorrect for structs with comptime fields, I think + // we should use a temporary allocator to construct an aggregate that + // is populated with the comptime values and then intern that value here. + // This TODO is repeated for struct_type above, as well as + // in the redundant implementation of one-possible-value in type.zig. const empty = try mod.intern(.{ .aggregate = .{ .ty = ty.ip_index, .fields = &.{}, @@ -33647,17 +33701,6 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { return sema.typeRequiresComptime(ty.optionalChild(mod)); }, - .tuple, .anon_struct => { - const tuple = ty.tupleFields(); - for (tuple.types, 0..) |field_ty, i| { - const have_comptime_val = tuple.values[i].ip_index != .unreachable_value; - if (!have_comptime_val and try sema.typeRequiresComptime(field_ty)) { - return true; - } - } - return false; - }, - .error_union => return sema.typeRequiresComptime(ty.errorUnionPayload()), .anyframe_T => { const child_ty = ty.castTag(.anyframe_T).?.data; @@ -33752,6 +33795,15 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { }, } }, + .anon_struct_type => |tuple| { + for (tuple.types, tuple.values) |field_ty, val| { + const have_comptime_val = val != .none; + if (!have_comptime_val and try sema.typeRequiresComptime(field_ty.toType())) { + return true; + } + } + return false; + }, .union_type => |union_type| { const union_obj = mod.unionPtr(union_type.index); @@ -33865,7 +33917,7 @@ fn structFieldIndex( ) !u32 { const mod = sema.mod; const struct_ty = try sema.resolveTypeFields(unresolved_struct_ty); - if (struct_ty.isAnonStruct()) { + if (struct_ty.isAnonStruct(mod)) { return sema.anonStructFieldIndex(block, struct_ty, field_name, field_src); } else { const struct_obj = mod.typeToStruct(struct_ty).?; @@ -33882,9 +33934,10 @@ fn anonStructFieldIndex( field_name: []const u8, field_src: LazySrcLoc, ) !u32 { - const anon_struct = struct_ty.castTag(.anon_struct).?.data; + const mod = sema.mod; + const anon_struct = mod.intern_pool.indexToKey(struct_ty.ip_index).anon_struct_type; for (anon_struct.names, 0..) 
|name, i| { - if (mem.eql(u8, name, field_name)) { + if (mem.eql(u8, mod.intern_pool.stringToSlice(name), field_name)) { return @intCast(u32, i); } } diff --git a/src/TypedValue.zig b/src/TypedValue.zig index a18f49b96f..ced20ac522 100644 --- a/src/TypedValue.zig +++ b/src/TypedValue.zig @@ -177,13 +177,16 @@ pub fn print( } if (field_ptr.container_ty.zigTypeTag(mod) == .Struct) { - switch (field_ptr.container_ty.tag()) { - .tuple => return writer.print(".@\"{d}\"", .{field_ptr.field_index}), - else => { - const field_name = field_ptr.container_ty.structFieldName(field_ptr.field_index, mod); - return writer.print(".{s}", .{field_name}); + switch (mod.intern_pool.indexToKey(field_ptr.container_ty.ip_index)) { + .anon_struct_type => |anon_struct| { + if (anon_struct.names.len == 0) { + return writer.print(".@\"{d}\"", .{field_ptr.field_index}); + } }, + else => {}, } + const field_name = field_ptr.container_ty.structFieldName(field_ptr.field_index, mod); + return writer.print(".{s}", .{field_name}); } else if (field_ptr.container_ty.zigTypeTag(mod) == .Union) { const field_name = field_ptr.container_ty.unionFields(mod).keys()[field_ptr.field_index]; return writer.print(".{s}", .{field_name}); @@ -396,12 +399,9 @@ fn printAggregate( while (i < max_len) : (i += 1) { if (i != 0) try writer.writeAll(", "); switch (ty.ip_index) { - .none => switch (ty.tag()) { - .anon_struct => try writer.print(".{s} = ", .{ty.structFieldName(i, mod)}), - else => {}, - }, + .none => {}, // TODO make this unreachable after finishing InternPool migration else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .struct_type => try writer.print(".{s} = ", .{ty.structFieldName(i, mod)}), + .struct_type, .anon_struct_type => try writer.print(".{s} = ", .{ty.structFieldName(i, mod)}), else => {}, }, } diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 7e2e37667e..30c3248360 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -11411,7 +11411,7 @@ fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void { const union_obj = mod.typeToUnion(union_ty).?; const field_name = union_obj.fields.keys()[extra.field_index]; const tag_ty = union_obj.tag_ty; - const field_index = @intCast(u32, tag_ty.enumFieldIndex(field_name, mod).?); + const field_index = tag_ty.enumFieldIndex(field_name, mod).?; const tag_val = try mod.enumValueFieldIndex(tag_ty, field_index); const tag_int_val = try tag_val.enumToInt(tag_ty, mod); const tag_int = tag_int_val.toUnsignedInt(mod); diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 2ee7dab2fe..f45c178223 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -3417,8 +3417,7 @@ fn airRet(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValue { const op_inst = Air.refToIndex(un_op); const op_ty = f.typeOf(un_op); const ret_ty = if (is_ptr) op_ty.childType(mod) else op_ty; - var lowered_ret_buf: LowerFnRetTyBuffer = undefined; - const lowered_ret_ty = lowerFnRetTy(ret_ty, &lowered_ret_buf, mod); + const lowered_ret_ty = try lowerFnRetTy(ret_ty, mod); if (op_inst != null and f.air.instructions.items(.tag)[op_inst.?] 
== .call_always_tail) { try reap(f, inst, &.{un_op}); @@ -4115,8 +4114,7 @@ fn airCall( } resolved_arg.* = try f.resolveInst(arg); if (arg_cty != try f.typeToIndex(arg_ty, .complete)) { - var lowered_arg_buf: LowerFnRetTyBuffer = undefined; - const lowered_arg_ty = lowerFnRetTy(arg_ty, &lowered_arg_buf, mod); + const lowered_arg_ty = try lowerFnRetTy(arg_ty, mod); const array_local = try f.allocLocal(inst, lowered_arg_ty); try writer.writeAll("memcpy("); @@ -4146,8 +4144,7 @@ fn airCall( }; const ret_ty = fn_ty.fnReturnType(); - var lowered_ret_buf: LowerFnRetTyBuffer = undefined; - const lowered_ret_ty = lowerFnRetTy(ret_ty, &lowered_ret_buf, mod); + const lowered_ret_ty = try lowerFnRetTy(ret_ty, mod); const result_local = result: { if (modifier == .always_tail) { @@ -5200,7 +5197,7 @@ fn fieldLocation( const field_ty = container_ty.structFieldType(next_field_index, mod); if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; - break .{ .field = if (container_ty.isSimpleTuple()) + break .{ .field = if (container_ty.isSimpleTuple(mod)) .{ .field = next_field_index } else .{ .identifier = container_ty.structFieldName(next_field_index, mod) } }; @@ -5395,16 +5392,11 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue { const field_name: CValue = switch (struct_ty.ip_index) { .none => switch (struct_ty.tag()) { - .tuple, .anon_struct => if (struct_ty.isSimpleTuple()) - .{ .field = extra.field_index } - else - .{ .identifier = struct_ty.structFieldName(extra.field_index, mod) }, - else => unreachable, }, else => switch (mod.intern_pool.indexToKey(struct_ty.ip_index)) { .struct_type => switch (struct_ty.containerLayout(mod)) { - .Auto, .Extern => if (struct_ty.isSimpleTuple()) + .Auto, .Extern => if (struct_ty.isSimpleTuple(mod)) .{ .field = extra.field_index } else .{ .identifier = struct_ty.structFieldName(extra.field_index, mod) }, @@ -5465,6 +5457,12 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue { return local; }, }, + + .anon_struct_type => |anon_struct_type| if (anon_struct_type.names.len == 0) + .{ .field = extra.field_index } + else + .{ .identifier = struct_ty.structFieldName(extra.field_index, mod) }, + .union_type => |union_type| field_name: { const union_obj = mod.unionPtr(union_type.index); if (union_obj.layout == .Packed) { @@ -6791,7 +6789,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; const a = try Assignment.start(f, writer, field_ty); - try f.writeCValueMember(writer, local, if (inst_ty.isSimpleTuple()) + try f.writeCValueMember(writer, local, if (inst_ty.isSimpleTuple(mod)) .{ .field = field_i } else .{ .identifier = inst_ty.structFieldName(field_i, mod) }); @@ -7704,25 +7702,21 @@ const Vectorize = struct { } }; -const LowerFnRetTyBuffer = struct { - names: [1][]const u8, - types: [1]Type, - values: [1]Value, - payload: Type.Payload.AnonStruct, -}; -fn lowerFnRetTy(ret_ty: Type, buffer: *LowerFnRetTyBuffer, mod: *Module) Type { - if (ret_ty.zigTypeTag(mod) == .NoReturn) return Type.noreturn; +fn lowerFnRetTy(ret_ty: Type, mod: *Module) !Type { + if (ret_ty.ip_index == .noreturn_type) return Type.noreturn; if (lowersToArray(ret_ty, mod)) { - buffer.names = [1][]const u8{"array"}; - buffer.types = [1]Type{ret_ty}; - buffer.values = [1]Value{Value.@"unreachable"}; - buffer.payload = .{ .data = .{ - .names = &buffer.names, - .types = &buffer.types, - .values = &buffer.values, - } }; - return Type.initPayload(&buffer.payload.base); + const names = 
[1]InternPool.NullTerminatedString{ + try mod.intern_pool.getOrPutString(mod.gpa, "array"), + }; + const types = [1]InternPool.Index{ret_ty.ip_index}; + const values = [1]InternPool.Index{.none}; + const interned = try mod.intern(.{ .anon_struct_type = .{ + .names = &names, + .types = &types, + .values = &values, + } }); + return interned.toType(); } return if (ret_ty.hasRuntimeBitsIgnoreComptime(mod)) ret_ty else Type.void; diff --git a/src/codegen/c/type.zig b/src/codegen/c/type.zig index bcb4b92228..b51d81a30b 100644 --- a/src/codegen/c/type.zig +++ b/src/codegen/c/type.zig @@ -1951,7 +1951,7 @@ pub const CType = extern union { defer c_field_i += 1; fields_pl[c_field_i] = .{ - .name = try if (ty.isSimpleTuple()) + .name = try if (ty.isSimpleTuple(mod)) std.fmt.allocPrintZ(arena, "f{}", .{field_i}) else arena.dupeZ(u8, switch (zig_ty_tag) { @@ -2102,7 +2102,7 @@ pub const CType = extern union { .payload => unreachable, }) or !mem.eql( u8, - if (ty.isSimpleTuple()) + if (ty.isSimpleTuple(mod)) std.fmt.bufPrint(&name_buf, "f{}", .{field_i}) catch unreachable else switch (zig_ty_tag) { .Struct => ty.structFieldName(field_i, mod), @@ -2224,7 +2224,7 @@ pub const CType = extern union { .global => .global, .payload => unreachable, }); - hasher.update(if (ty.isSimpleTuple()) + hasher.update(if (ty.isSimpleTuple(mod)) std.fmt.bufPrint(&name_buf, "f{}", .{field_i}) catch unreachable else switch (zig_ty_tag) { .Struct => ty.structFieldName(field_i, mod), diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index e485b58c35..3289d389b4 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -2009,83 +2009,84 @@ pub const Object = struct { break :blk fwd_decl; }; - if (ty.isSimpleTupleOrAnonStruct()) { - const tuple = ty.tupleFields(); - - var di_fields: std.ArrayListUnmanaged(*llvm.DIType) = .{}; - defer di_fields.deinit(gpa); - - try di_fields.ensureUnusedCapacity(gpa, tuple.types.len); - - comptime assert(struct_layout_version == 2); - var offset: u64 = 0; - - for (tuple.types, 0..) |field_ty, i| { - const field_val = tuple.values[i]; - if (field_val.ip_index != .unreachable_value or !field_ty.hasRuntimeBits(mod)) continue; - - const field_size = field_ty.abiSize(mod); - const field_align = field_ty.abiAlignment(mod); - const field_offset = std.mem.alignForwardGeneric(u64, offset, field_align); - offset = field_offset + field_size; - - const field_name = if (ty.castTag(.anon_struct)) |payload| - try gpa.dupeZ(u8, payload.data.names[i]) - else - try std.fmt.allocPrintZ(gpa, "{d}", .{i}); - defer gpa.free(field_name); + switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .anon_struct_type => |tuple| { + var di_fields: std.ArrayListUnmanaged(*llvm.DIType) = .{}; + defer di_fields.deinit(gpa); + + try di_fields.ensureUnusedCapacity(gpa, tuple.types.len); + + comptime assert(struct_layout_version == 2); + var offset: u64 = 0; + + for (tuple.types, tuple.values, 0..) 
|field_ty, field_val, i| { + if (field_val != .none or !field_ty.toType().hasRuntimeBits(mod)) continue; + + const field_size = field_ty.toType().abiSize(mod); + const field_align = field_ty.toType().abiAlignment(mod); + const field_offset = std.mem.alignForwardGeneric(u64, offset, field_align); + offset = field_offset + field_size; + + const field_name = if (tuple.names.len != 0) + mod.intern_pool.stringToSlice(tuple.names[i]) + else + try std.fmt.allocPrintZ(gpa, "{d}", .{i}); + defer gpa.free(field_name); + + try di_fields.append(gpa, dib.createMemberType( + fwd_decl.toScope(), + field_name, + null, // file + 0, // line + field_size * 8, // size in bits + field_align * 8, // align in bits + field_offset * 8, // offset in bits + 0, // flags + try o.lowerDebugType(field_ty.toType(), .full), + )); + } - try di_fields.append(gpa, dib.createMemberType( - fwd_decl.toScope(), - field_name, + const full_di_ty = dib.createStructType( + compile_unit_scope, + name.ptr, null, // file 0, // line - field_size * 8, // size in bits - field_align * 8, // align in bits - field_offset * 8, // offset in bits + ty.abiSize(mod) * 8, // size in bits + ty.abiAlignment(mod) * 8, // align in bits 0, // flags - try o.lowerDebugType(field_ty, .full), - )); - } - - const full_di_ty = dib.createStructType( - compile_unit_scope, - name.ptr, - null, // file - 0, // line - ty.abiSize(mod) * 8, // size in bits - ty.abiAlignment(mod) * 8, // align in bits - 0, // flags - null, // derived from - di_fields.items.ptr, - @intCast(c_int, di_fields.items.len), - 0, // run time lang - null, // vtable holder - "", // unique id - ); - dib.replaceTemporary(fwd_decl, full_di_ty); - // The recursive call to `lowerDebugType` means we can't use `gop` anymore. - try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty), .{ .mod = o.module }); - return full_di_ty; - } - - if (mod.typeToStruct(ty)) |struct_obj| { - if (!struct_obj.haveFieldTypes()) { - // This can happen if a struct type makes it all the way to - // flush() without ever being instantiated or referenced (even - // via pointer). The only reason we are hearing about it now is - // that it is being used as a namespace to put other debug types - // into. Therefore we can satisfy this by making an empty namespace, - // rather than changing the frontend to unnecessarily resolve the - // struct field types. - const owner_decl_index = ty.getOwnerDecl(mod); - const struct_di_ty = try o.makeEmptyNamespaceDIType(owner_decl_index); - dib.replaceTemporary(fwd_decl, struct_di_ty); - // The recursive call to `lowerDebugType` via `makeEmptyNamespaceDIType` - // means we can't use `gop` anymore. - try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(struct_di_ty), .{ .mod = o.module }); - return struct_di_ty; - } + null, // derived from + di_fields.items.ptr, + @intCast(c_int, di_fields.items.len), + 0, // run time lang + null, // vtable holder + "", // unique id + ); + dib.replaceTemporary(fwd_decl, full_di_ty); + // The recursive call to `lowerDebugType` means we can't use `gop` anymore. + try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty), .{ .mod = o.module }); + return full_di_ty; + }, + .struct_type => |struct_type| s: { + const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse break :s; + + if (!struct_obj.haveFieldTypes()) { + // This can happen if a struct type makes it all the way to + // flush() without ever being instantiated or referenced (even + // via pointer). 
The only reason we are hearing about it now is + // that it is being used as a namespace to put other debug types + // into. Therefore we can satisfy this by making an empty namespace, + // rather than changing the frontend to unnecessarily resolve the + // struct field types. + const owner_decl_index = ty.getOwnerDecl(mod); + const struct_di_ty = try o.makeEmptyNamespaceDIType(owner_decl_index); + dib.replaceTemporary(fwd_decl, struct_di_ty); + // The recursive call to `lowerDebugType` via `makeEmptyNamespaceDIType` + // means we can't use `gop` anymore. + try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(struct_di_ty), .{ .mod = o.module }); + return struct_di_ty; + } + }, + else => {}, } if (!ty.hasRuntimeBitsIgnoreComptime(mod)) { @@ -2931,59 +2932,61 @@ pub const DeclGen = struct { // reference, we need to copy it here. gop.key_ptr.* = try t.copy(dg.object.type_map_arena.allocator()); - if (t.isSimpleTupleOrAnonStruct()) { - const tuple = t.tupleFields(); - const llvm_struct_ty = dg.context.structCreateNamed(""); - gop.value_ptr.* = llvm_struct_ty; // must be done before any recursive calls + const struct_type = switch (mod.intern_pool.indexToKey(t.ip_index)) { + .anon_struct_type => |tuple| { + const llvm_struct_ty = dg.context.structCreateNamed(""); + gop.value_ptr.* = llvm_struct_ty; // must be done before any recursive calls - var llvm_field_types: std.ArrayListUnmanaged(*llvm.Type) = .{}; - defer llvm_field_types.deinit(gpa); + var llvm_field_types: std.ArrayListUnmanaged(*llvm.Type) = .{}; + defer llvm_field_types.deinit(gpa); - try llvm_field_types.ensureUnusedCapacity(gpa, tuple.types.len); + try llvm_field_types.ensureUnusedCapacity(gpa, tuple.types.len); - comptime assert(struct_layout_version == 2); - var offset: u64 = 0; - var big_align: u32 = 0; + comptime assert(struct_layout_version == 2); + var offset: u64 = 0; + var big_align: u32 = 0; - for (tuple.types, 0..) 
|field_ty, i| { - const field_val = tuple.values[i]; - if (field_val.ip_index != .unreachable_value or !field_ty.hasRuntimeBits(mod)) continue; + for (tuple.types, tuple.values) |field_ty, field_val| { + if (field_val != .none or !field_ty.toType().hasRuntimeBits(mod)) continue; - const field_align = field_ty.abiAlignment(mod); - big_align = @max(big_align, field_align); - const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, field_align); + const field_align = field_ty.toType().abiAlignment(mod); + big_align = @max(big_align, field_align); + const prev_offset = offset; + offset = std.mem.alignForwardGeneric(u64, offset, field_align); - const padding_len = offset - prev_offset; - if (padding_len > 0) { - const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); - try llvm_field_types.append(gpa, llvm_array_ty); - } - const field_llvm_ty = try dg.lowerType(field_ty); - try llvm_field_types.append(gpa, field_llvm_ty); + const padding_len = offset - prev_offset; + if (padding_len > 0) { + const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); + try llvm_field_types.append(gpa, llvm_array_ty); + } + const field_llvm_ty = try dg.lowerType(field_ty.toType()); + try llvm_field_types.append(gpa, field_llvm_ty); - offset += field_ty.abiSize(mod); - } - { - const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, big_align); - const padding_len = offset - prev_offset; - if (padding_len > 0) { - const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); - try llvm_field_types.append(gpa, llvm_array_ty); + offset += field_ty.toType().abiSize(mod); + } + { + const prev_offset = offset; + offset = std.mem.alignForwardGeneric(u64, offset, big_align); + const padding_len = offset - prev_offset; + if (padding_len > 0) { + const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); + try llvm_field_types.append(gpa, llvm_array_ty); + } } - } - llvm_struct_ty.structSetBody( - llvm_field_types.items.ptr, - @intCast(c_uint, llvm_field_types.items.len), - .False, - ); + llvm_struct_ty.structSetBody( + llvm_field_types.items.ptr, + @intCast(c_uint, llvm_field_types.items.len), + .False, + ); - return llvm_struct_ty; - } + return llvm_struct_ty; + }, + .struct_type => |struct_type| struct_type, + else => unreachable, + }; - const struct_obj = mod.typeToStruct(t).?; + const struct_obj = mod.structPtrUnwrap(struct_type.index).?; if (struct_obj.layout == .Packed) { assert(struct_obj.haveLayout()); @@ -3625,71 +3628,74 @@ pub const DeclGen = struct { const field_vals = tv.val.castTag(.aggregate).?.data; const gpa = dg.gpa; - if (tv.ty.isSimpleTupleOrAnonStruct()) { - const tuple = tv.ty.tupleFields(); - var llvm_fields: std.ArrayListUnmanaged(*llvm.Value) = .{}; - defer llvm_fields.deinit(gpa); + const struct_type = switch (mod.intern_pool.indexToKey(tv.ty.ip_index)) { + .anon_struct_type => |tuple| { + var llvm_fields: std.ArrayListUnmanaged(*llvm.Value) = .{}; + defer llvm_fields.deinit(gpa); - try llvm_fields.ensureUnusedCapacity(gpa, tuple.types.len); + try llvm_fields.ensureUnusedCapacity(gpa, tuple.types.len); - comptime assert(struct_layout_version == 2); - var offset: u64 = 0; - var big_align: u32 = 0; - var need_unnamed = false; + comptime assert(struct_layout_version == 2); + var offset: u64 = 0; + var big_align: u32 = 0; + var need_unnamed = false; - for (tuple.types, 0..) 
|field_ty, i| { - if (tuple.values[i].ip_index != .unreachable_value) continue; - if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; - - const field_align = field_ty.abiAlignment(mod); - big_align = @max(big_align, field_align); - const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, field_align); - - const padding_len = offset - prev_offset; - if (padding_len > 0) { - const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); - // TODO make this and all other padding elsewhere in debug - // builds be 0xaa not undef. - llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); - } + for (tuple.types, tuple.values, 0..) |field_ty, field_val, i| { + if (field_val != .none) continue; + if (!field_ty.toType().hasRuntimeBitsIgnoreComptime(mod)) continue; - const field_llvm_val = try dg.lowerValue(.{ - .ty = field_ty, - .val = field_vals[i], - }); + const field_align = field_ty.toType().abiAlignment(mod); + big_align = @max(big_align, field_align); + const prev_offset = offset; + offset = std.mem.alignForwardGeneric(u64, offset, field_align); - need_unnamed = need_unnamed or dg.isUnnamedType(field_ty, field_llvm_val); + const padding_len = offset - prev_offset; + if (padding_len > 0) { + const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); + // TODO make this and all other padding elsewhere in debug + // builds be 0xaa not undef. + llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); + } - llvm_fields.appendAssumeCapacity(field_llvm_val); + const field_llvm_val = try dg.lowerValue(.{ + .ty = field_ty.toType(), + .val = field_vals[i], + }); - offset += field_ty.abiSize(mod); - } - { - const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, big_align); - const padding_len = offset - prev_offset; - if (padding_len > 0) { - const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); - llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); + need_unnamed = need_unnamed or dg.isUnnamedType(field_ty.toType(), field_llvm_val); + + llvm_fields.appendAssumeCapacity(field_llvm_val); + + offset += field_ty.toType().abiSize(mod); + } + { + const prev_offset = offset; + offset = std.mem.alignForwardGeneric(u64, offset, big_align); + const padding_len = offset - prev_offset; + if (padding_len > 0) { + const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); + llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); + } } - } - if (need_unnamed) { - return dg.context.constStruct( - llvm_fields.items.ptr, - @intCast(c_uint, llvm_fields.items.len), - .False, - ); - } else { - return llvm_struct_ty.constNamedStruct( - llvm_fields.items.ptr, - @intCast(c_uint, llvm_fields.items.len), - ); - } - } + if (need_unnamed) { + return dg.context.constStruct( + llvm_fields.items.ptr, + @intCast(c_uint, llvm_fields.items.len), + .False, + ); + } else { + return llvm_struct_ty.constNamedStruct( + llvm_fields.items.ptr, + @intCast(c_uint, llvm_fields.items.len), + ); + } + }, + .struct_type => |struct_type| struct_type, + else => unreachable, + }; - const struct_obj = mod.typeToStruct(tv.ty).?; + const struct_obj = mod.structPtrUnwrap(struct_type.index).?; if (struct_obj.layout == .Packed) { assert(struct_obj.haveLayout()); @@ -4077,13 +4083,11 @@ pub const DeclGen = struct { return field_addr.constIntToPtr(final_llvm_ty); } - var ty_buf: Type.Payload.Pointer = undefined; - const parent_llvm_ty = try dg.lowerType(parent_ty); - if 
(llvmFieldIndex(parent_ty, field_index, mod, &ty_buf)) |llvm_field_index| { + if (llvmField(parent_ty, field_index, mod)) |llvm_field| { const indices: [2]*llvm.Value = .{ llvm_u32.constInt(0, .False), - llvm_u32.constInt(llvm_field_index, .False), + llvm_u32.constInt(llvm_field.index, .False), }; return parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); } else { @@ -6006,8 +6010,7 @@ pub const FuncGen = struct { return self.builder.buildTrunc(shifted_value, elem_llvm_ty, ""); }, else => { - var ptr_ty_buf: Type.Payload.Pointer = undefined; - const llvm_field_index = llvmFieldIndex(struct_ty, field_index, mod, &ptr_ty_buf).?; + const llvm_field_index = llvmField(struct_ty, field_index, mod).?.index; return self.builder.buildExtractValue(struct_llvm_val, llvm_field_index, ""); }, }, @@ -6035,16 +6038,22 @@ pub const FuncGen = struct { switch (struct_ty.zigTypeTag(mod)) { .Struct => { assert(struct_ty.containerLayout(mod) != .Packed); - var ptr_ty_buf: Type.Payload.Pointer = undefined; - const llvm_field_index = llvmFieldIndex(struct_ty, field_index, mod, &ptr_ty_buf).?; + const llvm_field = llvmField(struct_ty, field_index, mod).?; const struct_llvm_ty = try self.dg.lowerType(struct_ty); - const field_ptr = self.builder.buildStructGEP(struct_llvm_ty, struct_llvm_val, llvm_field_index, ""); - const field_ptr_ty = Type.initPayload(&ptr_ty_buf.base); + const field_ptr = self.builder.buildStructGEP(struct_llvm_ty, struct_llvm_val, llvm_field.index, ""); + const field_ptr_ty = try mod.ptrType(.{ + .elem_type = llvm_field.ty.ip_index, + .alignment = llvm_field.alignment, + }); if (isByRef(field_ty, mod)) { if (canElideLoad(self, body_tail)) return field_ptr; - return self.loadByRef(field_ptr, field_ty, ptr_ty_buf.data.alignment(mod), false); + const field_alignment = if (llvm_field.alignment != 0) + llvm_field.alignment + else + llvm_field.ty.abiAlignment(mod); + return self.loadByRef(field_ptr, field_ty, field_alignment, false); } else { return self.load(field_ptr, field_ptr_ty); } @@ -6912,12 +6921,14 @@ pub const FuncGen = struct { const struct_ty = self.air.getRefType(ty_pl.ty); const field_index = ty_pl.payload; - var ptr_ty_buf: Type.Payload.Pointer = undefined; const mod = self.dg.module; - const llvm_field_index = llvmFieldIndex(struct_ty, field_index, mod, &ptr_ty_buf).?; + const llvm_field = llvmField(struct_ty, field_index, mod).?; const struct_llvm_ty = try self.dg.lowerType(struct_ty); - const field_ptr = self.builder.buildStructGEP(struct_llvm_ty, self.err_ret_trace.?, llvm_field_index, ""); - const field_ptr_ty = Type.initPayload(&ptr_ty_buf.base); + const field_ptr = self.builder.buildStructGEP(struct_llvm_ty, self.err_ret_trace.?, llvm_field.index, ""); + const field_ptr_ty = try mod.ptrType(.{ + .elem_type = llvm_field.ty.ip_index, + .alignment = llvm_field.alignment, + }); return self.load(field_ptr, field_ptr_ty); } @@ -7430,9 +7441,8 @@ pub const FuncGen = struct { const result = self.builder.buildExtractValue(result_struct, 0, ""); const overflow_bit = self.builder.buildExtractValue(result_struct, 1, ""); - var ty_buf: Type.Payload.Pointer = undefined; - const result_index = llvmFieldIndex(dest_ty, 0, mod, &ty_buf).?; - const overflow_index = llvmFieldIndex(dest_ty, 1, mod, &ty_buf).?; + const result_index = llvmField(dest_ty, 0, mod).?.index; + const overflow_index = llvmField(dest_ty, 1, mod).?.index; if (isByRef(dest_ty, mod)) { const result_alignment = dest_ty.abiAlignment(mod); @@ -7736,9 +7746,8 @@ pub const FuncGen = struct { const overflow_bit = 
self.builder.buildICmp(.NE, lhs, reconstructed, ""); - var ty_buf: Type.Payload.Pointer = undefined; - const result_index = llvmFieldIndex(dest_ty, 0, mod, &ty_buf).?; - const overflow_index = llvmFieldIndex(dest_ty, 1, mod, &ty_buf).?; + const result_index = llvmField(dest_ty, 0, mod).?.index; + const overflow_index = llvmField(dest_ty, 1, mod).?.index; if (isByRef(dest_ty, mod)) { const result_alignment = dest_ty.abiAlignment(mod); @@ -9300,8 +9309,6 @@ pub const FuncGen = struct { return running_int; } - var ptr_ty_buf: Type.Payload.Pointer = undefined; - if (isByRef(result_ty, mod)) { const llvm_u32 = self.context.intType(32); // TODO in debug builds init to undef so that the padding will be 0xaa @@ -9313,7 +9320,7 @@ pub const FuncGen = struct { if ((try result_ty.structFieldValueComptime(mod, i)) != null) continue; const llvm_elem = try self.resolveInst(elem); - const llvm_i = llvmFieldIndex(result_ty, i, mod, &ptr_ty_buf).?; + const llvm_i = llvmField(result_ty, i, mod).?.index; indices[1] = llvm_u32.constInt(llvm_i, .False); const field_ptr = self.builder.buildInBoundsGEP(llvm_result_ty, alloca_inst, &indices, indices.len, ""); var field_ptr_payload: Type.Payload.Pointer = .{ @@ -9334,7 +9341,7 @@ pub const FuncGen = struct { if ((try result_ty.structFieldValueComptime(mod, i)) != null) continue; const llvm_elem = try self.resolveInst(elem); - const llvm_i = llvmFieldIndex(result_ty, i, mod, &ptr_ty_buf).?; + const llvm_i = llvmField(result_ty, i, mod).?.index; result = self.builder.buildInsertValue(result, llvm_elem, llvm_i, ""); } return result; @@ -9796,9 +9803,8 @@ pub const FuncGen = struct { else => { const struct_llvm_ty = try self.dg.lowerPtrElemTy(struct_ty); - var ty_buf: Type.Payload.Pointer = undefined; - if (llvmFieldIndex(struct_ty, field_index, mod, &ty_buf)) |llvm_field_index| { - return self.builder.buildStructGEP(struct_llvm_ty, struct_ptr, llvm_field_index, ""); + if (llvmField(struct_ty, field_index, mod)) |llvm_field| { + return self.builder.buildStructGEP(struct_llvm_ty, struct_ptr, llvm_field.index, ""); } else { // If we found no index then this means this is a zero sized field at the // end of the struct. Treat our struct pointer as an array of two and get @@ -10457,59 +10463,61 @@ fn toLlvmGlobalAddressSpace(wanted_address_space: std.builtin.AddressSpace, targ }; } +const LlvmField = struct { + index: c_uint, + ty: Type, + alignment: u32, +}; + /// Take into account 0 bit fields and padding. Returns null if an llvm /// field could not be found. /// This only happens if you want the field index of a zero sized field at /// the end of the struct. -fn llvmFieldIndex( - ty: Type, - field_index: usize, - mod: *Module, - ptr_pl_buf: *Type.Payload.Pointer, -) ?c_uint { +fn llvmField(ty: Type, field_index: usize, mod: *Module) ?LlvmField { // Detects where we inserted extra padding fields so that we can skip // over them in this function. comptime assert(struct_layout_version == 2); var offset: u64 = 0; var big_align: u32 = 0; - if (ty.isSimpleTupleOrAnonStruct()) { - const tuple = ty.tupleFields(); - var llvm_field_index: c_uint = 0; - for (tuple.types, 0..) |field_ty, i| { - if (tuple.values[i].ip_index != .unreachable_value or !field_ty.hasRuntimeBits(mod)) continue; + const struct_type = switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .anon_struct_type => |tuple| { + var llvm_field_index: c_uint = 0; + for (tuple.types, tuple.values, 0..) 
|field_ty, field_val, i| { + if (field_val != .none or !field_ty.toType().hasRuntimeBits(mod)) continue; - const field_align = field_ty.abiAlignment(mod); - big_align = @max(big_align, field_align); - const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, field_align); + const field_align = field_ty.toType().abiAlignment(mod); + big_align = @max(big_align, field_align); + const prev_offset = offset; + offset = std.mem.alignForwardGeneric(u64, offset, field_align); - const padding_len = offset - prev_offset; - if (padding_len > 0) { - llvm_field_index += 1; - } + const padding_len = offset - prev_offset; + if (padding_len > 0) { + llvm_field_index += 1; + } - if (field_index <= i) { - ptr_pl_buf.* = .{ - .data = .{ - .pointee_type = field_ty, - .@"align" = field_align, - .@"addrspace" = .generic, - }, - }; - return llvm_field_index; - } + if (field_index <= i) { + return .{ + .index = llvm_field_index, + .ty = field_ty.toType(), + .alignment = field_align, + }; + } - llvm_field_index += 1; - offset += field_ty.abiSize(mod); - } - return null; - } - const layout = ty.containerLayout(mod); + llvm_field_index += 1; + offset += field_ty.toType().abiSize(mod); + } + return null; + }, + .struct_type => |s| s, + else => unreachable, + }; + const struct_obj = mod.structPtrUnwrap(struct_type.index).?; + const layout = struct_obj.layout; assert(layout != .Packed); var llvm_field_index: c_uint = 0; - var it = mod.typeToStruct(ty).?.runtimeFieldIterator(mod); + var it = struct_obj.runtimeFieldIterator(mod); while (it.next()) |field_and_index| { const field = field_and_index.field; const field_align = field.alignment(mod, layout); @@ -10523,14 +10531,11 @@ fn llvmFieldIndex( } if (field_index == field_and_index.index) { - ptr_pl_buf.* = .{ - .data = .{ - .pointee_type = field.ty, - .@"align" = field_align, - .@"addrspace" = .generic, - }, + return .{ + .index = llvm_field_index, + .ty = field.ty, + .alignment = field_align, }; - return llvm_field_index; } llvm_field_index += 1; @@ -11089,21 +11094,24 @@ fn isByRef(ty: Type, mod: *Module) bool { .Struct => { // Packed structs are represented to LLVM as integers. if (ty.containerLayout(mod) == .Packed) return false; - if (ty.isSimpleTupleOrAnonStruct()) { - const tuple = ty.tupleFields(); - var count: usize = 0; - for (tuple.values, 0..) 
|field_val, i| { - if (field_val.ip_index != .unreachable_value or !tuple.types[i].hasRuntimeBits(mod)) continue; - - count += 1; - if (count > max_fields_byval) return true; - if (isByRef(tuple.types[i], mod)) return true; - } - return false; - } + const struct_type = switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .anon_struct_type => |tuple| { + var count: usize = 0; + for (tuple.types, tuple.values) |field_ty, field_val| { + if (field_val != .none or !field_ty.toType().hasRuntimeBits(mod)) continue; + + count += 1; + if (count > max_fields_byval) return true; + if (isByRef(field_ty.toType(), mod)) return true; + } + return false; + }, + .struct_type => |s| s, + else => unreachable, + }; + const struct_obj = mod.structPtrUnwrap(struct_type.index).?; var count: usize = 0; - const fields = ty.structFields(mod); - for (fields.values()) |field| { + for (struct_obj.fields.values()) |field| { if (field.is_comptime or !field.ty.hasRuntimeBits(mod)) continue; count += 1; diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index a81e36fefa..32ea975b64 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -682,7 +682,7 @@ pub const DeclGen = struct { else => |tag| return dg.todo("pointer value of type {s}", .{@tagName(tag)}), }, .Struct => { - if (ty.isSimpleTupleOrAnonStruct()) { + if (ty.isSimpleTupleOrAnonStruct(mod)) { unreachable; // TODO } else { const struct_ty = mod.typeToStruct(ty).?; @@ -1319,7 +1319,8 @@ pub const DeclGen = struct { defer self.gpa.free(member_names); var member_index: usize = 0; - for (struct_ty.fields.values(), 0..) |field, i| { + const struct_obj = void; // TODO + for (struct_obj.fields.values(), 0..) |field, i| { if (field.is_comptime or !field.ty.hasRuntimeBits(mod)) continue; member_types[member_index] = try self.resolveType(field.ty, .indirect); @@ -1327,7 +1328,7 @@ pub const DeclGen = struct { member_index += 1; } - const name = try struct_ty.getFullyQualifiedName(self.module); + const name = try struct_obj.getFullyQualifiedName(self.module); defer self.module.gpa.free(name); return try self.spv.resolve(.{ .struct_type = .{ @@ -2090,7 +2091,7 @@ pub const DeclGen = struct { var i: usize = 0; while (i < mask_len) : (i += 1) { - const elem = try mask.elemValue(self.module, i); + const elem = try mask.elemValue(mod, i); if (elem.isUndef(mod)) { self.func.body.writeOperand(spec.LiteralInteger, 0xFFFF_FFFF); } else { @@ -2805,7 +2806,7 @@ pub const DeclGen = struct { const value = try self.resolve(bin_op.rhs); const ptr_ty_ref = try self.resolveType(ptr_ty, .direct); - const val_is_undef = if (try self.air.value(bin_op.rhs, mod)) |val| val.isUndefDeep() else false; + const val_is_undef = if (try self.air.value(bin_op.rhs, mod)) |val| val.isUndefDeep(mod) else false; if (val_is_undef) { const undef = try self.spv.constUndef(ptr_ty_ref); try self.store(ptr_ty, ptr, undef); diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig index e20e127800..b9722f8c95 100644 --- a/src/link/Dwarf.zig +++ b/src/link/Dwarf.zig @@ -333,13 +333,12 @@ pub const DeclState = struct { // DW.AT.byte_size, DW.FORM.udata try leb128.writeULEB128(dbg_info_buffer.writer(), ty.abiSize(mod)); - switch (ty.tag()) { - .tuple, .anon_struct => { + switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .anon_struct_type => |fields| { // DW.AT.name, DW.FORM.string try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(mod)}); - const fields = ty.tupleFields(); - for (fields.types, 0..) |field, field_index| { + for (fields.types, 0..) 
|field_ty, field_index| { // DW.AT.member try dbg_info_buffer.append(@enumToInt(AbbrevKind.struct_member)); // DW.AT.name, DW.FORM.string @@ -347,28 +346,30 @@ pub const DeclState = struct { // DW.AT.type, DW.FORM.ref4 var index = dbg_info_buffer.items.len; try dbg_info_buffer.resize(index + 4); - try self.addTypeRelocGlobal(atom_index, field, @intCast(u32, index)); + try self.addTypeRelocGlobal(atom_index, field_ty.toType(), @intCast(u32, index)); // DW.AT.data_member_location, DW.FORM.udata const field_off = ty.structFieldOffset(field_index, mod); try leb128.writeULEB128(dbg_info_buffer.writer(), field_off); } }, - else => { + .struct_type => |struct_type| s: { + const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse break :s; // DW.AT.name, DW.FORM.string const struct_name = try ty.nameAllocArena(arena, mod); try dbg_info_buffer.ensureUnusedCapacity(struct_name.len + 1); dbg_info_buffer.appendSliceAssumeCapacity(struct_name); dbg_info_buffer.appendAssumeCapacity(0); - const struct_obj = mod.typeToStruct(ty).?; if (struct_obj.layout == .Packed) { log.debug("TODO implement .debug_info for packed structs", .{}); break :blk; } - const fields = ty.structFields(mod); - for (fields.keys(), 0..) |field_name, field_index| { - const field = fields.get(field_name).?; + for ( + struct_obj.fields.keys(), + struct_obj.fields.values(), + 0.., + ) |field_name, field, field_index| { if (!field.ty.hasRuntimeBits(mod)) continue; // DW.AT.member try dbg_info_buffer.ensureUnusedCapacity(field_name.len + 2); @@ -385,6 +386,7 @@ pub const DeclState = struct { try leb128.writeULEB128(dbg_info_buffer.writer(), field_off); } }, + else => unreachable, } // DW.AT.structure_type delimit children diff --git a/src/type.zig b/src/type.zig index d051191bfe..ee9e7c8e17 100644 --- a/src/type.zig +++ b/src/type.zig @@ -54,10 +54,6 @@ pub const Type = struct { .error_union => return .ErrorUnion, .anyframe_T => return .AnyFrame, - - .tuple, - .anon_struct, - => return .Struct, }, else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { .int_type => return .Int, @@ -66,7 +62,7 @@ pub const Type = struct { .vector_type => return .Vector, .opt_type => return .Optional, .error_union_type => return .ErrorUnion, - .struct_type => return .Struct, + .struct_type, .anon_struct_type => return .Struct, .union_type => return .Union, .opaque_type => return .Opaque, .enum_type => return .Enum, @@ -465,76 +461,6 @@ pub const Type = struct { if (b.zigTypeTag(mod) != .AnyFrame) return false; return a.elemType2(mod).eql(b.elemType2(mod), mod); }, - - .tuple => { - if (!b.isSimpleTuple()) return false; - - const a_tuple = a.tupleFields(); - const b_tuple = b.tupleFields(); - - if (a_tuple.types.len != b_tuple.types.len) return false; - - for (a_tuple.types, 0..) |a_ty, i| { - const b_ty = b_tuple.types[i]; - if (!eql(a_ty, b_ty, mod)) return false; - } - - for (a_tuple.values, 0..) |a_val, i| { - const ty = a_tuple.types[i]; - const b_val = b_tuple.values[i]; - if (a_val.ip_index == .unreachable_value) { - if (b_val.ip_index == .unreachable_value) { - continue; - } else { - return false; - } - } else { - if (b_val.ip_index == .unreachable_value) { - return false; - } else { - if (!Value.eql(a_val, b_val, ty, mod)) return false; - } - } - } - - return true; - }, - .anon_struct => { - const a_struct_obj = a.castTag(.anon_struct).?.data; - const b_struct_obj = (b.castTag(.anon_struct) orelse return false).data; - - if (a_struct_obj.types.len != b_struct_obj.types.len) return false; - - for (a_struct_obj.names, 0..) 
|a_name, i| { - const b_name = b_struct_obj.names[i]; - if (!std.mem.eql(u8, a_name, b_name)) return false; - } - - for (a_struct_obj.types, 0..) |a_ty, i| { - const b_ty = b_struct_obj.types[i]; - if (!eql(a_ty, b_ty, mod)) return false; - } - - for (a_struct_obj.values, 0..) |a_val, i| { - const ty = a_struct_obj.types[i]; - const b_val = b_struct_obj.values[i]; - if (a_val.ip_index == .unreachable_value) { - if (b_val.ip_index == .unreachable_value) { - continue; - } else { - return false; - } - } else { - if (b_val.ip_index == .unreachable_value) { - return false; - } else { - if (!Value.eql(a_val, b_val, ty, mod)) return false; - } - } - } - - return true; - }, } } @@ -641,34 +567,6 @@ pub const Type = struct { std.hash.autoHash(hasher, std.builtin.TypeId.AnyFrame); hashWithHasher(ty.childType(mod), hasher, mod); }, - - .tuple => { - std.hash.autoHash(hasher, std.builtin.TypeId.Struct); - - const tuple = ty.tupleFields(); - std.hash.autoHash(hasher, tuple.types.len); - - for (tuple.types, 0..) |field_ty, i| { - hashWithHasher(field_ty, hasher, mod); - const field_val = tuple.values[i]; - if (field_val.ip_index == .unreachable_value) continue; - field_val.hash(field_ty, hasher, mod); - } - }, - .anon_struct => { - const struct_obj = ty.castTag(.anon_struct).?.data; - std.hash.autoHash(hasher, std.builtin.TypeId.Struct); - std.hash.autoHash(hasher, struct_obj.types.len); - - for (struct_obj.types, 0..) |field_ty, i| { - const field_name = struct_obj.names[i]; - const field_val = struct_obj.values[i]; - hasher.update(field_name); - hashWithHasher(field_ty, hasher, mod); - if (field_val.ip_index == .unreachable_value) continue; - field_val.hash(field_ty, hasher, mod); - } - }, } } @@ -733,41 +631,6 @@ pub const Type = struct { }; }, - .tuple => { - const payload = self.castTag(.tuple).?.data; - const types = try allocator.alloc(Type, payload.types.len); - const values = try allocator.alloc(Value, payload.values.len); - for (payload.types, 0..) |ty, i| { - types[i] = try ty.copy(allocator); - } - for (payload.values, 0..) |val, i| { - values[i] = try val.copy(allocator); - } - return Tag.tuple.create(allocator, .{ - .types = types, - .values = values, - }); - }, - .anon_struct => { - const payload = self.castTag(.anon_struct).?.data; - const names = try allocator.alloc([]const u8, payload.names.len); - const types = try allocator.alloc(Type, payload.types.len); - const values = try allocator.alloc(Value, payload.values.len); - for (payload.names, 0..) |name, i| { - names[i] = try allocator.dupe(u8, name); - } - for (payload.types, 0..) |ty, i| { - types[i] = try ty.copy(allocator); - } - for (payload.values, 0..) |val, i| { - values[i] = try val.copy(allocator); - } - return Tag.anon_struct.create(allocator, .{ - .names = names, - .types = types, - .values = values, - }); - }, .function => { const payload = self.castTag(.function).?.data; const param_types = try allocator.alloc(Type, payload.param_types.len); @@ -935,42 +798,6 @@ pub const Type = struct { ty = return_type; continue; }, - .tuple => { - const tuple = ty.castTag(.tuple).?.data; - try writer.writeAll("tuple{"); - for (tuple.types, 0..) 
|field_ty, i| { - if (i != 0) try writer.writeAll(", "); - const val = tuple.values[i]; - if (val.ip_index != .unreachable_value) { - try writer.writeAll("comptime "); - } - try field_ty.dump("", .{}, writer); - if (val.ip_index != .unreachable_value) { - try writer.print(" = {}", .{val.fmtDebug()}); - } - } - try writer.writeAll("}"); - return; - }, - .anon_struct => { - const anon_struct = ty.castTag(.anon_struct).?.data; - try writer.writeAll("struct{"); - for (anon_struct.types, 0..) |field_ty, i| { - if (i != 0) try writer.writeAll(", "); - const val = anon_struct.values[i]; - if (val.ip_index != .unreachable_value) { - try writer.writeAll("comptime "); - } - try writer.writeAll(anon_struct.names[i]); - try writer.writeAll(": "); - try field_ty.dump("", .{}, writer); - if (val.ip_index != .unreachable_value) { - try writer.print(" = {}", .{val.fmtDebug()}); - } - } - try writer.writeAll("}"); - return; - }, .optional => { const child_type = ty.castTag(.optional).?.data; try writer.writeByte('?'); @@ -1131,45 +958,6 @@ pub const Type = struct { try print(error_union.payload, writer, mod); }, - .tuple => { - const tuple = ty.castTag(.tuple).?.data; - - try writer.writeAll("tuple{"); - for (tuple.types, 0..) |field_ty, i| { - if (i != 0) try writer.writeAll(", "); - const val = tuple.values[i]; - if (val.ip_index != .unreachable_value) { - try writer.writeAll("comptime "); - } - try print(field_ty, writer, mod); - if (val.ip_index != .unreachable_value) { - try writer.print(" = {}", .{val.fmtValue(field_ty, mod)}); - } - } - try writer.writeAll("}"); - }, - .anon_struct => { - const anon_struct = ty.castTag(.anon_struct).?.data; - - try writer.writeAll("struct{"); - for (anon_struct.types, 0..) |field_ty, i| { - if (i != 0) try writer.writeAll(", "); - const val = anon_struct.values[i]; - if (val.ip_index != .unreachable_value) { - try writer.writeAll("comptime "); - } - try writer.writeAll(anon_struct.names[i]); - try writer.writeAll(": "); - - try print(field_ty, writer, mod); - - if (val.ip_index != .unreachable_value) { - try writer.print(" = {}", .{val.fmtValue(field_ty, mod)}); - } - } - try writer.writeAll("}"); - }, - .pointer => { const info = ty.ptrInfo(mod); @@ -1335,6 +1123,27 @@ pub const Type = struct { try writer.writeAll("@TypeOf(.{})"); } }, + .anon_struct_type => |anon_struct| { + try writer.writeAll("struct{"); + for (anon_struct.types, anon_struct.values, 0..) |field_ty, val, i| { + if (i != 0) try writer.writeAll(", "); + if (val != .none) { + try writer.writeAll("comptime "); + } + if (anon_struct.names.len != 0) { + const name = mod.intern_pool.stringToSlice(anon_struct.names[i]); + try writer.writeAll(name); + try writer.writeAll(": "); + } + + try print(field_ty.toType(), writer, mod); + + if (val != .none) { + try writer.print(" = {}", .{val.toValue().fmtValue(field_ty.toType(), mod)}); + } + } + try writer.writeAll("}"); + }, .union_type => |union_type| { const union_obj = mod.unionPtr(union_type.index); @@ -1443,16 +1252,6 @@ pub const Type = struct { } }, - .tuple, .anon_struct => { - const tuple = ty.tupleFields(); - for (tuple.types, 0..) 
|field_ty, i| { - const val = tuple.values[i]; - if (val.ip_index != .unreachable_value) continue; // comptime field - if (try field_ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat)) return true; - } - return false; - }, - .inferred_alloc_const => unreachable, .inferred_alloc_mut => unreachable, }, @@ -1567,6 +1366,13 @@ pub const Type = struct { return false; } }, + .anon_struct_type => |tuple| { + for (tuple.types, tuple.values) |field_ty, val| { + if (val != .none) continue; // comptime field + if (try field_ty.toType().hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat)) return true; + } + return false; + }, .union_type => |union_type| { const union_obj = mod.unionPtr(union_type.index); @@ -1634,8 +1440,6 @@ pub const Type = struct { .function, .error_union, .anyframe_T, - .tuple, - .anon_struct, => false, .inferred_alloc_mut => unreachable, @@ -1705,6 +1509,7 @@ pub const Type = struct { }; return struct_obj.layout != .Auto; }, + .anon_struct_type => false, .union_type => |union_type| switch (union_type.runtime_tag) { .none, .safety => mod.unionPtr(union_type.index).layout != .Auto, .tagged => false, @@ -1923,26 +1728,6 @@ pub const Type = struct { .optional => return abiAlignmentAdvancedOptional(ty, mod, strat), .error_union => return abiAlignmentAdvancedErrorUnion(ty, mod, strat), - .tuple, .anon_struct => { - const tuple = ty.tupleFields(); - var big_align: u32 = 0; - for (tuple.types, 0..) |field_ty, i| { - const val = tuple.values[i]; - if (val.ip_index != .unreachable_value) continue; // comptime field - if (!(field_ty.hasRuntimeBits(mod))) continue; - - switch (try field_ty.abiAlignmentAdvanced(mod, strat)) { - .scalar => |field_align| big_align = @max(big_align, field_align), - .val => switch (strat) { - .eager => unreachable, // field type alignment not resolved - .sema => unreachable, // passed to abiAlignmentAdvanced above - .lazy => |arena| return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) }, - }, - } - } - return AbiAlignmentAdvanced{ .scalar = big_align }; - }, - .inferred_alloc_const, .inferred_alloc_mut, => unreachable, @@ -2100,6 +1885,24 @@ pub const Type = struct { } return AbiAlignmentAdvanced{ .scalar = big_align }; }, + .anon_struct_type => |tuple| { + var big_align: u32 = 0; + for (tuple.types, tuple.values) |field_ty, val| { + if (val != .none) continue; // comptime field + if (!(field_ty.toType().hasRuntimeBits(mod))) continue; + + switch (try field_ty.toType().abiAlignmentAdvanced(mod, strat)) { + .scalar => |field_align| big_align = @max(big_align, field_align), + .val => switch (strat) { + .eager => unreachable, // field type alignment not resolved + .sema => unreachable, // passed to abiAlignmentAdvanced above + .lazy => |arena| return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) }, + }, + } + } + return AbiAlignmentAdvanced{ .scalar = big_align }; + }, + .union_type => |union_type| { const union_obj = mod.unionPtr(union_type.index); return abiAlignmentAdvancedUnion(ty, mod, strat, union_obj, union_type.hasTag()); @@ -2287,18 +2090,6 @@ pub const Type = struct { .inferred_alloc_const => unreachable, .inferred_alloc_mut => unreachable, - .tuple, .anon_struct => { - switch (strat) { - .sema => |sema| try sema.resolveTypeLayout(ty), - .lazy, .eager => {}, - } - const field_count = ty.structFieldCount(mod); - if (field_count == 0) { - return AbiSizeAdvanced{ .scalar = 0 }; - } - return AbiSizeAdvanced{ .scalar = ty.structFieldOffset(field_count, mod) }; - }, - .anyframe_T => return 
AbiSizeAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }, .pointer => switch (ty.castTag(.pointer).?.data.size) { @@ -2496,6 +2287,18 @@ pub const Type = struct { return AbiSizeAdvanced{ .scalar = ty.structFieldOffset(field_count, mod) }; }, }, + .anon_struct_type => |tuple| { + switch (strat) { + .sema => |sema| try sema.resolveTypeLayout(ty), + .lazy, .eager => {}, + } + const field_count = tuple.types.len; + if (field_count == 0) { + return AbiSizeAdvanced{ .scalar = 0 }; + } + return AbiSizeAdvanced{ .scalar = ty.structFieldOffset(field_count, mod) }; + }, + .union_type => |union_type| { const union_obj = mod.unionPtr(union_type.index); return abiSizeAdvancedUnion(ty, mod, strat, union_obj, union_type.hasTag()); @@ -2609,18 +2412,6 @@ pub const Type = struct { .inferred_alloc_const => unreachable, .inferred_alloc_mut => unreachable, - .tuple, .anon_struct => { - if (opt_sema) |sema| _ = try sema.resolveTypeFields(ty); - if (ty.containerLayout(mod) != .Packed) { - return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8; - } - var total: u64 = 0; - for (ty.tupleFields().types) |field_ty| { - total += try bitSizeAdvanced(field_ty, mod, opt_sema); - } - return total; - }, - .anyframe_T => return target.ptrBitWidth(), .pointer => switch (ty.castTag(.pointer).?.data.size) { @@ -2724,6 +2515,11 @@ pub const Type = struct { return try struct_obj.backing_int_ty.bitSizeAdvanced(mod, opt_sema); }, + .anon_struct_type => { + if (opt_sema) |sema| _ = try sema.resolveTypeFields(ty); + return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8; + }, + .union_type => |union_type| { if (opt_sema) |sema| _ = try sema.resolveTypeFields(ty); if (ty.containerLayout(mod) != .Packed) { @@ -3220,23 +3016,17 @@ pub const Type = struct { } pub fn containerLayout(ty: Type, mod: *Module) std.builtin.Type.ContainerLayout { - return switch (ty.ip_index) { - .empty_struct_type => .Auto, - .none => switch (ty.tag()) { - .tuple, .anon_struct => .Auto, - else => unreachable, + return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return .Auto; + return struct_obj.layout; }, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .struct_type => |struct_type| { - const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return .Auto; - return struct_obj.layout; - }, - .union_type => |union_type| { - const union_obj = mod.unionPtr(union_type.index); - return union_obj.layout; - }, - else => unreachable, + .anon_struct_type => .Auto, + .union_type => |union_type| { + const union_obj = mod.unionPtr(union_type.index); + return union_obj.layout; }, + else => unreachable, }; } @@ -3349,23 +3139,16 @@ pub const Type = struct { } pub fn arrayLenIp(ty: Type, ip: InternPool) u64 { - return switch (ty.ip_index) { - .empty_struct_type => 0, - .none => switch (ty.tag()) { - .tuple => ty.castTag(.tuple).?.data.types.len, - .anon_struct => ty.castTag(.anon_struct).?.data.types.len, - - else => unreachable, - }, - else => switch (ip.indexToKey(ty.ip_index)) { - .vector_type => |vector_type| vector_type.len, - .array_type => |array_type| array_type.len, - .struct_type => |struct_type| { - const struct_obj = ip.structPtrUnwrapConst(struct_type.index) orelse return 0; - return struct_obj.fields.count(); - }, - else => unreachable, + return switch (ip.indexToKey(ty.ip_index)) { + .vector_type => |vector_type| vector_type.len, + .array_type => |array_type| array_type.len, + .struct_type => |struct_type| { + const 
struct_obj = ip.structPtrUnwrapConst(struct_type.index) orelse return 0; + return struct_obj.fields.count(); }, + .anon_struct_type => |tuple| tuple.types.len, + + else => unreachable, }; } @@ -3374,16 +3157,10 @@ pub const Type = struct { } pub fn vectorLen(ty: Type, mod: *const Module) u32 { - return switch (ty.ip_index) { - .none => switch (ty.tag()) { - .tuple => @intCast(u32, ty.castTag(.tuple).?.data.types.len), - .anon_struct => @intCast(u32, ty.castTag(.anon_struct).?.data.types.len), - else => unreachable, - }, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .vector_type => |vector_type| vector_type.len, - else => unreachable, - }, + return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .vector_type => |vector_type| vector_type.len, + .anon_struct_type => |tuple| @intCast(u32, tuple.types.len), + else => unreachable, }; } @@ -3391,8 +3168,6 @@ pub const Type = struct { pub fn sentinel(ty: Type, mod: *const Module) ?Value { return switch (ty.ip_index) { .none => switch (ty.tag()) { - .tuple => null, - .pointer => ty.castTag(.pointer).?.data.sentinel, else => unreachable, @@ -3400,6 +3175,7 @@ pub const Type = struct { else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { .vector_type, .struct_type, + .anon_struct_type, => null, .array_type => |t| if (t.sentinel != .none) t.sentinel.toValue() else null, @@ -3486,10 +3262,12 @@ pub const Type = struct { ty = struct_obj.backing_int_ty; }, .enum_type => |enum_type| ty = enum_type.tag_ty.toType(), + .vector_type => |vector_type| ty = vector_type.child.toType(), + + .anon_struct_type => unreachable, .ptr_type => unreachable, .array_type => unreachable, - .vector_type => |vector_type| ty = vector_type.child.toType(), .opt_type => unreachable, .error_union_type => unreachable, @@ -3711,17 +3489,6 @@ pub const Type = struct { } }, - .tuple, .anon_struct => { - const tuple = ty.tupleFields(); - for (tuple.values, 0..) |val, i| { - const is_comptime = val.ip_index != .unreachable_value; - if (is_comptime) continue; - if ((try tuple.types[i].onePossibleValue(mod)) != null) continue; - return null; - } - return Value.empty_struct; - }, - .inferred_alloc_const => unreachable, .inferred_alloc_mut => unreachable, }, @@ -3810,7 +3577,33 @@ pub const Type = struct { return null; } } - // In this case the struct has no fields and therefore has one possible value. + // In this case the struct has no runtime-known fields and + // therefore has one possible value. + + // TODO: this is incorrect for structs with comptime fields, I think + // we should use a temporary allocator to construct an aggregate that + // is populated with the comptime values and then intern that value here. + // This TODO is repeated for anon_struct_type below, as well as in + // the redundant implementation of one-possible-value logic in Sema.zig. + const empty = try mod.intern(.{ .aggregate = .{ + .ty = ty.ip_index, + .fields = &.{}, + } }); + return empty.toValue(); + }, + + .anon_struct_type => |tuple| { + for (tuple.types, tuple.values) |field_ty, val| { + if (val != .none) continue; // comptime field + if ((try field_ty.toType().onePossibleValue(mod)) != null) continue; + return null; + } + + // TODO: this is incorrect for structs with comptime fields, I think + // we should use a temporary allocator to construct an aggregate that + // is populated with the comptime values and then intern that value here. + // This TODO is repeated for struct_type above, as well as in + // the redundant implementation of one-possible-value logic in Sema.zig. 
const empty = try mod.intern(.{ .aggregate = .{ .ty = ty.ip_index, .fields = &.{}, @@ -3915,15 +3708,6 @@ pub const Type = struct { return ty.optionalChild(mod).comptimeOnly(mod); }, - .tuple, .anon_struct => { - const tuple = ty.tupleFields(); - for (tuple.types, 0..) |field_ty, i| { - const have_comptime_val = tuple.values[i].ip_index != .unreachable_value; - if (!have_comptime_val and field_ty.comptimeOnly(mod)) return true; - } - return false; - }, - .error_union => return ty.errorUnionPayload().comptimeOnly(mod), .anyframe_T => { const child_ty = ty.castTag(.anyframe_T).?.data; @@ -4007,6 +3791,14 @@ pub const Type = struct { } }, + .anon_struct_type => |tuple| { + for (tuple.types, tuple.values) |field_ty, val| { + const have_comptime_val = val != .none; + if (!have_comptime_val and field_ty.toType().comptimeOnly(mod)) return true; + } + return false; + }, + .union_type => |union_type| { const union_obj = mod.unionPtr(union_type.index); switch (union_obj.requires_comptime) { @@ -4275,171 +4067,116 @@ pub const Type = struct { } pub fn structFieldName(ty: Type, field_index: usize, mod: *Module) []const u8 { - switch (ty.ip_index) { - .none => switch (ty.tag()) { - .anon_struct => return ty.castTag(.anon_struct).?.data.names[field_index], - else => unreachable, + switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index).?; + assert(struct_obj.haveFieldTypes()); + return struct_obj.fields.keys()[field_index]; }, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .struct_type => |struct_type| { - const struct_obj = mod.structPtrUnwrap(struct_type.index).?; - assert(struct_obj.haveFieldTypes()); - return struct_obj.fields.keys()[field_index]; - }, - else => unreachable, + .anon_struct_type => |anon_struct| { + const name = anon_struct.names[field_index]; + return mod.intern_pool.stringToSlice(name); }, + else => unreachable, } } pub fn structFieldCount(ty: Type, mod: *Module) usize { - return switch (ty.ip_index) { - .empty_struct_type => 0, - .none => switch (ty.tag()) { - .tuple => ty.castTag(.tuple).?.data.types.len, - .anon_struct => ty.castTag(.anon_struct).?.data.types.len, - else => unreachable, - }, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .struct_type => |struct_type| { - const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return 0; - assert(struct_obj.haveFieldTypes()); - return struct_obj.fields.count(); - }, - else => unreachable, + return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return 0; + assert(struct_obj.haveFieldTypes()); + return struct_obj.fields.count(); }, + .anon_struct_type => |anon_struct| anon_struct.types.len, + else => unreachable, }; } /// Supports structs and unions. 
pub fn structFieldType(ty: Type, index: usize, mod: *Module) Type { - return switch (ty.ip_index) { - .none => switch (ty.tag()) { - .tuple => return ty.castTag(.tuple).?.data.types[index], - .anon_struct => return ty.castTag(.anon_struct).?.data.types[index], - else => unreachable, + return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index).?; + return struct_obj.fields.values()[index].ty; }, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .struct_type => |struct_type| { - const struct_obj = mod.structPtrUnwrap(struct_type.index).?; - return struct_obj.fields.values()[index].ty; - }, - .union_type => |union_type| { - const union_obj = mod.unionPtr(union_type.index); - return union_obj.fields.values()[index].ty; - }, - else => unreachable, + .union_type => |union_type| { + const union_obj = mod.unionPtr(union_type.index); + return union_obj.fields.values()[index].ty; }, + .anon_struct_type => |anon_struct| anon_struct.types[index].toType(), + else => unreachable, }; } pub fn structFieldAlign(ty: Type, index: usize, mod: *Module) u32 { - switch (ty.ip_index) { - .none => switch (ty.tag()) { - .tuple => return ty.castTag(.tuple).?.data.types[index].abiAlignment(mod), - .anon_struct => return ty.castTag(.anon_struct).?.data.types[index].abiAlignment(mod), - else => unreachable, + switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index).?; + assert(struct_obj.layout != .Packed); + return struct_obj.fields.values()[index].alignment(mod, struct_obj.layout); }, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .struct_type => |struct_type| { - const struct_obj = mod.structPtrUnwrap(struct_type.index).?; - assert(struct_obj.layout != .Packed); - return struct_obj.fields.values()[index].alignment(mod, struct_obj.layout); - }, - .union_type => |union_type| { - const union_obj = mod.unionPtr(union_type.index); - return union_obj.fields.values()[index].normalAlignment(mod); - }, - else => unreachable, + .anon_struct_type => |anon_struct| { + return anon_struct.types[index].toType().abiAlignment(mod); }, + .union_type => |union_type| { + const union_obj = mod.unionPtr(union_type.index); + return union_obj.fields.values()[index].normalAlignment(mod); + }, + else => unreachable, } } pub fn structFieldDefaultValue(ty: Type, index: usize, mod: *Module) Value { - switch (ty.ip_index) { - .none => switch (ty.tag()) { - .tuple => { - const tuple = ty.castTag(.tuple).?.data; - return tuple.values[index]; - }, - .anon_struct => { - const struct_obj = ty.castTag(.anon_struct).?.data; - return struct_obj.values[index]; - }, - else => unreachable, + switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index).?; + return struct_obj.fields.values()[index].default_val; }, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .struct_type => |struct_type| { - const struct_obj = mod.structPtrUnwrap(struct_type.index).?; - return struct_obj.fields.values()[index].default_val; - }, - else => unreachable, + .anon_struct_type => |anon_struct| { + const val = anon_struct.values[index]; + // TODO: avoid using `unreachable` to indicate this. 
+ if (val == .none) return Value.@"unreachable"; + return val.toValue(); }, + else => unreachable, } } pub fn structFieldValueComptime(ty: Type, mod: *Module, index: usize) !?Value { - switch (ty.ip_index) { - .none => switch (ty.tag()) { - .tuple => { - const tuple = ty.castTag(.tuple).?.data; - const val = tuple.values[index]; - if (val.ip_index == .unreachable_value) { - return tuple.types[index].onePossibleValue(mod); - } else { - return val; - } - }, - .anon_struct => { - const anon_struct = ty.castTag(.anon_struct).?.data; - const val = anon_struct.values[index]; - if (val.ip_index == .unreachable_value) { - return anon_struct.types[index].onePossibleValue(mod); - } else { - return val; - } - }, - else => unreachable, + switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index).?; + const field = struct_obj.fields.values()[index]; + if (field.is_comptime) { + return field.default_val; + } else { + return field.ty.onePossibleValue(mod); + } }, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .struct_type => |struct_type| { - const struct_obj = mod.structPtrUnwrap(struct_type.index).?; - const field = struct_obj.fields.values()[index]; - if (field.is_comptime) { - return field.default_val; - } else { - return field.ty.onePossibleValue(mod); - } - }, - else => unreachable, + .anon_struct_type => |tuple| { + const val = tuple.values[index]; + if (val == .none) { + return tuple.types[index].toType().onePossibleValue(mod); + } else { + return val.toValue(); + } }, + else => unreachable, } } pub fn structFieldIsComptime(ty: Type, index: usize, mod: *Module) bool { - switch (ty.ip_index) { - .none => switch (ty.tag()) { - .tuple => { - const tuple = ty.castTag(.tuple).?.data; - const val = tuple.values[index]; - return val.ip_index != .unreachable_value; - }, - .anon_struct => { - const anon_struct = ty.castTag(.anon_struct).?.data; - const val = anon_struct.values[index]; - return val.ip_index != .unreachable_value; - }, - else => unreachable, - }, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .struct_type => |struct_type| { - const struct_obj = mod.structPtrUnwrap(struct_type.index).?; - if (struct_obj.layout == .Packed) return false; - const field = struct_obj.fields.values()[index]; - return field.is_comptime; - }, - else => unreachable, + return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index).?; + if (struct_obj.layout == .Packed) return false; + const field = struct_obj.fields.values()[index]; + return field.is_comptime; }, - } + .anon_struct_type => |anon_struct| anon_struct.values[index] != .none, + else => unreachable, + }; } pub fn packedStructFieldByteOffset(ty: Type, field_index: usize, mod: *Module) u32 { @@ -4516,46 +4253,43 @@ pub const Type = struct { pub fn structFieldOffset(ty: Type, index: usize, mod: *Module) u64 { switch (ty.ip_index) { .none => switch (ty.tag()) { - .tuple, .anon_struct => { - const tuple = ty.tupleFields(); + else => unreachable, + }, + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index).?; + assert(struct_obj.haveLayout()); + assert(struct_obj.layout != .Packed); + var it = ty.iterateStructOffsets(mod); + while (it.next()) |field_offset| { + if (index == field_offset.field) + return field_offset.offset; + } + return 
std.mem.alignForwardGeneric(u64, it.offset, @max(it.big_align, 1)); + }, + + .anon_struct_type => |tuple| { var offset: u64 = 0; var big_align: u32 = 0; - for (tuple.types, 0..) |field_ty, i| { - const field_val = tuple.values[i]; - if (field_val.ip_index != .unreachable_value or !field_ty.hasRuntimeBits(mod)) { + for (tuple.types, tuple.values, 0..) |field_ty, field_val, i| { + if (field_val != .none or !field_ty.toType().hasRuntimeBits(mod)) { // comptime field if (i == index) return offset; continue; } - const field_align = field_ty.abiAlignment(mod); + const field_align = field_ty.toType().abiAlignment(mod); big_align = @max(big_align, field_align); offset = std.mem.alignForwardGeneric(u64, offset, field_align); if (i == index) return offset; - offset += field_ty.abiSize(mod); + offset += field_ty.toType().abiSize(mod); } offset = std.mem.alignForwardGeneric(u64, offset, @max(big_align, 1)); return offset; }, - else => unreachable, - }, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .struct_type => |struct_type| { - const struct_obj = mod.structPtrUnwrap(struct_type.index).?; - assert(struct_obj.haveLayout()); - assert(struct_obj.layout != .Packed); - var it = ty.iterateStructOffsets(mod); - while (it.next()) |field_offset| { - if (index == field_offset.field) - return field_offset.offset; - } - - return std.mem.alignForwardGeneric(u64, it.offset, @max(it.big_align, 1)); - }, - .union_type => |union_type| { if (!union_type.hasTag()) return 0; @@ -4655,10 +4389,6 @@ pub const Type = struct { inferred_alloc_const, // See last_no_payload_tag below. // After this, the tag requires a payload. - /// Possible Value tags for this: @"struct" - tuple, - /// Possible Value tags for this: @"struct" - anon_struct, pointer, function, optional, @@ -4691,8 +4421,6 @@ pub const Type = struct { .function => Payload.Function, .error_union => Payload.ErrorUnion, .error_set_single => Payload.Name, - .tuple => Payload.Tuple, - .anon_struct => Payload.AnonStruct, }; } @@ -4723,83 +4451,48 @@ pub const Type = struct { pub fn isTuple(ty: Type, mod: *Module) bool { return switch (ty.ip_index) { - .none => switch (ty.tag()) { - .tuple => true, - else => false, - }, - else => return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .none => false, + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { .struct_type => |struct_type| { const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return false; return struct_obj.is_tuple; }, + .anon_struct_type => |anon_struct| anon_struct.names.len == 0, else => false, }, }; } - pub fn isAnonStruct(ty: Type) bool { - return switch (ty.ip_index) { - .empty_struct_type => true, - .none => switch (ty.tag()) { - .anon_struct => true, - else => false, - }, + pub fn isAnonStruct(ty: Type, mod: *Module) bool { + if (ty.ip_index == .empty_struct_type) return true; + return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .anon_struct_type => |anon_struct_type| anon_struct_type.names.len > 0, else => false, }; } pub fn isTupleOrAnonStruct(ty: Type, mod: *Module) bool { - return switch (ty.ip_index) { - .empty_struct_type => true, - .none => switch (ty.tag()) { - .tuple, .anon_struct => true, - else => false, - }, - else => return switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .struct_type => |struct_type| { - const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return false; - return struct_obj.is_tuple; - }, - else => false, - }, - }; - } - - pub fn isSimpleTuple(ty: Type) bool { - return switch (ty.ip_index) { - 
.empty_struct_type => true, - .none => switch (ty.tag()) { - .tuple => true, - else => false, + return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return false; + return struct_obj.is_tuple; }, + .anon_struct_type => |anon_struct_type| anon_struct_type.names.len == 0, else => false, }; } - pub fn isSimpleTupleOrAnonStruct(ty: Type) bool { - return switch (ty.ip_index) { - .empty_struct_type => true, - .none => switch (ty.tag()) { - .tuple, .anon_struct => true, - else => false, - }, + pub fn isSimpleTuple(ty: Type, mod: *Module) bool { + return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .anon_struct_type => |anon_struct_type| anon_struct_type.names.len == 0, else => false, }; } - // Only allowed for simple tuple types - pub fn tupleFields(ty: Type) Payload.Tuple.Data { - return switch (ty.ip_index) { - .empty_struct_type => .{ .types = &.{}, .values = &.{} }, - .none => switch (ty.tag()) { - .tuple => ty.castTag(.tuple).?.data, - .anon_struct => .{ - .types = ty.castTag(.anon_struct).?.data.types, - .values = ty.castTag(.anon_struct).?.data.values, - }, - else => unreachable, - }, - else => unreachable, + pub fn isSimpleTupleOrAnonStruct(ty: Type, mod: *Module) bool { + return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .anon_struct_type => true, + else => false, }; } @@ -4947,29 +4640,6 @@ pub const Type = struct { /// memory is owned by `Module` data: []const u8, }; - - pub const Tuple = struct { - base: Payload = .{ .tag = .tuple }, - data: Data, - - pub const Data = struct { - types: []Type, - /// unreachable_value elements are used to indicate runtime-known. - values: []Value, - }; - }; - - pub const AnonStruct = struct { - base: Payload = .{ .tag = .anon_struct }, - data: Data, - - pub const Data = struct { - names: []const []const u8, - types: []Type, - /// unreachable_value elements are used to indicate runtime-known. - values: []Value, - }; - }; }; pub const @"u1": Type = .{ .ip_index = .u1_type, .legacy = undefined }; diff --git a/src/value.zig b/src/value.zig index 84408424f0..50e3fc8061 100644 --- a/src/value.zig +++ b/src/value.zig @@ -1889,26 +1889,28 @@ pub const Value = struct { const b_field_vals = b.castTag(.aggregate).?.data; assert(a_field_vals.len == b_field_vals.len); - if (ty.isSimpleTupleOrAnonStruct()) { - const types = ty.tupleFields().types; - assert(types.len == a_field_vals.len); - for (types, 0..) |field_ty, i| { - if (!(try eqlAdvanced(a_field_vals[i], field_ty, b_field_vals[i], field_ty, mod, opt_sema))) { - return false; + switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .anon_struct_type => |anon_struct| { + assert(anon_struct.types.len == a_field_vals.len); + for (anon_struct.types, 0..) |field_ty, i| { + if (!(try eqlAdvanced(a_field_vals[i], field_ty.toType(), b_field_vals[i], field_ty.toType(), mod, opt_sema))) { + return false; + } } - } - return true; - } - - if (ty.zigTypeTag(mod) == .Struct) { - const fields = ty.structFields(mod).values(); - assert(fields.len == a_field_vals.len); - for (fields, 0..) |field, i| { - if (!(try eqlAdvanced(a_field_vals[i], field.ty, b_field_vals[i], field.ty, mod, opt_sema))) { - return false; + return true; + }, + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index).?; + const fields = struct_obj.fields.values(); + assert(fields.len == a_field_vals.len); + for (fields, 0..) 
|field, i| { + if (!(try eqlAdvanced(a_field_vals[i], field.ty, b_field_vals[i], field.ty, mod, opt_sema))) { + return false; + } } - } - return true; + return true; + }, + else => {}, } const elem_ty = ty.childType(mod); @@ -2017,20 +2019,6 @@ pub const Value = struct { if ((try ty.onePossibleValue(mod)) != null) { return true; } - if (a_ty.castTag(.anon_struct)) |payload| { - const tuple = payload.data; - if (tuple.values.len != 1) { - return false; - } - const field_name = tuple.names[0]; - const union_obj = mod.typeToUnion(ty).?; - const field_index = @intCast(u32, union_obj.fields.getIndex(field_name) orelse return false); - const tag_and_val = b.castTag(.@"union").?.data; - const field_tag = try mod.enumValueFieldIndex(union_obj.tag_ty, field_index); - const tag_matches = tag_and_val.tag.eql(field_tag, union_obj.tag_ty, mod); - if (!tag_matches) return false; - return eqlAdvanced(tag_and_val.val, union_obj.tag_ty, tuple.values[0], tuple.types[0], mod, opt_sema); - } return false; }, .Float => { -- cgit v1.2.3 From 17882162b3be5542b4e289e5ddc6535a4bb4c6b1 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 15 May 2023 20:09:54 -0700 Subject: stage2: move function types to InternPool --- lib/std/builtin.zig | 2 +- src/Air.zig | 7 +- src/InternPool.zig | 244 ++++++++++++++++++++++----- src/Module.zig | 38 ++++- src/Sema.zig | 334 ++++++++++++++++++------------------ src/Zir.zig | 5 +- src/arch/aarch64/CodeGen.zig | 44 +++-- src/arch/arm/CodeGen.zig | 44 +++-- src/arch/riscv64/CodeGen.zig | 22 +-- src/arch/sparc64/CodeGen.zig | 22 +-- src/arch/wasm/CodeGen.zig | 92 +++++----- src/arch/x86_64/CodeGen.zig | 34 ++-- src/codegen.zig | 2 +- src/codegen/c.zig | 13 +- src/codegen/c/type.zig | 34 ++-- src/codegen/llvm.zig | 235 +++++++++++++------------- src/codegen/spirv.zig | 22 +-- src/link/Coff.zig | 2 +- src/link/Dwarf.zig | 2 +- src/link/SpirV.zig | 6 +- src/target.zig | 11 ++ src/type.zig | 392 ++++++++++++------------------------------- src/value.zig | 5 + 23 files changed, 821 insertions(+), 791 deletions(-) (limited to 'src/arch') diff --git a/lib/std/builtin.zig b/lib/std/builtin.zig index 429654bd4a..3e8970a354 100644 --- a/lib/std/builtin.zig +++ b/lib/std/builtin.zig @@ -143,7 +143,7 @@ pub const Mode = OptimizeMode; /// This data structure is used by the Zig language code generation and /// therefore must be kept in sync with the compiler implementation. -pub const CallingConvention = enum { +pub const CallingConvention = enum(u8) { /// This is the default Zig calling convention used when not using `export` on `fn` /// and no other calling convention is specified. 
    Unspecified,
diff --git a/src/Air.zig b/src/Air.zig
index e82a70100f..09f8d6c9e2 100644
--- a/src/Air.zig
+++ b/src/Air.zig
@@ -845,7 +845,6 @@ pub const Inst = struct {
 
     pub const Ref = enum(u32) {
         u1_type = @enumToInt(InternPool.Index.u1_type),
-        u5_type = @enumToInt(InternPool.Index.u5_type),
         u8_type = @enumToInt(InternPool.Index.u8_type),
         i8_type = @enumToInt(InternPool.Index.i8_type),
         u16_type = @enumToInt(InternPool.Index.u16_type),
@@ -914,8 +913,8 @@ pub const Inst = struct {
         zero_u8 = @enumToInt(InternPool.Index.zero_u8),
         one = @enumToInt(InternPool.Index.one),
         one_usize = @enumToInt(InternPool.Index.one_usize),
-        one_u5 = @enumToInt(InternPool.Index.one_u5),
-        four_u5 = @enumToInt(InternPool.Index.four_u5),
+        one_u8 = @enumToInt(InternPool.Index.one_u8),
+        four_u8 = @enumToInt(InternPool.Index.four_u8),
         negative_one = @enumToInt(InternPool.Index.negative_one),
         calling_convention_c = @enumToInt(InternPool.Index.calling_convention_c),
         calling_convention_inline = @enumToInt(InternPool.Index.calling_convention_inline),
@@ -1383,7 +1382,7 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index, ip: InternPool) Type {
 
         .call, .call_always_tail, .call_never_tail, .call_never_inline => {
             const callee_ty = air.typeOf(datas[inst].pl_op.operand, ip);
-            return callee_ty.fnReturnType();
+            return callee_ty.fnReturnTypeIp(ip);
        },
 
         .slice_elem_val, .ptr_elem_val, .array_elem_val => {
diff --git a/src/InternPool.zig b/src/InternPool.zig
index 2435e0ad31..d4bfe5a244 100644
--- a/src/InternPool.zig
+++ b/src/InternPool.zig
@@ -148,6 +148,7 @@ pub const Key = union(enum) {
     union_type: UnionType,
     opaque_type: OpaqueType,
     enum_type: EnumType,
+    func_type: FuncType,
 
     /// Typed `undefined`. This will never be `none`; untyped `undefined` is represented
     /// via `simple_value` and has a named `Index` tag for it.
@@ -185,6 +186,13 @@ pub const Key = union(enum) {
         /// If zero use pointee_type.abiAlignment()
         /// When creating pointer types, if alignment is equal to pointee type
         /// abi alignment, this value should be set to 0 instead.
+        ///
+        /// Please don't change this to u32 or u29. If you want to save bits,
+        /// migrate the rest of the codebase to use the `Alignment` type rather
+        /// than using byte units. The LLVM backend can only handle `c_uint`
+        /// byte units; we can emit a semantic analysis error if an alignment
+        /// that overflows that amount is used, but it shouldn't affect the
+        /// other backends.
         alignment: u64 = 0,
         /// If this is non-zero it means the pointer points to a sub-byte
         /// range of data, which is backed by a "host integer" with this
@@ -358,6 +366,44 @@ pub const Key = union(enum) {
         }
     };
 
+    pub const FuncType = struct {
+        param_types: []Index,
+        return_type: Index,
+        /// Tells whether a parameter is comptime. See `paramIsComptime` helper
+        /// method for accessing this.
+        comptime_bits: u32,
+        /// Tells whether a parameter is noalias. See `paramIsNoalias` helper
+        /// method for accessing this.
+        noalias_bits: u32,
+        /// If zero use default target function code alignment.
+        ///
+        /// Please don't change this to u32 or u29. If you want to save bits,
+        /// migrate the rest of the codebase to use the `Alignment` type rather
+        /// than using byte units. The LLVM backend can only handle `c_uint`
+        /// byte units; we can emit a semantic analysis error if an alignment
+        /// that overflows that amount is used, but it shouldn't affect the
+        /// other backends.
+ alignment: u64, + cc: std.builtin.CallingConvention, + is_var_args: bool, + is_generic: bool, + is_noinline: bool, + align_is_generic: bool, + cc_is_generic: bool, + section_is_generic: bool, + addrspace_is_generic: bool, + + pub fn paramIsComptime(self: @This(), i: u5) bool { + assert(i < self.param_types.len); + return @truncate(u1, self.comptime_bits >> i) != 0; + } + + pub fn paramIsNoalias(self: @This(), i: u5) bool { + assert(i < self.param_types.len); + return @truncate(u1, self.noalias_bits >> i) != 0; + } + }; + pub const Int = struct { ty: Index, storage: Storage, @@ -512,6 +558,18 @@ pub const Key = union(enum) { for (anon_struct_type.values) |elem| std.hash.autoHash(hasher, elem); for (anon_struct_type.names) |elem| std.hash.autoHash(hasher, elem); }, + + .func_type => |func_type| { + for (func_type.param_types) |param_type| std.hash.autoHash(hasher, param_type); + std.hash.autoHash(hasher, func_type.return_type); + std.hash.autoHash(hasher, func_type.comptime_bits); + std.hash.autoHash(hasher, func_type.noalias_bits); + std.hash.autoHash(hasher, func_type.alignment); + std.hash.autoHash(hasher, func_type.cc); + std.hash.autoHash(hasher, func_type.is_var_args); + std.hash.autoHash(hasher, func_type.is_generic); + std.hash.autoHash(hasher, func_type.is_noinline); + }, } } @@ -670,6 +728,20 @@ pub const Key = union(enum) { std.mem.eql(Index, a_info.values, b_info.values) and std.mem.eql(NullTerminatedString, a_info.names, b_info.names); }, + + .func_type => |a_info| { + const b_info = b.func_type; + + return std.mem.eql(Index, a_info.param_types, b_info.param_types) and + a_info.return_type == b_info.return_type and + a_info.comptime_bits == b_info.comptime_bits and + a_info.noalias_bits == b_info.noalias_bits and + a_info.alignment == b_info.alignment and + a_info.cc == b_info.cc and + a_info.is_var_args == b_info.is_var_args and + a_info.is_generic == b_info.is_generic and + a_info.is_noinline == b_info.is_noinline; + }, } } @@ -687,6 +759,7 @@ pub const Key = union(enum) { .opaque_type, .enum_type, .anon_struct_type, + .func_type, => .type_type, inline .ptr, @@ -734,7 +807,6 @@ pub const Index = enum(u32) { pub const last_value: Index = .empty_struct; u1_type, - u5_type, u8_type, i8_type, u16_type, @@ -811,10 +883,10 @@ pub const Index = enum(u32) { one, /// `1` (usize) one_usize, - /// `1` (u5) - one_u5, - /// `4` (u5) - four_u5, + /// `1` (u8) + one_u8, + /// `4` (u8) + four_u8, /// `-1` (comptime_int) negative_one, /// `std.builtin.CallingConvention.C` @@ -880,12 +952,6 @@ pub const static_keys = [_]Key{ .bits = 1, } }, - // u5_type - .{ .int_type = .{ - .signedness = .unsigned, - .bits = 5, - } }, - .{ .int_type = .{ .signedness = .unsigned, .bits = 8, @@ -1074,14 +1140,14 @@ pub const static_keys = [_]Key{ .storage = .{ .u64 = 1 }, } }, - // one_u5 + // one_u8 .{ .int = .{ - .ty = .u5_type, + .ty = .u8_type, .storage = .{ .u64 = 1 }, } }, - // four_u5 + // four_u8 .{ .int = .{ - .ty = .u5_type, + .ty = .u8_type, .storage = .{ .u64 = 4 }, } }, // negative_one @@ -1092,12 +1158,12 @@ pub const static_keys = [_]Key{ // calling_convention_c .{ .enum_tag = .{ .ty = .calling_convention_type, - .int = .one_u5, + .int = .one_u8, } }, // calling_convention_inline .{ .enum_tag = .{ .ty = .calling_convention_type, - .int = .four_u5, + .int = .four_u8, } }, .{ .simple_value = .void }, @@ -1181,6 +1247,9 @@ pub const Tag = enum(u8) { /// An untagged union type which has a safety tag. /// `data` is `Module.Union.Index`. type_union_safety, + /// A function body type. 
+ /// `data` is extra index to `TypeFunction`. + type_function, /// Typed `undefined`. /// `data` is `Index` of the type. @@ -1283,6 +1352,29 @@ pub const Tag = enum(u8) { aggregate, }; +/// Trailing: +/// 0. param_type: Index for each params_len +pub const TypeFunction = struct { + params_len: u32, + return_type: Index, + comptime_bits: u32, + noalias_bits: u32, + flags: Flags, + + pub const Flags = packed struct(u32) { + alignment: Alignment, + cc: std.builtin.CallingConvention, + is_var_args: bool, + is_generic: bool, + is_noinline: bool, + align_is_generic: bool, + cc_is_generic: bool, + section_is_generic: bool, + addrspace_is_generic: bool, + _: u11 = 0, + }; +}; + /// Trailing: /// 0. element: Index for each len /// len is determined by the aggregate type. @@ -1371,24 +1463,6 @@ pub const Pointer = struct { flags: Flags, packed_offset: PackedOffset, - /// Stored as a power-of-two, with one special value to indicate none. - pub const Alignment = enum(u6) { - none = std.math.maxInt(u6), - _, - - pub fn toByteUnits(a: Alignment, default: u64) u64 { - return switch (a) { - .none => default, - _ => @as(u64, 1) << @enumToInt(a), - }; - } - - pub fn fromByteUnits(n: u64) Alignment { - if (n == 0) return .none; - return @intToEnum(Alignment, @ctz(n)); - } - }; - pub const Flags = packed struct(u32) { size: Size, alignment: Alignment, @@ -1409,6 +1483,24 @@ pub const Pointer = struct { pub const VectorIndex = Key.PtrType.VectorIndex; }; +/// Stored as a power-of-two, with one special value to indicate none. +pub const Alignment = enum(u6) { + none = std.math.maxInt(u6), + _, + + pub fn toByteUnits(a: Alignment, default: u64) u64 { + return switch (a) { + .none => default, + _ => @as(u64, 1) << @enumToInt(a), + }; + } + + pub fn fromByteUnits(n: u64) Alignment { + if (n == 0) return .none; + return @intToEnum(Alignment, @ctz(n)); + } +}; + /// Used for non-sentineled arrays that have length fitting in u32, as well as /// vectors. pub const Vector = struct { @@ -1765,6 +1857,7 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { }, .type_enum_explicit => indexToKeyEnum(ip, data, .explicit), .type_enum_nonexhaustive => indexToKeyEnum(ip, data, .nonexhaustive), + .type_function => .{ .func_type = indexToKeyFuncType(ip, data) }, .undef => .{ .undef = @intToEnum(Index, data) }, .opt_null => .{ .opt = .{ @@ -1896,6 +1989,29 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { }; } +fn indexToKeyFuncType(ip: InternPool, data: u32) Key.FuncType { + const type_function = ip.extraDataTrail(TypeFunction, data); + const param_types = @ptrCast( + []Index, + ip.extra.items[type_function.end..][0..type_function.data.params_len], + ); + return .{ + .param_types = param_types, + .return_type = type_function.data.return_type, + .comptime_bits = type_function.data.comptime_bits, + .noalias_bits = type_function.data.noalias_bits, + .alignment = type_function.data.flags.alignment.toByteUnits(0), + .cc = type_function.data.flags.cc, + .is_var_args = type_function.data.flags.is_var_args, + .is_generic = type_function.data.flags.is_generic, + .is_noinline = type_function.data.flags.is_noinline, + .align_is_generic = type_function.data.flags.align_is_generic, + .cc_is_generic = type_function.data.flags.cc_is_generic, + .section_is_generic = type_function.data.flags.section_is_generic, + .addrspace_is_generic = type_function.data.flags.addrspace_is_generic, + }; +} + /// Asserts the integer tag type is already present in the InternPool. 
fn getEnumIntTagType(ip: InternPool, fields_len: u32) Index { return ip.getAssumeExists(.{ .int_type = .{ @@ -1977,7 +2093,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .child = ptr_type.elem_type, .sentinel = ptr_type.sentinel, .flags = .{ - .alignment = Pointer.Alignment.fromByteUnits(ptr_type.alignment), + .alignment = Alignment.fromByteUnits(ptr_type.alignment), .is_const = ptr_type.is_const, .is_volatile = ptr_type.is_volatile, .is_allowzero = ptr_type.is_allowzero, @@ -2163,6 +2279,37 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { } }, + .func_type => |func_type| { + assert(func_type.return_type != .none); + for (func_type.param_types) |param_type| assert(param_type != .none); + + const params_len = @intCast(u32, func_type.param_types.len); + + try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(TypeFunction).Struct.fields.len + + params_len); + ip.items.appendAssumeCapacity(.{ + .tag = .type_function, + .data = ip.addExtraAssumeCapacity(TypeFunction{ + .params_len = params_len, + .return_type = func_type.return_type, + .comptime_bits = func_type.comptime_bits, + .noalias_bits = func_type.noalias_bits, + .flags = .{ + .alignment = Alignment.fromByteUnits(func_type.alignment), + .cc = func_type.cc, + .is_var_args = func_type.is_var_args, + .is_generic = func_type.is_generic, + .is_noinline = func_type.is_noinline, + .align_is_generic = func_type.align_is_generic, + .cc_is_generic = func_type.cc_is_generic, + .section_is_generic = func_type.section_is_generic, + .addrspace_is_generic = func_type.addrspace_is_generic, + }, + }), + }); + ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, func_type.param_types)); + }, + .extern_func => @panic("TODO"), .ptr => |ptr| switch (ptr.addr) { @@ -2736,6 +2883,7 @@ fn addExtraAssumeCapacity(ip: *InternPool, extra: anytype) u32 { OptionalMapIndex => @enumToInt(@field(extra, field.name)), i32 => @bitCast(u32, @field(extra, field.name)), Pointer.Flags => @bitCast(u32, @field(extra, field.name)), + TypeFunction.Flags => @bitCast(u32, @field(extra, field.name)), Pointer.PackedOffset => @bitCast(u32, @field(extra, field.name)), Pointer.VectorIndex => @enumToInt(@field(extra, field.name)), else => @compileError("bad field type: " ++ @typeName(field.type)), @@ -2797,6 +2945,7 @@ fn extraDataTrail(ip: InternPool, comptime T: type, index: usize) struct { data: OptionalMapIndex => @intToEnum(OptionalMapIndex, int32), i32 => @bitCast(i32, int32), Pointer.Flags => @bitCast(Pointer.Flags, int32), + TypeFunction.Flags => @bitCast(TypeFunction.Flags, int32), Pointer.PackedOffset => @bitCast(Pointer.PackedOffset, int32), Pointer.VectorIndex => @intToEnum(Pointer.VectorIndex, int32), else => @compileError("bad field type: " ++ @typeName(field.type)), @@ -2988,17 +3137,17 @@ pub fn getCoercedInts(ip: *InternPool, gpa: Allocator, int: Key.Int, new_ty: Ind } } -pub fn indexToStruct(ip: *InternPool, val: Index) Module.Struct.OptionalIndex { +pub fn indexToStructType(ip: InternPool, val: Index) Module.Struct.OptionalIndex { + assert(val != .none); const tags = ip.items.items(.tag); - if (val == .none) return .none; if (tags[@enumToInt(val)] != .type_struct) return .none; const datas = ip.items.items(.data); return @intToEnum(Module.Struct.Index, datas[@enumToInt(val)]).toOptional(); } -pub fn indexToUnion(ip: *InternPool, val: Index) Module.Union.OptionalIndex { +pub fn indexToUnionType(ip: InternPool, val: Index) Module.Union.OptionalIndex { + assert(val != .none); const tags = 
ip.items.items(.tag); - if (val == .none) return .none; switch (tags[@enumToInt(val)]) { .type_union_tagged, .type_union_untagged, .type_union_safety => {}, else => return .none, @@ -3007,6 +3156,16 @@ pub fn indexToUnion(ip: *InternPool, val: Index) Module.Union.OptionalIndex { return @intToEnum(Module.Union.Index, datas[@enumToInt(val)]).toOptional(); } +pub fn indexToFuncType(ip: InternPool, val: Index) ?Key.FuncType { + assert(val != .none); + const tags = ip.items.items(.tag); + const datas = ip.items.items(.data); + switch (tags[@enumToInt(val)]) { + .type_function => return indexToKeyFuncType(ip, datas[@enumToInt(val)]), + else => return null, + } +} + pub fn isOptionalType(ip: InternPool, ty: Index) bool { const tags = ip.items.items(.tag); if (ty == .none) return false; @@ -3092,6 +3251,11 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void { .type_union_safety, => @sizeOf(Module.Union) + @sizeOf(Module.Namespace) + @sizeOf(Module.Decl), + .type_function => b: { + const info = ip.extraData(TypeFunction, data); + break :b @sizeOf(TypeFunction) + (@sizeOf(u32) * info.params_len); + }, + .undef => 0, .simple_type => 0, .simple_value => 0, diff --git a/src/Module.zig b/src/Module.zig index cf1fea3444..c8e676f813 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -846,7 +846,7 @@ pub const Decl = struct { pub fn getStructIndex(decl: *Decl, mod: *Module) Struct.OptionalIndex { if (!decl.owns_tv) return .none; const ty = (decl.val.castTag(.ty) orelse return .none).data; - return mod.intern_pool.indexToStruct(ty.ip_index); + return mod.intern_pool.indexToStructType(ty.ip_index); } /// If the Decl has a value and it is a union, return it, @@ -4764,7 +4764,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { decl.analysis = .complete; decl.generation = mod.generation; - const is_inline = decl.ty.fnCallingConvention() == .Inline; + const is_inline = decl.ty.fnCallingConvention(mod) == .Inline; if (decl.is_exported) { const export_src: LazySrcLoc = .{ .token_offset = @boolToInt(decl.is_pub) }; if (is_inline) { @@ -5617,6 +5617,9 @@ pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air { const decl_arena_allocator = decl.value_arena.?.acquire(gpa, &decl_arena); defer decl.value_arena.?.release(&decl_arena); + const fn_ty = decl.ty; + const fn_ty_info = mod.typeToFunc(fn_ty).?; + var sema: Sema = .{ .mod = mod, .gpa = gpa, @@ -5626,7 +5629,7 @@ pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air { .owner_decl = decl, .owner_decl_index = decl_index, .func = func, - .fn_ret_ty = decl.ty.fnReturnType(), + .fn_ret_ty = fn_ty_info.return_type.toType(), .owner_func = func, .branch_quota = @max(func.branch_quota, Sema.default_branch_quota), }; @@ -5664,8 +5667,6 @@ pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air { // This could be a generic function instantiation, however, in which case we need to // map the comptime parameters to constant values and only emit arg AIR instructions // for the runtime ones. 
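// Editor's aside (sketch, not part of the patch): `fn_ty_info` is now fetched
// once, up front, through the new `Module.typeToFunc` accessor, and each
// parameter slot comes back as an InternPool.Index that call sites unwrap on
// demand, as the surrounding hunks do:
//     const fn_ty_info = mod.typeToFunc(fn_ty).?; // .? : fn_ty is known to be a fn type
//     const param_ty = fn_ty_info.param_types[runtime_param_index].toType();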
- const fn_ty = decl.ty; - const fn_ty_info = fn_ty.fnInfo(); const runtime_params_len = @intCast(u32, fn_ty_info.param_types.len); try inner_block.instructions.ensureTotalCapacityPrecise(gpa, runtime_params_len); try sema.air_instructions.ensureUnusedCapacity(gpa, fn_info.total_params_len * 2); // * 2 for the `addType` @@ -5692,7 +5693,7 @@ pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air { sema.inst_map.putAssumeCapacityNoClobber(inst, arg); total_param_index += 1; continue; - } else fn_ty_info.param_types[runtime_param_index]; + } else fn_ty_info.param_types[runtime_param_index].toType(); const opt_opv = sema.typeHasOnePossibleValue(param_ty) catch |err| switch (err) { error.NeededSourceLocation => unreachable, @@ -6864,6 +6865,10 @@ pub fn singleConstPtrType(mod: *Module, child_type: Type) Allocator.Error!Type { return ptrType(mod, .{ .elem_type = child_type.ip_index, .is_const = true }); } +pub fn funcType(mod: *Module, info: InternPool.Key.FuncType) Allocator.Error!Type { + return (try intern(mod, .{ .func_type = info })).toType(); +} + /// Supports optionals in addition to pointers. pub fn ptrIntValue(mod: *Module, ty: Type, x: u64) Allocator.Error!Value { if (ty.isPtrLikeOptional(mod)) { @@ -6996,6 +7001,16 @@ pub fn floatValue(mod: *Module, ty: Type, x: anytype) Allocator.Error!Value { return i.toValue(); } +pub fn nullValue(mod: *Module, opt_ty: Type) Allocator.Error!Value { + const ip = &mod.intern_pool; + assert(ip.isOptionalType(opt_ty.ip_index)); + const result = try ip.get(mod.gpa, .{ .opt = .{ + .ty = opt_ty.ip_index, + .val = .none, + } }); + return result.toValue(); +} + pub fn smallestUnsignedInt(mod: *Module, max: u64) Allocator.Error!Type { return intType(mod, .unsigned, Type.smallestUnsignedBits(max)); } @@ -7201,15 +7216,22 @@ pub fn namespaceDeclIndex(mod: *Module, namespace_index: Namespace.Index) Decl.I /// * A struct which has no fields (`struct {}`). /// * Not a struct. 
pub fn typeToStruct(mod: *Module, ty: Type) ?*Struct { - const struct_index = mod.intern_pool.indexToStruct(ty.ip_index).unwrap() orelse return null; + if (ty.ip_index == .none) return null; + const struct_index = mod.intern_pool.indexToStructType(ty.ip_index).unwrap() orelse return null; return mod.structPtr(struct_index); } pub fn typeToUnion(mod: *Module, ty: Type) ?*Union { - const union_index = mod.intern_pool.indexToUnion(ty.ip_index).unwrap() orelse return null; + if (ty.ip_index == .none) return null; + const union_index = mod.intern_pool.indexToUnionType(ty.ip_index).unwrap() orelse return null; return mod.unionPtr(union_index); } +pub fn typeToFunc(mod: *Module, ty: Type) ?InternPool.Key.FuncType { + if (ty.ip_index == .none) return null; + return mod.intern_pool.indexToFuncType(ty.ip_index); +} + pub fn fieldSrcLoc(mod: *Module, owner_decl_index: Decl.Index, query: FieldSrcQuery) SrcLoc { @setCold(true); const owner_decl = mod.declPtr(owner_decl_index); diff --git a/src/Sema.zig b/src/Sema.zig index 74b3cdd114..eb8dc5a633 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -5850,6 +5850,7 @@ pub fn analyzeExport( } fn zirSetAlignStack(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!void { + const mod = sema.mod; const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node }; const src = LazySrcLoc.nodeOffset(extra.node); @@ -5862,8 +5863,8 @@ fn zirSetAlignStack(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Inst const func = sema.func orelse return sema.fail(block, src, "@setAlignStack outside function body", .{}); - const fn_owner_decl = sema.mod.declPtr(func.owner_decl); - switch (fn_owner_decl.ty.fnCallingConvention()) { + const fn_owner_decl = mod.declPtr(func.owner_decl); + switch (fn_owner_decl.ty.fnCallingConvention(mod)) { .Naked => return sema.fail(block, src, "@setAlignStack in naked function", .{}), .Inline => return sema.fail(block, src, "@setAlignStack in inline function", .{}), else => if (block.inlining != null) { @@ -5871,7 +5872,7 @@ fn zirSetAlignStack(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Inst }, } - const gop = try sema.mod.align_stack_fns.getOrPut(sema.mod.gpa, func); + const gop = try mod.align_stack_fns.getOrPut(mod.gpa, func); if (gop.found_existing) { const msg = msg: { const msg = try sema.errMsg(block, src, "multiple @setAlignStack in the same function body", .{}); @@ -6378,7 +6379,7 @@ fn zirCall( var input_is_error = false; const block_index = @intCast(Air.Inst.Index, block.instructions.items.len); - const func_ty_info = func_ty.fnInfo(); + const func_ty_info = mod.typeToFunc(func_ty).?; const fn_params_len = func_ty_info.param_types.len; const parent_comptime = block.is_comptime; // `extra_index` and `arg_index` are separate since the bound function is passed as the first argument. @@ -6393,7 +6394,7 @@ fn zirCall( // Generate args to comptime params in comptime block. 
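// Editor's aside (illustrative sketch, not part of the patch; the helper name
// is hypothetical): unlike the old `Type.fnInfo()`, `Module.typeToFunc` returns
// an optional, yielding null both for non-function types and for legacy types
// not backed by the pool (`ip_index == .none`), so it doubles as an
// "is this a function type?" query:
fn fnParamCount(mod: *Module, ty: Type) ?usize {
    const info = mod.typeToFunc(ty) orelse return null; // null: not a function type
    return info.param_types.len;
}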
defer block.is_comptime = parent_comptime; - if (arg_index < fn_params_len and func_ty_info.comptime_params[arg_index]) { + if (arg_index < fn_params_len and func_ty_info.paramIsComptime(@intCast(u5, arg_index))) { block.is_comptime = true; // TODO set comptime_reason } @@ -6402,10 +6403,10 @@ fn zirCall( if (arg_index >= fn_params_len) break :inst Air.Inst.Ref.var_args_param_type; - if (func_ty_info.param_types[arg_index].isGenericPoison()) + if (func_ty_info.param_types[arg_index] == .generic_poison_type) break :inst Air.Inst.Ref.generic_poison_type; - break :inst try sema.addType(func_ty_info.param_types[arg_index]); + break :inst try sema.addType(func_ty_info.param_types[arg_index].toType()); }); const resolved = try sema.resolveBody(block, args_body[arg_start..arg_end], inst); @@ -6506,7 +6507,7 @@ fn checkCallArgumentCount( return sema.fail(block, func_src, "type '{}' not a function", .{callee_ty.fmt(sema.mod)}); }; - const func_ty_info = func_ty.fnInfo(); + const func_ty_info = mod.typeToFunc(func_ty).?; const fn_params_len = func_ty_info.param_types.len; const args_len = total_args - @boolToInt(member_fn); if (func_ty_info.is_var_args) { @@ -6562,7 +6563,7 @@ fn callBuiltin( std.debug.panic("type '{}' is not a function calling builtin fn", .{callee_ty.fmt(sema.mod)}); }; - const func_ty_info = func_ty.fnInfo(); + const func_ty_info = mod.typeToFunc(func_ty).?; const fn_params_len = func_ty_info.param_types.len; if (args.len != fn_params_len or (func_ty_info.is_var_args and args.len < fn_params_len)) { std.debug.panic("parameter count mismatch calling builtin fn, expected {d}, found {d}", .{ fn_params_len, args.len }); @@ -6573,7 +6574,7 @@ fn callBuiltin( const GenericCallAdapter = struct { generic_fn: *Module.Fn, precomputed_hash: u64, - func_ty_info: Type.Payload.Function.Data, + func_ty_info: InternPool.Key.FuncType, args: []const Arg, module: *Module, @@ -6656,7 +6657,7 @@ fn analyzeCall( const mod = sema.mod; const callee_ty = sema.typeOf(func); - const func_ty_info = func_ty.fnInfo(); + const func_ty_info = mod.typeToFunc(func_ty).?; const fn_params_len = func_ty_info.param_types.len; const cc = func_ty_info.cc; if (cc == .Naked) { @@ -6704,7 +6705,7 @@ fn analyzeCall( var comptime_reason_buf: Block.ComptimeReason = undefined; var comptime_reason: ?*const Block.ComptimeReason = null; if (!is_comptime_call) { - if (sema.typeRequiresComptime(func_ty_info.return_type)) |ct| { + if (sema.typeRequiresComptime(func_ty_info.return_type.toType())) |ct| { is_comptime_call = ct; if (ct) { // stage1 can't handle doing this directly @@ -6712,7 +6713,7 @@ fn analyzeCall( .block = block, .func = func, .func_src = func_src, - .return_ty = func_ty_info.return_type, + .return_ty = func_ty_info.return_type.toType(), } }; comptime_reason = &comptime_reason_buf; } @@ -6750,7 +6751,7 @@ fn analyzeCall( .block = block, .func = func, .func_src = func_src, - .return_ty = func_ty_info.return_type, + .return_ty = func_ty_info.return_type.toType(), } }; comptime_reason = &comptime_reason_buf; }, @@ -6875,9 +6876,9 @@ fn analyzeCall( // comptime state. 
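// Editor's aside (sketch under stated assumptions, not part of the patch): the
// old `comptime_params: [*]bool` is replaced by the packed `comptime_bits: u32`
// bitset -- bit i set means parameter i is comptime, hence the u5 shift amounts
// and the 32-parameter limit on this flag. The two primitive operations,
// assuming `std` is in scope as it is in Sema.zig:
test "comptime_bits round trip" {
    var bits: u32 = 0;
    bits |= @as(u32, 1) << @intCast(u5, 2); // mark parameter 2 as comptime
    try std.testing.expect(@truncate(u1, bits >> 2) != 0); // what paramIsComptime(2) computes
}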
var should_memoize = true; - var new_fn_info = fn_owner_decl.ty.fnInfo(); - new_fn_info.param_types = try sema.arena.alloc(Type, new_fn_info.param_types.len); - new_fn_info.comptime_params = (try sema.arena.alloc(bool, new_fn_info.param_types.len)).ptr; + var new_fn_info = mod.typeToFunc(fn_owner_decl.ty).?; + new_fn_info.param_types = try sema.arena.alloc(InternPool.Index, new_fn_info.param_types.len); + new_fn_info.comptime_bits = 0; // This will have return instructions analyzed as break instructions to // the block_inst above. Here we are performing "comptime/inline semantic analysis" @@ -6970,7 +6971,7 @@ fn analyzeCall( } break :blk bare_return_type; }; - new_fn_info.return_type = fn_ret_ty; + new_fn_info.return_type = fn_ret_ty.ip_index; const parent_fn_ret_ty = sema.fn_ret_ty; sema.fn_ret_ty = fn_ret_ty; defer sema.fn_ret_ty = parent_fn_ret_ty; @@ -6993,7 +6994,7 @@ fn analyzeCall( } } - const new_func_resolved_ty = try Type.Tag.function.create(sema.arena, new_fn_info); + const new_func_resolved_ty = try mod.funcType(new_fn_info); if (!is_comptime_call and !block.is_typeof) { try sema.emitDbgInline(block, parent_func.?, module_fn, new_func_resolved_ty, .dbg_inline_begin); @@ -7081,13 +7082,14 @@ fn analyzeCall( assert(!func_ty_info.is_generic); const args = try sema.arena.alloc(Air.Inst.Ref, uncasted_args.len); + const fn_info = mod.typeToFunc(func_ty).?; for (uncasted_args, 0..) |uncasted_arg, i| { if (i < fn_params_len) { const opts: CoerceOpts = .{ .param_src = .{ .func_inst = func, .param_i = @intCast(u32, i), } }; - const param_ty = func_ty.fnParamType(i); + const param_ty = fn_info.param_types[i].toType(); args[i] = sema.analyzeCallArg( block, .unneeded, @@ -7126,8 +7128,8 @@ fn analyzeCall( if (call_dbg_node) |some| try sema.zirDbgStmt(block, some); - try sema.queueFullTypeResolution(func_ty_info.return_type); - if (sema.owner_func != null and func_ty_info.return_type.isError(mod)) { + try sema.queueFullTypeResolution(func_ty_info.return_type.toType()); + if (sema.owner_func != null and func_ty_info.return_type.toType().isError(mod)) { sema.owner_func.?.calls_or_awaits_errorable_fn = true; } @@ -7155,7 +7157,7 @@ fn analyzeCall( try sema.ensureResultUsed(block, sema.typeOf(func_inst), call_src); } return sema.handleTailCall(block, call_src, func_ty, func_inst); - } else if (block.wantSafety() and func_ty_info.return_type.isNoReturn()) { + } else if (block.wantSafety() and func_ty_info.return_type == .noreturn_type) { // Function pointers and extern functions aren't guaranteed to // actually be noreturn so we add a safety check for them. 
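// Editor's aside (sketch, not part of the patch; helper name hypothetical):
// with return types stored as InternPool.Index values, "is this noreturn?"
// collapses from a Type method call into a comparison against the statically
// known index, as the surrounding hunks show:
fn returnsNoReturn(info: InternPool.Key.FuncType) bool {
    return info.return_type == .noreturn_type; // was: return_type.isNoReturn()
}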
check: { @@ -7171,7 +7173,7 @@ fn analyzeCall( try sema.safetyPanic(block, .noreturn_returned); return Air.Inst.Ref.unreachable_value; - } else if (func_ty_info.return_type.isNoReturn()) { + } else if (func_ty_info.return_type == .noreturn_type) { _ = try block.addNoOp(.unreach); return Air.Inst.Ref.unreachable_value; } @@ -7208,13 +7210,13 @@ fn analyzeInlineCallArg( param_block: *Block, arg_src: LazySrcLoc, inst: Zir.Inst.Index, - new_fn_info: Type.Payload.Function.Data, + new_fn_info: InternPool.Key.FuncType, arg_i: *usize, uncasted_args: []const Air.Inst.Ref, is_comptime_call: bool, should_memoize: *bool, memoized_call_key: Module.MemoizedCall.Key, - raw_param_types: []const Type, + raw_param_types: []const InternPool.Index, func_inst: Air.Inst.Ref, has_comptime_args: *bool, ) !void { @@ -7233,13 +7235,14 @@ fn analyzeInlineCallArg( const param_body = sema.code.extra[extra.end..][0..extra.data.body_len]; const param_ty = param_ty: { const raw_param_ty = raw_param_types[arg_i.*]; - if (!raw_param_ty.isGenericPoison()) break :param_ty raw_param_ty; + if (raw_param_ty != .generic_poison_type) break :param_ty raw_param_ty; const param_ty_inst = try sema.resolveBody(param_block, param_body, inst); - break :param_ty try sema.analyzeAsType(param_block, param_src, param_ty_inst); + const param_ty = try sema.analyzeAsType(param_block, param_src, param_ty_inst); + break :param_ty param_ty.toIntern(); }; new_fn_info.param_types[arg_i.*] = param_ty; const uncasted_arg = uncasted_args[arg_i.*]; - if (try sema.typeRequiresComptime(param_ty)) { + if (try sema.typeRequiresComptime(param_ty.toType())) { _ = sema.resolveConstMaybeUndefVal(arg_block, arg_src, uncasted_arg, "argument to parameter with comptime-only type must be comptime-known") catch |err| { if (err == error.AnalysisFail and param_block.comptime_reason != null) try param_block.comptime_reason.?.explain(sema, sema.err); return err; @@ -7247,7 +7250,7 @@ fn analyzeInlineCallArg( } else if (!is_comptime_call and zir_tags[inst] == .param_comptime) { _ = try sema.resolveConstMaybeUndefVal(arg_block, arg_src, uncasted_arg, "parameter is comptime"); } - const casted_arg = sema.coerceExtra(arg_block, param_ty, uncasted_arg, arg_src, .{ .param_src = .{ + const casted_arg = sema.coerceExtra(arg_block, param_ty.toType(), uncasted_arg, arg_src, .{ .param_src = .{ .func_inst = func_inst, .param_i = @intCast(u32, arg_i.*), } }) catch |err| switch (err) { @@ -7276,7 +7279,7 @@ fn analyzeInlineCallArg( } should_memoize.* = should_memoize.* and !arg_val.canMutateComptimeVarState(); memoized_call_key.args[arg_i.*] = .{ - .ty = param_ty, + .ty = param_ty.toType(), .val = arg_val, }; } else { @@ -7292,7 +7295,7 @@ fn analyzeInlineCallArg( .param_anytype, .param_anytype_comptime => { // No coercion needed. 
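// Editor's aside (sketch, not part of the patch; helper name hypothetical):
// parameter slots now hold InternPool.Index values, so `Type.toIntern()` and
// `Index.toType()` convert at the boundary. An anytype parameter simply
// records the call-site argument's type, which is exactly what the hunk below
// does inline:
fn recordAnytypeParam(sema: *Sema, info: *InternPool.Key.FuncType, i: usize, arg: Air.Inst.Ref) void {
    info.param_types[i] = sema.typeOf(arg).toIntern(); // Type -> Index
}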
const uncasted_arg = uncasted_args[arg_i.*]; - new_fn_info.param_types[arg_i.*] = sema.typeOf(uncasted_arg); + new_fn_info.param_types[arg_i.*] = sema.typeOf(uncasted_arg).toIntern(); if (is_comptime_call) { sema.inst_map.putAssumeCapacityNoClobber(inst, uncasted_arg); @@ -7357,7 +7360,7 @@ fn analyzeGenericCallArg( uncasted_arg: Air.Inst.Ref, comptime_arg: TypedValue, runtime_args: []Air.Inst.Ref, - new_fn_info: Type.Payload.Function.Data, + new_fn_info: InternPool.Key.FuncType, runtime_i: *u32, ) !void { const mod = sema.mod; @@ -7365,7 +7368,7 @@ fn analyzeGenericCallArg( comptime_arg.ty.hasRuntimeBits(mod) and !(try sema.typeRequiresComptime(comptime_arg.ty)); if (is_runtime) { - const param_ty = new_fn_info.param_types[runtime_i.*]; + const param_ty = new_fn_info.param_types[runtime_i.*].toType(); const casted_arg = try sema.coerce(block, param_ty, uncasted_arg, arg_src); try sema.queueFullTypeResolution(param_ty); runtime_args[runtime_i.*] = casted_arg; @@ -7387,7 +7390,7 @@ fn instantiateGenericCall( func: Air.Inst.Ref, func_src: LazySrcLoc, call_src: LazySrcLoc, - func_ty_info: Type.Payload.Function.Data, + func_ty_info: InternPool.Key.FuncType, ensure_result_used: bool, uncasted_args: []const Air.Inst.Ref, call_tag: Air.Inst.Tag, @@ -7431,14 +7434,14 @@ fn instantiateGenericCall( var is_anytype = false; switch (zir_tags[inst]) { .param => { - is_comptime = func_ty_info.paramIsComptime(i); + is_comptime = func_ty_info.paramIsComptime(@intCast(u5, i)); }, .param_comptime => { is_comptime = true; }, .param_anytype => { is_anytype = true; - is_comptime = func_ty_info.paramIsComptime(i); + is_comptime = func_ty_info.paramIsComptime(@intCast(u5, i)); }, .param_anytype_comptime => { is_anytype = true; @@ -7609,7 +7612,7 @@ fn instantiateGenericCall( // Make a runtime call to the new function, making sure to omit the comptime args. 
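// Editor's aside (illustrative sketch, not part of the patch): a hypothetical
// helper showing the filtering rule used below -- parameters whose comptime bit
// is set are baked into the instantiation and never become runtime call
// arguments (the bitset covers at most 32 parameters, hence the u5 cast):
fn runtimeParamCount(info: InternPool.Key.FuncType) u32 {
    var n: u32 = 0;
    for (info.param_types, 0..) |_, i| {
        if (!info.paramIsComptime(@intCast(u5, i))) n += 1;
    }
    return n;
}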
const comptime_args = callee.comptime_args.?; const func_ty = mod.declPtr(callee.owner_decl).ty; - const new_fn_info = func_ty.fnInfo(); + const new_fn_info = mod.typeToFunc(func_ty).?; const runtime_args_len = @intCast(u32, new_fn_info.param_types.len); const runtime_args = try sema.arena.alloc(Air.Inst.Ref, runtime_args_len); { @@ -7647,12 +7650,12 @@ fn instantiateGenericCall( total_i += 1; } - try sema.queueFullTypeResolution(new_fn_info.return_type); + try sema.queueFullTypeResolution(new_fn_info.return_type.toType()); } if (call_dbg_node) |some| try sema.zirDbgStmt(block, some); - if (sema.owner_func != null and new_fn_info.return_type.isError(mod)) { + if (sema.owner_func != null and new_fn_info.return_type.toType().isError(mod)) { sema.owner_func.?.calls_or_awaits_errorable_fn = true; } @@ -7677,7 +7680,7 @@ fn instantiateGenericCall( if (call_tag == .call_always_tail) { return sema.handleTailCall(block, call_src, func_ty, result); } - if (new_fn_info.return_type.isNoReturn()) { + if (new_fn_info.return_type == .noreturn_type) { _ = try block.addNoOp(.unreach); return Air.Inst.Ref.unreachable_value; } @@ -7695,7 +7698,7 @@ fn resolveGenericInstantiationType( module_fn: *Module.Fn, new_module_func: *Module.Fn, namespace: Namespace.Index, - func_ty_info: Type.Payload.Function.Data, + func_ty_info: InternPool.Key.FuncType, call_src: LazySrcLoc, bound_arg_src: ?LazySrcLoc, ) !*Module.Fn { @@ -7755,14 +7758,14 @@ fn resolveGenericInstantiationType( var is_anytype = false; switch (zir_tags[inst]) { .param => { - is_comptime = func_ty_info.paramIsComptime(arg_i); + is_comptime = func_ty_info.paramIsComptime(@intCast(u5, arg_i)); }, .param_comptime => { is_comptime = true; }, .param_anytype => { is_anytype = true; - is_comptime = func_ty_info.paramIsComptime(arg_i); + is_comptime = func_ty_info.paramIsComptime(@intCast(u5, arg_i)); }, .param_anytype_comptime => { is_anytype = true; @@ -7822,13 +7825,13 @@ fn resolveGenericInstantiationType( var is_comptime = false; switch (zir_tags[inst]) { .param => { - is_comptime = func_ty_info.paramIsComptime(arg_i); + is_comptime = func_ty_info.paramIsComptime(@intCast(u5, arg_i)); }, .param_comptime => { is_comptime = true; }, .param_anytype => { - is_comptime = func_ty_info.paramIsComptime(arg_i); + is_comptime = func_ty_info.paramIsComptime(@intCast(u5, arg_i)); }, .param_anytype_comptime => { is_comptime = true; @@ -7868,8 +7871,8 @@ fn resolveGenericInstantiationType( new_decl.ty = try child_sema.typeOf(new_func_inst).copy(new_decl_arena_allocator); // If the call evaluated to a return type that requires comptime, never mind // our generic instantiation. Instead we need to perform a comptime call. - const new_fn_info = new_decl.ty.fnInfo(); - if (try sema.typeRequiresComptime(new_fn_info.return_type)) { + const new_fn_info = mod.typeToFunc(new_decl.ty).?; + if (try sema.typeRequiresComptime(new_fn_info.return_type.toType())) { return error.ComptimeReturn; } // Similarly, if the call evaluated to a generic type we need to instead @@ -8969,19 +8972,19 @@ fn funcCommon( // the instantiation, which can depend on comptime parameters. // Related proposal: https://github.com/ziglang/zig/issues/11834 const cc_resolved = cc orelse .Unspecified; - const param_types = try sema.arena.alloc(Type, block.params.items.len); - const comptime_params = try sema.arena.alloc(bool, block.params.items.len); - for (block.params.items, 0..) 
|param, i| { + const param_types = try sema.arena.alloc(InternPool.Index, block.params.items.len); + var comptime_bits: u32 = 0; + for (param_types, block.params.items, 0..) |*dest_param_ty, param, i| { const is_noalias = blk: { const index = std.math.cast(u5, i) orelse break :blk false; break :blk @truncate(u1, noalias_bits >> index) != 0; }; - param_types[i] = param.ty; + dest_param_ty.* = param.ty.toIntern(); sema.analyzeParameter( block, .unneeded, param, - comptime_params, + &comptime_bits, i, &is_generic, cc_resolved, @@ -8994,7 +8997,7 @@ fn funcCommon( block, Module.paramSrc(src_node_offset, mod, decl, i), param, - comptime_params, + &comptime_bits, i, &is_generic, cc_resolved, @@ -9019,7 +9022,7 @@ fn funcCommon( else => |e| return e, }; - const return_type = if (!inferred_error_set or ret_poison) + const return_type: Type = if (!inferred_error_set or ret_poison) bare_return_type else blk: { try sema.validateErrorUnionPayloadType(block, bare_return_type, ret_ty_src); @@ -9047,7 +9050,9 @@ fn funcCommon( }; return sema.failWithOwnedErrorMsg(msg); } - if (!ret_poison and !Type.fnCallingConventionAllowsZigTypes(target, cc_resolved) and !try sema.validateExternType(return_type, .ret_ty)) { + if (!ret_poison and !target_util.fnCallConvAllowsZigTypes(target, cc_resolved) and + !try sema.validateExternType(return_type, .ret_ty)) + { const msg = msg: { const msg = try sema.errMsg(block, ret_ty_src, "return type '{}' not allowed in function with calling convention '{s}'", .{ return_type.fmt(sema.mod), @tagName(cc_resolved), @@ -9141,8 +9146,7 @@ fn funcCommon( return sema.fail(block, cc_src, "'noinline' function cannot have callconv 'Inline'", .{}); } if (is_generic and sema.no_partial_func_ty) return error.GenericPoison; - for (comptime_params) |ct| is_generic = is_generic or ct; - is_generic = is_generic or ret_ty_requires_comptime; + is_generic = is_generic or comptime_bits != 0 or ret_ty_requires_comptime; if (!is_generic and sema.wantErrorReturnTracing(return_type)) { // Make sure that StackTrace's fields are resolved so that the backend can @@ -9151,10 +9155,11 @@ fn funcCommon( _ = try sema.resolveTypeFields(unresolved_stack_trace_ty); } - break :fn_ty try Type.Tag.function.create(sema.arena, .{ + break :fn_ty try mod.funcType(.{ .param_types = param_types, - .comptime_params = comptime_params.ptr, - .return_type = return_type, + .noalias_bits = noalias_bits, + .comptime_bits = comptime_bits, + .return_type = return_type.toIntern(), .cc = cc_resolved, .cc_is_generic = cc == null, .alignment = alignment orelse 0, @@ -9164,7 +9169,6 @@ fn funcCommon( .is_var_args = var_args, .is_generic = is_generic, .is_noinline = is_noinline, - .noalias_bits = noalias_bits, }); }; @@ -9203,7 +9207,7 @@ fn funcCommon( return sema.addType(fn_ty); } - const is_inline = fn_ty.fnCallingConvention() == .Inline; + const is_inline = fn_ty.fnCallingConvention(mod) == .Inline; const anal_state: Module.Fn.Analysis = if (is_inline) .inline_only else .none; const comptime_args: ?[*]TypedValue = if (sema.comptime_args_fn_inst == func_inst) blk: { @@ -9243,7 +9247,7 @@ fn analyzeParameter( block: *Block, param_src: LazySrcLoc, param: Block.Param, - comptime_params: []bool, + comptime_bits: *u32, i: usize, is_generic: *bool, cc: std.builtin.CallingConvention, @@ -9252,14 +9256,16 @@ fn analyzeParameter( ) !void { const mod = sema.mod; const requires_comptime = try sema.typeRequiresComptime(param.ty); - comptime_params[i] = param.is_comptime or requires_comptime; + if (param.is_comptime or requires_comptime) { + 
comptime_bits.* |= @as(u32, 1) << @intCast(u5, i); // TODO: handle cast error + } const this_generic = param.ty.isGenericPoison(); is_generic.* = is_generic.* or this_generic; const target = mod.getTarget(); - if (param.is_comptime and !Type.fnCallingConventionAllowsZigTypes(target, cc)) { + if (param.is_comptime and !target_util.fnCallConvAllowsZigTypes(target, cc)) { return sema.fail(block, param_src, "comptime parameters not allowed in function with calling convention '{s}'", .{@tagName(cc)}); } - if (this_generic and !sema.no_partial_func_ty and !Type.fnCallingConventionAllowsZigTypes(target, cc)) { + if (this_generic and !sema.no_partial_func_ty and !target_util.fnCallConvAllowsZigTypes(target, cc)) { return sema.fail(block, param_src, "generic parameters not allowed in function with calling convention '{s}'", .{@tagName(cc)}); } if (!param.ty.isValidParamType(mod)) { @@ -9275,7 +9281,7 @@ fn analyzeParameter( }; return sema.failWithOwnedErrorMsg(msg); } - if (!this_generic and !Type.fnCallingConventionAllowsZigTypes(target, cc) and !try sema.validateExternType(param.ty, .param_ty)) { + if (!this_generic and !target_util.fnCallConvAllowsZigTypes(target, cc) and !try sema.validateExternType(param.ty, .param_ty)) { const msg = msg: { const msg = try sema.errMsg(block, param_src, "parameter of type '{}' not allowed in function with calling convention '{s}'", .{ param.ty.fmt(mod), @tagName(cc), @@ -15986,22 +15992,18 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai ), .Fn => { // TODO: look into memoizing this result. - const info = ty.fnInfo(); + const info = mod.typeToFunc(ty).?; var params_anon_decl = try block.startAnonDecl(); defer params_anon_decl.deinit(); const param_vals = try params_anon_decl.arena().alloc(Value, info.param_types.len); - for (param_vals, 0..) |*param_val, i| { - const param_ty = info.param_types[i]; - const is_generic = param_ty.isGenericPoison(); - const param_ty_val = if (is_generic) - Value.null - else - try Value.Tag.opt_payload.create( - params_anon_decl.arena(), - try Value.Tag.ty.create(params_anon_decl.arena(), try param_ty.copy(params_anon_decl.arena())), - ); + for (param_vals, info.param_types, 0..) 
|*param_val, param_ty, i| { + const is_generic = param_ty == .generic_poison_type; + const param_ty_val = try mod.intern_pool.get(mod.gpa, .{ .opt = .{ + .ty = try mod.intern_pool.get(mod.gpa, .{ .opt_type = .type_type }), + .val = if (is_generic) .none else param_ty, + } }); const is_noalias = blk: { const index = std.math.cast(u5, i) orelse break :blk false; @@ -16015,7 +16017,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // is_noalias: bool, Value.makeBool(is_noalias), // type: ?type, - param_ty_val, + param_ty_val.toValue(), }; param_val.* = try Value.Tag.aggregate.create(params_anon_decl.arena(), param_fields); } @@ -16059,13 +16061,10 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai }); }; - const ret_ty_opt = if (!info.return_type.isGenericPoison()) - try Value.Tag.opt_payload.create( - sema.arena, - try Value.Tag.ty.create(sema.arena, info.return_type), - ) - else - Value.null; + const ret_ty_opt = try mod.intern_pool.get(mod.gpa, .{ .opt = .{ + .ty = try mod.intern_pool.get(mod.gpa, .{ .opt_type = .type_type }), + .val = if (info.return_type == .generic_poison_type) .none else info.return_type, + } }); const callconv_ty = try sema.getBuiltinType("CallingConvention"); @@ -16080,7 +16079,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // is_var_args: bool, Value.makeBool(info.is_var_args), // return_type: ?type, - ret_ty_opt, + ret_ty_opt.toValue(), // args: []const Fn.Param, args_val, }; @@ -17788,7 +17787,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air if (inst_data.size != .One) { return sema.fail(block, elem_ty_src, "function pointers must be single pointers", .{}); } - const fn_align = elem_ty.fnInfo().alignment; + const fn_align = mod.typeToFunc(elem_ty).?.alignment; if (inst_data.flags.has_align and abi_align != 0 and fn_align != 0 and abi_align != fn_align) { @@ -18939,7 +18938,7 @@ fn zirReify( if (ptr_size != .One) { return sema.fail(block, src, "function pointers must be single pointers", .{}); } - const fn_align = elem_ty.fnInfo().alignment; + const fn_align = mod.typeToFunc(elem_ty).?.alignment; if (abi_align != 0 and fn_align != 0 and abi_align != fn_align) { @@ -19483,12 +19482,10 @@ fn zirReify( const args_slice_val = args_val.castTag(.slice).?.data; const args_len = try sema.usizeCast(block, src, args_slice_val.len.toUnsignedInt(mod)); - const param_types = try sema.arena.alloc(Type, args_len); - const comptime_params = try sema.arena.alloc(bool, args_len); + const param_types = try sema.arena.alloc(InternPool.Index, args_len); var noalias_bits: u32 = 0; - var i: usize = 0; - while (i < args_len) : (i += 1) { + for (param_types, 0..) 
|*param_type, i| { const arg = try args_slice_val.ptr.elemValue(mod, i); const arg_val = arg.castTag(.aggregate).?.data; // TODO use reflection instead of magic numbers here @@ -19505,25 +19502,22 @@ fn zirReify( const param_type_val = param_type_opt_val.optionalValue(mod) orelse return sema.fail(block, src, "Type.Fn.Param.arg_type must be non-null for @Type", .{}); - const param_type = try param_type_val.toType().copy(sema.arena); + param_type.* = param_type_val.ip_index; if (arg_is_noalias) { - if (!param_type.isPtrAtRuntime(mod)) { + if (!param_type.toType().isPtrAtRuntime(mod)) { return sema.fail(block, src, "non-pointer parameter declared noalias", .{}); } noalias_bits |= @as(u32, 1) << (std.math.cast(u5, i) orelse return sema.fail(block, src, "this compiler implementation only supports 'noalias' on the first 32 parameters", .{})); } - - param_types[i] = param_type; - comptime_params[i] = false; } - var fn_info = Type.Payload.Function.Data{ + const ty = try mod.funcType(.{ .param_types = param_types, - .comptime_params = comptime_params.ptr, + .comptime_bits = 0, .noalias_bits = noalias_bits, - .return_type = try return_type.toType().copy(sema.arena), + .return_type = return_type.toIntern(), .alignment = alignment, .cc = cc, .is_var_args = is_var_args, @@ -19533,9 +19527,7 @@ fn zirReify( .cc_is_generic = false, .section_is_generic = false, .addrspace_is_generic = false, - }; - - const ty = try Type.Tag.function.create(sema.arena, fn_info); + }); return sema.addType(ty); }, .Frame => return sema.failWithUseOfAsync(block, src), @@ -23435,7 +23427,7 @@ fn explainWhyTypeIsComptimeInner( .Pointer => { const elem_ty = ty.elemType2(mod); if (elem_ty.zigTypeTag(mod) == .Fn) { - const fn_info = elem_ty.fnInfo(); + const fn_info = mod.typeToFunc(elem_ty).?; if (fn_info.is_generic) { try mod.errNoteNonLazy(src_loc, msg, "function is generic", .{}); } @@ -23443,7 +23435,7 @@ fn explainWhyTypeIsComptimeInner( .Inline => try mod.errNoteNonLazy(src_loc, msg, "function has inline calling convention", .{}), else => {}, } - if (fn_info.return_type.comptimeOnly(mod)) { + if (fn_info.return_type.toType().comptimeOnly(mod)) { try mod.errNoteNonLazy(src_loc, msg, "function has a comptime-only return type", .{}); } return; @@ -23543,10 +23535,10 @@ fn validateExternType( const target = sema.mod.getTarget(); // For now we want to authorize PTX kernel to use zig objects, even if we end up exposing the ABI. // The goal is to experiment with more integrated CPU/GPU code. 
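// Editor's aside (not part of the patch; wrapper name hypothetical): the
// calling-convention predicate moves from `Type.fnCallingConventionAllowsZigTypes`
// into src/target.zig (imported here as `target_util`; the diffstat shows 11
// lines added to that file). The new call shape, with `fnCallingConvention`
// now taking the Module:
fn allowsZigTypes(mod: *Module, ty: Type, target: std.Target) bool {
    return target_util.fnCallConvAllowsZigTypes(target, ty.fnCallingConvention(mod));
}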
- if (ty.fnCallingConvention() == .Kernel and (target.cpu.arch == .nvptx or target.cpu.arch == .nvptx64)) { + if (ty.fnCallingConvention(mod) == .Kernel and (target.cpu.arch == .nvptx or target.cpu.arch == .nvptx64)) { return true; } - return !Type.fnCallingConventionAllowsZigTypes(target, ty.fnCallingConvention()); + return !target_util.fnCallConvAllowsZigTypes(target, ty.fnCallingConvention(mod)); }, .Enum => { return sema.validateExternType(try ty.intTagType(mod), position); @@ -23619,7 +23611,7 @@ fn explainWhyTypeIsNotExtern( try mod.errNoteNonLazy(src_loc, msg, "use '*const ' to make a function pointer type", .{}); return; } - switch (ty.fnCallingConvention()) { + switch (ty.fnCallingConvention(mod)) { .Unspecified => try mod.errNoteNonLazy(src_loc, msg, "extern function must specify calling convention", .{}), .Async => try mod.errNoteNonLazy(src_loc, msg, "async function cannot be extern", .{}), .Inline => try mod.errNoteNonLazy(src_loc, msg, "inline function cannot be extern", .{}), @@ -24548,10 +24540,10 @@ fn fieldCallBind( try sema.addReferencedBy(block, src, decl_idx); const decl_val = try sema.analyzeDeclVal(block, src, decl_idx); const decl_type = sema.typeOf(decl_val); - if (decl_type.zigTypeTag(mod) == .Fn and - decl_type.fnParamLen() >= 1) - { - const first_param_type = decl_type.fnParamType(0); + if (mod.typeToFunc(decl_type)) |func_type| f: { + if (func_type.param_types.len == 0) break :f; + + const first_param_type = func_type.param_types[0].toType(); // zig fmt: off if (first_param_type.isGenericPoison() or ( first_param_type.zigTypeTag(mod) == .Pointer and @@ -27090,8 +27082,9 @@ fn coerceInMemoryAllowedFns( dest_src: LazySrcLoc, src_src: LazySrcLoc, ) !InMemoryCoercionResult { - const dest_info = dest_ty.fnInfo(); - const src_info = src_ty.fnInfo(); + const mod = sema.mod; + const dest_info = mod.typeToFunc(dest_ty).?; + const src_info = mod.typeToFunc(src_ty).?; if (dest_info.is_var_args != src_info.is_var_args) { return InMemoryCoercionResult{ .fn_var_args = dest_info.is_var_args }; @@ -27108,13 +27101,13 @@ fn coerceInMemoryAllowedFns( } }; } - if (!src_info.return_type.isNoReturn()) { - const rt = try sema.coerceInMemoryAllowed(block, dest_info.return_type, src_info.return_type, false, target, dest_src, src_src); + if (src_info.return_type != .noreturn_type) { + const rt = try sema.coerceInMemoryAllowed(block, dest_info.return_type.toType(), src_info.return_type.toType(), false, target, dest_src, src_src); if (rt != .ok) { return InMemoryCoercionResult{ .fn_return_type = .{ .child = try rt.dupe(sema.arena), - .actual = src_info.return_type, - .wanted = dest_info.return_type, + .actual = src_info.return_type.toType(), + .wanted = dest_info.return_type.toType(), } }; } } @@ -27134,22 +27127,23 @@ fn coerceInMemoryAllowedFns( } for (dest_info.param_types, 0..) |dest_param_ty, i| { - const src_param_ty = src_info.param_types[i]; + const src_param_ty = src_info.param_types[i].toType(); - if (dest_info.comptime_params[i] != src_info.comptime_params[i]) { + const i_small = @intCast(u5, i); + if (dest_info.paramIsComptime(i_small) != src_info.paramIsComptime(i_small)) { return InMemoryCoercionResult{ .fn_param_comptime = .{ .index = i, - .wanted = dest_info.comptime_params[i], + .wanted = dest_info.paramIsComptime(i_small), } }; } // Note: Cast direction is reversed here. 
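// Editor's aside: the reversal below is parameter contravariance. A function
// type coerces to another when its return type coerces forward (covariant) but
// each parameter type coerces backward: callers of the destination type pass
// dest-typed arguments, which the source function's parameters must be able to
// accept. Hence src_param_ty is the coercion target and dest_param_ty the
// operand in the call that follows.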
- const param = try sema.coerceInMemoryAllowed(block, src_param_ty, dest_param_ty, false, target, dest_src, src_src); + const param = try sema.coerceInMemoryAllowed(block, src_param_ty, dest_param_ty.toType(), false, target, dest_src, src_src); if (param != .ok) { return InMemoryCoercionResult{ .fn_param = .{ .child = try param.dupe(sema.arena), .actual = src_param_ty, - .wanted = dest_param_ty, + .wanted = dest_param_ty.toType(), .index = i, } }; } @@ -31205,17 +31199,17 @@ fn resolvePeerTypes( return chosen_ty; } -pub fn resolveFnTypes(sema: *Sema, fn_info: Type.Payload.Function.Data) CompileError!void { +pub fn resolveFnTypes(sema: *Sema, fn_info: InternPool.Key.FuncType) CompileError!void { const mod = sema.mod; - try sema.resolveTypeFully(fn_info.return_type); + try sema.resolveTypeFully(fn_info.return_type.toType()); - if (mod.comp.bin_file.options.error_return_tracing and fn_info.return_type.isError(mod)) { + if (mod.comp.bin_file.options.error_return_tracing and fn_info.return_type.toType().isError(mod)) { // Ensure the type exists so that backends can assume that. _ = try sema.getBuiltinType("StackTrace"); } for (fn_info.param_types) |param_ty| { - try sema.resolveTypeFully(param_ty); + try sema.resolveTypeFully(param_ty.toType()); } } @@ -31286,16 +31280,16 @@ pub fn resolveTypeLayout(sema: *Sema, ty: Type) CompileError!void { return sema.resolveTypeLayout(payload_ty); }, .Fn => { - const info = ty.fnInfo(); + const info = mod.typeToFunc(ty).?; if (info.is_generic) { // Resolving of generic function types is deferred to when // the function is instantiated. return; } for (info.param_types) |param_ty| { - try sema.resolveTypeLayout(param_ty); + try sema.resolveTypeLayout(param_ty.toType()); } - try sema.resolveTypeLayout(info.return_type); + try sema.resolveTypeLayout(info.return_type.toType()); }, else => {}, } @@ -31615,15 +31609,13 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .error_set_merged, => false, - .function => true, - .inferred_alloc_mut => unreachable, .inferred_alloc_const => unreachable, .pointer => { const child_ty = ty.childType(mod); if (child_ty.zigTypeTag(mod) == .Fn) { - return child_ty.fnInfo().is_generic; + return mod.typeToFunc(child_ty).?.is_generic; } else { return sema.resolveTypeRequiresComptime(child_ty); } @@ -31644,7 +31636,7 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .ptr_type => |ptr_type| { const child_ty = ptr_type.elem_type.toType(); if (child_ty.zigTypeTag(mod) == .Fn) { - return child_ty.fnInfo().is_generic; + return mod.typeToFunc(child_ty).?.is_generic; } else { return sema.resolveTypeRequiresComptime(child_ty); } @@ -31653,6 +31645,8 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .vector_type => |vector_type| return sema.resolveTypeRequiresComptime(vector_type.child.toType()), .opt_type => |child| return sema.resolveTypeRequiresComptime(child.toType()), .error_union_type => |error_union_type| return sema.resolveTypeRequiresComptime(error_union_type.payload_type.toType()), + .func_type => true, + .simple_type => |t| switch (t) { .f16, .f32, @@ -31799,16 +31793,16 @@ pub fn resolveTypeFully(sema: *Sema, ty: Type) CompileError!void { }, .ErrorUnion => return sema.resolveTypeFully(ty.errorUnionPayload()), .Fn => { - const info = ty.fnInfo(); + const info = mod.typeToFunc(ty).?; if (info.is_generic) { // Resolving of generic function types is deferred to when // the function is instantiated. 
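
A recurring simplification in this part of the patch (coerceInMemoryAllowedFns above, resolveInferredErrorSet below): predicates like isNoReturn() and isGenericPoison() become direct comparisons against well-known InternPool.Index values. Every type now has a canonical index, so identity is a plain integer compare. Both comparisons are lifted from the hunks; only the wrapper names are invented:

fn returnsNoReturn(fn_info: InternPool.Key.FuncType) bool {
    return fn_info.return_type == .noreturn_type; // was: return_type.isNoReturn()
}

fn returnIsGenericPoison(fn_info: InternPool.Key.FuncType) bool {
    return fn_info.return_type == .generic_poison_type; // was: isGenericPoison()
}
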
return; } for (info.param_types) |param_ty| { - try sema.resolveTypeFully(param_ty); + try sema.resolveTypeFully(param_ty.toType()); } - try sema.resolveTypeFully(info.return_type); + try sema.resolveTypeFully(info.return_type.toType()); }, else => {}, } @@ -31881,7 +31875,6 @@ pub fn resolveTypeFields(sema: *Sema, ty: Type) CompileError!Type { .none => return ty, .u1_type, - .u5_type, .u8_type, .i8_type, .u16_type, @@ -31941,8 +31934,8 @@ pub fn resolveTypeFields(sema: *Sema, ty: Type) CompileError!Type { .zero_u8 => unreachable, .one => unreachable, .one_usize => unreachable, - .one_u5 => unreachable, - .four_u5 => unreachable, + .one_u8 => unreachable, + .four_u8 => unreachable, .negative_one => unreachable, .calling_convention_c => unreachable, .calling_convention_inline => unreachable, @@ -32083,14 +32076,14 @@ fn resolveInferredErrorSet( // `*Module.Fn`. Not only is the function not relevant to the inferred error set // in this case, it may be a generic function which would cause an assertion failure // if we called `ensureFuncBodyAnalyzed` on it here. - const ies_func_owner_decl = sema.mod.declPtr(ies.func.owner_decl); - const ies_func_info = ies_func_owner_decl.ty.fnInfo(); + const ies_func_owner_decl = mod.declPtr(ies.func.owner_decl); + const ies_func_info = mod.typeToFunc(ies_func_owner_decl.ty).?; // if ies declared by a inline function with generic return type, the return_type should be generic_poison, // because inline function does not create a new declaration, and the ies has been filled with analyzeCall, // so here we can simply skip this case. - if (ies_func_info.return_type.isGenericPoison()) { + if (ies_func_info.return_type == .generic_poison_type) { assert(ies_func_info.cc == .Inline); - } else if (ies_func_info.return_type.errorUnionSet().castTag(.error_set_inferred).?.data == ies) { + } else if (ies_func_info.return_type.toType().errorUnionSet().castTag(.error_set_inferred).?.data == ies) { if (ies_func_info.is_generic) { const msg = msg: { const msg = try sema.errMsg(block, src, "unable to resolve inferred error set of generic function", .{}); @@ -32285,7 +32278,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void const prev_field_index = struct_obj.fields.getIndex(field_name).?; const prev_field_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{ .index = prev_field_index }); - try sema.mod.errNoteNonLazy(prev_field_src, msg, "other field here", .{}); + try mod.errNoteNonLazy(prev_field_src, msg, "other field here", .{}); try sema.errNote(&block_scope, src, msg, "struct declared here", .{}); break :msg msg; }; @@ -32387,7 +32380,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void .index = field_i, .range = .type, }); - const msg = try sema.errMsg(&block_scope, ty_src.lazy, "extern structs cannot contain fields of type '{}'", .{field.ty.fmt(sema.mod)}); + const msg = try sema.errMsg(&block_scope, ty_src.lazy, "extern structs cannot contain fields of type '{}'", .{field.ty.fmt(mod)}); errdefer msg.destroy(sema.gpa); try sema.explainWhyTypeIsNotExtern(msg, ty_src, field.ty, .struct_field); @@ -32402,7 +32395,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void .index = field_i, .range = .type, }); - const msg = try sema.errMsg(&block_scope, ty_src.lazy, "packed structs cannot contain fields of type '{}'", .{field.ty.fmt(sema.mod)}); + const msg = try sema.errMsg(&block_scope, ty_src.lazy, "packed structs cannot contain fields of type '{}'", .{field.ty.fmt(mod)}); errdefer 
msg.destroy(sema.gpa); try sema.explainWhyTypeIsNotPacked(msg, ty_src, field.ty); @@ -32580,7 +32573,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { // The provided type is an integer type and we must construct the enum tag type here. int_tag_ty = provided_ty; if (int_tag_ty.zigTypeTag(mod) != .Int and int_tag_ty.zigTypeTag(mod) != .ComptimeInt) { - return sema.fail(&block_scope, tag_ty_src, "expected integer tag type, found '{}'", .{int_tag_ty.fmt(sema.mod)}); + return sema.fail(&block_scope, tag_ty_src, "expected integer tag type, found '{}'", .{int_tag_ty.fmt(mod)}); } if (fields_len > 0) { @@ -32590,7 +32583,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { const msg = try sema.errMsg(&block_scope, tag_ty_src, "specified integer tag type cannot represent every field", .{}); errdefer msg.destroy(sema.gpa); try sema.errNote(&block_scope, tag_ty_src, msg, "type '{}' cannot fit values in range 0...{d}", .{ - int_tag_ty.fmt(sema.mod), + int_tag_ty.fmt(mod), fields_len - 1, }); break :msg msg; @@ -32605,7 +32598,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { union_obj.tag_ty = provided_ty; const enum_type = switch (mod.intern_pool.indexToKey(union_obj.tag_ty.ip_index)) { .enum_type => |x| x, - else => return sema.fail(&block_scope, tag_ty_src, "expected enum tag type, found '{}'", .{union_obj.tag_ty.fmt(sema.mod)}), + else => return sema.fail(&block_scope, tag_ty_src, "expected enum tag type, found '{}'", .{union_obj.tag_ty.fmt(mod)}), }; // The fields of the union must match the enum exactly. // A flag per field is used to check for missing and extraneous fields. @@ -32705,7 +32698,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { const field_src = mod.fieldSrcLoc(union_obj.owner_decl, .{ .index = field_i }).lazy; const other_field_src = mod.fieldSrcLoc(union_obj.owner_decl, .{ .index = gop.index }).lazy; const msg = msg: { - const msg = try sema.errMsg(&block_scope, field_src, "enum tag value {} already taken", .{copied_val.fmtValue(int_tag_ty, sema.mod)}); + const msg = try sema.errMsg(&block_scope, field_src, "enum tag value {} already taken", .{copied_val.fmtValue(int_tag_ty, mod)}); errdefer msg.destroy(gpa); try sema.errNote(&block_scope, other_field_src, msg, "other occurrence here", .{}); break :msg msg; @@ -32751,7 +32744,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { const prev_field_index = union_obj.fields.getIndex(field_name).?; const prev_field_src = mod.fieldSrcLoc(union_obj.owner_decl, .{ .index = prev_field_index }).lazy; - try sema.mod.errNoteNonLazy(prev_field_src.toSrcLoc(decl, mod), msg, "other field here", .{}); + try mod.errNoteNonLazy(prev_field_src.toSrcLoc(decl, mod), msg, "other field here", .{}); try sema.errNote(&block_scope, src, msg, "union declared here", .{}); break :msg msg; }; @@ -32766,7 +32759,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { .range = .type, }).lazy; const msg = try sema.errMsg(&block_scope, ty_src, "no field named '{s}' in enum '{}'", .{ - field_name, union_obj.tag_ty.fmt(sema.mod), + field_name, union_obj.tag_ty.fmt(mod), }); errdefer msg.destroy(sema.gpa); try sema.addDeclaredHereNote(msg, union_obj.tag_ty); @@ -32800,7 +32793,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { .index = field_i, .range = .type, }); - const msg = try sema.errMsg(&block_scope, ty_src.lazy, "extern unions cannot 
contain fields of type '{}'", .{field_ty.fmt(sema.mod)}); + const msg = try sema.errMsg(&block_scope, ty_src.lazy, "extern unions cannot contain fields of type '{}'", .{field_ty.fmt(mod)}); errdefer msg.destroy(sema.gpa); try sema.explainWhyTypeIsNotExtern(msg, ty_src, field_ty, .union_field); @@ -32815,7 +32808,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { .index = field_i, .range = .type, }); - const msg = try sema.errMsg(&block_scope, ty_src.lazy, "packed unions cannot contain fields of type '{}'", .{field_ty.fmt(sema.mod)}); + const msg = try sema.errMsg(&block_scope, ty_src.lazy, "packed unions cannot contain fields of type '{}'", .{field_ty.fmt(mod)}); errdefer msg.destroy(sema.gpa); try sema.explainWhyTypeIsNotPacked(msg, ty_src, field_ty); @@ -33060,7 +33053,6 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .error_set, .error_set_merged, .error_union, - .function, .error_set_inferred, .anyframe_T, .pointer, @@ -33087,7 +33079,12 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { return null; } }, - .ptr_type => null, + + .ptr_type, + .error_union_type, + .func_type, + => null, + .array_type => |array_type| { if (array_type.len == 0) return Value.initTag(.empty_array); @@ -33102,13 +33099,13 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { return null; }, .opt_type => |child| { - if (child.toType().isNoReturn()) { - return Value.null; + if (child == .noreturn_type) { + return try mod.nullValue(ty); } else { return null; } }, - .error_union_type => null, + .simple_type => |t| switch (t) { .f16, .f32, @@ -33674,15 +33671,13 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .error_set_merged, => false, - .function => true, - .inferred_alloc_mut => unreachable, .inferred_alloc_const => unreachable, .pointer => { const child_ty = ty.childType(mod); if (child_ty.zigTypeTag(mod) == .Fn) { - return child_ty.fnInfo().is_generic; + return mod.typeToFunc(child_ty).?.is_generic; } else { return sema.typeRequiresComptime(child_ty); } @@ -33703,7 +33698,7 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .ptr_type => |ptr_type| { const child_ty = ptr_type.elem_type.toType(); if (child_ty.zigTypeTag(mod) == .Fn) { - return child_ty.fnInfo().is_generic; + return mod.typeToFunc(child_ty).?.is_generic; } else { return sema.typeRequiresComptime(child_ty); } @@ -33714,6 +33709,8 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .error_union_type => |error_union_type| { return sema.typeRequiresComptime(error_union_type.payload_type.toType()); }, + .func_type => true, + .simple_type => |t| return switch (t) { .f16, .f32, @@ -33870,7 +33867,8 @@ fn unionFieldAlignment(sema: *Sema, field: Module.Union.Field) !u32 { /// Synchronize logic with `Type.isFnOrHasRuntimeBits`. 
pub fn fnHasRuntimeBits(sema: *Sema, ty: Type) CompileError!bool { - const fn_info = ty.fnInfo(); + const mod = sema.mod; + const fn_info = mod.typeToFunc(ty).?; if (fn_info.is_generic) return false; if (fn_info.is_var_args) return true; switch (fn_info.cc) { @@ -33878,7 +33876,7 @@ pub fn fnHasRuntimeBits(sema: *Sema, ty: Type) CompileError!bool { .Inline => return false, else => {}, } - if (try sema.typeRequiresComptime(fn_info.return_type)) { + if (try sema.typeRequiresComptime(fn_info.return_type.toType())) { return false; } return true; diff --git a/src/Zir.zig b/src/Zir.zig index 136920d75d..ec3288620c 100644 --- a/src/Zir.zig +++ b/src/Zir.zig @@ -2052,7 +2052,6 @@ pub const Inst = struct { /// and `[]Ref`. pub const Ref = enum(u32) { u1_type = @enumToInt(InternPool.Index.u1_type), - u5_type = @enumToInt(InternPool.Index.u5_type), u8_type = @enumToInt(InternPool.Index.u8_type), i8_type = @enumToInt(InternPool.Index.i8_type), u16_type = @enumToInt(InternPool.Index.u16_type), @@ -2121,8 +2120,8 @@ pub const Inst = struct { zero_u8 = @enumToInt(InternPool.Index.zero_u8), one = @enumToInt(InternPool.Index.one), one_usize = @enumToInt(InternPool.Index.one_usize), - one_u5 = @enumToInt(InternPool.Index.one_u5), - four_u5 = @enumToInt(InternPool.Index.four_u5), + one_u8 = @enumToInt(InternPool.Index.one_u8), + four_u8 = @enumToInt(InternPool.Index.four_u8), negative_one = @enumToInt(InternPool.Index.negative_one), calling_convention_c = @enumToInt(InternPool.Index.calling_convention_c), calling_convention_inline = @enumToInt(InternPool.Index.calling_convention_inline), diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index 3e893411fc..dea5b63129 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -472,7 +472,7 @@ pub fn addExtraAssumeCapacity(self: *Self, extra: anytype) u32 { fn gen(self: *Self) !void { const mod = self.bin_file.options.module.?; - const cc = self.fn_type.fnCallingConvention(); + const cc = self.fn_type.fnCallingConvention(mod); if (cc != .Naked) { // stp fp, lr, [sp, #-16]! 
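
The per-backend hunks from here on all make the same mechanical change: fnCallingConvention and fnReturnType now take the Module, because function type information lives in its InternPool rather than in a Type payload. The resulting shape at the top of each backend's gen(), using only calls that appear in these hunks (the sketch function name is invented):

fn genPrologueSketch(self: *Self) !void {
    const mod = self.bin_file.options.module.?;
    const cc = self.fn_type.fnCallingConvention(mod);
    const ret_ty = self.fn_type.fnReturnType(mod);
    _ = cc;
    _ = ret_ty;
}
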
_ = try self.addInst(.{ @@ -1146,7 +1146,7 @@ fn airRetPtr(self: *Self, inst: Air.Inst.Index) !void { .stack_offset => blk: { // self.ret_mcv is an address to where this function // should store its result into - const ret_ty = self.fn_type.fnReturnType(); + const ret_ty = self.fn_type.fnReturnType(mod); const ptr_ty = try mod.singleMutPtrType(ret_ty); // addr_reg will contain the address of where to store the @@ -4271,7 +4271,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier if (info.return_value == .stack_offset) { log.debug("airCall: return by reference", .{}); - const ret_ty = fn_ty.fnReturnType(); + const ret_ty = fn_ty.fnReturnType(mod); const ret_abi_size = @intCast(u32, ret_ty.abiSize(mod)); const ret_abi_align = @intCast(u32, ret_ty.abiAlignment(mod)); const stack_offset = try self.allocMem(ret_abi_size, ret_abi_align, inst); @@ -4428,10 +4428,10 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier } fn airRet(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); - const ret_ty = self.fn_type.fnReturnType(); - const mod = self.bin_file.options.module.?; + const ret_ty = self.fn_type.fnReturnType(mod); switch (self.ret_mcv) { .none => {}, @@ -4460,10 +4460,11 @@ fn airRet(self: *Self, inst: Air.Inst.Index) !void { } fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const un_op = self.air.instructions.items(.data)[inst].un_op; const ptr = try self.resolveInst(un_op); const ptr_ty = self.typeOf(un_op); - const ret_ty = self.fn_type.fnReturnType(); + const ret_ty = self.fn_type.fnReturnType(mod); switch (self.ret_mcv) { .none => {}, @@ -4483,7 +4484,6 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void { // location. const op_inst = Air.refToIndex(un_op).?; if (self.air.instructions.items(.tag)[op_inst] != .ret_ptr) { - const mod = self.bin_file.options.module.?; const abi_size = @intCast(u32, ret_ty.abiSize(mod)); const abi_align = ret_ty.abiAlignment(mod); @@ -6226,12 +6226,11 @@ const CallMCValues = struct { /// Caller must call `CallMCValues.deinit`. fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { - const cc = fn_ty.fnCallingConvention(); - const param_types = try self.gpa.alloc(Type, fn_ty.fnParamLen()); - defer self.gpa.free(param_types); - fn_ty.fnParamTypes(param_types); + const mod = self.bin_file.options.module.?; + const fn_info = mod.typeToFunc(fn_ty).?; + const cc = fn_info.cc; var result: CallMCValues = .{ - .args = try self.gpa.alloc(MCValue, param_types.len), + .args = try self.gpa.alloc(MCValue, fn_info.param_types.len), // These undefined values must be populated before returning from this function. .return_value = undefined, .stack_byte_count = undefined, @@ -6239,8 +6238,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { }; errdefer self.gpa.free(result.args); - const ret_ty = fn_ty.fnReturnType(); - const mod = self.bin_file.options.module.?; + const ret_ty = fn_ty.fnReturnType(mod); switch (cc) { .Naked => { @@ -6271,8 +6269,8 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { } } - for (param_types, 0..) |ty, i| { - const param_size = @intCast(u32, ty.abiSize(mod)); + for (fn_info.param_types, 0..) 
|ty, i| { + const param_size = @intCast(u32, ty.toType().abiSize(mod)); if (param_size == 0) { result.args[i] = .{ .none = {} }; continue; @@ -6280,14 +6278,14 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { // We round up NCRN only for non-Apple platforms which allow the 16-byte aligned // values to spread across odd-numbered registers. - if (ty.abiAlignment(mod) == 16 and !self.target.isDarwin()) { + if (ty.toType().abiAlignment(mod) == 16 and !self.target.isDarwin()) { // Round up NCRN to the next even number ncrn += ncrn % 2; } if (std.math.divCeil(u32, param_size, 8) catch unreachable <= 8 - ncrn) { if (param_size <= 8) { - result.args[i] = .{ .register = self.registerAlias(c_abi_int_param_regs[ncrn], ty) }; + result.args[i] = .{ .register = self.registerAlias(c_abi_int_param_regs[ncrn], ty.toType()) }; ncrn += 1; } else { return self.fail("TODO MCValues with multiple registers", .{}); @@ -6298,7 +6296,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { ncrn = 8; // TODO Apple allows the arguments on the stack to be non-8-byte aligned provided // that the entire stack space consumed by the arguments is 8-byte aligned. - if (ty.abiAlignment(mod) == 8) { + if (ty.toType().abiAlignment(mod) == 8) { if (nsaa % 8 != 0) { nsaa += 8 - (nsaa % 8); } @@ -6336,10 +6334,10 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { var stack_offset: u32 = 0; - for (param_types, 0..) |ty, i| { - if (ty.abiSize(mod) > 0) { - const param_size = @intCast(u32, ty.abiSize(mod)); - const param_alignment = ty.abiAlignment(mod); + for (fn_info.param_types, 0..) |ty, i| { + if (ty.toType().abiSize(mod) > 0) { + const param_size = @intCast(u32, ty.toType().abiSize(mod)); + const param_alignment = ty.toType().abiAlignment(mod); stack_offset = std.mem.alignForwardGeneric(u32, stack_offset, param_alignment); result.args[i] = .{ .stack_argument_offset = stack_offset }; diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index 5cc165fdfe..e84c4de981 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -478,7 +478,7 @@ pub fn addExtraAssumeCapacity(self: *Self, extra: anytype) u32 { fn gen(self: *Self) !void { const mod = self.bin_file.options.module.?; - const cc = self.fn_type.fnCallingConvention(); + const cc = self.fn_type.fnCallingConvention(mod); if (cc != .Naked) { // push {fp, lr} const push_reloc = try self.addNop(); @@ -1123,7 +1123,7 @@ fn airRetPtr(self: *Self, inst: Air.Inst.Index) !void { .stack_offset => blk: { // self.ret_mcv is an address to where this function // should store its result into - const ret_ty = self.fn_type.fnReturnType(); + const ret_ty = self.fn_type.fnReturnType(mod); const ptr_ty = try mod.singleMutPtrType(ret_ty); // addr_reg will contain the address of where to store the @@ -4250,7 +4250,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier // untouched by the parameter passing code const r0_lock: ?RegisterLock = if (info.return_value == .stack_offset) blk: { log.debug("airCall: return by reference", .{}); - const ret_ty = fn_ty.fnReturnType(); + const ret_ty = fn_ty.fnReturnType(mod); const ret_abi_size = @intCast(u32, ret_ty.abiSize(mod)); const ret_abi_align = @intCast(u32, ret_ty.abiAlignment(mod)); const stack_offset = try self.allocMem(ret_abi_size, ret_abi_align, inst); @@ -4350,7 +4350,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier if (RegisterManager.indexOfRegIntoTracked(reg) == null) { 
// Save function return value into a tracked register log.debug("airCall: copying {} as it is not tracked", .{reg}); - const new_reg = try self.copyToTmpRegister(fn_ty.fnReturnType(), info.return_value); + const new_reg = try self.copyToTmpRegister(fn_ty.fnReturnType(mod), info.return_value); break :result MCValue{ .register = new_reg }; } }, @@ -4374,10 +4374,10 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier } fn airRet(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); - const ret_ty = self.fn_type.fnReturnType(); - const mod = self.bin_file.options.module.?; + const ret_ty = self.fn_type.fnReturnType(mod); switch (self.ret_mcv) { .none => {}, @@ -4406,10 +4406,11 @@ fn airRet(self: *Self, inst: Air.Inst.Index) !void { } fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const un_op = self.air.instructions.items(.data)[inst].un_op; const ptr = try self.resolveInst(un_op); const ptr_ty = self.typeOf(un_op); - const ret_ty = self.fn_type.fnReturnType(); + const ret_ty = self.fn_type.fnReturnType(mod); switch (self.ret_mcv) { .none => {}, @@ -4429,7 +4430,6 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void { // location. const op_inst = Air.refToIndex(un_op).?; if (self.air.instructions.items(.tag)[op_inst] != .ret_ptr) { - const mod = self.bin_file.options.module.?; const abi_size = @intCast(u32, ret_ty.abiSize(mod)); const abi_align = ret_ty.abiAlignment(mod); @@ -6171,12 +6171,11 @@ const CallMCValues = struct { /// Caller must call `CallMCValues.deinit`. fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { - const cc = fn_ty.fnCallingConvention(); - const param_types = try self.gpa.alloc(Type, fn_ty.fnParamLen()); - defer self.gpa.free(param_types); - fn_ty.fnParamTypes(param_types); + const mod = self.bin_file.options.module.?; + const fn_info = mod.typeToFunc(fn_ty).?; + const cc = fn_info.cc; var result: CallMCValues = .{ - .args = try self.gpa.alloc(MCValue, param_types.len), + .args = try self.gpa.alloc(MCValue, fn_info.param_types.len), // These undefined values must be populated before returning from this function. .return_value = undefined, .stack_byte_count = undefined, @@ -6184,8 +6183,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { }; errdefer self.gpa.free(result.args); - const ret_ty = fn_ty.fnReturnType(); - const mod = self.bin_file.options.module.?; + const ret_ty = fn_ty.fnReturnType(mod); switch (cc) { .Naked => { @@ -6219,11 +6217,11 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { } } - for (param_types, 0..) |ty, i| { - if (ty.abiAlignment(mod) == 8) + for (fn_info.param_types, 0..) 
|ty, i| { + if (ty.toType().abiAlignment(mod) == 8) ncrn = std.mem.alignForwardGeneric(usize, ncrn, 2); - const param_size = @intCast(u32, ty.abiSize(mod)); + const param_size = @intCast(u32, ty.toType().abiSize(mod)); if (std.math.divCeil(u32, param_size, 4) catch unreachable <= 4 - ncrn) { if (param_size <= 4) { result.args[i] = .{ .register = c_abi_int_param_regs[ncrn] }; @@ -6235,7 +6233,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { return self.fail("TODO MCValues split between registers and stack", .{}); } else { ncrn = 4; - if (ty.abiAlignment(mod) == 8) + if (ty.toType().abiAlignment(mod) == 8) nsaa = std.mem.alignForwardGeneric(u32, nsaa, 8); result.args[i] = .{ .stack_argument_offset = nsaa }; @@ -6269,10 +6267,10 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { var stack_offset: u32 = 0; - for (param_types, 0..) |ty, i| { - if (ty.abiSize(mod) > 0) { - const param_size = @intCast(u32, ty.abiSize(mod)); - const param_alignment = ty.abiAlignment(mod); + for (fn_info.param_types, 0..) |ty, i| { + if (ty.toType().abiSize(mod) > 0) { + const param_size = @intCast(u32, ty.toType().abiSize(mod)); + const param_alignment = ty.toType().abiAlignment(mod); stack_offset = std.mem.alignForwardGeneric(u32, stack_offset, param_alignment); result.args[i] = .{ .stack_argument_offset = stack_offset }; diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index 5cf621488e..faa2b2b7d0 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -347,7 +347,8 @@ pub fn addExtraAssumeCapacity(self: *Self, extra: anytype) u32 { } fn gen(self: *Self) !void { - const cc = self.fn_type.fnCallingConvention(); + const mod = self.bin_file.options.module.?; + const cc = self.fn_type.fnCallingConvention(mod); if (cc != .Naked) { // TODO Finish function prologue and epilogue for riscv64. @@ -1803,7 +1804,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier } fn ret(self: *Self, mcv: MCValue) !void { - const ret_ty = self.fn_type.fnReturnType(); + const mod = self.bin_file.options.module.?; + const ret_ty = self.fn_type.fnReturnType(mod); try self.setRegOrMem(ret_ty, self.ret_mcv, mcv); // Just add space for an instruction, patch this later const index = try self.addInst(.{ @@ -2621,12 +2623,11 @@ const CallMCValues = struct { /// Caller must call `CallMCValues.deinit`. fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { - const cc = fn_ty.fnCallingConvention(); - const param_types = try self.gpa.alloc(Type, fn_ty.fnParamLen()); - defer self.gpa.free(param_types); - fn_ty.fnParamTypes(param_types); + const mod = self.bin_file.options.module.?; + const fn_info = mod.typeToFunc(fn_ty).?; + const cc = fn_info.cc; var result: CallMCValues = .{ - .args = try self.gpa.alloc(MCValue, param_types.len), + .args = try self.gpa.alloc(MCValue, fn_info.param_types.len), // These undefined values must be populated before returning from this function. 
.return_value = undefined, .stack_byte_count = undefined, @@ -2634,8 +2635,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { }; errdefer self.gpa.free(result.args); - const ret_ty = fn_ty.fnReturnType(); - const mod = self.bin_file.options.module.?; + const ret_ty = fn_ty.fnReturnType(mod); switch (cc) { .Naked => { @@ -2655,8 +2655,8 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { var next_stack_offset: u32 = 0; const argument_registers = [_]Register{ .a0, .a1, .a2, .a3, .a4, .a5, .a6, .a7 }; - for (param_types, 0..) |ty, i| { - const param_size = @intCast(u32, ty.abiSize(mod)); + for (fn_info.param_types, 0..) |ty, i| { + const param_size = @intCast(u32, ty.toType().abiSize(mod)); if (param_size <= 8) { if (next_register < argument_registers.len) { result.args[i] = .{ .register = argument_registers[next_register] }; diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index 0677b72f1a..9d58dd9f29 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -363,7 +363,8 @@ pub fn generate( } fn gen(self: *Self) !void { - const cc = self.fn_type.fnCallingConvention(); + const mod = self.bin_file.options.module.?; + const cc = self.fn_type.fnCallingConvention(mod); if (cc != .Naked) { // TODO Finish function prologue and epilogue for sparc64. @@ -4458,12 +4459,11 @@ fn realStackOffset(off: u32) u32 { /// Caller must call `CallMCValues.deinit`. fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView) !CallMCValues { - const cc = fn_ty.fnCallingConvention(); - const param_types = try self.gpa.alloc(Type, fn_ty.fnParamLen()); - defer self.gpa.free(param_types); - fn_ty.fnParamTypes(param_types); + const mod = self.bin_file.options.module.?; + const fn_info = mod.typeToFunc(fn_ty).?; + const cc = fn_info.cc; var result: CallMCValues = .{ - .args = try self.gpa.alloc(MCValue, param_types.len), + .args = try self.gpa.alloc(MCValue, fn_info.param_types.len), // These undefined values must be populated before returning from this function. .return_value = undefined, .stack_byte_count = undefined, @@ -4471,8 +4471,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView) }; errdefer self.gpa.free(result.args); - const ret_ty = fn_ty.fnReturnType(); - const mod = self.bin_file.options.module.?; + const ret_ty = fn_ty.fnReturnType(mod); switch (cc) { .Naked => { @@ -4495,8 +4494,8 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView) .callee => abi.c_abi_int_param_regs_callee_view, }; - for (param_types, 0..) |ty, i| { - const param_size = @intCast(u32, ty.abiSize(mod)); + for (fn_info.param_types, 0..) 
|ty, i| { + const param_size = @intCast(u32, ty.toType().abiSize(mod)); if (param_size <= 8) { if (next_register < argument_registers.len) { result.args[i] = .{ .register = argument_registers[next_register] }; @@ -4580,7 +4579,8 @@ fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue { } fn ret(self: *Self, mcv: MCValue) !void { - const ret_ty = self.fn_type.fnReturnType(); + const mod = self.bin_file.options.module.?; + const ret_ty = self.fn_type.fnReturnType(mod); try self.setRegOrMem(ret_ty, self.ret_mcv, mcv); // Just add space for a branch instruction, patch this later diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index 6ae5163714..a950264840 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -1145,7 +1145,7 @@ fn ensureAllocLocal(func: *CodeGen, ty: Type) InnerError!WValue { fn genFunctype( gpa: Allocator, cc: std.builtin.CallingConvention, - params: []const Type, + params: []const InternPool.Index, return_type: Type, mod: *Module, ) !wasm.Type { @@ -1170,7 +1170,8 @@ fn genFunctype( } // param types - for (params) |param_type| { + for (params) |param_type_ip| { + const param_type = param_type_ip.toType(); if (!param_type.hasRuntimeBitsIgnoreComptime(mod)) continue; switch (cc) { @@ -1234,9 +1235,9 @@ pub fn generate( } fn genFunc(func: *CodeGen) InnerError!void { - const fn_info = func.decl.ty.fnInfo(); const mod = func.bin_file.base.options.module.?; - var func_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types, fn_info.return_type, mod); + const fn_info = mod.typeToFunc(func.decl.ty).?; + var func_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types, fn_info.return_type.toType(), mod); defer func_type.deinit(func.gpa); _ = try func.bin_file.storeDeclType(func.decl_index, func_type); @@ -1345,10 +1346,8 @@ const CallWValues = struct { fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWValues { const mod = func.bin_file.base.options.module.?; - const cc = fn_ty.fnCallingConvention(); - const param_types = try func.gpa.alloc(Type, fn_ty.fnParamLen()); - defer func.gpa.free(param_types); - fn_ty.fnParamTypes(param_types); + const fn_info = mod.typeToFunc(fn_ty).?; + const cc = fn_info.cc; var result: CallWValues = .{ .args = &.{}, .return_value = .none, @@ -1360,8 +1359,7 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV // Check if we store the result as a pointer to the stack rather than // by value - const fn_info = fn_ty.fnInfo(); - if (firstParamSRet(fn_info.cc, fn_info.return_type, mod)) { + if (firstParamSRet(fn_info.cc, fn_info.return_type.toType(), mod)) { // the sret arg will be passed as first argument, therefore we // set the `return_value` before allocating locals for regular args. 
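
Each backend's resolveCallingConventionValues also loses a heap allocation in this patch: instead of gpa.alloc(Type, fn_ty.fnParamLen()) followed by a fnParamTypes copy, it borrows the interned fn_info.param_types slice and converts per element. Condensed from the aarch64/arm/riscv64/sparc64/wasm hunks, with names as in the patch:

const fn_info = mod.typeToFunc(fn_ty).?;
const cc = fn_info.cc;
for (fn_info.param_types, 0..) |ty_ip, i| {
    // Elements are InternPool indices; materialize a Type only where needed.
    const param_size = @intCast(u32, ty_ip.toType().abiSize(mod));
    _ = i;
    _ = param_size;
}
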
result.return_value = .{ .local = .{ .value = func.local_index, .references = 1 } }; @@ -1370,8 +1368,8 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV switch (cc) { .Unspecified => { - for (param_types) |ty| { - if (!ty.hasRuntimeBitsIgnoreComptime(mod)) { + for (fn_info.param_types) |ty| { + if (!ty.toType().hasRuntimeBitsIgnoreComptime(mod)) { continue; } @@ -1380,8 +1378,8 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV } }, .C => { - for (param_types) |ty| { - const ty_classes = abi.classifyType(ty, mod); + for (fn_info.param_types) |ty| { + const ty_classes = abi.classifyType(ty.toType(), mod); for (ty_classes) |class| { if (class == .none) continue; try args.append(.{ .local = .{ .value = func.local_index, .references = 1 } }); @@ -2095,11 +2093,11 @@ fn genBody(func: *CodeGen, body: []const Air.Inst.Index) InnerError!void { } fn airRet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const un_op = func.air.instructions.items(.data)[inst].un_op; const operand = try func.resolveInst(un_op); - const fn_info = func.decl.ty.fnInfo(); - const ret_ty = fn_info.return_type; - const mod = func.bin_file.base.options.module.?; + const fn_info = mod.typeToFunc(func.decl.ty).?; + const ret_ty = fn_info.return_type.toType(); // result must be stored in the stack and we return a pointer // to the stack instead @@ -2146,8 +2144,8 @@ fn airRetPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { break :result try func.allocStack(Type.usize); // create pointer to void } - const fn_info = func.decl.ty.fnInfo(); - if (firstParamSRet(fn_info.cc, fn_info.return_type, mod)) { + const fn_info = mod.typeToFunc(func.decl.ty).?; + if (firstParamSRet(fn_info.cc, fn_info.return_type.toType(), mod)) { break :result func.return_value; } @@ -2163,12 +2161,12 @@ fn airRetLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const operand = try func.resolveInst(un_op); const ret_ty = func.typeOf(un_op).childType(mod); - const fn_info = func.decl.ty.fnInfo(); + const fn_info = mod.typeToFunc(func.decl.ty).?; if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) { if (ret_ty.isError(mod)) { try func.addImm32(0); } - } else if (!firstParamSRet(fn_info.cc, fn_info.return_type, mod)) { + } else if (!firstParamSRet(fn_info.cc, fn_info.return_type.toType(), mod)) { // leave on the stack _ = try func.load(operand, ret_ty, 0); } @@ -2191,9 +2189,9 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif .Pointer => ty.childType(mod), else => unreachable, }; - const ret_ty = fn_ty.fnReturnType(); - const fn_info = fn_ty.fnInfo(); - const first_param_sret = firstParamSRet(fn_info.cc, fn_info.return_type, mod); + const ret_ty = fn_ty.fnReturnType(mod); + const fn_info = mod.typeToFunc(fn_ty).?; + const first_param_sret = firstParamSRet(fn_info.cc, fn_info.return_type.toType(), mod); const callee: ?Decl.Index = blk: { const func_val = (try func.air.value(pl_op.operand, mod)) orelse break :blk null; @@ -2203,8 +2201,8 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif break :blk function.data.owner_decl; } else if (func_val.castTag(.extern_fn)) |extern_fn| { const ext_decl = mod.declPtr(extern_fn.data.owner_decl); - const ext_info = ext_decl.ty.fnInfo(); - var func_type = try genFunctype(func.gpa, ext_info.cc, ext_info.param_types, ext_info.return_type, mod); + const ext_info = mod.typeToFunc(ext_decl.ty).?; + var func_type = try 
genFunctype(func.gpa, ext_info.cc, ext_info.param_types, ext_info.return_type.toType(), mod); defer func_type.deinit(func.gpa); const atom_index = try func.bin_file.getOrCreateAtomForDecl(extern_fn.data.owner_decl); const atom = func.bin_file.getAtomPtr(atom_index); @@ -2235,7 +2233,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif const arg_ty = func.typeOf(arg); if (!arg_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; - try func.lowerArg(fn_ty.fnInfo().cc, arg_ty, arg_val); + try func.lowerArg(mod.typeToFunc(fn_ty).?.cc, arg_ty, arg_val); } if (callee) |direct| { @@ -2248,7 +2246,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif const operand = try func.resolveInst(pl_op.operand); try func.emitWValue(operand); - var fn_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types, fn_info.return_type, mod); + var fn_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types, fn_info.return_type.toType(), mod); defer fn_type.deinit(func.gpa); const fn_type_index = try func.bin_file.putOrGetFuncType(fn_type); @@ -2264,7 +2262,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif } else if (first_param_sret) { break :result_value sret; // TODO: Make this less fragile and optimize - } else if (fn_ty.fnInfo().cc == .C and ret_ty.zigTypeTag(mod) == .Struct or ret_ty.zigTypeTag(mod) == .Union) { + } else if (mod.typeToFunc(fn_ty).?.cc == .C and ret_ty.zigTypeTag(mod) == .Struct or ret_ty.zigTypeTag(mod) == .Union) { const result_local = try func.allocLocal(ret_ty); try func.addLabel(.local_set, result_local.local.value); const scalar_type = abi.scalarType(ret_ty, mod); @@ -2528,7 +2526,7 @@ fn airArg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const mod = func.bin_file.base.options.module.?; const arg_index = func.arg_index; const arg = func.args[arg_index]; - const cc = func.decl.ty.fnInfo().cc; + const cc = mod.typeToFunc(func.decl.ty).?.cc; const arg_ty = func.typeOfIndex(inst); if (cc == .C) { const arg_classes = abi.classifyType(arg_ty, mod); @@ -2647,9 +2645,9 @@ fn binOpBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) Inner } switch (op) { - .mul => return func.callIntrinsic("__multi3", &.{ ty, ty }, ty, &.{ lhs, rhs }), - .shr => return func.callIntrinsic("__lshrti3", &.{ ty, Type.i32 }, ty, &.{ lhs, rhs }), - .shl => return func.callIntrinsic("__ashlti3", &.{ ty, Type.i32 }, ty, &.{ lhs, rhs }), + .mul => return func.callIntrinsic("__multi3", &.{ ty.toIntern(), ty.toIntern() }, ty, &.{ lhs, rhs }), + .shr => return func.callIntrinsic("__lshrti3", &.{ ty.toIntern(), .i32_type }, ty, &.{ lhs, rhs }), + .shl => return func.callIntrinsic("__ashlti3", &.{ ty.toIntern(), .i32_type }, ty, &.{ lhs, rhs }), .xor => { const result = try func.allocStack(ty); try func.emitWValue(result); @@ -2839,7 +2837,7 @@ fn floatOp(func: *CodeGen, float_op: FloatOp, ty: Type, args: []const WValue) In }; // fma requires three operands - var param_types_buffer: [3]Type = .{ ty, ty, ty }; + var param_types_buffer: [3]InternPool.Index = .{ ty.ip_index, ty.ip_index, ty.ip_index }; const param_types = param_types_buffer[0..args.len]; return func.callIntrinsic(fn_name, param_types, ty, args); } @@ -5298,7 +5296,7 @@ fn fpext(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerError! 
// call __extendhfsf2(f16) f32 const f32_result = try func.callIntrinsic( "__extendhfsf2", - &.{Type.f16}, + &.{.f16_type}, Type.f32, &.{operand}, ); @@ -5316,7 +5314,7 @@ fn fpext(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerError! target_util.compilerRtFloatAbbrev(wanted_bits), }) catch unreachable; - return func.callIntrinsic(fn_name, &.{given}, wanted, &.{operand}); + return func.callIntrinsic(fn_name, &.{given.ip_index}, wanted, &.{operand}); } fn airFptrunc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { @@ -5347,7 +5345,7 @@ fn fptrunc(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerErro } else operand; // call __truncsfhf2(f32) f16 - return func.callIntrinsic("__truncsfhf2", &.{Type.f32}, Type.f16, &.{op}); + return func.callIntrinsic("__truncsfhf2", &.{.f32_type}, Type.f16, &.{op}); } var fn_name_buf: [12]u8 = undefined; @@ -5356,7 +5354,7 @@ fn fptrunc(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerErro target_util.compilerRtFloatAbbrev(wanted_bits), }) catch unreachable; - return func.callIntrinsic(fn_name, &.{given}, wanted, &.{operand}); + return func.callIntrinsic(fn_name, &.{given.ip_index}, wanted, &.{operand}); } fn airErrUnionPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { @@ -5842,7 +5840,7 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const bin_op = try func.callIntrinsic( "__multi3", - &[_]Type{Type.i64} ** 4, + &[_]InternPool.Index{.i64_type} ** 4, Type.i128, &.{ lhs, lhs_shifted, rhs, rhs_shifted }, ); @@ -5866,19 +5864,19 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const mul1 = try func.callIntrinsic( "__multi3", - &[_]Type{Type.i64} ** 4, + &[_]InternPool.Index{.i64_type} ** 4, Type.i128, &.{ lhs_lsb, zero, rhs_msb, zero }, ); const mul2 = try func.callIntrinsic( "__multi3", - &[_]Type{Type.i64} ** 4, + &[_]InternPool.Index{.i64_type} ** 4, Type.i128, &.{ rhs_lsb, zero, lhs_msb, zero }, ); const mul3 = try func.callIntrinsic( "__multi3", - &[_]Type{Type.i64} ** 4, + &[_]InternPool.Index{.i64_type} ** 4, Type.i128, &.{ lhs_msb, zero, rhs_msb, zero }, ); @@ -5977,7 +5975,7 @@ fn airMulAdd(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { // call to compiler-rt `fn fmaf(f32, f32, f32) f32` var result = try func.callIntrinsic( "fmaf", - &.{ Type.f32, Type.f32, Type.f32 }, + &.{ .f32_type, .f32_type, .f32_type }, Type.f32, &.{ rhs_ext, lhs_ext, addend_ext }, ); @@ -6707,7 +6705,7 @@ fn airShlSat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { fn callIntrinsic( func: *CodeGen, name: []const u8, - param_types: []const Type, + param_types: []const InternPool.Index, return_type: Type, args: []const WValue, ) InnerError!WValue { @@ -6735,8 +6733,8 @@ fn callIntrinsic( // Lower all arguments to the stack before we call our function for (args, 0..) 
|arg, arg_i| { assert(!(want_sret_param and arg == .stack)); - assert(param_types[arg_i].hasRuntimeBitsIgnoreComptime(mod)); - try func.lowerArg(.C, param_types[arg_i], arg); + assert(param_types[arg_i].toType().hasRuntimeBitsIgnoreComptime(mod)); + try func.lowerArg(.C, param_types[arg_i].toType(), arg); } // Actually call our intrinsic @@ -6938,7 +6936,7 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 { try writer.writeByte(std.wasm.opcode(.end)); const slice_ty = Type.const_slice_u8_sentinel_0; - const func_type = try genFunctype(arena, .Unspecified, &.{int_tag_ty}, slice_ty, mod); + const func_type = try genFunctype(arena, .Unspecified, &.{int_tag_ty.ip_index}, slice_ty, mod); return func.bin_file.createFunction(func_name, func_type, &body_list, &relocs); } diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 30c3248360..149f872c9a 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -26,6 +26,7 @@ const Liveness = @import("../../Liveness.zig"); const Lower = @import("Lower.zig"); const Mir = @import("Mir.zig"); const Module = @import("../../Module.zig"); +const InternPool = @import("../../InternPool.zig"); const Target = std.Target; const Type = @import("../../type.zig").Type; const TypedValue = @import("../../TypedValue.zig"); @@ -697,7 +698,8 @@ pub fn generate( FrameAlloc.init(.{ .size = 0, .alignment = 1 }), ); - var call_info = function.resolveCallingConventionValues(fn_type, &.{}, .args_frame) catch |err| switch (err) { + const fn_info = mod.typeToFunc(fn_type).?; + var call_info = function.resolveCallingConventionValues(fn_info, &.{}, .args_frame) catch |err| switch (err) { error.CodegenFail => return Result{ .fail = function.err_msg.? }, error.OutOfRegisters => return Result{ .fail = try ErrorMsg.create( @@ -1566,7 +1568,7 @@ fn asmMemoryRegisterImmediate( fn gen(self: *Self) InnerError!void { const mod = self.bin_file.options.module.?; - const cc = self.fn_type.fnCallingConvention(); + const cc = self.fn_type.fnCallingConvention(mod); if (cc != .Naked) { try self.asmRegister(.{ ._, .push }, .rbp); const backpatch_push_callee_preserved_regs = try self.asmPlaceholder(); @@ -8042,7 +8044,9 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier else => unreachable, }; - var info = try self.resolveCallingConventionValues(fn_ty, args[fn_ty.fnParamLen()..], .call_frame); + const fn_info = mod.typeToFunc(fn_ty).?; + + var info = try self.resolveCallingConventionValues(fn_info, args[fn_info.param_types.len..], .call_frame); defer info.deinit(self); // We need a properly aligned and sized call frame to be able to call this function. 
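
With callIntrinsic's signature changed above to take []const InternPool.Index, callsites across the wasm backend spell parameter lists as indices: well-known types become enum literals (.i32_type, .f32_type, ...) and everything else goes through toIntern() or .ip_index. A representative callsite, mirroring the binOpBigInt shift hunk earlier; only the wrapper name is invented:

fn shrBigIntSketch(func: *CodeGen, ty: Type, lhs: WValue, rhs: WValue) InnerError!WValue {
    // "__lshrti3" takes the 128-bit operand plus an i32 shift amount; the
    // parameter list is now a slice of InternPool indices.
    return func.callIntrinsic("__lshrti3", &.{ ty.toIntern(), .i32_type }, ty, &.{ lhs, rhs });
}
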
@@ -8083,7 +8087,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const ret_lock = switch (info.return_value.long) { .none, .unreach => null, .indirect => |reg_off| lock: { - const ret_ty = fn_ty.fnReturnType(); + const ret_ty = fn_info.return_type.toType(); const frame_index = try self.allocFrameIndex(FrameAlloc.initType(ret_ty, mod)); try self.genSetReg(reg_off.reg, Type.usize, .{ .lea_frame = .{ .index = frame_index, .off = -reg_off.off }, @@ -8199,9 +8203,10 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier } fn airRet(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); - const ret_ty = self.fn_type.fnReturnType(); + const ret_ty = self.fn_type.fnReturnType(mod); switch (self.ret_mcv.short) { .none => {}, .register => try self.genCopy(ret_ty, self.ret_mcv.short, operand), @@ -11683,18 +11688,23 @@ const CallMCValues = struct { /// Caller must call `CallMCValues.deinit`. fn resolveCallingConventionValues( self: *Self, - fn_ty: Type, + fn_info: InternPool.Key.FuncType, var_args: []const Air.Inst.Ref, stack_frame_base: FrameIndex, ) !CallMCValues { const mod = self.bin_file.options.module.?; - const cc = fn_ty.fnCallingConvention(); - const param_len = fn_ty.fnParamLen(); - const param_types = try self.gpa.alloc(Type, param_len + var_args.len); + const cc = fn_info.cc; + const param_types = try self.gpa.alloc(Type, fn_info.param_types.len + var_args.len); defer self.gpa.free(param_types); - fn_ty.fnParamTypes(param_types); + + for (param_types[0..fn_info.param_types.len], fn_info.param_types) |*dest, src| { + dest.* = src.toType(); + } // TODO: promote var arg types - for (param_types[param_len..], var_args) |*param_ty, arg| param_ty.* = self.typeOf(arg); + for (param_types[fn_info.param_types.len..], var_args) |*param_ty, arg| { + param_ty.* = self.typeOf(arg); + } + var result: CallMCValues = .{ .args = try self.gpa.alloc(MCValue, param_types.len), // These undefined values must be populated before returning from this function. @@ -11704,7 +11714,7 @@ fn resolveCallingConventionValues( }; errdefer self.gpa.free(result.args); - const ret_ty = fn_ty.fnReturnType(); + const ret_ty = fn_info.return_type.toType(); switch (cc) { .Naked => { diff --git a/src/codegen.zig b/src/codegen.zig index 90b6bfccf2..9eb294feac 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -1081,7 +1081,7 @@ fn genDeclRef( // TODO this feels clunky. Perhaps we should check for it in `genTypedValue`? 
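
On x86_64 the refactor goes one step further, as the hunk above shows: resolveCallingConventionValues now receives the resolved InternPool.Key.FuncType instead of a Type, so generate and airCall each call mod.typeToFunc exactly once, and var-args slots are counted off fn_info.param_types.len. A sketch of the new callsite shape (wrapper name invented, calls as in the hunk):

fn airCallSketch(self: *Self, fn_ty: Type, args: []const Air.Inst.Ref) !void {
    const mod = self.bin_file.options.module.?;
    const fn_info = mod.typeToFunc(fn_ty).?;
    var info = try self.resolveCallingConventionValues(
        fn_info,
        args[fn_info.param_types.len..], // trailing entries are the var-args tail
        .call_frame,
    );
    defer info.deinit(self);
}
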
if (tv.ty.castPtrToFn(mod)) |fn_ty| { - if (fn_ty.fnInfo().is_generic) { + if (mod.typeToFunc(fn_ty).?.is_generic) { return GenResult.mcv(.{ .immediate = fn_ty.abiAlignment(mod) }); } } else if (tv.ty.zigTypeTag(mod) == .Pointer) { diff --git a/src/codegen/c.zig b/src/codegen/c.zig index f45c178223..601382c1fd 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -1507,7 +1507,7 @@ pub const DeclGen = struct { const fn_decl = mod.declPtr(fn_decl_index); const fn_cty_idx = try dg.typeToIndex(fn_decl.ty, kind); - const fn_info = fn_decl.ty.fnInfo(); + const fn_info = mod.typeToFunc(fn_decl.ty).?; if (fn_info.cc == .Naked) { switch (kind) { .forward => try w.writeAll("zig_naked_decl "), @@ -1517,7 +1517,7 @@ pub const DeclGen = struct { } if (fn_decl.val.castTag(.function)) |func_payload| if (func_payload.data.is_cold) try w.writeAll("zig_cold "); - if (fn_info.return_type.ip_index == .noreturn_type) try w.writeAll("zig_noreturn "); + if (fn_info.return_type == .noreturn_type) try w.writeAll("zig_noreturn "); const trailing = try renderTypePrefix( dg.decl_index, @@ -3455,7 +3455,7 @@ fn airRet(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValue { } else { try reap(f, inst, &.{un_op}); // Not even allowed to return void in a naked function. - if (if (f.object.dg.decl) |decl| decl.ty.fnCallingConvention() != .Naked else true) + if (if (f.object.dg.decl) |decl| decl.ty.fnCallingConvention(mod) != .Naked else true) try writer.writeAll("return;\n"); } return .none; @@ -4094,7 +4094,7 @@ fn airCall( ) !CValue { const mod = f.object.dg.module; // Not even allowed to call panic in a naked function. - if (f.object.dg.decl) |decl| if (decl.ty.fnCallingConvention() == .Naked) return .none; + if (f.object.dg.decl) |decl| if (decl.ty.fnCallingConvention(mod) == .Naked) return .none; const gpa = f.object.dg.gpa; const writer = f.object.writer(); @@ -4143,7 +4143,7 @@ fn airCall( else => unreachable, }; - const ret_ty = fn_ty.fnReturnType(); + const ret_ty = fn_ty.fnReturnType(mod); const lowered_ret_ty = try lowerFnRetTy(ret_ty, mod); const result_local = result: { @@ -4622,8 +4622,9 @@ fn airFence(f: *Function, inst: Air.Inst.Index) !CValue { } fn airUnreach(f: *Function) !CValue { + const mod = f.object.dg.module; // Not even allowed to call unreachable in a naked function. 
- if (f.object.dg.decl) |decl| if (decl.ty.fnCallingConvention() == .Naked) return .none; + if (f.object.dg.decl) |decl| if (decl.ty.fnCallingConvention(mod) == .Naked) return .none; try f.object.writer().writeAll("zig_unreachable();\n"); return .none; diff --git a/src/codegen/c/type.zig b/src/codegen/c/type.zig index b51d81a30b..a2af395a98 100644 --- a/src/codegen/c/type.zig +++ b/src/codegen/c/type.zig @@ -1720,7 +1720,7 @@ pub const CType = extern union { .Opaque => self.init(.void), .Fn => { - const info = ty.fnInfo(); + const info = mod.typeToFunc(ty).?; if (!info.is_generic) { if (lookup.isMutable()) { const param_kind: Kind = switch (kind) { @@ -1728,10 +1728,10 @@ pub const CType = extern union { .complete, .parameter, .global => .parameter, .payload => unreachable, }; - _ = try lookup.typeToIndex(info.return_type, param_kind); + _ = try lookup.typeToIndex(info.return_type.toType(), param_kind); for (info.param_types) |param_type| { - if (!param_type.hasRuntimeBitsIgnoreComptime(mod)) continue; - _ = try lookup.typeToIndex(param_type, param_kind); + if (!param_type.toType().hasRuntimeBitsIgnoreComptime(mod)) continue; + _ = try lookup.typeToIndex(param_type.toType(), param_kind); } } self.init(if (info.is_var_args) .varargs_function else .function); @@ -2013,7 +2013,7 @@ pub const CType = extern union { .function, .varargs_function, => { - const info = ty.fnInfo(); + const info = mod.typeToFunc(ty).?; assert(!info.is_generic); const param_kind: Kind = switch (kind) { .forward, .forward_parameter => .forward_parameter, @@ -2023,21 +2023,21 @@ pub const CType = extern union { var c_params_len: usize = 0; for (info.param_types) |param_type| { - if (!param_type.hasRuntimeBitsIgnoreComptime(mod)) continue; + if (!param_type.toType().hasRuntimeBitsIgnoreComptime(mod)) continue; c_params_len += 1; } const params_pl = try arena.alloc(Index, c_params_len); var c_param_i: usize = 0; for (info.param_types) |param_type| { - if (!param_type.hasRuntimeBitsIgnoreComptime(mod)) continue; - params_pl[c_param_i] = store.set.typeToIndex(param_type, mod, param_kind).?; + if (!param_type.toType().hasRuntimeBitsIgnoreComptime(mod)) continue; + params_pl[c_param_i] = store.set.typeToIndex(param_type.toType(), mod, param_kind).?; c_param_i += 1; } const fn_pl = try arena.create(Payload.Function); fn_pl.* = .{ .base = .{ .tag = t }, .data = .{ - .return_type = store.set.typeToIndex(info.return_type, mod, param_kind).?, + .return_type = store.set.typeToIndex(info.return_type.toType(), mod, param_kind).?, .param_types = params_pl, } }; return initPayload(fn_pl); @@ -2145,7 +2145,7 @@ pub const CType = extern union { => { if (ty.zigTypeTag(mod) != .Fn) return false; - const info = ty.fnInfo(); + const info = mod.typeToFunc(ty).?; assert(!info.is_generic); const data = cty.cast(Payload.Function).?.data; const param_kind: Kind = switch (self.kind) { @@ -2154,18 +2154,18 @@ pub const CType = extern union { .payload => unreachable, }; - if (!self.eqlRecurse(info.return_type, data.return_type, param_kind)) + if (!self.eqlRecurse(info.return_type.toType(), data.return_type, param_kind)) return false; var c_param_i: usize = 0; for (info.param_types) |param_type| { - if (!param_type.hasRuntimeBitsIgnoreComptime(mod)) continue; + if (!param_type.toType().hasRuntimeBitsIgnoreComptime(mod)) continue; if (c_param_i >= data.param_types.len) return false; const param_cty = data.param_types[c_param_i]; c_param_i += 1; - if (!self.eqlRecurse(param_type, param_cty, param_kind)) + if (!self.eqlRecurse(param_type.toType(), 
param_cty, param_kind)) return false; } return c_param_i == data.param_types.len; @@ -2258,7 +2258,7 @@ pub const CType = extern union { .function, .varargs_function, => { - const info = ty.fnInfo(); + const info = mod.typeToFunc(ty).?; assert(!info.is_generic); const param_kind: Kind = switch (self.kind) { .forward, .forward_parameter => .forward_parameter, @@ -2266,10 +2266,10 @@ pub const CType = extern union { .payload => unreachable, }; - self.updateHasherRecurse(hasher, info.return_type, param_kind); + self.updateHasherRecurse(hasher, info.return_type.toType(), param_kind); for (info.param_types) |param_type| { - if (!param_type.hasRuntimeBitsIgnoreComptime(mod)) continue; - self.updateHasherRecurse(hasher, param_type, param_kind); + if (!param_type.toType().hasRuntimeBitsIgnoreComptime(mod)) continue; + self.updateHasherRecurse(hasher, param_type.toType(), param_kind); } }, diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 3289d389b4..476f73cbe4 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -954,17 +954,17 @@ pub const Object = struct { builder.positionBuilderAtEnd(entry_block); // This gets the LLVM values from the function and stores them in `dg.args`. - const fn_info = decl.ty.fnInfo(); + const fn_info = mod.typeToFunc(decl.ty).?; const sret = firstParamSRet(fn_info, mod); const ret_ptr = if (sret) llvm_func.getParam(0) else null; const gpa = dg.gpa; - if (ccAbiPromoteInt(fn_info.cc, mod, fn_info.return_type)) |s| switch (s) { + if (ccAbiPromoteInt(fn_info.cc, mod, fn_info.return_type.toType())) |s| switch (s) { .signed => dg.addAttr(llvm_func, 0, "signext"), .unsigned => dg.addAttr(llvm_func, 0, "zeroext"), }; - const err_return_tracing = fn_info.return_type.isError(mod) and + const err_return_tracing = fn_info.return_type.toType().isError(mod) and mod.comp.bin_file.options.error_return_tracing; const err_ret_trace = if (err_return_tracing) @@ -986,7 +986,7 @@ pub const Object = struct { .byval => { assert(!it.byval_attr); const param_index = it.zig_index - 1; - const param_ty = fn_info.param_types[param_index]; + const param_ty = fn_info.param_types[param_index].toType(); const param = llvm_func.getParam(llvm_arg_i); try args.ensureUnusedCapacity(1); @@ -1005,7 +1005,7 @@ pub const Object = struct { llvm_arg_i += 1; }, .byref => { - const param_ty = fn_info.param_types[it.zig_index - 1]; + const param_ty = fn_info.param_types[it.zig_index - 1].toType(); const param_llvm_ty = try dg.lowerType(param_ty); const param = llvm_func.getParam(llvm_arg_i); const alignment = param_ty.abiAlignment(mod); @@ -1024,7 +1024,7 @@ pub const Object = struct { } }, .byref_mut => { - const param_ty = fn_info.param_types[it.zig_index - 1]; + const param_ty = fn_info.param_types[it.zig_index - 1].toType(); const param_llvm_ty = try dg.lowerType(param_ty); const param = llvm_func.getParam(llvm_arg_i); const alignment = param_ty.abiAlignment(mod); @@ -1044,7 +1044,7 @@ pub const Object = struct { }, .abi_sized_int => { assert(!it.byval_attr); - const param_ty = fn_info.param_types[it.zig_index - 1]; + const param_ty = fn_info.param_types[it.zig_index - 1].toType(); const param = llvm_func.getParam(llvm_arg_i); llvm_arg_i += 1; @@ -1071,7 +1071,7 @@ pub const Object = struct { }, .slice => { assert(!it.byval_attr); - const param_ty = fn_info.param_types[it.zig_index - 1]; + const param_ty = fn_info.param_types[it.zig_index - 1].toType(); const ptr_info = param_ty.ptrInfo(mod); if (math.cast(u5, it.zig_index - 1)) |i| { @@ -1104,7 +1104,7 @@ pub const Object = struct { 
                 .multiple_llvm_types => {
                     assert(!it.byval_attr);
                     const field_types = it.llvm_types_buffer[0..it.llvm_types_len];
-                    const param_ty = fn_info.param_types[it.zig_index - 1];
+                    const param_ty = fn_info.param_types[it.zig_index - 1].toType();
                     const param_llvm_ty = try dg.lowerType(param_ty);
                     const param_alignment = param_ty.abiAlignment(mod);
                     const arg_ptr = buildAllocaInner(dg.context, builder, llvm_func, false, param_llvm_ty, param_alignment, target);
@@ -1135,7 +1135,7 @@ pub const Object = struct {
                     args.appendAssumeCapacity(casted);
                 },
                 .float_array => {
-                    const param_ty = fn_info.param_types[it.zig_index - 1];
+                    const param_ty = fn_info.param_types[it.zig_index - 1].toType();
                     const param_llvm_ty = try dg.lowerType(param_ty);
                     const param = llvm_func.getParam(llvm_arg_i);
                     llvm_arg_i += 1;
@@ -1153,7 +1153,7 @@ pub const Object = struct {
                     }
                 },
                 .i32_array, .i64_array => {
-                    const param_ty = fn_info.param_types[it.zig_index - 1];
+                    const param_ty = fn_info.param_types[it.zig_index - 1].toType();
                     const param_llvm_ty = try dg.lowerType(param_ty);
                     const param = llvm_func.getParam(llvm_arg_i);
                     llvm_arg_i += 1;
@@ -1182,7 +1182,7 @@ pub const Object = struct {
         const line_number = decl.src_line + 1;
         const is_internal_linkage = decl.val.tag() != .extern_fn and
             !mod.decl_exports.contains(decl_index);
-        const noret_bit: c_uint = if (fn_info.return_type.isNoReturn())
+        const noret_bit: c_uint = if (fn_info.return_type == .noreturn_type)
             llvm.DIFlags.NoReturn
         else
             0;
@@ -2331,26 +2331,26 @@ pub const Object = struct {
                 return full_di_ty;
             },
             .Fn => {
-                const fn_info = ty.fnInfo();
+                const fn_info = mod.typeToFunc(ty).?;
 
                 var param_di_types = std.ArrayList(*llvm.DIType).init(gpa);
                 defer param_di_types.deinit();
 
                 // Return type goes first.
-                if (fn_info.return_type.hasRuntimeBitsIgnoreComptime(mod)) {
+                if (fn_info.return_type.toType().hasRuntimeBitsIgnoreComptime(mod)) {
                     const sret = firstParamSRet(fn_info, mod);
-                    const di_ret_ty = if (sret) Type.void else fn_info.return_type;
+                    const di_ret_ty = if (sret) Type.void else fn_info.return_type.toType();
                     try param_di_types.append(try o.lowerDebugType(di_ret_ty, .full));
 
                     if (sret) {
-                        const ptr_ty = try mod.singleMutPtrType(fn_info.return_type);
+                        const ptr_ty = try mod.singleMutPtrType(fn_info.return_type.toType());
                         try param_di_types.append(try o.lowerDebugType(ptr_ty, .full));
                     }
                 } else {
                     try param_di_types.append(try o.lowerDebugType(Type.void, .full));
                 }
 
-                if (fn_info.return_type.isError(mod) and
+                if (fn_info.return_type.toType().isError(mod) and
                     o.module.comp.bin_file.options.error_return_tracing)
                 {
                     const ptr_ty = try mod.singleMutPtrType(o.getStackTraceType());
@@ -2358,13 +2358,13 @@ pub const Object = struct {
                 }
 
                 for (fn_info.param_types) |param_ty| {
-                    if (!param_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
+                    if (!param_ty.toType().hasRuntimeBitsIgnoreComptime(mod)) continue;
 
-                    if (isByRef(param_ty, mod)) {
-                        const ptr_ty = try mod.singleMutPtrType(param_ty);
+                    if (isByRef(param_ty.toType(), mod)) {
+                        const ptr_ty = try mod.singleMutPtrType(param_ty.toType());
                         try param_di_types.append(try o.lowerDebugType(ptr_ty, .full));
                     } else {
-                        try param_di_types.append(try o.lowerDebugType(param_ty, .full));
+                        try param_di_types.append(try o.lowerDebugType(param_ty.toType(), .full));
                     }
                 }
 
@@ -2565,7 +2565,7 @@ pub const DeclGen = struct {
         if (gop.found_existing) return gop.value_ptr.*;
 
         assert(decl.has_tv);
-        const fn_info = zig_fn_type.fnInfo();
+        const fn_info = mod.typeToFunc(zig_fn_type).?;
         const target = mod.getTarget();
         const sret = firstParamSRet(fn_info, mod);
@@ -2598,11 +2598,11 @@ pub const DeclGen = struct {
             dg.addArgAttr(llvm_fn, 0, "nonnull"); // Sret pointers must not be address 0
             dg.addArgAttr(llvm_fn, 0, "noalias");
 
-            const raw_llvm_ret_ty = try dg.lowerType(fn_info.return_type);
+            const raw_llvm_ret_ty = try dg.lowerType(fn_info.return_type.toType());
             llvm_fn.addSretAttr(raw_llvm_ret_ty);
         }
 
-        const err_return_tracing = fn_info.return_type.isError(mod) and
+        const err_return_tracing = fn_info.return_type.toType().isError(mod) and
             mod.comp.bin_file.options.error_return_tracing;
 
         if (err_return_tracing) {
@@ -2626,13 +2626,13 @@ pub const DeclGen = struct {
         }
 
         if (fn_info.alignment != 0) {
-            llvm_fn.setAlignment(fn_info.alignment);
+            llvm_fn.setAlignment(@intCast(c_uint, fn_info.alignment));
         }
 
         // Function attributes that are independent of analysis results of the function body.
         dg.addCommonFnAttributes(llvm_fn);
 
-        if (fn_info.return_type.isNoReturn()) {
+        if (fn_info.return_type == .noreturn_type) {
             dg.addFnAttr(llvm_fn, "noreturn");
         }
 
@@ -2645,15 +2645,15 @@ pub const DeclGen = struct {
         while (it.next()) |lowering| switch (lowering) {
             .byval => {
                 const param_index = it.zig_index - 1;
-                const param_ty = fn_info.param_types[param_index];
+                const param_ty = fn_info.param_types[param_index].toType();
                 if (!isByRef(param_ty, mod)) {
                     dg.addByValParamAttrs(llvm_fn, param_ty, param_index, fn_info, it.llvm_index - 1);
                 }
             },
             .byref => {
                 const param_ty = fn_info.param_types[it.zig_index - 1];
-                const param_llvm_ty = try dg.lowerType(param_ty);
-                const alignment = param_ty.abiAlignment(mod);
+                const param_llvm_ty = try dg.lowerType(param_ty.toType());
+                const alignment = param_ty.toType().abiAlignment(mod);
                 dg.addByRefParamAttrs(llvm_fn, it.llvm_index - 1, alignment, it.byval_attr, param_llvm_ty);
             },
             .byref_mut => {
@@ -3142,7 +3142,7 @@ pub const DeclGen = struct {
     fn lowerTypeFn(dg: *DeclGen, fn_ty: Type) Allocator.Error!*llvm.Type {
         const mod = dg.module;
-        const fn_info = fn_ty.fnInfo();
+        const fn_info = mod.typeToFunc(fn_ty).?;
         const llvm_ret_ty = try lowerFnRetTy(dg, fn_info);
 
         var llvm_params = std.ArrayList(*llvm.Type).init(dg.gpa);
@@ -3152,7 +3152,7 @@ pub const DeclGen = struct {
             try llvm_params.append(dg.context.pointerType(0));
         }
 
-        if (fn_info.return_type.isError(mod) and
+        if (fn_info.return_type.toType().isError(mod) and
             mod.comp.bin_file.options.error_return_tracing)
         {
             const ptr_ty = try mod.singleMutPtrType(dg.object.getStackTraceType());
@@ -3163,19 +3163,19 @@ pub const DeclGen = struct {
         while (it.next()) |lowering| switch (lowering) {
             .no_bits => continue,
             .byval => {
-                const param_ty = fn_info.param_types[it.zig_index - 1];
+                const param_ty = fn_info.param_types[it.zig_index - 1].toType();
                 try llvm_params.append(try dg.lowerType(param_ty));
             },
             .byref, .byref_mut => {
                 try llvm_params.append(dg.context.pointerType(0));
             },
             .abi_sized_int => {
-                const param_ty = fn_info.param_types[it.zig_index - 1];
+                const param_ty = fn_info.param_types[it.zig_index - 1].toType();
                 const abi_size = @intCast(c_uint, param_ty.abiSize(mod));
                 try llvm_params.append(dg.context.intType(abi_size * 8));
             },
             .slice => {
-                const param_ty = fn_info.param_types[it.zig_index - 1];
+                const param_ty = fn_info.param_types[it.zig_index - 1].toType();
                 var buf: Type.SlicePtrFieldTypeBuffer = undefined;
                 const ptr_ty = if (param_ty.zigTypeTag(mod) == .Optional)
                     param_ty.optionalChild(mod).slicePtrFieldType(&buf, mod)
@@ -3195,7 +3195,7 @@ pub const DeclGen = struct {
                 try llvm_params.append(dg.context.intType(16));
             },
             .float_array => |count| {
-                const param_ty = fn_info.param_types[it.zig_index - 1];
+                const param_ty = fn_info.param_types[it.zig_index - 1].toType();
                 const float_ty = try dg.lowerType(aarch64_c_abi.getFloatArrayType(param_ty, mod).?);
                 const field_count = @intCast(c_uint, count);
                 const arr_ty = float_ty.arrayType(field_count);
@@ -3223,7 +3223,7 @@ pub const DeclGen = struct {
         const mod = dg.module;
         const lower_elem_ty = switch (elem_ty.zigTypeTag(mod)) {
             .Opaque => true,
-            .Fn => !elem_ty.fnInfo().is_generic,
+            .Fn => !mod.typeToFunc(elem_ty).?.is_generic,
             .Array => elem_ty.childType(mod).hasRuntimeBitsIgnoreComptime(mod),
             else => elem_ty.hasRuntimeBitsIgnoreComptime(mod),
         };
@@ -4204,7 +4204,7 @@ pub const DeclGen = struct {
             const is_fn_body = decl.ty.zigTypeTag(mod) == .Fn;
             if ((!is_fn_body and !decl.ty.hasRuntimeBits(mod)) or
-                (is_fn_body and decl.ty.fnInfo().is_generic))
+                (is_fn_body and mod.typeToFunc(decl.ty).?.is_generic))
             {
                 return self.lowerPtrToVoid(tv.ty);
             }
@@ -4354,7 +4354,7 @@ pub const DeclGen = struct {
         llvm_fn: *llvm.Value,
         param_ty: Type,
         param_index: u32,
-        fn_info: Type.Payload.Function.Data,
+        fn_info: InternPool.Key.FuncType,
         llvm_arg_i: u32,
     ) void {
         const mod = dg.module;
@@ -4774,8 +4774,8 @@ pub const FuncGen = struct {
             .Pointer => callee_ty.childType(mod),
             else => unreachable,
         };
-        const fn_info = zig_fn_ty.fnInfo();
-        const return_type = fn_info.return_type;
+        const fn_info = mod.typeToFunc(zig_fn_ty).?;
+        const return_type = fn_info.return_type.toType();
         const llvm_fn = try self.resolveInst(pl_op.operand);
         const target = mod.getTarget();
         const sret = firstParamSRet(fn_info, mod);
@@ -4790,7 +4790,7 @@ pub const FuncGen = struct {
             break :blk ret_ptr;
         };
 
-        const err_return_tracing = fn_info.return_type.isError(mod) and
+        const err_return_tracing = return_type.isError(mod) and
             self.dg.module.comp.bin_file.options.error_return_tracing;
         if (err_return_tracing) {
             try llvm_args.append(self.err_ret_trace.?);
@@ -4971,14 +4971,14 @@ pub const FuncGen = struct {
         while (it.next()) |lowering| switch (lowering) {
             .byval => {
                 const param_index = it.zig_index - 1;
-                const param_ty = fn_info.param_types[param_index];
+                const param_ty = fn_info.param_types[param_index].toType();
                 if (!isByRef(param_ty, mod)) {
                     self.dg.addByValParamAttrs(call, param_ty, param_index, fn_info, it.llvm_index - 1);
                 }
             },
             .byref => {
                 const param_index = it.zig_index - 1;
-                const param_ty = fn_info.param_types[param_index];
+                const param_ty = fn_info.param_types[param_index].toType();
                 const param_llvm_ty = try self.dg.lowerType(param_ty);
                 const alignment = param_ty.abiAlignment(mod);
                 self.dg.addByRefParamAttrs(call, it.llvm_index - 1, alignment, it.byval_attr, param_llvm_ty);
@@ -4998,7 +4998,7 @@ pub const FuncGen = struct {
             .slice => {
                 assert(!it.byval_attr);
-                const param_ty = fn_info.param_types[it.zig_index - 1];
+                const param_ty = fn_info.param_types[it.zig_index - 1].toType();
                 const ptr_info = param_ty.ptrInfo(mod);
                 const llvm_arg_i = it.llvm_index - 2;
 
@@ -5023,7 +5023,7 @@ pub const FuncGen = struct {
             };
         }
 
-        if (return_type.isNoReturn() and attr != .AlwaysTail) {
+        if (fn_info.return_type == .noreturn_type and attr != .AlwaysTail) {
             return null;
         }
 
@@ -5088,9 +5088,9 @@ pub const FuncGen = struct {
             _ = self.builder.buildRetVoid();
             return null;
         }
-        const fn_info = self.dg.decl.ty.fnInfo();
+        const fn_info = mod.typeToFunc(self.dg.decl.ty).?;
         if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) {
-            if (fn_info.return_type.isError(mod)) {
+            if (fn_info.return_type.toType().isError(mod)) {
                 // Functions with an empty error set are emitted with an error code
                 // return type and return zero so they can be function pointers coerced
                 // to functions that return anyerror.
@@ -5135,9 +5135,9 @@ pub const FuncGen = struct {
         const un_op = self.air.instructions.items(.data)[inst].un_op;
         const ptr_ty = self.typeOf(un_op);
         const ret_ty = ptr_ty.childType(mod);
-        const fn_info = self.dg.decl.ty.fnInfo();
+        const fn_info = mod.typeToFunc(self.dg.decl.ty).?;
         if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) {
-            if (fn_info.return_type.isError(mod)) {
+            if (fn_info.return_type.toType().isError(mod)) {
                 // Functions with an empty error set are emitted with an error code
                 // return type and return zero so they can be function pointers coerced
                 // to functions that return anyerror.
@@ -6148,25 +6148,21 @@ pub const FuncGen = struct {
         defer self.gpa.free(fqn);
 
         const is_internal_linkage = !mod.decl_exports.contains(decl_index);
-        var fn_ty_pl: Type.Payload.Function = .{
-            .base = .{ .tag = .function },
-            .data = .{
-                .param_types = &.{},
-                .comptime_params = undefined,
-                .return_type = Type.void,
-                .alignment = 0,
-                .noalias_bits = 0,
-                .cc = .Unspecified,
-                .is_var_args = false,
-                .is_generic = false,
-                .is_noinline = false,
-                .align_is_generic = false,
-                .cc_is_generic = false,
-                .section_is_generic = false,
-                .addrspace_is_generic = false,
-            },
-        };
-        const fn_ty = Type.initPayload(&fn_ty_pl.base);
+        const fn_ty = try mod.funcType(.{
+            .param_types = &.{},
+            .return_type = .void_type,
+            .alignment = 0,
+            .noalias_bits = 0,
+            .comptime_bits = 0,
+            .cc = .Unspecified,
+            .is_var_args = false,
+            .is_generic = false,
+            .is_noinline = false,
+            .align_is_generic = false,
+            .cc_is_generic = false,
+            .section_is_generic = false,
+            .addrspace_is_generic = false,
+        });
         const subprogram = dib.createFunction(
             di_file.toScope(),
             decl.name,
@@ -10546,31 +10542,31 @@ fn llvmField(ty: Type, field_index: usize, mod: *Module) ?LlvmField {
     }
 }
 
-fn firstParamSRet(fn_info: Type.Payload.Function.Data, mod: *Module) bool {
-    if (!fn_info.return_type.hasRuntimeBitsIgnoreComptime(mod)) return false;
+fn firstParamSRet(fn_info: InternPool.Key.FuncType, mod: *Module) bool {
+    if (!fn_info.return_type.toType().hasRuntimeBitsIgnoreComptime(mod)) return false;
 
     const target = mod.getTarget();
     switch (fn_info.cc) {
-        .Unspecified, .Inline => return isByRef(fn_info.return_type, mod),
+        .Unspecified, .Inline => return isByRef(fn_info.return_type.toType(), mod),
         .C => switch (target.cpu.arch) {
             .mips, .mipsel => return false,
             .x86_64 => switch (target.os.tag) {
-                .windows => return x86_64_abi.classifyWindows(fn_info.return_type, mod) == .memory,
-                else => return firstParamSRetSystemV(fn_info.return_type, mod),
+                .windows => return x86_64_abi.classifyWindows(fn_info.return_type.toType(), mod) == .memory,
+                else => return firstParamSRetSystemV(fn_info.return_type.toType(), mod),
             },
-            .wasm32 => return wasm_c_abi.classifyType(fn_info.return_type, mod)[0] == .indirect,
-            .aarch64, .aarch64_be => return aarch64_c_abi.classifyType(fn_info.return_type, mod) == .memory,
-            .arm, .armeb => switch (arm_c_abi.classifyType(fn_info.return_type, mod, .ret)) {
+            .wasm32 => return wasm_c_abi.classifyType(fn_info.return_type.toType(), mod)[0] == .indirect,
+            .aarch64, .aarch64_be => return aarch64_c_abi.classifyType(fn_info.return_type.toType(), mod) == .memory,
+            .arm, .armeb => switch (arm_c_abi.classifyType(fn_info.return_type.toType(), mod, .ret)) {
                 .memory, .i64_array => return true,
                 .i32_array => |size| return size != 1,
                 .byval => return false,
             },
-            .riscv32, .riscv64 => return riscv_c_abi.classifyType(fn_info.return_type, mod) == .memory,
+            .riscv32, .riscv64 => return riscv_c_abi.classifyType(fn_info.return_type.toType(), mod) == .memory,
             else => return false, // TODO investigate C ABI for other architectures
         },
-        .SysV => return firstParamSRetSystemV(fn_info.return_type, mod),
-        .Win64 => return x86_64_abi.classifyWindows(fn_info.return_type, mod) == .memory,
-        .Stdcall => return !isScalar(mod, fn_info.return_type),
+        .SysV => return firstParamSRetSystemV(fn_info.return_type.toType(), mod),
+        .Win64 => return x86_64_abi.classifyWindows(fn_info.return_type.toType(), mod) == .memory,
+        .Stdcall => return !isScalar(mod, fn_info.return_type.toType()),
         else => return false,
     }
 }
@@ -10585,13 +10581,14 @@ fn firstParamSRetSystemV(ty: Type, mod: *Module) bool {
 /// In order to support the C calling convention, some return types need to be lowered
 /// completely differently in the function prototype to honor the C ABI, and then
 /// be effectively bitcasted to the actual return type.
-fn lowerFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm.Type {
+fn lowerFnRetTy(dg: *DeclGen, fn_info: InternPool.Key.FuncType) !*llvm.Type {
     const mod = dg.module;
-    if (!fn_info.return_type.hasRuntimeBitsIgnoreComptime(mod)) {
+    const return_type = fn_info.return_type.toType();
+    if (!return_type.hasRuntimeBitsIgnoreComptime(mod)) {
         // If the return type is an error set or an error union, then we make this
         // anyerror return type instead, so that it can be coerced into a function
         // pointer type which has anyerror as the return type.
-        if (fn_info.return_type.isError(mod)) {
+        if (return_type.isError(mod)) {
             return dg.lowerType(Type.anyerror);
         } else {
             return dg.context.voidType();
@@ -10600,61 +10597,61 @@ fn lowerFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm.Type {
     const target = mod.getTarget();
     switch (fn_info.cc) {
         .Unspecified, .Inline => {
-            if (isByRef(fn_info.return_type, mod)) {
+            if (isByRef(return_type, mod)) {
                 return dg.context.voidType();
             } else {
-                return dg.lowerType(fn_info.return_type);
+                return dg.lowerType(return_type);
             }
         },
         .C => {
             switch (target.cpu.arch) {
-                .mips, .mipsel => return dg.lowerType(fn_info.return_type),
+                .mips, .mipsel => return dg.lowerType(return_type),
                 .x86_64 => switch (target.os.tag) {
                     .windows => return lowerWin64FnRetTy(dg, fn_info),
                     else => return lowerSystemVFnRetTy(dg, fn_info),
                 },
                 .wasm32 => {
-                    if (isScalar(mod, fn_info.return_type)) {
-                        return dg.lowerType(fn_info.return_type);
+                    if (isScalar(mod, return_type)) {
+                        return dg.lowerType(return_type);
                     }
-                    const classes = wasm_c_abi.classifyType(fn_info.return_type, mod);
+                    const classes = wasm_c_abi.classifyType(return_type, mod);
                     if (classes[0] == .indirect or classes[0] == .none) {
                         return dg.context.voidType();
                     }
 
                     assert(classes[0] == .direct and classes[1] == .none);
-                    const scalar_type = wasm_c_abi.scalarType(fn_info.return_type, mod);
+                    const scalar_type = wasm_c_abi.scalarType(return_type, mod);
                     const abi_size = scalar_type.abiSize(mod);
                     return dg.context.intType(@intCast(c_uint, abi_size * 8));
                 },
                 .aarch64, .aarch64_be => {
-                    switch (aarch64_c_abi.classifyType(fn_info.return_type, mod)) {
+                    switch (aarch64_c_abi.classifyType(return_type, mod)) {
                         .memory => return dg.context.voidType(),
-                        .float_array => return dg.lowerType(fn_info.return_type),
-                        .byval => return dg.lowerType(fn_info.return_type),
+                        .float_array => return dg.lowerType(return_type),
+                        .byval => return dg.lowerType(return_type),
                         .integer => {
-                            const bit_size = fn_info.return_type.bitSize(mod);
+                            const bit_size = return_type.bitSize(mod);
                             return dg.context.intType(@intCast(c_uint, bit_size));
                         },
                         .double_integer => return dg.context.intType(64).arrayType(2),
                     }
                 },
                 .arm, .armeb => {
-                    switch (arm_c_abi.classifyType(fn_info.return_type, mod, .ret)) {
+                    switch (arm_c_abi.classifyType(return_type, mod, .ret)) {
                         .memory, .i64_array => return dg.context.voidType(),
                         .i32_array => |len| if (len == 1) {
                             return dg.context.intType(32);
                         } else {
                             return dg.context.voidType();
                         },
-                        .byval => return dg.lowerType(fn_info.return_type),
+                        .byval => return dg.lowerType(return_type),
                     }
                 },
                 .riscv32, .riscv64 => {
-                    switch (riscv_c_abi.classifyType(fn_info.return_type, mod)) {
+                    switch (riscv_c_abi.classifyType(return_type, mod)) {
                         .memory => return dg.context.voidType(),
                         .integer => {
-                            const bit_size = fn_info.return_type.bitSize(mod);
+                            const bit_size = return_type.bitSize(mod);
                             return dg.context.intType(@intCast(c_uint, bit_size));
                         },
                         .double_integer => {
@@ -10664,50 +10661,52 @@ fn lowerFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm.Type {
                             };
                             return dg.context.structType(&llvm_types_buffer, 2, .False);
                         },
-                        .byval => return dg.lowerType(fn_info.return_type),
+                        .byval => return dg.lowerType(return_type),
                     }
                 },
                 // TODO investigate C ABI for other architectures
-                else => return dg.lowerType(fn_info.return_type),
+                else => return dg.lowerType(return_type),
             }
         },
         .Win64 => return lowerWin64FnRetTy(dg, fn_info),
         .SysV => return lowerSystemVFnRetTy(dg, fn_info),
         .Stdcall => {
-            if (isScalar(mod, fn_info.return_type)) {
-                return dg.lowerType(fn_info.return_type);
+            if (isScalar(mod, return_type)) {
+                return dg.lowerType(return_type);
             } else {
                 return dg.context.voidType();
            }
        },
-        else => return dg.lowerType(fn_info.return_type),
    }
 }
 
-fn lowerWin64FnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm.Type {
+fn lowerWin64FnRetTy(dg: *DeclGen, fn_info: InternPool.Key.FuncType) !*llvm.Type {
     const mod = dg.module;
-    switch (x86_64_abi.classifyWindows(fn_info.return_type, mod)) {
+    const return_type = fn_info.return_type.toType();
+    switch (x86_64_abi.classifyWindows(return_type, mod)) {
         .integer => {
-            if (isScalar(mod, fn_info.return_type)) {
-                return dg.lowerType(fn_info.return_type);
+            if (isScalar(mod, return_type)) {
+                return dg.lowerType(return_type);
             } else {
-                const abi_size = fn_info.return_type.abiSize(mod);
+                const abi_size = return_type.abiSize(mod);
                 return dg.context.intType(@intCast(c_uint, abi_size * 8));
             }
         },
         .win_i128 => return dg.context.intType(64).vectorType(2),
         .memory => return dg.context.voidType(),
-        .sse => return dg.lowerType(fn_info.return_type),
+        .sse => return dg.lowerType(return_type),
         else => unreachable,
     }
 }
 
-fn lowerSystemVFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm.Type {
+fn lowerSystemVFnRetTy(dg: *DeclGen, fn_info: InternPool.Key.FuncType) !*llvm.Type {
     const mod = dg.module;
-    if (isScalar(mod, fn_info.return_type)) {
-        return dg.lowerType(fn_info.return_type);
+    const return_type = fn_info.return_type.toType();
+    if (isScalar(mod, return_type)) {
+        return dg.lowerType(return_type);
     }
-    const classes = x86_64_abi.classifySystemV(fn_info.return_type, mod, .ret);
+    const classes = x86_64_abi.classifySystemV(return_type, mod, .ret);
     if (classes[0] == .memory) {
         return dg.context.voidType();
     }
@@ -10748,7 +10747,7 @@ fn lowerSystemVFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm
         }
     }
     if (classes[0] == .integer and classes[1] == .none) {
-        const abi_size = fn_info.return_type.abiSize(mod);
+        const abi_size = return_type.abiSize(mod);
         return dg.context.intType(@intCast(c_uint, abi_size * 8));
     }
     return dg.context.structType(&llvm_types_buffer, llvm_types_index, .False);
@@ -10756,7 +10755,7 @@ fn lowerSystemVFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm
 
 const ParamTypeIterator = struct {
     dg: *DeclGen,
-    fn_info: Type.Payload.Function.Data,
+    fn_info: InternPool.Key.FuncType,
     zig_index: u32,
     llvm_index: u32,
     llvm_types_len: u32,
@@ -10781,7 +10780,7 @@ const ParamTypeIterator = struct {
         if (it.zig_index >= it.fn_info.param_types.len) return null;
         const ty = it.fn_info.param_types[it.zig_index];
         it.byval_attr = false;
-        return nextInner(it, ty);
+        return nextInner(it, ty.toType());
     }
 
     /// `airCall` uses this instead of `next` so that it can take into account variadic functions.
@@ -10793,7 +10792,7 @@ const ParamTypeIterator = struct {
                 return nextInner(it, fg.typeOf(args[it.zig_index]));
             }
         } else {
-            return nextInner(it, it.fn_info.param_types[it.zig_index]);
+            return nextInner(it, it.fn_info.param_types[it.zig_index].toType());
         }
     }
 
@@ -11009,7 +11008,7 @@ const ParamTypeIterator = struct {
     }
 };
 
-fn iterateParamTypes(dg: *DeclGen, fn_info: Type.Payload.Function.Data) ParamTypeIterator {
+fn iterateParamTypes(dg: *DeclGen, fn_info: InternPool.Key.FuncType) ParamTypeIterator {
     return .{
         .dg = dg,
         .fn_info = fn_info,
diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig
index 32ea975b64..777bb1cff9 100644
--- a/src/codegen/spirv.zig
+++ b/src/codegen/spirv.zig
@@ -1227,8 +1227,9 @@ pub const DeclGen = struct {
             },
             .Fn => switch (repr) {
                 .direct => {
+                    const fn_info = mod.typeToFunc(ty).?;
                     // TODO: Put this somewhere in Sema.zig
-                    if (ty.fnIsVarArgs())
+                    if (fn_info.is_var_args)
                         return self.fail("VarArgs functions are unsupported for SPIR-V", .{});
 
                     const param_ty_refs = try self.gpa.alloc(CacheRef, ty.fnParamLen());
@@ -1546,18 +1547,17 @@ pub const DeclGen = struct {
         assert(decl.ty.zigTypeTag(mod) == .Fn);
         const prototype_id = try self.resolveTypeId(decl.ty);
         try self.func.prologue.emit(self.spv.gpa, .OpFunction, .{
-            .id_result_type = try self.resolveTypeId(decl.ty.fnReturnType()),
+            .id_result_type = try self.resolveTypeId(decl.ty.fnReturnType(mod)),
             .id_result = decl_id,
             .function_control = .{}, // TODO: We can set inline here if the type requires it.
             .function_type = prototype_id,
         });
 
-        const params = decl.ty.fnParamLen();
-        var i: usize = 0;
+        const fn_info = mod.typeToFunc(decl.ty).?;
 
-        try self.args.ensureUnusedCapacity(self.gpa, params);
-        while (i < params) : (i += 1) {
-            const param_type_id = try self.resolveTypeId(decl.ty.fnParamType(i));
+        try self.args.ensureUnusedCapacity(self.gpa, fn_info.param_types.len);
+        for (fn_info.param_types) |param_type| {
+            const param_type_id = try self.resolveTypeId(param_type.toType());
             const arg_result_id = self.spv.allocId();
             try self.func.prologue.emit(self.spv.gpa, .OpFunctionParameter, .{
                 .id_result_type = param_type_id,
@@ -3338,10 +3338,10 @@ pub const DeclGen = struct {
             .Pointer => return self.fail("cannot call function pointers", .{}),
             else => unreachable,
         };
-        const fn_info = zig_fn_ty.fnInfo();
+        const fn_info = mod.typeToFunc(zig_fn_ty).?;
         const return_type = fn_info.return_type;
 
-        const result_type_id = try self.resolveTypeId(return_type);
+        const result_type_id = try self.resolveTypeId(return_type.toType());
         const result_id = self.spv.allocId();
         const callee_id = try self.resolve(pl_op.operand);
@@ -3368,11 +3368,11 @@ pub const DeclGen = struct {
             .id_ref_3 = params[0..n_params],
         });
 
-        if (return_type.isNoReturn()) {
+        if (return_type == .noreturn_type) {
             try self.func.body.emit(self.spv.gpa, .OpUnreachable, {});
         }
 
-        if (self.liveness.isUnused(inst) or !return_type.hasRuntimeBitsIgnoreComptime(mod)) {
+        if (self.liveness.isUnused(inst) or !return_type.toType().hasRuntimeBitsIgnoreComptime(mod)) {
             return null;
         }
 
diff --git a/src/link/Coff.zig b/src/link/Coff.zig
index 452356de2c..efaeebc62e 100644
--- a/src/link/Coff.zig
+++ b/src/link/Coff.zig
@@ -1430,7 +1430,7 @@ pub fn updateDeclExports(
             .x86 => std.builtin.CallingConvention.Stdcall,
             else => std.builtin.CallingConvention.C,
         };
-        const decl_cc = exported_decl.ty.fnCallingConvention();
+        const decl_cc = exported_decl.ty.fnCallingConvention(mod);
         if (decl_cc == .C and mem.eql(u8, exp.options.name, "main") and
             self.base.options.link_libc)
         {
diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig
index b9722f8c95..92ea2a15dc 100644
--- a/src/link/Dwarf.zig
+++ b/src/link/Dwarf.zig
@@ -1022,7 +1022,7 @@ pub fn initDeclState(self: *Dwarf, mod: *Module, decl_index: Module.Decl.Index)
             const decl_name_with_null = decl_name[0 .. decl_name.len + 1];
             try dbg_info_buffer.ensureUnusedCapacity(25 + decl_name_with_null.len);
 
-            const fn_ret_type = decl.ty.fnReturnType();
+            const fn_ret_type = decl.ty.fnReturnType(mod);
             const fn_ret_has_bits = fn_ret_type.hasRuntimeBits(mod);
             if (fn_ret_has_bits) {
                 dbg_info_buffer.appendAssumeCapacity(@enumToInt(AbbrevKind.subprogram));
diff --git a/src/link/SpirV.zig b/src/link/SpirV.zig
index fbdcbd5a8e..da25753b95 100644
--- a/src/link/SpirV.zig
+++ b/src/link/SpirV.zig
@@ -131,12 +131,12 @@ pub fn updateDecl(self: *SpirV, module: *Module, decl_index: Module.Decl.Index)
 
 pub fn updateDeclExports(
     self: *SpirV,
-    module: *Module,
+    mod: *Module,
     decl_index: Module.Decl.Index,
     exports: []const *Module.Export,
 ) !void {
-    const decl = module.declPtr(decl_index);
-    if (decl.val.tag() == .function and decl.ty.fnCallingConvention() == .Kernel) {
+    const decl = mod.declPtr(decl_index);
+    if (decl.val.tag() == .function and decl.ty.fnCallingConvention(mod) == .Kernel) {
         // TODO: Unify with resolveDecl in spirv.zig.
         const entry = try self.decl_link.getOrPut(decl_index);
         if (!entry.found_existing) {
diff --git a/src/target.zig b/src/target.zig
index c89f8ce92c..ac78d27c1a 100644
--- a/src/target.zig
+++ b/src/target.zig
@@ -649,3 +649,14 @@ pub fn compilerRtIntAbbrev(bits: u16) []const u8 {
         else => "o", // Non-standard
     };
 }
+
+pub fn fnCallConvAllowsZigTypes(target: std.Target, cc: std.builtin.CallingConvention) bool {
+    return switch (cc) {
+        .Unspecified, .Async, .Inline => true,
+        // For now we want to authorize PTX kernel to use zig objects, even if
+        // we end up exposing the ABI. The goal is to experiment with more
+        // integrated CPU/GPU code.
+        .Kernel => target.cpu.arch == .nvptx or target.cpu.arch == .nvptx64,
+        else => false,
+    };
+}
diff --git a/src/type.zig b/src/type.zig
index 32fa64a1ac..daf8b305cc 100644
--- a/src/type.zig
+++ b/src/type.zig
@@ -42,8 +42,6 @@ pub const Type = struct {
             .error_set_merged,
             => return .ErrorSet,
 
-            .function => return .Fn,
-
             .pointer,
             .inferred_alloc_const,
             .inferred_alloc_mut,
@@ -66,6 +64,7 @@ pub const Type = struct {
             .union_type => return .Union,
             .opaque_type => return .Opaque,
             .enum_type => return .Enum,
+            .func_type => return .Fn,
             .simple_type => |s| switch (s) {
                 .f16,
                 .f32,
@@ -344,53 +343,6 @@ pub const Type = struct {
                 return true;
             },
 
-            .function => {
-                if (b.zigTypeTag(mod) != .Fn) return false;
-
-                const a_info = a.fnInfo();
-                const b_info = b.fnInfo();
-
-                if (!a_info.return_type.isGenericPoison() and
-                    !b_info.return_type.isGenericPoison() and
-                    !eql(a_info.return_type, b_info.return_type, mod))
-                    return false;
-
-                if (a_info.is_var_args != b_info.is_var_args)
-                    return false;
-
-                if (a_info.is_generic != b_info.is_generic)
-                    return false;
-
-                if (a_info.is_noinline != b_info.is_noinline)
-                    return false;
-
-                if (a_info.noalias_bits != b_info.noalias_bits)
-                    return false;
-
-                if (!a_info.cc_is_generic and a_info.cc != b_info.cc)
-                    return false;
-
-                if (!a_info.align_is_generic and a_info.alignment != b_info.alignment)
-                    return false;
-
-                if (a_info.param_types.len != b_info.param_types.len)
-                    return false;
-
-                for (a_info.param_types, 0..) |a_param_ty, i| {
-                    const b_param_ty = b_info.param_types[i];
-                    if (a_info.comptime_params[i] != b_info.comptime_params[i])
-                        return false;
-
-                    if (a_param_ty.isGenericPoison()) continue;
-                    if (b_param_ty.isGenericPoison()) continue;
-
-                    if (!eql(a_param_ty, b_param_ty, mod))
-                        return false;
-                }
-
-                return true;
-            },
-
             .pointer,
             .inferred_alloc_const,
             .inferred_alloc_mut,
@@ -501,32 +453,6 @@ pub const Type = struct {
                 std.hash.autoHash(hasher, ies);
             },
 
-            .function => {
-                std.hash.autoHash(hasher, std.builtin.TypeId.Fn);
-
-                const fn_info = ty.fnInfo();
-                if (!fn_info.return_type.isGenericPoison()) {
-                    hashWithHasher(fn_info.return_type, hasher, mod);
-                }
-                if (!fn_info.align_is_generic) {
-                    std.hash.autoHash(hasher, fn_info.alignment);
-                }
-                if (!fn_info.cc_is_generic) {
-                    std.hash.autoHash(hasher, fn_info.cc);
-                }
-                std.hash.autoHash(hasher, fn_info.is_var_args);
-                std.hash.autoHash(hasher, fn_info.is_generic);
-                std.hash.autoHash(hasher, fn_info.is_noinline);
-                std.hash.autoHash(hasher, fn_info.noalias_bits);
-
-                std.hash.autoHash(hasher, fn_info.param_types.len);
-                for (fn_info.param_types, 0..) |param_ty, i| {
-                    std.hash.autoHash(hasher, fn_info.paramIsComptime(i));
-                    if (param_ty.isGenericPoison()) continue;
-                    hashWithHasher(param_ty, hasher, mod);
-                }
-            },
-
             .pointer,
             .inferred_alloc_const,
             .inferred_alloc_mut,
@@ -631,30 +557,6 @@ pub const Type = struct {
                 };
             },
 
-            .function => {
-                const payload = self.castTag(.function).?.data;
-                const param_types = try allocator.alloc(Type, payload.param_types.len);
-                for (payload.param_types, 0..) |param_ty, i| {
-                    param_types[i] = try param_ty.copy(allocator);
-                }
-                const other_comptime_params = payload.comptime_params[0..payload.param_types.len];
-                const comptime_params = try allocator.dupe(bool, other_comptime_params);
-                return Tag.function.create(allocator, .{
-                    .return_type = try payload.return_type.copy(allocator),
-                    .param_types = param_types,
-                    .cc = payload.cc,
-                    .alignment = payload.alignment,
-                    .is_var_args = payload.is_var_args,
-                    .is_generic = payload.is_generic,
-                    .is_noinline = payload.is_noinline,
-                    .comptime_params = comptime_params.ptr,
-                    .align_is_generic = payload.align_is_generic,
-                    .cc_is_generic = payload.cc_is_generic,
-                    .section_is_generic = payload.section_is_generic,
-                    .addrspace_is_generic = payload.addrspace_is_generic,
-                    .noalias_bits = payload.noalias_bits,
-                });
-            },
             .pointer => {
                 const payload = self.castTag(.pointer).?.data;
                 const sent: ?Value = if (payload.sentinel) |some|
@@ -766,32 +668,6 @@ pub const Type = struct {
         while (true) {
             const t = ty.tag();
             switch (t) {
-                .function => {
-                    const payload = ty.castTag(.function).?.data;
-                    try writer.writeAll("fn(");
-                    for (payload.param_types, 0..) |param_type, i| {
-                        if (i != 0) try writer.writeAll(", ");
-                        try param_type.dump("", .{}, writer);
-                    }
-                    if (payload.is_var_args) {
-                        if (payload.param_types.len != 0) {
-                            try writer.writeAll(", ");
-                        }
-                        try writer.writeAll("...");
-                    }
-                    try writer.writeAll(") ");
-                    if (payload.alignment != 0) {
-                        try writer.print("align({d}) ", .{payload.alignment});
-                    }
-                    if (payload.cc != .Unspecified) {
-                        try writer.writeAll("callconv(.");
-                        try writer.writeAll(@tagName(payload.cc));
-                        try writer.writeAll(") ");
-                    }
-                    ty = payload.return_type;
-                    continue;
-                },
-
                 .anyframe_T => {
                     const return_type = ty.castTag(.anyframe_T).?.data;
                     try writer.print("anyframe->", .{});
@@ -909,48 +785,6 @@ pub const Type = struct {
                 try writer.writeAll(")).Fn.return_type.?).ErrorUnion.error_set");
             },
 
-            .function => {
-                const fn_info = ty.fnInfo();
-                if (fn_info.is_noinline) {
-                    try writer.writeAll("noinline ");
-                }
-                try writer.writeAll("fn(");
-                for (fn_info.param_types, 0..) |param_ty, i| {
-                    if (i != 0) try writer.writeAll(", ");
-                    if (fn_info.paramIsComptime(i)) {
-                        try writer.writeAll("comptime ");
-                    }
-                    if (std.math.cast(u5, i)) |index| if (@truncate(u1, fn_info.noalias_bits >> index) != 0) {
-                        try writer.writeAll("noalias ");
-                    };
-                    if (param_ty.isGenericPoison()) {
-                        try writer.writeAll("anytype");
-                    } else {
-                        try print(param_ty, writer, mod);
-                    }
-                }
-                if (fn_info.is_var_args) {
-                    if (fn_info.param_types.len != 0) {
-                        try writer.writeAll(", ");
-                    }
-                    try writer.writeAll("...");
-                }
-                try writer.writeAll(") ");
-                if (fn_info.alignment != 0) {
-                    try writer.print("align({d}) ", .{fn_info.alignment});
-                }
-                if (fn_info.cc != .Unspecified) {
-                    try writer.writeAll("callconv(.");
-                    try writer.writeAll(@tagName(fn_info.cc));
-                    try writer.writeAll(") ");
-                }
-                if (fn_info.return_type.isGenericPoison()) {
-                    try writer.writeAll("anytype");
-                } else {
-                    try print(fn_info.return_type, writer, mod);
-                }
-            },
-
             .error_union => {
                 const error_union = ty.castTag(.error_union).?.data;
                 try print(error_union.error_set, writer, mod);
@@ -1158,6 +992,48 @@ pub const Type = struct {
                 const decl = mod.declPtr(enum_type.decl);
                 try decl.renderFullyQualifiedName(mod, writer);
             },
+            .func_type => |fn_info| {
+                if (fn_info.is_noinline) {
+                    try writer.writeAll("noinline ");
+                }
+                try writer.writeAll("fn(");
+                for (fn_info.param_types, 0..) |param_ty, i| {
+                    if (i != 0) try writer.writeAll(", ");
+                    if (std.math.cast(u5, i)) |index| {
+                        if (fn_info.paramIsComptime(index)) {
+                            try writer.writeAll("comptime ");
+                        }
+                        if (fn_info.paramIsNoalias(index)) {
+                            try writer.writeAll("noalias ");
+                        }
+                    }
+                    if (param_ty == .generic_poison_type) {
+                        try writer.writeAll("anytype");
+                    } else {
+                        try print(param_ty.toType(), writer, mod);
+                    }
+                }
+                if (fn_info.is_var_args) {
+                    if (fn_info.param_types.len != 0) {
+                        try writer.writeAll(", ");
+                    }
+                    try writer.writeAll("...");
+                }
+                try writer.writeAll(") ");
+                if (fn_info.alignment != 0) {
+                    try writer.print("align({d}) ", .{fn_info.alignment});
+                }
+                if (fn_info.cc != .Unspecified) {
+                    try writer.writeAll("callconv(.");
+                    try writer.writeAll(@tagName(fn_info.cc));
+                    try writer.writeAll(") ");
+                }
+                if (fn_info.return_type == .generic_poison_type) {
+                    try writer.writeAll("anytype");
+                } else {
+                    try print(fn_info.return_type.toType(), writer, mod);
+                }
+            },
 
             // values, not types
             .undef => unreachable,
@@ -1174,6 +1050,11 @@ pub const Type = struct {
         }
     }
 
+    pub fn toIntern(ty: Type) InternPool.Index {
+        assert(ty.ip_index != .none);
+        return ty.ip_index;
+    }
+
     pub fn toValue(self: Type, allocator: Allocator) Allocator.Error!Value {
         if (self.ip_index != .none) return self.ip_index.toValue();
         switch (self.tag()) {
@@ -1223,7 +1104,7 @@ pub const Type = struct {
                     if (ignore_comptime_only) {
                         return true;
                     } else if (ty.childType(mod).zigTypeTag(mod) == .Fn) {
-                        return !ty.childType(mod).fnInfo().is_generic;
+                        return !mod.typeToFunc(ty.childType(mod)).?.is_generic;
                     } else if (strat == .sema) {
                         return !(try strat.sema.typeRequiresComptime(ty));
                     } else {
@@ -1231,12 +1112,6 @@ pub const Type = struct {
                     }
                 },
 
-                // These are false because they are comptime-only types.
-                // These are function *bodies*, not pointers.
-                // Special exceptions have to be made when emitting functions due to
-                // this returning false.
-                .function => return false,
-
                 .optional => {
                     const child_ty = ty.optionalChild(mod);
                     if (child_ty.isNoReturn()) {
@@ -1262,7 +1137,7 @@
                     // to comptime-only types do not, with the exception of function pointers.
                     if (ignore_comptime_only) return true;
                     const child_ty = ptr_type.elem_type.toType();
-                    if (child_ty.zigTypeTag(mod) == .Fn) return !child_ty.fnInfo().is_generic;
+                    if (child_ty.zigTypeTag(mod) == .Fn) return !mod.typeToFunc(child_ty).?.is_generic;
                     if (strat == .sema) return !(try strat.sema.typeRequiresComptime(ty));
                     return !comptimeOnly(ty, mod);
                 },
@@ -1293,6 +1168,13 @@
                     }
                 },
                 .error_union_type => @panic("TODO"),
+
+                // These are function *bodies*, not pointers.
+                // They return false here because they are comptime-only types.
+                // Special exceptions have to be made when emitting functions due to
+                // this returning false.
+                .func_type => false,
+
                 .simple_type => |t| switch (t) {
                     .f16,
                     .f32,
@@ -1436,8 +1318,6 @@ pub const Type = struct {
             .error_set_single,
             .error_set_inferred,
             .error_set_merged,
-            // These are function bodies, not function pointers.
-            .function,
             .error_union,
             .anyframe_T,
             => false,
@@ -1448,12 +1328,21 @@ pub const Type = struct {
             .optional => ty.isPtrLikeOptional(mod),
         },
         else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
-            .int_type => true,
-            .ptr_type => true,
+            .int_type,
+            .ptr_type,
+            .vector_type,
+            => true,
+
+            .error_union_type,
+            .anon_struct_type,
+            .opaque_type,
+            // These are function bodies, not function pointers.
+            .func_type,
+            => false,
+
             .array_type => |array_type| array_type.child.toType().hasWellDefinedLayout(mod),
-            .vector_type => true,
             .opt_type => |child| child.toType().isPtrLikeOptional(mod),
-            .error_union_type => false,
+
             .simple_type => |t| switch (t) {
                 .f16,
                 .f32,
@@ -1509,12 +1398,10 @@ pub const Type = struct {
                 };
                 return struct_obj.layout != .Auto;
             },
-            .anon_struct_type => false,
             .union_type => |union_type| switch (union_type.runtime_tag) {
                 .none, .safety => mod.unionPtr(union_type.index).layout != .Auto,
                 .tagged => false,
             },
-            .opaque_type => false,
             .enum_type => |enum_type| switch (enum_type.tag_mode) {
                 .auto => false,
                 .explicit, .nonexhaustive => true,
@@ -1546,7 +1433,7 @@ pub const Type = struct {
     pub fn isFnOrHasRuntimeBits(ty: Type, mod: *Module) bool {
         switch (ty.zigTypeTag(mod)) {
             .Fn => {
-                const fn_info = ty.fnInfo();
+                const fn_info = mod.typeToFunc(ty).?;
                 if (fn_info.is_generic) return false;
                 if (fn_info.is_var_args) return true;
                 switch (fn_info.cc) {
@@ -1555,7 +1442,7 @@ pub const Type = struct {
                     .Inline => return false,
                     else => {},
                 }
-                if (fn_info.return_type.comptimeOnly(mod)) return false;
+                if (fn_info.return_type.toType().comptimeOnly(mod)) return false;
                 return true;
             },
             else => return ty.hasRuntimeBits(mod),
@@ -1707,13 +1594,6 @@ pub const Type = struct {
         switch (ty.ip_index) {
             .empty_struct_type => return AbiAlignmentAdvanced{ .scalar = 0 },
             .none => switch (ty.tag()) {
-                // represents machine code; not a pointer
-                .function => {
-                    const alignment = ty.castTag(.function).?.data.alignment;
-                    if (alignment != 0) return AbiAlignmentAdvanced{ .scalar = alignment };
-                    return AbiAlignmentAdvanced{ .scalar = target_util.defaultFunctionAlignment(target) };
-                },
-
                 .pointer,
                 .anyframe_T,
                 => return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) },
@@ -1753,6 +1633,13 @@
             .opt_type => return abiAlignmentAdvancedOptional(ty, mod, strat),
             .error_union_type => return abiAlignmentAdvancedErrorUnion(ty, mod, strat),
 
+            // represents machine code; not a pointer
+            .func_type => |func_type| {
+                const alignment = @intCast(u32, func_type.alignment);
+                if (alignment != 0) return AbiAlignmentAdvanced{ .scalar = alignment };
+                return AbiAlignmentAdvanced{ .scalar = target_util.defaultFunctionAlignment(target) };
+            },
+
             .simple_type => |t| switch (t) {
                 .bool,
                 .atomic_order,
@@ -2086,7 +1973,6 @@
             .empty_struct_type => return AbiSizeAdvanced{ .scalar = 0 },
 
             .none => switch (ty.tag()) {
-                .function => unreachable, // represents machine code; not a pointer
                 .inferred_alloc_const => unreachable,
                 .inferred_alloc_mut => unreachable,
@@ -2187,6 +2073,7 @@
             .opt_type => return ty.abiSizeAdvancedOptional(mod, strat),
             .error_union_type => @panic("TODO"),
+            .func_type => unreachable, // represents machine code; not a pointer
             .simple_type => |t| switch (t) {
                 .bool,
                 .atomic_order,
@@ -2408,7 +2295,6 @@
         switch (ty.ip_index) {
             .none => switch (ty.tag()) {
-                .function => unreachable, // represents machine code; not a pointer
                 .inferred_alloc_const => unreachable,
                 .inferred_alloc_mut => unreachable,
@@ -2453,6 +2339,7 @@
             },
             .opt_type => @panic("TODO"),
             .error_union_type => @panic("TODO"),
+            .func_type => unreachable, // represents machine code; not a pointer
             .simple_type => |t| switch (t) {
                 .f16 => return 16,
                 .f32 => return 32,
@@ -3271,6 +3158,7 @@
             .opt_type => unreachable,
             .error_union_type => unreachable,
+            .func_type => unreachable,
             .simple_type => unreachable, // handled via Index enum tag above
 
             .union_type => unreachable,
@@ -3356,54 +3244,22 @@
         };
     }
 
-    /// Asserts the type is a function.
-    pub fn fnParamLen(self: Type) usize {
-        return self.castTag(.function).?.data.param_types.len;
-    }
-
-    /// Asserts the type is a function. The length of the slice must be at least the length
-    /// given by `fnParamLen`.
-    pub fn fnParamTypes(self: Type, types: []Type) void {
-        const payload = self.castTag(.function).?.data;
-        @memcpy(types[0..payload.param_types.len], payload.param_types);
-    }
-
-    /// Asserts the type is a function.
-    pub fn fnParamType(self: Type, index: usize) Type {
-        switch (self.tag()) {
-            .function => {
-                const payload = self.castTag(.function).?.data;
-                return payload.param_types[index];
-            },
-
-            else => unreachable,
-        }
-    }
-
     /// Asserts the type is a function or a function pointer.
-    pub fn fnReturnType(ty: Type) Type {
-        const fn_ty = switch (ty.tag()) {
-            .pointer => ty.castTag(.pointer).?.data.pointee_type,
-            .function => ty,
-            else => unreachable,
-        };
-        return fn_ty.castTag(.function).?.data.return_type;
+    pub fn fnReturnType(ty: Type, mod: *Module) Type {
+        return fnReturnTypeIp(ty, mod.intern_pool);
     }
 
-    /// Asserts the type is a function.
-    pub fn fnCallingConvention(self: Type) std.builtin.CallingConvention {
-        return self.castTag(.function).?.data.cc;
+    pub fn fnReturnTypeIp(ty: Type, ip: InternPool) Type {
+        return switch (ip.indexToKey(ty.ip_index)) {
+            .ptr_type => |ptr_type| ip.indexToKey(ptr_type.elem_type).func_type.return_type,
+            .func_type => |func_type| func_type.return_type,
+            else => unreachable,
+        }.toType();
     }
 
     /// Asserts the type is a function.
-    pub fn fnCallingConventionAllowsZigTypes(target: Target, cc: std.builtin.CallingConvention) bool {
-        return switch (cc) {
-            .Unspecified, .Async, .Inline => true,
-            // For now we want to authorize PTX kernel to use zig objects, even if we end up exposing the ABI.
-            // The goal is to experiment with more integrated CPU/GPU code.
-            .Kernel => target.cpu.arch == .nvptx or target.cpu.arch == .nvptx64,
-            else => false,
-        };
+    pub fn fnCallingConvention(ty: Type, mod: *Module) std.builtin.CallingConvention {
+        return mod.intern_pool.indexToKey(ty.ip_index).func_type.cc;
     }
 
     pub fn isValidParamType(self: Type, mod: *const Module) bool {
@@ -3421,12 +3277,8 @@
     }
 
     /// Asserts the type is a function.
-    pub fn fnIsVarArgs(self: Type) bool {
-        return self.castTag(.function).?.data.is_var_args;
-    }
-
-    pub fn fnInfo(ty: Type) Payload.Function.Data {
-        return ty.castTag(.function).?.data;
+    pub fn fnIsVarArgs(ty: Type, mod: *Module) bool {
+        return mod.intern_pool.indexToKey(ty.ip_index).func_type.is_var_args;
     }
 
     pub fn isNumeric(ty: Type, mod: *const Module) bool {
@@ -3474,7 +3326,6 @@
             .error_set_single,
             .error_set,
             .error_set_merged,
-            .function,
             .error_set_inferred,
             .anyframe_T,
             .pointer,
@@ -3500,7 +3351,12 @@
                     return null;
                 }
             },
-            .ptr_type => return null,
+
+            .ptr_type,
+            .error_union_type,
+            .func_type,
+            => return null,
+
             .array_type => |array_type| {
                 if (array_type.len == 0)
                     return Value.initTag(.empty_array);
@@ -3514,13 +3370,13 @@
                 return null;
             },
             .opt_type => |child| {
-                if (child.toType().isNoReturn()) {
-                    return Value.null;
+                if (child == .noreturn_type) {
+                    return try mod.nullValue(ty);
                 } else {
                     return null;
                 }
             },
-            .error_union_type => return null,
+
             .simple_type => |t| switch (t) {
                 .f16,
                 .f32,
@@ -3682,9 +3538,6 @@
             .error_set_merged,
             => false,

-            // These are function bodies, not function pointers.
-            .function => true,
-
             .inferred_alloc_mut => unreachable,
             .inferred_alloc_const => unreachable,
 
@@ -3721,6 +3574,9 @@
             .vector_type => |vector_type| vector_type.child.toType().comptimeOnly(mod),
             .opt_type => |child| child.toType().comptimeOnly(mod),
             .error_union_type => |error_union_type| error_union_type.payload_type.toType().comptimeOnly(mod),
+            // These are function bodies, not function pointers.
+            .func_type => true,
+
             .simple_type => |t| switch (t) {
                 .f16,
                 .f32,
@@ -4367,6 +4223,10 @@
         return ty.ip_index == .generic_poison_type;
     }
 
+    pub fn isBoundFn(ty: Type) bool {
+        return ty.ip_index == .none and ty.tag() == .bound_fn;
+    }
+
     /// This enum does not directly correspond to `std.builtin.TypeId` because
     /// it has extra enum tags in it, as a way of using less memory. For example,
     /// even though Zig recognizes `*align(10) i32` and `*i32` both as Pointer types
@@ -4383,7 +4243,6 @@
 
         // After this, the tag requires a payload.
         pointer,
-        function,
         optional,
         error_union,
         anyframe_T,
@@ -4411,7 +4270,6 @@
                 .error_set_merged => Payload.ErrorSetMerged,
 
                 .pointer => Payload.Pointer,
-                .function => Payload.Function,
                 .error_union => Payload.ErrorUnion,
                 .error_set_single => Payload.Name,
             };
@@ -4508,36 +4366,6 @@
             data: u16,
         };
 
-        pub const Function = struct {
-            pub const base_tag = Tag.function;
-
-            base: Payload = Payload{ .tag = base_tag },
-            data: Data,
-
-            // TODO look into optimizing this memory to take fewer bytes
-            pub const Data = struct {
-                param_types: []Type,
-                comptime_params: [*]bool,
-                return_type: Type,
-                /// If zero use default target function code alignment.
-                alignment: u32,
-                noalias_bits: u32,
-                cc: std.builtin.CallingConvention,
-                is_var_args: bool,
-                is_generic: bool,
-                is_noinline: bool,
-                align_is_generic: bool,
-                cc_is_generic: bool,
-                section_is_generic: bool,
-                addrspace_is_generic: bool,
-
-                pub fn paramIsComptime(self: @This(), i: usize) bool {
-                    assert(i < self.param_types.len);
-                    return self.comptime_params[i];
-                }
-            };
-        };
-
         pub const ErrorSet = struct {
             pub const base_tag = Tag.error_set;
 
diff --git a/src/value.zig b/src/value.zig
index 50e3fc8061..35d144f912 100644
--- a/src/value.zig
+++ b/src/value.zig
@@ -602,6 +602,11 @@ pub const Value = struct {
         return result;
     }
 
+    pub fn toIntern(val: Value) InternPool.Index {
+        assert(val.ip_index != .none);
+        return val.ip_index;
+    }
+
     /// Asserts that the value is representable as a type.
     pub fn toType(self: Value) Type {
         if (self.ip_index != .none) return self.ip_index.toType();
--
cgit v1.2.3

From 7bf91fc79ac9e4eae575baf3a2ca9549bc3bf6c2 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Thu, 18 May 2023 22:02:55 -0700
Subject: compiler: eliminate legacy Type.Tag.pointer

Now pointer types are stored only in InternPool.
---
 src/InternPool.zig           |  55 ++---
 src/Module.zig               |  36 +--
 src/Sema.zig                 | 119 ++++------
 src/arch/aarch64/CodeGen.zig |   3 +-
 src/arch/arm/CodeGen.zig     |   3 +-
 src/arch/sparc64/CodeGen.zig |   3 +-
 src/arch/x86_64/CodeGen.zig  |  21 +-
 src/codegen.zig              |   9 +-
 src/codegen/c.zig            |  56 ++---
 src/codegen/c/type.zig       |   3 +-
 src/codegen/llvm.zig         | 119 ++++------
 src/codegen/spirv.zig        |   6 +-
 src/link/Dwarf.zig           |   3 +-
 src/type.zig                 | 520 ++++++++++---------------------------------
 src/value.zig                |  12 +-
 15 files changed, 295 insertions(+), 673 deletions(-)

(limited to 'src/arch')

diff --git a/src/InternPool.zig b/src/InternPool.zig
index bf48aeda84..81035bffc5 100644
--- a/src/InternPool.zig
+++ b/src/InternPool.zig
@@ -186,17 +186,11 @@ pub const Key = union(enum) {
     pub const PtrType = struct {
         elem_type: Index,
         sentinel: Index = .none,
-        /// If zero use pointee_type.abiAlignment()
-        /// When creating pointer types, if alignment is equal to pointee type
-        /// abi alignment, this value should be set to 0 instead.
-        ///
-        /// Please don't change this to u32 or u29. If you want to save bits,
-        /// migrate the rest of the codebase to use the `Alignment` type rather
-        /// than using byte units. The LLVM backend can only handle `c_uint`
-        /// byte units; we can emit a semantic analysis error if alignment that
-        /// overflows that amount is attempted to be used, but it shouldn't
-        /// affect the other backends.
-        alignment: u64 = 0,
+        /// `none` indicates the ABI alignment of the pointee_type. In this
+        /// case, this field *must* be set to `none`, otherwise the
+        /// `InternPool` equality and hashing functions will return incorrect
+        /// results.
+        alignment: Alignment = .none,
         /// If this is non-zero it means the pointer points to a sub-byte
         /// range of data, which is backed by a "host integer" with this
         /// number of bytes.
@@ -378,15 +372,11 @@ pub const Key = union(enum) {
         /// Tells whether a parameter is noalias. See `paramIsNoalias` helper
         /// method for accessing this.
         noalias_bits: u32,
-        /// If zero use default target function code alignment.
-        ///
-        /// Please don't change this to u32 or u29. If you want to save bits,
-        /// migrate the rest of the codebase to use the `Alignment` type rather
-        /// than using byte units. The LLVM backend can only handle `c_uint`
-        /// byte units; we can emit a semantic analysis error if alignment that
-        /// overflows that amount is attempted to be used, but it shouldn't
-        /// affect the other backends.
-        alignment: u64,
+        /// `none` indicates the function has the default alignment for
+        /// function code on the target. In this case, this field *must* be set
+        /// to `none`, otherwise the `InternPool` equality and hashing
+        /// functions will return incorrect results.
+        alignment: Alignment,
         cc: std.builtin.CallingConvention,
         is_var_args: bool,
         is_generic: bool,
@@ -1500,6 +1490,13 @@ pub const Alignment = enum(u6) {
     none = std.math.maxInt(u6),
     _,
 
+    pub fn toByteUnitsOptional(a: Alignment) ?u64 {
+        return switch (a) {
+            .none => null,
+            _ => @as(u64, 1) << @enumToInt(a),
+        };
+    }
+
     pub fn toByteUnits(a: Alignment, default: u64) u64 {
         return switch (a) {
             .none => default,
@@ -1509,8 +1506,14 @@ pub const Alignment = enum(u6) {
 
     pub fn fromByteUnits(n: u64) Alignment {
         if (n == 0) return .none;
+        assert(std.math.isPowerOfTwo(n));
         return @intToEnum(Alignment, @ctz(n));
     }
+
+    pub fn fromNonzeroByteUnits(n: u64) Alignment {
+        assert(n != 0);
+        return fromByteUnits(n);
+    }
 };
 
 /// Used for non-sentineled arrays that have length fitting in u32, as well as
@@ -1773,7 +1776,7 @@ pub fn indexToKey(ip: InternPool, index: Index) Key {
             return .{ .ptr_type = .{
                 .elem_type = ptr_info.child,
                 .sentinel = ptr_info.sentinel,
-                .alignment = ptr_info.flags.alignment.toByteUnits(0),
+                .alignment = ptr_info.flags.alignment,
                 .size = ptr_info.flags.size,
                 .is_const = ptr_info.flags.is_const,
                 .is_volatile = ptr_info.flags.is_volatile,
@@ -2013,7 +2016,7 @@ fn indexToKeyFuncType(ip: InternPool, data: u32) Key.FuncType {
         .return_type = type_function.data.return_type,
         .comptime_bits = type_function.data.comptime_bits,
         .noalias_bits = type_function.data.noalias_bits,
-        .alignment = type_function.data.flags.alignment.toByteUnits(0),
+        .alignment = type_function.data.flags.alignment,
         .cc = type_function.data.flags.cc,
         .is_var_args = type_function.data.flags.is_var_args,
         .is_generic = type_function.data.flags.is_generic,
@@ -2100,16 +2103,18 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
                 return @intToEnum(Index, ip.items.len - 1);
             }
 
+            const is_allowzero = ptr_type.is_allowzero or ptr_type.size == .C;
+
             ip.items.appendAssumeCapacity(.{
                 .tag = .type_pointer,
                 .data = try ip.addExtra(gpa, Pointer{
                     .child = ptr_type.elem_type,
                     .sentinel = ptr_type.sentinel,
                     .flags = .{
-                        .alignment = Alignment.fromByteUnits(ptr_type.alignment),
+                        .alignment = ptr_type.alignment,
                         .is_const = ptr_type.is_const,
                         .is_volatile = ptr_type.is_volatile,
-                        .is_allowzero = ptr_type.is_allowzero,
+                        .is_allowzero = is_allowzero,
                         .size = ptr_type.size,
                         .address_space = ptr_type.address_space,
                         .vector_index = ptr_type.vector_index,
@@ -2316,7 +2321,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
                     .comptime_bits = func_type.comptime_bits,
                     .noalias_bits = func_type.noalias_bits,
                     .flags = .{
-                        .alignment = Alignment.fromByteUnits(func_type.alignment),
+                        .alignment = func_type.alignment,
                         .cc = func_type.cc,
                         .is_var_args = func_type.is_var_args,
                         .is_generic = func_type.is_generic,
diff --git a/src/Module.zig b/src/Module.zig
index 0a063a8ddc..5cd0d237b4 100644
--- a/src/Module.zig
+++ b/src/Module.zig
@@ -6532,8 +6532,7 @@ pub fn populateTestFunctions(
         try mod.ensureDeclAnalyzed(decl_index);
     }
     const decl = mod.declPtr(decl_index);
-    var buf: Type.SlicePtrFieldTypeBuffer = undefined;
-    const tmp_test_fn_ty = decl.ty.slicePtrFieldType(&buf, mod).childType(mod);
+    const tmp_test_fn_ty = decl.ty.slicePtrFieldType(mod).childType(mod);
 
     const array_decl_index = d: {
         // Add mod.test_functions to an array decl then make the test_functions
@@ -6843,28 +6842,31 @@ pub fn ptrType(mod: *Module, info: InternPool.Key.PtrType) Allocator.Error!Type
 }
 
 pub fn singleMutPtrType(mod: *Module, child_type: Type) Allocator.Error!Type {
-    if (child_type.ip_index == .none) {
-        // TODO remove this after all types can be represented via the InternPool
-        return Type.Tag.pointer.create(mod.tmp_hack_arena.allocator(), .{
-            .pointee_type = child_type,
-            .@"addrspace" = .generic,
-        });
-    }
     return ptrType(mod, .{ .elem_type = child_type.ip_index });
 }
 
 pub fn singleConstPtrType(mod: *Module, child_type: Type) Allocator.Error!Type {
-    if (child_type.ip_index == .none) {
-        // TODO remove this after all types can be represented via the InternPool
-        return Type.Tag.pointer.create(mod.tmp_hack_arena.allocator(), .{
-            .pointee_type = child_type,
-            .mutable = false,
-            .@"addrspace" = .generic,
-        });
-    }
     return ptrType(mod, .{ .elem_type = child_type.ip_index, .is_const = true });
 }
 
+pub fn adjustPtrTypeChild(mod: *Module, ptr_ty: Type, new_child: Type) Allocator.Error!Type {
+    const info = ptr_ty.ptrInfoIp(mod.intern_pool);
+    return mod.ptrType(.{
+        .elem_type = new_child.toIntern(),
+
+        .sentinel = info.sentinel,
+        .alignment = info.alignment,
+        .host_size = info.host_size,
+        .bit_offset = info.bit_offset,
+        .vector_index = info.vector_index,
+        .size = info.size,
+        .is_const = info.is_const,
+        .is_volatile = info.is_volatile,
+        .is_allowzero = info.is_allowzero,
+        .address_space = info.address_space,
+    });
+}
+
 pub fn funcType(mod: *Module, info: InternPool.Key.FuncType) Allocator.Error!Type {
     return (try intern(mod, .{ .func_type = info })).toType();
 }
diff --git a/src/Sema.zig b/src/Sema.zig
index 8492fd441f..74efe9d141 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -9163,7 +9163,7 @@ fn funcCommon(
         .return_type = return_type.toIntern(),
         .cc = cc_resolved,
         .cc_is_generic = cc == null,
-        .alignment = alignment orelse 0,
+        .alignment = if (alignment) |a| InternPool.Alignment.fromByteUnits(a) else .none,
         .align_is_generic = alignment == null,
         .section_is_generic = section == .generic,
         .addrspace_is_generic = address_space == null,
@@ -17740,10 +17740,10 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
         extra_i += 1;
         const coerced = try sema.coerce(block, elem_ty, try sema.resolveInst(ref), sentinel_src);
         const val = try sema.resolveConstValue(block, sentinel_src, coerced, "pointer sentinel value must be comptime-known");
-        break :blk val;
-    } else null;
+        break :blk val.toIntern();
+    } else .none;
 
-    const abi_align: u32 = if (inst_data.flags.has_align) blk: {
+    const abi_align: InternPool.Alignment = if (inst_data.flags.has_align) blk: {
         const ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_i]);
         extra_i += 1;
         const coerced = try sema.coerce(block, Type.u32, try sema.resolveInst(ref), align_src);
@@ -17752,13 +17752,13 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
         // which case we can make this 0 without resolving it.
         if (val.castTag(.lazy_align)) |payload| {
             if (payload.data.eql(elem_ty, sema.mod)) {
-                break :blk 0;
+                break :blk .none;
             }
         }
         const abi_align = @intCast(u32, (try val.getUnsignedIntAdvanced(mod, sema)).?);
         try sema.validateAlign(block, align_src, abi_align);
-        break :blk abi_align;
-    } else 0;
+        break :blk InternPool.Alignment.fromByteUnits(abi_align);
+    } else .none;
 
     const address_space: std.builtin.AddressSpace = if (inst_data.flags.has_addrspace) blk: {
         const ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_i]);
@@ -17789,7 +17789,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
             return sema.fail(block, elem_ty_src, "function pointers must be single pointers", .{});
         }
         const fn_align = mod.typeToFunc(elem_ty).?.alignment;
-        if (inst_data.flags.has_align and abi_align != 0 and fn_align != 0 and
+        if (inst_data.flags.has_align and abi_align != .none and fn_align != .none and
             abi_align != fn_align)
         {
             return sema.fail(block, align_src, "function pointer alignment disagrees with function alignment", .{});
@@ -17815,16 +17815,16 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
         }
     }
 
-    const ty = try Type.ptr(sema.arena, sema.mod, .{
-        .pointee_type = elem_ty,
+    const ty = try mod.ptrType(.{
+        .elem_type = elem_ty.toIntern(),
         .sentinel = sentinel,
-        .@"align" = abi_align,
-        .@"addrspace" = address_space,
+        .alignment = abi_align,
+        .address_space = address_space,
         .bit_offset = bit_offset,
         .host_size = host_size,
-        .mutable = inst_data.flags.is_mutable,
-        .@"allowzero" = inst_data.flags.is_allowzero,
-        .@"volatile" = inst_data.flags.is_volatile,
+        .is_const = !inst_data.flags.is_mutable,
+        .is_allowzero = inst_data.flags.is_allowzero,
+        .is_volatile = inst_data.flags.is_volatile,
         .size = inst_data.size,
     });
     return sema.addType(ty);
@@ -18905,10 +18905,13 @@ fn zirReify(
             if (!try sema.intFitsInType(alignment_val, Type.u32, null)) {
                 return sema.fail(block, src, "alignment must fit in 'u32'", .{});
             }
-            const abi_align = @intCast(u29, (try alignment_val.getUnsignedIntAdvanced(mod, sema)).?);
+
+            const abi_align = InternPool.Alignment.fromByteUnits(
+                (try alignment_val.getUnsignedIntAdvanced(mod, sema)).?,
+            );
 
             const unresolved_elem_ty = child_val.toType();
-            const elem_ty = if (abi_align == 0)
+            const elem_ty = if (abi_align == .none)
                 unresolved_elem_ty
             else t: {
                 const elem_ty = try sema.resolveTypeFields(unresolved_elem_ty);
@@ -18918,18 +18921,21 @@ fn zirReify(
 
             const ptr_size = mod.toEnum(std.builtin.Type.Pointer.Size, size_val);
 
-            var actual_sentinel: ?Value = null;
-            if (!sentinel_val.isNull(mod)) {
-                if (ptr_size == .One or ptr_size == .C) {
-                    return sema.fail(block, src, "sentinels are only allowed on slices and unknown-length pointers", .{});
+            const actual_sentinel: InternPool.Index = s: {
+                if (!sentinel_val.isNull(mod)) {
+                    if (ptr_size == .One or ptr_size == .C) {
+                        return sema.fail(block, src, "sentinels are only allowed on slices and unknown-length pointers", .{});
+                    }
+                    const sentinel_ptr_val = sentinel_val.castTag(.opt_payload).?.data;
+                    const ptr_ty = try Type.ptr(sema.arena, mod, .{
+                        .@"addrspace" = .generic,
+                        .pointee_type = try elem_ty.copy(sema.arena),
+                    });
+                    const sent_val = (try sema.pointerDeref(block, src, sentinel_ptr_val, ptr_ty)).?;
+                    break :s sent_val.toIntern();
                 }
-                const sentinel_ptr_val = sentinel_val.castTag(.opt_payload).?.data;
-                const ptr_ty = try Type.ptr(sema.arena, mod, .{
-                    .@"addrspace" = .generic,
-                    .pointee_type = try elem_ty.copy(sema.arena),
-                });
-                actual_sentinel = (try sema.pointerDeref(block, src, sentinel_ptr_val, ptr_ty)).?;
-            }
+                break :s .none;
+            };
 
             if (elem_ty.zigTypeTag(mod) == .NoReturn) {
                 return sema.fail(block, src, "pointer to noreturn not allowed", .{});
@@ -18938,7 +18944,7 @@ fn zirReify(
                     return sema.fail(block, src, "function pointers must be single pointers", .{});
                 }
                 const fn_align = mod.typeToFunc(elem_ty).?.alignment;
-                if (abi_align != 0 and fn_align != 0 and
+                if (abi_align != .none and fn_align != .none and
                     abi_align != fn_align)
                 {
                     return sema.fail(block, src, "function pointer alignment disagrees with function alignment", .{});
@@ -18964,14 +18970,14 @@ fn zirReify(
                 }
             }
 
-            const ty = try Type.ptr(sema.arena, mod, .{
+            const ty = try mod.ptrType(.{
                 .size = ptr_size,
-                .mutable = !is_const_val.toBool(mod),
-                .@"volatile" = is_volatile_val.toBool(mod),
-                .@"align" = abi_align,
-                .@"addrspace" = mod.toEnum(std.builtin.AddressSpace, address_space_val),
-                .pointee_type = try elem_ty.copy(sema.arena),
-                .@"allowzero" = is_allowzero_val.toBool(mod),
+                .is_const = is_const_val.toBool(mod),
+                .is_volatile = is_volatile_val.toBool(mod),
+                .alignment = abi_align,
+                .address_space = mod.toEnum(std.builtin.AddressSpace, address_space_val),
+                .elem_type = elem_ty.toIntern(),
+                .is_allowzero = is_allowzero_val.toBool(mod),
                 .sentinel = actual_sentinel,
             });
             return sema.addType(ty);
@@ -19470,9 +19476,9 @@ fn zirReify(
                 }
                 const alignment = @intCast(u29, alignment_val.toUnsignedInt(mod));
                 if (alignment == target_util.defaultFunctionAlignment(target)) {
-                    break :alignment 0;
+                    break :alignment .none;
                 } else {
-                    break :alignment alignment;
+                    break :alignment InternPool.Alignment.fromByteUnits(alignment);
                 }
             };
             const return_type = return_type_val.optionalValue(mod) orelse
@@ -24291,8 +24297,7 @@ fn fieldPtr(
     const attr_ptr_ty = if (is_pointer_to) object_ty else object_ptr_ty;
 
     if (mem.eql(u8, field_name, "ptr")) {
-        const buf = try sema.arena.create(Type.SlicePtrFieldTypeBuffer);
-        const slice_ptr_ty = inner_ty.slicePtrFieldType(buf, mod);
+        const slice_ptr_ty = inner_ty.slicePtrFieldType(mod);
 
         const result_ty = try Type.ptr(sema.arena, mod, .{
             .pointee_type = slice_ptr_ty,
@@ -27914,7 +27919,7 @@ fn beginComptimePtrMutation(
                         sema,
                         block,
                         src,
-                        parent.ty.slicePtrFieldType(try sema.arena.create(Type.SlicePtrFieldTypeBuffer), mod),
+                        parent.ty.slicePtrFieldType(mod),
                         &val_ptr.castTag(.slice).?.data.ptr,
                         ptr_elem_ty,
                         parent.decl_ref_mut,
@@ -27981,7 +27986,7 @@ fn beginComptimePtrMutation(
                         sema,
                         block,
                         src,
-                        parent.ty.slicePtrFieldType(try sema.arena.create(Type.SlicePtrFieldTypeBuffer), mod),
+                        parent.ty.slicePtrFieldType(mod),
                         &val_ptr.castTag(.slice).?.data.ptr,
                         ptr_elem_ty,
                         parent.decl_ref_mut,
@@ -28363,7 +28368,7 @@ fn beginComptimePtrLoad(
             const slice_val = tv.val.castTag(.slice).?.data;
             deref.pointee = switch (field_index) {
                 Value.Payload.Slice.ptr_index => TypedValue{
-                    .ty = field_ptr.container_ty.slicePtrFieldType(try sema.arena.create(Type.SlicePtrFieldTypeBuffer), mod),
+                    .ty = field_ptr.container_ty.slicePtrFieldType(mod),
                     .val = slice_val.ptr,
                 },
                 Value.Payload.Slice.len_index => TypedValue{
@@ -29454,8 +29459,7 @@ fn analyzeSlicePtr(
     slice_ty: Type,
 ) CompileError!Air.Inst.Ref {
     const mod = sema.mod;
-    const buf = try sema.arena.create(Type.SlicePtrFieldTypeBuffer);
-    const result_ty = slice_ty.slicePtrFieldType(buf, mod);
+    const result_ty = slice_ty.slicePtrFieldType(mod);
     if (try sema.resolveMaybeUndefVal(slice)) |val| {
         if (val.isUndef(mod)) return sema.addConstUndef(result_ty);
         return sema.addConstant(result_ty, val.slicePtr());
@@ -31611,15 +31615,6 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
             .inferred_alloc_mut => unreachable,
             .inferred_alloc_const => unreachable,
 
-            .pointer => {
-                const child_ty = ty.childType(mod);
-                if (child_ty.zigTypeTag(mod) == .Fn) {
-                    return mod.typeToFunc(child_ty).?.is_generic;
-                } else {
-                    return sema.resolveTypeRequiresComptime(child_ty);
-                }
-            },
-
             .error_union => return sema.resolveTypeRequiresComptime(ty.errorUnionPayload()),
         },
         else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
@@ -33048,7 +33043,6 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
             .error_set_merged,
             .error_union,
             .error_set_inferred,
-            .pointer,
             => return null,
 
             .inferred_alloc_const => unreachable,
@@ -33604,12 +33598,6 @@ fn typePtrOrOptionalPtrTy(sema: *Sema, ty: Type) !?Type {
     };
 
     switch (ty.tag()) {
-        .pointer => switch (ty.ptrSize(mod)) {
-            .Slice => return null,
-            .C => return ty.optionalChild(mod),
-            else => return ty,
-        },
-
         .inferred_alloc_const => unreachable,
         .inferred_alloc_mut => unreachable,
 
@@ -33638,15 +33626,6 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
             .inferred_alloc_mut => unreachable,
             .inferred_alloc_const => unreachable,
 
-            .pointer => {
-                const child_ty = ty.childType(mod);
-                if (child_ty.zigTypeTag(mod) == .Fn) {
-                    return mod.typeToFunc(child_ty).?.is_generic;
-                } else {
-                    return sema.typeRequiresComptime(child_ty);
-                }
-            },
-
             .error_union => return sema.typeRequiresComptime(ty.errorUnionPayload()),
         },
         else => switch (mod.intern_pool.indexToKey(ty.ip_index)) {
diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig
index dea5b63129..8b84189e18 100644
--- a/src/arch/aarch64/CodeGen.zig
+++ b/src/arch/aarch64/CodeGen.zig
@@ -3434,8 +3434,7 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
     const bin_op = self.air.instructions.items(.data)[inst].bin_op;
     const slice_ty = self.typeOf(bin_op.lhs);
     const result: MCValue = if (!slice_ty.isVolatilePtr(mod) and self.liveness.isUnused(inst)) .dead else result: {
-        var buf: Type.SlicePtrFieldTypeBuffer = undefined;
-        const ptr_ty = slice_ty.slicePtrFieldType(&buf, mod);
+        const ptr_ty = slice_ty.slicePtrFieldType(mod);
 
         const slice_mcv = try self.resolveInst(bin_op.lhs);
         const base_mcv = slicePtr(slice_mcv);
diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig
index e84c4de981..a6a715c75d 100644
--- a/src/arch/arm/CodeGen.zig
+++ b/src/arch/arm/CodeGen.zig
@@ -2432,8 +2432,7 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
     const bin_op = self.air.instructions.items(.data)[inst].bin_op;
     const slice_ty = self.typeOf(bin_op.lhs);
     const result: MCValue = if (!slice_ty.isVolatilePtr(mod) and self.liveness.isUnused(inst)) .dead else result: {
-        var buf: Type.SlicePtrFieldTypeBuffer = undefined;
-        const ptr_ty = slice_ty.slicePtrFieldType(&buf, mod);
+        const ptr_ty = slice_ty.slicePtrFieldType(mod);
 
         const slice_mcv = try self.resolveInst(bin_op.lhs);
         const base_mcv = slicePtr(slice_mcv);
diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig
index 9d58dd9f29..072d3ed098 100644
--- a/src/arch/sparc64/CodeGen.zig
+++ b/src/arch/sparc64/CodeGen.zig
@@ -2462,8 +2462,7 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
     const elem_ty = slice_ty.childType(mod);
     const elem_size = elem_ty.abiSize(mod);
 
-    var buf: Type.SlicePtrFieldTypeBuffer = undefined;
-    const slice_ptr_field_type = 
slice_ty.slicePtrFieldType(mod); const index_lock: ?RegisterLock = if (index_mcv == .register) self.register_manager.lockRegAssumeUnused(index_mcv.register) diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 149f872c9a..e83644269f 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -4052,8 +4052,7 @@ fn genSliceElemPtr(self: *Self, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref) !MCValue { const elem_ty = slice_ty.childType(mod); const elem_size = elem_ty.abiSize(mod); - var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const slice_ptr_field_type = slice_ty.slicePtrFieldType(&buf, mod); + const slice_ptr_field_type = slice_ty.slicePtrFieldType(mod); const index_ty = self.typeOf(rhs); const index_mcv = try self.resolveInst(rhs); @@ -4082,8 +4081,7 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const slice_ty = self.typeOf(bin_op.lhs); - var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const slice_ptr_field_type = slice_ty.slicePtrFieldType(&buf, mod); + const slice_ptr_field_type = slice_ty.slicePtrFieldType(mod); const elem_ptr = try self.genSliceElemPtr(bin_op.lhs, bin_op.rhs); const dst_mcv = try self.allocRegOrMem(inst, false); try self.load(dst_mcv, slice_ptr_field_type, elem_ptr); @@ -4281,11 +4279,7 @@ fn airSetUnionTag(self: *Self, inst: Air.Inst.Index) !void { break :blk MCValue{ .register = reg }; } else ptr; - var ptr_tag_pl: Type.Payload.Pointer = .{ - .data = ptr_union_ty.ptrInfo(mod), - }; - ptr_tag_pl.data.pointee_type = tag_ty; - const ptr_tag_ty = Type.initPayload(&ptr_tag_pl.base); + const ptr_tag_ty = try mod.adjustPtrTypeChild(ptr_union_ty, tag_ty); try self.store(ptr_tag_ty, adjusted_ptr, tag); return self.finishAir(inst, .none, .{ bin_op.lhs, bin_op.rhs, .none }); @@ -8671,9 +8665,8 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC const pl_ty = opt_ty.optionalChild(mod); - var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined; const some_info: struct { off: i32, ty: Type } = if (opt_ty.optionalReprIsPayload(mod)) - .{ .off = 0, .ty = if (pl_ty.isSlice(mod)) pl_ty.slicePtrFieldType(&ptr_buf, mod) else pl_ty } + .{ .off = 0, .ty = if (pl_ty.isSlice(mod)) pl_ty.slicePtrFieldType(mod) else pl_ty } else .{ .off = @intCast(i32, pl_ty.abiSize(mod)), .ty = Type.bool }; @@ -8763,9 +8756,8 @@ fn isNullPtr(self: *Self, inst: Air.Inst.Index, ptr_ty: Type, ptr_mcv: MCValue) const opt_ty = ptr_ty.childType(mod); const pl_ty = opt_ty.optionalChild(mod); - var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined; const some_info: struct { off: i32, ty: Type } = if (opt_ty.optionalReprIsPayload(mod)) - .{ .off = 0, .ty = if (pl_ty.isSlice(mod)) pl_ty.slicePtrFieldType(&ptr_buf, mod) else pl_ty } + .{ .off = 0, .ty = if (pl_ty.isSlice(mod)) pl_ty.slicePtrFieldType(mod) else pl_ty } else .{ .off = @intCast(i32, pl_ty.abiSize(mod)), .ty = Type.bool }; @@ -10803,8 +10795,7 @@ fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void { // here to elide it. 
switch (dst_ptr_ty.ptrSize(mod)) { .Slice => { - var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const slice_ptr_ty = dst_ptr_ty.slicePtrFieldType(&buf, mod); + const slice_ptr_ty = dst_ptr_ty.slicePtrFieldType(mod); // TODO: this only handles slices stored in the stack const ptr = dst_ptr; diff --git a/src/codegen.zig b/src/codegen.zig index 9eb294feac..8e145a3b32 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -347,8 +347,7 @@ pub fn generateSymbol( const slice = typed_value.val.castTag(.slice).?.data; // generate ptr - var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const slice_ptr_field_type = typed_value.ty.slicePtrFieldType(&buf, mod); + const slice_ptr_field_type = typed_value.ty.slicePtrFieldType(mod); switch (try generateSymbol(bin_file, src_loc, .{ .ty = slice_ptr_field_type, .val = slice.ptr, @@ -850,10 +849,9 @@ fn lowerParentPtr( reloc_info.offset(@intCast(u32, switch (field_ptr.container_ty.zigTypeTag(mod)) { .Pointer => offset: { assert(field_ptr.container_ty.isSlice(mod)); - var buf: Type.SlicePtrFieldTypeBuffer = undefined; break :offset switch (field_ptr.field_index) { 0 => 0, - 1 => field_ptr.container_ty.slicePtrFieldType(&buf, mod).abiSize(mod), + 1 => field_ptr.container_ty.slicePtrFieldType(mod).abiSize(mod), else => unreachable, }; }, @@ -952,8 +950,7 @@ fn lowerDeclRef( const mod = bin_file.options.module.?; if (typed_value.ty.isSlice(mod)) { // generate ptr - var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const slice_ptr_field_type = typed_value.ty.slicePtrFieldType(&buf, mod); + const slice_ptr_field_type = typed_value.ty.slicePtrFieldType(mod); switch (try generateSymbol(bin_file, src_loc, .{ .ty = slice_ptr_field_type, .val = typed_value.val, diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 601382c1fd..c2a108d68e 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -566,8 +566,7 @@ pub const DeclGen = struct { try writer.writeAll("){ .ptr = "); } - var buf: Type.SlicePtrFieldTypeBuffer = undefined; - try dg.renderValue(writer, ty.slicePtrFieldType(&buf, mod), val.slicePtr(), .Initializer); + try dg.renderValue(writer, ty.slicePtrFieldType(mod), val.slicePtr(), .Initializer); const len_val = try mod.intValue(Type.usize, val.sliceLen(mod)); @@ -631,11 +630,7 @@ pub const DeclGen = struct { // Ensure complete type definition is visible before accessing fields. 
_ = try dg.typeToIndex(field_ptr.container_ty, .complete); - var container_ptr_pl: Type.Payload.Pointer = .{ - .data = ptr_ty.ptrInfo(mod), - }; - container_ptr_pl.data.pointee_type = field_ptr.container_ty; - const container_ptr_ty = Type.initPayload(&container_ptr_pl.base); + const container_ptr_ty = try mod.adjustPtrTypeChild(ptr_ty, field_ptr.container_ty); switch (fieldLocation( field_ptr.container_ty, @@ -661,11 +656,7 @@ pub const DeclGen = struct { try dg.writeCValue(writer, field); }, .byte_offset => |byte_offset| { - var u8_ptr_pl: Type.Payload.Pointer = .{ - .data = ptr_ty.ptrInfo(mod), - }; - u8_ptr_pl.data.pointee_type = Type.u8; - const u8_ptr_ty = Type.initPayload(&u8_ptr_pl.base); + const u8_ptr_ty = try mod.adjustPtrTypeChild(ptr_ty, Type.u8); const byte_offset_val = try mod.intValue(Type.usize, byte_offset); @@ -788,8 +779,7 @@ pub const DeclGen = struct { } try writer.writeAll("{("); - var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const ptr_ty = ty.slicePtrFieldType(&buf, mod); + const ptr_ty = ty.slicePtrFieldType(mod); try dg.renderType(writer, ptr_ty); return writer.print("){x}, {0x}}}", .{try dg.fmtIntLiteral(Type.usize, val, .Other)}); } else { @@ -1068,10 +1058,9 @@ pub const DeclGen = struct { } const slice = val.castTag(.slice).?.data; - var buf: Type.SlicePtrFieldTypeBuffer = undefined; try writer.writeByte('{'); - try dg.renderValue(writer, ty.slicePtrFieldType(&buf, mod), slice.ptr, initializer_type); + try dg.renderValue(writer, ty.slicePtrFieldType(mod), slice.ptr, initializer_type); try writer.writeAll(", "); try dg.renderValue(writer, Type.usize, slice.len, initializer_type); try writer.writeByte('}'); @@ -1536,8 +1525,8 @@ pub const DeclGen = struct { switch (kind) { .forward => {}, - .complete => if (fn_info.alignment > 0) - try w.print(" zig_align_fn({})", .{fn_info.alignment}), + .complete => if (fn_info.alignment.toByteUnitsOptional()) |a| + try w.print(" zig_align_fn({})", .{a}), else => unreachable, } @@ -1561,8 +1550,8 @@ pub const DeclGen = struct { ); switch (kind) { - .forward => if (fn_info.alignment > 0) - try w.print(" zig_align_fn({})", .{fn_info.alignment}), + .forward => if (fn_info.alignment.toByteUnitsOptional()) |a| + try w.print(" zig_align_fn({})", .{a}), .complete => {}, else => unreachable, } @@ -4062,8 +4051,7 @@ fn airSlice(f: *Function, inst: Air.Inst.Index) !CValue { try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); const inst_ty = f.typeOfIndex(inst); - var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const ptr_ty = inst_ty.slicePtrFieldType(&buf, mod); + const ptr_ty = inst_ty.slicePtrFieldType(mod); const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); @@ -5047,7 +5035,6 @@ fn airIsNull( const operand_ty = f.typeOf(un_op); const optional_ty = if (is_ptr) operand_ty.childType(mod) else operand_ty; const payload_ty = optional_ty.optionalChild(mod); - var slice_ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined; const rhs = if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) TypedValue{ .ty = Type.bool, .val = Value.true } @@ -5058,7 +5045,7 @@ fn airIsNull( TypedValue{ .ty = payload_ty, .val = try mod.intValue(payload_ty, 0) } else if (payload_ty.isSlice(mod) and optional_ty.optionalReprIsPayload(mod)) rhs: { try writer.writeAll(".ptr"); - const slice_ptr_ty = payload_ty.slicePtrFieldType(&slice_ptr_buf, mod); + const slice_ptr_ty = payload_ty.slicePtrFieldType(mod); break :rhs TypedValue{ .ty = slice_ptr_ty, .val = Value.null }; } else rhs: { try writer.writeAll(".is_null"); @@ -5278,11 
+5265,7 @@ fn airFieldParentPtr(f: *Function, inst: Air.Inst.Index) !CValue { switch (fieldLocation(container_ty, field_ptr_ty, extra.field_index, mod)) { .begin => try f.writeCValue(writer, field_ptr_val, .Initializer), .field => |field| { - var u8_ptr_pl: Type.Payload.Pointer = .{ - .data = field_ptr_ty.ptrInfo(mod), - }; - u8_ptr_pl.data.pointee_type = Type.u8; - const u8_ptr_ty = Type.initPayload(&u8_ptr_pl.base); + const u8_ptr_ty = try mod.adjustPtrTypeChild(field_ptr_ty, Type.u8); try writer.writeAll("(("); try f.renderType(writer, u8_ptr_ty); @@ -5295,11 +5278,7 @@ fn airFieldParentPtr(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeAll("))"); }, .byte_offset => |byte_offset| { - var u8_ptr_pl: Type.Payload.Pointer = .{ - .data = field_ptr_ty.ptrInfo(mod), - }; - u8_ptr_pl.data.pointee_type = Type.u8; - const u8_ptr_ty = Type.initPayload(&u8_ptr_pl.base); + const u8_ptr_ty = try mod.adjustPtrTypeChild(field_ptr_ty, Type.u8); const byte_offset_val = try mod.intValue(Type.usize, byte_offset); @@ -5347,11 +5326,7 @@ fn fieldPtr( try f.writeCValueDerefMember(writer, container_ptr_val, field); }, .byte_offset => |byte_offset| { - var u8_ptr_pl: Type.Payload.Pointer = .{ - .data = field_ptr_ty.ptrInfo(mod), - }; - u8_ptr_pl.data.pointee_type = Type.u8; - const u8_ptr_ty = Type.initPayload(&u8_ptr_pl.base); + const u8_ptr_ty = try mod.adjustPtrTypeChild(field_ptr_ty, Type.u8); const byte_offset_val = try mod.intValue(Type.usize, byte_offset); @@ -5794,8 +5769,7 @@ fn airArrayToSlice(f: *Function, inst: Air.Inst.Index) !CValue { // Unfortunately, C does not support any equivalent to // &(*(void *)p)[0], although LLVM does via GetElementPtr if (operand == .undef) { - var buf: Type.SlicePtrFieldTypeBuffer = undefined; - try f.writeCValue(writer, .{ .undef = inst_ty.slicePtrFieldType(&buf, mod) }, .Initializer); + try f.writeCValue(writer, .{ .undef = inst_ty.slicePtrFieldType(mod) }, .Initializer); } else if (array_ty.hasRuntimeBitsIgnoreComptime(mod)) { try writer.writeAll("&("); try f.writeCValueDeref(writer, operand); diff --git a/src/codegen/c/type.zig b/src/codegen/c/type.zig index a2af395a98..70426972af 100644 --- a/src/codegen/c/type.zig +++ b/src/codegen/c/type.zig @@ -1431,8 +1431,7 @@ pub const CType = extern union { .complete, .parameter, .global => try lookup.typeToIndex(ty, .forward), .payload => unreachable, }) |fwd_idx| { - var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const ptr_ty = ty.slicePtrFieldType(&buf, mod); + const ptr_ty = ty.slicePtrFieldType(mod); if (try lookup.typeToIndex(ptr_ty, kind)) |ptr_idx| { self.storage = .{ .anon = undefined }; self.storage.anon.fields[0] = .{ diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 476f73cbe4..0c503edee4 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -1591,40 +1591,30 @@ pub const Object = struct { }, .Pointer => { // Normalize everything that the debug info does not represent. 
- const ptr_info = ty.ptrInfo(mod); + const ptr_info = ty.ptrInfoIp(mod.intern_pool); - if (ptr_info.sentinel != null or - ptr_info.@"addrspace" != .generic or + if (ptr_info.sentinel != .none or + ptr_info.address_space != .generic or ptr_info.bit_offset != 0 or ptr_info.host_size != 0 or ptr_info.vector_index != .none or - ptr_info.@"allowzero" or - !ptr_info.mutable or - ptr_info.@"volatile" or + ptr_info.is_allowzero or + ptr_info.is_const or + ptr_info.is_volatile or ptr_info.size == .Many or ptr_info.size == .C or - !ptr_info.pointee_type.hasRuntimeBitsIgnoreComptime(mod)) + !ptr_info.elem_type.toType().hasRuntimeBitsIgnoreComptime(mod)) { - var payload: Type.Payload.Pointer = .{ - .data = .{ - .pointee_type = ptr_info.pointee_type, - .sentinel = null, - .@"align" = ptr_info.@"align", - .@"addrspace" = .generic, - .bit_offset = 0, - .host_size = 0, - .@"allowzero" = false, - .mutable = true, - .@"volatile" = false, - .size = switch (ptr_info.size) { - .Many, .C, .One => .One, - .Slice => .Slice, - }, + const bland_ptr_ty = try mod.ptrType(.{ + .elem_type = if (!ptr_info.elem_type.toType().hasRuntimeBitsIgnoreComptime(mod)) + .anyopaque_type + else + ptr_info.elem_type, + .alignment = ptr_info.alignment, + .size = switch (ptr_info.size) { + .Many, .C, .One => .One, + .Slice => .Slice, }, - }; - if (!ptr_info.pointee_type.hasRuntimeBitsIgnoreComptime(mod)) { - payload.data.pointee_type = Type.anyopaque; - } - const bland_ptr_ty = Type.initPayload(&payload.base); + }); const ptr_di_ty = try o.lowerDebugType(bland_ptr_ty, resolve); // The recursive call to `lowerDebugType` means we can't use `gop` anymore. try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.init(ptr_di_ty, resolve), .{ .mod = o.module }); @@ -1632,8 +1622,7 @@ pub const Object = struct { } if (ty.isSlice(mod)) { - var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const ptr_ty = ty.slicePtrFieldType(&buf, mod); + const ptr_ty = ty.slicePtrFieldType(mod); const len_ty = Type.usize; const name = try ty.nameAlloc(gpa, o.module); @@ -1711,7 +1700,7 @@ pub const Object = struct { return full_di_ty; } - const elem_di_ty = try o.lowerDebugType(ptr_info.pointee_type, .fwd); + const elem_di_ty = try o.lowerDebugType(ptr_info.elem_type.toType(), .fwd); const name = try ty.nameAlloc(gpa, o.module); defer gpa.free(name); const ptr_di_ty = dib.createPointerType( @@ -2625,8 +2614,8 @@ pub const DeclGen = struct { }, } - if (fn_info.alignment != 0) { - llvm_fn.setAlignment(@intCast(c_uint, fn_info.alignment)); + if (fn_info.alignment.toByteUnitsOptional()) |a| { + llvm_fn.setAlignment(@intCast(c_uint, a)); } // Function attributes that are independent of analysis results of the function body. 
@@ -2819,8 +2808,7 @@ pub const DeclGen = struct { .Bool => return dg.context.intType(1), .Pointer => { if (t.isSlice(mod)) { - var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const ptr_type = t.slicePtrFieldType(&buf, mod); + const ptr_type = t.slicePtrFieldType(mod); const fields: [2]*llvm.Type = .{ try dg.lowerType(ptr_type), @@ -3176,11 +3164,10 @@ pub const DeclGen = struct { }, .slice => { const param_ty = fn_info.param_types[it.zig_index - 1].toType(); - var buf: Type.SlicePtrFieldTypeBuffer = undefined; const ptr_ty = if (param_ty.zigTypeTag(mod) == .Optional) - param_ty.optionalChild(mod).slicePtrFieldType(&buf, mod) + param_ty.optionalChild(mod).slicePtrFieldType(mod) else - param_ty.slicePtrFieldType(&buf, mod); + param_ty.slicePtrFieldType(mod); const ptr_llvm_ty = try dg.lowerType(ptr_ty); const len_llvm_ty = try dg.lowerType(Type.usize); @@ -3368,10 +3355,9 @@ pub const DeclGen = struct { }, .slice => { const slice = tv.val.castTag(.slice).?.data; - var buf: Type.SlicePtrFieldTypeBuffer = undefined; const fields: [2]*llvm.Value = .{ try dg.lowerValue(.{ - .ty = tv.ty.slicePtrFieldType(&buf, mod), + .ty = tv.ty.slicePtrFieldType(mod), .val = slice.ptr, }), try dg.lowerValue(.{ @@ -4171,8 +4157,7 @@ pub const DeclGen = struct { ) Error!*llvm.Value { const mod = self.module; if (tv.ty.isSlice(mod)) { - var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const ptr_ty = tv.ty.slicePtrFieldType(&buf, mod); + const ptr_ty = tv.ty.slicePtrFieldType(mod); const fields: [2]*llvm.Value = .{ try self.lowerValue(.{ .ty = ptr_ty, @@ -6043,17 +6028,14 @@ pub const FuncGen = struct { const field_ptr = self.builder.buildStructGEP(struct_llvm_ty, struct_llvm_val, llvm_field.index, ""); const field_ptr_ty = try mod.ptrType(.{ .elem_type = llvm_field.ty.ip_index, - .alignment = llvm_field.alignment, + .alignment = InternPool.Alignment.fromNonzeroByteUnits(llvm_field.alignment), }); if (isByRef(field_ty, mod)) { if (canElideLoad(self, body_tail)) return field_ptr; - const field_alignment = if (llvm_field.alignment != 0) - llvm_field.alignment - else - llvm_field.ty.abiAlignment(mod); - return self.loadByRef(field_ptr, field_ty, field_alignment, false); + assert(llvm_field.alignment != 0); + return self.loadByRef(field_ptr, field_ty, llvm_field.alignment, false); } else { return self.load(field_ptr, field_ptr_ty); } @@ -6151,7 +6133,7 @@ pub const FuncGen = struct { const fn_ty = try mod.funcType(.{ .param_types = &.{}, .return_type = .void_type, - .alignment = 0, + .alignment = .none, .noalias_bits = 0, .comptime_bits = 0, .cc = .Unspecified, @@ -6655,8 +6637,7 @@ pub const FuncGen = struct { operand; if (payload_ty.isSlice(mod)) { const slice_ptr = self.builder.buildExtractValue(loaded, 0, ""); - var slice_buf: Type.SlicePtrFieldTypeBuffer = undefined; - const ptr_ty = try self.dg.lowerType(payload_ty.slicePtrFieldType(&slice_buf, mod)); + const ptr_ty = try self.dg.lowerType(payload_ty.slicePtrFieldType(mod)); return self.builder.buildICmp(pred, slice_ptr, ptr_ty.constNull(), ""); } return self.builder.buildICmp(pred, loaded, optional_llvm_ty.constNull(), ""); @@ -6923,7 +6904,7 @@ pub const FuncGen = struct { const field_ptr = self.builder.buildStructGEP(struct_llvm_ty, self.err_ret_trace.?, llvm_field.index, ""); const field_ptr_ty = try mod.ptrType(.{ .elem_type = llvm_field.ty.ip_index, - .alignment = llvm_field.alignment, + .alignment = InternPool.Alignment.fromNonzeroByteUnits(llvm_field.alignment), }); return self.load(field_ptr, field_ptr_ty); } @@ -9319,14 +9300,12 @@ pub const 
FuncGen = struct { const llvm_i = llvmField(result_ty, i, mod).?.index; indices[1] = llvm_u32.constInt(llvm_i, .False); const field_ptr = self.builder.buildInBoundsGEP(llvm_result_ty, alloca_inst, &indices, indices.len, ""); - var field_ptr_payload: Type.Payload.Pointer = .{ - .data = .{ - .pointee_type = self.typeOf(elem), - .@"align" = result_ty.structFieldAlign(i, mod), - .@"addrspace" = .generic, - }, - }; - const field_ptr_ty = Type.initPayload(&field_ptr_payload.base); + const field_ptr_ty = try mod.ptrType(.{ + .elem_type = self.typeOf(elem).toIntern(), + .alignment = InternPool.Alignment.fromNonzeroByteUnits( + result_ty.structFieldAlign(i, mod), + ), + }); try self.store(field_ptr, field_ptr_ty, llvm_elem, .NotAtomic); } @@ -9350,13 +9329,9 @@ pub const FuncGen = struct { const alloca_inst = self.buildAlloca(llvm_result_ty, result_ty.abiAlignment(mod)); const array_info = result_ty.arrayInfo(mod); - var elem_ptr_payload: Type.Payload.Pointer = .{ - .data = .{ - .pointee_type = array_info.elem_type, - .@"addrspace" = .generic, - }, - }; - const elem_ptr_ty = Type.initPayload(&elem_ptr_payload.base); + const elem_ptr_ty = try mod.ptrType(.{ + .elem_type = array_info.elem_type.toIntern(), + }); for (elements, 0..) |elem, i| { const indices: [2]*llvm.Value = .{ @@ -9476,14 +9451,10 @@ pub const FuncGen = struct { // tag and the payload. const index_type = self.context.intType(32); - var field_ptr_payload: Type.Payload.Pointer = .{ - .data = .{ - .pointee_type = field.ty, - .@"align" = field_align, - .@"addrspace" = .generic, - }, - }; - const field_ptr_ty = Type.initPayload(&field_ptr_payload.base); + const field_ptr_ty = try mod.ptrType(.{ + .elem_type = field.ty.toIntern(), + .alignment = InternPool.Alignment.fromNonzeroByteUnits(field_align), + }); if (layout.tag_size == 0) { const indices: [3]*llvm.Value = .{ index_type.constNull(), diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 777bb1cff9..eada74e6d4 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -669,8 +669,7 @@ pub const DeclGen = struct { .slice => { const slice = val.castTag(.slice).?.data; - var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const ptr_ty = ty.slicePtrFieldType(&buf, mod); + const ptr_ty = ty.slicePtrFieldType(mod); try self.lower(ptr_ty, slice.ptr); try self.addInt(Type.usize, slice.len); @@ -2991,9 +2990,8 @@ pub const DeclGen = struct { if (optional_ty.optionalReprIsPayload(mod)) { // Pointer payload represents nullability: pointer or slice. 
- var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined; const ptr_ty = if (payload_ty.isSlice(mod)) - payload_ty.slicePtrFieldType(&ptr_buf, mod) + payload_ty.slicePtrFieldType(mod) else payload_ty; diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig index 92ea2a15dc..f4f19f30d0 100644 --- a/src/link/Dwarf.zig +++ b/src/link/Dwarf.zig @@ -277,8 +277,7 @@ pub const DeclState = struct { // DW.AT.type, DW.FORM.ref4 var index = dbg_info_buffer.items.len; try dbg_info_buffer.resize(index + 4); - var buf = try arena.create(Type.SlicePtrFieldTypeBuffer); - const ptr_ty = ty.slicePtrFieldType(buf, mod); + const ptr_ty = ty.slicePtrFieldType(mod); try self.addTypeRelocGlobal(atom_index, ptr_ty, @intCast(u32, index)); // DW.AT.data_member_location, DW.FORM.udata try dbg_info_buffer.ensureUnusedCapacity(6); diff --git a/src/type.zig b/src/type.zig index 735d532c46..ebe3d52b05 100644 --- a/src/type.zig +++ b/src/type.zig @@ -42,7 +42,6 @@ pub const Type = struct { .error_set_merged, => return .ErrorSet, - .pointer, .inferred_alloc_const, .inferred_alloc_mut, => return .Pointer, @@ -250,17 +249,9 @@ pub const Type = struct { return elem_ty; } + /// Asserts the type is a pointer. pub fn ptrIsMutable(ty: Type, mod: *const Module) bool { - return switch (ty.ip_index) { - .none => switch (ty.tag()) { - .pointer => ty.castTag(.pointer).?.data.mutable, - else => unreachable, - }, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .ptr_type => |ptr_type| !ptr_type.is_const, - else => unreachable, - }, - }; + return !mod.intern_pool.indexToKey(ty.ip_index).ptr_type.is_const; } pub const ArrayInfo = struct { @@ -277,24 +268,21 @@ pub const Type = struct { }; } - pub fn ptrInfo(ty: Type, mod: *const Module) Payload.Pointer.Data { - return switch (ty.ip_index) { - .none => switch (ty.tag()) { - .pointer => ty.castTag(.pointer).?.data, - - else => unreachable, - }, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .ptr_type => |p| Payload.Pointer.Data.fromKey(p), - .opt_type => |child| switch (mod.intern_pool.indexToKey(child)) { - .ptr_type => |p| Payload.Pointer.Data.fromKey(p), - else => unreachable, - }, + pub fn ptrInfoIp(ty: Type, ip: InternPool) InternPool.Key.PtrType { + return switch (ip.indexToKey(ty.ip_index)) { + .ptr_type => |p| p, + .opt_type => |child| switch (ip.indexToKey(child)) { + .ptr_type => |p| p, else => unreachable, }, + else => unreachable, }; } + pub fn ptrInfo(ty: Type, mod: *const Module) Payload.Pointer.Data { + return Payload.Pointer.Data.fromKey(ptrInfoIp(ty, mod.intern_pool)); + } + pub fn eql(a: Type, b: Type, mod: *Module) bool { if (a.ip_index != .none or b.ip_index != .none) { // The InternPool data structure hashes based on Key to make interned objects @@ -335,7 +323,6 @@ pub const Type = struct { return true; }, - .pointer, .inferred_alloc_const, .inferred_alloc_mut, => { @@ -434,7 +421,6 @@ pub const Type = struct { std.hash.autoHash(hasher, ies); }, - .pointer, .inferred_alloc_const, .inferred_alloc_mut, => { @@ -512,26 +498,6 @@ pub const Type = struct { .inferred_alloc_mut, => unreachable, - .pointer => { - const payload = self.castTag(.pointer).?.data; - const sent: ?Value = if (payload.sentinel) |some| - try some.copy(allocator) - else - null; - return Tag.pointer.create(allocator, .{ - .pointee_type = try payload.pointee_type.copy(allocator), - .sentinel = sent, - .@"align" = payload.@"align", - .@"addrspace" = payload.@"addrspace", - .bit_offset = payload.bit_offset, - .host_size = payload.host_size, - .vector_index = payload.vector_index, - 
.@"allowzero" = payload.@"allowzero", - .mutable = payload.mutable, - .@"volatile" = payload.@"volatile", - .size = payload.size, - }); - }, .error_union => { const payload = self.castTag(.error_union).?.data; return Tag.error_union.create(allocator, .{ @@ -623,41 +589,6 @@ pub const Type = struct { while (true) { const t = ty.tag(); switch (t) { - .pointer => { - const payload = ty.castTag(.pointer).?.data; - if (payload.sentinel) |some| switch (payload.size) { - .One, .C => unreachable, - .Many => try writer.print("[*:{}]", .{some.fmtDebug()}), - .Slice => try writer.print("[:{}]", .{some.fmtDebug()}), - } else switch (payload.size) { - .One => try writer.writeAll("*"), - .Many => try writer.writeAll("[*]"), - .C => try writer.writeAll("[*c]"), - .Slice => try writer.writeAll("[]"), - } - if (payload.@"align" != 0 or payload.host_size != 0 or payload.vector_index != .none) { - try writer.print("align({d}", .{payload.@"align"}); - - if (payload.bit_offset != 0 or payload.host_size != 0) { - try writer.print(":{d}:{d}", .{ payload.bit_offset, payload.host_size }); - } - if (payload.vector_index == .runtime) { - try writer.writeAll(":?"); - } else if (payload.vector_index != .none) { - try writer.print(":{d}", .{@enumToInt(payload.vector_index)}); - } - try writer.writeAll(") "); - } - if (payload.@"addrspace" != .generic) { - try writer.print("addrspace(.{s}) ", .{@tagName(payload.@"addrspace")}); - } - if (!payload.mutable) try writer.writeAll("const "); - if (payload.@"volatile") try writer.writeAll("volatile "); - if (payload.@"allowzero" and payload.size != .C) try writer.writeAll("allowzero "); - - ty = payload.pointee_type; - continue; - }, .error_union => { const payload = ty.castTag(.error_union).?.data; try payload.error_set.dump("", .{}, writer); @@ -734,47 +665,6 @@ pub const Type = struct { try print(error_union.payload, writer, mod); }, - .pointer => { - const info = ty.ptrInfo(mod); - - if (info.sentinel) |s| switch (info.size) { - .One, .C => unreachable, - .Many => try writer.print("[*:{}]", .{s.fmtValue(info.pointee_type, mod)}), - .Slice => try writer.print("[:{}]", .{s.fmtValue(info.pointee_type, mod)}), - } else switch (info.size) { - .One => try writer.writeAll("*"), - .Many => try writer.writeAll("[*]"), - .C => try writer.writeAll("[*c]"), - .Slice => try writer.writeAll("[]"), - } - if (info.@"align" != 0 or info.host_size != 0 or info.vector_index != .none) { - if (info.@"align" != 0) { - try writer.print("align({d}", .{info.@"align"}); - } else { - const alignment = info.pointee_type.abiAlignment(mod); - try writer.print("align({d}", .{alignment}); - } - - if (info.bit_offset != 0 or info.host_size != 0) { - try writer.print(":{d}:{d}", .{ info.bit_offset, info.host_size }); - } - if (info.vector_index == .runtime) { - try writer.writeAll(":?"); - } else if (info.vector_index != .none) { - try writer.print(":{d}", .{@enumToInt(info.vector_index)}); - } - try writer.writeAll(") "); - } - if (info.@"addrspace" != .generic) { - try writer.print("addrspace(.{s}) ", .{@tagName(info.@"addrspace")}); - } - if (!info.mutable) try writer.writeAll("const "); - if (info.@"volatile") try writer.writeAll("volatile "); - if (info.@"allowzero" and info.size != .C) try writer.writeAll("allowzero "); - - try print(info.pointee_type, writer, mod); - }, - .error_set => { const names = ty.castTag(.error_set).?.data.names.keys(); try writer.writeAll("error{"); @@ -951,8 +841,8 @@ pub const Type = struct { try writer.writeAll("..."); } try writer.writeAll(") "); - if (fn_info.alignment 
!= 0) { - try writer.print("align({d}) ", .{fn_info.alignment}); + if (fn_info.alignment.toByteUnitsOptional()) |a| { + try writer.print("align({d}) ", .{a}); } if (fn_info.cc != .Unspecified) { try writer.writeAll("callconv(."); @@ -1032,20 +922,6 @@ pub const Type = struct { .error_set_merged, => return true, - // Pointers to zero-bit types still have a runtime address; however, pointers - // to comptime-only types do not, with the exception of function pointers. - .pointer => { - if (ignore_comptime_only) { - return true; - } else if (ty.childType(mod).zigTypeTag(mod) == .Fn) { - return !mod.typeToFunc(ty.childType(mod)).?.is_generic; - } else if (strat == .sema) { - return !(try strat.sema.typeRequiresComptime(ty)); - } else { - return !comptimeOnly(ty, mod); - } - }, - .inferred_alloc_const => unreachable, .inferred_alloc_mut => unreachable, }, @@ -1231,8 +1107,6 @@ pub const Type = struct { .empty_struct_type => false, .none => switch (ty.tag()) { - .pointer => true, - .error_set, .error_set_single, .error_set_inferred, @@ -1410,51 +1284,27 @@ pub const Type = struct { } pub fn ptrAlignmentAdvanced(ty: Type, mod: *Module, opt_sema: ?*Sema) !u32 { - switch (ty.ip_index) { - .none => switch (ty.tag()) { - .pointer => { - const ptr_info = ty.castTag(.pointer).?.data; - if (ptr_info.@"align" != 0) { - return ptr_info.@"align"; - } else if (opt_sema) |sema| { - const res = try ptr_info.pointee_type.abiAlignmentAdvanced(mod, .{ .sema = sema }); - return res.scalar; - } else { - return (ptr_info.pointee_type.abiAlignmentAdvanced(mod, .eager) catch unreachable).scalar; - } - }, - - else => unreachable, - }, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .ptr_type => |ptr_type| { - if (ptr_type.alignment != 0) { - return @intCast(u32, ptr_type.alignment); - } else if (opt_sema) |sema| { - const res = try ptr_type.elem_type.toType().abiAlignmentAdvanced(mod, .{ .sema = sema }); - return res.scalar; - } else { - return (ptr_type.elem_type.toType().abiAlignmentAdvanced(mod, .eager) catch unreachable).scalar; - } - }, - .opt_type => |child| return child.toType().ptrAlignmentAdvanced(mod, opt_sema), - else => unreachable, + return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .ptr_type => |ptr_type| { + if (ptr_type.alignment.toByteUnitsOptional()) |a| { + return @intCast(u32, a); + } else if (opt_sema) |sema| { + const res = try ptr_type.elem_type.toType().abiAlignmentAdvanced(mod, .{ .sema = sema }); + return res.scalar; + } else { + return (ptr_type.elem_type.toType().abiAlignmentAdvanced(mod, .eager) catch unreachable).scalar; + } }, - } + .opt_type => |child| child.toType().ptrAlignmentAdvanced(mod, opt_sema), + else => unreachable, + }; } pub fn ptrAddressSpace(ty: Type, mod: *const Module) std.builtin.AddressSpace { - return switch (ty.ip_index) { - .none => switch (ty.tag()) { - .pointer => ty.castTag(.pointer).?.data.@"addrspace", - - else => unreachable, - }, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .ptr_type => |ptr_type| ptr_type.address_space, - .opt_type => |child| mod.intern_pool.indexToKey(child).ptr_type.address_space, - else => unreachable, - }, + return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .ptr_type => |ptr_type| ptr_type.address_space, + .opt_type => |child| mod.intern_pool.indexToKey(child).ptr_type.address_space, + else => unreachable, }; } @@ -1504,7 +1354,6 @@ pub const Type = struct { switch (ty.ip_index) { .empty_struct_type => return AbiAlignmentAdvanced{ .scalar = 0 }, .none => switch (ty.tag()) { - .pointer 
=> return AbiAlignmentAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }, // TODO revisit this when we have the concept of the error tag type .error_set_inferred, @@ -1541,10 +1390,11 @@ pub const Type = struct { .opt_type => return abiAlignmentAdvancedOptional(ty, mod, strat), .error_union_type => return abiAlignmentAdvancedErrorUnion(ty, mod, strat), // represents machine code; not a pointer - .func_type => |func_type| { - const alignment = @intCast(u32, func_type.alignment); - if (alignment != 0) return AbiAlignmentAdvanced{ .scalar = alignment }; - return AbiAlignmentAdvanced{ .scalar = target_util.defaultFunctionAlignment(target) }; + .func_type => |func_type| return AbiAlignmentAdvanced{ + .scalar = if (func_type.alignment.toByteUnitsOptional()) |a| + @intCast(u32, a) + else + target_util.defaultFunctionAlignment(target), }, .simple_type => |t| switch (t) { @@ -1882,11 +1732,6 @@ pub const Type = struct { .inferred_alloc_const => unreachable, .inferred_alloc_mut => unreachable, - .pointer => switch (ty.castTag(.pointer).?.data.size) { - .Slice => return AbiSizeAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) * 2 }, - else => return AbiSizeAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) }, - }, - // TODO revisit this when we have the concept of the error tag type .error_set_inferred, .error_set, @@ -2201,11 +2046,6 @@ pub const Type = struct { .inferred_alloc_const => unreachable, .inferred_alloc_mut => unreachable, - .pointer => switch (ty.castTag(.pointer).?.data.size) { - .Slice => return target.ptrBitWidth() * 2, - else => return target.ptrBitWidth(), - }, - .error_set, .error_set_single, .error_set_inferred, @@ -2384,8 +2224,6 @@ pub const Type = struct { .inferred_alloc_mut, => true, - .pointer => ty.castTag(.pointer).?.data.size == .One, - else => false, }, else => return switch (mod.intern_pool.indexToKey(ty.ip_index)) { @@ -2408,8 +2246,6 @@ pub const Type = struct { .inferred_alloc_mut, => .One, - .pointer => ty.castTag(.pointer).?.data.size, - else => null, }, else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { @@ -2421,10 +2257,7 @@ pub const Type = struct { pub fn isSlice(ty: Type, mod: *const Module) bool { return switch (ty.ip_index) { - .none => switch (ty.tag()) { - .pointer => ty.castTag(.pointer).?.data.size == .Slice, - else => false, - }, + .none => false, else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { .ptr_type => |ptr_type| ptr_type.size == .Slice, else => false, @@ -2432,50 +2265,14 @@ pub const Type = struct { }; } - pub const SlicePtrFieldTypeBuffer = union { - pointer: Payload.Pointer, - }; - - pub fn slicePtrFieldType(ty: Type, buffer: *SlicePtrFieldTypeBuffer, mod: *const Module) Type { - switch (ty.ip_index) { - .none => switch (ty.tag()) { - .pointer => { - const payload = ty.castTag(.pointer).?.data; - assert(payload.size == .Slice); - - buffer.* = .{ - .pointer = .{ - .data = .{ - .pointee_type = payload.pointee_type, - .sentinel = payload.sentinel, - .@"align" = payload.@"align", - .@"addrspace" = payload.@"addrspace", - .bit_offset = payload.bit_offset, - .host_size = payload.host_size, - .vector_index = payload.vector_index, - .@"allowzero" = payload.@"allowzero", - .mutable = payload.mutable, - .@"volatile" = payload.@"volatile", - .size = .Many, - }, - }, - }; - return Type.initPayload(&buffer.pointer.base); - }, - - else => unreachable, - }, - else => return mod.intern_pool.slicePtrType(ty.ip_index).toType(), - } + pub fn slicePtrFieldType(ty: Type, mod: *const Module) Type { + return 
mod.intern_pool.slicePtrType(ty.ip_index).toType(); } pub fn isConstPtr(ty: Type, mod: *const Module) bool { return switch (ty.ip_index) { - .none => switch (ty.tag()) { - .pointer => !ty.castTag(.pointer).?.data.mutable, - else => false, - }, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .none => false, + else => return switch (mod.intern_pool.indexToKey(ty.ip_index)) { .ptr_type => |ptr_type| ptr_type.is_const, else => false, }, @@ -2488,10 +2285,7 @@ pub const Type = struct { pub fn isVolatilePtrIp(ty: Type, ip: InternPool) bool { return switch (ty.ip_index) { - .none => switch (ty.tag()) { - .pointer => ty.castTag(.pointer).?.data.@"volatile", - else => false, - }, + .none => false, else => switch (ip.indexToKey(ty.ip_index)) { .ptr_type => |ptr_type| ptr_type.is_volatile, else => false, @@ -2501,12 +2295,10 @@ pub const Type = struct { pub fn isAllowzeroPtr(ty: Type, mod: *const Module) bool { return switch (ty.ip_index) { - .none => switch (ty.tag()) { - .pointer => ty.castTag(.pointer).?.data.@"allowzero", - else => ty.zigTypeTag(mod) == .Optional, - }, + .none => false, else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { .ptr_type => |ptr_type| ptr_type.is_allowzero, + .opt_type => true, else => false, }, }; @@ -2514,10 +2306,7 @@ pub const Type = struct { pub fn isCPtr(ty: Type, mod: *const Module) bool { return switch (ty.ip_index) { - .none => switch (ty.tag()) { - .pointer => ty.castTag(.pointer).?.data.size == .C, - else => false, - }, + .none => false, else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { .ptr_type => |ptr_type| ptr_type.size == .C, else => false, @@ -2526,16 +2315,9 @@ pub const Type = struct { } pub fn isPtrAtRuntime(ty: Type, mod: *const Module) bool { - switch (ty.ip_index) { - .none => switch (ty.tag()) { - .pointer => switch (ty.castTag(.pointer).?.data.size) { - .Slice => return false, - .One, .Many, .C => return true, - }, - - else => return false, - }, - else => return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + return switch (ty.ip_index) { + .none => false, + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { .ptr_type => |ptr_type| switch (ptr_type.size) { .Slice => false, .One, .Many, .C => true, @@ -2549,7 +2331,7 @@ pub const Type = struct { }, else => false, }, - } + }; } /// For pointer-like optionals, returns true, otherwise returns the allowzero property @@ -2563,47 +2345,43 @@ pub const Type = struct { /// See also `isPtrLikeOptional`. pub fn optionalReprIsPayload(ty: Type, mod: *const Module) bool { - if (ty.ip_index != .none) return switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .opt_type => |child| switch (child.toType().zigTypeTag(mod)) { - .Pointer => { - const info = child.toType().ptrInfo(mod); - switch (info.size) { - .C => return false, - else => return !info.@"allowzero", - } + return switch (ty.ip_index) { + .none => false, + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .opt_type => |child| switch (child.toType().zigTypeTag(mod)) { + .Pointer => { + const info = child.toType().ptrInfo(mod); + return switch (info.size) { + .C => false, + else => !info.@"allowzero", + }; + }, + .ErrorSet => true, + else => false, }, - .ErrorSet => true, else => false, }, - else => false, }; - switch (ty.tag()) { - .pointer => return ty.castTag(.pointer).?.data.size == .C, - - else => return false, - } } /// Returns true if the type is optional and would be lowered to a single pointer /// address value, using 0 for null. Note that this returns true for C pointers. 
/// This function must be kept in sync with `Sema.typePtrOrOptionalPtrTy`. pub fn isPtrLikeOptional(ty: Type, mod: *const Module) bool { - if (ty.ip_index != .none) return switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .ptr_type => |ptr_type| ptr_type.size == .C, - .opt_type => |child| switch (mod.intern_pool.indexToKey(child)) { - .ptr_type => |ptr_type| switch (ptr_type.size) { - .Slice, .C => false, - .Many, .One => !ptr_type.is_allowzero, + return switch (ty.ip_index) { + .none => false, + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .ptr_type => |ptr_type| ptr_type.size == .C, + .opt_type => |child| switch (mod.intern_pool.indexToKey(child)) { + .ptr_type => |ptr_type| switch (ptr_type.size) { + .Slice, .C => false, + .Many, .One => !ptr_type.is_allowzero, + }, + else => false, }, else => false, }, - else => false, }; - switch (ty.tag()) { - .pointer => return ty.castTag(.pointer).?.data.size == .C, - - else => return false, - } } /// For *[N]T, returns [N]T. @@ -2614,14 +2392,7 @@ pub const Type = struct { } pub fn childTypeIp(ty: Type, ip: InternPool) Type { - return switch (ty.ip_index) { - .none => switch (ty.tag()) { - .pointer => ty.castTag(.pointer).?.data.pointee_type, - - else => unreachable, - }, - else => ip.childType(ty.ip_index).toType(), - }; + return ip.childType(ty.ip_index).toType(); } /// For *[N]T, returns T. @@ -2634,34 +2405,19 @@ pub const Type = struct { /// For []T, returns T. /// For anyframe->T, returns T. pub fn elemType2(ty: Type, mod: *const Module) Type { - return switch (ty.ip_index) { - .none => switch (ty.tag()) { - .pointer => { - const info = ty.castTag(.pointer).?.data; - const child_ty = info.pointee_type; - if (info.size == .One) { - return child_ty.shallowElemType(mod); - } else { - return child_ty; - } - }, - - else => unreachable, + return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .ptr_type => |ptr_type| switch (ptr_type.size) { + .One => ptr_type.elem_type.toType().shallowElemType(mod), + .Many, .C, .Slice => ptr_type.elem_type.toType(), }, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .ptr_type => |ptr_type| switch (ptr_type.size) { - .One => ptr_type.elem_type.toType().shallowElemType(mod), - .Many, .C, .Slice => ptr_type.elem_type.toType(), - }, - .anyframe_type => |child| { - assert(child != .none); - return child.toType(); - }, - .vector_type => |vector_type| vector_type.child.toType(), - .array_type => |array_type| array_type.child.toType(), - .opt_type => |child| mod.intern_pool.childType(child).toType(), - else => unreachable, + .anyframe_type => |child| { + assert(child != .none); + return child.toType(); }, + .vector_type => |vector_type| vector_type.child.toType(), + .array_type => |array_type| array_type.child.toType(), + .opt_type => |child| mod.intern_pool.childType(child).toType(), + else => unreachable, }; } @@ -2683,21 +2439,13 @@ pub const Type = struct { /// Asserts that the type is an optional. /// Note that for C pointers this returns the type unmodified. 
pub fn optionalChild(ty: Type, mod: *const Module) Type { - return switch (ty.ip_index) { - .none => switch (ty.tag()) { - .pointer, // here we assume it is a C pointer - => return ty, - - else => unreachable, - }, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .opt_type => |child| child.toType(), - .ptr_type => |ptr_type| b: { - assert(ptr_type.size == .C); - break :b ty; - }, - else => unreachable, + return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .opt_type => |child| child.toType(), + .ptr_type => |ptr_type| b: { + assert(ptr_type.size == .C); + break :b ty; }, + else => unreachable, }; } @@ -2921,23 +2669,16 @@ pub const Type = struct { /// Asserts the type is an array, pointer or vector. pub fn sentinel(ty: Type, mod: *const Module) ?Value { - return switch (ty.ip_index) { - .none => switch (ty.tag()) { - .pointer => ty.castTag(.pointer).?.data.sentinel, - - else => unreachable, - }, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .vector_type, - .struct_type, - .anon_struct_type, - => null, + return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .vector_type, + .struct_type, + .anon_struct_type, + => null, - .array_type => |t| if (t.sentinel != .none) t.sentinel.toValue() else null, - .ptr_type => |t| if (t.sentinel != .none) t.sentinel.toValue() else null, + .array_type => |t| if (t.sentinel != .none) t.sentinel.toValue() else null, + .ptr_type => |t| if (t.sentinel != .none) t.sentinel.toValue() else null, - else => unreachable, - }, + else => unreachable, }; } @@ -3196,7 +2937,6 @@ pub const Type = struct { .error_set, .error_set_merged, .error_set_inferred, - .pointer, => return null, .inferred_alloc_const => unreachable, @@ -3400,15 +3140,6 @@ pub const Type = struct { .inferred_alloc_mut => unreachable, .inferred_alloc_const => unreachable, - .pointer => { - const child_ty = ty.childType(mod); - if (child_ty.zigTypeTag(mod) == .Fn) { - return false; - } else { - return child_ty.comptimeOnly(mod); - } - }, - .error_union => return ty.errorUnionPayload().comptimeOnly(mod), }, else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { @@ -4096,7 +3827,6 @@ pub const Type = struct { inferred_alloc_const, // See last_no_payload_tag below. // After this, the tag requires a payload. - pointer, error_union, error_set, error_set_single, @@ -4117,7 +3847,6 @@ pub const Type = struct { .error_set_inferred => Payload.ErrorSetInferred, .error_set_merged => Payload.ErrorSetMerged, - .pointer => Payload.Pointer, .error_union => Payload.ErrorUnion, .error_set_single => Payload.Name, }; @@ -4230,10 +3959,8 @@ pub const Type = struct { data: *Module.Fn.InferredErrorSet, }; + /// TODO: remove this data structure since we have `InternPool.Key.PtrType`. pub const Pointer = struct { - pub const base_tag = Tag.pointer; - - base: Payload = Payload{ .tag = base_tag }, data: Data, pub const Data = struct { @@ -4270,7 +3997,7 @@ pub const Type = struct { return .{ .pointee_type = p.elem_type.toType(), .sentinel = if (p.sentinel != .none) p.sentinel.toValue() else null, - .@"align" = @intCast(u32, p.alignment), + .@"align" = @intCast(u32, p.alignment.toByteUnits(0)), .@"addrspace" = p.address_space, .bit_offset = p.bit_offset, .host_size = p.host_size, @@ -4368,11 +4095,11 @@ pub const Type = struct { pub const err_int = Type.u16; pub fn ptr(arena: Allocator, mod: *Module, data: Payload.Pointer.Data) !Type { - var d = data; + // TODO: update callsites of this function to directly call mod.ptrType + // and then delete this function. 
+ _ = arena; - if (d.size == .C) { - d.@"allowzero" = true; - } + var d = data; // Canonicalize non-zero alignment. If it matches the ABI alignment of the pointee // type, we change it to 0 here. If this causes an assertion trip because the @@ -4396,32 +4123,19 @@ pub const Type = struct { } } - ip: { - if (d.pointee_type.ip_index == .none) break :ip; - - if (d.sentinel) |s| { - switch (s.ip_index) { - .none, .null_value => break :ip, - else => {}, - } - } - - return mod.ptrType(.{ - .elem_type = d.pointee_type.ip_index, - .sentinel = if (d.sentinel) |s| s.ip_index else .none, - .alignment = d.@"align", - .host_size = d.host_size, - .bit_offset = d.bit_offset, - .vector_index = d.vector_index, - .size = d.size, - .is_const = !d.mutable, - .is_volatile = d.@"volatile", - .is_allowzero = d.@"allowzero", - .address_space = d.@"addrspace", - }); - } - - return Type.Tag.pointer.create(arena, d); + return mod.ptrType(.{ + .elem_type = d.pointee_type.ip_index, + .sentinel = if (d.sentinel) |s| s.ip_index else .none, + .alignment = InternPool.Alignment.fromByteUnits(d.@"align"), + .host_size = d.host_size, + .bit_offset = d.bit_offset, + .vector_index = d.vector_index, + .size = d.size, + .is_const = !d.mutable, + .is_volatile = d.@"volatile", + .is_allowzero = d.@"allowzero", + .address_space = d.@"addrspace", + }); } pub fn array( diff --git a/src/value.zig b/src/value.zig index 35d144f912..3100496085 100644 --- a/src/value.zig +++ b/src/value.zig @@ -1844,8 +1844,7 @@ pub const Value = struct { return false; } - var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined; - const ptr_ty = ty.slicePtrFieldType(&ptr_buf, mod); + const ptr_ty = ty.slicePtrFieldType(mod); return eqlAdvanced(a_payload.ptr, ptr_ty, b_payload.ptr, ptr_ty, mod, opt_sema); }, @@ -2001,8 +2000,7 @@ pub const Value = struct { return false; } - var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined; - const ptr_ty = ty.slicePtrFieldType(&ptr_buf, mod); + const ptr_ty = ty.slicePtrFieldType(mod); const a_ptr = switch (a_ty.ptrSize(mod)) { .Slice => a.slicePtr(), .One => a, @@ -2121,8 +2119,7 @@ pub const Value = struct { .Bool, .Int, .ComptimeInt, .Pointer => switch (val.tag()) { .slice => { const slice = val.castTag(.slice).?.data; - var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined; - const ptr_ty = ty.slicePtrFieldType(&ptr_buf, mod); + const ptr_ty = ty.slicePtrFieldType(mod); hash(slice.ptr, ptr_ty, hasher, mod); hash(slice.len, Type.usize, hasher, mod); }, @@ -2253,8 +2250,7 @@ pub const Value = struct { .Bool, .Int, .ComptimeInt, .Pointer, .Fn => switch (val.tag()) { .slice => { const slice = val.castTag(.slice).?.data; - var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined; - const ptr_ty = ty.slicePtrFieldType(&ptr_buf, mod); + const ptr_ty = ty.slicePtrFieldType(mod); slice.ptr.hashUncoerced(ptr_ty, hasher, mod); }, else => val.hashPtr(hasher, mod), -- cgit v1.2.3 From 9ff514b6a35b7201f45f8bff31c61b4f8cfa7a7a Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sat, 20 May 2023 12:09:07 -0700 Subject: compiler: move error union types and error set types to InternPool One change worth noting in this commit is that `module.global_error_set` is no longer kept strictly up-to-date. The previous code reserved integer error values when dealing with error set types, but this is no longer needed because the integer values are not needed for semantic analysis unless `@errorToInt` or `@intToError` are used and therefore may be assigned lazily. 
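To picture the lazy strategy described above, here is a minimal standalone sketch (the `LazyErrorTable` name and shape are hypothetical; the compiler's actual `module.global_error_set` machinery differs): integer error values simply fall out of insertion order the first time a name is requested, rather than being reserved whenever an error set type is created.

    const std = @import("std");

    /// Hands out integer error values on first use, in the spirit of
    /// assigning them lazily only when `@errorToInt` is actually called.
    const LazyErrorTable = struct {
        map: std.StringArrayHashMapUnmanaged(void) = .{},

        fn errorToInt(table: *LazyErrorTable, gpa: std.mem.Allocator, name: []const u8) !u16 {
            const gop = try table.map.getOrPut(gpa, name);
            // Insertion order determines the value; 0 stays reserved.
            return @intCast(u16, gop.index + 1);
        }
    };

    test "a value is assigned once, on first request" {
        const gpa = std.testing.allocator;
        var table: LazyErrorTable = .{};
        defer table.map.deinit(gpa);

        const a = try table.errorToInt(gpa, "OutOfMemory");
        const b = try table.errorToInt(gpa, "FileNotFound");
        try std.testing.expectEqual(a, try table.errorToInt(gpa, "OutOfMemory"));
        try std.testing.expect(a != b);
    }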
---
 src/Air.zig | 2 +-
 src/InternPool.zig | 176 +++++-
 src/Liveness.zig | 2 +-
 src/Liveness/Verify.zig | 2 +-
 src/Module.zig | 169 +++---
 src/Sema.zig | 821 +++++++++++++--------------
 src/TypedValue.zig | 6 +-
 src/arch/aarch64/CodeGen.zig | 18 +-
 src/arch/arm/CodeGen.zig | 18 +-
 src/arch/sparc64/CodeGen.zig | 20 +-
 src/arch/wasm/CodeGen.zig | 41 +-
 src/arch/x86_64/CodeGen.zig | 28 +-
 src/codegen.zig | 12 +-
 src/codegen/c.zig | 39 +-
 src/codegen/c/type.zig | 4 +-
 src/codegen/llvm.zig | 74 +--
 src/codegen/spirv.zig | 13 +-
 src/link/Dwarf.zig | 54 +-
 src/print_air.zig | 1 -
 src/type.zig | 1259 +++++++++++++-----------------------
 src/value.zig | 18 +-
 21 files changed, 1195 insertions(+), 1582 deletions(-)

(limited to 'src/arch')

diff --git a/src/Air.zig b/src/Air.zig
index 09f8d6c9e2..6673a37fb6 100644
--- a/src/Air.zig
+++ b/src/Air.zig
@@ -1411,7 +1411,7 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index, ip: InternPool) Type {
         .@"try" => {
             const err_union_ty = air.typeOf(datas[inst].pl_op.operand, ip);
-            return err_union_ty.errorUnionPayload();
+            return ip.indexToKey(err_union_ty.ip_index).error_union_type.payload_type.toType();
         },

         .work_item_id,
diff --git a/src/InternPool.zig b/src/InternPool.zig
index 81035bffc5..79506c4404 100644
--- a/src/InternPool.zig
+++ b/src/InternPool.zig
@@ -34,6 +34,14 @@ allocated_unions: std.SegmentedList(Module.Union, 0) = .{},
 /// When a Union object is freed from `allocated_unions`, it is pushed into this stack.
 unions_free_list: std.ArrayListUnmanaged(Module.Union.Index) = .{},

+/// InferredErrorSet objects are stored in this data structure because:
+/// * They contain pointers such as the errors map and the set of other inferred error sets.
+/// * They need to be mutated after creation.
+allocated_inferred_error_sets: std.SegmentedList(Module.Fn.InferredErrorSet, 0) = .{},
+/// When an InferredErrorSet object is freed from `allocated_inferred_error_sets`, it is
+/// pushed into this stack.
+inferred_error_sets_free_list: std.ArrayListUnmanaged(Module.Fn.InferredErrorSet.Index) = .{},
+
 /// Some types such as enums, structs, and unions need to store mappings from field names
 /// to field index, or value to field index. In such cases, they will store the underlying
 /// field names and values directly, relying on one of these maps, stored separately,
@@ -113,6 +121,12 @@ pub const NullTerminatedString = enum(u32) {
             return std.hash.uint32(@enumToInt(a));
         }
     };
+
+    /// Compare based on integer value alone, ignoring the string contents.
+    pub fn indexLessThan(ctx: void, a: NullTerminatedString, b: NullTerminatedString) bool {
+        _ = ctx;
+        return @enumToInt(a) < @enumToInt(b);
+    }
 };

 /// An index into `string_bytes` which might be `none`.
@@ -135,10 +149,7 @@ pub const Key = union(enum) {
     /// `anyframe->T`. The payload is the child type, which may be `none` to indicate
     /// `anyframe`.
     anyframe_type: Index,
-    error_union_type: struct {
-        error_set_type: Index,
-        payload_type: Index,
-    },
+    error_union_type: ErrorUnionType,
     simple_type: SimpleType,
     /// This represents a struct that has been explicitly declared in source code,
     /// or was created with `@Type`. It is unique and based on a declaration.
@@ -152,6 +163,8 @@ pub const Key = union(enum) {
     opaque_type: OpaqueType,
     enum_type: EnumType,
     func_type: FuncType,
+    error_set_type: ErrorSetType,
+    inferred_error_set_type: Module.Fn.InferredErrorSet.Index,
     /// Typed `undefined`. This will never be `none`; untyped `undefined` is represented
     /// via `simple_value` and has a named `Index` tag for it.
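The `allocated_inferred_error_sets` list paired with `inferred_error_sets_free_list` above follows the same pool-with-free-list pattern already used for structs and unions: a `std.SegmentedList` never moves existing items, so pointers into it stay stable, and freed slots are recycled through a free list. A minimal sketch of that pattern, with hypothetical names and a simplified `u32` index:

    const std = @import("std");

    /// Pool of mutable objects with stable addresses.
    fn Pool(comptime T: type) type {
        return struct {
            allocated: std.SegmentedList(T, 0) = .{},
            free_list: std.ArrayListUnmanaged(u32) = .{},

            const Self = @This();

            fn create(self: *Self, gpa: std.mem.Allocator, value: T) !u32 {
                // Reuse a previously freed slot when one is available.
                if (self.free_list.popOrNull()) |index| {
                    self.allocated.at(index).* = value;
                    return index;
                }
                const index = @intCast(u32, self.allocated.len);
                try self.allocated.append(gpa, value);
                return index;
            }

            fn destroy(self: *Self, gpa: std.mem.Allocator, index: u32) void {
                // Recycling rather than deallocating keeps all other
                // pointers into `allocated` valid.
                self.free_list.append(gpa, index) catch {
                    // On OOM the slot is merely leaked, never corrupted.
                };
            }
        };
    }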
@@ -183,6 +196,26 @@ pub const Key = union(enum) { pub const IntType = std.builtin.Type.Int; + pub const ErrorUnionType = struct { + error_set_type: Index, + payload_type: Index, + }; + + pub const ErrorSetType = struct { + /// Set of error names, sorted by null terminated string index. + names: []const NullTerminatedString, + /// This is ignored by `get` but will always be provided by `indexToKey`. + names_map: OptionalMapIndex = .none, + + /// Look up field index based on field name. + pub fn nameIndex(self: ErrorSetType, ip: *const InternPool, name: NullTerminatedString) ?u32 { + const map = &ip.maps.items[@enumToInt(self.names_map.unwrap().?)]; + const adapter: NullTerminatedString.Adapter = .{ .strings = self.names }; + const field_index = map.getIndexAdapted(name, adapter) orelse return null; + return @intCast(u32, field_index); + } + }; + pub const PtrType = struct { elem_type: Index, sentinel: Index = .none, @@ -507,6 +540,7 @@ pub const Key = union(enum) { .un, .undef, .enum_tag, + .inferred_error_set_type, => |info| std.hash.autoHash(hasher, info), .opaque_type => |opaque_type| std.hash.autoHash(hasher, opaque_type.decl), @@ -535,7 +569,7 @@ pub const Key = union(enum) { .ptr => |ptr| { std.hash.autoHash(hasher, ptr.ty); // Int-to-ptr pointers are hashed separately than decl-referencing pointers. - // This is sound due to pointer province rules. + // This is sound due to pointer provenance rules. switch (ptr.addr) { .int => |int| std.hash.autoHash(hasher, int), .decl => @panic("TODO"), @@ -547,6 +581,10 @@ pub const Key = union(enum) { for (aggregate.fields) |field| std.hash.autoHash(hasher, field); }, + .error_set_type => |error_set_type| { + for (error_set_type.names) |elem| std.hash.autoHash(hasher, elem); + }, + .anon_struct_type => |anon_struct_type| { for (anon_struct_type.types) |elem| std.hash.autoHash(hasher, elem); for (anon_struct_type.values) |elem| std.hash.autoHash(hasher, elem); @@ -726,6 +764,14 @@ pub const Key = union(enum) { std.mem.eql(Index, a_info.values, b_info.values) and std.mem.eql(NullTerminatedString, a_info.names, b_info.names); }, + .error_set_type => |a_info| { + const b_info = b.error_set_type; + return std.mem.eql(NullTerminatedString, a_info.names, b_info.names); + }, + .inferred_error_set_type => |a_info| { + const b_info = b.inferred_error_set_type; + return a_info == b_info; + }, .func_type => |a_info| { const b_info = b.func_type; @@ -752,6 +798,8 @@ pub const Key = union(enum) { .opt_type, .anyframe_type, .error_union_type, + .error_set_type, + .inferred_error_set_type, .simple_type, .struct_type, .union_type, @@ -1207,8 +1255,14 @@ pub const Tag = enum(u8) { /// If the child type is `none`, the type is `anyframe`. type_anyframe, /// An error union type. - /// data is payload to ErrorUnion. + /// data is payload to `Key.ErrorUnionType`. type_error_union, + /// An error set type. + /// data is payload to `ErrorSet`. + type_error_set, + /// The inferred error set type of a function. + /// data is `Module.Fn.InferredErrorSet.Index`. + type_inferred_error_set, /// An enum type with auto-numbered tag values. /// The enum is exhaustive. /// data is payload index to `EnumAuto`. @@ -1355,6 +1409,12 @@ pub const Tag = enum(u8) { aggregate, }; +/// Trailing: +/// 0. name: NullTerminatedString for each names_len +pub const ErrorSet = struct { + names_len: u32, +}; + /// Trailing: /// 0. 
param_type: Index for each params_len pub const TypeFunction = struct { @@ -1539,11 +1599,6 @@ pub const Array = struct { } }; -pub const ErrorUnion = struct { - error_set_type: Index, - payload_type: Index, -}; - /// Trailing: /// 0. field name: NullTerminatedString for each fields_len; declaration order /// 1. tag value: Index for each fields_len; declaration order @@ -1719,6 +1774,9 @@ pub fn deinit(ip: *InternPool, gpa: Allocator) void { ip.unions_free_list.deinit(gpa); ip.allocated_unions.deinit(gpa); + ip.inferred_error_sets_free_list.deinit(gpa); + ip.allocated_inferred_error_sets.deinit(gpa); + for (ip.maps.items) |*map| map.deinit(gpa); ip.maps.deinit(gpa); @@ -1798,7 +1856,18 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { .type_optional => .{ .opt_type = @intToEnum(Index, data) }, .type_anyframe => .{ .anyframe_type = @intToEnum(Index, data) }, - .type_error_union => @panic("TODO"), + .type_error_union => .{ .error_union_type = ip.extraData(Key.ErrorUnionType, data) }, + .type_error_set => { + const error_set = ip.extraDataTrail(ErrorSet, data); + const names_len = error_set.data.names_len; + const names = ip.extra.items[error_set.end..][0..names_len]; + return .{ .error_set_type = .{ + .names = @ptrCast([]const NullTerminatedString, names), + } }; + }, + .type_inferred_error_set => .{ + .inferred_error_set_type = @intToEnum(Module.Fn.InferredErrorSet.Index, data), + }, .type_opaque => .{ .opaque_type = ip.extraData(Key.OpaqueType, data) }, .type_struct => { @@ -2179,11 +2248,29 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .error_union_type => |error_union_type| { ip.items.appendAssumeCapacity(.{ .tag = .type_error_union, - .data = try ip.addExtra(gpa, ErrorUnion{ - .error_set_type = error_union_type.error_set_type, - .payload_type = error_union_type.payload_type, + .data = try ip.addExtra(gpa, error_union_type), + }); + }, + .error_set_type => |error_set_type| { + assert(error_set_type.names_map == .none); + assert(std.sort.isSorted(NullTerminatedString, error_set_type.names, {}, NullTerminatedString.indexLessThan)); + const names_map = try ip.addMap(gpa); + try addStringsToMap(ip, gpa, names_map, error_set_type.names); + const names_len = @intCast(u32, error_set_type.names.len); + try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(ErrorSet).Struct.fields.len + names_len); + ip.items.appendAssumeCapacity(.{ + .tag = .type_error_set, + .data = ip.addExtraAssumeCapacity(ErrorSet{ + .names_len = names_len, }), }); + ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, error_set_type.names)); + }, + .inferred_error_set_type => |ies_index| { + ip.items.appendAssumeCapacity(.{ + .tag = .type_inferred_error_set, + .data = @enumToInt(ies_index), + }); }, .simple_type => |simple_type| { ip.items.appendAssumeCapacity(.{ @@ -3192,12 +3279,26 @@ pub fn indexToFuncType(ip: InternPool, val: Index) ?Key.FuncType { } } +pub fn indexToInferredErrorSetType(ip: InternPool, val: Index) Module.Fn.InferredErrorSet.OptionalIndex { + assert(val != .none); + const tags = ip.items.items(.tag); + if (tags[@enumToInt(val)] != .type_inferred_error_set) return .none; + const datas = ip.items.items(.data); + return @intToEnum(Module.Fn.InferredErrorSet.Index, datas[@enumToInt(val)]).toOptional(); +} + pub fn isOptionalType(ip: InternPool, ty: Index) bool { const tags = ip.items.items(.tag); if (ty == .none) return false; return tags[@enumToInt(ty)] == .type_optional; } +pub fn isInferredErrorSetType(ip: InternPool, ty: Index) bool { + const tags = 
ip.items.items(.tag); + assert(ty != .none); + return tags[@enumToInt(ty)] == .type_inferred_error_set; +} + pub fn dump(ip: InternPool) void { dumpFallible(ip, std.heap.page_allocator) catch return; } @@ -3258,7 +3359,12 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void { .type_slice => 0, .type_optional => 0, .type_anyframe => 0, - .type_error_union => @sizeOf(ErrorUnion), + .type_error_union => @sizeOf(Key.ErrorUnionType), + .type_error_set => b: { + const info = ip.extraData(ErrorSet, data); + break :b @sizeOf(ErrorSet) + (@sizeOf(u32) * info.names_len); + }, + .type_inferred_error_set => @sizeOf(Module.Fn.InferredErrorSet), .type_enum_explicit, .type_enum_nonexhaustive => @sizeOf(EnumExplicit), .type_enum_auto => @sizeOf(EnumAuto), .type_opaque => @sizeOf(Key.OpaqueType), @@ -3359,6 +3465,14 @@ pub fn unionPtr(ip: *InternPool, index: Module.Union.Index) *Module.Union { return ip.allocated_unions.at(@enumToInt(index)); } +pub fn inferredErrorSetPtr(ip: *InternPool, index: Module.Fn.InferredErrorSet.Index) *Module.Fn.InferredErrorSet { + return ip.allocated_inferred_error_sets.at(@enumToInt(index)); +} + +pub fn inferredErrorSetPtrConst(ip: InternPool, index: Module.Fn.InferredErrorSet.Index) *const Module.Fn.InferredErrorSet { + return ip.allocated_inferred_error_sets.at(@enumToInt(index)); +} + pub fn createStruct( ip: *InternPool, gpa: Allocator, @@ -3397,6 +3511,25 @@ pub fn destroyUnion(ip: *InternPool, gpa: Allocator, index: Module.Union.Index) }; } +pub fn createInferredErrorSet( + ip: *InternPool, + gpa: Allocator, + initialization: Module.Fn.InferredErrorSet, +) Allocator.Error!Module.Fn.InferredErrorSet.Index { + if (ip.inferred_error_sets_free_list.popOrNull()) |index| return index; + const ptr = try ip.allocated_inferred_error_sets.addOne(gpa); + ptr.* = initialization; + return @intToEnum(Module.Fn.InferredErrorSet.Index, ip.allocated_inferred_error_sets.len - 1); +} + +pub fn destroyInferredErrorSet(ip: *InternPool, gpa: Allocator, index: Module.Fn.InferredErrorSet.Index) void { + ip.inferredErrorSetPtr(index).* = undefined; + ip.inferred_error_sets_free_list.append(gpa, index) catch { + // In order to keep `destroyInferredErrorSet` a non-fallible function, we ignore memory + // allocation failures here, instead leaking the InferredErrorSet until garbage collection. + }; +} + pub fn getOrPutString( ip: *InternPool, gpa: Allocator, @@ -3459,3 +3592,14 @@ pub fn aggregateTypeLen(ip: InternPool, ty: Index) u64 { else => unreachable, }; } + +pub fn isNoReturn(ip: InternPool, ty: InternPool.Index) bool { + return switch (ty) { + .noreturn_type => true, + else => switch (ip.indexToKey(ty)) { + .error_set_type => |error_set_type| error_set_type.names.len == 0, + .enum_type => |enum_type| enum_type.names.len == 0, + else => false, + }, + }; +} diff --git a/src/Liveness.zig b/src/Liveness.zig index da705cfab8..856123fa9d 100644 --- a/src/Liveness.zig +++ b/src/Liveness.zig @@ -1416,7 +1416,7 @@ fn analyzeInstBlock( // If the block is noreturn, block deaths not only aren't useful, they're impossible to // find: there could be more stuff alive after the block than before it! 
- if (!a.air.getRefType(ty_pl.ty).isNoReturn()) { + if (!a.intern_pool.isNoReturn(a.air.getRefType(ty_pl.ty).ip_index)) { // The block kills the difference in the live sets const block_scope = data.block_scopes.get(inst).?; const num_deaths = data.live_set.count() - block_scope.live_set.count(); diff --git a/src/Liveness/Verify.zig b/src/Liveness/Verify.zig index dbdbf32174..923e6f5658 100644 --- a/src/Liveness/Verify.zig +++ b/src/Liveness/Verify.zig @@ -453,7 +453,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void { for (block_liveness.deaths) |death| try self.verifyDeath(inst, death); - if (block_ty.isNoReturn()) { + if (ip.isNoReturn(block_ty.toIntern())) { assert(!self.blocks.contains(inst)); } else { var live = self.blocks.fetchRemove(inst).?.value; diff --git a/src/Module.zig b/src/Module.zig index 5cd0d237b4..70b08ea3a9 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -960,38 +960,6 @@ pub const EmitH = struct { fwd_decl: ArrayListUnmanaged(u8) = .{}, }; -/// Represents the data that an explicit error set syntax provides. -pub const ErrorSet = struct { - /// The Decl that corresponds to the error set itself. - owner_decl: Decl.Index, - /// The string bytes are stored in the owner Decl arena. - /// These must be in sorted order. See sortNames. - names: NameMap, - - pub const NameMap = std.StringArrayHashMapUnmanaged(void); - - pub fn srcLoc(self: ErrorSet, mod: *Module) SrcLoc { - const owner_decl = mod.declPtr(self.owner_decl); - return .{ - .file_scope = owner_decl.getFileScope(mod), - .parent_decl_node = owner_decl.src_node, - .lazy = LazySrcLoc.nodeOffset(0), - }; - } - - /// sort the NameMap. This should be called whenever the map is modified. - /// alloc should be the allocator used for the NameMap data. - pub fn sortNames(names: *NameMap) void { - const Context = struct { - keys: [][]const u8, - pub fn lessThan(ctx: @This(), a_index: usize, b_index: usize) bool { - return std.mem.lessThan(u8, ctx.keys[a_index], ctx.keys[b_index]); - } - }; - names.sort(Context{ .keys = names.keys() }); - } -}; - pub const PropertyBoolean = enum { no, yes, unknown, wip }; /// Represents the data that a struct declaration provides. @@ -1530,13 +1498,6 @@ pub const Fn = struct { is_noinline: bool, calls_or_awaits_errorable_fn: bool = false, - /// Any inferred error sets that this function owns, both its own inferred error set and - /// inferred error sets of any inline/comptime functions called. Not to be confused - /// with inferred error sets of generic instantiations of this function, which are - /// *not* tracked here - they are tracked in the new `Fn` object created for the - /// instantiations. - inferred_error_sets: InferredErrorSetList = .{}, - pub const Analysis = enum { /// This function has not yet undergone analysis, because we have not /// seen a potential runtime call. It may be analyzed in future. @@ -1568,10 +1529,10 @@ pub const Fn = struct { /// direct additions via `return error.Foo;`, and possibly also errors that /// are returned from any dependent functions. When the inferred error set is /// fully resolved, this map contains all the errors that the function might return. - errors: ErrorSet.NameMap = .{}, + errors: NameMap = .{}, /// Other inferred error sets which this inferred error set should include. - inferred_error_sets: std.AutoArrayHashMapUnmanaged(*InferredErrorSet, void) = .{}, + inferred_error_sets: std.AutoArrayHashMapUnmanaged(InferredErrorSet.Index, void) = .{}, /// Whether the function returned anyerror. 
This is true if either of /// the dependent functions returns anyerror. @@ -1581,51 +1542,59 @@ pub const Fn = struct { /// can skip resolving any dependents of this inferred error set. is_resolved: bool = false, - pub fn addErrorSet(self: *InferredErrorSet, gpa: Allocator, err_set_ty: Type) !void { + pub const NameMap = std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, void); + + pub const Index = enum(u32) { + _, + + pub fn toOptional(i: Index) OptionalIndex { + return @intToEnum(OptionalIndex, @enumToInt(i)); + } + }; + + pub const OptionalIndex = enum(u32) { + none = std.math.maxInt(u32), + _, + + pub fn init(oi: ?Index) OptionalIndex { + return @intToEnum(OptionalIndex, @enumToInt(oi orelse return .none)); + } + + pub fn unwrap(oi: OptionalIndex) ?Index { + if (oi == .none) return null; + return @intToEnum(Index, @enumToInt(oi)); + } + }; + + pub fn addErrorSet( + self: *InferredErrorSet, + err_set_ty: Type, + ip: *InternPool, + gpa: Allocator, + ) !void { switch (err_set_ty.ip_index) { .anyerror_type => { self.is_anyerror = true; }, - .none => switch (err_set_ty.tag()) { - .error_set => { - const names = err_set_ty.castTag(.error_set).?.data.names.keys(); - for (names) |name| { + else => switch (ip.indexToKey(err_set_ty.ip_index)) { + .error_set_type => |error_set_type| { + for (error_set_type.names) |name| { try self.errors.put(gpa, name, {}); } }, - .error_set_single => { - const name = err_set_ty.castTag(.error_set_single).?.data; - try self.errors.put(gpa, name, {}); - }, - .error_set_inferred => { - const ies = err_set_ty.castTag(.error_set_inferred).?.data; - try self.inferred_error_sets.put(gpa, ies, {}); - }, - .error_set_merged => { - const names = err_set_ty.castTag(.error_set_merged).?.data.keys(); - for (names) |name| { - try self.errors.put(gpa, name, {}); - } + .inferred_error_set_type => |ies_index| { + try self.inferred_error_sets.put(gpa, ies_index, {}); }, else => unreachable, }, - else => @panic("TODO"), } } }; - pub const InferredErrorSetList = std.SinglyLinkedList(InferredErrorSet); - pub const InferredErrorSetListNode = InferredErrorSetList.Node; - + /// TODO: remove this function pub fn deinit(func: *Fn, gpa: Allocator) void { - var it = func.inferred_error_sets.first; - while (it) |node| { - const next = node.next; - node.data.errors.deinit(gpa); - node.data.inferred_error_sets.deinit(gpa); - gpa.destroy(node); - it = next; - } + _ = func; + _ = gpa; } pub fn isAnytypeParam(func: Fn, mod: *Module, index: u32) bool { @@ -3508,6 +3477,10 @@ pub fn structPtr(mod: *Module, index: Struct.Index) *Struct { return mod.intern_pool.structPtr(index); } +pub fn inferredErrorSetPtr(mod: *Module, index: Fn.InferredErrorSet.Index) *Fn.InferredErrorSet { + return mod.intern_pool.inferredErrorSetPtr(index); +} + /// This one accepts an index from the InternPool and asserts that it is not /// the anonymous empty struct type. 
pub fn structPtrUnwrap(mod: *Module, index: Struct.OptionalIndex) ?*Struct { @@ -4722,7 +4695,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { decl_tv.ty.fmt(mod), }); } - const ty = try decl_tv.val.toType().copy(decl_arena_allocator); + const ty = decl_tv.val.toType(); if (ty.getNamespace(mod) == null) { return sema.fail(&block_scope, ty_src, "type {} has no namespace", .{ty.fmt(mod)}); } @@ -4756,7 +4729,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { } decl.clearValues(mod); - decl.ty = try decl_tv.ty.copy(decl_arena_allocator); + decl.ty = decl_tv.ty; decl.val = try decl_tv.val.copy(decl_arena_allocator); // linksection, align, and addrspace were already set by Sema decl.has_tv = true; @@ -4823,7 +4796,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { }, } - decl.ty = try decl_tv.ty.copy(decl_arena_allocator); + decl.ty = decl_tv.ty; decl.val = try decl_tv.val.copy(decl_arena_allocator); decl.@"align" = blk: { const align_ref = decl.zirAlignRef(mod); @@ -6599,7 +6572,7 @@ pub fn populateTestFunctions( // This copy accesses the old Decl Type/Value so it must be done before `clearValues`. const new_ty = try Type.ptr(arena, mod, .{ .size = .Slice, - .pointee_type = try tmp_test_fn_ty.copy(arena), + .pointee_type = tmp_test_fn_ty, .mutable = false, .@"addrspace" = .generic, }); @@ -6877,6 +6850,42 @@ pub fn anyframeType(mod: *Module, payload_ty: Type) Allocator.Error!Type { return (try intern(mod, .{ .anyframe_type = payload_ty.toIntern() })).toType(); } +pub fn errorUnionType(mod: *Module, error_set_ty: Type, payload_ty: Type) Allocator.Error!Type { + return (try intern(mod, .{ .error_union_type = .{ + .error_set_type = error_set_ty.toIntern(), + .payload_type = payload_ty.toIntern(), + } })).toType(); +} + +pub fn singleErrorSetType(mod: *Module, name: []const u8) Allocator.Error!Type { + const gpa = mod.gpa; + const ip = &mod.intern_pool; + return singleErrorSetTypeNts(mod, try ip.getOrPutString(gpa, name)); +} + +pub fn singleErrorSetTypeNts(mod: *Module, name: InternPool.NullTerminatedString) Allocator.Error!Type { + const gpa = mod.gpa; + const ip = &mod.intern_pool; + const names = [1]InternPool.NullTerminatedString{name}; + const i = try ip.get(gpa, .{ .error_set_type = .{ .names = &names } }); + return i.toType(); +} + +/// Sorts `names` in place. +pub fn errorSetFromUnsortedNames( + mod: *Module, + names: []InternPool.NullTerminatedString, +) Allocator.Error!Type { + std.mem.sort( + InternPool.NullTerminatedString, + names, + {}, + InternPool.NullTerminatedString.indexLessThan, + ); + const new_ty = try mod.intern(.{ .error_set_type = .{ .names = names } }); + return new_ty.toType(); +} + /// Supports optionals in addition to pointers. 
pub fn ptrIntValue(mod: *Module, ty: Type, x: u64) Allocator.Error!Value { if (ty.isPtrLikeOptional(mod)) { @@ -7240,6 +7249,16 @@ pub fn typeToFunc(mod: *Module, ty: Type) ?InternPool.Key.FuncType { return mod.intern_pool.indexToFuncType(ty.ip_index); } +pub fn typeToInferredErrorSet(mod: *Module, ty: Type) ?*Fn.InferredErrorSet { + const index = typeToInferredErrorSetIndex(mod, ty).unwrap() orelse return null; + return mod.inferredErrorSetPtr(index); +} + +pub fn typeToInferredErrorSetIndex(mod: *Module, ty: Type) Fn.InferredErrorSet.OptionalIndex { + if (ty.ip_index == .none) return .none; + return mod.intern_pool.indexToInferredErrorSetType(ty.ip_index); +} + pub fn fieldSrcLoc(mod: *Module, owner_decl_index: Decl.Index, query: FieldSrcQuery) SrcLoc { @setCold(true); const owner_decl = mod.declPtr(owner_decl_index); diff --git a/src/Sema.zig b/src/Sema.zig index 74efe9d141..be505d74a3 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -825,12 +825,13 @@ pub fn analyzeBodyBreak( block: *Block, body: []const Zir.Inst.Index, ) CompileError!?BreakData { + const mod = sema.mod; const break_inst = sema.analyzeBodyInner(block, body) catch |err| switch (err) { error.ComptimeBreak => sema.comptime_break_inst, else => |e| return e, }; if (block.instructions.items.len != 0 and - sema.typeOf(Air.indexToRef(block.instructions.items[block.instructions.items.len - 1])).isNoReturn()) + sema.typeOf(Air.indexToRef(block.instructions.items[block.instructions.items.len - 1])).isNoReturn(mod)) return null; const break_data = sema.code.instructions.items(.data)[break_inst].@"break"; const extra = sema.code.extraData(Zir.Inst.Break, break_data.payload_index).data; @@ -1701,7 +1702,7 @@ fn analyzeBodyInner( break :blk Air.Inst.Ref.void_value; }, }; - if (sema.typeOf(air_inst).isNoReturn()) + if (sema.typeOf(air_inst).isNoReturn(mod)) break always_noreturn; map.putAssumeCapacity(inst, air_inst); i += 1; @@ -1796,8 +1797,7 @@ fn analyzeAsType( const wanted_type = Type.type; const coerced_inst = try sema.coerce(block, wanted_type, air_inst, src); const val = try sema.resolveConstValue(block, src, coerced_inst, "types must be comptime-known"); - const ty = val.toType(); - return ty.copy(sema.arena); + return val.toType(); } pub fn setupErrorReturnTrace(sema: *Sema, block: *Block, last_arg_index: usize) !void { @@ -2004,7 +2004,7 @@ fn resolveMaybeUndefValAllowVariablesMaybeRuntime( if (val.isPtrToThreadLocal(sema.mod)) make_runtime.* = true; return val; }, - .const_ty => return try air_datas[i].ty.toValue(sema.arena), + .const_ty => return air_datas[i].ty.toValue(), .interned => return air_datas[i].interned.toValue(), else => return null, } @@ -2131,7 +2131,7 @@ fn failWithInvalidFieldAccess(sema: *Sema, block: *Block, src: LazySrcLoc, objec }; return sema.failWithOwnedErrorMsg(msg); } else if (inner_ty.zigTypeTag(mod) == .ErrorUnion) err: { - const child_ty = inner_ty.errorUnionPayload(); + const child_ty = inner_ty.errorUnionPayload(mod); if (!typeSupportsFieldAccess(mod, child_ty, field_name)) break :err; const msg = msg: { const msg = try sema.errMsg(block, src, "error union type '{}' does not support field access", .{object_ty.fmt(sema.mod)}); @@ -2473,7 +2473,7 @@ fn zirCoerceResultPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); iac.data.decl_index = try anon_decl.finish( - try pointee_ty.copy(anon_decl.arena()), + pointee_ty, Value.undef, iac.data.alignment, ); @@ -3250,47 +3250,35 @@ fn zirErrorSetDecl( const tracy = 
trace(@src()); defer tracy.end(); + const mod = sema.mod; const gpa = sema.gpa; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const extra = sema.code.extraData(Zir.Inst.ErrorSetDecl, inst_data.payload_index); - var new_decl_arena = std.heap.ArenaAllocator.init(gpa); - errdefer new_decl_arena.deinit(); - const new_decl_arena_allocator = new_decl_arena.allocator(); - - const error_set = try new_decl_arena_allocator.create(Module.ErrorSet); - const error_set_ty = try Type.Tag.error_set.create(new_decl_arena_allocator, error_set); - const error_set_val = try Value.Tag.ty.create(new_decl_arena_allocator, error_set_ty); - const mod = sema.mod; - const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{ - .ty = Type.type, - .val = error_set_val, - }, name_strategy, "error", inst); - const new_decl = mod.declPtr(new_decl_index); - new_decl.owns_tv = true; - errdefer mod.abortAnonDecl(new_decl_index); - - var names = Module.ErrorSet.NameMap{}; - try names.ensureUnusedCapacity(new_decl_arena_allocator, extra.data.fields_len); + var names: Module.Fn.InferredErrorSet.NameMap = .{}; + try names.ensureUnusedCapacity(sema.arena, extra.data.fields_len); var extra_index = @intCast(u32, extra.end); const extra_index_end = extra_index + (extra.data.fields_len * 2); while (extra_index < extra_index_end) : (extra_index += 2) { // +2 to skip over doc_string const str_index = sema.code.extra[extra_index]; - const kv = try mod.getErrorValue(sema.code.nullTerminatedString(str_index)); - const result = names.getOrPutAssumeCapacity(kv.key); + const name = sema.code.nullTerminatedString(str_index); + const name_ip = try mod.intern_pool.getOrPutString(gpa, name); + const result = names.getOrPutAssumeCapacity(name_ip); assert(!result.found_existing); // verified in AstGen } - // names must be sorted. 
- Module.ErrorSet.sortNames(&names); + const error_set_ty = try mod.errorSetFromUnsortedNames(names.keys()); + + const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{ + .ty = Type.type, + .val = error_set_ty.toValue(), + }, name_strategy, "error", inst); + const new_decl = mod.declPtr(new_decl_index); + new_decl.owns_tv = true; + errdefer mod.abortAnonDecl(new_decl_index); - error_set.* = .{ - .owner_decl = new_decl_index, - .names = names, - }; - try new_decl.finalizeNewArena(&new_decl_arena); return sema.analyzeDeclVal(block, src, new_decl_index); } @@ -3407,7 +3395,7 @@ fn zirEnsureErrUnionPayloadVoid(sema: *Sema, block: *Block, inst: Zir.Inst.Index else operand_ty; if (err_union_ty.zigTypeTag(mod) != .ErrorUnion) return; - const payload_ty = err_union_ty.errorUnionPayload().zigTypeTag(mod); + const payload_ty = err_union_ty.errorUnionPayload(mod).zigTypeTag(mod); if (payload_ty != .Void and payload_ty != .NoReturn) { const msg = msg: { const msg = try sema.errMsg(block, src, "error union payload is ignored", .{}); @@ -3590,7 +3578,7 @@ fn zirMakePtrConst(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); return sema.analyzeDeclRef(try anon_decl.finish( - try elem_ty.copy(anon_decl.arena()), + elem_ty, try store_val.copy(anon_decl.arena()), ptr_info.@"align", )); @@ -3722,7 +3710,6 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com const var_is_mut = switch (sema.typeOf(ptr).tag()) { .inferred_alloc_const => false, .inferred_alloc_mut => true, - else => unreachable, }; const target = sema.mod.getTarget(); @@ -3733,7 +3720,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com try sema.mod.declareDeclDependency(sema.owner_decl_index, decl_index); const decl = sema.mod.declPtr(decl_index); - const final_elem_ty = try decl.ty.copy(sema.arena); + const final_elem_ty = decl.ty; const final_ptr_ty = try Type.ptr(sema.arena, sema.mod, .{ .pointee_type = final_elem_ty, .mutable = true, @@ -3833,7 +3820,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); const new_decl_index = try anon_decl.finish( - try final_elem_ty.copy(anon_decl.arena()), + final_elem_ty, try store_val.copy(anon_decl.arena()), inferred_alloc.data.alignment, ); @@ -5042,7 +5029,7 @@ fn storeToInferredAllocComptime( var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); iac.data.decl_index = try anon_decl.finish( - try operand_ty.copy(anon_decl.arena()), + operand_ty, try operand_val.copy(anon_decl.arena()), iac.data.alignment, ); @@ -5286,6 +5273,7 @@ fn zirLoop(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index); @@ -5335,7 +5323,7 @@ fn zirLoop(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError try sema.analyzeBody(&loop_block, body); const loop_block_len = loop_block.instructions.items.len; - if (loop_block_len > 0 and sema.typeOf(Air.indexToRef(loop_block.instructions.items[loop_block_len - 1])).isNoReturn()) { + if (loop_block_len > 0 and sema.typeOf(Air.indexToRef(loop_block.instructions.items[loop_block_len - 1])).isNoReturn(mod)) { // If the loop ended with a 
noreturn terminator, then there is no way for it to loop, // so we can just use the block instead. try child_block.instructions.appendSlice(gpa, loop_block.instructions.items); @@ -5588,7 +5576,7 @@ fn analyzeBlockBody( // Blocks must terminate with noreturn instruction. assert(child_block.instructions.items.len != 0); - assert(sema.typeOf(Air.indexToRef(child_block.instructions.items[child_block.instructions.items.len - 1])).isNoReturn()); + assert(sema.typeOf(Air.indexToRef(child_block.instructions.items[child_block.instructions.items.len - 1])).isNoReturn(mod)); if (merges.results.items.len == 0) { // No need for a block instruction. We can put the new instructions @@ -5755,7 +5743,7 @@ fn zirExportValue(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); break :blk try anon_decl.finish( - try operand.ty.copy(anon_decl.arena()), + operand.ty, try operand.val.copy(anon_decl.arena()), 0, ); @@ -6434,7 +6422,7 @@ fn zirCall( }; const return_ty = sema.typeOf(call_inst); - if (modifier != .always_tail and return_ty.isNoReturn()) + if (modifier != .always_tail and return_ty.isNoReturn(mod)) return call_inst; // call to "fn(...) noreturn", don't pop // If any input is an error-type, we might need to pop any trace it generated. Otherwise, we only @@ -6957,17 +6945,11 @@ fn analyzeCall( // Create a fresh inferred error set type for inline/comptime calls. const fn_ret_ty = blk: { if (module_fn.hasInferredErrorSet(mod)) { - const node = try sema.gpa.create(Module.Fn.InferredErrorSetListNode); - node.data = .{ .func = module_fn }; - if (parent_func) |some| { - some.inferred_error_sets.prepend(node); - } - - const error_set_ty = try Type.Tag.error_set_inferred.create(sema.arena, &node.data); - break :blk try Type.Tag.error_union.create(sema.arena, .{ - .error_set = error_set_ty, - .payload = bare_return_type, + const ies_index = try mod.intern_pool.createInferredErrorSet(gpa, .{ + .func = module_fn, }); + const error_set_ty = try mod.intern(.{ .inferred_error_set_type = ies_index }); + break :blk try mod.errorUnionType(error_set_ty.toType(), bare_return_type); } break :blk bare_return_type; }; @@ -7843,21 +7825,21 @@ fn resolveGenericInstantiationType( // `GenericCallAdapter.eql` as well as function body analysis. // Whether it is anytype is communicated by `isAnytypeParam`. const arg = child_sema.inst_map.get(inst).?; - const copied_arg_ty = try child_sema.typeOf(arg).copy(new_decl_arena_allocator); + const arg_ty = child_sema.typeOf(arg); - if (try sema.typeRequiresComptime(copied_arg_ty)) { + if (try sema.typeRequiresComptime(arg_ty)) { is_comptime = true; } if (is_comptime) { const arg_val = (child_sema.resolveMaybeUndefValAllowVariables(arg) catch unreachable).?; child_sema.comptime_args[arg_i] = .{ - .ty = copied_arg_ty, + .ty = arg_ty, .val = try arg_val.copy(new_decl_arena_allocator), }; } else { child_sema.comptime_args[arg_i] = .{ - .ty = copied_arg_ty, + .ty = arg_ty, .val = Value.generic_poison, }; } @@ -7868,7 +7850,7 @@ fn resolveGenericInstantiationType( try wip_captures.finalize(); // Populate the Decl ty/val with the function and its type. - new_decl.ty = try child_sema.typeOf(new_func_inst).copy(new_decl_arena_allocator); + new_decl.ty = child_sema.typeOf(new_func_inst); // If the call evaluated to a return type that requires comptime, never mind // our generic instantiation. Instead we need to perform a comptime call. 
const new_fn_info = mod.typeToFunc(new_decl.ty).?; @@ -8068,7 +8050,7 @@ fn zirErrorUnionType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr }); } try sema.validateErrorUnionPayloadType(block, payload, rhs_src); - const err_union_ty = try Type.errorUnion(sema.arena, error_set, payload, sema.mod); + const err_union_ty = try mod.errorUnionType(error_set, payload); return sema.addType(err_union_ty); } @@ -8087,16 +8069,13 @@ fn validateErrorUnionPayloadType(sema: *Sema, block: *Block, payload_ty: Type, p fn zirErrorValue(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { _ = block; - const tracy = trace(@src()); - defer tracy.end(); - + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].str_tok; - - // Create an anonymous error set type with only this error value, and return the value. - const kv = try sema.mod.getErrorValue(inst_data.get(sema.code)); - const result_type = try Type.Tag.error_set_single.create(sema.arena, kv.key); + const name = inst_data.get(sema.code); + // Create an error set type with only this error value, and return the value. + const kv = try sema.mod.getErrorValue(name); return sema.addConstant( - result_type, + try mod.singleErrorSetType(kv.key), try Value.Tag.@"error".create(sema.arena, .{ .name = kv.key, }), @@ -8139,11 +8118,14 @@ fn zirErrorToInt(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat const op_ty = sema.typeOf(uncasted_operand); try sema.resolveInferredErrorSetTy(block, src, op_ty); - if (!op_ty.isAnyError()) { - const names = op_ty.errorSetNames(); + if (!op_ty.isAnyError(mod)) { + const names = op_ty.errorSetNames(mod); switch (names.len) { 0 => return sema.addConstant(Type.err_int, try mod.intValue(Type.err_int, 0)), - 1 => return sema.addIntUnsigned(Type.err_int, sema.mod.global_error_set.get(names[0]).?), + 1 => { + const name = mod.intern_pool.stringToSlice(names[0]); + return sema.addIntUnsigned(Type.err_int, mod.global_error_set.get(name).?); + }, else => {}, } } @@ -8224,22 +8206,22 @@ fn zirMergeErrorSets(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr return Air.Inst.Ref.anyerror_type; } - if (lhs_ty.castTag(.error_set_inferred)) |payload| { - try sema.resolveInferredErrorSet(block, src, payload.data); + if (mod.typeToInferredErrorSetIndex(lhs_ty).unwrap()) |ies_index| { + try sema.resolveInferredErrorSet(block, src, ies_index); // isAnyError might have changed from a false negative to a true positive after resolution. - if (lhs_ty.isAnyError()) { + if (lhs_ty.isAnyError(mod)) { return Air.Inst.Ref.anyerror_type; } } - if (rhs_ty.castTag(.error_set_inferred)) |payload| { - try sema.resolveInferredErrorSet(block, src, payload.data); + if (mod.typeToInferredErrorSetIndex(rhs_ty).unwrap()) |ies_index| { + try sema.resolveInferredErrorSet(block, src, ies_index); // isAnyError might have changed from a false negative to a true positive after resolution. 
- if (rhs_ty.isAnyError()) { + if (rhs_ty.isAnyError(mod)) { return Air.Inst.Ref.anyerror_type; } } - const err_set_ty = try lhs_ty.errorSetMerge(sema.arena, rhs_ty); + const err_set_ty = try sema.errorSetMerge(lhs_ty, rhs_ty); return sema.addType(err_set_ty); } @@ -8484,7 +8466,7 @@ fn zirOptionalPayload( if (true) break :t operand_ty; const ptr_info = operand_ty.ptrInfo(mod); break :t try Type.ptr(sema.arena, sema.mod, .{ - .pointee_type = try ptr_info.pointee_type.copy(sema.arena), + .pointee_type = ptr_info.pointee_type, .@"align" = ptr_info.@"align", .@"addrspace" = ptr_info.@"addrspace", .mutable = ptr_info.mutable, @@ -8547,7 +8529,7 @@ fn analyzeErrUnionPayload( safety_check: bool, ) CompileError!Air.Inst.Ref { const mod = sema.mod; - const payload_ty = err_union_ty.errorUnionPayload(); + const payload_ty = err_union_ty.errorUnionPayload(mod); if (try sema.resolveDefinedValue(block, operand_src, operand)) |val| { if (val.getError()) |name| { return sema.fail(block, src, "caught unexpected error '{s}'", .{name}); @@ -8560,7 +8542,7 @@ fn analyzeErrUnionPayload( // If the error set has no fields then no safety check is needed. if (safety_check and block.wantSafety() and - !err_union_ty.errorUnionSet().errorSetIsEmpty(mod)) + !err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) { try sema.panicUnwrapError(block, operand, .unwrap_errunion_err, .is_non_err); } @@ -8603,7 +8585,7 @@ fn analyzeErrUnionPayloadPtr( } const err_union_ty = operand_ty.childType(mod); - const payload_ty = err_union_ty.errorUnionPayload(); + const payload_ty = err_union_ty.errorUnionPayload(mod); const operand_pointer_ty = try Type.ptr(sema.arena, sema.mod, .{ .pointee_type = payload_ty, .mutable = !operand_ty.isConstPtr(mod), @@ -8646,7 +8628,7 @@ fn analyzeErrUnionPayloadPtr( // If the error set has no fields then no safety check is needed. 
if (safety_check and block.wantSafety() and - !err_union_ty.errorUnionSet().errorSetIsEmpty(mod)) + !err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) { try sema.panicUnwrapError(block, operand, .unwrap_errunion_err_ptr, .is_non_err_ptr); } @@ -8678,7 +8660,7 @@ fn analyzeErrUnionCode(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air }); } - const result_ty = operand_ty.errorUnionSet(); + const result_ty = operand_ty.errorUnionSet(mod); if (try sema.resolveDefinedValue(block, src, operand)) |val| { assert(val.getError() != null); @@ -8707,7 +8689,7 @@ fn zirErrUnionCodePtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE }); } - const result_ty = operand_ty.childType(mod).errorUnionSet(); + const result_ty = operand_ty.childType(mod).errorUnionSet(mod); if (try sema.resolveDefinedValue(block, src, operand)) |pointer_val| { if (try sema.pointerDeref(block, src, pointer_val, operand_ty)) |val| { @@ -8755,7 +8737,7 @@ fn zirFunc( extra_index += ret_ty_body.len; const ret_ty_val = try sema.resolveGenericBody(block, ret_ty_src, ret_ty_body, inst, Type.type, "return type must be comptime-known"); - break :blk try ret_ty_val.toType().copy(sema.arena); + break :blk ret_ty_val.toType(); }, }; @@ -8927,6 +8909,7 @@ fn funcCommon( is_noinline: bool, ) CompileError!Air.Inst.Ref { const mod = sema.mod; + const gpa = sema.gpa; const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = src_node_offset }; const cc_src: LazySrcLoc = .{ .node_offset_fn_type_cc = src_node_offset }; const func_src = LazySrcLoc.nodeOffset(src_node_offset); @@ -8955,16 +8938,12 @@ fn funcCommon( break :new_func new_func; } destroy_fn_on_error = true; - const new_func = try sema.gpa.create(Module.Fn); + const new_func = try gpa.create(Module.Fn); // Set this here so that the inferred return type can be printed correctly if it appears in an error. new_func.owner_decl = sema.owner_decl_index; break :new_func new_func; }; - errdefer if (destroy_fn_on_error) sema.gpa.destroy(new_func); - - var maybe_inferred_error_set_node: ?*Module.Fn.InferredErrorSetListNode = null; - errdefer if (maybe_inferred_error_set_node) |node| sema.gpa.destroy(node); - // Note: no need to errdefer since this will still be in its default state at the end of the function. 
+ errdefer if (destroy_fn_on_error) gpa.destroy(new_func); const target = sema.mod.getTarget(); const fn_ty: Type = fn_ty: { @@ -9027,15 +9006,11 @@ fn funcCommon( bare_return_type else blk: { try sema.validateErrorUnionPayloadType(block, bare_return_type, ret_ty_src); - const node = try sema.gpa.create(Module.Fn.InferredErrorSetListNode); - node.data = .{ .func = new_func }; - maybe_inferred_error_set_node = node; - - const error_set_ty = try Type.Tag.error_set_inferred.create(sema.arena, &node.data); - break :blk try Type.Tag.error_union.create(sema.arena, .{ - .error_set = error_set_ty, - .payload = bare_return_type, + const ies_index = try mod.intern_pool.createInferredErrorSet(gpa, .{ + .func = new_func, }); + const error_set_ty = try mod.intern(.{ .inferred_error_set_type = ies_index }); + break :blk try mod.errorUnionType(error_set_ty.toType(), bare_return_type); }; if (!return_type.isValidReturnType(mod)) { @@ -9044,7 +9019,7 @@ fn funcCommon( const msg = try sema.errMsg(block, ret_ty_src, "{s}return type '{}' not allowed", .{ opaque_str, return_type.fmt(sema.mod), }); - errdefer msg.destroy(sema.gpa); + errdefer msg.destroy(gpa); try sema.addDeclaredHereNote(msg, return_type); break :msg msg; @@ -9058,7 +9033,7 @@ fn funcCommon( const msg = try sema.errMsg(block, ret_ty_src, "return type '{}' not allowed in function with calling convention '{s}'", .{ return_type.fmt(sema.mod), @tagName(cc_resolved), }); - errdefer msg.destroy(sema.gpa); + errdefer msg.destroy(gpa); const src_decl = sema.mod.declPtr(block.src_decl); try sema.explainWhyTypeIsNotExtern(msg, ret_ty_src.toSrcLoc(src_decl, mod), return_type, .ret_ty); @@ -9182,8 +9157,8 @@ fn funcCommon( sema.owner_decl.@"addrspace" = address_space orelse .generic; if (is_extern) { - const new_extern_fn = try sema.gpa.create(Module.ExternFn); - errdefer sema.gpa.destroy(new_extern_fn); + const new_extern_fn = try gpa.create(Module.ExternFn); + errdefer gpa.destroy(new_extern_fn); new_extern_fn.* = Module.ExternFn{ .owner_decl = sema.owner_decl_index, @@ -9232,10 +9207,6 @@ fn funcCommon( .branch_quota = default_branch_quota, .is_noinline = is_noinline, }; - if (maybe_inferred_error_set_node) |node| { - new_func.inferred_error_sets.prepend(node); - } - maybe_inferred_error_set_node = null; fn_payload.* = .{ .base = .{ .tag = .function }, .data = new_func, @@ -10139,6 +10110,7 @@ fn zirSwitchCapture( defer tracy.end(); const mod = sema.mod; + const gpa = sema.gpa; const zir_datas = sema.code.instructions.items(.data); const capture_info = zir_datas[inst].switch_capture; const switch_info = zir_datas[capture_info.switch_inst].pl_node; @@ -10248,7 +10220,7 @@ fn zirSwitchCapture( const capture_src = raw_capture_src.resolve(mod, sema.mod.declPtr(block.src_decl), switch_info.src_node, .first); const msg = try sema.errMsg(block, capture_src, "capture group with incompatible types", .{}); - errdefer msg.destroy(sema.gpa); + errdefer msg.destroy(gpa); const raw_first_item_src = Module.SwitchProngSrc{ .multi = .{ .prong = capture_info.prong_index, .item = 0 } }; const first_item_src = raw_first_item_src.resolve(mod, sema.mod.declPtr(block.src_decl), switch_info.src_node, .first); @@ -10294,20 +10266,16 @@ fn zirSwitchCapture( }, .ErrorSet => { if (is_multi) { - var names: Module.ErrorSet.NameMap = .{}; + var names: Module.Fn.InferredErrorSet.NameMap = .{}; try names.ensureUnusedCapacity(sema.arena, items.len); for (items) |item| { const item_ref = try sema.resolveInst(item); // Previous switch validation ensured this will succeed const item_val = 
sema.resolveConstValue(block, .unneeded, item_ref, "") catch unreachable;
- names.putAssumeCapacityNoClobber(
- item_val.getError().?,
- {},
- );
+ const name_ip = try mod.intern_pool.getOrPutString(gpa, item_val.getError().?);
+ names.putAssumeCapacityNoClobber(name_ip, {});
}
- // names must be sorted
- Module.ErrorSet.sortNames(&names);
- const else_error_ty = try Type.Tag.error_set_merged.create(sema.arena, names);
+ const else_error_ty = try mod.errorSetFromUnsortedNames(names.keys());
return sema.bitCast(block, else_error_ty, operand, operand_src, null);
} else {
@@ -10315,7 +10283,7 @@ fn zirSwitchCapture(
// Previous switch validation ensured this will succeed
const item_val = sema.resolveConstValue(block, .unneeded, item_ref, "") catch unreachable;
- const item_ty = try Type.Tag.error_set_single.create(sema.arena, item_val.getError().?);
+ const item_ty = try mod.singleErrorSetType(item_val.getError().?);
return sema.bitCast(block, item_ty, operand, operand_src, null);
}
},
@@ -10678,7 +10646,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
try sema.resolveInferredErrorSetTy(block, src, operand_ty);
- if (operand_ty.isAnyError()) {
+ if (operand_ty.isAnyError(mod)) {
if (special_prong != .@"else") {
return sema.fail(
block,
@@ -10692,7 +10660,8 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
var maybe_msg: ?*Module.ErrorMsg = null;
errdefer if (maybe_msg) |msg| msg.destroy(sema.gpa);
- for (operand_ty.errorSetNames()) |error_name| {
+ for (operand_ty.errorSetNames(mod)) |error_name_ip| {
+ const error_name = mod.intern_pool.stringToSlice(error_name_ip);
if (!seen_errors.contains(error_name) and special_prong != .@"else") {
const msg = maybe_msg orelse blk: {
maybe_msg = try sema.errMsg(
@@ -10720,7 +10689,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
return sema.failWithOwnedErrorMsg(msg);
}
- if (special_prong == .@"else" and seen_errors.count() == operand_ty.errorSetNames().len) {
+ if (special_prong == .@"else" and seen_errors.count() == operand_ty.errorSetNames(mod).len) {
// In order to enable common patterns for generic code allow simple else bodies
// else => unreachable,
// else => return,
@@ -10757,18 +10726,18 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
);
}
- const error_names = operand_ty.errorSetNames();
- var names: Module.ErrorSet.NameMap = .{};
+ const error_names = operand_ty.errorSetNames(mod);
+ var names: Module.Fn.InferredErrorSet.NameMap = .{};
try names.ensureUnusedCapacity(sema.arena, error_names.len);
- for (error_names) |error_name| {
+ for (error_names) |error_name_ip| {
+ const error_name = mod.intern_pool.stringToSlice(error_name_ip);
if (seen_errors.contains(error_name)) continue;
- names.putAssumeCapacityNoClobber(error_name, {});
+ names.putAssumeCapacityNoClobber(error_name_ip, {});
}
-
- // names must be sorted
- Module.ErrorSet.sortNames(&names);
- else_error_ty = try Type.Tag.error_set_merged.create(sema.arena, names);
+ // No need to keep the hash map metadata correct; we only extract
+ // the keys here, and errorSetFromUnsortedNames sorts them in place.
+ else_error_ty = try mod.errorSetFromUnsortedNames(names.keys()); } }, .Int, .ComptimeInt => { @@ -11513,12 +11482,13 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError } }, .ErrorSet => { - if (operand_ty.isAnyError()) { + if (operand_ty.isAnyError(mod)) { return sema.fail(block, special_prong_src, "cannot enumerate values of type '{}' for 'inline else'", .{ operand_ty.fmt(mod), }); } - for (operand_ty.errorSetNames()) |error_name| { + for (operand_ty.errorSetNames(mod)) |error_name_ip| { + const error_name = mod.intern_pool.stringToSlice(error_name_ip); if (seen_errors.contains(error_name)) continue; cases_len += 1; @@ -11931,7 +11901,8 @@ fn validateSwitchNoRange( } fn maybeErrorUnwrap(sema: *Sema, block: *Block, body: []const Zir.Inst.Index, operand: Air.Inst.Ref) !bool { - if (!sema.mod.backendSupportsFeature(.panic_unwrap_error)) return false; + const mod = sema.mod; + if (!mod.backendSupportsFeature(.panic_unwrap_error)) return false; const tags = sema.code.instructions.items(.tag); for (body) |inst| { @@ -11967,7 +11938,7 @@ fn maybeErrorUnwrap(sema: *Sema, block: *Block, body: []const Zir.Inst.Index, op .as_node => try sema.zirAsNode(block, inst), .field_val => try sema.zirFieldVal(block, inst), .@"unreachable" => { - if (!sema.mod.comp.formatted_panics) { + if (!mod.comp.formatted_panics) { try sema.safetyPanic(block, .unwrap_error); return true; } @@ -11990,7 +11961,7 @@ fn maybeErrorUnwrap(sema: *Sema, block: *Block, body: []const Zir.Inst.Index, op }, else => unreachable, }; - if (sema.typeOf(air_inst).isNoReturn()) + if (sema.typeOf(air_inst).isNoReturn(mod)) return true; sema.inst_map.putAssumeCapacity(inst, air_inst); } @@ -12194,13 +12165,14 @@ fn zirEmbedFile(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A } fn zirRetErrValueCode(sema: *Sema, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].str_tok; const err_name = inst_data.get(sema.code); // Return the error code from the function. 
- const kv = try sema.mod.getErrorValue(err_name); + const kv = try mod.getErrorValue(err_name); const result_inst = try sema.addConstant( - try Type.Tag.error_set_single.create(sema.arena, kv.key), + try mod.singleErrorSetType(kv.key), try Value.Tag.@"error".create(sema.arena, .{ .name = kv.key }), ); return result_inst; @@ -15737,7 +15709,7 @@ fn zirClosureCapture( Value.@"unreachable"; try block.wip_capture_scope.captures.putNoClobber(sema.gpa, inst, .{ - .ty = try sema.typeOf(operand).copy(sema.perm_arena), + .ty = sema.typeOf(operand), .val = try val.copy(sema.perm_arena), }); } @@ -16223,10 +16195,10 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai try mod.declareDeclDependency(sema.owner_decl_index, set_field_ty_decl_index); try sema.ensureDeclAnalyzed(set_field_ty_decl_index); const set_field_ty_decl = mod.declPtr(set_field_ty_decl_index); - break :t try set_field_ty_decl.val.toType().copy(fields_anon_decl.arena()); + break :t set_field_ty_decl.val.toType(); }; - try sema.queueFullTypeResolution(try error_field_ty.copy(sema.arena)); + try sema.queueFullTypeResolution(error_field_ty); // If the error set is inferred it must be resolved at this point try sema.resolveInferredErrorSetTy(block, src, ty); @@ -16234,11 +16206,11 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // Build our list of Error values // Optional value is only null if anyerror // Value can be zero-length slice otherwise - const error_field_vals: ?[]Value = if (ty.isAnyError()) null else blk: { - const names = ty.errorSetNames(); + const error_field_vals: ?[]Value = if (ty.isAnyError(mod)) null else blk: { + const names = ty.errorSetNames(mod); const vals = try fields_anon_decl.arena().alloc(Value, names.len); - for (vals, 0..) 
|*field_val, i| { - const name = names[i]; + for (vals, names) |*field_val, name_ip| { + const name = mod.intern_pool.stringToSlice(name_ip); const name_val = v: { var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); @@ -16301,9 +16273,9 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai .ErrorUnion => { const field_values = try sema.arena.alloc(Value, 2); // error_set: type, - field_values[0] = try Value.Tag.ty.create(sema.arena, ty.errorUnionSet()); + field_values[0] = try Value.Tag.ty.create(sema.arena, ty.errorUnionSet(mod)); // payload: type, - field_values[1] = try Value.Tag.ty.create(sema.arena, ty.errorUnionPayload()); + field_values[1] = try Value.Tag.ty.create(sema.arena, ty.errorUnionPayload(mod)); return sema.addConstant( type_info_ty, @@ -16332,7 +16304,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai try mod.declareDeclDependency(sema.owner_decl_index, enum_field_ty_decl_index); try sema.ensureDeclAnalyzed(enum_field_ty_decl_index); const enum_field_ty_decl = mod.declPtr(enum_field_ty_decl_index); - break :t try enum_field_ty_decl.val.toType().copy(fields_anon_decl.arena()); + break :t enum_field_ty_decl.val.toType(); }; const enum_field_vals = try fields_anon_decl.arena().alloc(Value, enum_type.names.len); @@ -16416,7 +16388,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai try mod.declareDeclDependency(sema.owner_decl_index, union_field_ty_decl_index); try sema.ensureDeclAnalyzed(union_field_ty_decl_index); const union_field_ty_decl = mod.declPtr(union_field_ty_decl_index); - break :t try union_field_ty_decl.val.toType().copy(fields_anon_decl.arena()); + break :t union_field_ty_decl.val.toType(); }; const union_ty = try sema.resolveTypeFields(ty); @@ -16523,7 +16495,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai try mod.declareDeclDependency(sema.owner_decl_index, struct_field_ty_decl_index); try sema.ensureDeclAnalyzed(struct_field_ty_decl_index); const struct_field_ty_decl = mod.declPtr(struct_field_ty_decl_index); - break :t try struct_field_ty_decl.val.toType().copy(fields_anon_decl.arena()); + break :t struct_field_ty_decl.val.toType(); }; const struct_ty = try sema.resolveTypeFields(ty); try sema.resolveTypeLayout(ty); // Getting alignment requires type layout @@ -16733,9 +16705,9 @@ fn typeInfoDecls( try mod.declareDeclDependency(sema.owner_decl_index, declaration_ty_decl_index); try sema.ensureDeclAnalyzed(declaration_ty_decl_index); const declaration_ty_decl = mod.declPtr(declaration_ty_decl_index); - break :t try declaration_ty_decl.val.toType().copy(decls_anon_decl.arena()); + break :t declaration_ty_decl.val.toType(); }; - try sema.queueFullTypeResolution(try declaration_ty.copy(sema.arena)); + try sema.queueFullTypeResolution(declaration_ty); var decl_vals = std.ArrayList(Value).init(sema.gpa); defer decl_vals.deinit(); @@ -17018,12 +16990,12 @@ fn zirBoolBr( _ = try lhs_block.addBr(block_inst, lhs_result); const rhs_result = try sema.resolveBody(rhs_block, body, inst); - if (!sema.typeOf(rhs_result).isNoReturn()) { + if (!sema.typeOf(rhs_result).isNoReturn(mod)) { _ = try rhs_block.addBr(block_inst, rhs_result); } const result = sema.finishCondBr(parent_block, &child_block, &then_block, &else_block, lhs, block_inst); - if (!sema.typeOf(rhs_result).isNoReturn()) { + if (!sema.typeOf(rhs_result).isNoReturn(mod)) { if (try sema.resolveDefinedValue(rhs_block, sema.src, rhs_result)) |rhs_val| { if 
(is_bool_or and rhs_val.toBool(mod)) { return Air.Inst.Ref.bool_true; @@ -17211,7 +17183,7 @@ fn zirCondbr( const err_operand = try sema.resolveInst(err_inst_data.operand); const operand_ty = sema.typeOf(err_operand); assert(operand_ty.zigTypeTag(mod) == .ErrorUnion); - const result_ty = operand_ty.errorUnionSet(); + const result_ty = operand_ty.errorUnionSet(mod); break :blk try sub_block.addTyOp(.unwrap_errunion_err, result_ty, err_operand); }; @@ -17318,7 +17290,7 @@ fn zirTryPtr(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileErr const operand_ty = sema.typeOf(operand); const ptr_info = operand_ty.ptrInfo(mod); const res_ty = try Type.ptr(sema.arena, sema.mod, .{ - .pointee_type = err_union_ty.errorUnionPayload(), + .pointee_type = err_union_ty.errorUnionPayload(mod), .@"addrspace" = ptr_info.@"addrspace", .mutable = ptr_info.mutable, .@"allowzero" = ptr_info.@"allowzero", @@ -17414,14 +17386,15 @@ fn zirRetErrValue( block: *Block, inst: Zir.Inst.Index, ) CompileError!Zir.Inst.Index { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].str_tok; const err_name = inst_data.get(sema.code); const src = inst_data.src(); // Return the error code from the function. - const kv = try sema.mod.getErrorValue(err_name); + const kv = try mod.getErrorValue(err_name); const result_inst = try sema.addConstant( - try Type.Tag.error_set_single.create(sema.arena, kv.key), + try mod.singleErrorSetType(err_name), try Value.Tag.@"error".create(sema.arena, .{ .name = kv.key }), ); return sema.analyzeRet(block, result_inst, src); @@ -17632,17 +17605,15 @@ fn zirRestoreErrRetIndex(sema: *Sema, start_block: *Block, inst: Zir.Inst.Index) fn addToInferredErrorSet(sema: *Sema, uncasted_operand: Air.Inst.Ref) !void { const mod = sema.mod; + const gpa = sema.gpa; + const ip = &mod.intern_pool; assert(sema.fn_ret_ty.zigTypeTag(mod) == .ErrorUnion); - if (sema.fn_ret_ty.errorUnionSet().castTag(.error_set_inferred)) |payload| { + if (mod.typeToInferredErrorSet(sema.fn_ret_ty.errorUnionSet(mod))) |ies| { const op_ty = sema.typeOf(uncasted_operand); switch (op_ty.zigTypeTag(mod)) { - .ErrorSet => { - try payload.data.addErrorSet(sema.gpa, op_ty); - }, - .ErrorUnion => { - try payload.data.addErrorSet(sema.gpa, op_ty.errorUnionSet()); - }, + .ErrorSet => try ies.addErrorSet(op_ty, ip, gpa), + .ErrorUnion => try ies.addErrorSet(op_ty.errorUnionSet(mod), ip, gpa), else => {}, } } @@ -18521,7 +18492,7 @@ fn addConstantMaybeRef( var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); const decl = try anon_decl.finish( - try ty.copy(anon_decl.arena()), + ty, try val.copy(anon_decl.arena()), 0, // default alignment ); @@ -18595,7 +18566,7 @@ fn fieldType( continue; }, .ErrorUnion => { - cur_ty = cur_ty.errorUnionPayload(); + cur_ty = cur_ty.errorUnionPayload(mod); continue; }, else => {}, @@ -18641,7 +18612,7 @@ fn zirAlignOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const inst_data = sema.code.instructions.items(.data)[inst].un_node; const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const ty = try sema.resolveType(block, operand_src, inst_data.operand); - if (ty.isNoReturn()) { + if (ty.isNoReturn(mod)) { return sema.fail(block, operand_src, "no align available for type '{}'", .{ty.fmt(sema.mod)}); } const val = try ty.lazyAbiAlignment(mod, sema.arena); @@ -18929,7 +18900,7 @@ fn zirReify( const sentinel_ptr_val = sentinel_val.castTag(.opt_payload).?.data; const ptr_ty = try Type.ptr(sema.arena, mod, .{ 
.@"addrspace" = .generic, - .pointee_type = try elem_ty.copy(sema.arena), + .pointee_type = elem_ty, }); const sent_val = (try sema.pointerDeref(block, src, sentinel_ptr_val, ptr_ty)).?; break :s sent_val.toIntern(); @@ -18993,7 +18964,7 @@ fn zirReify( const sentinel_val = struct_val[2]; const len = len_val.toUnsignedInt(mod); - const child_ty = try child_val.toType().copy(sema.arena); + const child_ty = child_val.toType(); const sentinel = if (sentinel_val.castTag(.opt_payload)) |p| blk: { const ptr_ty = try Type.ptr(sema.arena, mod, .{ .@"addrspace" = .generic, @@ -19011,7 +18982,7 @@ fn zirReify( // child: type, const child_val = struct_val[0]; - const child_ty = try child_val.toType().copy(sema.arena); + const child_ty = child_val.toType(); const ty = try Type.optional(sema.arena, child_ty, mod); return sema.addType(ty); @@ -19024,17 +18995,14 @@ fn zirReify( // payload: type, const payload_val = struct_val[1]; - const error_set_ty = try error_set_val.toType().copy(sema.arena); - const payload_ty = try payload_val.toType().copy(sema.arena); + const error_set_ty = error_set_val.toType(); + const payload_ty = payload_val.toType(); if (error_set_ty.zigTypeTag(mod) != .ErrorSet) { return sema.fail(block, src, "Type.ErrorUnion.error_set must be an error set type", .{}); } - const ty = try Type.Tag.error_union.create(sema.arena, .{ - .error_set = error_set_ty, - .payload = payload_ty, - }); + const ty = try mod.errorUnionType(error_set_ty, payload_ty); return sema.addType(ty); }, .ErrorSet => { @@ -19043,27 +19011,23 @@ fn zirReify( const slice_val = payload_val.castTag(.slice).?.data; const len = try sema.usizeCast(block, src, slice_val.len.toUnsignedInt(mod)); - var names: Module.ErrorSet.NameMap = .{}; + var names: Module.Fn.InferredErrorSet.NameMap = .{}; try names.ensureUnusedCapacity(sema.arena, len); - var i: usize = 0; - while (i < len) : (i += 1) { + for (0..len) |i| { const elem_val = try slice_val.ptr.elemValue(mod, i); const struct_val = elem_val.castTag(.aggregate).?.data; // TODO use reflection instead of magic numbers here // error_set: type, const name_val = struct_val[0]; const name_str = try name_val.toAllocatedBytes(Type.const_slice_u8, sema.arena, mod); - - const kv = try mod.getErrorValue(name_str); - const gop = names.getOrPutAssumeCapacity(kv.key); + const name_ip = try mod.intern_pool.getOrPutString(gpa, name_str); + const gop = names.getOrPutAssumeCapacity(name_ip); if (gop.found_existing) { return sema.fail(block, src, "duplicate error '{s}'", .{name_str}); } } - // names must be sorted - Module.ErrorSet.sortNames(&names); - const ty = try Type.Tag.error_set_merged.create(sema.arena, names); + const ty = try mod.errorSetFromUnsortedNames(names.keys()); return sema.addType(ty); }, .Struct => { @@ -19378,7 +19342,7 @@ fn zirReify( return sema.fail(block, src, "duplicate union field {s}", .{field_name}); } - const field_ty = try type_val.toType().copy(new_decl_arena_allocator); + const field_ty = type_val.toType(); gop.value_ptr.* = .{ .ty = field_ty, .abi_align = @intCast(u32, (try alignment_val.getUnsignedIntAdvanced(mod, sema)).?), @@ -19673,7 +19637,7 @@ fn reifyStruct( return sema.fail(block, src, "comptime field without default initialization value", .{}); } - const field_ty = try type_val.toType().copy(new_decl_arena_allocator); + const field_ty = type_val.toType(); gop.value_ptr.* = .{ .ty = field_ty, .abi_align = abi_align, @@ -19751,7 +19715,7 @@ fn reifyStruct( if (backing_int_val.optionalValue(mod)) |payload| { const backing_int_ty = payload.toType(); try 
sema.checkBackingIntType(block, src, backing_int_ty, fields_bit_sum); - struct_obj.backing_int_ty = try backing_int_ty.copy(new_decl_arena_allocator); + struct_obj.backing_int_ty = backing_int_ty; } else { struct_obj.backing_int_ty = try mod.intType(.unsigned, @intCast(u16, fields_bit_sum)); } @@ -20035,6 +19999,8 @@ fn zirIntToPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } fn zirErrSetCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref { + const mod = sema.mod; + const ip = &mod.intern_pool; const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data; const src = LazySrcLoc.nodeOffset(extra.node); const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node }; @@ -20050,22 +20016,27 @@ fn zirErrSetCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat if (disjoint: { // Try avoiding resolving inferred error sets if we can - if (!dest_ty.isAnyError() and dest_ty.errorSetNames().len == 0) break :disjoint true; - if (!operand_ty.isAnyError() and operand_ty.errorSetNames().len == 0) break :disjoint true; - if (dest_ty.isAnyError()) break :disjoint false; - if (operand_ty.isAnyError()) break :disjoint false; - for (dest_ty.errorSetNames()) |dest_err_name| - if (operand_ty.errorSetHasField(dest_err_name)) + if (!dest_ty.isAnyError(mod) and dest_ty.errorSetNames(mod).len == 0) break :disjoint true; + if (!operand_ty.isAnyError(mod) and operand_ty.errorSetNames(mod).len == 0) break :disjoint true; + if (dest_ty.isAnyError(mod)) break :disjoint false; + if (operand_ty.isAnyError(mod)) break :disjoint false; + for (dest_ty.errorSetNames(mod)) |dest_err_name| { + if (Type.errorSetHasFieldIp(ip, operand_ty.toIntern(), dest_err_name)) break :disjoint false; + } - if (dest_ty.tag() != .error_set_inferred and operand_ty.tag() != .error_set_inferred) + if (!ip.isInferredErrorSetType(dest_ty.ip_index) and + !ip.isInferredErrorSetType(operand_ty.ip_index)) + { break :disjoint true; + } try sema.resolveInferredErrorSetTy(block, dest_ty_src, dest_ty); try sema.resolveInferredErrorSetTy(block, operand_src, operand_ty); - for (dest_ty.errorSetNames()) |dest_err_name| - if (operand_ty.errorSetHasField(dest_err_name)) + for (dest_ty.errorSetNames(mod)) |dest_err_name| { + if (Type.errorSetHasFieldIp(ip, operand_ty.toIntern(), dest_err_name)) break :disjoint false; + } break :disjoint true; }) { @@ -20085,9 +20056,9 @@ fn zirErrSetCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat } if (maybe_operand_val) |val| { - if (!dest_ty.isAnyError()) { + if (!dest_ty.isAnyError(mod)) { const error_name = val.castTag(.@"error").?.data.name; - if (!dest_ty.errorSetHasField(error_name)) { + if (!dest_ty.errorSetHasField(error_name, mod)) { const msg = msg: { const msg = try sema.errMsg( block, @@ -20107,7 +20078,7 @@ fn zirErrSetCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat } try sema.requireRuntimeBlock(block, src, operand_src); - if (block.wantSafety() and !dest_ty.isAnyError() and sema.mod.backendSupportsFeature(.error_set_has_value)) { + if (block.wantSafety() and !dest_ty.isAnyError(mod) and sema.mod.backendSupportsFeature(.error_set_has_value)) { const err_int_inst = try block.addBitCast(Type.err_int, operand); const ok = try block.addTyOp(.error_set_has_value, dest_ty, err_int_inst); try sema.addSafetyCheck(block, ok, .invalid_error_code); @@ -22862,7 +22833,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A extra_index += 
body.len; const val = try sema.resolveGenericBody(block, ret_src, body, inst, Type.type, "return type must be comptime-known"); - const ty = try val.toType().copy(sema.arena); + const ty = val.toType(); break :blk ty; } else if (extra.data.bits.has_ret_ty_ref) blk: { const ret_ty_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]); @@ -22873,7 +22844,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A }, else => |e| return e, }; - const ty = try ret_ty_tv.val.toType().copy(sema.arena); + const ty = ret_ty_tv.val.toType(); break :blk ty; } else Type.void; @@ -23360,7 +23331,7 @@ fn validateRunTimeType( }, .Array, .Vector => ty = ty.childType(mod), - .ErrorUnion => ty = ty.errorUnionPayload(), + .ErrorUnion => ty = ty.errorUnionPayload(mod), .Struct, .Union => { const resolved_ty = try sema.resolveTypeFields(ty); @@ -23452,7 +23423,7 @@ fn explainWhyTypeIsComptimeInner( try sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty.optionalChild(mod), type_set); }, .ErrorUnion => { - try sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty.errorUnionPayload(), type_set); + try sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty.errorUnionPayload(mod), type_set); }, .Struct => { @@ -24065,7 +24036,9 @@ fn fieldVal( // in `fieldPtr`. This function takes a value and returns a value. const mod = sema.mod; + const gpa = sema.gpa; const arena = sema.arena; + const ip = &mod.intern_pool; const object_src = src; // TODO better source location const object_ty = sema.typeOf(object); @@ -24147,27 +24120,33 @@ fn fieldVal( switch (try child_type.zigTypeTagOrPoison(mod)) { .ErrorSet => { - const name: []const u8 = if (child_type.castTag(.error_set)) |payload| blk: { - if (payload.data.names.getEntry(field_name)) |entry| { - break :blk entry.key_ptr.*; - } - const msg = msg: { - const msg = try sema.errMsg(block, src, "no error named '{s}' in '{}'", .{ - field_name, child_type.fmt(mod), - }); - errdefer msg.destroy(sema.gpa); - try sema.addDeclaredHereNote(msg, child_type); - break :msg msg; - }; - return sema.failWithOwnedErrorMsg(msg); - } else (try mod.getErrorValue(field_name)).key; + const name = try ip.getOrPutString(gpa, field_name); + switch (ip.indexToKey(child_type.ip_index)) { + .error_set_type => |error_set_type| blk: { + if (error_set_type.nameIndex(ip, name) != null) break :blk; + const msg = msg: { + const msg = try sema.errMsg(block, src, "no error named '{s}' in '{}'", .{ + field_name, child_type.fmt(mod), + }); + errdefer msg.destroy(sema.gpa); + try sema.addDeclaredHereNote(msg, child_type); + break :msg msg; + }; + return sema.failWithOwnedErrorMsg(msg); + }, + .inferred_error_set_type => { + return sema.fail(block, src, "TODO handle inferred error sets here", .{}); + }, + .simple_type => |t| assert(t == .anyerror), + else => unreachable, + } return sema.addConstant( - if (!child_type.isAnyError()) - try child_type.copy(arena) + if (!child_type.isAnyError(mod)) + child_type else - try Type.Tag.error_set_single.create(arena, name), - try Value.Tag.@"error".create(arena, .{ .name = name }), + try mod.singleErrorSetTypeNts(name), + try Value.Tag.@"error".create(arena, .{ .name = ip.stringToSlice(name) }), ); }, .Union => { @@ -24252,6 +24231,8 @@ fn fieldPtr( // in `fieldVal`. This function takes a pointer and returns a pointer. 
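Both the `fieldVal` hunk above and the `fieldPtr` hunk below stop comparing error names as strings: the name is interned once with `getOrPutString`, and every later membership test compares integer indices. A minimal sketch of that pattern, assuming a hypothetical `StringPool` (the real `InternPool` is considerably more involved):

    const std = @import("std");

    const StringPool = struct {
        gpa: std.mem.Allocator,
        map: std.StringHashMap(u32),
        items: std.ArrayList([]const u8),

        fn init(gpa: std.mem.Allocator) StringPool {
            return .{
                .gpa = gpa,
                .map = std.StringHashMap(u32).init(gpa),
                .items = std.ArrayList([]const u8).init(gpa),
            };
        }

        fn deinit(pool: *StringPool) void {
            for (pool.items.items) |s| pool.gpa.free(s);
            pool.items.deinit();
            pool.map.deinit();
        }

        /// Interns `s`, returning a stable index; duplicates map to the
        /// same index, so equality becomes an integer comparison.
        fn getOrPutString(pool: *StringPool, s: []const u8) !u32 {
            if (pool.map.get(s)) |index| return index;
            const owned = try pool.gpa.dupe(u8, s);
            const index = @intCast(u32, pool.items.items.len);
            try pool.items.append(owned);
            try pool.map.put(owned, index);
            return index;
        }

        /// Maps an index back to its bytes, like stringToSlice above.
        fn stringToSlice(pool: *const StringPool, index: u32) []const u8 {
            return pool.items.items[index];
        }
    };

    test "interned names compare by index" {
        var pool = StringPool.init(std.testing.allocator);
        defer pool.deinit();
        const a = try pool.getOrPutString("FileNotFound");
        const b = try pool.getOrPutString("FileNotFound");
        try std.testing.expectEqual(a, b);
    }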
const mod = sema.mod; + const gpa = sema.gpa; + const ip = &mod.intern_pool; const object_ptr_src = src; // TODO better source location const object_ptr_ty = sema.typeOf(object_ptr); const object_ty = switch (object_ptr_ty.zigTypeTag(mod)) { @@ -24362,24 +24343,33 @@ fn fieldPtr( switch (child_type.zigTypeTag(mod)) { .ErrorSet => { - // TODO resolve inferred error sets - const name: []const u8 = if (child_type.castTag(.error_set)) |payload| blk: { - if (payload.data.names.getEntry(field_name)) |entry| { - break :blk entry.key_ptr.*; - } - return sema.fail(block, src, "no error named '{s}' in '{}'", .{ - field_name, child_type.fmt(mod), - }); - } else (try mod.getErrorValue(field_name)).key; + const name = try ip.getOrPutString(gpa, field_name); + switch (ip.indexToKey(child_type.ip_index)) { + .error_set_type => |error_set_type| blk: { + if (error_set_type.nameIndex(ip, name) != null) { + break :blk; + } + return sema.fail(block, src, "no error named '{s}' in '{}'", .{ + field_name, child_type.fmt(mod), + }); + }, + .inferred_error_set_type => { + return sema.fail(block, src, "TODO handle inferred error sets here", .{}); + }, + .simple_type => |t| assert(t == .anyerror), + else => unreachable, + } var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); return sema.analyzeDeclRef(try anon_decl.finish( - if (!child_type.isAnyError()) - try child_type.copy(anon_decl.arena()) + if (!child_type.isAnyError(mod)) + child_type else - try Type.Tag.error_set_single.create(anon_decl.arena(), name), - try Value.Tag.@"error".create(anon_decl.arena(), .{ .name = name }), + try mod.singleErrorSetTypeNts(name), + try Value.Tag.@"error".create(anon_decl.arena(), .{ + .name = ip.stringToSlice(name), + }), 0, // default alignment )); }, @@ -24589,7 +24579,7 @@ fn fieldCallBind( } }; } } else if (first_param_type.zigTypeTag(mod) == .ErrorUnion and - first_param_type.errorUnionPayload().eql(concrete_ty, mod)) + first_param_type.errorUnionPayload(mod).eql(concrete_ty, mod)) { const deref = try sema.analyzeLoad(block, src, object_ptr, src); return .{ .method = .{ @@ -24832,7 +24822,7 @@ fn structFieldPtrByIndex( if (field.is_comptime) { const val = try Value.Tag.comptime_field_ptr.create(sema.arena, .{ - .field_ty = try field.ty.copy(sema.arena), + .field_ty = field.ty, .field_val = try field.default_val.copy(sema.arena), }); return sema.addConstant(ptr_field_ty, val); @@ -26227,7 +26217,7 @@ fn coerceExtra( .none => switch (inst_val.tag()) { .eu_payload => { const payload = try sema.addConstant( - inst_ty.errorUnionPayload(), + inst_ty.errorUnionPayload(mod), inst_val.castTag(.eu_payload).?.data, ); return sema.wrapErrorUnionPayload(block, dest_ty, payload, inst_src) catch |err| switch (err) { @@ -26240,7 +26230,7 @@ fn coerceExtra( else => {}, } const error_set = try sema.addConstant( - inst_ty.errorUnionSet(), + inst_ty.errorUnionSet(mod), inst_val, ); return sema.wrapErrorUnionSet(block, dest_ty, error_set, inst_src); @@ -26342,7 +26332,7 @@ fn coerceExtra( // E!T to T if (inst_ty.zigTypeTag(mod) == .ErrorUnion and - (try sema.coerceInMemoryAllowed(block, inst_ty.errorUnionPayload(), dest_ty, false, target, dest_ty_src, inst_src)) == .ok) + (try sema.coerceInMemoryAllowed(block, inst_ty.errorUnionPayload(mod), dest_ty, false, target, dest_ty_src, inst_src)) == .ok) { try sema.errNote(block, inst_src, msg, "cannot convert error union to payload type", .{}); try sema.errNote(block, inst_src, msg, "consider using 'try', 'catch', or 'if'", .{}); @@ -26393,7 +26383,7 @@ const InMemoryCoercionResult = 
union(enum) { optional_shape: Pair, optional_child: PairAndChild, from_anyerror, - missing_error: []const []const u8, + missing_error: []const InternPool.NullTerminatedString, /// true if wanted is var args fn_var_args: bool, /// true if wanted is generic @@ -26567,7 +26557,8 @@ const InMemoryCoercionResult = union(enum) { break; }, .missing_error => |missing_errors| { - for (missing_errors) |err| { + for (missing_errors) |err_index| { + const err = mod.intern_pool.stringToSlice(err_index); try sema.errNote(block, src, msg, "'error.{s}' not a member of destination error set", .{err}); } break; @@ -26813,8 +26804,8 @@ fn coerceInMemoryAllowed( // Error Unions if (dest_tag == .ErrorUnion and src_tag == .ErrorUnion) { - const dest_payload = dest_ty.errorUnionPayload(); - const src_payload = src_ty.errorUnionPayload(); + const dest_payload = dest_ty.errorUnionPayload(mod); + const src_payload = src_ty.errorUnionPayload(mod); const child = try sema.coerceInMemoryAllowed(block, dest_payload, src_payload, dest_is_mut, target, dest_src, src_src); if (child != .ok) { return InMemoryCoercionResult{ .error_union_payload = .{ @@ -26823,7 +26814,7 @@ fn coerceInMemoryAllowed( .wanted = dest_payload, } }; } - return try sema.coerceInMemoryAllowed(block, dest_ty.errorUnionSet(), src_ty.errorUnionSet(), dest_is_mut, target, dest_src, src_src); + return try sema.coerceInMemoryAllowed(block, dest_ty.errorUnionSet(mod), src_ty.errorUnionSet(mod), dest_is_mut, target, dest_src, src_src); } // Error Sets @@ -26903,8 +26894,8 @@ fn coerceInMemoryAllowed( if (child != .ok) { return InMemoryCoercionResult{ .optional_child = .{ .child = try child.dupe(sema.arena), - .actual = try src_child_type.copy(sema.arena), - .wanted = try dest_child_type.copy(sema.arena), + .actual = src_child_type, + .wanted = dest_child_type, } }; } @@ -26926,133 +26917,100 @@ fn coerceInMemoryAllowedErrorSets( src_src: LazySrcLoc, ) !InMemoryCoercionResult { const mod = sema.mod; + const gpa = sema.gpa; + const ip = &mod.intern_pool; // Coercion to `anyerror`. Note that this check can return false negatives // in case the error sets did not get resolved. - if (dest_ty.isAnyError()) { + if (dest_ty.isAnyError(mod)) { return .ok; } - if (dest_ty.castTag(.error_set_inferred)) |dst_payload| { - const dst_ies = dst_payload.data; + if (mod.typeToInferredErrorSetIndex(dest_ty).unwrap()) |dst_ies_index| { + const dst_ies = mod.inferredErrorSetPtr(dst_ies_index); // We will make an effort to return `ok` without resolving either error set, to // avoid unnecessary "unable to resolve error set" dependency loop errors. switch (src_ty.ip_index) { - .none => switch (src_ty.tag()) { - .error_set_inferred => { + .anyerror_type => {}, + else => switch (ip.indexToKey(src_ty.ip_index)) { + .inferred_error_set_type => |src_index| { // If both are inferred error sets of functions, and // the dest includes the source function, the coercion is OK. // This check is important because it works without forcing a full resolution // of inferred error sets. 
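The containment fast path described above works because each inferred error set records, by index, every other inferred set it absorbed (via the `addErrorSet` bookkeeping just below), so coercion between two unresolved sets can succeed with a single map lookup. A rough model, with hypothetical names (`SetIndex`, `InferredSet`) standing in for the compiler's types:

    const std = @import("std");

    const SetIndex = u32; // stand-in for Module.Fn.InferredErrorSet.Index

    const InferredSet = struct {
        /// Other inferred sets this one depends on, used as a set.
        inferred_error_sets: std.AutoArrayHashMapUnmanaged(SetIndex, void) = .{},

        fn absorb(set: *InferredSet, gpa: std.mem.Allocator, child: SetIndex) !void {
            try set.inferred_error_sets.put(gpa, child, {});
        }

        /// The cheap check from the hunk above: if `dst` already recorded
        /// `src` as a dependency, coercion succeeds with no resolution.
        fn containsSet(dst: *const InferredSet, src: SetIndex) bool {
            return dst.inferred_error_sets.contains(src);
        }
    };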
-                    const src_ies = src_ty.castTag(.error_set_inferred).?.data;
-
-                    if (dst_ies.inferred_error_sets.contains(src_ies)) {
+                    if (dst_ies.inferred_error_sets.contains(src_index)) {
                         return .ok;
                     }
                 },
-                .error_set_single => {
-                    const name = src_ty.castTag(.error_set_single).?.data;
-                    if (dst_ies.errors.contains(name)) return .ok;
-                },
-                .error_set_merged => {
-                    const names = src_ty.castTag(.error_set_merged).?.data.keys();
-                    for (names) |name| {
-                        if (!dst_ies.errors.contains(name)) break;
-                    } else return .ok;
-                },
-                .error_set => {
-                    const names = src_ty.castTag(.error_set).?.data.names.keys();
-                    for (names) |name| {
+                .error_set_type => |error_set_type| {
+                    for (error_set_type.names) |name| {
                         if (!dst_ies.errors.contains(name)) break;
                     } else return .ok;
                 },
                 else => unreachable,
             },
-            .anyerror_type => {},
-            else => switch (mod.intern_pool.indexToKey(src_ty.ip_index)) {
-                else => @panic("TODO"),
-            },
         }

         if (dst_ies.func == sema.owner_func) {
             // We are trying to coerce an error set to the current function's
             // inferred error set.
-            try dst_ies.addErrorSet(sema.gpa, src_ty);
+            try dst_ies.addErrorSet(src_ty, ip, gpa);
             return .ok;
         }

-        try sema.resolveInferredErrorSet(block, dest_src, dst_payload.data);
+        try sema.resolveInferredErrorSet(block, dest_src, dst_ies_index);
         // isAnyError might have changed from a false negative to a true positive after resolution.
-        if (dest_ty.isAnyError()) {
+        if (dest_ty.isAnyError(mod)) {
             return .ok;
         }
     }

-    var missing_error_buf = std.ArrayList([]const u8).init(sema.gpa);
+    var missing_error_buf = std.ArrayList(InternPool.NullTerminatedString).init(gpa);
     defer missing_error_buf.deinit();

     switch (src_ty.ip_index) {
-        .none => switch (src_ty.tag()) {
-            .error_set_inferred => {
-                const src_data = src_ty.castTag(.error_set_inferred).?.data;
+        .anyerror_type => switch (ip.indexToKey(dest_ty.ip_index)) {
+            .inferred_error_set_type => unreachable, // Caught by dest_ty.isAnyError(mod) above.
+            .simple_type => unreachable, // filtered out above
+            .error_set_type => return .from_anyerror,
+            else => unreachable,
+        },
+
+        else => switch (ip.indexToKey(src_ty.ip_index)) {
+            .inferred_error_set_type => |src_index| {
+                const src_data = mod.inferredErrorSetPtr(src_index);

-                try sema.resolveInferredErrorSet(block, src_src, src_data);
+                try sema.resolveInferredErrorSet(block, src_src, src_index);

                 // src anyerror status might have changed after the resolution.
-                if (src_ty.isAnyError()) {
-                    // dest_ty.isAnyError() == true is already checked for at this point.
+                if (src_ty.isAnyError(mod)) {
+                    // dest_ty.isAnyError(mod) == true is already checked for at this point.
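When no fast path applies, the rest of this function is a plain subset test over interned names: every error in the source set must appear in the destination, and the stragglers are collected for the `missing_error` diagnostic handled by `InMemoryCoercionResult` above. Distilled into a standalone sketch, with `u32` standing in for `InternPool.NullTerminatedString`:

    const std = @import("std");

    fn missingErrors(
        gpa: std.mem.Allocator,
        dest: []const u32,
        src: []const u32,
    ) ![]u32 {
        var missing = std.ArrayList(u32).init(gpa);
        errdefer missing.deinit();
        for (src) |name| {
            // The compiler asks errorSetHasFieldIp; a linear scan is
            // enough here to show the shape of the check.
            if (std.mem.indexOfScalar(u32, dest, name) == null) {
                try missing.append(name);
            }
        }
        return missing.toOwnedSlice();
    }

An empty result means the coercion is allowed; a non-empty one becomes the "not a member of destination error set" notes.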
return .from_anyerror; } for (src_data.errors.keys()) |key| { - if (!dest_ty.errorSetHasField(key)) { + if (!Type.errorSetHasFieldIp(ip, dest_ty.toIntern(), key)) { try missing_error_buf.append(key); } } if (missing_error_buf.items.len != 0) { return InMemoryCoercionResult{ - .missing_error = try sema.arena.dupe([]const u8, missing_error_buf.items), - }; - } - - return .ok; - }, - .error_set_single => { - const name = src_ty.castTag(.error_set_single).?.data; - if (dest_ty.errorSetHasField(name)) { - return .ok; - } - const list = try sema.arena.alloc([]const u8, 1); - list[0] = name; - return InMemoryCoercionResult{ .missing_error = list }; - }, - .error_set_merged => { - const names = src_ty.castTag(.error_set_merged).?.data.keys(); - for (names) |name| { - if (!dest_ty.errorSetHasField(name)) { - try missing_error_buf.append(name); - } - } - - if (missing_error_buf.items.len != 0) { - return InMemoryCoercionResult{ - .missing_error = try sema.arena.dupe([]const u8, missing_error_buf.items), + .missing_error = try sema.arena.dupe(InternPool.NullTerminatedString, missing_error_buf.items), }; } return .ok; }, - .error_set => { - const names = src_ty.castTag(.error_set).?.data.names.keys(); - for (names) |name| { - if (!dest_ty.errorSetHasField(name)) { + .error_set_type => |error_set_type| { + for (error_set_type.names) |name| { + if (!Type.errorSetHasFieldIp(ip, dest_ty.toIntern(), name)) { try missing_error_buf.append(name); } } if (missing_error_buf.items.len != 0) { return InMemoryCoercionResult{ - .missing_error = try sema.arena.dupe([]const u8, missing_error_buf.items), + .missing_error = try sema.arena.dupe(InternPool.NullTerminatedString, missing_error_buf.items), }; } @@ -27060,18 +27018,6 @@ fn coerceInMemoryAllowedErrorSets( }, else => unreachable, }, - - .anyerror_type => switch (dest_ty.ip_index) { - .none => switch (dest_ty.tag()) { - .error_set_inferred => unreachable, // Caught by dest_ty.isAnyError() above. - .error_set_single, .error_set_merged, .error_set => return .from_anyerror, - else => unreachable, - }, - .anyerror_type => unreachable, // Filtered out above. 
- else => @panic("TODO"), - }, - - else => @panic("TODO"), } unreachable; @@ -28029,7 +27975,7 @@ fn beginComptimePtrMutation( var parent = try sema.beginComptimePtrMutation(block, src, eu_ptr.container_ptr, eu_ptr.container_ty); switch (parent.pointee) { .direct => |val_ptr| { - const payload_ty = parent.ty.errorUnionPayload(); + const payload_ty = parent.ty.errorUnionPayload(mod); if (val_ptr.ip_index == .none and val_ptr.tag() == .eu_payload) { return ComptimePtrMutationKit{ .decl_ref_mut = parent.decl_ref_mut, @@ -28402,7 +28348,7 @@ fn beginComptimePtrLoad( => blk: { const payload_ptr = ptr_val.cast(Value.Payload.PayloadPtr).?.data; const payload_ty = switch (ptr_val.tag()) { - .eu_payload_ptr => payload_ptr.container_ty.errorUnionPayload(), + .eu_payload_ptr => payload_ptr.container_ty.errorUnionPayload(mod), .opt_payload_ptr => payload_ptr.container_ty.optionalChild(mod), else => unreachable, }; @@ -29301,7 +29247,7 @@ fn refValue(sema: *Sema, block: *Block, ty: Type, val: Value) !Value { var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); const decl = try anon_decl.finish( - try ty.copy(anon_decl.arena()), + ty, try val.copy(anon_decl.arena()), 0, // default alignment ); @@ -29387,7 +29333,7 @@ fn analyzeRef( var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); return sema.analyzeDeclRef(try anon_decl.finish( - try operand_ty.copy(anon_decl.arena()), + operand_ty, try val.copy(anon_decl.arena()), 0, // default alignment )); @@ -29555,7 +29501,7 @@ fn analyzeIsNonErrComptimeOnly( if (ot == .ErrorSet) return Air.Inst.Ref.bool_false; assert(ot == .ErrorUnion); - const payload_ty = operand_ty.errorUnionPayload(); + const payload_ty = operand_ty.errorUnionPayload(mod); if (payload_ty.zigTypeTag(mod) == .NoReturn) { return Air.Inst.Ref.bool_false; } @@ -29577,23 +29523,28 @@ fn analyzeIsNonErrComptimeOnly( // exception if the error union error set is known to be empty, // we allow the comparison but always make it comptime-known. - const set_ty = operand_ty.errorUnionSet(); + const set_ty = operand_ty.errorUnionSet(mod); switch (set_ty.ip_index) { - .none => switch (set_ty.tag()) { - .error_set_inferred => blk: { + .anyerror_type => {}, + else => switch (mod.intern_pool.indexToKey(set_ty.ip_index)) { + .error_set_type => |error_set_type| { + if (error_set_type.names.len == 0) return Air.Inst.Ref.bool_true; + }, + .inferred_error_set_type => |ies_index| blk: { // If the error set is empty, we must return a comptime true or false. // However we want to avoid unnecessarily resolving an inferred error set // in case it is already non-empty. - const ies = set_ty.castTag(.error_set_inferred).?.data; + const ies = mod.inferredErrorSetPtr(ies_index); if (ies.is_anyerror) break :blk; if (ies.errors.count() != 0) break :blk; if (maybe_operand_val == null) { // Try to avoid resolving inferred error set if possible. if (ies.errors.count() != 0) break :blk; if (ies.is_anyerror) break :blk; - for (ies.inferred_error_sets.keys()) |other_ies| { - if (ies == other_ies) continue; - try sema.resolveInferredErrorSet(block, src, other_ies); + for (ies.inferred_error_sets.keys()) |other_ies_index| { + if (ies_index == other_ies_index) continue; + try sema.resolveInferredErrorSet(block, src, other_ies_index); + const other_ies = mod.inferredErrorSetPtr(other_ies_index); if (other_ies.is_anyerror) { ies.is_anyerror = true; ies.is_resolved = true; @@ -29608,18 +29559,12 @@ fn analyzeIsNonErrComptimeOnly( // so far with this type can't contain errors either. 
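The rewritten `analyzeIsNonErrComptimeOnly` branches above reduce to a small decision that is worth stating on its own; a paraphrase of the logic, not the compiler's code:

    const std = @import("std");

    /// true  => comptime-known: the operand can never hold an error,
    ///          because the error set is provably empty.
    /// null  => must be checked at runtime (anyerror, or a set that
    ///          still has members after resolution).
    fn comptimeIsNonErr(is_anyerror: bool, error_count: usize) ?bool {
        if (is_anyerror) return null;
        if (error_count == 0) return true;
        return null;
    }

    test "empty error set makes is-non-err comptime-known" {
        try std.testing.expectEqual(@as(?bool, true), comptimeIsNonErr(false, 0));
        try std.testing.expectEqual(@as(?bool, null), comptimeIsNonErr(true, 0));
    }

Most of the work in the hunk is about reaching this answer lazily: the inferred set is only resolved when neither `is_anyerror` nor a non-empty `errors` map can settle the question first.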
return Air.Inst.Ref.bool_true; } - try sema.resolveInferredErrorSet(block, src, ies); + try sema.resolveInferredErrorSet(block, src, ies_index); if (ies.is_anyerror) break :blk; if (ies.errors.count() == 0) return Air.Inst.Ref.bool_true; } }, - else => if (set_ty.errorSetNames().len == 0) return Air.Inst.Ref.bool_true, - }, - - .anyerror_type => {}, - - else => switch (mod.intern_pool.indexToKey(set_ty.ip_index)) { - else => @panic("TODO"), + else => unreachable, }, } @@ -30516,7 +30461,8 @@ fn wrapErrorUnionPayload( inst: Air.Inst.Ref, inst_src: LazySrcLoc, ) !Air.Inst.Ref { - const dest_payload_ty = dest_ty.errorUnionPayload(); + const mod = sema.mod; + const dest_payload_ty = dest_ty.errorUnionPayload(mod); const coerced = try sema.coerceExtra(block, dest_payload_ty, inst, inst_src, .{ .report_err = false }); if (try sema.resolveMaybeUndefVal(coerced)) |val| { return sema.addConstant(dest_ty, try Value.Tag.eu_payload.create(sema.arena, val)); @@ -30533,51 +30479,41 @@ fn wrapErrorUnionSet( inst: Air.Inst.Ref, inst_src: LazySrcLoc, ) !Air.Inst.Ref { + const mod = sema.mod; + const ip = &mod.intern_pool; const inst_ty = sema.typeOf(inst); - const dest_err_set_ty = dest_ty.errorUnionSet(); + const dest_err_set_ty = dest_ty.errorUnionSet(mod); if (try sema.resolveMaybeUndefVal(inst)) |val| { switch (dest_err_set_ty.ip_index) { .anyerror_type => {}, - - .none => switch (dest_err_set_ty.tag()) { - .error_set_single => ok: { - const expected_name = val.castTag(.@"error").?.data.name; - const n = dest_err_set_ty.castTag(.error_set_single).?.data; - if (mem.eql(u8, expected_name, n)) break :ok; - return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty); - }, - .error_set => { + else => switch (ip.indexToKey(dest_err_set_ty.ip_index)) { + .error_set_type => |error_set_type| ok: { const expected_name = val.castTag(.@"error").?.data.name; - const error_set = dest_err_set_ty.castTag(.error_set).?.data; - if (!error_set.names.contains(expected_name)) { - return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty); + if (ip.getString(expected_name).unwrap()) |expected_name_interned| { + if (error_set_type.nameIndex(ip, expected_name_interned) != null) + break :ok; } + return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty); }, - .error_set_inferred => ok: { + .inferred_error_set_type => |ies_index| ok: { + const ies = mod.inferredErrorSetPtr(ies_index); const expected_name = val.castTag(.@"error").?.data.name; - const ies = dest_err_set_ty.castTag(.error_set_inferred).?.data; // We carefully do this in an order that avoids unnecessarily // resolving the destination error set type. 
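Note the asymmetry in `wrapErrorUnionSet` above: it calls `getString`, which only looks a name up, rather than `getOrPutString`. If the error name was never interned at all, no error set can possibly contain it, so the check fails fast without growing the pool. In terms of the hypothetical `StringPool` from the earlier sketch:

    /// Read-only lookup: unlike getOrPutString, an unknown name yields
    /// null instead of being interned. (Continues the hypothetical
    /// StringPool sketch shown earlier.)
    fn getString(pool: *const StringPool, s: []const u8) ?u32 {
        return pool.map.get(s);
    }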
if (ies.is_anyerror) break :ok; - if (ies.errors.contains(expected_name)) break :ok; + + if (ip.getString(expected_name).unwrap()) |expected_name_interned| { + if (ies.errors.contains(expected_name_interned)) break :ok; + } if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, dest_err_set_ty, inst_ty, inst_src, inst_src)) { break :ok; } return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty); }, - .error_set_merged => { - const expected_name = val.castTag(.@"error").?.data.name; - const error_set = dest_err_set_ty.castTag(.error_set_merged).?.data; - if (!error_set.contains(expected_name)) { - return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty); - } - }, else => unreachable, }, - - else => @panic("TODO"), } return sema.addConstant(dest_ty, val); } @@ -30743,11 +30679,11 @@ fn resolvePeerTypes( continue; } - err_set_ty = try chosen_set_ty.errorSetMerge(sema.arena, candidate_ty); + err_set_ty = try sema.errorSetMerge(chosen_set_ty, candidate_ty); continue; }, .ErrorUnion => { - const chosen_set_ty = err_set_ty orelse chosen_ty.errorUnionSet(); + const chosen_set_ty = err_set_ty orelse chosen_ty.errorUnionSet(mod); if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, chosen_set_ty, candidate_ty, src, src)) { continue; @@ -30757,7 +30693,7 @@ fn resolvePeerTypes( continue; } - err_set_ty = try chosen_set_ty.errorSetMerge(sema.arena, candidate_ty); + err_set_ty = try sema.errorSetMerge(chosen_set_ty, candidate_ty); continue; }, else => { @@ -30770,7 +30706,7 @@ fn resolvePeerTypes( continue; } - err_set_ty = try chosen_set_ty.errorSetMerge(sema.arena, candidate_ty); + err_set_ty = try sema.errorSetMerge(chosen_set_ty, candidate_ty); continue; } else { err_set_ty = candidate_ty; @@ -30781,14 +30717,14 @@ fn resolvePeerTypes( .ErrorUnion => switch (chosen_ty_tag) { .ErrorSet => { const chosen_set_ty = err_set_ty orelse chosen_ty; - const candidate_set_ty = candidate_ty.errorUnionSet(); + const candidate_set_ty = candidate_ty.errorUnionSet(mod); if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, chosen_set_ty, candidate_set_ty, src, src)) { err_set_ty = chosen_set_ty; } else if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, candidate_set_ty, chosen_set_ty, src, src)) { err_set_ty = null; } else { - err_set_ty = try chosen_set_ty.errorSetMerge(sema.arena, candidate_set_ty); + err_set_ty = try sema.errorSetMerge(chosen_set_ty, candidate_set_ty); } chosen = candidate; chosen_i = candidate_i + 1; @@ -30796,8 +30732,8 @@ fn resolvePeerTypes( }, .ErrorUnion => { - const chosen_payload_ty = chosen_ty.errorUnionPayload(); - const candidate_payload_ty = candidate_ty.errorUnionPayload(); + const chosen_payload_ty = chosen_ty.errorUnionPayload(mod); + const candidate_payload_ty = candidate_ty.errorUnionPayload(mod); const coerce_chosen = (try sema.coerceInMemoryAllowed(block, chosen_payload_ty, candidate_payload_ty, false, target, src, src)) == .ok; const coerce_candidate = (try sema.coerceInMemoryAllowed(block, candidate_payload_ty, chosen_payload_ty, false, target, src, src)) == .ok; @@ -30811,15 +30747,15 @@ fn resolvePeerTypes( chosen_i = candidate_i + 1; } - const chosen_set_ty = err_set_ty orelse chosen_ty.errorUnionSet(); - const candidate_set_ty = candidate_ty.errorUnionSet(); + const chosen_set_ty = err_set_ty orelse chosen_ty.errorUnionSet(mod); + const candidate_set_ty = candidate_ty.errorUnionSet(mod); if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, chosen_set_ty, candidate_set_ty, src, src)) { err_set_ty 
= chosen_set_ty; } else if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, candidate_set_ty, chosen_set_ty, src, src)) { err_set_ty = candidate_set_ty; } else { - err_set_ty = try chosen_set_ty.errorSetMerge(sema.arena, candidate_set_ty); + err_set_ty = try sema.errorSetMerge(chosen_set_ty, candidate_set_ty); } continue; } @@ -30827,13 +30763,13 @@ fn resolvePeerTypes( else => { if (err_set_ty) |chosen_set_ty| { - const candidate_set_ty = candidate_ty.errorUnionSet(); + const candidate_set_ty = candidate_ty.errorUnionSet(mod); if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, chosen_set_ty, candidate_set_ty, src, src)) { err_set_ty = chosen_set_ty; } else if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, candidate_set_ty, chosen_set_ty, src, src)) { err_set_ty = null; } else { - err_set_ty = try chosen_set_ty.errorSetMerge(sema.arena, candidate_set_ty); + err_set_ty = try sema.errorSetMerge(chosen_set_ty, candidate_set_ty); } } seen_const = seen_const or chosen_ty.isConstPtr(mod); @@ -30963,7 +30899,7 @@ fn resolvePeerTypes( } }, .ErrorUnion => { - const chosen_ptr_ty = chosen_ty.errorUnionPayload(); + const chosen_ptr_ty = chosen_ty.errorUnionPayload(mod); if (chosen_ptr_ty.zigTypeTag(mod) == .Pointer) { const chosen_info = chosen_ptr_ty.ptrInfo(mod); @@ -31073,7 +31009,7 @@ fn resolvePeerTypes( } }, .ErrorUnion => { - const payload_ty = chosen_ty.errorUnionPayload(); + const payload_ty = chosen_ty.errorUnionPayload(mod); if ((try sema.coerceInMemoryAllowed(block, payload_ty, candidate_ty, false, target, src, src)) == .ok) { continue; } @@ -31090,7 +31026,7 @@ fn resolvePeerTypes( continue; } - err_set_ty = try chosen_set_ty.errorSetMerge(sema.arena, chosen_ty); + err_set_ty = try sema.errorSetMerge(chosen_set_ty, chosen_ty); continue; } else { err_set_ty = chosen_ty; @@ -31148,14 +31084,14 @@ fn resolvePeerTypes( else new_ptr_ty; const set_ty = err_set_ty orelse return opt_ptr_ty; - return try Type.errorUnion(sema.arena, set_ty, opt_ptr_ty, mod); + return try mod.errorUnionType(set_ty, opt_ptr_ty); } if (seen_const) { // turn []T => []const T switch (chosen_ty.zigTypeTag(mod)) { .ErrorUnion => { - const ptr_ty = chosen_ty.errorUnionPayload(); + const ptr_ty = chosen_ty.errorUnionPayload(mod); var info = ptr_ty.ptrInfo(mod); info.mutable = false; const new_ptr_ty = try Type.ptr(sema.arena, mod, info); @@ -31163,8 +31099,8 @@ fn resolvePeerTypes( try Type.optional(sema.arena, new_ptr_ty, mod) else new_ptr_ty; - const set_ty = err_set_ty orelse chosen_ty.errorUnionSet(); - return try Type.errorUnion(sema.arena, set_ty, opt_ptr_ty, mod); + const set_ty = err_set_ty orelse chosen_ty.errorUnionSet(mod); + return try mod.errorUnionType(set_ty, opt_ptr_ty); }, .Pointer => { var info = chosen_ty.ptrInfo(mod); @@ -31175,7 +31111,7 @@ fn resolvePeerTypes( else new_ptr_ty; const set_ty = err_set_ty orelse return opt_ptr_ty; - return try Type.errorUnion(sema.arena, set_ty, opt_ptr_ty, mod); + return try mod.errorUnionType(set_ty, opt_ptr_ty); }, else => return chosen_ty, } @@ -31187,16 +31123,16 @@ fn resolvePeerTypes( else => try Type.optional(sema.arena, chosen_ty, mod), }; const set_ty = err_set_ty orelse return opt_ty; - return try Type.errorUnion(sema.arena, set_ty, opt_ty, mod); + return try mod.errorUnionType(set_ty, opt_ty); } if (err_set_ty) |ty| switch (chosen_ty.zigTypeTag(mod)) { .ErrorSet => return ty, .ErrorUnion => { - const payload_ty = chosen_ty.errorUnionPayload(); - return try Type.errorUnion(sema.arena, ty, payload_ty, mod); + const payload_ty = 
chosen_ty.errorUnionPayload(mod); + return try mod.errorUnionType(ty, payload_ty); }, - else => return try Type.errorUnion(sema.arena, ty, chosen_ty, mod), + else => return try mod.errorUnionType(ty, chosen_ty), }; return chosen_ty; @@ -31279,7 +31215,7 @@ pub fn resolveTypeLayout(sema: *Sema, ty: Type) CompileError!void { return sema.resolveTypeLayout(payload_ty); }, .ErrorUnion => { - const payload_ty = ty.errorUnionPayload(); + const payload_ty = ty.errorUnionPayload(mod); return sema.resolveTypeLayout(payload_ty); }, .Fn => { @@ -31465,7 +31401,7 @@ fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!voi }; try sema.checkBackingIntType(&block, backing_int_src, backing_int_ty, fields_bit_sum); - struct_obj.backing_int_ty = try backing_int_ty.copy(decl_arena_allocator); + struct_obj.backing_int_ty = backing_int_ty; try wip_captures.finalize(); } else { if (fields_bit_sum > std.math.maxInt(u16)) { @@ -31605,18 +31541,6 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { return switch (ty.ip_index) { .empty_struct_type => false, - .none => switch (ty.tag()) { - .error_set, - .error_set_single, - .error_set_inferred, - .error_set_merged, - => false, - - .inferred_alloc_mut => unreachable, - .inferred_alloc_const => unreachable, - - .error_union => return sema.resolveTypeRequiresComptime(ty.errorUnionPayload()), - }, else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { .int_type => false, .ptr_type => |ptr_type| { @@ -31635,6 +31559,8 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .vector_type => |vector_type| return sema.resolveTypeRequiresComptime(vector_type.child.toType()), .opt_type => |child| return sema.resolveTypeRequiresComptime(child.toType()), .error_union_type => |error_union_type| return sema.resolveTypeRequiresComptime(error_union_type.payload_type.toType()), + .error_set_type, .inferred_error_set_type => false, + .func_type => true, .simple_type => |t| switch (t) { @@ -31780,7 +31706,7 @@ pub fn resolveTypeFully(sema: *Sema, ty: Type) CompileError!void { .Optional => { return sema.resolveTypeFully(ty.optionalChild(mod)); }, - .ErrorUnion => return sema.resolveTypeFully(ty.errorUnionPayload()), + .ErrorUnion => return sema.resolveTypeFully(ty.errorUnionPayload(mod)), .Fn => { const info = mod.typeToFunc(ty).?; if (info.is_generic) { @@ -32048,16 +31974,17 @@ fn resolveInferredErrorSet( sema: *Sema, block: *Block, src: LazySrcLoc, - ies: *Module.Fn.InferredErrorSet, + ies_index: Module.Fn.InferredErrorSet.Index, ) CompileError!void { + const mod = sema.mod; + const ies = mod.inferredErrorSetPtr(ies_index); + if (ies.is_resolved) return; if (ies.func.state == .in_progress) { return sema.fail(block, src, "unable to resolve inferred error set", .{}); } - const mod = sema.mod; - // In order to ensure that all dependencies are properly added to the set, we // need to ensure the function body is analyzed of the inferred error set. // However, in the case of comptime/inline function calls with inferred error sets, @@ -32072,7 +31999,7 @@ fn resolveInferredErrorSet( // so here we can simply skip this case. if (ies_func_info.return_type == .generic_poison_type) { assert(ies_func_info.cc == .Inline); - } else if (ies_func_info.return_type.toType().errorUnionSet().castTag(.error_set_inferred).?.data == ies) { + } else if (mod.typeToInferredErrorSet(ies_func_info.return_type.toType().errorUnionSet(mod)).? 
== ies) { if (ies_func_info.is_generic) { const msg = msg: { const msg = try sema.errMsg(block, src, "unable to resolve inferred error set of generic function", .{}); @@ -32090,10 +32017,11 @@ fn resolveInferredErrorSet( ies.is_resolved = true; - for (ies.inferred_error_sets.keys()) |other_ies| { - if (ies == other_ies) continue; - try sema.resolveInferredErrorSet(block, src, other_ies); + for (ies.inferred_error_sets.keys()) |other_ies_index| { + if (ies_index == other_ies_index) continue; + try sema.resolveInferredErrorSet(block, src, other_ies_index); + const other_ies = mod.inferredErrorSetPtr(other_ies_index); for (other_ies.errors.keys()) |key| { try ies.errors.put(sema.gpa, key, {}); } @@ -32108,8 +32036,9 @@ fn resolveInferredErrorSetTy( src: LazySrcLoc, ty: Type, ) CompileError!void { - if (ty.castTag(.error_set_inferred)) |inferred| { - try sema.resolveInferredErrorSet(block, src, inferred.data); + const mod = sema.mod; + if (mod.typeToInferredErrorSetIndex(ty).unwrap()) |ies_index| { + try sema.resolveInferredErrorSet(block, src, ies_index); } } @@ -32333,7 +32262,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void } const field = &struct_obj.fields.values()[field_i]; - field.ty = try field_ty.copy(decl_arena_allocator); + field.ty = field_ty; if (field_ty.zigTypeTag(mod) == .Opaque) { const msg = msg: { @@ -32809,7 +32738,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { } gop.value_ptr.* = .{ - .ty = try field_ty.copy(decl_arena_allocator), + .ty = field_ty, .abi_align = 0, }; @@ -33038,13 +32967,6 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .empty_struct_type => return Value.empty_struct, .none => switch (ty.tag()) { - .error_set_single, - .error_set, - .error_set_merged, - .error_union, - .error_set_inferred, - => return null, - .inferred_alloc_const => unreachable, .inferred_alloc_mut => unreachable, }, @@ -33062,6 +32984,8 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .error_union_type, .func_type, .anyframe_type, + .error_set_type, + .inferred_error_set_type, => null, .array_type => |array_type| { @@ -33389,7 +33313,7 @@ fn analyzeComptimeAlloc( defer anon_decl.deinit(); const decl_index = try anon_decl.finish( - try var_type.copy(anon_decl.arena()), + var_type, // There will be stores before the first load, but they may be to sub-elements or // sub-fields. So we need to initialize with undef to allow the mechanism to expand // into fields/elements and have those overridden with stored values. 
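Resolution above folds every dependency's errors into the parent set; the companion operation is the new `errorSetMerge` helper added at the bottom of src/Sema.zig in a later hunk of this patch, which unions two already-resolved sets for peer type resolution. A minimal analogue over interned name indices (`u32` standing in for the interned name type, and assuming `lhs` is duplicate-free, as error set name lists are):

    const std = @import("std");

    /// Union of two name lists, left-hand names first, duplicates
    /// dropped; mirrors errorSetMerge before the result is re-sorted
    /// by errorSetFromUnsortedNames.
    fn mergeErrorNames(
        arena: std.mem.Allocator,
        lhs: []const u32,
        rhs: []const u32,
    ) ![]u32 {
        var names = std.AutoArrayHashMap(u32, void).init(arena);
        defer names.deinit();
        try names.ensureUnusedCapacity(lhs.len);
        for (lhs) |name| names.putAssumeCapacityNoClobber(name, {});
        for (rhs) |name| try names.put(name, {});
        return arena.dupe(u32, names.keys());
    }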
@@ -33600,8 +33524,6 @@ fn typePtrOrOptionalPtrTy(sema: *Sema, ty: Type) !?Type { switch (ty.tag()) { .inferred_alloc_const => unreachable, .inferred_alloc_mut => unreachable, - - else => return null, } } @@ -33616,18 +33538,6 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { return switch (ty.ip_index) { .empty_struct_type => false, - .none => switch (ty.tag()) { - .error_set, - .error_set_single, - .error_set_inferred, - .error_set_merged, - => false, - - .inferred_alloc_mut => unreachable, - .inferred_alloc_const => unreachable, - - .error_union => return sema.typeRequiresComptime(ty.errorUnionPayload()), - }, else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { .int_type => return false, .ptr_type => |ptr_type| { @@ -33649,6 +33559,9 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .error_union_type => |error_union_type| { return sema.typeRequiresComptime(error_union_type.payload_type.toType()); }, + + .error_set_type, .inferred_error_set_type => false, + .func_type => true, .simple_type => |t| return switch (t) { @@ -34410,3 +34323,23 @@ fn elemPtrType(sema: *Sema, ptr_ty: Type, offset: ?usize) !Type { .vector_index = vector_info.vector_index, }); } + +/// Merge lhs with rhs. +/// Asserts that lhs and rhs are both error sets and are resolved. +fn errorSetMerge(sema: *Sema, lhs: Type, rhs: Type) !Type { + const mod = sema.mod; + const arena = sema.arena; + const lhs_names = lhs.errorSetNames(mod); + const rhs_names = rhs.errorSetNames(mod); + var names: Module.Fn.InferredErrorSet.NameMap = .{}; + try names.ensureUnusedCapacity(arena, lhs_names.len); + + for (lhs_names) |name| { + names.putAssumeCapacityNoClobber(name, {}); + } + for (rhs_names) |name| { + try names.put(arena, name, {}); + } + + return mod.errorSetFromUnsortedNames(names.keys()); +} diff --git a/src/TypedValue.zig b/src/TypedValue.zig index ced20ac522..144b7ebf9d 100644 --- a/src/TypedValue.zig +++ b/src/TypedValue.zig @@ -27,13 +27,13 @@ pub const Managed = struct { /// Assumes arena allocation. Does a recursive copy. 
pub fn copy(self: TypedValue, arena: Allocator) error{OutOfMemory}!TypedValue { return TypedValue{ - .ty = try self.ty.copy(arena), + .ty = self.ty, .val = try self.val.copy(arena), }; } pub fn eql(a: TypedValue, b: TypedValue, mod: *Module) bool { - if (!a.ty.eql(b.ty, mod)) return false; + if (a.ty.ip_index != b.ty.ip_index) return false; return a.val.eql(b.val, a.ty, mod); } @@ -286,7 +286,7 @@ pub fn print( .@"error" => return writer.print("error.{s}", .{val.castTag(.@"error").?.data.name}), .eu_payload => { val = val.castTag(.eu_payload).?.data; - ty = ty.errorUnionPayload(); + ty = ty.errorUnionPayload(mod); }, .opt_payload => { val = val.castTag(.opt_payload).?.data; diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index 8b84189e18..c9126747da 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -3065,8 +3065,8 @@ fn errUnionErr( maybe_inst: ?Air.Inst.Index, ) !MCValue { const mod = self.bin_file.options.module.?; - const err_ty = error_union_ty.errorUnionSet(); - const payload_ty = error_union_ty.errorUnionPayload(); + const err_ty = error_union_ty.errorUnionSet(mod); + const payload_ty = error_union_ty.errorUnionPayload(mod); if (err_ty.errorSetIsEmpty(mod)) { return MCValue{ .immediate = 0 }; } @@ -3145,8 +3145,8 @@ fn errUnionPayload( maybe_inst: ?Air.Inst.Index, ) !MCValue { const mod = self.bin_file.options.module.?; - const err_ty = error_union_ty.errorUnionSet(); - const payload_ty = error_union_ty.errorUnionPayload(); + const err_ty = error_union_ty.errorUnionSet(mod); + const payload_ty = error_union_ty.errorUnionPayload(mod); if (err_ty.errorSetIsEmpty(mod)) { return try error_union_bind.resolveToMcv(self); } @@ -3305,8 +3305,8 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const error_union_ty = self.air.getRefType(ty_op.ty); - const error_ty = error_union_ty.errorUnionSet(); - const payload_ty = error_union_ty.errorUnionPayload(); + const error_ty = error_union_ty.errorUnionSet(mod); + const payload_ty = error_union_ty.errorUnionPayload(mod); const operand = try self.resolveInst(ty_op.operand); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result operand; @@ -3329,8 +3329,8 @@ fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void { const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const mod = self.bin_file.options.module.?; const error_union_ty = self.air.getRefType(ty_op.ty); - const error_ty = error_union_ty.errorUnionSet(); - const payload_ty = error_union_ty.errorUnionPayload(); + const error_ty = error_union_ty.errorUnionSet(mod); + const payload_ty = error_union_ty.errorUnionPayload(mod); const operand = try self.resolveInst(ty_op.operand); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result operand; @@ -4893,7 +4893,7 @@ fn isErr( error_union_ty: Type, ) !MCValue { const mod = self.bin_file.options.module.?; - const error_type = error_union_ty.errorUnionSet(); + const error_type = error_union_ty.errorUnionSet(mod); if (error_type.errorSetIsEmpty(mod)) { return MCValue{ .immediate = 0 }; // always false diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index a6a715c75d..fa8646be43 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -2042,8 +2042,8 @@ fn errUnionErr( maybe_inst: ?Air.Inst.Index, ) !MCValue { const mod = self.bin_file.options.module.?; - 
const err_ty = error_union_ty.errorUnionSet(); - const payload_ty = error_union_ty.errorUnionPayload(); + const err_ty = error_union_ty.errorUnionSet(mod); + const payload_ty = error_union_ty.errorUnionPayload(mod); if (err_ty.errorSetIsEmpty(mod)) { return MCValue{ .immediate = 0 }; } @@ -2119,8 +2119,8 @@ fn errUnionPayload( maybe_inst: ?Air.Inst.Index, ) !MCValue { const mod = self.bin_file.options.module.?; - const err_ty = error_union_ty.errorUnionSet(); - const payload_ty = error_union_ty.errorUnionPayload(); + const err_ty = error_union_ty.errorUnionSet(mod); + const payload_ty = error_union_ty.errorUnionPayload(mod); if (err_ty.errorSetIsEmpty(mod)) { return try error_union_bind.resolveToMcv(self); } @@ -2232,8 +2232,8 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const error_union_ty = self.air.getRefType(ty_op.ty); - const error_ty = error_union_ty.errorUnionSet(); - const payload_ty = error_union_ty.errorUnionPayload(); + const error_ty = error_union_ty.errorUnionSet(mod); + const payload_ty = error_union_ty.errorUnionPayload(mod); const operand = try self.resolveInst(ty_op.operand); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result operand; @@ -2256,8 +2256,8 @@ fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const error_union_ty = self.air.getRefType(ty_op.ty); - const error_ty = error_union_ty.errorUnionSet(); - const payload_ty = error_union_ty.errorUnionPayload(); + const error_ty = error_union_ty.errorUnionSet(mod); + const payload_ty = error_union_ty.errorUnionPayload(mod); const operand = try self.resolveInst(ty_op.operand); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result operand; @@ -4871,7 +4871,7 @@ fn isErr( error_union_ty: Type, ) !MCValue { const mod = self.bin_file.options.module.?; - const error_type = error_union_ty.errorUnionSet(); + const error_type = error_union_ty.errorUnionSet(mod); if (error_type.errorSetIsEmpty(mod)) { return MCValue{ .immediate = 0 }; // always false diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index 072d3ed098..13f129f87b 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -2707,12 +2707,12 @@ fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void { } fn airUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const error_union_ty = self.typeOf(ty_op.operand); - const payload_ty = error_union_ty.errorUnionPayload(); + const payload_ty = error_union_ty.errorUnionPayload(mod); const mcv = try self.resolveInst(ty_op.operand); - const mod = self.bin_file.options.module.?; if (!payload_ty.hasRuntimeBits(mod)) break :result mcv; return self.fail("TODO implement unwrap error union error for non-empty payloads", .{}); @@ -2721,11 +2721,11 @@ fn airUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !void { } fn airUnwrapErrPayload(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { 
const error_union_ty = self.typeOf(ty_op.operand); - const payload_ty = error_union_ty.errorUnionPayload(); - const mod = self.bin_file.options.module.?; + const payload_ty = error_union_ty.errorUnionPayload(mod); if (!payload_ty.hasRuntimeBits(mod)) break :result MCValue.none; return self.fail("TODO implement unwrap error union payload for non-empty payloads", .{}); @@ -2735,12 +2735,12 @@ fn airUnwrapErrPayload(self: *Self, inst: Air.Inst.Index) !void { /// E to E!T fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const error_union_ty = self.air.getRefType(ty_op.ty); - const payload_ty = error_union_ty.errorUnionPayload(); + const payload_ty = error_union_ty.errorUnionPayload(mod); const mcv = try self.resolveInst(ty_op.operand); - const mod = self.bin_file.options.module.?; if (!payload_ty.hasRuntimeBits(mod)) break :result mcv; return self.fail("TODO implement wrap errunion error for non-empty payloads", .{}); @@ -3529,8 +3529,8 @@ fn ensureProcessDeathCapacity(self: *Self, additional_count: usize) !void { /// Given an error union, returns the payload fn errUnionPayload(self: *Self, error_union_mcv: MCValue, error_union_ty: Type) !MCValue { const mod = self.bin_file.options.module.?; - const err_ty = error_union_ty.errorUnionSet(); - const payload_ty = error_union_ty.errorUnionPayload(); + const err_ty = error_union_ty.errorUnionSet(mod); + const payload_ty = error_union_ty.errorUnionPayload(mod); if (err_ty.errorSetIsEmpty(mod)) { return error_union_mcv; } @@ -4168,8 +4168,8 @@ fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) MCValue { fn isErr(self: *Self, ty: Type, operand: MCValue) !MCValue { const mod = self.bin_file.options.module.?; - const error_type = ty.errorUnionSet(); - const payload_type = ty.errorUnionPayload(); + const error_type = ty.errorUnionSet(mod); + const payload_type = ty.errorUnionPayload(mod); if (!error_type.hasRuntimeBits(mod)) { return MCValue{ .immediate = 0 }; // always false diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index a950264840..2d7e4a8585 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -1264,7 +1264,7 @@ fn genFunc(func: *CodeGen) InnerError!void { if (func_type.returns.len != 0 and func.air.instructions.len > 0) { const inst = @intCast(u32, func.air.instructions.len - 1); const last_inst_ty = func.typeOfIndex(inst); - if (!last_inst_ty.hasRuntimeBitsIgnoreComptime(mod) or last_inst_ty.isNoReturn()) { + if (!last_inst_ty.hasRuntimeBitsIgnoreComptime(mod) or last_inst_ty.isNoReturn(mod)) { try func.addTag(.@"unreachable"); } } @@ -1757,7 +1757,7 @@ fn isByRef(ty: Type, mod: *Module) bool { .Int => return ty.intInfo(mod).bits > 64, .Float => return ty.floatBits(target) > 64, .ErrorUnion => { - const pl_ty = ty.errorUnionPayload(); + const pl_ty = ty.errorUnionPayload(mod); if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) { return false; } @@ -2256,7 +2256,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif const result_value = result_value: { if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod) and !ret_ty.isError(mod)) { break :result_value WValue{ .none = {} }; - } else if (ret_ty.isNoReturn()) { + } else if (ret_ty.isNoReturn(mod)) { try func.addTag(.@"unreachable"); break :result_value WValue{ .none = {} }; } else if (first_param_sret) { @@ -2346,7 +2346,7 @@ fn 
store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE const abi_size = ty.abiSize(mod); switch (ty.zigTypeTag(mod)) { .ErrorUnion => { - const pl_ty = ty.errorUnionPayload(); + const pl_ty = ty.errorUnionPayload(mod); if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) { return func.store(lhs, rhs, Type.anyerror, 0); } @@ -3111,8 +3111,8 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue { else => return WValue{ .imm32 = 0 }, }, .ErrorUnion => { - const error_type = ty.errorUnionSet(); - const payload_type = ty.errorUnionPayload(); + const error_type = ty.errorUnionSet(mod); + const payload_type = ty.errorUnionPayload(mod); if (!payload_type.hasRuntimeBitsIgnoreComptime(mod)) { // We use the error type directly as the type. const is_pl = val.errorUnionIsPayload(); @@ -3916,10 +3916,10 @@ fn airIsErr(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerErro const un_op = func.air.instructions.items(.data)[inst].un_op; const operand = try func.resolveInst(un_op); const err_union_ty = func.typeOf(un_op); - const pl_ty = err_union_ty.errorUnionPayload(); + const pl_ty = err_union_ty.errorUnionPayload(mod); const result = result: { - if (err_union_ty.errorUnionSet().errorSetIsEmpty(mod)) { + if (err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) { switch (opcode) { .i32_ne => break :result WValue{ .imm32 = 0 }, .i32_eq => break :result WValue{ .imm32 = 1 }, @@ -3953,7 +3953,7 @@ fn airUnwrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: boo const operand = try func.resolveInst(ty_op.operand); const op_ty = func.typeOf(ty_op.operand); const err_ty = if (op_is_ptr) op_ty.childType(mod) else op_ty; - const payload_ty = err_ty.errorUnionPayload(); + const payload_ty = err_ty.errorUnionPayload(mod); const result = result: { if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { @@ -3981,10 +3981,10 @@ fn airUnwrapErrUnionError(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: bool) const operand = try func.resolveInst(ty_op.operand); const op_ty = func.typeOf(ty_op.operand); const err_ty = if (op_is_ptr) op_ty.childType(mod) else op_ty; - const payload_ty = err_ty.errorUnionPayload(); + const payload_ty = err_ty.errorUnionPayload(mod); const result = result: { - if (err_ty.errorUnionSet().errorSetIsEmpty(mod)) { + if (err_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) { break :result WValue{ .imm32 = 0 }; } @@ -4031,7 +4031,7 @@ fn airWrapErrUnionErr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const operand = try func.resolveInst(ty_op.operand); const err_ty = func.air.getRefType(ty_op.ty); - const pl_ty = err_ty.errorUnionPayload(); + const pl_ty = err_ty.errorUnionPayload(mod); const result = result: { if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) { @@ -4044,7 +4044,7 @@ fn airWrapErrUnionErr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { // write 'undefined' to the payload const payload_ptr = try func.buildPointerOffset(err_union, @intCast(u32, errUnionPayloadOffset(pl_ty, mod)), .new); - const len = @intCast(u32, err_ty.errorUnionPayload().abiSize(mod)); + const len = @intCast(u32, err_ty.errorUnionPayload(mod).abiSize(mod)); try func.memset(Type.u8, payload_ptr, .{ .imm32 = len }, .{ .imm32 = 0xaa }); break :result err_union; @@ -5362,7 +5362,7 @@ fn airErrUnionPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!voi const ty_op = func.air.instructions.items(.data)[inst].ty_op; const err_set_ty = func.typeOf(ty_op.operand).childType(mod); - const payload_ty = err_set_ty.errorUnionPayload(); + 
const payload_ty = err_set_ty.errorUnionPayload(mod); const operand = try func.resolveInst(ty_op.operand); // set error-tag to '0' to annotate error union is non-error @@ -6177,10 +6177,10 @@ fn lowerTry( return func.fail("TODO: lowerTry for pointers", .{}); } - const pl_ty = err_union_ty.errorUnionPayload(); + const pl_ty = err_union_ty.errorUnionPayload(mod); const pl_has_bits = pl_ty.hasRuntimeBitsIgnoreComptime(mod); - if (!err_union_ty.errorUnionSet().errorSetIsEmpty(mod)) { + if (!err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) { // Block we can jump out of when error is not set try func.startBlock(.block, wasm.block_empty); @@ -6742,7 +6742,7 @@ fn callIntrinsic( if (!return_type.hasRuntimeBitsIgnoreComptime(mod)) { return WValue.none; - } else if (return_type.isNoReturn()) { + } else if (return_type.isNoReturn(mod)) { try func.addTag(.@"unreachable"); return WValue.none; } else if (want_sret_param) { @@ -6941,20 +6941,21 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 { } fn airErrorSetHasValue(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; const operand = try func.resolveInst(ty_op.operand); const error_set_ty = func.air.getRefType(ty_op.ty); const result = try func.allocLocal(Type.bool); - const names = error_set_ty.errorSetNames(); + const names = error_set_ty.errorSetNames(mod); var values = try std.ArrayList(u32).initCapacity(func.gpa, names.len); defer values.deinit(); - const mod = func.bin_file.base.options.module.?; var lowest: ?u32 = null; var highest: ?u32 = null; - for (names) |name| { + for (names) |name_ip| { + const name = mod.intern_pool.stringToSlice(name_ip); const err_int = mod.global_error_set.get(name).?; if (lowest) |*l| { if (err_int < l.*) { diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index e83644269f..77b4e6d425 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -3612,8 +3612,8 @@ fn airUnwrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const err_union_ty = self.typeOf(ty_op.operand); - const err_ty = err_union_ty.errorUnionSet(); - const payload_ty = err_union_ty.errorUnionPayload(); + const err_ty = err_union_ty.errorUnionSet(mod); + const payload_ty = err_union_ty.errorUnionPayload(mod); const operand = try self.resolveInst(ty_op.operand); const result: MCValue = result: { @@ -3671,7 +3671,7 @@ fn genUnwrapErrorUnionPayloadMir( err_union: MCValue, ) !MCValue { const mod = self.bin_file.options.module.?; - const payload_ty = err_union_ty.errorUnionPayload(); + const payload_ty = err_union_ty.errorUnionPayload(mod); const result: MCValue = result: { if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result .none; @@ -3731,8 +3731,8 @@ fn airUnwrapErrUnionErrPtr(self: *Self, inst: Air.Inst.Index) !void { defer self.register_manager.unlockReg(dst_lock); const eu_ty = src_ty.childType(mod); - const pl_ty = eu_ty.errorUnionPayload(); - const err_ty = eu_ty.errorUnionSet(); + const pl_ty = eu_ty.errorUnionPayload(mod); + const err_ty = eu_ty.errorUnionSet(mod); const err_off = @intCast(i32, errUnionErrorOffset(pl_ty, mod)); const err_abi_size = @intCast(u32, err_ty.abiSize(mod)); try self.asmRegisterMemory( @@ -3771,7 +3771,7 @@ fn airUnwrapErrUnionPayloadPtr(self: *Self, inst: Air.Inst.Index) !void { defer if (dst_lock) |lock| 
self.register_manager.unlockReg(lock); const eu_ty = src_ty.childType(mod); - const pl_ty = eu_ty.errorUnionPayload(); + const pl_ty = eu_ty.errorUnionPayload(mod); const pl_off = @intCast(i32, errUnionPayloadOffset(pl_ty, mod)); const dst_abi_size = @intCast(u32, dst_ty.abiSize(mod)); try self.asmRegisterMemory( @@ -3797,8 +3797,8 @@ fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void { defer self.register_manager.unlockReg(src_lock); const eu_ty = src_ty.childType(mod); - const pl_ty = eu_ty.errorUnionPayload(); - const err_ty = eu_ty.errorUnionSet(); + const pl_ty = eu_ty.errorUnionPayload(mod); + const err_ty = eu_ty.errorUnionSet(mod); const err_off = @intCast(i32, errUnionErrorOffset(pl_ty, mod)); const err_abi_size = @intCast(u32, err_ty.abiSize(mod)); try self.asmMemoryImmediate( @@ -3901,8 +3901,8 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const eu_ty = self.air.getRefType(ty_op.ty); - const pl_ty = eu_ty.errorUnionPayload(); - const err_ty = eu_ty.errorUnionSet(); + const pl_ty = eu_ty.errorUnionPayload(mod); + const err_ty = eu_ty.errorUnionSet(mod); const operand = try self.resolveInst(ty_op.operand); const result: MCValue = result: { @@ -3924,8 +3924,8 @@ fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const eu_ty = self.air.getRefType(ty_op.ty); - const pl_ty = eu_ty.errorUnionPayload(); - const err_ty = eu_ty.errorUnionSet(); + const pl_ty = eu_ty.errorUnionPayload(mod); + const err_ty = eu_ty.errorUnionSet(mod); const result: MCValue = result: { if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result try self.resolveInst(ty_op.operand); @@ -8782,7 +8782,7 @@ fn isNullPtr(self: *Self, inst: Air.Inst.Index, ptr_ty: Type, ptr_mcv: MCValue) fn isErr(self: *Self, maybe_inst: ?Air.Inst.Index, ty: Type, operand: MCValue) !MCValue { const mod = self.bin_file.options.module.?; - const err_type = ty.errorUnionSet(); + const err_type = ty.errorUnionSet(mod); if (err_type.errorSetIsEmpty(mod)) { return MCValue{ .immediate = 0 }; // always false @@ -8793,7 +8793,7 @@ fn isErr(self: *Self, maybe_inst: ?Air.Inst.Index, ty: Type, operand: MCValue) ! 
self.eflags_inst = inst; } - const err_off = errUnionErrorOffset(ty.errorUnionPayload(), mod); + const err_off = errUnionErrorOffset(ty.errorUnionPayload(mod), mod); switch (operand) { .register => |reg| { const eu_lock = self.register_manager.lockReg(reg); diff --git a/src/codegen.zig b/src/codegen.zig index 8e145a3b32..775eb09ab0 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -139,7 +139,7 @@ pub fn generateLazySymbol( return generateLazyFunction(bin_file, src_loc, lazy_sym, code, debug_output); } - if (lazy_sym.ty.isAnyError()) { + if (lazy_sym.ty.isAnyError(mod)) { alignment.* = 4; const err_names = mod.error_name_list.items; mem.writeInt(u32, try code.addManyAsArray(4), @intCast(u32, err_names.len), endian); @@ -670,8 +670,8 @@ pub fn generateSymbol( return Result.ok; }, .ErrorUnion => { - const error_ty = typed_value.ty.errorUnionSet(); - const payload_ty = typed_value.ty.errorUnionPayload(); + const error_ty = typed_value.ty.errorUnionSet(mod); + const payload_ty = typed_value.ty.errorUnionPayload(mod); const is_payload = typed_value.val.errorUnionIsPayload(); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { @@ -894,7 +894,7 @@ fn lowerParentPtr( }, .eu_payload_ptr => { const eu_payload_ptr = parent_ptr.castTag(.eu_payload_ptr).?.data; - const pl_ty = eu_payload_ptr.container_ty.errorUnionPayload(); + const pl_ty = eu_payload_ptr.container_ty.errorUnionPayload(mod); return lowerParentPtr( bin_file, src_loc, @@ -1249,8 +1249,8 @@ pub fn genTypedValue( } }, .ErrorUnion => { - const error_type = typed_value.ty.errorUnionSet(); - const payload_type = typed_value.ty.errorUnionPayload(); + const error_type = typed_value.ty.errorUnionSet(mod); + const payload_type = typed_value.ty.errorUnionPayload(mod); const is_pl = typed_value.val.errorUnionIsPayload(); if (!payload_type.hasRuntimeBitsIgnoreComptime(mod)) { diff --git a/src/codegen/c.zig b/src/codegen/c.zig index c2a108d68e..c9cc485903 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -465,7 +465,7 @@ pub const Function = struct { }), }, .data = switch (key) { - .tag_name => .{ .tag_name = try data.tag_name.copy(arena) }, + .tag_name => .{ .tag_name = data.tag_name }, .never_tail => .{ .never_tail = data.never_tail }, .never_inline => .{ .never_inline = data.never_inline }, }, @@ -862,8 +862,8 @@ pub const DeclGen = struct { return writer.writeByte('}'); }, .ErrorUnion => { - const payload_ty = ty.errorUnionPayload(); - const error_ty = ty.errorUnionSet(); + const payload_ty = ty.errorUnionPayload(mod); + const error_ty = ty.errorUnionSet(mod); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return dg.renderValue(writer, error_ty, val, location); @@ -1252,8 +1252,8 @@ pub const DeclGen = struct { } }, .ErrorUnion => { - const payload_ty = ty.errorUnionPayload(); - const error_ty = ty.errorUnionSet(); + const payload_ty = ty.errorUnionPayload(mod); + const error_ty = ty.errorUnionSet(mod); const error_val = if (val.errorUnionIsPayload()) try mod.intValue(Type.anyerror, 0) else val; if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { @@ -4252,6 +4252,7 @@ fn airDbgVar(f: *Function, inst: Air.Inst.Index) !CValue { } fn airBlock(f: *Function, inst: Air.Inst.Index) !CValue { + const mod = f.object.dg.module; const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; const extra = f.air.extraData(Air.Block, ty_pl.payload); const body = f.air.extra[extra.end..][0..extra.data.body_len]; @@ -4284,7 +4285,7 @@ fn airBlock(f: *Function, inst: Air.Inst.Index) !CValue { try f.object.indent_writer.insertNewline(); // noreturn 
blocks have no `br` instructions reaching them, so we don't want a label - if (!f.typeOfIndex(inst).isNoReturn()) { + if (!f.typeOfIndex(inst).isNoReturn(mod)) { // label must be followed by an expression, include an empty one. try writer.print("zig_block_{d}:;\n", .{block_id}); } @@ -4322,10 +4323,10 @@ fn lowerTry( const inst_ty = f.typeOfIndex(inst); const liveness_condbr = f.liveness.getCondBr(inst); const writer = f.object.writer(); - const payload_ty = err_union_ty.errorUnionPayload(); + const payload_ty = err_union_ty.errorUnionPayload(mod); const payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(mod); - if (!err_union_ty.errorUnionSet().errorSetIsEmpty(mod)) { + if (!err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) { try writer.writeAll("if ("); if (!payload_has_bits) { if (is_ptr) @@ -5500,8 +5501,8 @@ fn airUnwrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue { const operand_is_ptr = operand_ty.zigTypeTag(mod) == .Pointer; const error_union_ty = if (operand_is_ptr) operand_ty.childType(mod) else operand_ty; - const error_ty = error_union_ty.errorUnionSet(); - const payload_ty = error_union_ty.errorUnionPayload(); + const error_ty = error_union_ty.errorUnionSet(mod); + const payload_ty = error_union_ty.errorUnionPayload(mod); const local = try f.allocLocal(inst, inst_ty); if (!payload_ty.hasRuntimeBits(mod) and operand == .local and operand.local == local.new_local) { @@ -5539,7 +5540,7 @@ fn airUnwrapErrUnionPay(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValu const error_union_ty = if (is_ptr) operand_ty.childType(mod) else operand_ty; const writer = f.object.writer(); - if (!error_union_ty.errorUnionPayload().hasRuntimeBits(mod)) { + if (!error_union_ty.errorUnionPayload(mod).hasRuntimeBits(mod)) { if (!is_ptr) return .none; const local = try f.allocLocal(inst, inst_ty); @@ -5601,9 +5602,9 @@ fn airWrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue { const ty_op = f.air.instructions.items(.data)[inst].ty_op; const inst_ty = f.typeOfIndex(inst); - const payload_ty = inst_ty.errorUnionPayload(); + const payload_ty = inst_ty.errorUnionPayload(mod); const repr_is_err = !payload_ty.hasRuntimeBitsIgnoreComptime(mod); - const err_ty = inst_ty.errorUnionSet(); + const err_ty = inst_ty.errorUnionSet(mod); const err = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); @@ -5642,8 +5643,8 @@ fn airErrUnionPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue { const operand = try f.resolveInst(ty_op.operand); const error_union_ty = f.typeOf(ty_op.operand).childType(mod); - const error_ty = error_union_ty.errorUnionSet(); - const payload_ty = error_union_ty.errorUnionPayload(); + const error_ty = error_union_ty.errorUnionSet(mod); + const payload_ty = error_union_ty.errorUnionPayload(mod); // First, set the non-error value. 
if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { @@ -5691,10 +5692,10 @@ fn airWrapErrUnionPay(f: *Function, inst: Air.Inst.Index) !CValue { const ty_op = f.air.instructions.items(.data)[inst].ty_op; const inst_ty = f.typeOfIndex(inst); - const payload_ty = inst_ty.errorUnionPayload(); + const payload_ty = inst_ty.errorUnionPayload(mod); const payload = try f.resolveInst(ty_op.operand); const repr_is_err = !payload_ty.hasRuntimeBitsIgnoreComptime(mod); - const err_ty = inst_ty.errorUnionSet(); + const err_ty = inst_ty.errorUnionSet(mod); try reap(f, inst, &.{ty_op.operand}); const writer = f.object.writer(); @@ -5729,8 +5730,8 @@ fn airIsErr(f: *Function, inst: Air.Inst.Index, is_ptr: bool, operator: []const const operand_ty = f.typeOf(un_op); const local = try f.allocLocal(inst, Type.bool); const err_union_ty = if (is_ptr) operand_ty.childType(mod) else operand_ty; - const payload_ty = err_union_ty.errorUnionPayload(); - const error_ty = err_union_ty.errorUnionSet(); + const payload_ty = err_union_ty.errorUnionPayload(mod); + const error_ty = err_union_ty.errorUnionSet(mod); try f.writeCValue(writer, local, .Other); try writer.writeAll(" = "); diff --git a/src/codegen/c/type.zig b/src/codegen/c/type.zig index 70426972af..dc1749d42e 100644 --- a/src/codegen/c/type.zig +++ b/src/codegen/c/type.zig @@ -1680,14 +1680,14 @@ pub const CType = extern union { .complete, .parameter, .global => try lookup.typeToIndex(ty, .forward), .payload => unreachable, }) |fwd_idx| { - const payload_ty = ty.errorUnionPayload(); + const payload_ty = ty.errorUnionPayload(mod); if (try lookup.typeToIndex(payload_ty, switch (kind) { .forward, .forward_parameter => .forward, .complete, .parameter => .complete, .global => .global, .payload => unreachable, })) |payload_idx| { - const error_ty = ty.errorUnionSet(); + const error_ty = ty.errorUnionSet(mod); if (payload_idx == Tag.void.toIndex()) { try self.initType(error_ty, kind, lookup); } else if (try lookup.typeToIndex(error_ty, kind)) |error_idx| { diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 0c503edee4..1da3d91b13 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -362,15 +362,11 @@ pub const Object = struct { decl_map: std.AutoHashMapUnmanaged(Module.Decl.Index, *llvm.Value), /// Serves the same purpose as `decl_map` but only used for the `is_named_enum_value` instruction. named_enum_map: std.AutoHashMapUnmanaged(Module.Decl.Index, *llvm.Value), - /// Maps Zig types to LLVM types. The table memory itself is backed by the GPA of - /// the compiler, but the Type/Value memory here is backed by `type_map_arena`. - /// TODO we need to remove entries from this map in response to incremental compilation - /// but I think the frontend won't tell us about types that get deleted because - /// hasRuntimeBits() is false for types. + /// Maps Zig types to LLVM types. The table memory is backed by the GPA of + /// the compiler. + /// TODO when InternPool garbage collection is implemented, this map needs + /// to be garbage collected as well. type_map: TypeMap, - /// The backing memory for `type_map`. Periodically garbage collected after flush(). - /// The code for doing the periodical GC is not yet implemented. - type_map_arena: std.heap.ArenaAllocator, di_type_map: DITypeMap, /// The LLVM global table which holds the names corresponding to Zig errors. /// Note that the values are not added until flushModule, when all errors in @@ -381,12 +377,7 @@ pub const Object = struct { /// name collision. 
extern_collisions: std.AutoArrayHashMapUnmanaged(Module.Decl.Index, void), - pub const TypeMap = std.HashMapUnmanaged( - Type, - *llvm.Type, - Type.HashContext64, - std.hash_map.default_max_load_percentage, - ); + pub const TypeMap = std.AutoHashMapUnmanaged(InternPool.Index, *llvm.Type); /// This is an ArrayHashMap as opposed to a HashMap because in `flushModule` we /// want to iterate over it while adding entries to it. @@ -543,7 +534,6 @@ pub const Object = struct { .decl_map = .{}, .named_enum_map = .{}, .type_map = .{}, - .type_map_arena = std.heap.ArenaAllocator.init(gpa), .di_type_map = .{}, .error_name_table = null, .extern_collisions = .{}, @@ -563,7 +553,6 @@ pub const Object = struct { self.decl_map.deinit(gpa); self.named_enum_map.deinit(gpa); self.type_map.deinit(gpa); - self.type_map_arena.deinit(); self.extern_collisions.deinit(gpa); self.* = undefined; } @@ -1462,9 +1451,6 @@ pub const Object = struct { return o.lowerDebugTypeImpl(entry, resolve, di_type); } errdefer assert(o.di_type_map.orderedRemoveContext(ty, .{ .mod = o.module })); - // The Type memory is ephemeral; since we want to store a longer-lived - // reference, we need to copy it here. - gop.key_ptr.* = try ty.copy(o.type_map_arena.allocator()); const entry: Object.DITypeMap.Entry = .{ .key_ptr = gop.key_ptr, .value_ptr = gop.value_ptr, @@ -1868,7 +1854,7 @@ pub const Object = struct { return full_di_ty; }, .ErrorUnion => { - const payload_ty = ty.errorUnionPayload(); + const payload_ty = ty.errorUnionPayload(mod); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { const err_set_di_ty = try o.lowerDebugType(Type.anyerror, .full); // The recursive call to `lowerDebugType` means we can't use `gop` anymore. @@ -2823,7 +2809,7 @@ pub const DeclGen = struct { .Opaque => { if (t.ip_index == .anyopaque_type) return dg.context.intType(8); - const gop = try dg.object.type_map.getOrPutContext(gpa, t, .{ .mod = mod }); + const gop = try dg.object.type_map.getOrPut(gpa, t.toIntern()); if (gop.found_existing) return gop.value_ptr.*; const opaque_type = mod.intern_pool.indexToKey(t.ip_index).opaque_type; @@ -2869,7 +2855,7 @@ pub const DeclGen = struct { return dg.context.structType(&fields_buf, 3, .False); }, .ErrorUnion => { - const payload_ty = t.errorUnionPayload(); + const payload_ty = t.errorUnionPayload(mod); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return try dg.lowerType(Type.anyerror); } @@ -2913,13 +2899,9 @@ pub const DeclGen = struct { }, .ErrorSet => return dg.context.intType(16), .Struct => { - const gop = try dg.object.type_map.getOrPutContext(gpa, t, .{ .mod = mod }); + const gop = try dg.object.type_map.getOrPut(gpa, t.toIntern()); if (gop.found_existing) return gop.value_ptr.*; - // The Type memory is ephemeral; since we want to store a longer-lived - // reference, we need to copy it here. - gop.key_ptr.* = try t.copy(dg.object.type_map_arena.allocator()); - const struct_type = switch (mod.intern_pool.indexToKey(t.ip_index)) { .anon_struct_type => |tuple| { const llvm_struct_ty = dg.context.structCreateNamed(""); @@ -3041,13 +3023,9 @@ pub const DeclGen = struct { return llvm_struct_ty; }, .Union => { - const gop = try dg.object.type_map.getOrPutContext(gpa, t, .{ .mod = mod }); + const gop = try dg.object.type_map.getOrPut(gpa, t.toIntern()); if (gop.found_existing) return gop.value_ptr.*; - // The Type memory is ephemeral; since we want to store a longer-lived - // reference, we need to copy it here. 
- gop.key_ptr.* = try t.copy(dg.object.type_map_arena.allocator()); - const layout = t.unionGetLayout(mod); const union_obj = mod.typeToUnion(t).?; @@ -3571,7 +3549,7 @@ pub const DeclGen = struct { } }, .ErrorUnion => { - const payload_type = tv.ty.errorUnionPayload(); + const payload_type = tv.ty.errorUnionPayload(mod); const is_pl = tv.val.errorUnionIsPayload(); if (!payload_type.hasRuntimeBitsIgnoreComptime(mod)) { @@ -4130,7 +4108,7 @@ pub const DeclGen = struct { const eu_payload_ptr = ptr_val.castTag(.eu_payload_ptr).?.data; const parent_llvm_ptr = try dg.lowerParentPtr(eu_payload_ptr.container_ptr, true); - const payload_ty = eu_payload_ptr.container_ty.errorUnionPayload(); + const payload_ty = eu_payload_ptr.container_ty.errorUnionPayload(mod); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { // In this case, we represent pointer to error union the same as pointer // to the payload. @@ -5368,7 +5346,7 @@ pub const FuncGen = struct { const inst_ty = self.typeOfIndex(inst); const parent_bb = self.context.createBasicBlock("Block"); - if (inst_ty.isNoReturn()) { + if (inst_ty.isNoReturn(mod)) { try self.genBody(body); return null; } @@ -5490,11 +5468,11 @@ pub const FuncGen = struct { is_unused: bool, ) !?*llvm.Value { const mod = fg.dg.module; - const payload_ty = err_union_ty.errorUnionPayload(); + const payload_ty = err_union_ty.errorUnionPayload(mod); const payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime(mod); const err_union_llvm_ty = try fg.dg.lowerType(err_union_ty); - if (!err_union_ty.errorUnionSet().errorSetIsEmpty(mod)) { + if (!err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) { const is_err = err: { const err_set_ty = try fg.dg.lowerType(Type.anyerror); const zero = err_set_ty.constNull(); @@ -5601,6 +5579,7 @@ pub const FuncGen = struct { } fn airLoop(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { + const mod = self.dg.module; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const loop = self.air.extraData(Air.Block, ty_pl.payload); const body = self.air.extra[loop.end..][0..loop.data.body_len]; @@ -5616,7 +5595,7 @@ pub const FuncGen = struct { // would have been emitted already. Also the main loop in genBody can // be while(true) instead of for(body), which will eliminate 1 branch on // a hot path. 
- if (body.len == 0 or !self.typeOfIndex(body[body.len - 1]).isNoReturn()) { + if (body.len == 0 or !self.typeOfIndex(body[body.len - 1]).isNoReturn(mod)) { _ = self.builder.buildBr(loop_block); } return null; @@ -6674,11 +6653,11 @@ pub const FuncGen = struct { const operand = try self.resolveInst(un_op); const operand_ty = self.typeOf(un_op); const err_union_ty = if (operand_is_ptr) operand_ty.childType(mod) else operand_ty; - const payload_ty = err_union_ty.errorUnionPayload(); + const payload_ty = err_union_ty.errorUnionPayload(mod); const err_set_ty = try self.dg.lowerType(Type.anyerror); const zero = err_set_ty.constNull(); - if (err_union_ty.errorUnionSet().errorSetIsEmpty(mod)) { + if (err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) { const llvm_i1 = self.context.intType(1); switch (op) { .EQ => return llvm_i1.constInt(1, .False), // 0 == 0 @@ -6825,7 +6804,7 @@ pub const FuncGen = struct { const operand = try self.resolveInst(ty_op.operand); const operand_ty = self.typeOf(ty_op.operand); const err_union_ty = if (operand_is_ptr) operand_ty.childType(mod) else operand_ty; - if (err_union_ty.errorUnionSet().errorSetIsEmpty(mod)) { + if (err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) { const err_llvm_ty = try self.dg.lowerType(Type.anyerror); if (operand_is_ptr) { return operand; @@ -6836,7 +6815,7 @@ pub const FuncGen = struct { const err_set_llvm_ty = try self.dg.lowerType(Type.anyerror); - const payload_ty = err_union_ty.errorUnionPayload(); + const payload_ty = err_union_ty.errorUnionPayload(mod); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { if (!operand_is_ptr) return operand; return self.builder.buildLoad(err_set_llvm_ty, operand, ""); @@ -6859,7 +6838,7 @@ pub const FuncGen = struct { const operand = try self.resolveInst(ty_op.operand); const err_union_ty = self.typeOf(ty_op.operand).childType(mod); - const payload_ty = err_union_ty.errorUnionPayload(); + const payload_ty = err_union_ty.errorUnionPayload(mod); const non_error_val = try self.dg.lowerValue(.{ .ty = Type.anyerror, .val = try mod.intValue(Type.err_int, 0) }); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { _ = self.builder.buildStore(non_error_val, operand); @@ -6968,7 +6947,7 @@ pub const FuncGen = struct { const mod = self.dg.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const err_un_ty = self.typeOfIndex(inst); - const payload_ty = err_un_ty.errorUnionPayload(); + const payload_ty = err_un_ty.errorUnionPayload(mod); const operand = try self.resolveInst(ty_op.operand); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return operand; @@ -8787,13 +8766,14 @@ pub const FuncGen = struct { const operand = try self.resolveInst(ty_op.operand); const error_set_ty = self.air.getRefType(ty_op.ty); - const names = error_set_ty.errorSetNames(); + const names = error_set_ty.errorSetNames(mod); const valid_block = self.context.appendBasicBlock(self.llvm_func, "Valid"); const invalid_block = self.context.appendBasicBlock(self.llvm_func, "Invalid"); const end_block = self.context.appendBasicBlock(self.llvm_func, "End"); const switch_instr = self.builder.buildSwitch(operand, invalid_block, @intCast(c_uint, names.len)); - for (names) |name| { + for (names) |name_ip| { + const name = mod.intern_pool.stringToSlice(name_ip); const err_int = mod.global_error_set.get(name).?; const this_tag_int_value = try self.dg.lowerValue(.{ .ty = Type.err_int, @@ -11095,7 +11075,7 @@ fn isByRef(ty: Type, mod: *Module) bool { else => return ty.hasRuntimeBits(mod), }, .ErrorUnion => { - const 
payload_ty = ty.errorUnionPayload(); + const payload_ty = ty.errorUnionPayload(mod); if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return false; } diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index eada74e6d4..612ac1f252 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -801,7 +801,7 @@ pub const DeclGen = struct { }, }, .ErrorUnion => { - const payload_ty = ty.errorUnionPayload(); + const payload_ty = ty.errorUnionPayload(mod); const is_pl = val.errorUnionIsPayload(); const error_val = if (!is_pl) val else try mod.intValue(Type.anyerror, 0); @@ -1365,7 +1365,7 @@ pub const DeclGen = struct { .Union => return try self.resolveUnionType(ty, null), .ErrorSet => return try self.intType(.unsigned, 16), .ErrorUnion => { - const payload_ty = ty.errorUnionPayload(); + const payload_ty = ty.errorUnionPayload(mod); const error_ty_ref = try self.resolveType(Type.anyerror, .indirect); const eu_layout = self.errorUnionLayout(payload_ty); @@ -2875,7 +2875,7 @@ pub const DeclGen = struct { const eu_layout = self.errorUnionLayout(payload_ty); - if (!err_union_ty.errorUnionSet().errorSetIsEmpty(mod)) { + if (!err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) { const err_id = if (eu_layout.payload_has_bits) try self.extractField(Type.anyerror, err_union_id, eu_layout.errorFieldIndex()) else @@ -2929,12 +2929,12 @@ pub const DeclGen = struct { const err_union_ty = self.typeOf(ty_op.operand); const err_ty_ref = try self.resolveType(Type.anyerror, .direct); - if (err_union_ty.errorUnionSet().errorSetIsEmpty(mod)) { + if (err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) { // No error possible, so just return undefined. return try self.spv.constUndef(err_ty_ref); } - const payload_ty = err_union_ty.errorUnionPayload(); + const payload_ty = err_union_ty.errorUnionPayload(mod); const eu_layout = self.errorUnionLayout(payload_ty); if (!eu_layout.payload_has_bits) { @@ -2948,9 +2948,10 @@ pub const DeclGen = struct { fn airWrapErrUnionErr(self: *DeclGen, inst: Air.Inst.Index) !?IdRef { if (self.liveness.isUnused(inst)) return null; + const mod = self.module; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const err_union_ty = self.typeOfIndex(inst); - const payload_ty = err_union_ty.errorUnionPayload(); + const payload_ty = err_union_ty.errorUnionPayload(mod); const operand_id = try self.resolve(ty_op.operand); const eu_layout = self.errorUnionLayout(payload_ty); diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig index f4f19f30d0..4d8e865622 100644 --- a/src/link/Dwarf.zig +++ b/src/link/Dwarf.zig @@ -18,6 +18,7 @@ const LinkBlock = File.LinkBlock; const LinkFn = File.LinkFn; const LinkerLoad = @import("../codegen.zig").LinkerLoad; const Module = @import("../Module.zig"); +const InternPool = @import("../InternPool.zig"); const StringTable = @import("strtab.zig").StringTable; const Type = @import("../type.zig").Type; const Value = @import("../value.zig").Value; @@ -518,9 +519,9 @@ pub const DeclState = struct { ); }, .ErrorUnion => { - const error_ty = ty.errorUnionSet(); - const payload_ty = ty.errorUnionPayload(); - const payload_align = if (payload_ty.isNoReturn()) 0 else payload_ty.abiAlignment(mod); + const error_ty = ty.errorUnionSet(mod); + const payload_ty = ty.errorUnionPayload(mod); + const payload_align = if (payload_ty.isNoReturn(mod)) 0 else payload_ty.abiAlignment(mod); const error_align = Type.anyerror.abiAlignment(mod); const abi_size = ty.abiSize(mod); const payload_off = if (error_align >= payload_align) Type.anyerror.abiSize(mod) else 0; 
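The payload_off computation in the hunk above encodes the error-union layout rule that recurs throughout this series (and that errUnionPayloadOffset/errUnionErrorOffset implement in the x86_64 and wasm hunks): the field with the stricter alignment is placed first, so the payload starts at offset 0 unless the error tag's alignment dominates. A minimal sketch of that rule, assuming the 2-byte, 2-aligned anyerror tag that the type.zig hunks in this series hard-code; the helper name and test are illustrative, not compiler source:

const std = @import("std");

// Illustrative helper (hypothetical name): where does the payload of an
// error union start? Mirrors `payload_off` above. The 2-byte, 2-aligned
// error tag matches the `scalar = 2` TODO prongs in the type.zig hunks.
fn payloadOffsetSketch(payload_align: u64) u64 {
    const error_align: u64 = 2;
    const error_size: u64 = 2;
    return if (error_align >= payload_align) error_size else 0;
}

test "stricter-aligned field comes first" {
    // align-8 payload (e.g. u64): payload first, error tag after it.
    try std.testing.expectEqual(@as(u64, 0), payloadOffsetSketch(8));
    // align-1 payload (e.g. u8): 2-byte error tag first, payload at offset 2.
    try std.testing.expectEqual(@as(u64, 2), payloadOffsetSketch(1));
}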
@@ -534,7 +535,7 @@ pub const DeclState = struct { const name = try ty.nameAllocArena(arena, mod); try dbg_info_buffer.writer().print("{s}\x00", .{name}); - if (!payload_ty.isNoReturn()) { + if (!payload_ty.isNoReturn(mod)) { // DW.AT.member try dbg_info_buffer.ensureUnusedCapacity(7); dbg_info_buffer.appendAssumeCapacity(@enumToInt(AbbrevKind.struct_member)); @@ -1266,10 +1267,11 @@ pub fn commitDeclState( const symbol = &decl_state.abbrev_table.items[sym_index]; const ty = symbol.type; const deferred: bool = blk: { - if (ty.isAnyError()) break :blk true; - switch (ty.tag()) { - .error_set_inferred => { - if (!ty.castTag(.error_set_inferred).?.data.is_resolved) break :blk true; + if (ty.isAnyError(mod)) break :blk true; + switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .inferred_error_set_type => |ies_index| { + const ies = mod.inferredErrorSetPtr(ies_index); + if (!ies.is_resolved) break :blk true; }, else => {}, } @@ -1290,10 +1292,11 @@ pub fn commitDeclState( const symbol = decl_state.abbrev_table.items[target]; const ty = symbol.type; const deferred: bool = blk: { - if (ty.isAnyError()) break :blk true; - switch (ty.tag()) { - .error_set_inferred => { - if (!ty.castTag(.error_set_inferred).?.data.is_resolved) break :blk true; + if (ty.isAnyError(mod)) break :blk true; + switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .inferred_error_set_type => |ies_index| { + const ies = mod.inferredErrorSetPtr(ies_index); + if (!ies.is_resolved) break :blk true; }, else => {}, } @@ -2529,18 +2532,22 @@ pub fn flushModule(self: *Dwarf, module: *Module) !void { defer arena_alloc.deinit(); const arena = arena_alloc.allocator(); - const error_set = try arena.create(Module.ErrorSet); - const error_ty = try Type.Tag.error_set.create(arena, error_set); - var names = Module.ErrorSet.NameMap{}; - try names.ensureUnusedCapacity(arena, module.global_error_set.count()); - var it = module.global_error_set.keyIterator(); - while (it.next()) |key| { - names.putAssumeCapacityNoClobber(key.*, {}); + // TODO: don't create a zig type for this, just make the dwarf info + // without touching the zig type system. 
+ const names = try arena.alloc(InternPool.NullTerminatedString, module.global_error_set.count()); + { + var it = module.global_error_set.keyIterator(); + var i: usize = 0; + while (it.next()) |key| : (i += 1) { + names[i] = module.intern_pool.getString(key.*).unwrap().?; + } } - error_set.names = names; + std.mem.sort(InternPool.NullTerminatedString, names, {}, InternPool.NullTerminatedString.indexLessThan); + + const error_ty = try module.intern(.{ .error_set_type = .{ .names = names } }); var dbg_info_buffer = std.ArrayList(u8).init(arena); - try addDbgInfoErrorSet(arena, module, error_ty, self.target, &dbg_info_buffer); + try addDbgInfoErrorSet(arena, module, error_ty.toType(), self.target, &dbg_info_buffer); const di_atom_index = try self.createAtom(.di_atom); log.debug("updateDeclDebugInfoAllocation in flushModule", .{}); @@ -2684,8 +2691,9 @@ fn addDbgInfoErrorSet( // DW.AT.const_value, DW.FORM.data8 mem.writeInt(u64, dbg_info_buffer.addManyAsArrayAssumeCapacity(8), 0, target_endian); - const error_names = ty.errorSetNames(); - for (error_names) |error_name| { + const error_names = ty.errorSetNames(mod); + for (error_names) |error_name_ip| { + const error_name = mod.intern_pool.stringToSlice(error_name_ip); const kv = mod.getErrorValue(error_name) catch unreachable; // DW.AT.enumerator try dbg_info_buffer.ensureUnusedCapacity(error_name.len + 2 + @sizeOf(u64)); diff --git a/src/print_air.zig b/src/print_air.zig index 8cff417770..0e4f2d16cf 100644 --- a/src/print_air.zig +++ b/src/print_air.zig @@ -370,7 +370,6 @@ const Writer = struct { .none => switch (ty.tag()) { .inferred_alloc_const => try s.writeAll("(inferred_alloc_const)"), .inferred_alloc_mut => try s.writeAll("(inferred_alloc_mut)"), - else => try ty.print(s, w.module), }, else => try ty.print(s, w.module), } diff --git a/src/type.zig b/src/type.zig index ebe3d52b05..4e90cbd34d 100644 --- a/src/type.zig +++ b/src/type.zig @@ -36,17 +36,9 @@ pub const Type = struct { pub fn zigTypeTagOrPoison(ty: Type, mod: *const Module) error{GenericPoison}!std.builtin.TypeId { switch (ty.ip_index) { .none => switch (ty.tag()) { - .error_set, - .error_set_single, - .error_set_inferred, - .error_set_merged, - => return .ErrorSet, - .inferred_alloc_const, .inferred_alloc_mut, => return .Pointer, - - .error_union => return .ErrorUnion, }, else => return switch (mod.intern_pool.indexToKey(ty.ip_index)) { .int_type => .Int, @@ -55,6 +47,7 @@ pub const Type = struct { .vector_type => .Vector, .opt_type => .Optional, .error_union_type => .ErrorUnion, + .error_set_type, .inferred_error_set_type => .ErrorSet, .struct_type, .anon_struct_type => .Struct, .union_type => .Union, .opaque_type => .Opaque, @@ -130,9 +123,9 @@ pub const Type = struct { } } - pub fn baseZigTypeTag(self: Type, mod: *const Module) std.builtin.TypeId { + pub fn baseZigTypeTag(self: Type, mod: *Module) std.builtin.TypeId { return switch (self.zigTypeTag(mod)) { - .ErrorUnion => self.errorUnionPayload().baseZigTypeTag(mod), + .ErrorUnion => self.errorUnionPayload(mod).baseZigTypeTag(mod), .Optional => { return self.optionalChild(mod).baseZigTypeTag(mod); }, @@ -294,35 +287,6 @@ pub const Type = struct { if (a.legacy.tag_if_small_enough == b.legacy.tag_if_small_enough) return true; switch (a.tag()) { - .error_set_inferred => { - // Inferred error sets are only equal if both are inferred - // and they share the same pointer. 
- const a_ies = a.castTag(.error_set_inferred).?.data; - const b_ies = (b.castTag(.error_set_inferred) orelse return false).data; - return a_ies == b_ies; - }, - - .error_set, - .error_set_single, - .error_set_merged, - => { - switch (b.tag()) { - .error_set, .error_set_single, .error_set_merged => {}, - else => return false, - } - - // Two resolved sets match if their error set names match. - // Since they are pre-sorted we compare them element-wise. - const a_set = a.errorSetNames(); - const b_set = b.errorSetNames(); - if (a_set.len != b_set.len) return false; - for (a_set, 0..) |a_item, i| { - const b_item = b_set[i]; - if (!std.mem.eql(u8, a_item, b_item)) return false; - } - return true; - }, - .inferred_alloc_const, .inferred_alloc_mut, => { @@ -367,20 +331,6 @@ pub const Type = struct { return true; }, - - .error_union => { - if (b.zigTypeTag(mod) != .ErrorUnion) return false; - - const a_set = a.errorUnionSet(); - const b_set = b.errorUnionSet(); - if (!a_set.eql(b_set, mod)) return false; - - const a_payload = a.errorUnionPayload(); - const b_payload = b.errorUnionPayload(); - if (!a_payload.eql(b_payload, mod)) return false; - - return true; - }, } } @@ -399,28 +349,6 @@ pub const Type = struct { return; } switch (ty.tag()) { - .error_set, - .error_set_single, - .error_set_merged, - => { - // all are treated like an "error set" for hashing - std.hash.autoHash(hasher, std.builtin.TypeId.ErrorSet); - std.hash.autoHash(hasher, Tag.error_set); - - const names = ty.errorSetNames(); - std.hash.autoHash(hasher, names.len); - assert(std.sort.isSorted([]const u8, names, u8, std.mem.lessThan)); - for (names) |name| hasher.update(name); - }, - - .error_set_inferred => { - // inferred error sets are compared using their data pointer - const ies: *Module.Fn.InferredErrorSet = ty.castTag(.error_set_inferred).?.data; - std.hash.autoHash(hasher, std.builtin.TypeId.ErrorSet); - std.hash.autoHash(hasher, Tag.error_set_inferred); - std.hash.autoHash(hasher, ies); - }, - .inferred_alloc_const, .inferred_alloc_mut, => { @@ -439,16 +367,6 @@ pub const Type = struct { std.hash.autoHash(hasher, info.@"volatile"); std.hash.autoHash(hasher, info.size); }, - - .error_union => { - std.hash.autoHash(hasher, std.builtin.TypeId.ErrorUnion); - - const set_ty = ty.errorUnionSet(); - hashWithHasher(set_ty, hasher, mod); - - const payload_ty = ty.errorUnionPayload(); - hashWithHasher(payload_ty, hasher, mod); - }, } } @@ -484,52 +402,6 @@ pub const Type = struct { } }; - pub fn copy(self: Type, allocator: Allocator) error{OutOfMemory}!Type { - if (self.ip_index != .none) { - return Type{ .ip_index = self.ip_index, .legacy = undefined }; - } - if (@enumToInt(self.legacy.tag_if_small_enough) < Tag.no_payload_count) { - return Type{ - .ip_index = .none, - .legacy = .{ .tag_if_small_enough = self.legacy.tag_if_small_enough }, - }; - } else switch (self.legacy.ptr_otherwise.tag) { - .inferred_alloc_const, - .inferred_alloc_mut, - => unreachable, - - .error_union => { - const payload = self.castTag(.error_union).?.data; - return Tag.error_union.create(allocator, .{ - .error_set = try payload.error_set.copy(allocator), - .payload = try payload.payload.copy(allocator), - }); - }, - .error_set_merged => { - const names = self.castTag(.error_set_merged).?.data.keys(); - var duped_names = Module.ErrorSet.NameMap{}; - try duped_names.ensureTotalCapacity(allocator, names.len); - for (names) |name| { - duped_names.putAssumeCapacityNoClobber(name, {}); - } - return Tag.error_set_merged.create(allocator, duped_names); - }, - 
.error_set => return self.copyPayloadShallow(allocator, Payload.ErrorSet), - .error_set_inferred => return self.copyPayloadShallow(allocator, Payload.ErrorSetInferred), - .error_set_single => return self.copyPayloadShallow(allocator, Payload.Name), - } - } - - fn copyPayloadShallow(self: Type, allocator: Allocator, comptime T: type) error{OutOfMemory}!Type { - const payload = self.cast(T).?; - const new_payload = try allocator.create(T); - new_payload.* = payload.*; - return Type{ - .ip_index = .none, - .legacy = .{ .ptr_otherwise = &new_payload.base }, - }; - } - pub fn format(ty: Type, comptime unused_fmt_string: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void { _ = ty; _ = unused_fmt_string; @@ -575,62 +447,7 @@ pub const Type = struct { ) @TypeOf(writer).Error!void { _ = options; comptime assert(unused_format_string.len == 0); - if (start_type.ip_index != .none) { - return writer.print("(intern index: {d})", .{@enumToInt(start_type.ip_index)}); - } - if (true) { - // This is disabled to work around a stage2 bug where this function recursively - // causes more generic function instantiations resulting in an infinite loop - // in the compiler. - try writer.writeAll("[TODO fix internal compiler bug regarding dump]"); - return; - } - var ty = start_type; - while (true) { - const t = ty.tag(); - switch (t) { - .error_union => { - const payload = ty.castTag(.error_union).?.data; - try payload.error_set.dump("", .{}, writer); - try writer.writeAll("!"); - ty = payload.payload; - continue; - }, - .error_set => { - const names = ty.castTag(.error_set).?.data.names.keys(); - try writer.writeAll("error{"); - for (names, 0..) |name, i| { - if (i != 0) try writer.writeByte(','); - try writer.writeAll(name); - } - try writer.writeAll("}"); - return; - }, - .error_set_inferred => { - const func = ty.castTag(.error_set_inferred).?.data.func; - return writer.print("({s} func={d})", .{ - @tagName(t), func.owner_decl, - }); - }, - .error_set_merged => { - const names = ty.castTag(.error_set_merged).?.data.keys(); - try writer.writeAll("error{"); - for (names, 0..) |name, i| { - if (i != 0) try writer.writeByte(','); - try writer.writeAll(name); - } - try writer.writeAll("}"); - return; - }, - .error_set_single => { - const name = ty.castTag(.error_set_single).?.data; - return writer.print("error{{{s}}}", .{name}); - }, - .inferred_alloc_const => return writer.writeAll("(inferred_alloc_const)"), - .inferred_alloc_mut => return writer.writeAll("(inferred_alloc_mut)"), - } - unreachable; - } + return writer.print("{any}", .{start_type.ip_index}); } pub const nameAllocArena = nameAlloc; @@ -648,45 +465,6 @@ pub const Type = struct { .none => switch (ty.tag()) { .inferred_alloc_const => unreachable, .inferred_alloc_mut => unreachable, - - .error_set_inferred => { - const func = ty.castTag(.error_set_inferred).?.data.func; - - try writer.writeAll("@typeInfo(@typeInfo(@TypeOf("); - const owner_decl = mod.declPtr(func.owner_decl); - try owner_decl.renderFullyQualifiedName(mod, writer); - try writer.writeAll(")).Fn.return_type.?).ErrorUnion.error_set"); - }, - - .error_union => { - const error_union = ty.castTag(.error_union).?.data; - try print(error_union.error_set, writer, mod); - try writer.writeAll("!"); - try print(error_union.payload, writer, mod); - }, - - .error_set => { - const names = ty.castTag(.error_set).?.data.names.keys(); - try writer.writeAll("error{"); - for (names, 0..) 
|name, i| { - if (i != 0) try writer.writeByte(','); - try writer.writeAll(name); - } - try writer.writeAll("}"); - }, - .error_set_single => { - const name = ty.castTag(.error_set_single).?.data; - return writer.print("error{{{s}}}", .{name}); - }, - .error_set_merged => { - const names = ty.castTag(.error_set_merged).?.data.keys(); - try writer.writeAll("error{"); - for (names, 0..) |name, i| { - if (i != 0) try writer.writeByte(','); - try writer.writeAll(name); - } - try writer.writeAll("}"); - }, }, else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { .int_type => |int_type| { @@ -766,6 +544,24 @@ pub const Type = struct { try print(error_union_type.payload_type.toType(), writer, mod); return; }, + .inferred_error_set_type => |index| { + const ies = mod.inferredErrorSetPtr(index); + const func = ies.func; + + try writer.writeAll("@typeInfo(@typeInfo(@TypeOf("); + const owner_decl = mod.declPtr(func.owner_decl); + try owner_decl.renderFullyQualifiedName(mod, writer); + try writer.writeAll(")).Fn.return_type.?).ErrorUnion.error_set"); + }, + .error_set_type => |error_set_type| { + const names = error_set_type.names; + try writer.writeAll("error{"); + for (names, 0..) |name, i| { + if (i != 0) try writer.writeByte(','); + try writer.writeAll(mod.intern_pool.stringToSlice(name)); + } + try writer.writeAll("}"); + }, .simple_type => |s| return writer.writeAll(@tagName(s)), .struct_type => |struct_type| { if (mod.structPtrUnwrap(struct_type.index)) |struct_obj| { @@ -881,13 +677,8 @@ pub const Type = struct { return ty.ip_index; } - pub fn toValue(self: Type, allocator: Allocator) Allocator.Error!Value { - if (self.ip_index != .none) return self.ip_index.toValue(); - switch (self.tag()) { - .inferred_alloc_const => unreachable, - .inferred_alloc_mut => unreachable, - else => return Value.Tag.ty.create(allocator, self), - } + pub fn toValue(self: Type) Value { + return self.toIntern().toValue(); } const RuntimeBitsError = Module.CompileError || error{NeedLazy}; @@ -914,14 +705,6 @@ pub const Type = struct { .empty_struct_type => return false, .none => switch (ty.tag()) { - .error_set_inferred, - - .error_set_single, - .error_union, - .error_set, - .error_set_merged, - => return true, - .inferred_alloc_const => unreachable, .inferred_alloc_mut => unreachable, }, @@ -951,7 +734,7 @@ pub const Type = struct { }, .opt_type => |child| { const child_ty = child.toType(); - if (child_ty.isNoReturn()) { + if (child_ty.isNoReturn(mod)) { // Then the optional is comptime-known to be null. return false; } @@ -963,7 +746,10 @@ pub const Type = struct { return !comptimeOnly(child_ty, mod); } }, - .error_union_type => @panic("TODO"), + .error_union_type, + .error_set_type, + .inferred_error_set_type, + => true, // These are function *bodies*, not pointers. // They return false here because they are comptime-only types. 
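The mechanical change running through these type.zig hunks is the one spelled out by the errorUnionPayload/errorUnionSet rewrites at the end of this section: instead of walking legacy Type.Tag payloads (error_set, error_union, ...), every query decodes one interned key via mod.intern_pool.indexToKey(ty.ip_index) and switches on it. A pared-down sketch of that accessor shape, with hypothetical stand-ins for InternPool.Index and InternPool.Key (the real definitions live in src/InternPool.zig):

const std = @import("std");

// Hypothetical stand-ins: the real pool interns each type once and hands
// out stable indices; indexToKey decodes an index back into a typed key.
const Index = enum(u32) { void_type, anyerror_type, anyerror_void_error_union, _ };

const Key = union(enum) {
    simple: void,
    error_union_type: struct { error_set_type: Index, payload_type: Index },
};

fn indexToKey(i: Index) Key {
    return switch (i) {
        .anyerror_void_error_union => .{ .error_union_type = .{
            .error_set_type = .anyerror_type,
            .payload_type = .void_type,
        } },
        else => .{ .simple = {} },
    };
}

// Same shape as the new Type.errorUnionPayload: one key lookup, then a
// field access that asserts the active tag is .error_union_type.
fn errorUnionPayloadSketch(i: Index) Index {
    return indexToKey(i).error_union_type.payload_type;
}

test {
    try std.testing.expectEqual(Index.void_type, errorUnionPayloadSketch(.anyerror_void_error_union));
}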
@@ -1103,112 +889,99 @@ pub const Type = struct { /// readFrom/writeToMemory are supported only for types with a well- /// defined memory layout pub fn hasWellDefinedLayout(ty: Type, mod: *Module) bool { - return switch (ty.ip_index) { - .empty_struct_type => false, + return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .int_type, + .ptr_type, + .vector_type, + => true, - .none => switch (ty.tag()) { - .error_set, - .error_set_single, - .error_set_inferred, - .error_set_merged, - .error_union, - => false, + .error_union_type, + .error_set_type, + .inferred_error_set_type, + .anon_struct_type, + .opaque_type, + .anyframe_type, + // These are function bodies, not function pointers. + .func_type, + => false, - .inferred_alloc_mut => unreachable, - .inferred_alloc_const => unreachable, - }, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .int_type, - .ptr_type, - .vector_type, + .array_type => |array_type| array_type.child.toType().hasWellDefinedLayout(mod), + .opt_type => ty.isPtrLikeOptional(mod), + + .simple_type => |t| switch (t) { + .f16, + .f32, + .f64, + .f80, + .f128, + .usize, + .isize, + .c_char, + .c_short, + .c_ushort, + .c_int, + .c_uint, + .c_long, + .c_ulong, + .c_longlong, + .c_ulonglong, + .c_longdouble, + .bool, + .void, => true, - .error_union_type, - .anon_struct_type, - .opaque_type, - .anyframe_type, - // These are function bodies, not function pointers. - .func_type, + .anyerror, + .anyopaque, + .atomic_order, + .atomic_rmw_op, + .calling_convention, + .address_space, + .float_mode, + .reduce_op, + .call_modifier, + .prefetch_options, + .export_options, + .extern_options, + .type, + .comptime_int, + .comptime_float, + .noreturn, + .null, + .undefined, + .enum_literal, + .type_info, + .generic_poison, => false, - .array_type => |array_type| array_type.child.toType().hasWellDefinedLayout(mod), - .opt_type => ty.isPtrLikeOptional(mod), - - .simple_type => |t| switch (t) { - .f16, - .f32, - .f64, - .f80, - .f128, - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, - .c_longdouble, - .bool, - .void, - => true, - - .anyerror, - .anyopaque, - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .call_modifier, - .prefetch_options, - .export_options, - .extern_options, - .type, - .comptime_int, - .comptime_float, - .noreturn, - .null, - .undefined, - .enum_literal, - .type_info, - .generic_poison, - => false, - - .var_args_param => unreachable, - }, - .struct_type => |struct_type| { - const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse { - // Struct with no fields has a well-defined layout of no bits. - return true; - }; - return struct_obj.layout != .Auto; - }, - .union_type => |union_type| switch (union_type.runtime_tag) { - .none, .safety => mod.unionPtr(union_type.index).layout != .Auto, - .tagged => false, - }, - .enum_type => |enum_type| switch (enum_type.tag_mode) { - .auto => false, - .explicit, .nonexhaustive => true, - }, - - // values, not types - .undef => unreachable, - .un => unreachable, - .simple_value => unreachable, - .extern_func => unreachable, - .int => unreachable, - .float => unreachable, - .ptr => unreachable, - .opt => unreachable, - .enum_tag => unreachable, - .aggregate => unreachable, + .var_args_param => unreachable, + }, + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse { + // Struct with no fields has a well-defined layout of no bits. 
+ return true; + }; + return struct_obj.layout != .Auto; + }, + .union_type => |union_type| switch (union_type.runtime_tag) { + .none, .safety => mod.unionPtr(union_type.index).layout != .Auto, + .tagged => false, }, + .enum_type => |enum_type| switch (enum_type.tag_mode) { + .auto => false, + .explicit, .nonexhaustive => true, + }, + + // values, not types + .undef => unreachable, + .un => unreachable, + .simple_value => unreachable, + .extern_func => unreachable, + .int => unreachable, + .float => unreachable, + .ptr => unreachable, + .opt => unreachable, + .enum_tag => unreachable, + .aggregate => unreachable, }; } @@ -1247,35 +1020,8 @@ pub const Type = struct { }; } - pub fn isNoReturn(ty: Type) bool { - switch (@enumToInt(ty.ip_index)) { - @enumToInt(InternPool.Index.first_type)...@enumToInt(InternPool.Index.noreturn_type) - 1 => return false, - - @enumToInt(InternPool.Index.noreturn_type) => return true, - - @enumToInt(InternPool.Index.noreturn_type) + 1...@enumToInt(InternPool.Index.last_type) => return false, - - @enumToInt(InternPool.Index.first_value)...@enumToInt(InternPool.Index.last_value) => unreachable, - @enumToInt(InternPool.Index.generic_poison) => unreachable, - - // TODO add empty error sets here - // TODO add enums with no fields here - else => return false, - - @enumToInt(InternPool.Index.none) => switch (ty.tag()) { - .error_set => { - const err_set_obj = ty.castTag(.error_set).?.data; - const names = err_set_obj.names.keys(); - return names.len == 0; - }, - .error_set_merged => { - const name_map = ty.castTag(.error_set_merged).?.data; - const names = name_map.keys(); - return names.len == 0; - }, - else => return false, - }, - } + pub fn isNoReturn(ty: Type, mod: *Module) bool { + return mod.intern_pool.isNoReturn(ty.ip_index); } /// Returns 0 if the pointer is naturally aligned and the element type is 0-bit. @@ -1353,21 +1099,6 @@ pub const Type = struct { switch (ty.ip_index) { .empty_struct_type => return AbiAlignmentAdvanced{ .scalar = 0 }, - .none => switch (ty.tag()) { - - // TODO revisit this when we have the concept of the error tag type - .error_set_inferred, - .error_set_single, - .error_set, - .error_set_merged, - => return AbiAlignmentAdvanced{ .scalar = 2 }, - - .error_union => return abiAlignmentAdvancedErrorUnion(ty, mod, strat), - - .inferred_alloc_const, - .inferred_alloc_mut, - => unreachable, - }, else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { .int_type => |int_type| { if (int_type.bits == 0) return AbiAlignmentAdvanced{ .scalar = 0 }; @@ -1388,7 +1119,11 @@ pub const Type = struct { }, .opt_type => return abiAlignmentAdvancedOptional(ty, mod, strat), - .error_union_type => return abiAlignmentAdvancedErrorUnion(ty, mod, strat), + .error_union_type => |info| return abiAlignmentAdvancedErrorUnion(ty, mod, strat, info.payload_type.toType()), + + // TODO revisit this when we have the concept of the error tag type + .error_set_type, .inferred_error_set_type => return AbiAlignmentAdvanced{ .scalar = 2 }, + // represents machine code; not a pointer .func_type => |func_type| return AbiAlignmentAdvanced{ .scalar = if (func_type.alignment.toByteUnitsOptional()) |a| @@ -1572,14 +1307,14 @@ pub const Type = struct { ty: Type, mod: *Module, strat: AbiAlignmentAdvancedStrat, + payload_ty: Type, ) Module.CompileError!AbiAlignmentAdvanced { // This code needs to be kept in sync with the equivalent switch prong // in abiSizeAdvanced. 
- const data = ty.castTag(.error_union).?.data; const code_align = abiAlignment(Type.anyerror, mod); switch (strat) { .eager, .sema => { - if (!(data.payload.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { + if (!(payload_ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { error.NeedLazy => return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(strat.lazy, ty) }, else => |e| return e, })) { @@ -1587,11 +1322,11 @@ pub const Type = struct { } return AbiAlignmentAdvanced{ .scalar = @max( code_align, - (try data.payload.abiAlignmentAdvanced(mod, strat)).scalar, + (try payload_ty.abiAlignmentAdvanced(mod, strat)).scalar, ) }; }, .lazy => |arena| { - switch (try data.payload.abiAlignmentAdvanced(mod, strat)) { + switch (try payload_ty.abiAlignmentAdvanced(mod, strat)) { .scalar => |payload_align| { return AbiAlignmentAdvanced{ .scalar = @max(code_align, payload_align), @@ -1728,55 +1463,6 @@ pub const Type = struct { switch (ty.ip_index) { .empty_struct_type => return AbiSizeAdvanced{ .scalar = 0 }, - .none => switch (ty.tag()) { - .inferred_alloc_const => unreachable, - .inferred_alloc_mut => unreachable, - - // TODO revisit this when we have the concept of the error tag type - .error_set_inferred, - .error_set, - .error_set_merged, - .error_set_single, - => return AbiSizeAdvanced{ .scalar = 2 }, - - .error_union => { - // This code needs to be kept in sync with the equivalent switch prong - // in abiAlignmentAdvanced. - const data = ty.castTag(.error_union).?.data; - const code_size = abiSize(Type.anyerror, mod); - if (!(data.payload.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { - error.NeedLazy => return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(strat.lazy, ty) }, - else => |e| return e, - })) { - // Same as anyerror. 
- return AbiSizeAdvanced{ .scalar = code_size }; - } - const code_align = abiAlignment(Type.anyerror, mod); - const payload_align = abiAlignment(data.payload, mod); - const payload_size = switch (try data.payload.abiSizeAdvanced(mod, strat)) { - .scalar => |elem_size| elem_size, - .val => switch (strat) { - .sema => unreachable, - .eager => unreachable, - .lazy => |arena| return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(arena, ty) }, - }, - }; - - var size: u64 = 0; - if (code_align > payload_align) { - size += code_size; - size = std.mem.alignForwardGeneric(u64, size, payload_align); - size += payload_size; - size = std.mem.alignForwardGeneric(u64, size, code_align); - } else { - size += payload_size; - size = std.mem.alignForwardGeneric(u64, size, code_align); - size += code_size; - size = std.mem.alignForwardGeneric(u64, size, payload_align); - } - return AbiSizeAdvanced{ .scalar = size }; - }, - }, else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { .int_type => |int_type| { if (int_type.bits == 0) return AbiSizeAdvanced{ .scalar = 0 }; @@ -1816,12 +1502,52 @@ pub const Type = struct { .val = try Value.Tag.lazy_size.create(strat.lazy, ty), }, }; - const result = std.mem.alignForwardGeneric(u32, total_bytes, alignment); - return AbiSizeAdvanced{ .scalar = result }; - }, + const result = std.mem.alignForwardGeneric(u32, total_bytes, alignment); + return AbiSizeAdvanced{ .scalar = result }; + }, + + .opt_type => return ty.abiSizeAdvancedOptional(mod, strat), + + // TODO revisit this when we have the concept of the error tag type + .error_set_type, .inferred_error_set_type => return AbiSizeAdvanced{ .scalar = 2 }, + + .error_union_type => |error_union_type| { + const payload_ty = error_union_type.payload_type.toType(); + // This code needs to be kept in sync with the equivalent switch prong + // in abiAlignmentAdvanced. + const code_size = abiSize(Type.anyerror, mod); + if (!(payload_ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { + error.NeedLazy => return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(strat.lazy, ty) }, + else => |e| return e, + })) { + // Same as anyerror. 
+ return AbiSizeAdvanced{ .scalar = code_size }; + } + const code_align = abiAlignment(Type.anyerror, mod); + const payload_align = abiAlignment(payload_ty, mod); + const payload_size = switch (try payload_ty.abiSizeAdvanced(mod, strat)) { + .scalar => |elem_size| elem_size, + .val => switch (strat) { + .sema => unreachable, + .eager => unreachable, + .lazy => |arena| return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(arena, ty) }, + }, + }; - .opt_type => return ty.abiSizeAdvancedOptional(mod, strat), - .error_union_type => @panic("TODO"), + var size: u64 = 0; + if (code_align > payload_align) { + size += code_size; + size = std.mem.alignForwardGeneric(u64, size, payload_align); + size += payload_size; + size = std.mem.alignForwardGeneric(u64, size, code_align); + } else { + size += payload_size; + size = std.mem.alignForwardGeneric(u64, size, code_align); + size += code_size; + size = std.mem.alignForwardGeneric(u64, size, payload_align); + } + return AbiSizeAdvanced{ .scalar = size }; + }, .func_type => unreachable, // represents machine code; not a pointer .simple_type => |t| switch (t) { .bool, @@ -1982,7 +1708,7 @@ pub const Type = struct { ) Module.CompileError!AbiSizeAdvanced { const child_ty = ty.optionalChild(mod); - if (child_ty.isNoReturn()) { + if (child_ty.isNoReturn(mod)) { return AbiSizeAdvanced{ .scalar = 0 }; } @@ -2041,147 +1767,137 @@ pub const Type = struct { const strat: AbiAlignmentAdvancedStrat = if (opt_sema) |sema| .{ .sema = sema } else .eager; - switch (ty.ip_index) { - .none => switch (ty.tag()) { - .inferred_alloc_const => unreachable, - .inferred_alloc_mut => unreachable, - - .error_set, - .error_set_single, - .error_set_inferred, - .error_set_merged, - => return 16, // TODO revisit this when we have the concept of the error tag type + switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .int_type => |int_type| return int_type.bits, + .ptr_type => |ptr_type| switch (ptr_type.size) { + .Slice => return target.ptrBitWidth() * 2, + else => return target.ptrBitWidth() * 2, + }, + .anyframe_type => return target.ptrBitWidth(), + + .array_type => |array_type| { + const len = array_type.len + @boolToInt(array_type.sentinel != .none); + if (len == 0) return 0; + const elem_ty = array_type.child.toType(); + const elem_size = std.math.max(elem_ty.abiAlignment(mod), elem_ty.abiSize(mod)); + if (elem_size == 0) return 0; + const elem_bit_size = try bitSizeAdvanced(elem_ty, mod, opt_sema); + return (len - 1) * 8 * elem_size + elem_bit_size; + }, + .vector_type => |vector_type| { + const child_ty = vector_type.child.toType(); + const elem_bit_size = try bitSizeAdvanced(child_ty, mod, opt_sema); + return elem_bit_size * vector_type.len; + }, + .opt_type => { + // Optionals and error unions are not packed so their bitsize + // includes padding bits. + return (try abiSizeAdvanced(ty, mod, strat)).scalar * 8; + }, + + // TODO revisit this when we have the concept of the error tag type + .error_set_type, .inferred_error_set_type => return 16, + + .error_union_type => { + // Optionals and error unions are not packed so their bitsize + // includes padding bits. 
+ return (try abiSizeAdvanced(ty, mod, strat)).scalar * 8; + }, + .func_type => unreachable, // represents machine code; not a pointer + .simple_type => |t| switch (t) { + .f16 => return 16, + .f32 => return 32, + .f64 => return 64, + .f80 => return 80, + .f128 => return 128, + + .usize, + .isize, + => return target.ptrBitWidth(), + + .c_char => return target.c_type_bit_size(.char), + .c_short => return target.c_type_bit_size(.short), + .c_ushort => return target.c_type_bit_size(.ushort), + .c_int => return target.c_type_bit_size(.int), + .c_uint => return target.c_type_bit_size(.uint), + .c_long => return target.c_type_bit_size(.long), + .c_ulong => return target.c_type_bit_size(.ulong), + .c_longlong => return target.c_type_bit_size(.longlong), + .c_ulonglong => return target.c_type_bit_size(.ulonglong), + .c_longdouble => return target.c_type_bit_size(.longdouble), + + .bool => return 1, + .void => return 0, - .error_union => { - // Optionals and error unions are not packed so their bitsize - // includes padding bits. - return (try abiSizeAdvanced(ty, mod, strat)).scalar * 8; - }, + // TODO revisit this when we have the concept of the error tag type + .anyerror => return 16, + + .anyopaque => unreachable, + .type => unreachable, + .comptime_int => unreachable, + .comptime_float => unreachable, + .noreturn => unreachable, + .null => unreachable, + .undefined => unreachable, + .enum_literal => unreachable, + .generic_poison => unreachable, + .var_args_param => unreachable, + + .atomic_order => unreachable, // missing call to resolveTypeFields + .atomic_rmw_op => unreachable, // missing call to resolveTypeFields + .calling_convention => unreachable, // missing call to resolveTypeFields + .address_space => unreachable, // missing call to resolveTypeFields + .float_mode => unreachable, // missing call to resolveTypeFields + .reduce_op => unreachable, // missing call to resolveTypeFields + .call_modifier => unreachable, // missing call to resolveTypeFields + .prefetch_options => unreachable, // missing call to resolveTypeFields + .export_options => unreachable, // missing call to resolveTypeFields + .extern_options => unreachable, // missing call to resolveTypeFields + .type_info => unreachable, // missing call to resolveTypeFields }, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .int_type => |int_type| return int_type.bits, - .ptr_type => |ptr_type| switch (ptr_type.size) { - .Slice => return target.ptrBitWidth() * 2, - else => return target.ptrBitWidth() * 2, - }, - .anyframe_type => return target.ptrBitWidth(), - - .array_type => |array_type| { - const len = array_type.len + @boolToInt(array_type.sentinel != .none); - if (len == 0) return 0; - const elem_ty = array_type.child.toType(); - const elem_size = std.math.max(elem_ty.abiAlignment(mod), elem_ty.abiSize(mod)); - if (elem_size == 0) return 0; - const elem_bit_size = try bitSizeAdvanced(elem_ty, mod, opt_sema); - return (len - 1) * 8 * elem_size + elem_bit_size; - }, - .vector_type => |vector_type| { - const child_ty = vector_type.child.toType(); - const elem_bit_size = try bitSizeAdvanced(child_ty, mod, opt_sema); - return elem_bit_size * vector_type.len; - }, - .opt_type => { - // Optionals and error unions are not packed so their bitsize - // includes padding bits. 
- return (try abiSizeAdvanced(ty, mod, strat)).scalar * 8; - }, - .error_union_type => @panic("TODO"), - .func_type => unreachable, // represents machine code; not a pointer - .simple_type => |t| switch (t) { - .f16 => return 16, - .f32 => return 32, - .f64 => return 64, - .f80 => return 80, - .f128 => return 128, - - .usize, - .isize, - => return target.ptrBitWidth(), - - .c_char => return target.c_type_bit_size(.char), - .c_short => return target.c_type_bit_size(.short), - .c_ushort => return target.c_type_bit_size(.ushort), - .c_int => return target.c_type_bit_size(.int), - .c_uint => return target.c_type_bit_size(.uint), - .c_long => return target.c_type_bit_size(.long), - .c_ulong => return target.c_type_bit_size(.ulong), - .c_longlong => return target.c_type_bit_size(.longlong), - .c_ulonglong => return target.c_type_bit_size(.ulonglong), - .c_longdouble => return target.c_type_bit_size(.longdouble), - - .bool => return 1, - .void => return 0, - - // TODO revisit this when we have the concept of the error tag type - .anyerror => return 16, - - .anyopaque => unreachable, - .type => unreachable, - .comptime_int => unreachable, - .comptime_float => unreachable, - .noreturn => unreachable, - .null => unreachable, - .undefined => unreachable, - .enum_literal => unreachable, - .generic_poison => unreachable, - .var_args_param => unreachable, - - .atomic_order => unreachable, // missing call to resolveTypeFields - .atomic_rmw_op => unreachable, // missing call to resolveTypeFields - .calling_convention => unreachable, // missing call to resolveTypeFields - .address_space => unreachable, // missing call to resolveTypeFields - .float_mode => unreachable, // missing call to resolveTypeFields - .reduce_op => unreachable, // missing call to resolveTypeFields - .call_modifier => unreachable, // missing call to resolveTypeFields - .prefetch_options => unreachable, // missing call to resolveTypeFields - .export_options => unreachable, // missing call to resolveTypeFields - .extern_options => unreachable, // missing call to resolveTypeFields - .type_info => unreachable, // missing call to resolveTypeFields - }, - .struct_type => |struct_type| { - const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return 0; - if (struct_obj.layout != .Packed) { - return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8; - } - if (opt_sema) |sema| _ = try sema.resolveTypeLayout(ty); - assert(struct_obj.haveLayout()); - return try struct_obj.backing_int_ty.bitSizeAdvanced(mod, opt_sema); - }, - - .anon_struct_type => { - if (opt_sema) |sema| _ = try sema.resolveTypeFields(ty); + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return 0; + if (struct_obj.layout != .Packed) { return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8; - }, + } + if (opt_sema) |sema| _ = try sema.resolveTypeLayout(ty); + assert(struct_obj.haveLayout()); + return try struct_obj.backing_int_ty.bitSizeAdvanced(mod, opt_sema); + }, - .union_type => |union_type| { - if (opt_sema) |sema| _ = try sema.resolveTypeFields(ty); - if (ty.containerLayout(mod) != .Packed) { - return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8; - } - const union_obj = mod.unionPtr(union_type.index); - assert(union_obj.haveFieldTypes()); + .anon_struct_type => { + if (opt_sema) |sema| _ = try sema.resolveTypeFields(ty); + return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8; + }, - var size: u64 = 0; - for (union_obj.fields.values()) |field| { - size = @max(size, try bitSizeAdvanced(field.ty, mod, 
opt_sema)); - } - return size; - }, - .opaque_type => unreachable, - .enum_type => |enum_type| return bitSizeAdvanced(enum_type.tag_ty.toType(), mod, opt_sema), + .union_type => |union_type| { + if (opt_sema) |sema| _ = try sema.resolveTypeFields(ty); + if (ty.containerLayout(mod) != .Packed) { + return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8; + } + const union_obj = mod.unionPtr(union_type.index); + assert(union_obj.haveFieldTypes()); - // values, not types - .undef => unreachable, - .un => unreachable, - .simple_value => unreachable, - .extern_func => unreachable, - .int => unreachable, - .float => unreachable, - .ptr => unreachable, - .opt => unreachable, - .enum_tag => unreachable, - .aggregate => unreachable, + var size: u64 = 0; + for (union_obj.fields.values()) |field| { + size = @max(size, try bitSizeAdvanced(field.ty, mod, opt_sema)); + } + return size; }, + .opaque_type => unreachable, + .enum_type => |enum_type| return bitSizeAdvanced(enum_type.tag_ty.toType(), mod, opt_sema), + + // values, not types + .undef => unreachable, + .un => unreachable, + .simple_value => unreachable, + .extern_func => unreachable, + .int => unreachable, + .float => unreachable, + .ptr => unreachable, + .opt => unreachable, + .enum_tag => unreachable, + .aggregate => unreachable, } } @@ -2210,7 +1926,7 @@ pub const Type = struct { return payload_ty.layoutIsResolved(mod); }, .ErrorUnion => { - const payload_ty = ty.errorUnionPayload(); + const payload_ty = ty.errorUnionPayload(mod); return payload_ty.layoutIsResolved(mod); }, else => return true, @@ -2223,8 +1939,6 @@ pub const Type = struct { .inferred_alloc_const, .inferred_alloc_mut, => true, - - else => false, }, else => return switch (mod.intern_pool.indexToKey(ty.ip_index)) { .ptr_type => |ptr_info| ptr_info.size == .One, @@ -2245,8 +1959,6 @@ pub const Type = struct { .inferred_alloc_const, .inferred_alloc_mut, => .One, - - else => null, }, else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { .ptr_type => |ptr_info| ptr_info.size, @@ -2534,69 +2246,43 @@ pub const Type = struct { } /// Asserts that the type is an error union. - pub fn errorUnionPayload(ty: Type) Type { - return switch (ty.ip_index) { - .anyerror_void_error_union_type => Type.void, - .none => switch (ty.tag()) { - .error_union => ty.castTag(.error_union).?.data.payload, - else => unreachable, - }, - else => @panic("TODO"), - }; + pub fn errorUnionPayload(ty: Type, mod: *Module) Type { + return mod.intern_pool.indexToKey(ty.ip_index).error_union_type.payload_type.toType(); } - pub fn errorUnionSet(ty: Type) Type { - return switch (ty.ip_index) { - .anyerror_void_error_union_type => Type.anyerror, - .none => switch (ty.tag()) { - .error_union => ty.castTag(.error_union).?.data.error_set, - else => unreachable, - }, - else => @panic("TODO"), - }; + /// Asserts that the type is an error union. + pub fn errorUnionSet(ty: Type, mod: *Module) Type { + return mod.intern_pool.indexToKey(ty.ip_index).error_union_type.error_set_type.toType(); } /// Returns false for unresolved inferred error sets. 
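The packed-union rule above (bit size is the maximum over the field bit sizes, with no tag and no padding) can be checked directly; a minimal test, with the field widths chosen arbitrarily for illustration:

    const std = @import("std");

    test "packed union bit size is the max over its fields" {
        const U = packed union {
            a: u9,
            b: u4,
        };
        try std.testing.expectEqual(@as(u16, 9), @bitSizeOf(U));
    }

`errorSetIsEmpty`, whose doc comment appears just above, follows.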
- pub fn errorSetIsEmpty(ty: Type, mod: *const Module) bool { - switch (ty.ip_index) { - .none => switch (ty.tag()) { - .error_set_inferred => { - const inferred_error_set = ty.castTag(.error_set_inferred).?.data; + pub fn errorSetIsEmpty(ty: Type, mod: *Module) bool { + return switch (ty.ip_index) { + .anyerror_type => false, + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .error_set_type => |error_set_type| error_set_type.names.len == 0, + .inferred_error_set_type => |index| { + const inferred_error_set = mod.inferredErrorSetPtr(index); // Can't know for sure. if (!inferred_error_set.is_resolved) return false; if (inferred_error_set.is_anyerror) return false; return inferred_error_set.errors.count() == 0; }, - .error_set_single => return false, - .error_set => { - const err_set_obj = ty.castTag(.error_set).?.data; - return err_set_obj.names.count() == 0; - }, - .error_set_merged => { - const name_map = ty.castTag(.error_set_merged).?.data; - return name_map.count() == 0; - }, else => unreachable, }, - .anyerror_type => return false, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { - else => @panic("TODO"), - }, - } + }; } /// Returns true if it is an error set that includes anyerror, false otherwise. /// Note that the result may be a false negative if the type did not get error set /// resolution prior to this call. - pub fn isAnyError(ty: Type) bool { + pub fn isAnyError(ty: Type, mod: *Module) bool { return switch (ty.ip_index) { - .none => switch (ty.tag()) { - .error_set_inferred => ty.castTag(.error_set_inferred).?.data.is_anyerror, + .anyerror_type => true, + else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .inferred_error_set_type => |i| mod.inferredErrorSetPtr(i).is_anyerror, else => false, }, - .anyerror_type => true, - // TODO handle error_set_inferred here - else => false, }; } @@ -2610,30 +2296,50 @@ pub const Type = struct { /// Returns whether ty, which must be an error set, includes an error `name`. /// Might return a false negative if `ty` is an inferred error set and not fully /// resolved yet. - pub fn errorSetHasField(ty: Type, name: []const u8) bool { - if (ty.isAnyError()) { - return true; - } - - switch (ty.tag()) { - .error_set_single => { - const data = ty.castTag(.error_set_single).?.data; - return std.mem.eql(u8, data, name); - }, - .error_set_inferred => { - const data = ty.castTag(.error_set_inferred).?.data; - return data.errors.contains(name); - }, - .error_set_merged => { - const data = ty.castTag(.error_set_merged).?.data; - return data.contains(name); + pub fn errorSetHasFieldIp( + ip: *const InternPool, + ty: InternPool.Index, + name: InternPool.NullTerminatedString, + ) bool { + return switch (ty) { + .anyerror_type => true, + else => switch (ip.indexToKey(ty)) { + .error_set_type => |error_set_type| { + return error_set_type.nameIndex(ip, name) != null; + }, + .inferred_error_set_type => |index| { + const ies = ip.inferredErrorSetPtrConst(index); + if (ies.is_anyerror) return true; + return ies.errors.contains(name); + }, + else => unreachable, }, - .error_set => { - const data = ty.castTag(.error_set).?.data; - return data.names.contains(name); + }; + } + + /// Returns whether ty, which must be an error set, includes an error `name`. + /// Might return a false negative if `ty` is an inferred error set and not fully + /// resolved yet. 
+ pub fn errorSetHasField(ty: Type, name: []const u8, mod: *Module) bool { + const ip = &mod.intern_pool; + return switch (ty.ip_index) { + .anyerror_type => true, + else => switch (ip.indexToKey(ty.ip_index)) { + .error_set_type => |error_set_type| { + // If the string is not interned, then the field certainly is not present. + const field_name_interned = ip.getString(name).unwrap() orelse return false; + return error_set_type.nameIndex(ip, field_name_interned) != null; + }, + .inferred_error_set_type => |index| { + const ies = ip.inferredErrorSetPtr(index); + if (ies.is_anyerror) return true; + // If the string is not interned, then the field certainly is not present. + const field_name_interned = ip.getString(name).unwrap() orelse return false; + return ies.errors.contains(field_name_interned); + }, + else => unreachable, }, - else => unreachable, - } + }; } /// Asserts the type is an array or vector or struct. @@ -2727,14 +2433,6 @@ pub const Type = struct { var ty = starting_ty; while (true) switch (ty.ip_index) { - .none => switch (ty.tag()) { - .error_set, .error_set_single, .error_set_inferred, .error_set_merged => { - // TODO revisit this when error sets support custom int types - return .{ .signedness = .unsigned, .bits = 16 }; - }, - - else => unreachable, - }, .anyerror_type => { // TODO revisit this when error sets support custom int types return .{ .signedness = .unsigned, .bits = 16 }; @@ -2760,6 +2458,9 @@ pub const Type = struct { .enum_type => |enum_type| ty = enum_type.tag_ty.toType(), .vector_type => |vector_type| ty = vector_type.child.toType(), + // TODO revisit this when error sets support custom int types + .error_set_type, .inferred_error_set_type => return .{ .signedness = .unsigned, .bits = 16 }, + .anon_struct_type => unreachable, .ptr_type => unreachable, @@ -2932,13 +2633,6 @@ pub const Type = struct { .empty_struct_type => return Value.empty_struct, .none => switch (ty.tag()) { - .error_union, - .error_set_single, - .error_set, - .error_set_merged, - .error_set_inferred, - => return null, - .inferred_alloc_const => unreachable, .inferred_alloc_mut => unreachable, }, @@ -2955,6 +2649,8 @@ pub const Type = struct { .error_union_type, .func_type, .anyframe_type, + .error_set_type, + .inferred_error_set_type, => return null, .array_type => |array_type| { @@ -3130,18 +2826,6 @@ pub const Type = struct { return switch (ty.ip_index) { .empty_struct_type => false, - .none => switch (ty.tag()) { - .error_set, - .error_set_single, - .error_set_inferred, - .error_set_merged, - => false, - - .inferred_alloc_mut => unreachable, - .inferred_alloc_const => unreachable, - - .error_union => return ty.errorUnionPayload().comptimeOnly(mod), - }, else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { .int_type => false, .ptr_type => |ptr_type| { @@ -3160,6 +2844,11 @@ pub const Type = struct { .vector_type => |vector_type| vector_type.child.toType().comptimeOnly(mod), .opt_type => |child| child.toType().comptimeOnly(mod), .error_union_type => |error_union_type| error_union_type.payload_type.toType().comptimeOnly(mod), + + .error_set_type, + .inferred_error_set_type, + => false, + // These are function bodies, not function pointers. .func_type => true, @@ -3418,17 +3107,11 @@ pub const Type = struct { } // Asserts that `ty` is an error set and not `anyerror`. 
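The `ip.getString(name).unwrap() orelse return false` pattern above is the interning fast path: if the probe string was never interned, no interned error set can possibly contain it, so membership is refuted without allocating. A reduced sketch with a hypothetical mini-pool (names and types here are illustrative stand-ins, not the real InternPool API):

    const std = @import("std");

    const StringIndex = u32;

    const MiniPool = struct {
        map: std.StringHashMapUnmanaged(StringIndex) = .{},

        // Lookup only; never interns. Plays the role of `ip.getString`.
        fn getString(pool: *const MiniPool, s: []const u8) ?StringIndex {
            return pool.map.get(s);
        }
    };

    fn errorSetHasField(pool: *const MiniPool, names: []const StringIndex, name: []const u8) bool {
        const interned = pool.getString(name) orelse return false;
        return std.mem.indexOfScalar(StringIndex, names, interned) != null;
    }

`errorSetNames`, introduced by the comment just above, follows.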
- pub fn errorSetNames(ty: Type) []const []const u8 { - return switch (ty.tag()) { - .error_set_single => blk: { - // Work around coercion problems - const tmp: *const [1][]const u8 = &ty.castTag(.error_set_single).?.data; - break :blk tmp; - }, - .error_set_merged => ty.castTag(.error_set_merged).?.data.keys(), - .error_set => ty.castTag(.error_set).?.data.names.keys(), - .error_set_inferred => { - const inferred_error_set = ty.castTag(.error_set_inferred).?.data; + pub fn errorSetNames(ty: Type, mod: *Module) []const InternPool.NullTerminatedString { + return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .error_set_type => |x| x.names, + .inferred_error_set_type => |index| { + const inferred_error_set = mod.inferredErrorSetPtr(index); assert(inferred_error_set.is_resolved); assert(!inferred_error_set.is_anyerror); return inferred_error_set.errors.keys(); @@ -3437,26 +3120,6 @@ pub const Type = struct { }; } - /// Merge lhs with rhs. - /// Asserts that lhs and rhs are both error sets and are resolved. - pub fn errorSetMerge(lhs: Type, arena: Allocator, rhs: Type) !Type { - const lhs_names = lhs.errorSetNames(); - const rhs_names = rhs.errorSetNames(); - var names: Module.ErrorSet.NameMap = .{}; - try names.ensureUnusedCapacity(arena, lhs_names.len); - for (lhs_names) |name| { - names.putAssumeCapacityNoClobber(name, {}); - } - for (rhs_names) |name| { - try names.put(arena, name, {}); - } - - // names must be sorted - Module.ErrorSet.sortNames(&names); - - return try Tag.error_set_merged.create(arena, names); - } - pub fn enumFields(ty: Type, mod: *Module) []const InternPool.NullTerminatedString { return mod.intern_pool.indexToKey(ty.ip_index).enum_type.names; } @@ -3748,30 +3411,19 @@ pub const Type = struct { } pub fn declSrcLocOrNull(ty: Type, mod: *Module) ?Module.SrcLoc { - switch (ty.ip_index) { - .empty_struct_type => return null, - .none => switch (ty.tag()) { - .error_set => { - const error_set = ty.castTag(.error_set).?.data; - return error_set.srcLoc(mod); - }, - - else => return null, + return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index).?; + return struct_obj.srcLoc(mod); }, - else => return switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .struct_type => |struct_type| { - const struct_obj = mod.structPtrUnwrap(struct_type.index).?; - return struct_obj.srcLoc(mod); - }, - .union_type => |union_type| { - const union_obj = mod.unionPtr(union_type.index); - return union_obj.srcLoc(mod); - }, - .opaque_type => |opaque_type| mod.opaqueSrcLoc(opaque_type), - .enum_type => |enum_type| mod.declPtr(enum_type.decl).srcLoc(mod), - else => null, + .union_type => |union_type| { + const union_obj = mod.unionPtr(union_type.index); + return union_obj.srcLoc(mod); }, - } + .opaque_type => |opaque_type| mod.opaqueSrcLoc(opaque_type), + .enum_type => |enum_type| mod.declPtr(enum_type.decl).srcLoc(mod), + else => null, + }; } pub fn getOwnerDecl(ty: Type, mod: *Module) Module.Decl.Index { @@ -3779,39 +3431,25 @@ pub const Type = struct { } pub fn getOwnerDeclOrNull(ty: Type, mod: *Module) ?Module.Decl.Index { - switch (ty.ip_index) { - .none => switch (ty.tag()) { - .error_set => { - const error_set = ty.castTag(.error_set).?.data; - return error_set.owner_decl; - }, - - else => return null, + return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return null; + return 
struct_obj.owner_decl; }, - else => return switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .struct_type => |struct_type| { - const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return null; - return struct_obj.owner_decl; - }, - .union_type => |union_type| { - const union_obj = mod.unionPtr(union_type.index); - return union_obj.owner_decl; - }, - .opaque_type => |opaque_type| opaque_type.decl, - .enum_type => |enum_type| enum_type.decl, - else => null, + .union_type => |union_type| { + const union_obj = mod.unionPtr(union_type.index); + return union_obj.owner_decl; }, - } + .opaque_type => |opaque_type| opaque_type.decl, + .enum_type => |enum_type| enum_type.decl, + else => null, + }; } pub fn isGenericPoison(ty: Type) bool { return ty.ip_index == .generic_poison_type; } - pub fn isBoundFn(ty: Type) bool { - return ty.ip_index == .none and ty.tag() == .bound_fn; - } - /// This enum does not directly correspond to `std.builtin.TypeId` because /// it has extra enum tags in it, as a way of using less memory. For example, /// even though Zig recognizes `*align(10) i32` and `*i32` both as Pointer types @@ -3827,54 +3465,8 @@ pub const Type = struct { inferred_alloc_const, // See last_no_payload_tag below. // After this, the tag requires a payload. - error_union, - error_set, - error_set_single, - /// The type is the inferred error set of a specific function. - error_set_inferred, - error_set_merged, - pub const last_no_payload_tag = Tag.inferred_alloc_const; pub const no_payload_count = @enumToInt(last_no_payload_tag) + 1; - - pub fn Type(comptime t: Tag) type { - return switch (t) { - .inferred_alloc_const, - .inferred_alloc_mut, - => @compileError("Type Tag " ++ @tagName(t) ++ " has no payload"), - - .error_set => Payload.ErrorSet, - .error_set_inferred => Payload.ErrorSetInferred, - .error_set_merged => Payload.ErrorSetMerged, - - .error_union => Payload.ErrorUnion, - .error_set_single => Payload.Name, - }; - } - - pub fn init(comptime t: Tag) file_struct.Type { - comptime std.debug.assert(@enumToInt(t) < Tag.no_payload_count); - return file_struct.Type{ - .ip_index = .none, - .legacy = .{ .tag_if_small_enough = t }, - }; - } - - pub fn create(comptime t: Tag, ally: Allocator, data: Data(t)) error{OutOfMemory}!file_struct.Type { - const p = try ally.create(t.Type()); - p.* = .{ - .base = .{ .tag = t }, - .data = data, - }; - return file_struct.Type{ - .ip_index = .none, - .legacy = .{ .ptr_otherwise = &p.base }, - }; - } - - pub fn Data(comptime t: Tag) type { - return std.meta.fieldInfo(t.Type(), .data).type; - } }; pub fn isTuple(ty: Type, mod: *Module) bool { @@ -3928,37 +3520,6 @@ pub const Type = struct { pub const Payload = struct { tag: Tag, - pub const Len = struct { - base: Payload, - data: u64, - }; - - pub const Bits = struct { - base: Payload, - data: u16, - }; - - pub const ErrorSet = struct { - pub const base_tag = Tag.error_set; - - base: Payload = Payload{ .tag = base_tag }, - data: *Module.ErrorSet, - }; - - pub const ErrorSetMerged = struct { - pub const base_tag = Tag.error_set_merged; - - base: Payload = Payload{ .tag = base_tag }, - data: Module.ErrorSet.NameMap, - }; - - pub const ErrorSetInferred = struct { - pub const base_tag = Tag.error_set_inferred; - - base: Payload = Payload{ .tag = base_tag }, - data: *Module.Fn.InferredErrorSet, - }; - /// TODO: remove this data structure since we have `InternPool.Key.PtrType`. 
pub const Pointer = struct { data: Data, @@ -4010,27 +3571,6 @@ pub const Type = struct { } }; }; - - pub const ErrorUnion = struct { - pub const base_tag = Tag.error_union; - - base: Payload = Payload{ .tag = base_tag }, - data: struct { - error_set: Type, - payload: Type, - }, - }; - - pub const Decl = struct { - base: Payload, - data: *Module.Decl, - }; - - pub const Name = struct { - base: Payload, - /// memory is owned by `Module` - data: []const u8, - }; }; pub const @"u1": Type = .{ .ip_index = .u1_type, .legacy = undefined }; @@ -4164,19 +3704,6 @@ pub const Type = struct { return mod.optionalType(child_type.ip_index); } - pub fn errorUnion( - arena: Allocator, - error_set: Type, - payload: Type, - mod: *Module, - ) Allocator.Error!Type { - assert(error_set.zigTypeTag(mod) == .ErrorSet); - return Type.Tag.error_union.create(arena, .{ - .error_set = error_set, - .payload = payload, - }); - } - pub fn smallestUnsignedBits(max: u64) u16 { if (max == 0) return 0; const base = std.math.log2(max); diff --git a/src/value.zig b/src/value.zig index 3100496085..4408d10231 100644 --- a/src/value.zig +++ b/src/value.zig @@ -260,7 +260,7 @@ pub const Value = struct { const new_payload = try arena.create(Payload.Ty); new_payload.* = .{ .base = payload.base, - .data = try payload.data.copy(arena), + .data = payload.data, }; return Value{ .ip_index = .none, @@ -281,7 +281,7 @@ pub const Value = struct { .base = payload.base, .data = .{ .container_ptr = try payload.data.container_ptr.copy(arena), - .container_ty = try payload.data.container_ty.copy(arena), + .container_ty = payload.data.container_ty, }, }; return Value{ @@ -296,7 +296,7 @@ pub const Value = struct { .base = payload.base, .data = .{ .field_val = try payload.data.field_val.copy(arena), - .field_ty = try payload.data.field_ty.copy(arena), + .field_ty = payload.data.field_ty, }, }; return Value{ @@ -311,7 +311,7 @@ pub const Value = struct { .base = payload.base, .data = .{ .array_ptr = try payload.data.array_ptr.copy(arena), - .elem_ty = try payload.data.elem_ty.copy(arena), + .elem_ty = payload.data.elem_ty, .index = payload.data.index, }, }; @@ -327,7 +327,7 @@ pub const Value = struct { .base = payload.base, .data = .{ .container_ptr = try payload.data.container_ptr.copy(arena), - .container_ty = try payload.data.container_ty.copy(arena), + .container_ty = payload.data.container_ty, .field_index = payload.data.field_index, }, }; @@ -1870,7 +1870,7 @@ pub const Value = struct { .eu_payload => { const a_payload = a.castTag(.eu_payload).?.data; const b_payload = b.castTag(.eu_payload).?.data; - const payload_ty = ty.errorUnionPayload(); + const payload_ty = ty.errorUnionPayload(mod); return eqlAdvanced(a_payload, payload_ty, b_payload, payload_ty, mod, opt_sema); }, .eu_payload_ptr => { @@ -2163,14 +2163,14 @@ pub const Value = struct { .ErrorUnion => { if (val.tag() == .@"error") { std.hash.autoHash(hasher, false); // error - const sub_ty = ty.errorUnionSet(); + const sub_ty = ty.errorUnionSet(mod); val.hash(sub_ty, hasher, mod); return; } if (val.castTag(.eu_payload)) |payload| { std.hash.autoHash(hasher, true); // payload - const sub_ty = ty.errorUnionPayload(); + const sub_ty = ty.errorUnionPayload(mod); payload.data.hash(sub_ty, hasher, mod); return; } else unreachable; @@ -2272,7 +2272,7 @@ pub const Value = struct { payload.data.hashUncoerced(child_ty, hasher, mod); } else std.hash.autoHash(hasher, std.builtin.TypeId.Null), .ErrorSet, .ErrorUnion => if (val.getError()) |err| hasher.update(err) else { - const pl_ty = 
ty.errorUnionPayload(); + const pl_ty = ty.errorUnionPayload(mod); val.castTag(.eu_payload).?.data.hashUncoerced(pl_ty, hasher, mod); }, .Enum, .EnumLiteral, .Union => { -- cgit v1.2.3 From 6e0de1d11694a58745da76d601ebab7562feed09 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Mon, 22 May 2023 07:58:02 -0400 Subject: InternPool: port most of value tags --- lib/std/array_list.zig | 44 + src/Air.zig | 6 +- src/AstGen.zig | 30 +- src/Compilation.zig | 5 +- src/InternPool.zig | 827 ++++++++-- src/Module.zig | 409 +++-- src/Sema.zig | 2882 ++++++++++++++++------------------- src/TypedValue.zig | 245 +-- src/Zir.zig | 4 +- src/arch/aarch64/CodeGen.zig | 17 +- src/arch/arm/CodeGen.zig | 12 +- src/arch/riscv64/CodeGen.zig | 11 +- src/arch/sparc64/CodeGen.zig | 11 +- src/arch/wasm/CodeGen.zig | 351 +++-- src/arch/x86_64/CodeGen.zig | 65 +- src/codegen.zig | 1035 ++++++------- src/codegen/c.zig | 957 ++++++------ src/codegen/llvm.zig | 1608 +++++++++---------- src/codegen/spirv.zig | 312 ++-- src/link.zig | 19 +- src/link/C.zig | 10 +- src/link/Coff.zig | 18 +- src/link/Dwarf.zig | 8 +- src/link/Elf.zig | 18 +- src/link/MachO.zig | 28 +- src/link/NvPtx.zig | 4 +- src/link/Plan9.zig | 14 +- src/link/SpirV.zig | 6 +- src/link/Wasm.zig | 40 +- src/print_air.zig | 4 +- src/type.zig | 463 +++--- src/value.zig | 1775 +++++---------------- tools/lldb_pretty_printers.py | 6 +- tools/stage2_gdb_pretty_printers.py | 2 +- 34 files changed, 5236 insertions(+), 6010 deletions(-) (limited to 'src/arch') diff --git a/lib/std/array_list.zig b/lib/std/array_list.zig index bbfa588d6d..c2a2486dfa 100644 --- a/lib/std/array_list.zig +++ b/lib/std/array_list.zig @@ -459,6 +459,28 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type { return self.items[prev_len..][0..n]; } + /// Resize the array, adding `n` new elements, which have `undefined` values. + /// The return value is a slice pointing to the newly allocated elements. + /// The returned pointer becomes invalid when the list is resized. + /// Resizes list if `self.capacity` is not large enough. + pub fn addManyAsSlice(self: *Self, n: usize) Allocator.Error![]T { + const prev_len = self.items.len; + try self.resize(self.items.len + n); + return self.items[prev_len..][0..n]; + } + + /// Resize the array, adding `n` new elements, which have `undefined` values. + /// The return value is a slice pointing to the newly allocated elements. + /// Asserts that there is already space for the new item without allocating more. + /// **Does not** invalidate element pointers. + /// The returned pointer becomes invalid when the list is resized. + pub fn addManyAsSliceAssumeCapacity(self: *Self, n: usize) []T { + assert(self.items.len + n <= self.capacity); + const prev_len = self.items.len; + self.items.len += n; + return self.items[prev_len..][0..n]; + } + /// Remove and return the last element from the list. /// Asserts the list has at least one item. /// Invalidates pointers to the removed element. @@ -949,6 +971,28 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ return self.items[prev_len..][0..n]; } + /// Resize the array, adding `n` new elements, which have `undefined` values. + /// The return value is a slice pointing to the newly allocated elements. + /// The returned pointer becomes invalid when the list is resized. + /// Resizes list if `self.capacity` is not large enough. 
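A usage sketch for the managed `addManyAsSlice` added above (the unmanaged variant continues below and differs only in taking the allocator explicitly): reserve all `n` slots in one resize and initialize them in place instead of appending element by element.

    const std = @import("std");

    test "addManyAsSlice reserves n elements for in-place initialization" {
        var list = std.ArrayList(u8).init(std.testing.allocator);
        defer list.deinit();

        // One resize instead of four appends; the new items start out undefined.
        const slice = try list.addManyAsSlice(4);
        std.mem.copy(u8, slice, "zig!");

        try std.testing.expectEqualStrings("zig!", list.items);
    }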
+ pub fn addManyAsSlice(self: *Self, allocator: Allocator, n: usize) Allocator.Error![]T { + const prev_len = self.items.len; + try self.resize(allocator, self.items.len + n); + return self.items[prev_len..][0..n]; + } + + /// Resize the array, adding `n` new elements, which have `undefined` values. + /// The return value is a slice pointing to the newly allocated elements. + /// Asserts that there is already space for the new item without allocating more. + /// **Does not** invalidate element pointers. + /// The returned pointer becomes invalid when the list is resized. + pub fn addManyAsSliceAssumeCapacity(self: *Self, n: usize) []T { + assert(self.items.len + n <= self.capacity); + const prev_len = self.items.len; + self.items.len += n; + return self.items[prev_len..][0..n]; + } + /// Remove and return the last element from the list. /// Asserts the list has at least one item. /// Invalidates pointers to last element. diff --git a/src/Air.zig b/src/Air.zig index 070cf7dc72..9dcbe174ec 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -901,8 +901,8 @@ pub const Inst = struct { manyptr_const_u8_type = @enumToInt(InternPool.Index.manyptr_const_u8_type), manyptr_const_u8_sentinel_0_type = @enumToInt(InternPool.Index.manyptr_const_u8_sentinel_0_type), single_const_pointer_to_comptime_int_type = @enumToInt(InternPool.Index.single_const_pointer_to_comptime_int_type), - const_slice_u8_type = @enumToInt(InternPool.Index.const_slice_u8_type), - const_slice_u8_sentinel_0_type = @enumToInt(InternPool.Index.const_slice_u8_sentinel_0_type), + slice_const_u8_type = @enumToInt(InternPool.Index.slice_const_u8_type), + slice_const_u8_sentinel_0_type = @enumToInt(InternPool.Index.slice_const_u8_sentinel_0_type), anyerror_void_error_union_type = @enumToInt(InternPool.Index.anyerror_void_error_union_type), generic_poison_type = @enumToInt(InternPool.Index.generic_poison_type), inferred_alloc_const_type = @enumToInt(InternPool.Index.inferred_alloc_const_type), @@ -1382,7 +1382,7 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index, ip: InternPool) Type { .bool_to_int => return Type.u1, - .tag_name, .error_name => return Type.const_slice_u8_sentinel_0, + .tag_name, .error_name => return Type.slice_const_u8_sentinel_0, .call, .call_always_tail, .call_never_tail, .call_never_inline => { const callee_ty = air.typeOf(datas[inst].pl_op.operand, ip); diff --git a/src/AstGen.zig b/src/AstGen.zig index 998e08ba04..6956a58ae4 100644 --- a/src/AstGen.zig +++ b/src/AstGen.zig @@ -3934,7 +3934,7 @@ fn fnDecl( var section_gz = decl_gz.makeSubBlock(params_scope); defer section_gz.unstack(); const section_ref: Zir.Inst.Ref = if (fn_proto.ast.section_expr == 0) .none else inst: { - const inst = try expr(&decl_gz, params_scope, .{ .rl = .{ .coerced_ty = .const_slice_u8_type } }, fn_proto.ast.section_expr); + const inst = try expr(&decl_gz, params_scope, .{ .rl = .{ .coerced_ty = .slice_const_u8_type } }, fn_proto.ast.section_expr); if (section_gz.instructionsSlice().len == 0) { // In this case we will send a len=0 body which can be encoded more efficiently. 
break :inst inst; @@ -4137,7 +4137,7 @@ fn globalVarDecl( break :inst try expr(&block_scope, &block_scope.base, .{ .rl = .{ .ty = .address_space_type } }, var_decl.ast.addrspace_node); }; const section_inst: Zir.Inst.Ref = if (var_decl.ast.section_node == 0) .none else inst: { - break :inst try comptimeExpr(&block_scope, &block_scope.base, .{ .rl = .{ .ty = .const_slice_u8_type } }, var_decl.ast.section_node); + break :inst try comptimeExpr(&block_scope, &block_scope.base, .{ .rl = .{ .ty = .slice_const_u8_type } }, var_decl.ast.section_node); }; const has_section_or_addrspace = section_inst != .none or addrspace_inst != .none; wip_members.nextDecl(is_pub, is_export, align_inst != .none, has_section_or_addrspace); @@ -7878,7 +7878,7 @@ fn unionInit( params: []const Ast.Node.Index, ) InnerError!Zir.Inst.Ref { const union_type = try typeExpr(gz, scope, params[0]); - const field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .const_slice_u8_type } }, params[1]); + const field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .slice_const_u8_type } }, params[1]); const field_type = try gz.addPlNode(.field_type_ref, params[1], Zir.Inst.FieldTypeRef{ .container_type = union_type, .field_name = field_name, @@ -8100,12 +8100,12 @@ fn builtinCall( if (ri.rl == .ref) { return gz.addPlNode(.field_ptr_named, node, Zir.Inst.FieldNamed{ .lhs = try expr(gz, scope, .{ .rl = .ref }, params[0]), - .field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .const_slice_u8_type } }, params[1]), + .field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .slice_const_u8_type } }, params[1]), }); } const result = try gz.addPlNode(.field_val_named, node, Zir.Inst.FieldNamed{ .lhs = try expr(gz, scope, .{ .rl = .none }, params[0]), - .field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .const_slice_u8_type } }, params[1]), + .field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .slice_const_u8_type } }, params[1]), }); return rvalue(gz, ri, result, node); }, @@ -8271,11 +8271,11 @@ fn builtinCall( .align_of => return simpleUnOpType(gz, scope, ri, node, params[0], .align_of), .ptr_to_int => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .ptr_to_int), - .compile_error => return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .ty = .const_slice_u8_type } }, params[0], .compile_error), + .compile_error => return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .ty = .slice_const_u8_type } }, params[0], .compile_error), .set_eval_branch_quota => return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .coerced_ty = .u32_type } }, params[0], .set_eval_branch_quota), .enum_to_int => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .enum_to_int), .bool_to_int => return simpleUnOp(gz, scope, ri, node, bool_ri, params[0], .bool_to_int), - .embed_file => return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .ty = .const_slice_u8_type } }, params[0], .embed_file), + .embed_file => return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .ty = .slice_const_u8_type } }, params[0], .embed_file), .error_name => return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .ty = .anyerror_type } }, params[0], .error_name), .set_runtime_safety => return simpleUnOp(gz, scope, ri, node, bool_ri, params[0], .set_runtime_safety), .sqrt => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .sqrt), @@ -8334,7 +8334,7 @@ fn builtinCall( }, .panic => { try emitDbgNode(gz, node); - return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .ty = .const_slice_u8_type } }, params[0], .panic); + return 
simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .ty = .slice_const_u8_type } }, params[0], .panic); }, .trap => { try emitDbgNode(gz, node); @@ -8450,7 +8450,7 @@ fn builtinCall( }, .c_define => { if (!gz.c_import) return gz.astgen.failNode(node, "C define valid only inside C import block", .{}); - const name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .const_slice_u8_type } }, params[0]); + const name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .slice_const_u8_type } }, params[0]); const value = try comptimeExpr(gz, scope, .{ .rl = .none }, params[1]); const result = try gz.addExtendedPayload(.c_define, Zir.Inst.BinNode{ .node = gz.nodeIndexToRelative(node), @@ -8546,7 +8546,7 @@ fn builtinCall( }, .field_parent_ptr => { const parent_type = try typeExpr(gz, scope, params[0]); - const field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .const_slice_u8_type } }, params[1]); + const field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .slice_const_u8_type } }, params[1]); const result = try gz.addPlNode(.field_parent_ptr, node, Zir.Inst.FieldParentPtr{ .parent_type = parent_type, .field_name = field_name, @@ -8701,7 +8701,7 @@ fn hasDeclOrField( tag: Zir.Inst.Tag, ) InnerError!Zir.Inst.Ref { const container_type = try typeExpr(gz, scope, lhs_node); - const name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .const_slice_u8_type } }, rhs_node); + const name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .slice_const_u8_type } }, rhs_node); const result = try gz.addPlNode(tag, node, Zir.Inst.Bin{ .lhs = container_type, .rhs = name, @@ -8851,7 +8851,7 @@ fn simpleCBuiltin( ) InnerError!Zir.Inst.Ref { const name: []const u8 = if (tag == .c_undef) "C undef" else "C include"; if (!gz.c_import) return gz.astgen.failNode(node, "{s} valid only inside C import block", .{name}); - const operand = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .const_slice_u8_type } }, operand_node); + const operand = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .slice_const_u8_type } }, operand_node); _ = try gz.addExtendedPayload(tag, Zir.Inst.UnNode{ .node = gz.nodeIndexToRelative(node), .operand = operand, @@ -8869,7 +8869,7 @@ fn offsetOf( tag: Zir.Inst.Tag, ) InnerError!Zir.Inst.Ref { const type_inst = try typeExpr(gz, scope, lhs_node); - const field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .const_slice_u8_type } }, rhs_node); + const field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .slice_const_u8_type } }, rhs_node); const result = try gz.addPlNode(tag, node, Zir.Inst.Bin{ .lhs = type_inst, .rhs = field_name, @@ -10317,8 +10317,8 @@ fn rvalue( as_ty | @enumToInt(Zir.Inst.Ref.manyptr_const_u8_type), as_ty | @enumToInt(Zir.Inst.Ref.manyptr_const_u8_sentinel_0_type), as_ty | @enumToInt(Zir.Inst.Ref.single_const_pointer_to_comptime_int_type), - as_ty | @enumToInt(Zir.Inst.Ref.const_slice_u8_type), - as_ty | @enumToInt(Zir.Inst.Ref.const_slice_u8_sentinel_0_type), + as_ty | @enumToInt(Zir.Inst.Ref.slice_const_u8_type), + as_ty | @enumToInt(Zir.Inst.Ref.slice_const_u8_sentinel_0_type), as_ty | @enumToInt(Zir.Inst.Ref.anyerror_void_error_union_type), as_ty | @enumToInt(Zir.Inst.Ref.generic_poison_type), as_ty | @enumToInt(Zir.Inst.Ref.empty_struct_type), diff --git a/src/Compilation.zig b/src/Compilation.zig index 43b16241fc..30ac499955 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -226,7 +226,7 @@ const Job = union(enum) { /// Write the constant value for a Decl to the output file. 
codegen_decl: Module.Decl.Index, /// Write the machine code for a function to the output file. - codegen_func: *Module.Fn, + codegen_func: Module.Fn.Index, /// Render the .h file snippet for the Decl. emit_h_decl: Module.Decl.Index, /// The Decl needs to be analyzed and possibly export itself. @@ -3208,7 +3208,8 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: *std.Progress.Node) !v // Tests are always emitted in test binaries. The decl_refs are created by // Module.populateTestFunctions, but this will not queue body analysis, so do // that now. - try module.ensureFuncBodyAnalysisQueued(decl.val.castTag(.function).?.data); + const func_index = module.intern_pool.indexToFunc(decl.val.ip_index).unwrap().?; + try module.ensureFuncBodyAnalysisQueued(func_index); } }, .update_embed_file => |embed_file| { diff --git a/src/InternPool.zig b/src/InternPool.zig index d19cc3d647..ec4d1df45f 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -34,6 +34,12 @@ allocated_unions: std.SegmentedList(Module.Union, 0) = .{}, /// When a Union object is freed from `allocated_unions`, it is pushed into this stack. unions_free_list: std.ArrayListUnmanaged(Module.Union.Index) = .{}, +/// Fn objects are stored in this data structure because: +/// * They need to be mutated after creation. +allocated_funcs: std.SegmentedList(Module.Fn, 0) = .{}, +/// When a Fn object is freed from `allocated_funcs`, it is pushed into this stack. +funcs_free_list: std.ArrayListUnmanaged(Module.Fn.Index) = .{}, + /// InferredErrorSet objects are stored in this data structure because: /// * They contain pointers such as the errors map and the set of other inferred error sets. /// * They need to be mutated after creation. @@ -66,18 +72,18 @@ const Limb = std.math.big.Limb; const InternPool = @This(); const Module = @import("Module.zig"); +const Sema = @import("Sema.zig"); const KeyAdapter = struct { intern_pool: *const InternPool, pub fn eql(ctx: @This(), a: Key, b_void: void, b_map_index: usize) bool { _ = b_void; - return ctx.intern_pool.indexToKey(@intToEnum(Index, b_map_index)).eql(a); + return ctx.intern_pool.indexToKey(@intToEnum(Index, b_map_index)).eql(a, ctx.intern_pool); } pub fn hash(ctx: @This(), a: Key) u32 { - _ = ctx; - return a.hash32(); + return a.hash32(ctx.intern_pool); } }; @@ -111,10 +117,19 @@ pub const RuntimeIndex = enum(u32) { } }; +/// An index into `string_bytes`. +pub const String = enum(u32) { + _, +}; + /// An index into `string_bytes`. pub const NullTerminatedString = enum(u32) { _, + pub fn toString(self: NullTerminatedString) String { + return @intToEnum(String, @enumToInt(self)); + } + pub fn toOptional(self: NullTerminatedString) OptionalNullTerminatedString { return @intToEnum(OptionalNullTerminatedString, @enumToInt(self)); } @@ -180,23 +195,20 @@ pub const Key = union(enum) { /// Typed `undefined`. This will never be `none`; untyped `undefined` is represented /// via `simple_value` and has a named `Index` tag for it. undef: Index, + runtime_value: TypeValue, simple_value: SimpleValue, - extern_func: struct { - ty: Index, - /// The Decl that corresponds to the function itself. - decl: Module.Decl.Index, - /// Library name if specified. - /// For example `extern "c" fn write(...) usize` would have 'c' as library name. - /// Index into the string table bytes. 
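A reduced sketch of the `*Module.Fn` to `Module.Fn.Index` migration visible in this commit (the `Job.codegen_func` change and the new `allocated_funcs` list above). The pool type and field names below are illustrative stand-ins, not the real API: a `std.SegmentedList` never moves items it has already allocated, but a 32-bit index is smaller than a pointer, trivially hashable, and stays meaningful across serialization, so consumers store the index and resolve the pointer on demand.

    const std = @import("std");

    const Fn = struct { analyzed: bool };

    const FnIndex = enum(u32) { _ };

    const MiniPool = struct {
        allocated_funcs: std.SegmentedList(Fn, 0) = .{},

        // Resolve an index back to a stable pointer when mutation is needed.
        fn funcPtr(pool: *MiniPool, index: FnIndex) *Fn {
            return pool.allocated_funcs.at(@enumToInt(index));
        }
    };

The old inline `extern_func` key definition concludes below, replaced by the named `ExternFunc` and `Func` types.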
- lib_name: u32, - }, + variable: Key.Variable, + extern_func: ExternFunc, + func: Func, int: Key.Int, + err: Error, + error_union: ErrorUnion, + enum_literal: NullTerminatedString, /// A specific enum tag, indicated by the integer tag value. enum_tag: Key.EnumTag, float: Key.Float, ptr: Ptr, opt: Opt, - /// An instance of a struct, array, or vector. /// Each element/field stored as an `Index`. /// In the case of sentinel-terminated arrays, the sentinel value *is* stored, @@ -261,7 +273,7 @@ pub const Key = union(enum) { pub const ArrayType = struct { len: u64, child: Index, - sentinel: Index, + sentinel: Index = .none, }; pub const VectorType = struct { @@ -369,6 +381,7 @@ pub const Key = union(enum) { return @intCast(u32, x); }, .i64, .big_int => return null, // out of range + .lazy_align, .lazy_size => unreachable, } } }; @@ -441,6 +454,32 @@ pub const Key = union(enum) { } }; + pub const Variable = struct { + ty: Index, + init: Index, + decl: Module.Decl.Index, + lib_name: OptionalNullTerminatedString = .none, + is_extern: bool = false, + is_const: bool = false, + is_threadlocal: bool = false, + is_weak_linkage: bool = false, + }; + + pub const ExternFunc = struct { + ty: Index, + /// The Decl that corresponds to the function itself. + decl: Module.Decl.Index, + /// Library name if specified. + /// For example `extern "c" fn write(...) usize` would have 'c' as library name. + /// Index into the string table bytes. + lib_name: OptionalNullTerminatedString, + }; + + pub const Func = struct { + ty: Index, + index: Module.Fn.Index, + }; + pub const Int = struct { ty: Index, storage: Storage, @@ -449,6 +488,8 @@ pub const Key = union(enum) { u64: u64, i64: i64, big_int: BigIntConst, + lazy_align: Index, + lazy_size: Index, /// Big enough to fit any non-BigInt value pub const BigIntSpace = struct { @@ -460,13 +501,26 @@ pub const Key = union(enum) { pub fn toBigInt(storage: Storage, space: *BigIntSpace) BigIntConst { return switch (storage) { .big_int => |x| x, - .u64 => |x| BigIntMutable.init(&space.limbs, x).toConst(), - .i64 => |x| BigIntMutable.init(&space.limbs, x).toConst(), + inline .u64, .i64 => |x| BigIntMutable.init(&space.limbs, x).toConst(), + .lazy_align, .lazy_size => unreachable, }; } }; }; + pub const Error = struct { + ty: Index, + name: NullTerminatedString, + }; + + pub const ErrorUnion = struct { + ty: Index, + val: union(enum) { + err_name: NullTerminatedString, + payload: Index, + }, + }; + pub const EnumTag = struct { /// The enum type. 
ty: Index, @@ -497,19 +551,8 @@ pub const Key = union(enum) { len: Index = .none, pub const Addr = union(enum) { - @"var": struct { - init: Index, - owner_decl: Module.Decl.Index, - lib_name: OptionalNullTerminatedString, - is_const: bool, - is_threadlocal: bool, - is_weak_linkage: bool, - }, decl: Module.Decl.Index, - mut_decl: struct { - decl: Module.Decl.Index, - runtime_index: RuntimeIndex, - }, + mut_decl: MutDecl, int: Index, eu_payload: Index, opt_payload: Index, @@ -517,6 +560,10 @@ pub const Key = union(enum) { elem: BaseIndex, field: BaseIndex, + pub const MutDecl = struct { + decl: Module.Decl.Index, + runtime_index: RuntimeIndex, + }; pub const BaseIndex = struct { base: Index, index: u64, @@ -546,22 +593,31 @@ pub const Key = union(enum) { storage: Storage, pub const Storage = union(enum) { + bytes: []const u8, elems: []const Index, repeated_elem: Index, + + pub fn values(self: *const Storage) []const Index { + return switch (self.*) { + .bytes => &.{}, + .elems => |elems| elems, + .repeated_elem => |*elem| @as(*const [1]Index, elem), + }; + } }; }; - pub fn hash32(key: Key) u32 { - return @truncate(u32, key.hash64()); + pub fn hash32(key: Key, ip: *const InternPool) u32 { + return @truncate(u32, key.hash64(ip)); } - pub fn hash64(key: Key) u64 { + pub fn hash64(key: Key, ip: *const InternPool) u64 { var hasher = std.hash.Wyhash.init(0); - key.hashWithHasher(&hasher); + key.hashWithHasher(&hasher, ip); return hasher.final(); } - pub fn hashWithHasher(key: Key, hasher: *std.hash.Wyhash) void { + pub fn hashWithHasher(key: Key, hasher: *std.hash.Wyhash, ip: *const InternPool) void { const KeyTag = @typeInfo(Key).Union.tag_type.?; const key_tag: KeyTag = key; std.hash.autoHash(hasher, key_tag); @@ -575,27 +631,45 @@ pub const Key = union(enum) { .error_union_type, .simple_type, .simple_value, - .extern_func, .opt, .struct_type, .union_type, .un, .undef, + .err, + .error_union, + .enum_literal, .enum_tag, .inferred_error_set_type, => |info| std.hash.autoHash(hasher, info), + .runtime_value => |runtime_value| std.hash.autoHash(hasher, runtime_value.val), .opaque_type => |opaque_type| std.hash.autoHash(hasher, opaque_type.decl), .enum_type => |enum_type| std.hash.autoHash(hasher, enum_type.decl), + .variable => |variable| std.hash.autoHash(hasher, variable.decl), + .extern_func => |extern_func| std.hash.autoHash(hasher, extern_func.decl), + .func => |func| std.hash.autoHash(hasher, func.index), + .int => |int| { // Canonicalize all integers by converting them to BigIntConst. - var buffer: Key.Int.Storage.BigIntSpace = undefined; - const big_int = int.storage.toBigInt(&buffer); - - std.hash.autoHash(hasher, int.ty); - std.hash.autoHash(hasher, big_int.positive); - for (big_int.limbs) |limb| std.hash.autoHash(hasher, limb); + switch (int.storage) { + .u64, .i64, .big_int => { + var buffer: Key.Int.Storage.BigIntSpace = undefined; + const big_int = int.storage.toBigInt(&buffer); + + std.hash.autoHash(hasher, int.ty); + std.hash.autoHash(hasher, big_int.positive); + for (big_int.limbs) |limb| std.hash.autoHash(hasher, limb); + }, + .lazy_align, .lazy_size => |lazy_ty| { + std.hash.autoHash( + hasher, + @as(@typeInfo(Key.Int.Storage).Union.tag_type.?, int.storage), + ); + std.hash.autoHash(hasher, lazy_ty); + }, + } }, .float => |float| { @@ -615,7 +689,6 @@ pub const Key = union(enum) { // This is sound due to pointer provenance rules. 
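The hunks just below extend hashing (and equality) so that an aggregate interned with `.bytes` storage and the same aggregate interned with `.elems` storage canonicalize to identical hashes; otherwise equal values would get two pool entries. A reduced sketch of that invariant (the helper is hypothetical):

    const std = @import("std");

    // Canonical form: hash the decoded bytes, regardless of how the
    // aggregate happens to be stored.
    fn hashCanonical(hasher: *std.hash.Wyhash, bytes: []const u8) void {
        for (bytes) |byte| std.hash.autoHash(hasher, byte);
    }

    test "byte storage and element storage hash alike once canonicalized" {
        var a = std.hash.Wyhash.init(0);
        var b = std.hash.Wyhash.init(0);
        hashCanonical(&a, "hi"); // .bytes storage
        const elems = [_]u8{ 'h', 'i' }; // .elems storage, decoded to bytes
        hashCanonical(&b, &elems);
        try std.testing.expectEqual(a.final(), b.final());
    }

The pointer-address hashing continues below.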
std.hash.autoHash(hasher, @as(@typeInfo(Key.Ptr.Addr).Union.tag_type.?, ptr.addr)); switch (ptr.addr) { - .@"var" => |@"var"| std.hash.autoHash(hasher, @"var".owner_decl), .decl => |decl| std.hash.autoHash(hasher, decl), .mut_decl => |mut_decl| std.hash.autoHash(hasher, mut_decl), .int => |int| std.hash.autoHash(hasher, int), @@ -629,13 +702,47 @@ pub const Key = union(enum) { .aggregate => |aggregate| { std.hash.autoHash(hasher, aggregate.ty); - std.hash.autoHash(hasher, @as( - @typeInfo(Key.Aggregate.Storage).Union.tag_type.?, - aggregate.storage, - )); + switch (ip.indexToKey(aggregate.ty)) { + .array_type => |array_type| if (array_type.child == .u8_type) switch (aggregate.storage) { + .bytes => |bytes| for (bytes) |byte| std.hash.autoHash(hasher, byte), + .elems => |elems| { + var buffer: Key.Int.Storage.BigIntSpace = undefined; + for (elems) |elem| std.hash.autoHash( + hasher, + ip.indexToKey(elem).int.storage.toBigInt(&buffer).to(u8) catch + unreachable, + ); + }, + .repeated_elem => |elem| { + const len = ip.aggregateTypeLen(aggregate.ty); + var buffer: Key.Int.Storage.BigIntSpace = undefined; + const byte = ip.indexToKey(elem).int.storage.toBigInt(&buffer).to(u8) catch + unreachable; + var i: u64 = 0; + while (i < len) : (i += 1) std.hash.autoHash(hasher, byte); + }, + }, + else => {}, + } + switch (aggregate.storage) { - .elems => |elems| for (elems) |elem| std.hash.autoHash(hasher, elem), - .repeated_elem => |elem| std.hash.autoHash(hasher, elem), + .bytes => unreachable, + .elems => |elems| { + var buffer: Key.Int.Storage.BigIntSpace = undefined; + for (elems) |elem| std.hash.autoHash( + hasher, + ip.indexToKey(elem).int.storage.toBigInt(&buffer).to(u8) catch + unreachable, + ); + }, + .repeated_elem => |elem| { + const len = ip.aggregateTypeLen(aggregate.ty); + var buffer: Key.Int.Storage.BigIntSpace = undefined; + const byte = ip.indexToKey(elem).int.storage.toBigInt(&buffer).to(u8) catch + unreachable; + var i: u64 = 0; + while (i < len) : (i += 1) std.hash.autoHash(hasher, byte); + }, } }, @@ -663,7 +770,7 @@ pub const Key = union(enum) { } } - pub fn eql(a: Key, b: Key) bool { + pub fn eql(a: Key, b: Key, ip: *const InternPool) bool { const KeyTag = @typeInfo(Key).Union.tag_type.?; const a_tag: KeyTag = a; const b_tag: KeyTag = b; @@ -709,9 +816,9 @@ pub const Key = union(enum) { const b_info = b.undef; return a_info == b_info; }, - .extern_func => |a_info| { - const b_info = b.extern_func; - return std.meta.eql(a_info, b_info); + .runtime_value => |a_info| { + const b_info = b.runtime_value; + return a_info.val == b_info.val; }, .opt => |a_info| { const b_info = b.opt; @@ -729,11 +836,36 @@ pub const Key = union(enum) { const b_info = b.un; return std.meta.eql(a_info, b_info); }, + .err => |a_info| { + const b_info = b.err; + return std.meta.eql(a_info, b_info); + }, + .error_union => |a_info| { + const b_info = b.error_union; + return std.meta.eql(a_info, b_info); + }, + .enum_literal => |a_info| { + const b_info = b.enum_literal; + return a_info == b_info; + }, .enum_tag => |a_info| { const b_info = b.enum_tag; return std.meta.eql(a_info, b_info); }, + .variable => |a_info| { + const b_info = b.variable; + return a_info.decl == b_info.decl; + }, + .extern_func => |a_info| { + const b_info = b.extern_func; + return a_info.decl == b_info.decl; + }, + .func => |a_info| { + const b_info = b.func; + return a_info.index == b_info.index; + }, + .ptr => |a_info| { const b_info = b.ptr; if (a_info.ty != b_info.ty or a_info.len != b_info.len) return false; @@ -742,7 +874,6 @@ pub 
const Key = union(enum) { if (@as(AddrTag, a_info.addr) != @as(AddrTag, b_info.addr)) return false; return switch (a_info.addr) { - .@"var" => |a_var| a_var.owner_decl == b_info.addr.@"var".owner_decl, .decl => |a_decl| a_decl == b_info.addr.decl, .mut_decl => |a_mut_decl| std.meta.eql(a_mut_decl, b_info.addr.mut_decl), .int => |a_int| a_int == b_info.addr.int, @@ -765,16 +896,27 @@ pub const Key = union(enum) { .u64 => |bb| aa == bb, .i64 => |bb| aa == bb, .big_int => |bb| bb.orderAgainstScalar(aa) == .eq, + .lazy_align, .lazy_size => false, }, .i64 => |aa| switch (b_info.storage) { .u64 => |bb| aa == bb, .i64 => |bb| aa == bb, .big_int => |bb| bb.orderAgainstScalar(aa) == .eq, + .lazy_align, .lazy_size => false, }, .big_int => |aa| switch (b_info.storage) { .u64 => |bb| aa.orderAgainstScalar(bb) == .eq, .i64 => |bb| aa.orderAgainstScalar(bb) == .eq, .big_int => |bb| aa.eq(bb), + .lazy_align, .lazy_size => false, + }, + .lazy_align => |aa| switch (b_info.storage) { + .u64, .i64, .big_int, .lazy_size => false, + .lazy_align => |bb| aa == bb, + }, + .lazy_size => |aa| switch (b_info.storage) { + .u64, .i64, .big_int, .lazy_align => false, + .lazy_size => |bb| aa == bb, }, }; }, @@ -818,12 +960,43 @@ pub const Key = union(enum) { if (a_info.ty != b_info.ty) return false; const StorageTag = @typeInfo(Key.Aggregate.Storage).Union.tag_type.?; - if (@as(StorageTag, a_info.storage) != @as(StorageTag, b_info.storage)) return false; + if (@as(StorageTag, a_info.storage) != @as(StorageTag, b_info.storage)) { + for (0..@intCast(usize, ip.aggregateTypeLen(a_info.ty))) |elem_index| { + const a_elem = switch (a_info.storage) { + .bytes => |bytes| ip.getIfExists(.{ .int = .{ + .ty = .u8_type, + .storage = .{ .u64 = bytes[elem_index] }, + } }) orelse return false, + .elems => |elems| elems[elem_index], + .repeated_elem => |elem| elem, + }; + const b_elem = switch (b_info.storage) { + .bytes => |bytes| ip.getIfExists(.{ .int = .{ + .ty = .u8_type, + .storage = .{ .u64 = bytes[elem_index] }, + } }) orelse return false, + .elems => |elems| elems[elem_index], + .repeated_elem => |elem| elem, + }; + if (a_elem != b_elem) return false; + } + return true; + } - return switch (a_info.storage) { - .elems => |a_elems| std.mem.eql(Index, a_elems, b_info.storage.elems), - .repeated_elem => |a_elem| a_elem == b_info.storage.repeated_elem, - }; + switch (a_info.storage) { + .bytes => |a_bytes| { + const b_bytes = b_info.storage.bytes; + return std.mem.eql(u8, a_bytes, b_bytes); + }, + .elems => |a_elems| { + const b_elems = b_info.storage.elems; + return std.mem.eql(Index, a_elems, b_elems); + }, + .repeated_elem => |a_elem| { + const b_elem = b_info.storage.repeated_elem; + return a_elem == b_elem; + }, + } }, .anon_struct_type => |a_info| { const b_info = b.anon_struct_type; @@ -876,16 +1049,23 @@ pub const Key = union(enum) { .func_type, => .type_type, - inline .ptr, + inline .runtime_value, + .ptr, .int, .float, .opt, + .variable, .extern_func, + .func, + .err, + .error_union, .enum_tag, .aggregate, .un, => |x| x.ty, + .enum_literal => .enum_literal_type, + .undef => |x| x, .simple_value => |s| switch (s) { @@ -977,8 +1157,8 @@ pub const Index = enum(u32) { manyptr_const_u8_type, manyptr_const_u8_sentinel_0_type, single_const_pointer_to_comptime_int_type, - const_slice_u8_type, - const_slice_u8_sentinel_0_type, + slice_const_u8_type, + slice_const_u8_sentinel_0_type, anyerror_void_error_union_type, generic_poison_type, inferred_alloc_const_type, @@ -1128,11 +1308,11 @@ pub const Index = enum(u32) { }, undef: 
DataIsIndex, + runtime_value: DataIsIndex, simple_value: struct { data: SimpleValue }, - ptr_var: struct { data: *PtrVar }, ptr_mut_decl: struct { data: *PtrMutDecl }, ptr_decl: struct { data: *PtrDecl }, - ptr_int: struct { data: *PtrInt }, + ptr_int: struct { data: *PtrAddr }, ptr_eu_payload: DataIsIndex, ptr_opt_payload: DataIsIndex, ptr_comptime_field: struct { data: *PtrComptimeField }, @@ -1151,6 +1331,12 @@ pub const Index = enum(u32) { int_small: struct { data: *IntSmall }, int_positive: struct { data: u32 }, int_negative: struct { data: u32 }, + int_lazy_align: struct { data: *IntLazy }, + int_lazy_size: struct { data: *IntLazy }, + error_set_error: struct { data: *Key.Error }, + error_union_error: struct { data: *Key.Error }, + error_union_payload: struct { data: *TypeValue }, + enum_literal: struct { data: NullTerminatedString }, enum_tag: struct { data: *Key.EnumTag }, float_f16: struct { data: f16 }, float_f32: struct { data: f32 }, @@ -1160,18 +1346,21 @@ pub const Index = enum(u32) { float_c_longdouble_f80: struct { data: *Float80 }, float_c_longdouble_f128: struct { data: *Float128 }, float_comptime_float: struct { data: *Float128 }, + variable: struct { data: *Variable }, extern_func: struct { data: void }, func: struct { data: void }, only_possible_value: DataIsIndex, union_value: struct { data: *Key.Union }, + bytes: struct { data: *Bytes }, aggregate: struct { data: *Aggregate }, repeated: struct { data: *Repeated }, }) void { _ = self; - @setEvalBranchQuota(10_000); - inline for (@typeInfo(Tag).Enum.fields) |tag| { - inline for (@typeInfo(@typeInfo(@TypeOf(tag_to_encoding_map)).Pointer.child).Struct.fields) |entry| { - if (comptime std.mem.eql(u8, tag.name, entry.name)) break; + const map_fields = @typeInfo(@typeInfo(@TypeOf(tag_to_encoding_map)).Pointer.child).Struct.fields; + @setEvalBranchQuota(2_000); + inline for (@typeInfo(Tag).Enum.fields, 0..) |tag, start| { + inline for (0..map_fields.len) |offset| { + if (comptime std.mem.eql(u8, tag.name, map_fields[(start + offset) % map_fields.len].name)) break; } else { @compileError(@typeName(Tag) ++ "." ++ tag.name ++ " missing dbHelper tag_to_encoding_map entry"); } @@ -1318,14 +1507,14 @@ pub const static_keys = [_]Key{ .is_const = true, } }, - // const_slice_u8_type + // slice_const_u8_type .{ .ptr_type = .{ .elem_type = .u8_type, .size = .Slice, .is_const = true, } }, - // const_slice_u8_sentinel_0_type + // slice_const_u8_sentinel_0_type .{ .ptr_type = .{ .elem_type = .u8_type, .sentinel = .zero_u8, @@ -1505,12 +1694,13 @@ pub const Tag = enum(u8) { /// `data` is `Index` of the type. /// Untyped `undefined` is stored instead via `simple_value`. undef, + /// A wrapper for values which are comptime-known but should + /// semantically be runtime-known. + /// `data` is `Index` of the value. + runtime_value, /// A value that can be represented with only an enum tag. /// data is SimpleValue enum value. simple_value, - /// A pointer to a var. - /// data is extra index of PtrVal, which contains the type and address. - ptr_var, /// A pointer to a decl that can be mutated at comptime. /// data is extra index of PtrMutDecl, which contains the type and address. ptr_mut_decl, @@ -1518,7 +1708,7 @@ pub const Tag = enum(u8) { /// data is extra index of PtrDecl, which contains the type and address. ptr_decl, /// A pointer with an integer value. - /// data is extra index of PtrInt, which contains the type and address. + /// data is extra index of PtrAddr, which contains the type and address. 
/// Only pointer types are allowed to have this encoding. Optional types must use /// `opt_payload` or `opt_null`. ptr_int, @@ -1585,6 +1775,24 @@ pub const Tag = enum(u8) { /// A negative integer value. /// data is a limbs index to `Int`. int_negative, + /// The ABI alignment of a lazy type. + /// data is extra index of `IntLazy`. + int_lazy_align, + /// The ABI size of a lazy type. + /// data is extra index of `IntLazy`. + int_lazy_size, + /// An error value. + /// data is extra index of `Key.Error`. + error_set_error, + /// An error union error. + /// data is extra index of `Key.Error`. + error_union_error, + /// An error union payload. + /// data is extra index of `TypeValue`. + error_union_payload, + /// An enum literal value. + /// data is `NullTerminatedString` of the enum literal name. + enum_literal, /// An enum tag value. /// data is extra index of `Key.EnumTag`. enum_tag, @@ -1617,9 +1825,14 @@ pub const Tag = enum(u8) { /// A comptime_float value. /// data is extra index to Float128. float_comptime_float, + /// A global variable. + /// data is extra index to Variable. + variable, /// An extern function. + /// data is extra index to Key.ExternFunc. extern_func, /// A regular function. + /// data is extra index to Key.Func. func, /// This represents the only possible value for *some* types which have /// only one possible value. Not all only-possible-values are encoded this way; @@ -1631,6 +1844,9 @@ pub const Tag = enum(u8) { only_possible_value, /// data is extra index to Key.Union. union_value, + /// An array of bytes. + /// data is extra index to `Bytes`. + bytes, /// An instance of a struct, array, or vector. /// data is extra index to `Aggregate`. aggregate, @@ -1670,6 +1886,13 @@ pub const TypeFunction = struct { }; }; +pub const Bytes = struct { + /// The type of the aggregate + ty: Index, + /// Index into string_bytes, of len ip.aggregateTypeLen(ty) + bytes: String, +}; + /// Trailing: /// 0. element: Index for each len /// len is determined by the aggregate type. @@ -1843,6 +2066,11 @@ pub const Array = struct { } }; +pub const TypeValue = struct { + ty: Index, + val: Index, +}; + /// Trailing: /// 0. field name: NullTerminatedString for each fields_len; declaration order /// 1. tag value: Index for each fields_len; declaration order @@ -1888,21 +2116,22 @@ pub const PackedU64 = packed struct(u64) { } }; -pub const PtrVar = struct { - ty: Index, - /// If flags.is_extern == true this is `none`. +pub const Variable = struct { + /// This is a value if has_init is true, otherwise a type. init: Index, - owner_decl: Module.Decl.Index, + decl: Module.Decl.Index, /// Library name if specified. /// For example `extern "c" var stderrp = ...` would have 'c' as library name. lib_name: OptionalNullTerminatedString, flags: Flags, pub const Flags = packed struct(u32) { + has_init: bool, + is_extern: bool, is_const: bool, is_threadlocal: bool, is_weak_linkage: bool, - _: u29 = 0, + _: u27 = 0, }; }; @@ -1917,7 +2146,7 @@ pub const PtrMutDecl = struct { runtime_index: RuntimeIndex, }; -pub const PtrInt = struct { +pub const PtrAddr = struct { ty: Index, addr: Index, }; @@ -1949,6 +2178,11 @@ pub const IntSmall = struct { value: u32, }; +pub const IntLazy = struct { + ty: Index, + lazy_ty: Index, +}; + /// A f64 value, broken up into 2 u32 parts.
pub const Float64 = struct { piece0: u32, @@ -2063,6 +2297,9 @@ pub fn deinit(ip: *InternPool, gpa: Allocator) void { ip.unions_free_list.deinit(gpa); ip.allocated_unions.deinit(gpa); + ip.funcs_free_list.deinit(gpa); + ip.allocated_funcs.deinit(gpa); + ip.inferred_error_sets_free_list.deinit(gpa); ip.allocated_inferred_error_sets.deinit(gpa); @@ -2235,6 +2472,13 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { .type_function => .{ .func_type = indexToKeyFuncType(ip, data) }, .undef => .{ .undef = @intToEnum(Index, data) }, + .runtime_value => { + const val = @intToEnum(Index, data); + return .{ .runtime_value = .{ + .ty = ip.typeOf(val), + .val = val, + } }; + }, .opt_null => .{ .opt = .{ .ty = @intToEnum(Index, data), .val = .none, @@ -2251,18 +2495,11 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { .val = payload_val, } }; }, - .ptr_var => { - const info = ip.extraData(PtrVar, data); + .ptr_decl => { + const info = ip.extraData(PtrDecl, data); return .{ .ptr = .{ .ty = info.ty, - .addr = .{ .@"var" = .{ - .init = info.init, - .owner_decl = info.owner_decl, - .lib_name = info.lib_name, - .is_const = info.flags.is_const, - .is_threadlocal = info.flags.is_threadlocal, - .is_weak_linkage = info.flags.is_weak_linkage, - } }, + .addr = .{ .decl = info.decl }, } }; }, .ptr_mut_decl => { @@ -2275,15 +2512,8 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { } }, } }; }, - .ptr_decl => { - const info = ip.extraData(PtrDecl, data); - return .{ .ptr = .{ - .ty = info.ty, - .addr = .{ .decl = info.decl }, - } }; - }, .ptr_int => { - const info = ip.extraData(PtrInt, data); + const info = ip.extraData(PtrAddr, data); return .{ .ptr = .{ .ty = info.ty, .addr = .{ .int = info.addr }, @@ -2383,6 +2613,17 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { .storage = .{ .u64 = info.value }, } }; }, + .int_lazy_align, .int_lazy_size => |tag| { + const info = ip.extraData(IntLazy, data); + return .{ .int = .{ + .ty = info.ty, + .storage = switch (tag) { + .int_lazy_align => .{ .lazy_align = info.lazy_ty }, + .int_lazy_size => .{ .lazy_size = info.lazy_ty }, + else => unreachable, + }, + } }; + }, .float_f16 => .{ .float = .{ .ty = .f16_type, .storage = .{ .f16 = @bitCast(f16, @intCast(u16, data)) }, @@ -2415,8 +2656,21 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { .ty = .comptime_float_type, .storage = .{ .f128 = ip.extraData(Float128, data).get() }, } }, - .extern_func => @panic("TODO"), - .func => @panic("TODO"), + .variable => { + const extra = ip.extraData(Variable, data); + return .{ .variable = .{ + .ty = if (extra.flags.has_init) ip.typeOf(extra.init) else extra.init, + .init = if (extra.flags.has_init) extra.init else .none, + .decl = extra.decl, + .lib_name = extra.lib_name, + .is_extern = extra.flags.is_extern, + .is_const = extra.flags.is_const, + .is_threadlocal = extra.flags.is_threadlocal, + .is_weak_linkage = extra.flags.is_weak_linkage, + } }; + }, + .extern_func => .{ .extern_func = ip.extraData(Key.ExternFunc, data) }, + .func => .{ .func = ip.extraData(Key.Func, data) }, .only_possible_value => { const ty = @intToEnum(Index, data); return switch (ip.indexToKey(ty)) { @@ -2438,6 +2692,14 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { else => unreachable, }; }, + .bytes => { + const extra = ip.extraData(Bytes, data); + const len = @intCast(u32, ip.aggregateTypeLen(extra.ty)); + return .{ .aggregate = .{ + .ty = extra.ty, + .storage = .{ .bytes = ip.string_bytes.items[@enumToInt(extra.bytes)..][0..len] }, + } }; + }, .aggregate => { const 
extra = ip.extraDataTrail(Aggregate, data); const len = @intCast(u32, ip.aggregateTypeLen(extra.data.ty)); @@ -2455,6 +2717,22 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { } }; }, .union_value => .{ .un = ip.extraData(Key.Union, data) }, + .error_set_error => .{ .err = ip.extraData(Key.Error, data) }, + .error_union_error => { + const extra = ip.extraData(Key.Error, data); + return .{ .error_union = .{ + .ty = extra.ty, + .val = .{ .err_name = extra.name }, + } }; + }, + .error_union_payload => { + const extra = ip.extraData(TypeValue, data); + return .{ .error_union = .{ + .ty = extra.ty, + .val = .{ .payload = extra.val }, + } }; + }, + .enum_literal => .{ .enum_literal = @intToEnum(NullTerminatedString, data) }, .enum_tag => .{ .enum_tag = ip.extraData(Key.EnumTag, data) }, }; } @@ -2547,7 +2825,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { _ = ip.map.pop(); var new_key = key; new_key.ptr_type.size = .Many; - const ptr_type_index = try get(ip, gpa, new_key); + const ptr_type_index = try ip.get(gpa, new_key); assert(!(try ip.map.getOrPutAdapted(gpa, key, adapter)).found_existing); try ip.items.ensureUnusedCapacity(gpa, 1); ip.items.appendAssumeCapacity(.{ @@ -2677,6 +2955,13 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .data = @enumToInt(ty), }); }, + .runtime_value => |runtime_value| { + assert(runtime_value.ty == ip.typeOf(runtime_value.val)); + ip.items.appendAssumeCapacity(.{ + .tag = .runtime_value, + .data = @enumToInt(runtime_value.val), + }); + }, .struct_type => |struct_type| { ip.items.appendAssumeCapacity(if (struct_type.index.unwrap()) |i| .{ @@ -2809,7 +3094,35 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, func_type.param_types)); }, - .extern_func => @panic("TODO"), + .variable => |variable| { + const has_init = variable.init != .none; + if (has_init) assert(variable.ty == ip.typeOf(variable.init)); + ip.items.appendAssumeCapacity(.{ + .tag = .variable, + .data = try ip.addExtra(gpa, Variable{ + .init = if (has_init) variable.init else variable.ty, + .decl = variable.decl, + .lib_name = variable.lib_name, + .flags = .{ + .has_init = has_init, + .is_extern = variable.is_extern, + .is_const = variable.is_const, + .is_threadlocal = variable.is_threadlocal, + .is_weak_linkage = variable.is_weak_linkage, + }, + }), + }); + }, + + .extern_func => |extern_func| ip.items.appendAssumeCapacity(.{ + .tag = .extern_func, + .data = try ip.addExtra(gpa, extern_func), + }), + + .func => |func| ip.items.appendAssumeCapacity(.{ + .tag = .func, + .data = try ip.addExtra(gpa, func), + }), .ptr => |ptr| { const ptr_type = ip.indexToKey(ptr.ty).ptr_type; @@ -2817,20 +3130,6 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .none => { assert(ptr_type.size != .Slice); switch (ptr.addr) { - .@"var" => |@"var"| ip.items.appendAssumeCapacity(.{ - .tag = .ptr_var, - .data = try ip.addExtra(gpa, PtrVar{ - .ty = ptr.ty, - .init = @"var".init, - .owner_decl = @"var".owner_decl, - .lib_name = @"var".lib_name, - .flags = .{ - .is_const = @"var".is_const, - .is_threadlocal = @"var".is_threadlocal, - .is_weak_linkage = @"var".is_weak_linkage, - }, - }), - }), .decl => |decl| ip.items.appendAssumeCapacity(.{ .tag = .ptr_decl, .data = try ip.addExtra(gpa, PtrDecl{ @@ -2846,31 +3145,41 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .runtime_index = mut_decl.runtime_index, 
}), }), - .int => |int| ip.items.appendAssumeCapacity(.{ - .tag = .ptr_int, - .data = try ip.addExtra(gpa, PtrInt{ - .ty = ptr.ty, - .addr = int, - }), - }), - .eu_payload, .opt_payload => |data| ip.items.appendAssumeCapacity(.{ - .tag = switch (ptr.addr) { - .eu_payload => .ptr_eu_payload, - .opt_payload => .ptr_opt_payload, - else => unreachable, - }, - .data = @enumToInt(data), - }), - .comptime_field => |field_val| ip.items.appendAssumeCapacity(.{ - .tag = .ptr_comptime_field, - .data = try ip.addExtra(gpa, PtrComptimeField{ - .ty = ptr.ty, - .field_val = field_val, - }), - }), + .int => |int| { + assert(int != .none); + ip.items.appendAssumeCapacity(.{ + .tag = .ptr_int, + .data = try ip.addExtra(gpa, PtrAddr{ + .ty = ptr.ty, + .addr = int, + }), + }); + }, + .eu_payload, .opt_payload => |data| { + assert(data != .none); + ip.items.appendAssumeCapacity(.{ + .tag = switch (ptr.addr) { + .eu_payload => .ptr_eu_payload, + .opt_payload => .ptr_opt_payload, + else => unreachable, + }, + .data = @enumToInt(data), + }); + }, + .comptime_field => |field_val| { + assert(field_val != .none); + ip.items.appendAssumeCapacity(.{ + .tag = .ptr_comptime_field, + .data = try ip.addExtra(gpa, PtrComptimeField{ + .ty = ptr.ty, + .field_val = field_val, + }), + }); + }, .elem, .field => |base_index| { + assert(base_index.base != .none); _ = ip.map.pop(); - const index_index = try get(ip, gpa, .{ .int = .{ + const index_index = try ip.get(gpa, .{ .int = .{ .ty = .usize_type, .storage = .{ .u64 = base_index.index }, } }); @@ -2894,7 +3203,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { new_key.ptr.ty = ip.slicePtrType(ptr.ty); new_key.ptr.len = .none; assert(ip.indexToKey(new_key.ptr.ty).ptr_type.size == .Many); - const ptr_index = try get(ip, gpa, new_key); + const ptr_index = try ip.get(gpa, new_key); assert(!(try ip.map.getOrPutAdapted(gpa, key, adapter)).found_existing); try ip.items.ensureUnusedCapacity(gpa, 1); ip.items.appendAssumeCapacity(.{ @@ -2921,8 +3230,25 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { }, .int => |int| b: { + assert(int.ty == .comptime_int_type or ip.indexToKey(int.ty) == .int_type); + switch (int.storage) { + .u64, .i64, .big_int => {}, + .lazy_align, .lazy_size => |lazy_ty| { + ip.items.appendAssumeCapacity(.{ + .tag = switch (int.storage) { + else => unreachable, + .lazy_align => .int_lazy_align, + .lazy_size => .int_lazy_size, + }, + .data = try ip.addExtra(gpa, IntLazy{ + .ty = int.ty, + .lazy_ty = lazy_ty, + }), + }); + return @intToEnum(Index, ip.items.len - 1); + }, + } switch (int.ty) { - .none => unreachable, .u8_type => switch (int.storage) { .big_int => |big_int| { ip.items.appendAssumeCapacity(.{ @@ -2938,6 +3264,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { }); break :b; }, + .lazy_align, .lazy_size => unreachable, }, .u16_type => switch (int.storage) { .big_int => |big_int| { @@ -2954,6 +3281,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { }); break :b; }, + .lazy_align, .lazy_size => unreachable, }, .u32_type => switch (int.storage) { .big_int => |big_int| { @@ -2970,6 +3298,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { }); break :b; }, + .lazy_align, .lazy_size => unreachable, }, .i32_type => switch (int.storage) { .big_int => |big_int| { @@ -2987,6 +3316,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { }); break :b; }, + .lazy_align, .lazy_size => 
unreachable, }, .usize_type => switch (int.storage) { .big_int => |big_int| { @@ -3007,6 +3337,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { break :b; } }, + .lazy_align, .lazy_size => unreachable, }, .comptime_int_type => switch (int.storage) { .big_int => |big_int| { @@ -3041,6 +3372,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { break :b; } }, + .lazy_align, .lazy_size => unreachable, }, else => {}, } @@ -3077,9 +3409,37 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { const tag: Tag = if (big_int.positive) .int_positive else .int_negative; try addInt(ip, gpa, int.ty, tag, big_int.limbs); }, + .lazy_align, .lazy_size => unreachable, } }, + .err => |err| ip.items.appendAssumeCapacity(.{ + .tag = .error_set_error, + .data = try ip.addExtra(gpa, err), + }), + + .error_union => |error_union| ip.items.appendAssumeCapacity(switch (error_union.val) { + .err_name => |err_name| .{ + .tag = .error_union_error, + .data = try ip.addExtra(gpa, Key.Error{ + .ty = error_union.ty, + .name = err_name, + }), + }, + .payload => |payload| .{ + .tag = .error_union_payload, + .data = try ip.addExtra(gpa, TypeValue{ + .ty = error_union.ty, + .val = payload, + }), + }, + }), + + .enum_literal => |enum_literal| ip.items.appendAssumeCapacity(.{ + .tag = .enum_literal, + .data = @enumToInt(enum_literal), + }), + .enum_tag => |enum_tag| { assert(enum_tag.ty != .none); assert(enum_tag.int != .none); @@ -3131,9 +3491,12 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { }, .aggregate => |aggregate| { - assert(aggregate.ty != .none); + const ty_key = ip.indexToKey(aggregate.ty); const aggregate_len = ip.aggregateTypeLen(aggregate.ty); switch (aggregate.storage) { + .bytes => { + assert(ty_key.array_type.child == .u8_type); + }, .elems => |elems| { assert(elems.len == aggregate_len); for (elems) |elem| assert(elem != .none); @@ -3151,9 +3514,15 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { return @intToEnum(Index, ip.items.len - 1); } - switch (ip.indexToKey(aggregate.ty)) { + switch (ty_key) { .anon_struct_type => |anon_struct_type| { if (switch (aggregate.storage) { + .bytes => |bytes| for (anon_struct_type.values, bytes) |value, byte| { + if (value != ip.getIfExists(.{ .int = .{ + .ty = .u8_type, + .storage = .{ .u64 = byte }, + } })) break false; + } else true, .elems => |elems| std.mem.eql(Index, anon_struct_type.values, elems), .repeated_elem => |elem| for (anon_struct_type.values) |value| { if (value != elem) break false; @@ -3173,34 +3542,80 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { } if (switch (aggregate.storage) { + .bytes => |bytes| for (bytes[1..]) |byte| { + if (byte != bytes[0]) break false; + } else true, .elems => |elems| for (elems[1..]) |elem| { if (elem != elems[0]) break false; } else true, .repeated_elem => true, }) { + const elem = switch (aggregate.storage) { + .bytes => |bytes| elem: { + _ = ip.map.pop(); + const elem = try ip.get(gpa, .{ .int = .{ + .ty = .u8_type, + .storage = .{ .u64 = bytes[0] }, + } }); + assert(!(try ip.map.getOrPutAdapted(gpa, key, adapter)).found_existing); + try ip.items.ensureUnusedCapacity(gpa, 1); + break :elem elem; + }, + .elems => |elems| elems[0], + .repeated_elem => |elem| elem, + }; + try ip.extra.ensureUnusedCapacity( gpa, @typeInfo(Repeated).Struct.fields.len, ); - ip.items.appendAssumeCapacity(.{ .tag = .repeated, .data = 
ip.addExtraAssumeCapacity(Repeated{ .ty = aggregate.ty, - .elem_val = switch (aggregate.storage) { - .elems => |elems| elems[0], - .repeated_elem => |elem| elem, - }, + .elem_val = elem, }), }); return @intToEnum(Index, ip.items.len - 1); } + switch (ty_key) { + .array_type => |array_type| if (array_type.child == .u8_type) { + const len_including_sentinel = aggregate_len + @boolToInt(array_type.sentinel != .none); + try ip.string_bytes.ensureUnusedCapacity(gpa, len_including_sentinel + 1); + try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Bytes).Struct.fields.len); + var buffer: Key.Int.Storage.BigIntSpace = undefined; + switch (aggregate.storage) { + .bytes => |bytes| ip.string_bytes.appendSliceAssumeCapacity(bytes), + .elems => |elems| for (elems) |elem| ip.string_bytes.appendAssumeCapacity( + ip.indexToKey(elem).int.storage.toBigInt(&buffer).to(u8) catch unreachable, + ), + .repeated_elem => |elem| @memset( + ip.string_bytes.addManyAsSliceAssumeCapacity(aggregate_len), + ip.indexToKey(elem).int.storage.toBigInt(&buffer).to(u8) catch unreachable, + ), + } + if (array_type.sentinel != .none) ip.string_bytes.appendAssumeCapacity( + ip.indexToKey(array_type.sentinel).int.storage.toBigInt(&buffer).to(u8) catch + unreachable, + ); + const bytes = try ip.getOrPutTrailingString(gpa, len_including_sentinel); + ip.items.appendAssumeCapacity(.{ + .tag = .bytes, + .data = ip.addExtraAssumeCapacity(Bytes{ + .ty = aggregate.ty, + .bytes = bytes.toString(), + }), + }); + return @intToEnum(Index, ip.items.len - 1); + }, + else => {}, + } + try ip.extra.ensureUnusedCapacity( gpa, @typeInfo(Aggregate).Struct.fields.len + aggregate_len, ); - ip.items.appendAssumeCapacity(.{ .tag = .aggregate, .data = ip.addExtraAssumeCapacity(Aggregate{ @@ -3423,12 +3838,16 @@ pub fn finishGetEnum( return @intToEnum(Index, ip.items.len - 1); } -pub fn getAssumeExists(ip: *const InternPool, key: Key) Index { +pub fn getIfExists(ip: *const InternPool, key: Key) ?Index { const adapter: KeyAdapter = .{ .intern_pool = ip }; - const index = ip.map.getIndexAdapted(key, adapter).?; + const index = ip.map.getIndexAdapted(key, adapter) orelse return null; return @intToEnum(Index, index); } +pub fn getAssumeExists(ip: *const InternPool, key: Key) Index { + return ip.getIfExists(key).?; +} + fn addStringsToMap( ip: *InternPool, gpa: Allocator, @@ -3500,9 +3919,11 @@ fn addExtraAssumeCapacity(ip: *InternPool, extra: anytype) u32 { Module.Decl.Index => @enumToInt(@field(extra, field.name)), Module.Namespace.Index => @enumToInt(@field(extra, field.name)), Module.Namespace.OptionalIndex => @enumToInt(@field(extra, field.name)), + Module.Fn.Index => @enumToInt(@field(extra, field.name)), MapIndex => @enumToInt(@field(extra, field.name)), OptionalMapIndex => @enumToInt(@field(extra, field.name)), RuntimeIndex => @enumToInt(@field(extra, field.name)), + String => @enumToInt(@field(extra, field.name)), NullTerminatedString => @enumToInt(@field(extra, field.name)), OptionalNullTerminatedString => @enumToInt(@field(extra, field.name)), i32 => @bitCast(u32, @field(extra, field.name)), @@ -3510,7 +3931,7 @@ fn addExtraAssumeCapacity(ip: *InternPool, extra: anytype) u32 { TypeFunction.Flags => @bitCast(u32, @field(extra, field.name)), Pointer.PackedOffset => @bitCast(u32, @field(extra, field.name)), Pointer.VectorIndex => @enumToInt(@field(extra, field.name)), - PtrVar.Flags => @bitCast(u32, @field(extra, field.name)), + Variable.Flags => @bitCast(u32, @field(extra, field.name)), else => @compileError("bad field type: " ++ 
@typeName(field.type)), }); } @@ -3566,9 +3987,11 @@ fn extraDataTrail(ip: InternPool, comptime T: type, index: usize) struct { data: Module.Decl.Index => @intToEnum(Module.Decl.Index, int32), Module.Namespace.Index => @intToEnum(Module.Namespace.Index, int32), Module.Namespace.OptionalIndex => @intToEnum(Module.Namespace.OptionalIndex, int32), + Module.Fn.Index => @intToEnum(Module.Fn.Index, int32), MapIndex => @intToEnum(MapIndex, int32), OptionalMapIndex => @intToEnum(OptionalMapIndex, int32), RuntimeIndex => @intToEnum(RuntimeIndex, int32), + String => @intToEnum(String, int32), NullTerminatedString => @intToEnum(NullTerminatedString, int32), OptionalNullTerminatedString => @intToEnum(OptionalNullTerminatedString, int32), i32 => @bitCast(i32, int32), @@ -3576,7 +3999,7 @@ fn extraDataTrail(ip: InternPool, comptime T: type, index: usize) struct { data: TypeFunction.Flags => @bitCast(TypeFunction.Flags, int32), Pointer.PackedOffset => @bitCast(Pointer.PackedOffset, int32), Pointer.VectorIndex => @intToEnum(Pointer.VectorIndex, int32), - PtrVar.Flags => @bitCast(PtrVar.Flags, int32), + Variable.Flags => @bitCast(Variable.Flags, int32), else => @compileError("bad field type: " ++ @typeName(field.type)), }; } @@ -3700,8 +4123,8 @@ pub fn childType(ip: InternPool, i: Index) Index { /// Given a slice type, returns the type of the ptr field. pub fn slicePtrType(ip: InternPool, i: Index) Index { switch (i) { - .const_slice_u8_type => return .manyptr_const_u8_type, - .const_slice_u8_sentinel_0_type => return .manyptr_const_u8_sentinel_0_type, + .slice_const_u8_type => return .manyptr_const_u8_type, + .slice_const_u8_sentinel_0_type => return .manyptr_const_u8_sentinel_0_type, else => {}, } const item = ip.items.get(@enumToInt(i)); @@ -3830,6 +4253,8 @@ pub fn getCoercedInts(ip: *InternPool, gpa: Allocator, int: Key.Int, new_ty: Ind } }, } }); }, + + .lazy_align, .lazy_size => unreachable, } } @@ -3862,6 +4287,14 @@ pub fn indexToFuncType(ip: InternPool, val: Index) ?Key.FuncType { } } +pub fn indexToFunc(ip: InternPool, val: Index) Module.Fn.OptionalIndex { + assert(val != .none); + const tags = ip.items.items(.tag); + if (tags[@enumToInt(val)] != .func) return .none; + const datas = ip.items.items(.data); + return ip.extraData(Key.Func, datas[@enumToInt(val)]).index.toOptional(); +} + pub fn indexToInferredErrorSetType(ip: InternPool, val: Index) Module.Fn.InferredErrorSet.OptionalIndex { assert(val != .none); const tags = ip.items.items(.tag); @@ -3891,6 +4324,15 @@ pub fn isInferredErrorSetType(ip: InternPool, ty: Index) bool { return tags[@enumToInt(ty)] == .type_inferred_error_set; } +/// This is only legal because the initializer is not part of the hash. +pub fn mutateVarInit(ip: *InternPool, index: Index, init_index: Index) void { + assert(ip.items.items(.tag)[@enumToInt(index)] == .variable); + const field_index = inline for (@typeInfo(Variable).Struct.fields, 0..)
|field, field_index| { + if (comptime std.mem.eql(u8, field.name, "init")) break field_index; + } else unreachable; + ip.extra.items[ip.items.items(.data)[@enumToInt(index)] + field_index] = @enumToInt(init_index); +} + pub fn dump(ip: InternPool) void { dumpFallible(ip, std.heap.page_allocator) catch return; } @@ -3903,10 +4345,12 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void { (@sizeOf(Module.Struct) + @sizeOf(Module.Namespace) + @sizeOf(Module.Decl)); const unions_size = ip.allocated_unions.len * (@sizeOf(Module.Union) + @sizeOf(Module.Namespace) + @sizeOf(Module.Decl)); + const funcs_size = ip.allocated_funcs.len * + (@sizeOf(Module.Fn) + @sizeOf(Module.Decl)); // TODO: map overhead size is not taken into account const total_size = @sizeOf(InternPool) + items_size + extra_size + limbs_size + - structs_size + unions_size; + structs_size + unions_size + funcs_size; std.debug.print( \\InternPool size: {d} bytes @@ -3915,6 +4359,7 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void { \\ {d} limbs: {d} bytes \\ {d} structs: {d} bytes \\ {d} unions: {d} bytes + \\ {d} funcs: {d} bytes \\ , .{ total_size, @@ -3928,6 +4373,8 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void { structs_size, ip.allocated_unions.len, unions_size, + ip.allocated_funcs.len, + funcs_size, }); const tags = ip.items.items(.tag); @@ -3982,12 +4429,12 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void { }, .undef => 0, + .runtime_value => 0, .simple_type => 0, .simple_value => 0, - .ptr_var => @sizeOf(PtrVar), .ptr_decl => @sizeOf(PtrDecl), .ptr_mut_decl => @sizeOf(PtrMutDecl), - .ptr_int => @sizeOf(PtrInt), + .ptr_int => @sizeOf(PtrAddr), .ptr_eu_payload => 0, .ptr_opt_payload => 0, .ptr_comptime_field => @sizeOf(PtrComptimeField), @@ -4011,8 +4458,20 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void { const int = ip.limbData(Int, data); break :b @sizeOf(Int) + int.limbs_len * 8; }, + + .int_lazy_align, .int_lazy_size => @sizeOf(IntLazy), + + .error_set_error, .error_union_error => @sizeOf(Key.Error), + .error_union_payload => @sizeOf(TypeValue), + .enum_literal => 0, .enum_tag => @sizeOf(Key.EnumTag), + .bytes => b: { + const info = ip.extraData(Bytes, data); + const len = @intCast(u32, ip.aggregateTypeLen(info.ty)); + break :b @sizeOf(Bytes) + len + + @boolToInt(ip.string_bytes.items[@enumToInt(info.bytes) + len - 1] != 0); + }, .aggregate => b: { const info = ip.extraData(Aggregate, data); const fields_len = @intCast(u32, ip.aggregateTypeLen(info.ty)); @@ -4028,8 +4487,9 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void { .float_c_longdouble_f80 => @sizeOf(Float80), .float_c_longdouble_f128 => @sizeOf(Float128), .float_comptime_float => @sizeOf(Float128), - .extern_func => @panic("TODO"), - .func => @panic("TODO"), + .variable => @sizeOf(Variable) + @sizeOf(Module.Decl), + .extern_func => @sizeOf(Key.ExternFunc) + @sizeOf(Module.Decl), + .func => @sizeOf(Key.Func) + @sizeOf(Module.Fn) + @sizeOf(Module.Decl), .only_possible_value => 0, .union_value => @sizeOf(Key.Union), }); @@ -4071,6 +4531,14 @@ pub fn unionPtrConst(ip: InternPool, index: Module.Union.Index) *const Module.Un return ip.allocated_unions.at(@enumToInt(index)); } +pub fn funcPtr(ip: *InternPool, index: Module.Fn.Index) *Module.Fn { + return ip.allocated_funcs.at(@enumToInt(index)); +} + +pub fn funcPtrConst(ip: InternPool, index: Module.Fn.Index) *const Module.Fn { + return ip.allocated_funcs.at(@enumToInt(index)); +} + pub fn inferredErrorSetPtr(ip: 
*InternPool, index: Module.Fn.InferredErrorSet.Index) *Module.Fn.InferredErrorSet { return ip.allocated_inferred_error_sets.at(@enumToInt(index)); } @@ -4117,6 +4585,25 @@ pub fn destroyUnion(ip: *InternPool, gpa: Allocator, index: Module.Union.Index) }; } +pub fn createFunc( + ip: *InternPool, + gpa: Allocator, + initialization: Module.Fn, +) Allocator.Error!Module.Fn.Index { + if (ip.funcs_free_list.popOrNull()) |index| return index; + const ptr = try ip.allocated_funcs.addOne(gpa); + ptr.* = initialization; + return @intToEnum(Module.Fn.Index, ip.allocated_funcs.len - 1); +} + +pub fn destroyFunc(ip: *InternPool, gpa: Allocator, index: Module.Fn.Index) void { + ip.funcPtr(index).* = undefined; + ip.funcs_free_list.append(gpa, index) catch { + // In order to keep `destroyFunc` a non-fallible function, we ignore memory + // allocation failures here, instead leaking the Fn until garbage collection. + }; +} + pub fn createInferredErrorSet( ip: *InternPool, gpa: Allocator, @@ -4142,9 +4629,25 @@ pub fn getOrPutString( s: []const u8, ) Allocator.Error!NullTerminatedString { const string_bytes = &ip.string_bytes; - const str_index = @intCast(u32, string_bytes.items.len); try string_bytes.ensureUnusedCapacity(gpa, s.len + 1); string_bytes.appendSliceAssumeCapacity(s); + string_bytes.appendAssumeCapacity(0); + return ip.getOrPutTrailingString(gpa, s.len + 1); +} + +/// Uses the last `len` bytes of ip.string_bytes as the key. +pub fn getOrPutTrailingString( + ip: *InternPool, + gpa: Allocator, + len: usize, +) Allocator.Error!NullTerminatedString { + const string_bytes = &ip.string_bytes; + const str_index = @intCast(u32, string_bytes.items.len - len); + if (len > 0 and string_bytes.getLast() == 0) { + _ = string_bytes.pop(); + } else { + try string_bytes.ensureUnusedCapacity(gpa, 1); + } const key: []const u8 = string_bytes.items[str_index..]; const gop = try ip.string_table.getOrPutContextAdapted(gpa, key, std.hash_map.StringIndexAdapter{ .bytes = string_bytes, @@ -4179,6 +4682,10 @@ pub fn stringToSlice(ip: InternPool, s: NullTerminatedString) [:0]const u8 { return string_bytes[start..end :0]; } +pub fn stringToSliceUnwrap(ip: InternPool, s: OptionalNullTerminatedString) ?[:0]const u8 { + return ip.stringToSlice(s.unwrap() orelse return null); +} + pub fn typeOf(ip: InternPool, index: Index) Index { return ip.indexToKey(index).typeOf(); } @@ -4199,7 +4706,7 @@ pub fn aggregateTypeLen(ip: InternPool, ty: Index) u64 { }; } -pub fn isNoReturn(ip: InternPool, ty: InternPool.Index) bool { +pub fn isNoReturn(ip: InternPool, ty: Index) bool { return switch (ty) { .noreturn_type => true, else => switch (ip.indexToKey(ty)) { diff --git a/src/Module.zig b/src/Module.zig index 8174778f48..fa24c237b4 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -109,7 +109,7 @@ memoized_calls: MemoizedCallSet = .{}, /// Contains the values from `@setAlignStack`. A sparse table is used here /// instead of a field of `Fn` because usage of `@setAlignStack` is rare, while /// functions are many. -align_stack_fns: std.AutoHashMapUnmanaged(*const Fn, SetAlignStack) = .{}, +align_stack_fns: std.AutoHashMapUnmanaged(Fn.Index, SetAlignStack) = .{}, /// We optimize memory usage for a compilation with no compile errors by storing the /// error messages and mapping outside of `Decl`.
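The `align_stack_fns` re-keying above (from `*const Fn` to `Fn.Index`) leans on indices being stable handles: an index stays meaningful where a raw pointer may not, for example across reallocation of ordinary array storage. A minimal free-standing sketch of that pattern, assuming simplified stand-ins (`Fn`, `FnIndex`, an `ArrayListUnmanaged` backing store) rather than the compiler's own types:

const std = @import("std");

// Simplified stand-in for Module.Fn; the real struct carries far more state.
const Fn = struct { branch_quota: u32 };
// Simplified stand-in for Module.Fn.Index: a 32-bit handle into the list.
const FnIndex = enum(u32) { _ };

test "an index key survives storage reallocation" {
    const gpa = std.testing.allocator;

    var funcs = std.ArrayListUnmanaged(Fn){};
    defer funcs.deinit(gpa);
    var align_stack = std.AutoHashMapUnmanaged(FnIndex, u32){};
    defer align_stack.deinit(gpa);

    try funcs.append(gpa, .{ .branch_quota = 1000 });
    const index = @intToEnum(FnIndex, funcs.items.len - 1);
    try align_stack.put(gpa, index, 16);

    // Growing the list may relocate every `Fn`, so a `*Fn` key could dangle;
    // the index, and the map entry keyed on it, remain valid.
    try funcs.appendNTimes(gpa, .{ .branch_quota = 1000 }, 4096);
    try std.testing.expectEqual(@as(?u32, 16), align_stack.get(index));
}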
@@ -242,22 +242,23 @@ pub const StringLiteralAdapter = struct { }; const MonomorphedFuncsSet = std.HashMapUnmanaged( - *Fn, + Fn.Index, void, MonomorphedFuncsContext, std.hash_map.default_max_load_percentage, ); const MonomorphedFuncsContext = struct { - pub fn eql(ctx: @This(), a: *Fn, b: *Fn) bool { + mod: *Module, + + pub fn eql(ctx: @This(), a: Fn.Index, b: Fn.Index) bool { _ = ctx; return a == b; } /// Must match `Sema.GenericCallAdapter.hash`. - pub fn hash(ctx: @This(), key: *Fn) u64 { - _ = ctx; - return key.hash; + pub fn hash(ctx: @This(), key: Fn.Index) u64 { + return ctx.mod.funcPtr(key).hash; } }; @@ -272,7 +273,7 @@ pub const MemoizedCall = struct { module: *Module, pub const Key = struct { - func: *Fn, + func: Fn.Index, args: []TypedValue, }; @@ -652,21 +653,12 @@ pub const Decl = struct { pub fn clearValues(decl: *Decl, mod: *Module) void { const gpa = mod.gpa; - if (decl.getExternFn()) |extern_fn| { - extern_fn.deinit(gpa); - gpa.destroy(extern_fn); - } - if (decl.getFunction()) |func| { + if (decl.getFunctionIndex(mod).unwrap()) |func| { _ = mod.align_stack_fns.remove(func); - if (func.comptime_args != null) { - _ = mod.monomorphed_funcs.remove(func); + if (mod.funcPtr(func).comptime_args != null) { + _ = mod.monomorphed_funcs.removeContext(func, .{ .mod = mod }); } - func.deinit(gpa); - gpa.destroy(func); - } - if (decl.getVariable()) |variable| { - variable.deinit(gpa); - gpa.destroy(variable); + mod.destroyFunc(func); } if (decl.value_arena) |value_arena| { if (decl.owns_tv) { @@ -835,11 +827,11 @@ pub const Decl = struct { /// If the Decl has a value and it is a struct, return it, /// otherwise null. - pub fn getStruct(decl: *Decl, mod: *Module) ?*Struct { - return mod.structPtrUnwrap(getStructIndex(decl, mod)); + pub fn getStruct(decl: Decl, mod: *Module) ?*Struct { + return mod.structPtrUnwrap(decl.getStructIndex(mod)); } - pub fn getStructIndex(decl: *Decl, mod: *Module) Struct.OptionalIndex { + pub fn getStructIndex(decl: Decl, mod: *Module) Struct.OptionalIndex { if (!decl.owns_tv) return .none; if (decl.val.ip_index == .none) return .none; return mod.intern_pool.indexToStructType(decl.val.ip_index); @@ -847,7 +839,7 @@ pub const Decl = struct { /// If the Decl has a value and it is a union, return it, /// otherwise null. - pub fn getUnion(decl: *Decl, mod: *Module) ?*Union { + pub fn getUnion(decl: Decl, mod: *Module) ?*Union { if (!decl.owns_tv) return null; if (decl.val.ip_index == .none) return null; return mod.typeToUnion(decl.val.toType()); @@ -855,32 +847,30 @@ pub const Decl = struct { /// If the Decl has a value and it is a function, return it, /// otherwise null. - pub fn getFunction(decl: *const Decl) ?*Fn { - if (!decl.owns_tv) return null; - const func = (decl.val.castTag(.function) orelse return null).data; - return func; + pub fn getFunction(decl: Decl, mod: *Module) ?*Fn { + return mod.funcPtrUnwrap(decl.getFunctionIndex(mod)); + } + + pub fn getFunctionIndex(decl: Decl, mod: *Module) Fn.OptionalIndex { + return if (decl.owns_tv) decl.val.getFunctionIndex(mod) else .none; } /// If the Decl has a value and it is an extern function, returns it, /// otherwise null. 
- pub fn getExternFn(decl: *const Decl) ?*ExternFn { - if (!decl.owns_tv) return null; - const extern_fn = (decl.val.castTag(.extern_fn) orelse return null).data; - return extern_fn; + pub fn getExternFunc(decl: Decl, mod: *Module) ?InternPool.Key.ExternFunc { + return if (decl.owns_tv) decl.val.getExternFunc(mod) else null; } /// If the Decl has a value and it is a variable, returns it, /// otherwise null. - pub fn getVariable(decl: *const Decl) ?*Var { - if (!decl.owns_tv) return null; - const variable = (decl.val.castTag(.variable) orelse return null).data; - return variable; + pub fn getVariable(decl: Decl, mod: *Module) ?InternPool.Key.Variable { + return if (decl.owns_tv) decl.val.getVariable(mod) else null; } /// Gets the namespace that this Decl creates by being a struct, union, /// enum, or opaque. /// Only returns it if the Decl is the owner. - pub fn getInnerNamespaceIndex(decl: *Decl, mod: *Module) Namespace.OptionalIndex { + pub fn getInnerNamespaceIndex(decl: Decl, mod: *Module) Namespace.OptionalIndex { if (!decl.owns_tv) return .none; return switch (decl.val.ip_index) { .empty_struct_type => .none, @@ -896,8 +886,8 @@ pub const Decl = struct { } /// Same as `getInnerNamespaceIndex` but additionally obtains the pointer. - pub fn getInnerNamespace(decl: *Decl, mod: *Module) ?*Namespace { - return if (getInnerNamespaceIndex(decl, mod).unwrap()) |i| mod.namespacePtr(i) else null; + pub fn getInnerNamespace(decl: Decl, mod: *Module) ?*Namespace { + return if (decl.getInnerNamespaceIndex(mod).unwrap()) |i| mod.namespacePtr(i) else null; } pub fn dump(decl: *Decl) void { @@ -927,14 +917,11 @@ pub const Decl = struct { assert(decl.dependencies.swapRemove(other)); } - pub fn isExtern(decl: Decl) bool { + pub fn isExtern(decl: Decl, mod: *Module) bool { assert(decl.has_tv); - return switch (decl.val.ip_index) { - .none => switch (decl.val.tag()) { - .extern_fn => true, - .variable => decl.val.castTag(.variable).?.data.init.ip_index == .unreachable_value, - else => false, - }, + return switch (mod.intern_pool.indexToKey(decl.val.ip_index)) { + .variable => |variable| variable.is_extern, + .extern_func => true, else => false, }; } @@ -1494,6 +1481,28 @@ pub const Fn = struct { is_noinline: bool, calls_or_awaits_errorable_fn: bool = false, + pub const Index = enum(u32) { + _, + + pub fn toOptional(i: Index) OptionalIndex { + return @intToEnum(OptionalIndex, @enumToInt(i)); + } + }; + + pub const OptionalIndex = enum(u32) { + none = std.math.maxInt(u32), + _, + + pub fn init(oi: ?Index) OptionalIndex { + return @intToEnum(OptionalIndex, @enumToInt(oi orelse return .none)); + } + + pub fn unwrap(oi: OptionalIndex) ?Index { + if (oi == .none) return null; + return @intToEnum(Index, @enumToInt(oi)); + } + }; + pub const Analysis = enum { /// This function has not yet undergone analysis, because we have not /// seen a potential runtime call. It may be analyzed in future. @@ -1519,7 +1528,7 @@ pub const Fn = struct { /// or comptime functions. pub const InferredErrorSet = struct { /// The function from which this error set originates. - func: *Fn, + func: Fn.Index, /// All currently known errors that this error set contains. 
This includes /// direct additions via `return error.Foo;`, and possibly also errors that @@ -1543,8 +1552,8 @@ pub const Fn = struct { pub const Index = enum(u32) { _, - pub fn toOptional(i: Index) OptionalIndex { - return @intToEnum(OptionalIndex, @enumToInt(i)); + pub fn toOptional(i: InferredErrorSet.Index) InferredErrorSet.OptionalIndex { + return @intToEnum(InferredErrorSet.OptionalIndex, @enumToInt(i)); } }; @@ -1552,13 +1561,13 @@ pub const Fn = struct { none = std.math.maxInt(u32), _, - pub fn init(oi: ?Index) OptionalIndex { - return @intToEnum(OptionalIndex, @enumToInt(oi orelse return .none)); + pub fn init(oi: ?InferredErrorSet.Index) InferredErrorSet.OptionalIndex { + return @intToEnum(InferredErrorSet.OptionalIndex, @enumToInt(oi orelse return .none)); } - pub fn unwrap(oi: OptionalIndex) ?Index { + pub fn unwrap(oi: InferredErrorSet.OptionalIndex) ?InferredErrorSet.Index { if (oi == .none) return null; - return @intToEnum(Index, @enumToInt(oi)); + return @intToEnum(InferredErrorSet.Index, @enumToInt(oi)); } }; @@ -1587,12 +1596,6 @@ pub const Fn = struct { } }; - /// TODO: remove this function - pub fn deinit(func: *Fn, gpa: Allocator) void { - _ = func; - _ = gpa; - } - pub fn isAnytypeParam(func: Fn, mod: *Module, index: u32) bool { const file = mod.declPtr(func.owner_decl).getFileScope(mod); @@ -1647,28 +1650,6 @@ pub const Fn = struct { } }; -pub const Var = struct { - /// if is_extern == true this is undefined - init: Value, - owner_decl: Decl.Index, - - /// Library name if specified. - /// For example `extern "c" var stderrp = ...` would have 'c' as library name. - /// Allocated with Module's allocator; outlives the ZIR code. - lib_name: ?[*:0]const u8, - - is_extern: bool, - is_mutable: bool, - is_threadlocal: bool, - is_weak_linkage: bool, - - pub fn deinit(variable: *Var, gpa: Allocator) void { - if (variable.lib_name) |lib_name| { - gpa.free(mem.sliceTo(lib_name, 0)); - } - } -}; - pub const DeclAdapter = struct { mod: *Module, @@ -3472,6 +3453,10 @@ pub fn structPtr(mod: *Module, index: Struct.Index) *Struct { return mod.intern_pool.structPtr(index); } +pub fn funcPtr(mod: *Module, index: Fn.Index) *Fn { + return mod.intern_pool.funcPtr(index); +} + pub fn inferredErrorSetPtr(mod: *Module, index: Fn.InferredErrorSet.Index) *Fn.InferredErrorSet { return mod.intern_pool.inferredErrorSetPtr(index); } @@ -3479,7 +3464,11 @@ pub fn inferredErrorSetPtr(mod: *Module, index: Fn.InferredErrorSet.Index) *Fn.I /// This one accepts an index from the InternPool and asserts that it is not /// the anonymous empty struct type. pub fn structPtrUnwrap(mod: *Module, index: Struct.OptionalIndex) ?*Struct { - return structPtr(mod, index.unwrap() orelse return null); + return mod.structPtr(index.unwrap() orelse return null); +} + +pub fn funcPtrUnwrap(mod: *Module, index: Fn.OptionalIndex) ?*Fn { + return mod.funcPtr(index.unwrap() orelse return null); } /// Returns true if and only if the Decl is the top level struct associated with a File. @@ -3952,7 +3941,7 @@ fn updateZirRefs(mod: *Module, file: *File, old_zir: Zir) !void { }; } - if (decl.getFunction()) |func| { + if (decl.getFunction(mod)) |func| { func.zir_body_inst = inst_map.get(func.zir_body_inst) orelse { try file.deleted_decls.append(gpa, decl_index); continue; @@ -4139,7 +4128,7 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void { try mod.deleteDeclExports(decl_index); // Similarly, `@setAlignStack` invocations will be re-discovered. 
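The `Index`/`OptionalIndex` pairs introduced above encode the null case as a `maxInt(u32)` sentinel, so an optional handle stays four bytes wide. A compilable digest of that encoding; the two types mirror the methods added to `Fn.Index` and `Fn.OptionalIndex`, while the test itself is illustrative:

const std = @import("std");

const Index = enum(u32) {
    _,

    pub fn toOptional(i: Index) OptionalIndex {
        return @intToEnum(OptionalIndex, @enumToInt(i));
    }
};

const OptionalIndex = enum(u32) {
    none = std.math.maxInt(u32),
    _,

    pub fn unwrap(oi: OptionalIndex) ?Index {
        if (oi == .none) return null;
        return @intToEnum(Index, @enumToInt(oi));
    }
};

test "the none sentinel adds no storage over a plain index" {
    // `?Index` needs a separate is-null flag (padded out here);
    // the sentinel encoding does not.
    try std.testing.expect(@sizeOf(OptionalIndex) < @sizeOf(?Index));

    const i = @intToEnum(Index, 42);
    try std.testing.expectEqual(@as(?Index, i), i.toOptional().unwrap());
    try std.testing.expectEqual(@as(?Index, null), OptionalIndex.none.unwrap());
}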
- if (decl.getFunction()) |func| { + if (decl.getFunctionIndex(mod).unwrap()) |func| { _ = mod.align_stack_fns.remove(func); } @@ -4229,10 +4218,11 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void { } } -pub fn ensureFuncBodyAnalyzed(mod: *Module, func: *Fn) SemaError!void { +pub fn ensureFuncBodyAnalyzed(mod: *Module, func_index: Fn.Index) SemaError!void { const tracy = trace(@src()); defer tracy.end(); + const func = mod.funcPtr(func_index); const decl_index = func.owner_decl; const decl = mod.declPtr(decl_index); @@ -4264,7 +4254,7 @@ pub fn ensureFuncBodyAnalyzed(mod: *Module, func: *Fn) SemaError!void { defer tmp_arena.deinit(); const sema_arena = tmp_arena.allocator(); - var air = mod.analyzeFnBody(func, sema_arena) catch |err| switch (err) { + var air = mod.analyzeFnBody(func_index, sema_arena) catch |err| switch (err) { error.AnalysisFail => { if (func.state == .in_progress) { // If this decl caused the compile error, the analysis field would @@ -4333,7 +4323,7 @@ pub fn ensureFuncBodyAnalyzed(mod: *Module, func: *Fn) SemaError!void { if (no_bin_file and !dump_llvm_ir) return; - comp.bin_file.updateFunc(mod, func, air, liveness) catch |err| switch (err) { + comp.bin_file.updateFunc(mod, func_index, air, liveness) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, error.AnalysisFail => { decl.analysis = .codegen_failure; @@ -4363,7 +4353,8 @@ pub fn ensureFuncBodyAnalyzed(mod: *Module, func: *Fn) SemaError!void { /// analyzed, and for ensuring it can exist at runtime (see /// `sema.fnHasRuntimeBits`). This function does *not* guarantee that the body /// will be analyzed when it returns: for that, see `ensureFuncBodyAnalyzed`. -pub fn ensureFuncBodyAnalysisQueued(mod: *Module, func: *Fn) !void { +pub fn ensureFuncBodyAnalysisQueued(mod: *Module, func_index: Fn.Index) !void { + const func = mod.funcPtr(func_index); const decl_index = func.owner_decl; const decl = mod.declPtr(decl_index); @@ -4401,7 +4392,7 @@ pub fn ensureFuncBodyAnalysisQueued(mod: *Module, func: *Fn) !void { // Decl itself is safely analyzed, and body analysis is not yet queued - try mod.comp.work_queue.writeItem(.{ .codegen_func = func }); + try mod.comp.work_queue.writeItem(.{ .codegen_func = func_index }); if (mod.emit_h != null) { // TODO: we ideally only want to do this if the function's type changed // since the last update @@ -4532,8 +4523,10 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void { .owner_decl = new_decl, .owner_decl_index = new_decl_index, .func = null, + .func_index = .none, .fn_ret_ty = Type.void, .owner_func = null, + .owner_func_index = .none, }; defer sema.deinit(); @@ -4628,8 +4621,10 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { .owner_decl = decl, .owner_decl_index = decl_index, .func = null, + .func_index = .none, .fn_ret_ty = Type.void, .owner_func = null, + .owner_func_index = .none, }; defer sema.deinit(); @@ -4707,8 +4702,8 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { return true; } - if (decl_tv.val.castTag(.function)) |fn_payload| { - const func = fn_payload.data; + if (mod.intern_pool.indexToFunc(decl_tv.val.ip_index).unwrap()) |func_index| { + const func = mod.funcPtr(func_index); const owns_tv = func.owner_decl == decl_index; if (owns_tv) { var prev_type_has_bits = false; @@ -4718,7 +4713,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { if (decl.has_tv) { prev_type_has_bits = decl.ty.isFnOrHasRuntimeBits(mod); type_changed = !decl.ty.eql(decl_tv.ty, mod); - if 
(decl.getFunction()) |prev_func| { + if (decl.getFunction(mod)) |prev_func| { prev_is_inline = prev_func.state == .inline_only; } } @@ -4757,38 +4752,25 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { switch (decl_tv.val.ip_index) { .generic_poison => unreachable, .unreachable_value => unreachable, - - .none => switch (decl_tv.val.tag()) { - .variable => { - const variable = decl_tv.val.castTag(.variable).?.data; - if (variable.owner_decl == decl_index) { - decl.owns_tv = true; - queue_linker_work = true; - - const copied_init = try variable.init.copy(decl_arena_allocator); - variable.init = copied_init; - } + else => switch (mod.intern_pool.indexToKey(decl_tv.val.ip_index)) { + .variable => |variable| if (variable.decl == decl_index) { + decl.owns_tv = true; + queue_linker_work = true; }, - .extern_fn => { - const extern_fn = decl_tv.val.castTag(.extern_fn).?.data; - if (extern_fn.owner_decl == decl_index) { - decl.owns_tv = true; - queue_linker_work = true; - is_extern = true; - } + + .extern_func => |extern_fn| if (extern_fn.decl == decl_index) { + decl.owns_tv = true; + queue_linker_work = true; + is_extern = true; }, - .function => {}, + .func => {}, else => { log.debug("send global const to linker: {*} ({s})", .{ decl, decl.name }); queue_linker_work = true; }, }, - else => { - log.debug("send global const to linker: {*} ({s})", .{ decl, decl.name }); - queue_linker_work = true; - }, } decl.ty = decl_tv.ty; @@ -4810,12 +4792,9 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { break :blk (try decl_arena_allocator.dupeZ(u8, bytes)).ptr; }; decl.@"addrspace" = blk: { - const addrspace_ctx: Sema.AddressSpaceContext = switch (decl_tv.val.ip_index) { - .none => switch (decl_tv.val.tag()) { - .function, .extern_fn => .function, - .variable => .variable, - else => .constant, - }, + const addrspace_ctx: Sema.AddressSpaceContext = switch (mod.intern_pool.indexToKey(decl_tv.val.ip_index)) { + .variable => .variable, + .extern_func, .func => .function, else => .constant, }; @@ -5388,7 +5367,7 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) Allocator.Err decl.has_align = has_align; decl.has_linksection_or_addrspace = has_linksection_or_addrspace; decl.zir_decl_index = @intCast(u32, decl_sub_index); - if (decl.getFunction()) |_| { + if (decl.getFunctionIndex(mod) != .none) { switch (comp.bin_file.tag) { .coff, .elf, .macho, .plan9 => { // TODO Look into detecting when this would be unnecessary by storing enough state @@ -5572,11 +5551,12 @@ fn deleteDeclExports(mod: *Module, decl_index: Decl.Index) Allocator.Error!void export_owners.deinit(mod.gpa); } -pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air { +pub fn analyzeFnBody(mod: *Module, func_index: Fn.Index, arena: Allocator) SemaError!Air { const tracy = trace(@src()); defer tracy.end(); const gpa = mod.gpa; + const func = mod.funcPtr(func_index); const decl_index = func.owner_decl; const decl = mod.declPtr(decl_index); @@ -5597,8 +5577,10 @@ pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air { .owner_decl = decl, .owner_decl_index = decl_index, .func = func, + .func_index = func_index.toOptional(), .fn_ret_ty = fn_ty_info.return_type.toType(), .owner_func = func, + .owner_func_index = func_index.toOptional(), .branch_quota = @max(func.branch_quota, Sema.default_branch_quota), }; defer sema.deinit(); @@ -5807,8 +5789,7 @@ fn markOutdatedDecl(mod: *Module, decl_index: Decl.Index) !void { for (kv.value) |err| err.deinit(mod.gpa); } if (decl.has_tv 
and decl.owns_tv) { - if (decl.val.castTag(.function)) |payload| { - const func = payload.data; + if (decl.getFunctionIndex(mod).unwrap()) |func| { _ = mod.align_stack_fns.remove(func); } } @@ -5852,6 +5833,14 @@ pub fn destroyUnion(mod: *Module, index: Union.Index) void { return mod.intern_pool.destroyUnion(mod.gpa, index); } +pub fn createFunc(mod: *Module, initialization: Fn) Allocator.Error!Fn.Index { + return mod.intern_pool.createFunc(mod.gpa, initialization); +} + +pub fn destroyFunc(mod: *Module, index: Fn.Index) void { + return mod.intern_pool.destroyFunc(mod.gpa, index); +} + pub fn allocateNewDecl( mod: *Module, namespace: Namespace.Index, @@ -6499,7 +6488,11 @@ pub fn populateTestFunctions( try mod.ensureDeclAnalyzed(decl_index); } const decl = mod.declPtr(decl_index); - const tmp_test_fn_ty = decl.ty.slicePtrFieldType(mod).childType(mod); + const test_fn_ty = decl.ty.slicePtrFieldType(mod).childType(mod); + const null_usize = try mod.intern(.{ .opt = .{ + .ty = try mod.intern(.{ .opt_type = .usize_type }), + .val = .none, + } }); const array_decl_index = d: { // Add mod.test_functions to an array decl then make the test_functions @@ -6512,7 +6505,7 @@ pub fn populateTestFunctions( const array_decl_index = try mod.createAnonymousDeclFromDecl(decl, decl.src_namespace, null, .{ .ty = try mod.arrayType(.{ .len = test_fn_vals.len, - .child = tmp_test_fn_ty.ip_index, + .child = test_fn_ty.ip_index, .sentinel = .none, }), .val = try Value.Tag.aggregate.create(arena, test_fn_vals), @@ -6530,7 +6523,7 @@ pub fn populateTestFunctions( errdefer name_decl_arena.deinit(); const bytes = try name_decl_arena.allocator().dupe(u8, test_name_slice); const test_name_decl_index = try mod.createAnonymousDeclFromDecl(array_decl, array_decl.src_namespace, null, .{ - .ty = try Type.array(name_decl_arena.allocator(), bytes.len, null, Type.u8, mod), + .ty = try mod.arrayType(.{ .len = bytes.len, .child = .u8_type }), .val = try Value.Tag.bytes.create(name_decl_arena.allocator(), bytes), }); try mod.declPtr(test_name_decl_index).finalizeNewArena(&name_decl_arena); @@ -6540,16 +6533,24 @@ pub fn populateTestFunctions( array_decl.dependencies.putAssumeCapacityNoClobber(test_name_decl_index, .normal); try mod.linkerUpdateDecl(test_name_decl_index); - const field_vals = try arena.create([3]Value); - field_vals.* = .{ - try Value.Tag.slice.create(arena, .{ - .ptr = try Value.Tag.decl_ref.create(arena, test_name_decl_index), - .len = try mod.intValue(Type.usize, test_name_slice.len), - }), // name - try Value.Tag.decl_ref.create(arena, test_decl_index), // func - Value.null, // async_frame_size + const test_fn_fields = .{ + // name + try mod.intern(.{ .ptr = .{ + .ty = .slice_const_u8_type, + .addr = .{ .decl = test_name_decl_index }, + } }), + // func + try mod.intern(.{ .ptr = .{ + .ty = test_decl.ty.ip_index, + .addr = .{ .decl = test_decl_index }, + } }), + // async_frame_size + null_usize, }; - test_fn_vals[i] = try Value.Tag.aggregate.create(arena, field_vals); + test_fn_vals[i] = (try mod.intern(.{ .aggregate = .{ + .ty = test_fn_ty.ip_index, + .storage = .{ .elems = &test_fn_fields }, + } })).toValue(); } try array_decl.finalizeNewArena(&new_decl_arena); @@ -6558,36 +6559,25 @@ pub fn populateTestFunctions( try mod.linkerUpdateDecl(array_decl_index); { - var new_decl_arena = std.heap.ArenaAllocator.init(gpa); - errdefer new_decl_arena.deinit(); - const arena = new_decl_arena.allocator(); - - { - // This copy accesses the old Decl Type/Value so it must be done before `clearValues`. 
- const new_ty = try Type.ptr(arena, mod, .{ - .size = .Slice, - .pointee_type = tmp_test_fn_ty, - .mutable = false, - .@"addrspace" = .generic, - }); - const new_var = try gpa.create(Var); - errdefer gpa.destroy(new_var); - new_var.* = decl.val.castTag(.variable).?.data.*; - new_var.init = try Value.Tag.slice.create(arena, .{ - .ptr = try Value.Tag.decl_ref.create(arena, array_decl_index), - .len = try mod.intValue(Type.usize, mod.test_functions.count()), - }); - const new_val = try Value.Tag.variable.create(arena, new_var); - - // Since we are replacing the Decl's value we must perform cleanup on the - // previous value. - decl.clearValues(mod); - decl.ty = new_ty; - decl.val = new_val; - decl.has_tv = true; - } + const new_ty = try mod.ptrType(.{ + .elem_type = test_fn_ty.ip_index, + .is_const = true, + .size = .Slice, + }); + const new_val = decl.val; + const new_init = try mod.intern(.{ .ptr = .{ + .ty = new_ty.ip_index, + .addr = .{ .decl = array_decl_index }, + .len = (try mod.intValue(Type.usize, mod.test_functions.count())).ip_index, + } }); + mod.intern_pool.mutateVarInit(decl.val.ip_index, new_init); - try decl.finalizeNewArena(&new_decl_arena); + // Since we are replacing the Decl's value we must perform cleanup on the + // previous value. + decl.clearValues(mod); + decl.ty = new_ty; + decl.val = new_val; + decl.has_tv = true; } try mod.linkerUpdateDecl(decl_index); } @@ -6660,50 +6650,47 @@ fn reportRetryableFileError( } pub fn markReferencedDeclsAlive(mod: *Module, val: Value) void { - if (val.ip_index != .none) return; - switch (val.tag()) { - .decl_ref_mut => return mod.markDeclIndexAlive(val.castTag(.decl_ref_mut).?.data.decl_index), - .extern_fn => return mod.markDeclIndexAlive(val.castTag(.extern_fn).?.data.owner_decl), - .function => return mod.markDeclIndexAlive(val.castTag(.function).?.data.owner_decl), - .variable => return mod.markDeclIndexAlive(val.castTag(.variable).?.data.owner_decl), - .decl_ref => return mod.markDeclIndexAlive(val.cast(Value.Payload.Decl).?.data), - - .repeated, - .eu_payload, - .opt_payload, - .empty_array_sentinel, - => return mod.markReferencedDeclsAlive(val.cast(Value.Payload.SubValue).?.data), - - .eu_payload_ptr, - .opt_payload_ptr, - => return mod.markReferencedDeclsAlive(val.cast(Value.Payload.PayloadPtr).?.data.container_ptr), - - .slice => { - const slice = val.cast(Value.Payload.Slice).?.data; - mod.markReferencedDeclsAlive(slice.ptr); - mod.markReferencedDeclsAlive(slice.len); - }, - - .elem_ptr => { - const elem_ptr = val.cast(Value.Payload.ElemPtr).?.data; - return mod.markReferencedDeclsAlive(elem_ptr.array_ptr); - }, - .field_ptr => { - const field_ptr = val.cast(Value.Payload.FieldPtr).?.data; - return mod.markReferencedDeclsAlive(field_ptr.container_ptr); - }, - .aggregate => { - for (val.castTag(.aggregate).?.data) |field_val| { - mod.markReferencedDeclsAlive(field_val); - } + switch (val.ip_index) { + .none => switch (val.tag()) { + .aggregate => { + for (val.castTag(.aggregate).?.data) |field_val| { + mod.markReferencedDeclsAlive(field_val); + } + }, + .@"union" => { + const data = val.castTag(.@"union").?.data; + mod.markReferencedDeclsAlive(data.tag); + mod.markReferencedDeclsAlive(data.val); + }, + else => {}, }, - .@"union" => { - const data = val.cast(Value.Payload.Union).?.data; - mod.markReferencedDeclsAlive(data.tag); - mod.markReferencedDeclsAlive(data.val); + else => switch (mod.intern_pool.indexToKey(val.ip_index)) { + .variable => |variable| mod.markDeclIndexAlive(variable.decl), + .extern_func => |extern_func| 
mod.markDeclIndexAlive(extern_func.decl), + .func => |func| mod.markDeclIndexAlive(mod.funcPtr(func.index).owner_decl), + .error_union => |error_union| switch (error_union.val) { + .err_name => {}, + .payload => |payload| mod.markReferencedDeclsAlive(payload.toValue()), + }, + .ptr => |ptr| { + switch (ptr.addr) { + .decl => |decl| mod.markDeclIndexAlive(decl), + .mut_decl => |mut_decl| mod.markDeclIndexAlive(mut_decl.decl), + .int, .comptime_field => {}, + .eu_payload, .opt_payload => |parent| mod.markReferencedDeclsAlive(parent.toValue()), + .elem, .field => |base_index| mod.markReferencedDeclsAlive(base_index.base.toValue()), + } + if (ptr.len != .none) mod.markReferencedDeclsAlive(ptr.len.toValue()); + }, + .opt => |opt| if (opt.val != .none) mod.markReferencedDeclsAlive(opt.val.toValue()), + .aggregate => |aggregate| for (aggregate.storage.values()) |elem| + mod.markReferencedDeclsAlive(elem.toValue()), + .un => |un| { + mod.markReferencedDeclsAlive(un.tag.toValue()); + mod.markReferencedDeclsAlive(un.val.toValue()); + }, + else => {}, }, - - else => {}, } } @@ -7075,6 +7062,12 @@ pub fn intBitsForValue(mod: *Module, val: Value, sign: bool) u16 { return @intCast(u16, big.bitCountTwosComp()); }, + .lazy_align => |lazy_ty| { + return Type.smallestUnsignedBits(lazy_ty.toType().abiAlignment(mod)) + @boolToInt(sign); + }, + .lazy_size => |lazy_ty| { + return Type.smallestUnsignedBits(lazy_ty.toType().abiSize(mod)) + @boolToInt(sign); + }, } } diff --git a/src/Sema.zig b/src/Sema.zig index 7df6e44898..d9b346e638 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -28,10 +28,12 @@ owner_decl_index: Decl.Index, /// For an inline or comptime function call, this will be the root parent function /// which contains the callsite. Corresponds to `owner_decl`. owner_func: ?*Module.Fn, +owner_func_index: Module.Fn.OptionalIndex, /// The function this ZIR code is the body of, according to the source code. /// This starts out the same as `owner_func` and then diverges in the case of /// an inline or comptime function call. func: ?*Module.Fn, +func_index: Module.Fn.OptionalIndex, /// Used to restore the error return trace when returning a non-error from a function. error_return_trace_index_on_fn_entry: Air.Inst.Ref = .none, /// When semantic analysis needs to know the return type of the function whose body @@ -65,7 +67,7 @@ comptime_args_fn_inst: Zir.Inst.Index = 0, /// to use this instead of allocating a fresh one. This avoids an unnecessary /// extra hash table lookup in the `monomorphed_funcs` set. /// Sema will set this to null when it takes ownership. -preallocated_new_func: ?*Module.Fn = null, +preallocated_new_func: Module.Fn.OptionalIndex = .none, /// The key is types that must be fully resolved prior to machine code /// generation pass. 
Types are added to this set when resolving them /// immediately could cause a dependency loop, but they do need to be resolved @@ -92,7 +94,7 @@ unresolved_inferred_allocs: std.AutoHashMapUnmanaged(Air.Inst.Index, void) = .{} const std = @import("std"); const math = std.math; const mem = std.mem; -const Allocator = std.mem.Allocator; +const Allocator = mem.Allocator; const assert = std.debug.assert; const log = std.log.scoped(.sema); @@ -1777,7 +1779,7 @@ pub fn resolveConstString( reason: []const u8, ) ![]u8 { const air_inst = try sema.resolveInst(zir_ref); - const wanted_type = Type.const_slice_u8; + const wanted_type = Type.slice_const_u8; const coerced_inst = try sema.coerce(block, wanted_type, air_inst, src); const val = try sema.resolveConstValue(block, src, coerced_inst, reason); return val.toAllocatedBytes(wanted_type, sema.arena, sema.mod); @@ -1866,11 +1868,10 @@ fn resolveConstMaybeUndefVal( if (try sema.resolveMaybeUndefValAllowVariables(inst)) |val| { switch (val.ip_index) { .generic_poison => return error.GenericPoison, - .none => switch (val.tag()) { + else => switch (sema.mod.intern_pool.indexToKey(val.ip_index)) { .variable => return sema.failWithNeededComptime(block, src, reason), else => return val, }, - else => return val, } } return sema.failWithNeededComptime(block, src, reason); @@ -1889,11 +1890,11 @@ fn resolveConstValue( switch (val.ip_index) { .generic_poison => return error.GenericPoison, .undef => return sema.failWithUseOfUndef(block, src), - .none => switch (val.tag()) { + else => switch (sema.mod.intern_pool.indexToKey(val.ip_index)) { + .undef => return sema.failWithUseOfUndef(block, src), .variable => return sema.failWithNeededComptime(block, src, reason), else => return val, }, - else => return val, } } return sema.failWithNeededComptime(block, src, reason); @@ -1928,11 +1929,11 @@ fn resolveMaybeUndefVal( const val = (try sema.resolveMaybeUndefValAllowVariables(inst)) orelse return null; switch (val.ip_index) { .generic_poison => return error.GenericPoison, - .none => switch (val.tag()) { + .none => return val, + else => switch (sema.mod.intern_pool.indexToKey(val.ip_index)) { .variable => return null, else => return val, }, - else => return val, } } @@ -1948,21 +1949,20 @@ fn resolveMaybeUndefValIntable( var check = val; while (true) switch (check.ip_index) { .generic_poison => return error.GenericPoison, - .none => switch (check.tag()) { - .variable, .decl_ref, .decl_ref_mut, .comptime_field_ptr => return null, - .field_ptr => check = check.castTag(.field_ptr).?.data.container_ptr, - .elem_ptr => check = check.castTag(.elem_ptr).?.data.array_ptr, - .eu_payload_ptr, .opt_payload_ptr => check = check.cast(Value.Payload.PayloadPtr).?.data.container_ptr, - else => { - try sema.resolveLazyValue(val); - return val; + .none => break, + else => switch (sema.mod.intern_pool.indexToKey(check.ip_index)) { + .variable => return null, + .ptr => |ptr| switch (ptr.addr) { + .decl, .mut_decl, .comptime_field => return null, + .int => break, + .eu_payload, .opt_payload => |base| check = base.toValue(), + .elem, .field => |base_index| check = base_index.base.toValue(), }, - }, - else => { - try sema.resolveLazyValue(val); - return val; + else => break, }, }; + try sema.resolveLazyValue(val); + return val; } /// Returns all Value tags including `variable` and `undef`. 
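Every hunk in this part of the series follows the same migration: values stop being
arena-allocated `Value.Tag.*` payloads and become `InternPool` keys, built with
`mod.intern(...)` and decoded with `mod.intern_pool.indexToKey(...)`. A minimal sketch
of both directions, assuming `mod`, `val`, `ptr_ty`, and `decl_index` are whatever is
in scope at a given call site, with `handlePtr` as a hypothetical stand-in for the
caller's key handling:

    // Constructing: intern a key, then view the returned index as a Value.
    const ptr_val = (try mod.intern(.{ .ptr = .{
        .ty = ptr_ty.ip_index,
        .addr = .{ .decl = decl_index },
    } })).toValue();

    // Inspecting: only legacy (un-interned) values still go through val.tag();
    // interned values are decoded by key instead.
    switch (val.ip_index) {
        .none => switch (val.tag()) { // legacy arena representation
            else => {},
        },
        else => switch (mod.intern_pool.indexToKey(val.ip_index)) {
            .ptr => |ptr| handlePtr(ptr),
            else => {},
        },
    }

Because interned values are deduplicated indices rather than arena pointers, later
hunks can build a key from scratch memory and free that memory immediately afterwards.
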
@@ -1994,7 +1994,7 @@ fn resolveMaybeUndefValAllowVariablesMaybeRuntime( if (air_tags[i] == .constant) { const ty_pl = sema.air_instructions.items(.data)[i].ty_pl; const val = sema.air_values.items[ty_pl.payload]; - if (val.tagIsVariable()) return val; + if (val.getVariable(sema.mod) != null) return val; } return opv; } @@ -2003,7 +2003,7 @@ fn resolveMaybeUndefValAllowVariablesMaybeRuntime( .constant => { const ty_pl = air_datas[i].ty_pl; const val = sema.air_values.items[ty_pl.payload]; - if (val.isRuntimeValue()) make_runtime.* = true; + if (val.isRuntimeValue(sema.mod)) make_runtime.* = true; if (val.isPtrToThreadLocal(sema.mod)) make_runtime.* = true; return val; }, @@ -2489,13 +2489,13 @@ fn zirCoerceResultPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE .@"addrspace" = addr_space, }); try sema.maybeQueueFuncBodyAnalysis(iac.data.decl_index); - return sema.addConstant( - ptr_ty, - try Value.Tag.decl_ref_mut.create(sema.arena, .{ - .decl_index = iac.data.decl_index, + return sema.addConstant(ptr_ty, (try sema.mod.intern(.{ .ptr = .{ + .ty = ptr_ty.ip_index, + .addr = .{ .mut_decl = .{ + .decl = iac.data.decl_index, .runtime_index = block.runtime_index, - }), - ); + } }, + } })).toValue()); }, else => {}, } @@ -2949,12 +2949,18 @@ fn zirEnumDecl( } const prev_owner_func = sema.owner_func; + const prev_owner_func_index = sema.owner_func_index; sema.owner_func = null; + sema.owner_func_index = .none; defer sema.owner_func = prev_owner_func; + defer sema.owner_func_index = prev_owner_func_index; const prev_func = sema.func; + const prev_func_index = sema.func_index; sema.func = null; + sema.func_index = .none; defer sema.func = prev_func; + defer sema.func_index = prev_func_index; var wip_captures = try WipCaptureScope.init(gpa, sema.perm_arena, new_decl.src_scope); defer wip_captures.deinit(); @@ -3735,14 +3741,13 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com sema.air_instructions.items(.data)[ptr_inst].ty_pl.ty = final_ptr_ty_inst; try sema.maybeQueueFuncBodyAnalysis(decl_index); - if (var_is_mut) { - sema.air_values.items[value_index] = try Value.Tag.decl_ref_mut.create(sema.arena, .{ - .decl_index = decl_index, + sema.air_values.items[value_index] = (try sema.mod.intern(.{ .ptr = .{ + .ty = final_ptr_ty.ip_index, + .addr = if (var_is_mut) .{ .mut_decl = .{ + .decl = decl_index, .runtime_index = block.runtime_index, - }); - } else { - sema.air_values.items[value_index] = try Value.Tag.decl_ref.create(sema.arena, decl_index); - } + } } else .{ .decl = decl_index }, + } })).toValue(); }, .inferred_alloc => { assert(sema.unresolved_inferred_allocs.remove(ptr_inst)); @@ -3836,7 +3841,10 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com // block so that codegen does not see it. block.instructions.shrinkRetainingCapacity(search_index); try sema.maybeQueueFuncBodyAnalysis(new_decl_index); - sema.air_values.items[value_index] = try Value.Tag.decl_ref.create(sema.arena, new_decl_index); + sema.air_values.items[value_index] = (try sema.mod.intern(.{ .ptr = .{ + .ty = final_elem_ty.ip_index, + .addr = .{ .decl = new_decl_index }, + } })).toValue(); // if bitcast ty ref needs to be made const, make_ptr_const // ZIR handles it later, so we can just use the ty ref here. air_datas[ptr_inst].ty_pl.ty = air_datas[bitcast_inst].ty_op.ty; @@ -4332,12 +4340,16 @@ fn validateUnionInit( // instead a single `store` to the result ptr with a comptime union value. 
block.instructions.shrinkRetainingCapacity(first_block_index); - var union_val = try Value.Tag.@"union".create(sema.arena, .{ - .tag = tag_val, - .val = val, - }); - if (make_runtime) union_val = try Value.Tag.runtime_value.create(sema.arena, union_val); - const union_init = try sema.addConstant(union_ty, union_val); + var union_val = try mod.intern(.{ .un = .{ + .ty = union_ty.ip_index, + .tag = tag_val.ip_index, + .val = val.ip_index, + } }); + if (make_runtime) union_val = try mod.intern(.{ .runtime_value = .{ + .ty = union_ty.ip_index, + .val = union_val, + } }); + const union_init = try sema.addConstant(union_ty, union_val.toValue()); try sema.storePtr2(block, init_src, union_ptr, init_src, union_init, init_src, .store); return; } else if (try sema.typeRequiresComptime(union_ty)) { @@ -4464,14 +4476,15 @@ fn validateStructInit( // We collect the comptime field values in case the struct initialization // ends up being comptime-known. - const field_values = try sema.arena.alloc(Value, struct_ty.structFieldCount(mod)); + const field_values = try sema.gpa.alloc(InternPool.Index, struct_ty.structFieldCount(mod)); + defer sema.gpa.free(field_values); field: for (found_fields, 0..) |field_ptr, i| { if (field_ptr != 0) { // Determine whether the value stored to this pointer is comptime-known. const field_ty = struct_ty.structFieldType(i, mod); if (try sema.typeHasOnePossibleValue(field_ty)) |opv| { - field_values[i] = opv; + field_values[i] = opv.ip_index; continue; } @@ -4536,7 +4549,7 @@ fn validateStructInit( first_block_index = @min(first_block_index, block_index); } if (try sema.resolveMaybeUndefValAllowVariablesMaybeRuntime(bin_op.rhs, &make_runtime)) |val| { - field_values[i] = val; + field_values[i] = val.ip_index; } else if (require_comptime) { const field_ptr_data = sema.code.instructions.items(.data)[field_ptr].pl_node; return sema.failWithNeededComptime(block, field_ptr_data.src(), "initializer of comptime only struct must be comptime-known"); @@ -4570,7 +4583,7 @@ fn validateStructInit( } continue; } - field_values[i] = default_val; + field_values[i] = default_val.ip_index; } if (root_msg) |msg| { @@ -4593,9 +4606,15 @@ fn validateStructInit( // instead a single `store` to the struct_ptr with a comptime struct value. 
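// As in the union case above, the gpa-owned scratch slice of field values is
// interned below as a single `aggregate` key; `mod.intern` does not retain the
// caller's slice, which is why `field_values` can be freed with `defer`.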
block.instructions.shrinkRetainingCapacity(first_block_index); - var struct_val = try Value.Tag.aggregate.create(sema.arena, field_values); - if (make_runtime) struct_val = try Value.Tag.runtime_value.create(sema.arena, struct_val); - const struct_init = try sema.addConstant(struct_ty, struct_val); + var struct_val = try mod.intern(.{ .aggregate = .{ + .ty = struct_ty.ip_index, + .storage = .{ .elems = field_values }, + } }); + if (make_runtime) struct_val = try mod.intern(.{ .runtime_value = .{ + .ty = struct_ty.ip_index, + .val = struct_val, + } }); + const struct_init = try sema.addConstant(struct_ty, struct_val.toValue()); try sema.storePtr2(block, init_src, struct_ptr, init_src, struct_init, init_src, .store); return; } @@ -4611,7 +4630,7 @@ fn validateStructInit( else try sema.structFieldPtrByIndex(block, init_src, struct_ptr, @intCast(u32, i), field_src, struct_ty, true); const field_ty = sema.typeOf(default_field_ptr).childType(mod); - const init = try sema.addConstant(field_ty, field_values[i]); + const init = try sema.addConstant(field_ty, field_values[i].toValue()); try sema.storePtr2(block, init_src, default_field_ptr, init_src, init, field_src, .store); } } @@ -4691,7 +4710,8 @@ fn zirValidateArrayInit( // Collect the comptime element values in case the array literal ends up // being comptime-known. const array_len_s = try sema.usizeCast(block, init_src, array_ty.arrayLenIncludingSentinel(mod)); - const element_vals = try sema.arena.alloc(Value, array_len_s); + const element_vals = try sema.gpa.alloc(InternPool.Index, array_len_s); + defer sema.gpa.free(element_vals); const opt_opv = try sema.typeHasOnePossibleValue(array_ty); const air_tags = sema.air_instructions.items(.tag); const air_datas = sema.air_instructions.items(.data); @@ -4701,13 +4721,13 @@ fn zirValidateArrayInit( if (array_ty.isTuple(mod)) { if (try array_ty.structFieldValueComptime(mod, i)) |opv| { - element_vals[i] = opv; + element_vals[i] = opv.ip_index; continue; } } else { // Array has one possible value, so value is always comptime-known if (opt_opv) |opv| { - element_vals[i] = opv; + element_vals[i] = opv.ip_index; continue; } } @@ -4768,7 +4788,7 @@ fn zirValidateArrayInit( first_block_index = @min(first_block_index, block_index); } if (try sema.resolveMaybeUndefValAllowVariablesMaybeRuntime(bin_op.rhs, &make_runtime)) |val| { - element_vals[i] = val; + element_vals[i] = val.ip_index; } else { array_is_comptime = false; } @@ -4780,9 +4800,12 @@ fn zirValidateArrayInit( if (array_is_comptime) { if (try sema.resolveDefinedValue(block, init_src, array_ptr)) |ptr_val| { - if (ptr_val.tag() == .comptime_field_ptr) { - // This store was validated by the individual elem ptrs. - return; + switch (mod.intern_pool.indexToKey(ptr_val.ip_index)) { + .ptr => |ptr| switch (ptr.addr) { + .comptime_field => return, // This store was validated by the individual elem ptrs. + else => {}, + }, + else => {}, } } @@ -4790,14 +4813,20 @@ fn zirValidateArrayInit( // instead a single `store` to the array_ptr with a comptime struct value. // Also to populate the sentinel value, if any. 
if (array_ty.sentinel(mod)) |sentinel_val| { - element_vals[instrs.len] = sentinel_val; + element_vals[instrs.len] = sentinel_val.ip_index; } block.instructions.shrinkRetainingCapacity(first_block_index); - var array_val = try Value.Tag.aggregate.create(sema.arena, element_vals); - if (make_runtime) array_val = try Value.Tag.runtime_value.create(sema.arena, array_val); - const array_init = try sema.addConstant(array_ty, array_val); + var array_val = try mod.intern(.{ .aggregate = .{ + .ty = array_ty.ip_index, + .storage = .{ .elems = element_vals }, + } }); + if (make_runtime) array_val = try mod.intern(.{ .runtime_value = .{ + .ty = array_ty.ip_index, + .val = array_val, + } }); + const array_init = try sema.addConstant(array_ty, array_val.toValue()); try sema.storePtr2(block, init_src, array_ptr, init_src, array_init, init_src, .store); } } @@ -5029,7 +5058,7 @@ fn storeToInferredAllocComptime( // There will be only one store_to_inferred_ptr because we are running at comptime. // The alloc will turn into a Decl. if (try sema.resolveMaybeUndefValAllowVariables(operand)) |operand_val| store: { - if (operand_val.tagIsVariable()) break :store; + if (operand_val.getVariable(sema.mod) != null) break :store; var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); iac.data.decl_index = try anon_decl.finish( @@ -5717,8 +5746,8 @@ fn zirExport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { try mod.ensureDeclAnalyzed(decl_index); const exported_decl = mod.declPtr(decl_index); - if (exported_decl.val.castTag(.function)) |some| { - return sema.analyzeExport(block, src, options, some.data.owner_decl); + if (exported_decl.getFunction(mod)) |function| { + return sema.analyzeExport(block, src, options, function.owner_decl); } } try sema.analyzeExport(block, src, options, decl_index); @@ -5741,17 +5770,14 @@ fn zirExportValue(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError }, else => |e| return e, }; - const decl_index = switch (operand.val.tag()) { - .function => operand.val.castTag(.function).?.data.owner_decl, - else => blk: { - var anon_decl = try block.startAnonDecl(); - defer anon_decl.deinit(); - break :blk try anon_decl.finish( - operand.ty, - try operand.val.copy(anon_decl.arena()), - 0, - ); - }, + const decl_index = if (operand.val.getFunction(sema.mod)) |function| function.owner_decl else blk: { + var anon_decl = try block.startAnonDecl(); + defer anon_decl.deinit(); + break :blk try anon_decl.finish( + operand.ty, + try operand.val.copy(anon_decl.arena()), + 0, + ); }; try sema.analyzeExport(block, src, options, decl_index); } @@ -5788,7 +5814,7 @@ pub fn analyzeExport( } // TODO: some backends might support re-exporting extern decls - if (exported_decl.isExtern()) { + if (exported_decl.isExtern(mod)) { return sema.fail(block, src, "export target cannot be extern", .{}); } @@ -5796,7 +5822,7 @@ pub fn analyzeExport( mod.markDeclAlive(exported_decl); try sema.maybeQueueFuncBodyAnalysis(exported_decl_index); - const gpa = mod.gpa; + const gpa = sema.gpa; try mod.decl_exports.ensureUnusedCapacity(gpa, 1); try mod.export_owners.ensureUnusedCapacity(gpa, 1); @@ -5852,8 +5878,9 @@ fn zirSetAlignStack(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Inst alignment, }); } - const func = sema.func orelse + const func_index = sema.func_index.unwrap() orelse return sema.fail(block, src, "@setAlignStack outside function body", .{}); + const func = mod.funcPtr(func_index); const fn_owner_decl = mod.declPtr(func.owner_decl); switch 
(fn_owner_decl.ty.fnCallingConvention(mod)) { @@ -5864,7 +5891,7 @@ fn zirSetAlignStack(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Inst }, } - const gop = try mod.align_stack_fns.getOrPut(mod.gpa, func); + const gop = try mod.align_stack_fns.getOrPut(sema.gpa, func_index); if (gop.found_existing) { const msg = msg: { const msg = try sema.errMsg(block, src, "multiple @setAlignStack in the same function body", .{}); @@ -6191,10 +6218,13 @@ fn funcDeclSrc(sema: *Sema, func_inst: Air.Inst.Ref) !?*Decl { const mod = sema.mod; const func_val = (try sema.resolveMaybeUndefVal(func_inst)) orelse return null; if (func_val.isUndef(mod)) return null; - const owner_decl_index = switch (func_val.tag()) { - .extern_fn => func_val.castTag(.extern_fn).?.data.owner_decl, - .function => func_val.castTag(.function).?.data.owner_decl, - .decl_ref => mod.declPtr(func_val.castTag(.decl_ref).?.data).val.castTag(.function).?.data.owner_decl, + const owner_decl_index = switch (mod.intern_pool.indexToKey(func_val.ip_index)) { + .extern_func => |extern_func| extern_func.decl, + .func => |func| mod.funcPtr(func.index).owner_decl, + .ptr => |ptr| switch (ptr.addr) { + .decl => |decl| decl, + else => return null, + }, else => return null, }; return mod.declPtr(owner_decl_index); @@ -6576,20 +6606,22 @@ const GenericCallAdapter = struct { is_anytype: bool, }; - pub fn eql(ctx: @This(), adapted_key: void, other_key: *Module.Fn) bool { + pub fn eql(ctx: @This(), adapted_key: void, other_key: Module.Fn.Index) bool { _ = adapted_key; + const other_func = ctx.module.funcPtr(other_key); + // Checking for equality may happen on an item that has been inserted // into the map but is not yet fully initialized. In such case, the // two initialized fields are `hash` and `generic_owner_decl`. - if (ctx.generic_fn.owner_decl != other_key.generic_owner_decl.unwrap().?) return false; + if (ctx.generic_fn.owner_decl != other_func.generic_owner_decl.unwrap().?) return false; - const other_comptime_args = other_key.comptime_args.?; + const other_comptime_args = other_func.comptime_args.?; for (other_comptime_args[0..ctx.func_ty_info.param_types.len], 0..) 
|other_arg, i| { const this_arg = ctx.args[i]; const this_is_comptime = !this_arg.val.isGenericPoison(); const other_is_comptime = !other_arg.val.isGenericPoison(); const this_is_anytype = this_arg.is_anytype; - const other_is_anytype = other_key.isAnytypeParam(ctx.module, @intCast(u32, i)); + const other_is_anytype = other_func.isAnytypeParam(ctx.module, @intCast(u32, i)); if (other_is_anytype != this_is_anytype) return false; if (other_is_comptime != this_is_comptime) return false; @@ -6663,7 +6695,7 @@ fn analyzeCall( ); errdefer msg.destroy(sema.gpa); - if (maybe_decl) |fn_decl| try sema.mod.errNoteNonLazy(fn_decl.srcLoc(mod), msg, "function declared here", .{}); + if (maybe_decl) |fn_decl| try mod.errNoteNonLazy(fn_decl.srcLoc(mod), msg, "function declared here", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); @@ -6760,18 +6792,21 @@ fn analyzeCall( if (err == error.AnalysisFail and comptime_reason != null) try comptime_reason.?.explain(sema, sema.err); return err; }; - const module_fn = switch (func_val.tag()) { - .decl_ref => mod.declPtr(func_val.castTag(.decl_ref).?.data).val.castTag(.function).?.data, - .function => func_val.castTag(.function).?.data, - .extern_fn => return sema.fail(block, call_src, "{s} call of extern function", .{ + const module_fn_index = switch (mod.intern_pool.indexToKey(func_val.ip_index)) { + .extern_func => return sema.fail(block, call_src, "{s} call of extern function", .{ @as([]const u8, if (is_comptime_call) "comptime" else "inline"), }), - else => { - assert(callee_ty.isPtrAtRuntime(mod)); - return sema.fail(block, call_src, "{s} call of function pointer", .{ - @as([]const u8, if (is_comptime_call) "comptime" else "inline"), - }); + .func => |function| function.index, + .ptr => |ptr| switch (ptr.addr) { + .decl => |decl| mod.declPtr(decl).getFunctionIndex(mod).unwrap().?, + else => { + assert(callee_ty.isPtrAtRuntime(mod)); + return sema.fail(block, call_src, "{s} call of function pointer", .{ + @as([]const u8, if (is_comptime_call) "comptime" else "inline"), + }); + }, }, + else => unreachable, }; if (func_ty_info.is_var_args) { return sema.fail(block, call_src, "{s} call of variadic function", .{ @@ -6804,6 +6839,7 @@ fn analyzeCall( // In order to save a bit of stack space, directly modify Sema rather // than create a child one. 
const parent_zir = sema.code; + const module_fn = mod.funcPtr(module_fn_index); const fn_owner_decl = mod.declPtr(module_fn.owner_decl); sema.code = fn_owner_decl.getFileScope(mod).zir; defer sema.code = parent_zir; @@ -6819,8 +6855,11 @@ fn analyzeCall( } const parent_func = sema.func; + const parent_func_index = sema.func_index; sema.func = module_fn; + sema.func_index = module_fn_index.toOptional(); defer sema.func = parent_func; + defer sema.func_index = parent_func_index; const parent_err_ret_index = sema.error_return_trace_index_on_fn_entry; sema.error_return_trace_index_on_fn_entry = block.error_return_trace_index; @@ -6856,7 +6895,7 @@ fn analyzeCall( defer if (delete_memoized_call_key) gpa.free(memoized_call_key.args); if (is_comptime_call) { memoized_call_key = .{ - .func = module_fn, + .func = module_fn_index, .args = try gpa.alloc(TypedValue, func_ty_info.param_types.len), }; delete_memoized_call_key = true; @@ -6889,7 +6928,7 @@ fn analyzeCall( &child_block, .unneeded, inst, - new_fn_info, + &new_fn_info, &arg_i, uncasted_args, is_comptime_call, @@ -6907,7 +6946,7 @@ fn analyzeCall( &child_block, mod.argSrc(call_src.node_offset.x, decl, arg_i, bound_arg_src), inst, - new_fn_info, + &new_fn_info, &arg_i, uncasted_args, is_comptime_call, @@ -6950,7 +6989,7 @@ fn analyzeCall( const fn_ret_ty = blk: { if (module_fn.hasInferredErrorSet(mod)) { const ies_index = try mod.intern_pool.createInferredErrorSet(gpa, .{ - .func = module_fn, + .func = module_fn_index, }); const error_set_ty = try mod.intern(.{ .inferred_error_set_type = ies_index }); break :blk try mod.errorUnionType(error_set_ty.toType(), bare_return_type); @@ -6982,7 +7021,7 @@ fn analyzeCall( const new_func_resolved_ty = try mod.funcType(new_fn_info); if (!is_comptime_call and !block.is_typeof) { - try sema.emitDbgInline(block, parent_func.?, module_fn, new_func_resolved_ty, .dbg_inline_begin); + try sema.emitDbgInline(block, parent_func_index.unwrap().?, module_fn_index, new_func_resolved_ty, .dbg_inline_begin); const zir_tags = sema.code.instructions.items(.tag); for (fn_info.param_body) |param| switch (zir_tags[param]) { @@ -7014,7 +7053,7 @@ fn analyzeCall( error.ComptimeReturn => break :result inlining.comptime_result, error.AnalysisFail => { const err_msg = sema.err orelse return err; - if (std.mem.eql(u8, err_msg.msg, recursive_msg)) return err; + if (mem.eql(u8, err_msg.msg, recursive_msg)) return err; try sema.errNote(block, call_src, err_msg, "called from here", .{}); err_msg.clearTrace(sema.gpa); return err; @@ -7027,8 +7066,8 @@ fn analyzeCall( if (!is_comptime_call and !block.is_typeof and sema.typeOf(result).zigTypeTag(mod) != .NoReturn) { try sema.emitDbgInline( block, - module_fn, - parent_func.?, + module_fn_index, + parent_func_index.unwrap().?, mod.declPtr(parent_func.?.owner_decl).ty, .dbg_inline_end, ); @@ -7120,8 +7159,8 @@ fn analyzeCall( } if (try sema.resolveMaybeUndefVal(func)) |func_val| { - if (func_val.castTag(.function)) |func_obj| { - try sema.mod.ensureFuncBodyAnalysisQueued(func_obj.data); + if (mod.intern_pool.indexToFunc(func_val.toIntern()).unwrap()) |func_index| { + try sema.mod.ensureFuncBodyAnalysisQueued(func_index); } } @@ -7147,9 +7186,9 @@ fn analyzeCall( // Function pointers and extern functions aren't guaranteed to // actually be noreturn so we add a safety check for them. 
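// Note that with interned values, the callee classification below switches on
// InternPool keys (`.func`, `.extern_func`, `.ptr`) where it previously
// matched the `.function`/`.decl_ref` Value tags.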
check: { - var func_val = (try sema.resolveMaybeUndefVal(func)) orelse break :check; - switch (func_val.tag()) { - .function, .decl_ref => { + const func_val = (try sema.resolveMaybeUndefVal(func)) orelse break :check; + switch (mod.intern_pool.indexToKey(func_val.toIntern())) { + .func, .extern_func, .ptr => { _ = try block.addNoOp(.unreach); return Air.Inst.Ref.unreachable_value; }, @@ -7196,7 +7235,7 @@ fn analyzeInlineCallArg( param_block: *Block, arg_src: LazySrcLoc, inst: Zir.Inst.Index, - new_fn_info: InternPool.Key.FuncType, + new_fn_info: *InternPool.Key.FuncType, arg_i: *usize, uncasted_args: []const Air.Inst.Ref, is_comptime_call: bool, @@ -7263,7 +7302,7 @@ fn analyzeInlineCallArg( try sema.resolveLazyValue(arg_val); }, } - should_memoize.* = should_memoize.* and !arg_val.canMutateComptimeVarState(); + should_memoize.* = should_memoize.* and !arg_val.canMutateComptimeVarState(sema.mod); memoized_call_key.args[arg_i.*] = .{ .ty = param_ty.toType(), .val = arg_val, @@ -7302,7 +7341,7 @@ fn analyzeInlineCallArg( try sema.resolveLazyValue(arg_val); }, } - should_memoize.* = should_memoize.* and !arg_val.canMutateComptimeVarState(); + should_memoize.* = should_memoize.* and !arg_val.canMutateComptimeVarState(sema.mod); memoized_call_key.args[arg_i.*] = .{ .ty = sema.typeOf(uncasted_arg), .val = arg_val, @@ -7387,11 +7426,11 @@ fn instantiateGenericCall( const gpa = sema.gpa; const func_val = try sema.resolveConstValue(block, func_src, func, "generic function being called must be comptime-known"); - const module_fn = switch (func_val.tag()) { - .function => func_val.castTag(.function).?.data, - .decl_ref => mod.declPtr(func_val.castTag(.decl_ref).?.data).val.castTag(.function).?.data, + const module_fn = mod.funcPtr(switch (mod.intern_pool.indexToKey(func_val.ip_index)) { + .func => |function| function.index, + .ptr => |ptr| mod.declPtr(ptr.addr.decl).getFunctionIndex(mod).unwrap().?, else => unreachable, - }; + }); // Check the Module's generic function map with an adapted context, so that we // can match against `uncasted_args` rather than doing the work below to create a // generic Scope only to junk it if it matches an existing instantiation. @@ -7496,16 +7535,17 @@ fn instantiateGenericCall( .args = generic_args, .module = mod, }; - const gop = try mod.monomorphed_funcs.getOrPutAdapted(gpa, {}, adapter); - const callee = if (!gop.found_existing) callee: { - const new_module_func = try gpa.create(Module.Fn); + const gop = try mod.monomorphed_funcs.getOrPutContextAdapted(gpa, {}, adapter, .{ .mod = mod }); + const callee_index = if (!gop.found_existing) callee: { + const new_module_func_index = try mod.createFunc(undefined); + const new_module_func = mod.funcPtr(new_module_func_index); // This ensures that we can operate on the hash map before the Module.Fn // struct is fully initialized. 
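// `monomorphed_funcs` is now keyed by `Module.Fn.Index` instead of
// `*Module.Fn`, so map operations pass a context (`.{ .mod = mod }`) capable
// of dereferencing an index when hashing and comparing entries.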
new_module_func.hash = precomputed_hash; new_module_func.generic_owner_decl = module_fn.owner_decl.toOptional(); new_module_func.comptime_args = null; - gop.key_ptr.* = new_module_func; + gop.key_ptr.* = new_module_func_index; try namespace.anon_decls.ensureUnusedCapacity(gpa, 1); @@ -7549,7 +7589,7 @@ fn instantiateGenericCall( new_decl_index, uncasted_args, module_fn, - new_module_func, + new_module_func_index, namespace_index, func_ty_info, call_src, @@ -7565,12 +7605,12 @@ fn instantiateGenericCall( } assert(namespace.anon_decls.orderedRemove(new_decl_index)); mod.destroyDecl(new_decl_index); - assert(mod.monomorphed_funcs.remove(new_module_func)); - gpa.destroy(new_module_func); + assert(mod.monomorphed_funcs.removeContext(new_module_func_index, .{ .mod = mod })); + mod.destroyFunc(new_module_func_index); return err; }, else => { - assert(mod.monomorphed_funcs.remove(new_module_func)); + assert(mod.monomorphed_funcs.removeContext(new_module_func_index, .{ .mod = mod })); { errdefer new_decl_arena.deinit(); try new_decl.finalizeNewArena(&new_decl_arena); @@ -7590,6 +7630,7 @@ fn instantiateGenericCall( try new_decl.finalizeNewArena(&new_decl_arena); break :callee new_func; } else gop.key_ptr.*; + const callee = mod.funcPtr(callee_index); callee.branch_quota = @max(callee.branch_quota, sema.branch_quota); @@ -7645,7 +7686,7 @@ fn instantiateGenericCall( sema.owner_func.?.calls_or_awaits_errorable_fn = true; } - try sema.mod.ensureFuncBodyAnalysisQueued(callee); + try sema.mod.ensureFuncBodyAnalysisQueued(callee_index); try sema.air_extra.ensureUnusedCapacity(sema.gpa, @typeInfo(Air.Call).Struct.fields.len + runtime_args_len); @@ -7682,12 +7723,12 @@ fn resolveGenericInstantiationType( new_decl_index: Decl.Index, uncasted_args: []const Air.Inst.Ref, module_fn: *Module.Fn, - new_module_func: *Module.Fn, + new_module_func: Module.Fn.Index, namespace: Namespace.Index, func_ty_info: InternPool.Key.FuncType, call_src: LazySrcLoc, bound_arg_src: ?LazySrcLoc, -) !*Module.Fn { +) !Module.Fn.Index { const mod = sema.mod; const gpa = sema.gpa; @@ -7707,11 +7748,13 @@ fn resolveGenericInstantiationType( .owner_decl = new_decl, .owner_decl_index = new_decl_index, .func = null, + .func_index = .none, .fn_ret_ty = Type.void, .owner_func = null, + .owner_func_index = .none, .comptime_args = try new_decl_arena_allocator.alloc(TypedValue, uncasted_args.len), .comptime_args_fn_inst = module_fn.zir_body_inst, - .preallocated_new_func = new_module_func, + .preallocated_new_func = new_module_func.toOptional(), .is_generic_instantiation = true, .branch_quota = sema.branch_quota, .branch_count = sema.branch_count, @@ -7802,8 +7845,8 @@ fn resolveGenericInstantiationType( const new_func_inst = try child_sema.resolveBody(&child_block, fn_info.param_body, fn_info.param_body_inst); const new_func_val = child_sema.resolveConstValue(&child_block, .unneeded, new_func_inst, undefined) catch unreachable; - const new_func = new_func_val.castTag(.function).?.data; - errdefer new_func.deinit(gpa); + const new_func = new_func_val.getFunctionIndex(mod).unwrap().?; + errdefer mod.destroyFunc(new_func); assert(new_func == new_module_func); arg_i = 0; @@ -7867,7 +7910,10 @@ fn resolveGenericInstantiationType( return error.GenericPoison; } - new_decl.val = try Value.Tag.function.create(new_decl_arena_allocator, new_func); + new_decl.val = (try mod.intern(.{ .func = .{ + .ty = new_decl.ty.ip_index, + .index = new_func, + } })).toValue(); new_decl.@"align" = 0; new_decl.has_tv = true; new_decl.owns_tv = true; @@ -7900,8 +7946,8 
@@ fn resolveTupleLazyValues(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) fn emitDbgInline( sema: *Sema, block: *Block, - old_func: *Module.Fn, - new_func: *Module.Fn, + old_func: Module.Fn.Index, + new_func: Module.Fn.Index, new_func_ty: Type, tag: Air.Inst.Tag, ) CompileError!void { @@ -7910,7 +7956,10 @@ fn emitDbgInline( // Recursive inline call; no dbg_inline needed. if (old_func == new_func) return; - try sema.air_values.append(sema.gpa, try Value.Tag.function.create(sema.arena, new_func)); + try sema.air_values.append(sema.gpa, (try sema.mod.intern(.{ .func = .{ + .ty = new_func_ty.ip_index, + .index = new_func, + } })).toValue()); _ = try block.addInst(.{ .tag = tag, .data = .{ .ty_pl = .{ @@ -8078,12 +8127,11 @@ fn zirErrorValue(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! const name = inst_data.get(sema.code); // Create an error set type with only this error value, and return the value. const kv = try sema.mod.getErrorValue(name); - return sema.addConstant( - try mod.singleErrorSetType(kv.key), - try Value.Tag.@"error".create(sema.arena, .{ - .name = kv.key, - }), - ); + const error_set_type = try mod.singleErrorSetType(kv.key); + return sema.addConstant(error_set_type, (try mod.intern(.{ .err = .{ + .ty = error_set_type.ip_index, + .name = try mod.intern_pool.getOrPutString(sema.gpa, kv.key), + } })).toValue()); } fn zirErrorToInt(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref { @@ -8101,23 +8149,11 @@ fn zirErrorToInt(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat if (val.isUndef(mod)) { return sema.addConstUndef(Type.err_int); } - switch (val.tag()) { - .@"error" => { - return sema.addConstant( - Type.err_int, - try mod.intValue( - Type.err_int, - (try sema.mod.getErrorValue(val.castTag(.@"error").?.data.name)).value, - ), - ); - }, - - // This is not a valid combination with the type `anyerror`. - .the_only_possible_value => unreachable, - - // Assume it's already encoded as an integer. 
- else => return sema.addConstant(Type.err_int, val), - } + const err_name = mod.intern_pool.indexToKey(val.ip_index).err.name; + return sema.addConstant(Type.err_int, try mod.intValue( + Type.err_int, + (try mod.getErrorValue(mod.intern_pool.stringToSlice(err_name))).value, + )); } const op_ty = sema.typeOf(uncasted_operand); @@ -8142,23 +8178,21 @@ fn zirIntToError(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const src = LazySrcLoc.nodeOffset(extra.node); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node }; const uncasted_operand = try sema.resolveInst(extra.operand); const operand = try sema.coerce(block, Type.err_int, uncasted_operand, operand_src); - const mod = sema.mod; if (try sema.resolveDefinedValue(block, operand_src, operand)) |value| { const int = try sema.usizeCast(block, operand_src, value.toUnsignedInt(mod)); if (int > sema.mod.global_error_set.count() or int == 0) return sema.fail(block, operand_src, "integer value '{d}' represents no error", .{int}); - const payload = try sema.arena.create(Value.Payload.Error); - payload.* = .{ - .base = .{ .tag = .@"error" }, - .data = .{ .name = sema.mod.error_name_list.items[int] }, - }; - return sema.addConstant(Type.anyerror, Value.initPayload(&payload.base)); + return sema.addConstant(Type.anyerror, (try mod.intern(.{ .err = .{ + .ty = .anyerror_type, + .name = mod.intern_pool.getString(sema.mod.error_name_list.items[int]).unwrap().?, + } })).toValue()); } try sema.requireRuntimeBlock(block, src, operand_src); if (block.wantSafety()) { @@ -8234,12 +8268,12 @@ fn zirEnumLiteral(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].str_tok; - const duped_name = try sema.arena.dupe(u8, inst_data.get(sema.code)); - return sema.addConstant( - .{ .ip_index = .enum_literal_type }, - try Value.Tag.enum_literal.create(sema.arena, duped_name), - ); + const name = inst_data.get(sema.code); + return sema.addConstant(.{ .ip_index = .enum_literal_type }, (try mod.intern(.{ + .enum_literal = try mod.intern_pool.getOrPutString(sema.gpa, name), + })).toValue()); } fn zirEnumToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -8404,32 +8438,26 @@ fn analyzeOptionalPayloadPtr( if (try sema.resolveDefinedValue(block, src, optional_ptr)) |ptr_val| { if (initializing) { - if (!ptr_val.isComptimeMutablePtr()) { + if (!ptr_val.isComptimeMutablePtr(mod)) { // If the pointer resulting from this function was stored at comptime, // the optional non-null bit would be set that way. But in this case, // we need to emit a runtime instruction to do it. 
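// The comptime payload pointer produced below is an interned `.ptr` key whose
// address names the parent pointer (`.addr = .{ .opt_payload = ... }`),
// replacing the old arena-allocated `opt_payload_ptr` payload.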
_ = try block.addTyOp(.optional_payload_ptr_set, child_pointer, optional_ptr); } - return sema.addConstant( - child_pointer, - try Value.Tag.opt_payload_ptr.create(sema.arena, .{ - .container_ptr = ptr_val, - .container_ty = optional_ptr_ty.childType(mod), - }), - ); + return sema.addConstant(child_pointer, (try mod.intern(.{ .ptr = .{ + .ty = child_pointer.ip_index, + .addr = .{ .opt_payload = ptr_val.ip_index }, + } })).toValue()); } if (try sema.pointerDeref(block, src, ptr_val, optional_ptr_ty)) |val| { if (val.isNull(mod)) { return sema.fail(block, src, "unable to unwrap null", .{}); } // The same Value represents the pointer to the optional and the payload. - return sema.addConstant( - child_pointer, - try Value.Tag.opt_payload_ptr.create(sema.arena, .{ - .container_ptr = ptr_val, - .container_ty = optional_ptr_ty.childType(mod), - }), - ); + return sema.addConstant(child_pointer, (try mod.intern(.{ .ptr = .{ + .ty = child_pointer.ip_index, + .addr = .{ .opt_payload = ptr_val.ip_index }, + } })).toValue()); } } @@ -8532,11 +8560,13 @@ fn analyzeErrUnionPayload( const mod = sema.mod; const payload_ty = err_union_ty.errorUnionPayload(mod); if (try sema.resolveDefinedValue(block, operand_src, operand)) |val| { - if (val.getError()) |name| { + if (val.getError(mod)) |name| { return sema.fail(block, src, "caught unexpected error '{s}'", .{name}); } - const data = val.castTag(.eu_payload).?.data; - return sema.addConstant(payload_ty, data); + return sema.addConstant( + payload_ty, + mod.intern_pool.indexToKey(val.ip_index).error_union.val.payload.toValue(), + ); } try sema.requireRuntimeBlock(block, src, null); @@ -8595,33 +8625,26 @@ fn analyzeErrUnionPayloadPtr( if (try sema.resolveDefinedValue(block, src, operand)) |ptr_val| { if (initializing) { - if (!ptr_val.isComptimeMutablePtr()) { + if (!ptr_val.isComptimeMutablePtr(mod)) { // If the pointer resulting from this function was stored at comptime, // the error union error code would be set that way. But in this case, // we need to emit a runtime instruction to do it. 
try sema.requireRuntimeBlock(block, src, null); _ = try block.addTyOp(.errunion_payload_ptr_set, operand_pointer_ty, operand); } - return sema.addConstant( - operand_pointer_ty, - try Value.Tag.eu_payload_ptr.create(sema.arena, .{ - .container_ptr = ptr_val, - .container_ty = operand_ty.childType(mod), - }), - ); + return sema.addConstant(operand_pointer_ty, (try mod.intern(.{ .ptr = .{ + .ty = operand_pointer_ty.ip_index, + .addr = .{ .eu_payload = ptr_val.ip_index }, + } })).toValue()); } if (try sema.pointerDeref(block, src, ptr_val, operand_ty)) |val| { - if (val.getError()) |name| { + if (val.getError(mod)) |name| { return sema.fail(block, src, "caught unexpected error '{s}'", .{name}); } - - return sema.addConstant( - operand_pointer_ty, - try Value.Tag.eu_payload_ptr.create(sema.arena, .{ - .container_ptr = ptr_val, - .container_ty = operand_ty.childType(mod), - }), - ); + return sema.addConstant(operand_pointer_ty, (try mod.intern(.{ .ptr = .{ + .ty = operand_pointer_ty.ip_index, + .addr = .{ .eu_payload = ptr_val.ip_index }, + } })).toValue()); } } @@ -8664,7 +8687,7 @@ fn analyzeErrUnionCode(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air const result_ty = operand_ty.errorUnionSet(mod); if (try sema.resolveDefinedValue(block, src, operand)) |val| { - assert(val.getError() != null); + assert(val.getError(mod) != null); return sema.addConstant(result_ty, val); } @@ -8694,7 +8717,7 @@ fn zirErrUnionCodePtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE if (try sema.resolveDefinedValue(block, src, operand)) |pointer_val| { if (try sema.pointerDeref(block, src, pointer_val, operand_ty)) |val| { - assert(val.getError() != null); + assert(val.getError(mod) != null); return sema.addConstant(result_ty, val); } } @@ -8931,20 +8954,21 @@ fn funcCommon( } var destroy_fn_on_error = false; - const new_func: *Module.Fn = new_func: { + const new_func_index = new_func: { if (!has_body) break :new_func undefined; if (sema.comptime_args_fn_inst == func_inst) { - const new_func = sema.preallocated_new_func.?; - sema.preallocated_new_func = null; // take ownership - break :new_func new_func; + const new_func_index = sema.preallocated_new_func.unwrap().?; + sema.preallocated_new_func = .none; // take ownership + break :new_func new_func_index; } destroy_fn_on_error = true; - const new_func = try gpa.create(Module.Fn); + var new_func: Module.Fn = undefined; // Set this here so that the inferred return type can be printed correctly if it appears in an error. 
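// Function objects are now stored in the Module and addressed by index
// (`mod.createFunc`/`mod.destroyFunc`), replacing per-function heap
// allocations via `gpa.create(Module.Fn)`.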
new_func.owner_decl = sema.owner_decl_index; - break :new_func new_func; + const new_func_index = try mod.createFunc(new_func); + break :new_func new_func_index; }; - errdefer if (destroy_fn_on_error) gpa.destroy(new_func); + errdefer if (destroy_fn_on_error) mod.destroyFunc(new_func_index); const target = sema.mod.getTarget(); const fn_ty: Type = fn_ty: { @@ -9008,7 +9032,7 @@ fn funcCommon( else blk: { try sema.validateErrorUnionPayloadType(block, bare_return_type, ret_ty_src); const ies_index = try mod.intern_pool.createInferredErrorSet(gpa, .{ - .func = new_func, + .func = new_func_index, }); const error_set_ty = try mod.intern(.{ .inferred_error_set_type = ies_index }); break :blk try mod.errorUnionType(error_set_ty.toType(), bare_return_type); @@ -9158,26 +9182,16 @@ fn funcCommon( sema.owner_decl.@"addrspace" = address_space orelse .generic; if (is_extern) { - const new_extern_fn = try gpa.create(Module.ExternFn); - errdefer gpa.destroy(new_extern_fn); - - new_extern_fn.* = Module.ExternFn{ - .owner_decl = sema.owner_decl_index, - .lib_name = null, - }; - - if (opt_lib_name) |lib_name| { - new_extern_fn.lib_name = try sema.handleExternLibName(block, .{ - .node_offset_lib_name = src_node_offset, - }, lib_name); - } - - const extern_fn_payload = try sema.arena.create(Value.Payload.ExternFn); - extern_fn_payload.* = .{ - .base = .{ .tag = .extern_fn }, - .data = new_extern_fn, - }; - return sema.addConstant(fn_ty, Value.initPayload(&extern_fn_payload.base)); + return sema.addConstant(fn_ty, (try mod.intern(.{ .extern_func = .{ + .ty = fn_ty.ip_index, + .decl = sema.owner_decl_index, + .lib_name = if (opt_lib_name) |lib_name| (try mod.intern_pool.getOrPutString( + gpa, + try sema.handleExternLibName(block, .{ + .node_offset_lib_name = src_node_offset, + }, lib_name), + )).toOptional() else .none, + } })).toValue()); } if (!has_body) { @@ -9191,9 +9205,9 @@ fn funcCommon( break :blk if (sema.comptime_args.len == 0) null else sema.comptime_args.ptr; } else null; + const new_func = mod.funcPtr(new_func_index); const hash = new_func.hash; const generic_owner_decl = if (comptime_args == null) .none else new_func.generic_owner_decl; - const fn_payload = try sema.arena.create(Value.Payload.Function); new_func.* = .{ .state = anal_state, .zir_body_inst = func_inst, @@ -9208,11 +9222,10 @@ fn funcCommon( .branch_quota = default_branch_quota, .is_noinline = is_noinline, }; - fn_payload.* = .{ - .base = .{ .tag = .function }, - .data = new_func, - }; - return sema.addConstant(fn_ty, Value.initPayload(&fn_payload.base)); + return sema.addConstant(fn_ty, (try mod.intern(.{ .func = .{ + .ty = fn_ty.ip_index, + .index = new_func_index, + } })).toValue()); } fn analyzeParameter( @@ -9312,7 +9325,7 @@ fn zirParam( const prev_preallocated_new_func = sema.preallocated_new_func; const prev_no_partial_func_type = sema.no_partial_func_ty; block.params = .{}; - sema.preallocated_new_func = null; + sema.preallocated_new_func = .none; sema.no_partial_func_ty = true; defer { block.params.deinit(sema.gpa); @@ -9369,7 +9382,7 @@ fn zirParam( else => |e| return e, } or comptime_syntax; if (sema.inst_map.get(inst)) |arg| { - if (is_comptime and sema.preallocated_new_func != null) { + if (is_comptime and sema.preallocated_new_func != .none) { // We have a comptime value for this parameter so it should be elided from the // function type of the function instruction in this block. 
const coerced_arg = sema.coerce(block, param_ty, arg, .unneeded) catch |err| switch (err) { @@ -9392,7 +9405,7 @@ fn zirParam( assert(sema.inst_map.remove(inst)); } - if (sema.preallocated_new_func != null) { + if (sema.preallocated_new_func != .none) { if (try sema.typeHasOnePossibleValue(param_ty)) |opv| { // In this case we are instantiating a generic function call with a non-comptime // non-anytype parameter that ended up being a one-possible-type. @@ -9640,8 +9653,8 @@ fn intCast( if (wanted_bits == 0) { const ok = if (is_vector) ok: { - const zeros = try Value.Tag.repeated.create(sema.arena, try mod.intValue(operand_scalar_ty, 0)); - const zero_inst = try sema.addConstant(sema.typeOf(operand), zeros); + const zeros = try sema.splat(operand_ty, try mod.intValue(operand_scalar_ty, 0)); + const zero_inst = try sema.addConstant(operand_ty, zeros); const is_in_range = try block.addCmpVector(operand, zero_inst, .eq); const all_in_range = try block.addInst(.{ .tag = .reduce, @@ -9649,7 +9662,7 @@ fn intCast( }); break :ok all_in_range; } else ok: { - const zero_inst = try sema.addConstant(sema.typeOf(operand), try mod.intValue(operand_ty, 0)); + const zero_inst = try sema.addConstant(operand_ty, try mod.intValue(operand_ty, 0)); const is_in_range = try block.addBinOp(.cmp_lte, operand, zero_inst); break :ok is_in_range; }; @@ -9673,10 +9686,7 @@ fn intCast( // requirement: int value fits into target type if (wanted_value_bits < actual_value_bits) { const dest_max_val_scalar = try dest_scalar_ty.maxIntScalar(mod, operand_ty); - const dest_max_val = if (is_vector) - try Value.Tag.repeated.create(sema.arena, dest_max_val_scalar) - else - dest_max_val_scalar; + const dest_max_val = try sema.splat(operand_ty, dest_max_val_scalar); const dest_max = try sema.addConstant(operand_ty, dest_max_val); const diff = try block.addBinOp(.subwrap, dest_max, operand); @@ -9732,7 +9742,8 @@ fn intCast( // no shrinkage, yes sign loss // requirement: signed to unsigned >= 0 const ok = if (is_vector) ok: { - const zero_val = try Value.Tag.repeated.create(sema.arena, try mod.intValue(operand_scalar_ty, 0)); + const scalar_zero = try mod.intValue(operand_scalar_ty, 0); + const zero_val = try sema.splat(operand_ty, scalar_zero); const zero_inst = try sema.addConstant(operand_ty, zero_val); const is_in_range = try block.addCmpVector(operand, zero_inst, .gte); const all_in_range = try block.addInst(.{ @@ -10139,17 +10150,18 @@ fn zirSwitchCapture( .@"volatile" = operand_ptr_ty.isVolatilePtr(mod), .@"addrspace" = operand_ptr_ty.ptrAddressSpace(mod), }); - return sema.addConstant( - ptr_field_ty, - try Value.Tag.field_ptr.create(sema.arena, .{ - .container_ptr = union_val, - .container_ty = operand_ty, - .field_index = field_index, - }), - ); + return sema.addConstant(ptr_field_ty, (try mod.intern(.{ .ptr = .{ + .ty = ptr_field_ty.ip_index, + .addr = .{ .field = .{ + .base = union_val.ip_index, + .index = field_index, + } }, + } })).toValue()); } - const tag_and_val = union_val.castTag(.@"union").?.data; - return sema.addConstant(field_ty, tag_and_val.val); + return sema.addConstant( + field_ty, + mod.intern_pool.indexToKey(union_val.ip_index).un.val.toValue(), + ); } if (is_ref) { const ptr_field_ty = try Type.ptr(sema.arena, sema.mod, .{ @@ -10243,14 +10255,13 @@ fn zirSwitchCapture( }); if (try sema.resolveDefinedValue(block, operand_src, operand_ptr)) |op_ptr_val| { - return sema.addConstant( - field_ty_ptr, - try Value.Tag.field_ptr.create(sema.arena, .{ - .container_ptr = op_ptr_val, - .container_ty = operand_ty, 
- .field_index = first_field_index, - }), - ); + return sema.addConstant(field_ty_ptr, (try mod.intern(.{ .ptr = .{ + .ty = field_ty_ptr.ip_index, + .addr = .{ .field = .{ + .base = op_ptr_val.ip_index, + .index = first_field_index, + } }, + } })).toValue()); } try sema.requireRuntimeBlock(block, operand_src, null); return block.addStructFieldPtr(operand_ptr, first_field_index, field_ty_ptr); @@ -10273,7 +10284,7 @@ fn zirSwitchCapture( const item_ref = try sema.resolveInst(item); // Previous switch validation ensured this will succeed const item_val = sema.resolveConstValue(block, .unneeded, item_ref, "") catch unreachable; - const name_ip = try mod.intern_pool.getOrPutString(gpa, item_val.getError().?); + const name_ip = try mod.intern_pool.getOrPutString(gpa, item_val.getError(mod).?); names.putAssumeCapacityNoClobber(name_ip, {}); } const else_error_ty = try mod.errorSetFromUnsortedNames(names.keys()); @@ -10284,7 +10295,7 @@ fn zirSwitchCapture( // Previous switch validation ensured this will succeed const item_val = sema.resolveConstValue(block, .unneeded, item_ref, "") catch unreachable; - const item_ty = try mod.singleErrorSetType(item_val.getError().?); + const item_ty = try mod.singleErrorSetType(item_val.getError(mod).?); return sema.bitCast(block, item_ty, operand, operand_src, null); } }, @@ -10809,10 +10820,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError check_range: { if (operand_ty.zigTypeTag(mod) == .Int) { - var arena = std.heap.ArenaAllocator.init(gpa); - defer arena.deinit(); - - const min_int = try operand_ty.minInt(arena.allocator(), mod); + const min_int = try operand_ty.minInt(mod); const max_int = try operand_ty.maxIntScalar(mod, Type.comptime_int); if (try range_set.spans(min_int, max_int, operand_ty)) { if (special_prong == .@"else") { @@ -11493,8 +11501,11 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError if (seen_errors.contains(error_name)) continue; cases_len += 1; - const item_val = try Value.Tag.@"error".create(sema.arena, .{ .name = error_name }); - const item_ref = try sema.addConstant(operand_ty, item_val); + const item_val = try mod.intern(.{ .err = .{ + .ty = operand_ty.ip_index, + .name = error_name_ip, + } }); + const item_ref = try sema.addConstant(operand_ty, item_val.toValue()); case_block.inline_case_capture = item_ref; case_block.instructions.shrinkRetainingCapacity(0); @@ -11665,7 +11676,7 @@ const RangeSetUnhandledIterator = struct { fn init(sema: *Sema, ty: Type, range_set: RangeSet) !RangeSetUnhandledIterator { const mod = sema.mod; - const min = try ty.minInt(sema.arena, mod); + const min = try ty.minInt(mod); const max = try ty.maxIntScalar(mod, Type.comptime_int); return RangeSetUnhandledIterator{ @@ -11788,9 +11799,10 @@ fn validateSwitchItemError( src_node_offset: i32, switch_prong_src: Module.SwitchProngSrc, ) CompileError!void { + const ip = &sema.mod.intern_pool; const item_tv = try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none); // TODO: Do i need to typecheck here? 
- const error_name = item_tv.val.castTag(.@"error").?.data.name; + const error_name = ip.stringToSlice(ip.indexToKey(item_tv.val.ip_index).err.name); const maybe_prev_src = if (try seen_errors.fetchPut(error_name, switch_prong_src)) |prev| prev.value else @@ -11983,7 +11995,7 @@ fn maybeErrorUnwrapCondbr(sema: *Sema, block: *Block, body: []const Zir.Inst.Ind } if (try sema.resolveDefinedValue(block, cond_src, err_operand)) |val| { if (!operand_ty.isError(mod)) return; - if (val.getError() == null) return; + if (val.getError(mod) == null) return; try sema.maybeErrorUnwrapComptime(block, body, err_operand); } } @@ -12005,7 +12017,7 @@ fn maybeErrorUnwrapComptime(sema: *Sema, block: *Block, body: []const Zir.Inst.I const src = inst_data.src(); if (try sema.resolveDefinedValue(block, src, operand)) |val| { - if (val.getError()) |name| { + if (val.getError(sema.mod)) |name| { return sema.fail(block, src, "caught unexpected error '{s}'", .{name}); } } @@ -12172,11 +12184,11 @@ fn zirRetErrValueCode(sema: *Sema, inst: Zir.Inst.Index) CompileError!Air.Inst.R // Return the error code from the function. const kv = try mod.getErrorValue(err_name); - const result_inst = try sema.addConstant( - try mod.singleErrorSetType(kv.key), - try Value.Tag.@"error".create(sema.arena, .{ .name = kv.key }), - ); - return result_inst; + const error_set_type = try mod.singleErrorSetType(kv.key); + return sema.addConstant(error_set_type, (try mod.intern(.{ .err = .{ + .ty = error_set_type.ip_index, + .name = mod.intern_pool.getString(kv.key).unwrap().?, + } })).toValue()); } fn zirShl( @@ -12301,7 +12313,7 @@ fn zirShl( { const max_int = try sema.addConstant( lhs_ty, - try lhs_ty.maxInt(sema.arena, mod, lhs_ty), + try lhs_ty.maxInt(mod, lhs_ty), ); const rhs_limited = try sema.analyzeMinMax(block, rhs_src, .min, &.{ rhs, max_int }, &.{ rhs_src, rhs_src }); break :rhs try sema.intCast(block, src, lhs_ty, rhs_src, rhs_limited, rhs_src, false); @@ -12316,7 +12328,7 @@ fn zirShl( if (!std.math.isPowerOfTwo(bit_count)) { const bit_count_val = try mod.intValue(scalar_rhs_ty, bit_count); const ok = if (rhs_ty.zigTypeTag(mod) == .Vector) ok: { - const bit_count_inst = try sema.addConstant(rhs_ty, try Value.Tag.repeated.create(sema.arena, bit_count_val)); + const bit_count_inst = try sema.addConstant(rhs_ty, try sema.splat(rhs_ty, bit_count_val)); const lt = try block.addCmpVector(rhs, bit_count_inst, .lt); break :ok try block.addInst(.{ .tag = .reduce, @@ -12466,7 +12478,7 @@ fn zirShr( const bit_count_val = try mod.intValue(scalar_ty, bit_count); const ok = if (rhs_ty.zigTypeTag(mod) == .Vector) ok: { - const bit_count_inst = try sema.addConstant(rhs_ty, try Value.Tag.repeated.create(sema.arena, bit_count_val)); + const bit_count_inst = try sema.addConstant(rhs_ty, try sema.splat(rhs_ty, bit_count_val)); const lt = try block.addCmpVector(rhs, bit_count_inst, .lt); break :ok try block.addInst(.{ .tag = .reduce, @@ -13179,11 +13191,7 @@ fn zirNegate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. 
return block.addUnOp(if (block.float_mode == .Optimized) .neg_optimized else .neg, rhs); } - const lhs = if (rhs_ty.zigTypeTag(mod) == .Vector) - try sema.addConstant(rhs_ty, try Value.Tag.repeated.create(sema.arena, try mod.intValue(rhs_scalar_ty, 0))) - else - try sema.addConstant(rhs_ty, try mod.intValue(rhs_ty, 0)); - + const lhs = try sema.addConstant(rhs_ty, try sema.splat(rhs_ty, try mod.intValue(rhs_scalar_ty, 0))); return sema.analyzeArithmetic(block, .sub, lhs, rhs, src, lhs_src, rhs_src, true); } @@ -13203,11 +13211,7 @@ fn zirNegateWrap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! else => return sema.fail(block, src, "negation of type '{}'", .{rhs_ty.fmt(sema.mod)}), } - const lhs = if (rhs_ty.zigTypeTag(mod) == .Vector) - try sema.addConstant(rhs_ty, try Value.Tag.repeated.create(sema.arena, try mod.intValue(rhs_scalar_ty, 0))) - else - try sema.addConstant(rhs_ty, try mod.intValue(rhs_ty, 0)); - + const lhs = try sema.addConstant(rhs_ty, try sema.splat(rhs_ty, try mod.intValue(rhs_scalar_ty, 0))); return sema.analyzeArithmetic(block, .subwrap, lhs, rhs, src, lhs_src, rhs_src, true); } @@ -13254,8 +13258,6 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins .override = &[_]?LazySrcLoc{ lhs_src, rhs_src }, }); - const is_vector = resolved_type.zigTypeTag(mod) == .Vector; - const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src); const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src); @@ -13325,9 +13327,7 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), else => unreachable, }; - const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, scalar_zero); - } else scalar_zero; + const zero_val = try sema.splat(resolved_type, scalar_zero); return sema.addConstant(resolved_type, zero_val); } } @@ -13427,8 +13427,6 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai .override = &[_]?LazySrcLoc{ lhs_src, rhs_src }, }); - const is_vector = resolved_type.zigTypeTag(mod) == .Vector; - const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src); const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src); @@ -13469,9 +13467,7 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), else => unreachable, }; - const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, scalar_zero); - } else scalar_zero; + const zero_val = try sema.splat(resolved_type, scalar_zero); return sema.addConstant(resolved_type, zero_val); } } @@ -13555,7 +13551,7 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai else => unreachable, }; if (resolved_type.zigTypeTag(mod) == .Vector) { - const zero_val = try Value.Tag.repeated.create(sema.arena, scalar_zero); + const zero_val = try sema.splat(resolved_type, scalar_zero); const zero = try sema.addConstant(resolved_type, zero_val); const eql = try block.addCmpVector(remainder, zero, .eq); break :ok try block.addInst(.{ @@ -13600,8 +13596,6 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai .override = &[_]?LazySrcLoc{ lhs_src, rhs_src }, }); - const is_vector = resolved_type.zigTypeTag(mod) == .Vector; - const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src); const casted_rhs = try 
sema.coerce(block, resolved_type, rhs, rhs_src); @@ -13644,9 +13638,7 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), else => unreachable, }; - const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, scalar_zero); - } else scalar_zero; + const zero_val = try sema.splat(resolved_type, scalar_zero); return sema.addConstant(resolved_type, zero_val); } } @@ -13721,8 +13713,6 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai .override = &[_]?LazySrcLoc{ lhs_src, rhs_src }, }); - const is_vector = resolved_type.zigTypeTag(mod) == .Vector; - const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src); const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src); @@ -13765,9 +13755,7 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), else => unreachable, }; - const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, scalar_zero); - } else scalar_zero; + const zero_val = try sema.splat(resolved_type, scalar_zero); return sema.addConstant(resolved_type, zero_val); } } @@ -13843,12 +13831,9 @@ fn addDivIntOverflowSafety( return; } - const min_int = try resolved_type.minInt(sema.arena, mod); + const min_int = try resolved_type.minInt(mod); const neg_one_scalar = try mod.intValue(lhs_scalar_ty, -1); - const neg_one = if (resolved_type.zigTypeTag(mod) == .Vector) - try Value.Tag.repeated.create(sema.arena, neg_one_scalar) - else - neg_one_scalar; + const neg_one = try sema.splat(resolved_type, neg_one_scalar); // If the LHS is comptime-known to be not equal to the min int, // no overflow is possible. @@ -13924,7 +13909,7 @@ fn addDivByZeroSafety( else try mod.floatValue(resolved_type.scalarType(mod), 0); const ok = if (resolved_type.zigTypeTag(mod) == .Vector) ok: { - const zero_val = try Value.Tag.repeated.create(sema.arena, scalar_zero); + const zero_val = try sema.splat(resolved_type, scalar_zero); const zero = try sema.addConstant(resolved_type, zero_val); const ok = try block.addCmpVector(casted_rhs, zero, .neq); break :ok try block.addInst(.{ @@ -14012,9 +13997,10 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), else => unreachable, }; - const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, scalar_zero); - } else scalar_zero; + const zero_val = if (is_vector) (try mod.intern(.{ .aggregate = .{ + .ty = resolved_type.ip_index, + .storage = .{ .repeated_elem = scalar_zero.ip_index }, + } })).toValue() else scalar_zero; return sema.addConstant(resolved_type, zero_val); } } else if (lhs_scalar_ty.isSignedInt(mod)) { @@ -14399,12 +14385,12 @@ fn zirOverflowArithmetic( // Otherwise, if either of the argument is undefined, undefined is returned. 
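// The repeated `sema.splat(dest_ty, ...)` calls below replace the old
// `maybeRepeated` helper: for vector types it interns a `repeated_elem`
// aggregate, and for scalars it returns the value unchanged (see the renamed
// helper further down in this diff).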
if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) { - break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = rhs }; + break :result .{ .overflow_bit = try sema.splat(dest_ty, zero), .inst = rhs }; } } if (maybe_rhs_val) |rhs_val| { if (!rhs_val.isUndef(mod) and (try rhs_val.compareAllWithZeroAdvanced(.eq, sema))) { - break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = lhs }; + break :result .{ .overflow_bit = try sema.splat(dest_ty, zero), .inst = lhs }; } } if (maybe_lhs_val) |lhs_val| { @@ -14425,7 +14411,7 @@ fn zirOverflowArithmetic( if (rhs_val.isUndef(mod)) { break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef }; } else if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { - break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = lhs }; + break :result .{ .overflow_bit = try sema.splat(dest_ty, zero), .inst = lhs }; } else if (maybe_lhs_val) |lhs_val| { if (lhs_val.isUndef(mod)) { break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef }; @@ -14444,9 +14430,9 @@ fn zirOverflowArithmetic( if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef(mod)) { if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { - break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = lhs }; - } else if (try sema.compareAll(lhs_val, .eq, try maybeRepeated(sema, dest_ty, scalar_one), dest_ty)) { - break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = rhs }; + break :result .{ .overflow_bit = try sema.splat(dest_ty, zero), .inst = lhs }; + } else if (try sema.compareAll(lhs_val, .eq, try sema.splat(dest_ty, scalar_one), dest_ty)) { + break :result .{ .overflow_bit = try sema.splat(dest_ty, zero), .inst = rhs }; } } } @@ -14454,9 +14440,9 @@ fn zirOverflowArithmetic( if (maybe_rhs_val) |rhs_val| { if (!rhs_val.isUndef(mod)) { if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { - break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = rhs }; - } else if (try sema.compareAll(rhs_val, .eq, try maybeRepeated(sema, dest_ty, scalar_one), dest_ty)) { - break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = lhs }; + break :result .{ .overflow_bit = try sema.splat(dest_ty, zero), .inst = rhs }; + } else if (try sema.compareAll(rhs_val, .eq, try sema.splat(dest_ty, scalar_one), dest_ty)) { + break :result .{ .overflow_bit = try sema.splat(dest_ty, zero), .inst = lhs }; } } } @@ -14478,12 +14464,12 @@ fn zirOverflowArithmetic( // Oterhwise if either of the arguments is undefined, both results are undefined. 
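These overflow-arithmetic hunks only change how the all-zero overflow bit is materialized; the comptime fast paths themselves (returning the other operand for x + 0, x * 1, and so on) are untouched. For reference, the identities they rely on, checked with the language builtins rather than any Sema machinery:

    const std = @import("std");

    test "identities behind the overflow-arithmetic fast paths" {
        const x: u8 = 200;
        // x + 0 and x * 1 can never overflow, so the compiler may return
        // the operand together with an all-zero overflow bit directly.
        const add = @addWithOverflow(x, 0);
        const mul = @mulWithOverflow(x, 1);
        try std.testing.expectEqual(x, add[0]);
        try std.testing.expectEqual(@as(u1, 0), add[1]);
        try std.testing.expectEqual(x, mul[0]);
        try std.testing.expectEqual(@as(u1, 0), mul[1]);
    }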
if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) { - break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = lhs }; + break :result .{ .overflow_bit = try sema.splat(dest_ty, zero), .inst = lhs }; } } if (maybe_rhs_val) |rhs_val| { if (!rhs_val.isUndef(mod) and (try rhs_val.compareAllWithZeroAdvanced(.eq, sema))) { - break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, zero), .inst = lhs }; + break :result .{ .overflow_bit = try sema.splat(dest_ty, zero), .inst = lhs }; } } if (maybe_lhs_val) |lhs_val| { @@ -14544,10 +14530,14 @@ fn zirOverflowArithmetic( return block.addAggregateInit(tuple_ty, element_refs); } -fn maybeRepeated(sema: *Sema, ty: Type, val: Value) !Value { +fn splat(sema: *Sema, ty: Type, val: Value) !Value { const mod = sema.mod; if (ty.zigTypeTag(mod) != .Vector) return val; - return Value.Tag.repeated.create(sema.arena, val); + const repeated = try mod.intern(.{ .aggregate = .{ + .ty = ty.ip_index, + .storage = .{ .repeated_elem = val.ip_index }, + } }); + return repeated.toValue(); } fn overflowArithmeticTupleType(sema: *Sema, ty: Type) !Type { @@ -14603,8 +14593,6 @@ fn analyzeArithmetic( .override = &[_]?LazySrcLoc{ lhs_src, rhs_src }, }); - const is_vector = resolved_type.zigTypeTag(mod) == .Vector; - const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src); const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src); @@ -14853,9 +14841,7 @@ fn analyzeArithmetic( } else if (resolved_type.isAnyFloat()) { break :lz; } - const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, scalar_zero); - } else scalar_zero; + const zero_val = try sema.splat(resolved_type, scalar_zero); return sema.addConstant(resolved_type, zero_val); } if (try sema.compareAll(lhs_val, .eq, try mod.intValue(resolved_type, 1), resolved_type)) { @@ -14886,9 +14872,7 @@ fn analyzeArithmetic( } else if (resolved_type.isAnyFloat()) { break :rz; } - const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, scalar_zero); - } else scalar_zero; + const zero_val = try sema.splat(resolved_type, scalar_zero); return sema.addConstant(resolved_type, zero_val); } if (try sema.compareAll(rhs_val, .eq, try mod.intValue(resolved_type, 1), resolved_type)) { @@ -14931,9 +14915,7 @@ fn analyzeArithmetic( if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef(mod)) { if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { - const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, scalar_zero); - } else scalar_zero; + const zero_val = try sema.splat(resolved_type, scalar_zero); return sema.addConstant(resolved_type, zero_val); } if (try sema.compareAll(lhs_val, .eq, try mod.intValue(resolved_type, 1), resolved_type)) { @@ -14947,9 +14929,7 @@ fn analyzeArithmetic( return sema.addConstUndef(resolved_type); } if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { - const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, scalar_zero); - } else scalar_zero; + const zero_val = try sema.splat(resolved_type, scalar_zero); return sema.addConstant(resolved_type, zero_val); } if (try sema.compareAll(rhs_val, .eq, try mod.intValue(resolved_type, 1), resolved_type)) { @@ -14979,9 +14959,7 @@ fn analyzeArithmetic( if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef(mod)) { if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { - const zero_val = if (is_vector) b: { - break :b 
try Value.Tag.repeated.create(sema.arena, scalar_zero); - } else scalar_zero; + const zero_val = try sema.splat(resolved_type, scalar_zero); return sema.addConstant(resolved_type, zero_val); } if (try sema.compareAll(lhs_val, .eq, try mod.intValue(resolved_type, 1), resolved_type)) { @@ -14994,9 +14972,7 @@ fn analyzeArithmetic( return sema.addConstUndef(resolved_type); } if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { - const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, scalar_zero); - } else scalar_zero; + const zero_val = try sema.splat(resolved_type, scalar_zero); return sema.addConstant(resolved_type, zero_val); } if (try sema.compareAll(rhs_val, .eq, try mod.intValue(resolved_type, 1), resolved_type)) { @@ -15138,7 +15114,7 @@ fn analyzePtrArithmetic( if (air_tag == .ptr_sub) { return sema.fail(block, op_src, "TODO implement Sema comptime pointer subtraction", .{}); } - const new_ptr_val = try ptr_val.elemPtr(ptr_ty, sema.arena, offset_int, sema.mod); + const new_ptr_val = try ptr_val.elemPtr(ptr_ty, offset_int, sema.mod); return sema.addConstant(new_ptr_ty, new_ptr_val); } else break :rs offset_src; } else break :rs ptr_src; @@ -15184,7 +15160,7 @@ fn zirAsm( const inputs_len = @truncate(u5, extended.small >> 5); const clobbers_len = @truncate(u5, extended.small >> 10); const is_volatile = @truncate(u1, extended.small >> 15) != 0; - const is_global_assembly = sema.func == null; + const is_global_assembly = sema.func_index == .none; const asm_source: []const u8 = if (tmpl_is_expr) blk: { const tmpl = @intToEnum(Zir.Inst.Ref, extra.data.asm_source); @@ -15387,12 +15363,7 @@ fn zirCmpEq( if (lval.isUndef(mod) or rval.isUndef(mod)) { return sema.addConstUndef(Type.bool); } - // TODO optimisation opportunity: evaluate if mem.eql is faster with the names, - // or calling to Module.getErrorValue to get the values and then compare them is - // faster. - const lhs_name = lval.castTag(.@"error").?.data.name; - const rhs_name = rval.castTag(.@"error").?.data.name; - if (mem.eql(u8, lhs_name, rhs_name) == (op == .eq)) { + if (lval.toIntern() == rval.toIntern()) { return Air.Inst.Ref.bool_true; } else { return Air.Inst.Ref.bool_false; @@ -15650,8 +15621,8 @@ fn zirSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. .AnyFrame, => {}, } - const val = try ty.lazyAbiSize(mod, sema.arena); - if (val.isLazySize()) { + const val = try ty.lazyAbiSize(mod); + if (val.isLazySize(mod)) { try sema.queueFullTypeResolution(ty); } return sema.addConstant(Type.comptime_int, val); @@ -15760,11 +15731,11 @@ fn zirClosureGet( scope = scope.parent.?; }; - if (tv.val.ip_index == .unreachable_value and !block.is_typeof and sema.func == null) { + if (tv.val.ip_index == .unreachable_value and !block.is_typeof and sema.func_index == .none) { const msg = msg: { const name = name: { const file = sema.owner_decl.getFileScope(mod); - const tree = file.getTree(mod.gpa) catch |err| { + const tree = file.getTree(sema.gpa) catch |err| { // In this case we emit a warning + a less precise source location. 
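Two changes above share one idea: splat now builds its repeated-element aggregate through mod.intern, and zirCmpEq compares errors with lval.toIntern() == rval.toIntern() instead of comparing name strings byte by byte. Once values are hash-consed in an intern pool, structural equality degenerates to integer equality. A toy illustration of that property (hypothetical key shape, not the real InternPool):

    const std = @import("std");

    const Key = struct { ty: u32, repeated_elem: u32 };

    const Pool = struct {
        map: std.AutoHashMap(Key, u32),

        // Return the existing index for an equal key, or assign a new one.
        fn get(pool: *Pool, key: Key) !u32 {
            const gop = try pool.map.getOrPut(key);
            if (!gop.found_existing) gop.value_ptr.* = pool.map.count() - 1;
            return gop.value_ptr.*;
        }
    };

    pub fn main() !void {
        var gpa_state = std.heap.GeneralPurposeAllocator(.{}){};
        defer _ = gpa_state.deinit();
        var pool = Pool{ .map = std.AutoHashMap(Key, u32).init(gpa_state.allocator()) };
        defer pool.map.deinit();

        const a = try pool.get(.{ .ty = 1, .repeated_elem = 0 });
        const b = try pool.get(.{ .ty = 1, .repeated_elem = 0 });
        std.debug.assert(a == b); // structural equality is now index equality
    }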
log.warn("unable to load {s}: {s}", .{ file.sub_file_path, @errorName(err), @@ -15788,11 +15759,11 @@ fn zirClosureGet( return sema.failWithOwnedErrorMsg(msg); } - if (tv.val.ip_index == .unreachable_value and !block.is_typeof and !block.is_comptime and sema.func != null) { + if (tv.val.ip_index == .unreachable_value and !block.is_typeof and !block.is_comptime and sema.func_index != .none) { const msg = msg: { const name = name: { const file = sema.owner_decl.getFileScope(mod); - const tree = file.getTree(mod.gpa) catch |err| { + const tree = file.getTree(sema.gpa) catch |err| { // In this case we emit a warning + a less precise source location. log.warn("unable to load {s}: {s}", .{ file.sub_file_path, @errorName(err), @@ -15868,14 +15839,17 @@ fn zirBuiltinSrc( const func_name_val = blk: { var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); - const name = std.mem.span(fn_owner_decl.name); + const name = mem.span(fn_owner_decl.name); const bytes = try anon_decl.arena().dupe(u8, name[0 .. name.len + 1]); const new_decl = try anon_decl.finish( try Type.array(anon_decl.arena(), bytes.len - 1, try mod.intValue(Type.u8, 0), Type.u8, mod), try Value.Tag.bytes.create(anon_decl.arena(), bytes), 0, // default alignment ); - break :blk try Value.Tag.decl_ref.create(sema.arena, new_decl); + break :blk try mod.intern(.{ .ptr = .{ + .ty = .slice_const_u8_sentinel_0_type, + .addr = .{ .decl = new_decl }, + } }); }; const file_name_val = blk: { @@ -15888,27 +15862,35 @@ fn zirBuiltinSrc( try Value.Tag.bytes.create(anon_decl.arena(), name[0 .. name.len + 1]), 0, // default alignment ); - break :blk try Value.Tag.decl_ref.create(sema.arena, new_decl); + break :blk try mod.intern(.{ .ptr = .{ + .ty = .slice_const_u8_sentinel_0_type, + .addr = .{ .decl = new_decl }, + } }); }; - const field_values = try sema.arena.alloc(Value, 4); - // file: [:0]const u8, - field_values[0] = file_name_val; - // fn_name: [:0]const u8, - field_values[1] = func_name_val; - // line: u32 - field_values[2] = try Value.Tag.runtime_value.create(sema.arena, try mod.intValue(Type.u32, extra.line + 1)); - // column: u32, - field_values[3] = try mod.intValue(Type.u32, extra.column + 1); - - return sema.addConstant( - try sema.getBuiltinType("SourceLocation"), - try Value.Tag.aggregate.create(sema.arena, field_values), - ); + const src_loc_ty = try sema.getBuiltinType("SourceLocation"); + const fields = .{ + // file: [:0]const u8, + file_name_val, + // fn_name: [:0]const u8, + func_name_val, + // line: u32, + try mod.intern(.{ .runtime_value = .{ + .ty = .u32_type, + .val = (try mod.intValue(Type.u32, extra.line + 1)).ip_index, + } }), + // column: u32, + (try mod.intValue(Type.u32, extra.column + 1)).ip_index, + }; + return sema.addConstant(src_loc_ty, (try mod.intern(.{ .aggregate = .{ + .ty = src_loc_ty.ip_index, + .storage = .{ .elems = &fields }, + } })).toValue()); } fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const mod = sema.mod; + const gpa = sema.gpa; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const ty = try sema.resolveType(block, src, inst_data.operand); @@ -15916,69 +15898,20 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const type_info_tag_ty = type_info_ty.unionTagType(mod).?; switch (ty.zigTypeTag(mod)) { - .Type => return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try mod.enumValueFieldIndex(type_info_tag_ty, 
@enumToInt(std.builtin.TypeId.Type)), - .val = Value.void, - }), - ), - .Void => return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Void)), - .val = Value.void, - }), - ), - .Bool => return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Bool)), - .val = Value.void, - }), - ), - .NoReturn => return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.NoReturn)), - .val = Value.void, - }), - ), - .ComptimeFloat => return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.ComptimeFloat)), - .val = Value.void, - }), - ), - .ComptimeInt => return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.ComptimeInt)), - .val = Value.void, - }), - ), - .Undefined => return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Undefined)), - .val = Value.void, - }), - ), - .Null => return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Null)), - .val = Value.void, - }), - ), - .EnumLiteral => return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.EnumLiteral)), - .val = Value.void, - }), - ), + .Type, + .Void, + .Bool, + .NoReturn, + .ComptimeFloat, + .ComptimeInt, + .Undefined, + .Null, + .EnumLiteral, + => |type_info_tag| return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ + .ty = type_info_ty.ip_index, + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(type_info_tag))).ip_index, + .val = .void_value, + } })).toValue()), .Fn => { // TODO: look into memoizing this result. 
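The nine payload-free cases of zirTypeInfo (Type, Void, Bool, and so on through EnumLiteral) previously repeated the same addConstant body verbatim; the rewrite above folds them into one multi-prong switch arm whose capture is the matched tag, so a single body can still compute @enumToInt(type_info_tag) per case. The capture mechanics in isolation:

    const std = @import("std");

    const TypeId = enum { Type, Void, Bool, NoReturn };

    fn tagIndex(id: TypeId) u32 {
        return switch (id) {
            // One body serves all prongs; the capture binds the value that
            // actually matched, so every case still sees its own tag.
            .Type, .Void, .Bool, .NoReturn => |tag| @enumToInt(tag),
        };
    }

    pub fn main() void {
        std.debug.print("{} {}\n", .{ tagIndex(.Type), tagIndex(.NoReturn) });
    }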
const info = mod.typeToFunc(ty).?; @@ -15986,11 +15919,34 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai var params_anon_decl = try block.startAnonDecl(); defer params_anon_decl.deinit(); - const param_vals = try params_anon_decl.arena().alloc(Value, info.param_types.len); + const fn_info_decl_index = (try sema.namespaceLookup( + block, + src, + type_info_ty.getNamespaceIndex(mod).unwrap().?, + "Fn", + )).?; + try mod.declareDeclDependency(sema.owner_decl_index, fn_info_decl_index); + try sema.ensureDeclAnalyzed(fn_info_decl_index); + const fn_info_decl = mod.declPtr(fn_info_decl_index); + const fn_info_ty = fn_info_decl.val.toType(); + + const param_info_decl_index = (try sema.namespaceLookup( + block, + src, + fn_info_ty.getNamespaceIndex(mod).unwrap().?, + "Param", + )).?; + try mod.declareDeclDependency(sema.owner_decl_index, param_info_decl_index); + try sema.ensureDeclAnalyzed(param_info_decl_index); + const param_info_decl = mod.declPtr(param_info_decl_index); + const param_info_ty = param_info_decl.val.toType(); + + const param_vals = try gpa.alloc(InternPool.Index, info.param_types.len); + defer gpa.free(param_vals); for (param_vals, info.param_types, 0..) |*param_val, param_ty, i| { const is_generic = param_ty == .generic_poison_type; - const param_ty_val = try mod.intern_pool.get(mod.gpa, .{ .opt = .{ - .ty = try mod.intern_pool.get(mod.gpa, .{ .opt_type = .type_type }), + const param_ty_val = try mod.intern_pool.get(gpa, .{ .opt = .{ + .ty = try mod.intern_pool.get(gpa, .{ .opt_type = .type_type }), .val = if (is_generic) .none else param_ty, } }); @@ -15999,87 +15955,74 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai break :blk @truncate(u1, info.noalias_bits >> index) != 0; }; - const param_fields = try params_anon_decl.arena().create([3]Value); - param_fields.* = .{ + const param_fields = .{ // is_generic: bool, - Value.makeBool(is_generic), + Value.makeBool(is_generic).ip_index, // is_noalias: bool, - Value.makeBool(is_noalias), + Value.makeBool(is_noalias).ip_index, // type: ?type, - param_ty_val.toValue(), + param_ty_val, }; - param_val.* = try Value.Tag.aggregate.create(params_anon_decl.arena(), param_fields); + param_val.* = try mod.intern(.{ .aggregate = .{ + .ty = param_info_ty.ip_index, + .storage = .{ .elems = ¶m_fields }, + } }); } const args_val = v: { - const fn_info_decl_index = (try sema.namespaceLookup( - block, - src, - type_info_ty.getNamespaceIndex(mod).unwrap().?, - "Fn", - )).?; - try mod.declareDeclDependency(sema.owner_decl_index, fn_info_decl_index); - try sema.ensureDeclAnalyzed(fn_info_decl_index); - const fn_info_decl = mod.declPtr(fn_info_decl_index); - const fn_ty = fn_info_decl.val.toType(); - const param_info_decl_index = (try sema.namespaceLookup( - block, - src, - fn_ty.getNamespaceIndex(mod).unwrap().?, - "Param", - )).?; - try mod.declareDeclDependency(sema.owner_decl_index, param_info_decl_index); - try sema.ensureDeclAnalyzed(param_info_decl_index); - const param_info_decl = mod.declPtr(param_info_decl_index); - const param_ty = param_info_decl.val.toType(); + const args_slice_ty = try mod.ptrType(.{ + .elem_type = param_info_ty.ip_index, + .size = .Slice, + .is_const = true, + }); const new_decl = try params_anon_decl.finish( try mod.arrayType(.{ .len = param_vals.len, - .child = param_ty.ip_index, + .child = param_info_ty.ip_index, .sentinel = .none, }), - try Value.Tag.aggregate.create( - params_anon_decl.arena(), - param_vals, - ), + (try mod.intern(.{ .aggregate 
= .{ + .ty = args_slice_ty.ip_index, + .storage = .{ .elems = param_vals }, + } })).toValue(), 0, // default alignment ); - break :v try Value.Tag.slice.create(sema.arena, .{ - .ptr = try Value.Tag.decl_ref.create(sema.arena, new_decl), - .len = try mod.intValue(Type.usize, param_vals.len), - }); + break :v try mod.intern(.{ .ptr = .{ + .ty = args_slice_ty.ip_index, + .addr = .{ .decl = new_decl }, + .len = (try mod.intValue(Type.usize, param_vals.len)).ip_index, + } }); }; - const ret_ty_opt = try mod.intern_pool.get(mod.gpa, .{ .opt = .{ - .ty = try mod.intern_pool.get(mod.gpa, .{ .opt_type = .type_type }), + const ret_ty_opt = try mod.intern(.{ .opt = .{ + .ty = try mod.intern_pool.get(gpa, .{ .opt_type = .type_type }), .val = if (info.return_type == .generic_poison_type) .none else info.return_type, } }); const callconv_ty = try sema.getBuiltinType("CallingConvention"); - const field_values = try sema.arena.create([6]Value); - field_values.* = .{ + const field_values = .{ // calling_convention: CallingConvention, - try mod.enumValueFieldIndex(callconv_ty, @enumToInt(info.cc)), + (try mod.enumValueFieldIndex(callconv_ty, @enumToInt(info.cc))).ip_index, // alignment: comptime_int, - try mod.intValue(Type.comptime_int, ty.abiAlignment(mod)), + (try mod.intValue(Type.comptime_int, ty.abiAlignment(mod))).ip_index, // is_generic: bool, - Value.makeBool(info.is_generic), + Value.makeBool(info.is_generic).ip_index, // is_var_args: bool, - Value.makeBool(info.is_var_args), + Value.makeBool(info.is_var_args).ip_index, // return_type: ?type, - ret_ty_opt.toValue(), + ret_ty_opt, // args: []const Fn.Param, args_val, }; - - return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Fn)), - .val = try Value.Tag.aggregate.create(sema.arena, field_values), - }), - ); + return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ + .ty = type_info_ty.ip_index, + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Fn))).ip_index, + .val = try mod.intern(.{ .aggregate = .{ + .ty = fn_info_ty.ip_index, + .storage = .{ .elems = &field_values }, + } }), + } })).toValue()); }, .Int => { const signedness_ty = try sema.getBuiltinType("Signedness"); @@ -16099,24 +16042,36 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai ); }, .Float => { - const field_values = try sema.arena.alloc(Value, 1); - // bits: u16, - field_values[0] = try mod.intValue(Type.u16, ty.bitSize(mod)); - - return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Float)), - .val = try Value.Tag.aggregate.create(sema.arena, field_values), - }), - ); + const float_info_decl_index = (try sema.namespaceLookup( + block, + src, + type_info_ty.getNamespaceIndex(mod).unwrap().?, + "Float", + )).?; + try mod.declareDeclDependency(sema.owner_decl_index, float_info_decl_index); + try sema.ensureDeclAnalyzed(float_info_decl_index); + const float_info_decl = mod.declPtr(float_info_decl_index); + const float_ty = float_info_decl.val.toType(); + + const field_vals = .{ + // bits: u16, + (try mod.intValue(Type.u16, ty.bitSize(mod))).ip_index, + }; + return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ + .ty = type_info_ty.ip_index, + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Float))).ip_index, + .val = try 
mod.intern(.{ .aggregate = .{ + .ty = float_ty.ip_index, + .storage = .{ .elems = &field_vals }, + } }), + } })).toValue()); }, .Pointer => { const info = ty.ptrInfo(mod); const alignment = if (info.@"align" != 0) try mod.intValue(Type.comptime_int, info.@"align") else - try info.pointee_type.lazyAbiAlignment(mod, sema.arena); + try info.pointee_type.lazyAbiAlignment(mod); const addrspace_ty = try sema.getBuiltinType("AddressSpace"); const pointer_ty = t: { @@ -16245,9 +16200,10 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // Build our list of Error values // Optional value is only null if anyerror // Value can be zero-length slice otherwise - const error_field_vals: ?[]Value = if (ty.isAnyError(mod)) null else blk: { + const error_field_vals = if (ty.isAnyError(mod)) null else blk: { const names = ty.errorSetNames(mod); - const vals = try fields_anon_decl.arena().alloc(Value, names.len); + const vals = try gpa.alloc(InternPool.Index, names.len); + defer gpa.free(vals); for (vals, names) |*field_val, name_ip| { const name = mod.intern_pool.stringToSlice(name_ip); const name_val = v: { @@ -16259,70 +16215,91 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]), 0, // default alignment ); - break :v try Value.Tag.decl_ref.create(fields_anon_decl.arena(), new_decl); + break :v try mod.intern(.{ .ptr = .{ + .ty = .slice_const_u8_type, + .addr = .{ .decl = new_decl }, + } }); }; - const error_field_fields = try fields_anon_decl.arena().create([1]Value); - error_field_fields.* = .{ + const error_field_fields = .{ // name: []const u8, name_val, }; - - field_val.* = try Value.Tag.aggregate.create( - fields_anon_decl.arena(), - error_field_fields, - ); + field_val.* = try mod.intern(.{ .aggregate = .{ + .ty = error_field_ty.ip_index, + .storage = .{ .elems = &error_field_fields }, + } }); } break :blk vals; }; // Build our ?[]const Error value - const errors_val = if (error_field_vals) |vals| v: { + const slice_errors_ty = try mod.ptrType(.{ + .elem_type = error_field_ty.ip_index, + .size = .Slice, + .is_const = true, + }); + const opt_slice_errors_ty = try mod.optionalType(slice_errors_ty.ip_index); + const errors_payload_val: InternPool.Index = if (error_field_vals) |vals| v: { + const array_errors_ty = try mod.arrayType(.{ + .len = vals.len, + .child = error_field_ty.ip_index, + .sentinel = .none, + }); const new_decl = try fields_anon_decl.finish( - try mod.arrayType(.{ - .len = vals.len, - .child = error_field_ty.ip_index, - .sentinel = .none, - }), - try Value.Tag.aggregate.create( - fields_anon_decl.arena(), - vals, - ), + array_errors_ty, + (try mod.intern(.{ .aggregate = .{ + .ty = array_errors_ty.ip_index, + .storage = .{ .elems = vals }, + } })).toValue(), 0, // default alignment ); - - const new_decl_val = try Value.Tag.decl_ref.create(sema.arena, new_decl); - const slice_val = try Value.Tag.slice.create(sema.arena, .{ - .ptr = new_decl_val, - .len = try mod.intValue(Type.usize, vals.len), - }); - break :v try Value.Tag.opt_payload.create(sema.arena, slice_val); - } else Value.null; + break :v try mod.intern(.{ .ptr = .{ + .ty = slice_errors_ty.ip_index, + .addr = .{ .decl = new_decl }, + } }); + } else .none; + const errors_val = try mod.intern(.{ .opt = .{ + .ty = opt_slice_errors_ty.ip_index, + .val = errors_payload_val, + } }); // Construct Type{ .ErrorSet = errors_val } - return sema.addConstant( - type_info_ty, - try 
Value.Tag.@"union".create(sema.arena, .{ - .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.ErrorSet)), - .val = errors_val, - }), - ); + return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ + .ty = type_info_ty.ip_index, + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.ErrorSet))).ip_index, + .val = errors_val, + } })).toValue()); }, .ErrorUnion => { - const field_values = try sema.arena.alloc(Value, 2); - // error_set: type, - field_values[0] = ty.errorUnionSet(mod).toValue(); - // payload: type, - field_values[1] = ty.errorUnionPayload(mod).toValue(); + const error_union_field_ty = t: { + const error_union_field_ty_decl_index = (try sema.namespaceLookup( + block, + src, + type_info_ty.getNamespaceIndex(mod).unwrap().?, + "ErrorUnion", + )).?; + try mod.declareDeclDependency(sema.owner_decl_index, error_union_field_ty_decl_index); + try sema.ensureDeclAnalyzed(error_union_field_ty_decl_index); + const error_union_field_ty_decl = mod.declPtr(error_union_field_ty_decl_index); + break :t error_union_field_ty_decl.val.toType(); + }; - return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.ErrorUnion)), - .val = try Value.Tag.aggregate.create(sema.arena, field_values), - }), - ); + const field_values = .{ + // error_set: type, + ty.errorUnionSet(mod).ip_index, + // payload: type, + ty.errorUnionPayload(mod).ip_index, + }; + return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ + .ty = type_info_ty.ip_index, + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.ErrorUnion))).ip_index, + .val = try mod.intern(.{ .aggregate = .{ + .ty = error_union_field_ty.ip_index, + .storage = .{ .elems = &field_values }, + } }), + } })).toValue()); }, .Enum => { // TODO: look into memoizing this result. @@ -16346,7 +16323,8 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai break :t enum_field_ty_decl.val.toType(); }; - const enum_field_vals = try fields_anon_decl.arena().alloc(Value, enum_type.names.len); + const enum_field_vals = try gpa.alloc(InternPool.Index, enum_type.names.len); + defer gpa.free(enum_field_vals); for (enum_field_vals, 0..) |*field_val, i| { const name_ip = enum_type.names[i]; @@ -16360,56 +16338,81 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. 
bytes.len + 1]), 0, // default alignment ); - break :v try Value.Tag.decl_ref.create(fields_anon_decl.arena(), new_decl); + break :v try mod.intern(.{ .ptr = .{ + .ty = .slice_const_u8_type, + .addr = .{ .decl = new_decl }, + } }); }; - const enum_field_fields = try fields_anon_decl.arena().create([2]Value); - enum_field_fields.* = .{ + const enum_field_fields = .{ // name: []const u8, name_val, // value: comptime_int, - try mod.intValue(Type.comptime_int, i), + (try mod.intValue(Type.comptime_int, i)).ip_index, }; - field_val.* = try Value.Tag.aggregate.create(fields_anon_decl.arena(), enum_field_fields); + field_val.* = try mod.intern(.{ .aggregate = .{ + .ty = enum_field_ty.ip_index, + .storage = .{ .elems = &enum_field_fields }, + } }); } const fields_val = v: { + const fields_array_ty = try mod.arrayType(.{ + .len = enum_field_vals.len, + .child = enum_field_ty.ip_index, + .sentinel = .none, + }); const new_decl = try fields_anon_decl.finish( - try mod.arrayType(.{ - .len = enum_field_vals.len, - .child = enum_field_ty.ip_index, - .sentinel = .none, - }), - try Value.Tag.aggregate.create( - fields_anon_decl.arena(), - enum_field_vals, - ), + fields_array_ty, + (try mod.intern(.{ .aggregate = .{ + .ty = fields_array_ty.ip_index, + .storage = .{ .elems = enum_field_vals }, + } })).toValue(), 0, // default alignment ); - break :v try Value.Tag.decl_ref.create(sema.arena, new_decl); + break :v try mod.intern(.{ .ptr = .{ + .ty = (try mod.ptrType(.{ + .elem_type = enum_field_ty.ip_index, + .size = .Slice, + .is_const = true, + })).ip_index, + .addr = .{ .decl = new_decl }, + } }); }; const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, enum_type.namespace); - const field_values = try sema.arena.create([4]Value); - field_values.* = .{ + const type_enum_ty = t: { + const type_enum_ty_decl_index = (try sema.namespaceLookup( + block, + src, + type_info_ty.getNamespaceIndex(mod).unwrap().?, + "Enum", + )).?; + try mod.declareDeclDependency(sema.owner_decl_index, type_enum_ty_decl_index); + try sema.ensureDeclAnalyzed(type_enum_ty_decl_index); + const type_enum_ty_decl = mod.declPtr(type_enum_ty_decl_index); + break :t type_enum_ty_decl.val.toType(); + }; + + const field_values = .{ // tag_type: type, - enum_type.tag_ty.toValue(), + enum_type.tag_ty, // fields: []const EnumField, fields_val, // decls: []const Declaration, decls_val, // is_exhaustive: bool, - is_exhaustive, + is_exhaustive.ip_index, }; - - return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Enum)), - .val = try Value.Tag.aggregate.create(sema.arena, field_values), - }), - ); + return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ + .ty = type_info_ty.ip_index, + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Enum))).ip_index, + .val = try mod.intern(.{ .aggregate = .{ + .ty = type_enum_ty.ip_index, + .storage = .{ .elems = &field_values }, + } }), + } })).toValue()); }, .Union => { // TODO: look into memoizing this result. 
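A recurring shape in these hunks: arrays of field values used to be arena-allocated []Value that had to outlive the call, but the interned InternPool.Index slices are only needed until mod.intern has consumed them, so they become gpa.alloc scratch buffers paired with defer gpa.free. The lifetime pattern on its own (toy computation, not compiler code):

    const std = @import("std");

    fn sumOfSquares(gpa: std.mem.Allocator, n: usize) !u64 {
        // Scratch buffer: allocate, fill, consume, free before returning,
        // the same alloc/defer-free pairing as param_vals and friends above.
        const scratch = try gpa.alloc(u64, n);
        defer gpa.free(scratch);
        for (scratch, 0..) |*slot, i| slot.* = @intCast(u64, i * i);
        var total: u64 = 0;
        for (scratch) |v| total += v;
        return total;
    }

    test "scratch buffer is freed with the frame" {
        try std.testing.expectEqual(@as(u64, 14), try sumOfSquares(std.testing.allocator, 4));
    }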
@@ -16417,6 +16420,19 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai var fields_anon_decl = try block.startAnonDecl(); defer fields_anon_decl.deinit(); + const type_union_ty = t: { + const type_union_ty_decl_index = (try sema.namespaceLookup( + block, + src, + type_info_ty.getNamespaceIndex(mod).unwrap().?, + "Union", + )).?; + try mod.declareDeclDependency(sema.owner_decl_index, type_union_ty_decl_index); + try sema.ensureDeclAnalyzed(type_union_ty_decl_index); + const type_union_ty_decl = mod.declPtr(type_union_ty_decl_index); + break :t type_union_ty_decl.val.toType(); + }; + const union_field_ty = t: { const union_field_ty_decl_index = (try sema.namespaceLookup( block, @@ -16435,7 +16451,8 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const layout = union_ty.containerLayout(mod); const union_fields = union_ty.unionFields(mod); - const union_field_vals = try fields_anon_decl.arena().alloc(Value, union_fields.count()); + const union_field_vals = try gpa.alloc(InternPool.Index, union_fields.count()); + defer gpa.free(union_field_vals); for (union_field_vals, 0..) |*field_val, i| { const field = union_fields.values()[i]; @@ -16449,51 +16466,62 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]), 0, // default alignment ); - break :v try Value.Tag.decl_ref.create(fields_anon_decl.arena(), new_decl); + break :v try mod.intern(.{ .ptr = .{ + .ty = .slice_const_u8_type, + .addr = .{ .decl = new_decl }, + } }); }; - const union_field_fields = try fields_anon_decl.arena().create([3]Value); const alignment = switch (layout) { .Auto, .Extern => try sema.unionFieldAlignment(field), .Packed => 0, }; - union_field_fields.* = .{ + const union_field_fields = .{ // name: []const u8, name_val, // type: type, - field.ty.toValue(), + field.ty.ip_index, // alignment: comptime_int, - try mod.intValue(Type.comptime_int, alignment), + (try mod.intValue(Type.comptime_int, alignment)).ip_index, }; - field_val.* = try Value.Tag.aggregate.create(fields_anon_decl.arena(), union_field_fields); + field_val.* = try mod.intern(.{ .aggregate = .{ + .ty = union_field_ty.ip_index, + .storage = .{ .elems = &union_field_fields }, + } }); } const fields_val = v: { + const array_fields_ty = try mod.arrayType(.{ + .len = union_field_vals.len, + .child = union_field_ty.ip_index, + .sentinel = .none, + }); const new_decl = try fields_anon_decl.finish( - try mod.arrayType(.{ - .len = union_field_vals.len, - .child = union_field_ty.ip_index, - .sentinel = .none, - }), - try Value.Tag.aggregate.create( - fields_anon_decl.arena(), - try fields_anon_decl.arena().dupe(Value, union_field_vals), - ), + array_fields_ty, + (try mod.intern(.{ .aggregate = .{ + .ty = array_fields_ty.ip_index, + .storage = .{ .elems = union_field_vals }, + } })).toValue(), 0, // default alignment ); - break :v try Value.Tag.slice.create(sema.arena, .{ - .ptr = try Value.Tag.decl_ref.create(sema.arena, new_decl), - .len = try mod.intValue(Type.usize, union_field_vals.len), - }); + break :v try mod.intern(.{ .ptr = .{ + .ty = (try mod.ptrType(.{ + .elem_type = union_field_ty.ip_index, + .size = .Slice, + .is_const = true, + })).ip_index, + .addr = .{ .decl = new_decl }, + .len = (try mod.intValue(Type.usize, union_field_vals.len)).ip_index, + } }); }; const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, union_ty.getNamespaceIndex(mod)); - const enum_tag_ty_val = if 
(union_ty.unionTagType(mod)) |tag_ty| v: { - const ty_val = tag_ty.toValue(); - break :v try Value.Tag.opt_payload.create(sema.arena, ty_val); - } else Value.null; + const enum_tag_ty_val = try mod.intern(.{ .opt = .{ + .ty = (try mod.optionalType(.type_type)).ip_index, + .val = if (union_ty.unionTagType(mod)) |tag_ty| tag_ty.ip_index else .none, + } }); const container_layout_ty = t: { const decl_index = (try sema.namespaceLookup( @@ -16508,10 +16536,9 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai break :t decl.val.toType(); }; - const field_values = try sema.arena.create([4]Value); - field_values.* = .{ + const field_values = .{ // layout: ContainerLayout, - try mod.enumValueFieldIndex(container_layout_ty, @enumToInt(layout)), + (try mod.enumValueFieldIndex(container_layout_ty, @enumToInt(layout))).ip_index, // tag_type: ?type, enum_tag_ty_val, @@ -16520,14 +16547,14 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // decls: []const Declaration, decls_val, }; - - return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Union)), - .val = try Value.Tag.aggregate.create(sema.arena, field_values), - }), - ); + return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ + .ty = type_info_ty.ip_index, + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Union))).ip_index, + .val = try mod.intern(.{ .aggregate = .{ + .ty = type_union_ty.ip_index, + .storage = .{ .elems = &field_values }, + } }), + } })).toValue()); }, .Struct => { // TODO: look into memoizing this result. @@ -16535,6 +16562,19 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai var fields_anon_decl = try block.startAnonDecl(); defer fields_anon_decl.deinit(); + const type_struct_ty = t: { + const type_struct_ty_decl_index = (try sema.namespaceLookup( + block, + src, + type_info_ty.getNamespaceIndex(mod).unwrap().?, + "Struct", + )).?; + try mod.declareDeclDependency(sema.owner_decl_index, type_struct_ty_decl_index); + try sema.ensureDeclAnalyzed(type_struct_ty_decl_index); + const type_struct_ty_decl = mod.declPtr(type_struct_ty_decl_index); + break :t type_struct_ty_decl.val.toType(); + }; + const struct_field_ty = t: { const struct_field_ty_decl_index = (try sema.namespaceLookup( block, @@ -16547,14 +16587,17 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const struct_field_ty_decl = mod.declPtr(struct_field_ty_decl_index); break :t struct_field_ty_decl.val.toType(); }; + const struct_ty = try sema.resolveTypeFields(ty); try sema.resolveTypeLayout(ty); // Getting alignment requires type layout const layout = struct_ty.containerLayout(mod); - const struct_field_vals = fv: { + var struct_field_vals: []InternPool.Index = &.{}; + defer gpa.free(struct_field_vals); + fv: { const struct_type = switch (mod.intern_pool.indexToKey(struct_ty.ip_index)) { .anon_struct_type => |tuple| { - const struct_field_vals = try fields_anon_decl.arena().alloc(Value, tuple.types.len); + struct_field_vals = try gpa.alloc(InternPool.Index, tuple.types.len); for ( tuple.types, tuple.values, @@ -16574,38 +16617,40 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai try Value.Tag.bytes.create(anon_decl.arena(), bytes.ptr[0 .. 
bytes.len + 1]), 0, // default alignment ); - break :v try Value.Tag.slice.create(fields_anon_decl.arena(), .{ - .ptr = try Value.Tag.decl_ref.create(fields_anon_decl.arena(), new_decl), - .len = try mod.intValue(Type.usize, bytes.len), - }); + break :v try mod.intern(.{ .ptr = .{ + .ty = .slice_const_u8_type, + .addr = .{ .decl = new_decl }, + .len = (try mod.intValue(Type.usize, bytes.len)).ip_index, + } }); }; - const struct_field_fields = try fields_anon_decl.arena().create([5]Value); const is_comptime = field_val != .none; const opt_default_val = if (is_comptime) field_val.toValue() else null; const default_val_ptr = try sema.optRefValue(block, field_ty.toType(), opt_default_val); - struct_field_fields.* = .{ + const struct_field_fields = .{ // name: []const u8, name_val, // type: type, - field_ty.toValue(), + field_ty, // default_value: ?*const anyopaque, - try default_val_ptr.copy(fields_anon_decl.arena()), + default_val_ptr.ip_index, // is_comptime: bool, - Value.makeBool(is_comptime), + Value.makeBool(is_comptime).ip_index, // alignment: comptime_int, - try field_ty.toType().lazyAbiAlignment(mod, fields_anon_decl.arena()), + (try mod.intValue(Type.comptime_int, field_ty.toType().abiAlignment(mod))).ip_index, }; - struct_field_val.* = try Value.Tag.aggregate.create(fields_anon_decl.arena(), struct_field_fields); + struct_field_val.* = try mod.intern(.{ .aggregate = .{ + .ty = struct_field_ty.ip_index, + .storage = .{ .elems = &struct_field_fields }, + } }); } - break :fv struct_field_vals; + break :fv; }, .struct_type => |s| s, else => unreachable, }; - const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse - break :fv &[0]Value{}; - const struct_field_vals = try fields_anon_decl.arena().alloc(Value, struct_obj.fields.count()); + const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse break :fv; + struct_field_vals = try gpa.alloc(InternPool.Index, struct_obj.fields.count()); for ( struct_field_vals, @@ -16621,13 +16666,13 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. 
bytes.len + 1]), 0, // default alignment ); - break :v try Value.Tag.slice.create(fields_anon_decl.arena(), .{ - .ptr = try Value.Tag.decl_ref.create(fields_anon_decl.arena(), new_decl), - .len = try mod.intValue(Type.usize, bytes.len), - }); + break :v try mod.intern(.{ .ptr = .{ + .ty = .slice_const_u8_type, + .addr = .{ .decl = new_decl }, + .len = (try mod.intValue(Type.usize, bytes.len)).ip_index, + } }); }; - const struct_field_fields = try fields_anon_decl.arena().create([5]Value); const opt_default_val = if (field.default_val.ip_index == .unreachable_value) null else @@ -16635,55 +16680,61 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const default_val_ptr = try sema.optRefValue(block, field.ty, opt_default_val); const alignment = field.alignment(mod, layout); - struct_field_fields.* = .{ + const struct_field_fields = .{ // name: []const u8, name_val, // type: type, - field.ty.toValue(), + field.ty.ip_index, // default_value: ?*const anyopaque, - try default_val_ptr.copy(fields_anon_decl.arena()), + default_val_ptr.ip_index, // is_comptime: bool, - Value.makeBool(field.is_comptime), + Value.makeBool(field.is_comptime).ip_index, // alignment: comptime_int, - try mod.intValue(Type.comptime_int, alignment), + (try mod.intValue(Type.comptime_int, alignment)).ip_index, }; - field_val.* = try Value.Tag.aggregate.create(fields_anon_decl.arena(), struct_field_fields); + field_val.* = try mod.intern(.{ .aggregate = .{ + .ty = struct_field_ty.ip_index, + .storage = .{ .elems = &struct_field_fields }, + } }); } - break :fv struct_field_vals; - }; + } const fields_val = v: { + const array_fields_ty = try mod.arrayType(.{ + .len = struct_field_vals.len, + .child = struct_field_ty.ip_index, + .sentinel = .none, + }); const new_decl = try fields_anon_decl.finish( - try mod.arrayType(.{ - .len = struct_field_vals.len, - .child = struct_field_ty.ip_index, - .sentinel = .none, - }), - try Value.Tag.aggregate.create( - fields_anon_decl.arena(), - try fields_anon_decl.arena().dupe(Value, struct_field_vals), - ), + array_fields_ty, + (try mod.intern(.{ .aggregate = .{ + .ty = array_fields_ty.ip_index, + .storage = .{ .elems = struct_field_vals }, + } })).toValue(), 0, // default alignment ); - break :v try Value.Tag.slice.create(sema.arena, .{ - .ptr = try Value.Tag.decl_ref.create(sema.arena, new_decl), - .len = try mod.intValue(Type.usize, struct_field_vals.len), - }); + break :v try mod.intern(.{ .ptr = .{ + .ty = (try mod.ptrType(.{ + .elem_type = struct_field_ty.ip_index, + .size = .Slice, + .is_const = true, + })).ip_index, + .addr = .{ .decl = new_decl }, + .len = (try mod.intValue(Type.usize, struct_field_vals.len)).ip_index, + } }); }; const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, struct_ty.getNamespaceIndex(mod)); - const backing_integer_val = blk: { - if (layout == .Packed) { + const backing_integer_val = try mod.intern(.{ .opt = .{ + .ty = (try mod.optionalType(.type_type)).ip_index, + .val = if (layout == .Packed) val: { const struct_obj = mod.typeToStruct(struct_ty).?; assert(struct_obj.haveLayout()); assert(struct_obj.backing_int_ty.isInt(mod)); - const backing_int_ty_val = struct_obj.backing_int_ty.toValue(); - break :blk try Value.Tag.opt_payload.create(sema.arena, backing_int_ty_val); - } else { - break :blk Value.null; - } - }; + break :val struct_obj.backing_int_ty.ip_index; + } else .none, + } }); const container_layout_ty = t: { const decl_index = (try sema.namespaceLookup( @@ -16698,10 +16749,9 @@ fn zirTypeInfo(sema: 
*Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai break :t decl.val.toType(); }; - const field_values = try sema.arena.create([5]Value); - field_values.* = .{ + const field_values = [_]InternPool.Index{ // layout: ContainerLayout, - try mod.enumValueFieldIndex(container_layout_ty, @enumToInt(layout)), + (try mod.enumValueFieldIndex(container_layout_ty, @enumToInt(layout))).ip_index, // backing_integer: ?type, backing_integer_val, // fields: []const StructField, @@ -16709,36 +16759,48 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // decls: []const Declaration, decls_val, // is_tuple: bool, - Value.makeBool(struct_ty.isTuple(mod)), + Value.makeBool(struct_ty.isTuple(mod)).ip_index, }; - - return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Struct)), - .val = try Value.Tag.aggregate.create(sema.arena, field_values), - }), - ); + return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ + .ty = type_info_ty.ip_index, + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Struct))).ip_index, + .val = try mod.intern(.{ .aggregate = .{ + .ty = type_struct_ty.ip_index, + .storage = .{ .elems = &field_values }, + } }), + } })).toValue()); }, .Opaque => { // TODO: look into memoizing this result. + const type_opaque_ty = t: { + const type_opaque_ty_decl_index = (try sema.namespaceLookup( + block, + src, + type_info_ty.getNamespaceIndex(mod).unwrap().?, + "Opaque", + )).?; + try mod.declareDeclDependency(sema.owner_decl_index, type_opaque_ty_decl_index); + try sema.ensureDeclAnalyzed(type_opaque_ty_decl_index); + const type_opaque_ty_decl = mod.declPtr(type_opaque_ty_decl_index); + break :t type_opaque_ty_decl.val.toType(); + }; + const opaque_ty = try sema.resolveTypeFields(ty); const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, opaque_ty.getNamespaceIndex(mod)); - const field_values = try sema.arena.create([1]Value); - field_values.* = .{ + const field_values = .{ // decls: []const Declaration, decls_val, }; - - return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Opaque)), - .val = try Value.Tag.aggregate.create(sema.arena, field_values), - }), - ); + return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ + .ty = type_info_ty.ip_index, + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Opaque))).ip_index, + .val = try mod.intern(.{ .aggregate = .{ + .ty = type_opaque_ty.ip_index, + .storage = .{ .elems = &field_values }, + } }), + } })).toValue()); }, .Frame => return sema.failWithUseOfAsync(block, src), .AnyFrame => return sema.failWithUseOfAsync(block, src), @@ -16751,7 +16813,7 @@ fn typeInfoDecls( src: LazySrcLoc, type_info_ty: Type, opt_namespace: Module.Namespace.OptionalIndex, -) CompileError!Value { +) CompileError!InternPool.Index { const mod = sema.mod; var decls_anon_decl = try block.startAnonDecl(); defer decls_anon_decl.deinit(); @@ -16770,7 +16832,7 @@ fn typeInfoDecls( }; try sema.queueFullTypeResolution(declaration_ty); - var decl_vals = std.ArrayList(Value).init(sema.gpa); + var decl_vals = std.ArrayList(InternPool.Index).init(sema.gpa); defer decl_vals.deinit(); var seen_namespaces = std.AutoHashMap(*Namespace, void).init(sema.gpa); @@ -16778,33 +16840,39 @@ fn typeInfoDecls( if 
(opt_namespace.unwrap()) |namespace_index| { const namespace = mod.namespacePtr(namespace_index); - try sema.typeInfoNamespaceDecls(block, decls_anon_decl.arena(), namespace, &decl_vals, &seen_namespaces); + try sema.typeInfoNamespaceDecls(block, namespace, declaration_ty, &decl_vals, &seen_namespaces); } + const array_decl_ty = try mod.arrayType(.{ + .len = decl_vals.items.len, + .child = declaration_ty.ip_index, + .sentinel = .none, + }); const new_decl = try decls_anon_decl.finish( - try mod.arrayType(.{ - .len = decl_vals.items.len, - .child = declaration_ty.ip_index, - .sentinel = .none, - }), - try Value.Tag.aggregate.create( - decls_anon_decl.arena(), - try decls_anon_decl.arena().dupe(Value, decl_vals.items), - ), + array_decl_ty, + (try mod.intern(.{ .aggregate = .{ + .ty = array_decl_ty.ip_index, + .storage = .{ .elems = decl_vals.items }, + } })).toValue(), 0, // default alignment ); - return try Value.Tag.slice.create(sema.arena, .{ - .ptr = try Value.Tag.decl_ref.create(sema.arena, new_decl), - .len = try mod.intValue(Type.usize, decl_vals.items.len), - }); + return try mod.intern(.{ .ptr = .{ + .ty = (try mod.ptrType(.{ + .elem_type = declaration_ty.ip_index, + .size = .Slice, + .is_const = true, + })).ip_index, + .addr = .{ .decl = new_decl }, + .len = (try mod.intValue(Type.usize, decl_vals.items.len)).ip_index, + } }); } fn typeInfoNamespaceDecls( sema: *Sema, block: *Block, - decls_anon_decl: Allocator, namespace: *Namespace, - decl_vals: *std.ArrayList(Value), + declaration_ty: Type, + decl_vals: *std.ArrayList(InternPool.Index), seen_namespaces: *std.AutoHashMap(*Namespace, void), ) !void { const mod = sema.mod; @@ -16817,7 +16885,7 @@ fn typeInfoNamespaceDecls( if (decl.analysis == .in_progress) continue; try mod.ensureDeclAnalyzed(decl_index); const new_ns = decl.val.toType().getNamespace(mod).?; - try sema.typeInfoNamespaceDecls(block, decls_anon_decl, new_ns, decl_vals, seen_namespaces); + try sema.typeInfoNamespaceDecls(block, new_ns, declaration_ty, decl_vals, seen_namespaces); continue; } if (decl.kind != .named) continue; @@ -16830,20 +16898,23 @@ fn typeInfoNamespaceDecls( try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]), 0, // default alignment ); - break :v try Value.Tag.slice.create(decls_anon_decl, .{ - .ptr = try Value.Tag.decl_ref.create(decls_anon_decl, new_decl), - .len = try mod.intValue(Type.usize, bytes.len), - }); + break :v try mod.intern(.{ .ptr = .{ + .ty = .slice_const_u8_type, + .addr = .{ .decl = new_decl }, + .len = (try mod.intValue(Type.usize, bytes.len)).ip_index, + } }); }; - const fields = try decls_anon_decl.create([2]Value); - fields.* = .{ + const fields = .{ //name: []const u8, name_val, //is_pub: bool, - Value.makeBool(decl.is_pub), + Value.makeBool(decl.is_pub).ip_index, }; - try decl_vals.append(try Value.Tag.aggregate.create(decls_anon_decl, fields)); + try decl_vals.append(try mod.intern(.{ .aggregate = .{ + .ty = declaration_ty.ip_index, + .storage = .{ .elems = &fields }, + } })); } } @@ -17454,10 +17525,11 @@ fn zirRetErrValue( // Return the error code from the function. 
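Below, zirRetErrValue starts storing the error name through mod.intern_pool.getOrPutString, matching the earlier zirErrorName/zirTagName hunks that read names back with stringToSlice. A toy name interner showing that round trip (hypothetical structure; the real pool stores its bytes differently):

    const std = @import("std");

    fn getOrPutString(
        gpa: std.mem.Allocator,
        names: *std.ArrayList([]const u8),
        map: *std.StringHashMap(u32),
        s: []const u8,
    ) !u32 {
        if (map.get(s)) |idx| return idx; // already interned
        const owned = try gpa.dupe(u8, s);
        const idx = @intCast(u32, names.items.len);
        try names.append(owned);
        try map.put(owned, idx);
        return idx;
    }

    pub fn main() !void {
        var gpa_state = std.heap.GeneralPurposeAllocator(.{}){};
        defer _ = gpa_state.deinit();
        const gpa = gpa_state.allocator();

        var names = std.ArrayList([]const u8).init(gpa);
        defer {
            for (names.items) |n| gpa.free(n);
            names.deinit();
        }
        var map = std.StringHashMap(u32).init(gpa);
        defer map.deinit();

        const a = try getOrPutString(gpa, &names, &map, "OutOfMemory");
        const b = try getOrPutString(gpa, &names, &map, "OutOfMemory");
        std.debug.assert(a == b); // the name's bytes are stored once
        std.debug.print("{s}\n", .{names.items[a]}); // the stringToSlice direction
    }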
const kv = try mod.getErrorValue(err_name); - const result_inst = try sema.addConstant( - try mod.singleErrorSetType(err_name), - try Value.Tag.@"error".create(sema.arena, .{ .name = kv.key }), - ); + const error_set_type = try mod.singleErrorSetType(err_name); + const result_inst = try sema.addConstant(error_set_type, (try mod.intern(.{ .err = .{ + .ty = error_set_type.ip_index, + .name = try mod.intern_pool.getOrPutString(sema.gpa, kv.key), + } })).toValue()); return sema.analyzeRet(block, result_inst, src); } @@ -17782,10 +17854,12 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const val = try sema.resolveConstValue(block, align_src, coerced, "pointer alignment must be comptime-known"); // Check if this happens to be the lazy alignment of our element type, in // which case we can make this 0 without resolving it. - if (val.castTag(.lazy_align)) |payload| { - if (payload.data.eql(elem_ty, sema.mod)) { - break :blk .none; - } + switch (mod.intern_pool.indexToKey(val.ip_index)) { + .int => |int| switch (int.storage) { + .lazy_align => |lazy_ty| if (lazy_ty == elem_ty.ip_index) break :blk .none, + else => {}, + }, + else => {}, } const abi_align = @intCast(u32, (try val.getUnsignedIntAdvanced(mod, sema)).?); try sema.validateAlign(block, align_src, abi_align); @@ -17910,12 +17984,10 @@ fn arrayInitEmpty(sema: *Sema, block: *Block, src: LazySrcLoc, obj_ty: Type) Com return sema.fail(block, src, "expected {d} vector elements; found 0", .{arr_len}); } } - if (obj_ty.sentinel(mod)) |sentinel| { - const val = try Value.Tag.empty_array_sentinel.create(sema.arena, sentinel); - return sema.addConstant(obj_ty, val); - } else { - return sema.addConstant(obj_ty, Value.initTag(.empty_array)); - } + return sema.addConstant(obj_ty, (try mod.intern(.{ .aggregate = .{ + .ty = obj_ty.ip_index, + .storage = .{ .elems = &.{} }, + } })).toValue()); } fn zirUnionInit(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -18679,8 +18751,8 @@ fn zirAlignOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air if (ty.isNoReturn(mod)) { return sema.fail(block, operand_src, "no align available for type '{}'", .{ty.fmt(sema.mod)}); } - const val = try ty.lazyAbiAlignment(mod, sema.arena); - if (val.isLazyAlign()) { + const val = try ty.lazyAbiAlignment(mod); + if (val.isLazyAlign(mod)) { try sema.queueFullTypeResolution(ty); } return sema.addConstant(Type.comptime_int, val); @@ -18704,7 +18776,8 @@ fn zirErrorName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; if (try sema.resolveDefinedValue(block, operand_src, operand)) |val| { - const bytes = val.castTag(.@"error").?.data.name; + const err_name = sema.mod.intern_pool.indexToKey(val.ip_index).err.name; + const bytes = sema.mod.intern_pool.stringToSlice(err_name); return sema.addStrLit(block, bytes); } @@ -18794,7 +18867,8 @@ fn zirTagName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const enum_ty = switch (operand_ty.zigTypeTag(mod)) { .EnumLiteral => { const val = try sema.resolveConstValue(block, .unneeded, operand, ""); - const bytes = val.castTag(.enum_literal).?.data; + const tag_name = mod.intern_pool.indexToKey(val.ip_index).enum_literal; + const bytes = mod.intern_pool.stringToSlice(tag_name); return sema.addStrLit(block, bytes); }, .Enum => operand_ty, @@ -18883,11 +18957,8 @@ fn zirReify( .EnumLiteral => return Air.Inst.Ref.enum_literal_type, .Int => { 
const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod); - const signedness_index = fields.getIndex("signedness").?; - const bits_index = fields.getIndex("bits").?; - - const signedness_val = try union_val.val.fieldValue(fields.values()[signedness_index].ty, mod, signedness_index); - const bits_val = try union_val.val.fieldValue(fields.values()[bits_index].ty, mod, bits_index); + const signedness_val = try union_val.val.fieldValue(mod, fields.getIndex("signedness").?); + const bits_val = try union_val.val.fieldValue(mod, fields.getIndex("bits").?); const signedness = mod.toEnum(std.builtin.Signedness, signedness_val); const bits = @intCast(u16, bits_val.toUnsignedInt(mod)); @@ -18896,11 +18967,8 @@ fn zirReify( }, .Vector => { const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod); - const len_index = fields.getIndex("len").?; - const child_index = fields.getIndex("child").?; - - const len_val = try union_val.val.fieldValue(fields.values()[len_index].ty, mod, len_index); - const child_val = try union_val.val.fieldValue(fields.values()[child_index].ty, mod, child_index); + const len_val = try union_val.val.fieldValue(mod, fields.getIndex("len").?); + const child_val = try union_val.val.fieldValue(mod, fields.getIndex("child").?); const len = @intCast(u32, len_val.toUnsignedInt(mod)); const child_ty = child_val.toType(); @@ -18915,9 +18983,7 @@ fn zirReify( }, .Float => { const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod); - const bits_index = fields.getIndex("bits").?; - - const bits_val = try union_val.val.fieldValue(fields.values()[bits_index].ty, mod, bits_index); + const bits_val = try union_val.val.fieldValue(mod, fields.getIndex("bits").?); const bits = @intCast(u16, bits_val.toUnsignedInt(mod)); const ty = switch (bits) { @@ -18932,23 +18998,14 @@ fn zirReify( }, .Pointer => { const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod); - const size_index = fields.getIndex("size").?; - const is_const_index = fields.getIndex("is_const").?; - const is_volatile_index = fields.getIndex("is_volatile").?; - const alignment_index = fields.getIndex("alignment").?; - const address_space_index = fields.getIndex("address_space").?; - const child_index = fields.getIndex("child").?; - const is_allowzero_index = fields.getIndex("is_allowzero").?; - const sentinel_index = fields.getIndex("sentinel").?; - - const size_val = try union_val.val.fieldValue(fields.values()[size_index].ty, mod, size_index); - const is_const_val = try union_val.val.fieldValue(fields.values()[is_const_index].ty, mod, is_const_index); - const is_volatile_val = try union_val.val.fieldValue(fields.values()[is_volatile_index].ty, mod, is_volatile_index); - const alignment_val = try union_val.val.fieldValue(fields.values()[alignment_index].ty, mod, alignment_index); - const address_space_val = try union_val.val.fieldValue(fields.values()[address_space_index].ty, mod, address_space_index); - const child_val = try union_val.val.fieldValue(fields.values()[child_index].ty, mod, child_index); - const is_allowzero_val = try union_val.val.fieldValue(fields.values()[is_allowzero_index].ty, mod, is_allowzero_index); - const sentinel_val = try union_val.val.fieldValue(fields.values()[sentinel_index].ty, mod, sentinel_index); + const size_val = try union_val.val.fieldValue(mod, fields.getIndex("size").?); + const is_const_val = try union_val.val.fieldValue(mod, fields.getIndex("is_const").?); + const is_volatile_val = try union_val.val.fieldValue(mod, 
fields.getIndex("is_volatile").?); + const alignment_val = try union_val.val.fieldValue(mod, fields.getIndex("alignment").?); + const address_space_val = try union_val.val.fieldValue(mod, fields.getIndex("address_space").?); + const child_val = try union_val.val.fieldValue(mod, fields.getIndex("child").?); + const is_allowzero_val = try union_val.val.fieldValue(mod, fields.getIndex("is_allowzero").?); + const sentinel_val = try union_val.val.fieldValue(mod, fields.getIndex("sentinel").?); if (!try sema.intFitsInType(alignment_val, Type.u32, null)) { return sema.fail(block, src, "alignment must fit in 'u32'", .{}); @@ -19032,22 +19089,18 @@ fn zirReify( }, .Array => { const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod); - const len_index = fields.getIndex("len").?; - const child_index = fields.getIndex("child").?; - const sentinel_index = fields.getIndex("sentinel").?; - - const len_val = try union_val.val.fieldValue(fields.values()[len_index].ty, mod, len_index); - const child_val = try union_val.val.fieldValue(fields.values()[child_index].ty, mod, child_index); - const sentinel_val = try union_val.val.fieldValue(fields.values()[sentinel_index].ty, mod, sentinel_index); + const len_val = try union_val.val.fieldValue(mod, fields.getIndex("len").?); + const child_val = try union_val.val.fieldValue(mod, fields.getIndex("child").?); + const sentinel_val = try union_val.val.fieldValue(mod, fields.getIndex("sentinel").?); const len = len_val.toUnsignedInt(mod); const child_ty = child_val.toType(); - const sentinel = if (sentinel_val.castTag(.opt_payload)) |p| blk: { + const sentinel = if (sentinel_val.optionalValue(mod)) |p| blk: { const ptr_ty = try Type.ptr(sema.arena, mod, .{ .@"addrspace" = .generic, .pointee_type = child_ty, }); - break :blk (try sema.pointerDeref(block, src, p.data, ptr_ty)).?; + break :blk (try sema.pointerDeref(block, src, p, ptr_ty)).?; } else null; const ty = try Type.array(sema.arena, len, sentinel, child_ty, mod); @@ -19055,9 +19108,7 @@ fn zirReify( }, .Optional => { const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod); - const child_index = fields.getIndex("child").?; - - const child_val = try union_val.val.fieldValue(fields.values()[child_index].ty, mod, child_index); + const child_val = try union_val.val.fieldValue(mod, fields.getIndex("child").?); const child_ty = child_val.toType(); @@ -19066,11 +19117,8 @@ fn zirReify( }, .ErrorUnion => { const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod); - const error_set_index = fields.getIndex("error_set").?; - const payload_index = fields.getIndex("payload").?; - - const error_set_val = try union_val.val.fieldValue(fields.values()[error_set_index].ty, mod, error_set_index); - const payload_val = try union_val.val.fieldValue(fields.values()[payload_index].ty, mod, payload_index); + const error_set_val = try union_val.val.fieldValue(mod, fields.getIndex("error_set").?); + const payload_val = try union_val.val.fieldValue(mod, fields.getIndex("payload").?); const error_set_ty = error_set_val.toType(); const payload_ty = payload_val.toType(); @@ -19085,18 +19133,17 @@ fn zirReify( .ErrorSet => { const payload_val = union_val.val.optionalValue(mod) orelse return sema.addType(Type.anyerror); - const slice_val = payload_val.castTag(.slice).?.data; - const len = try sema.usizeCast(block, src, slice_val.len.toUnsignedInt(mod)); + const len = try sema.usizeCast(block, src, payload_val.sliceLen(mod)); var names: Module.Fn.InferredErrorSet.NameMap = .{}; try 
names.ensureUnusedCapacity(sema.arena, len); for (0..len) |i| { - const elem_val = try slice_val.ptr.elemValue(mod, i); + const elem_val = try payload_val.elemValue(mod, i); const struct_val = elem_val.castTag(.aggregate).?.data; // TODO use reflection instead of magic numbers here // error_set: type, const name_val = struct_val[0]; - const name_str = try name_val.toAllocatedBytes(Type.const_slice_u8, sema.arena, mod); + const name_str = try name_val.toAllocatedBytes(Type.slice_const_u8, sema.arena, mod); const name_ip = try mod.intern_pool.getOrPutString(gpa, name_str); const gop = names.getOrPutAssumeCapacity(name_ip); if (gop.found_existing) { @@ -19109,17 +19156,11 @@ fn zirReify( }, .Struct => { const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod); - const layout_index = fields.getIndex("layout").?; - const backing_integer_index = fields.getIndex("backing_integer").?; - const fields_index = fields.getIndex("fields").?; - const decls_index = fields.getIndex("decls").?; - const is_tuple_index = fields.getIndex("is_tuple").?; - - const layout_val = try union_val.val.fieldValue(fields.values()[layout_index].ty, mod, layout_index); - const backing_integer_val = try union_val.val.fieldValue(fields.values()[backing_integer_index].ty, mod, backing_integer_index); - const fields_val = try union_val.val.fieldValue(fields.values()[fields_index].ty, mod, fields_index); - const decls_val = try union_val.val.fieldValue(fields.values()[decls_index].ty, mod, decls_index); - const is_tuple_val = try union_val.val.fieldValue(fields.values()[is_tuple_index].ty, mod, is_tuple_index); + const layout_val = try union_val.val.fieldValue(mod, fields.getIndex("layout").?); + const backing_integer_val = try union_val.val.fieldValue(mod, fields.getIndex("backing_integer").?); + const fields_val = try union_val.val.fieldValue(mod, fields.getIndex("fields").?); + const decls_val = try union_val.val.fieldValue(mod, fields.getIndex("decls").?); + const is_tuple_val = try union_val.val.fieldValue(mod, fields.getIndex("is_tuple").?); const layout = mod.toEnum(std.builtin.Type.ContainerLayout, layout_val); @@ -19136,15 +19177,10 @@ fn zirReify( }, .Enum => { const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod); - const tag_type_index = fields.getIndex("tag_type").?; - const fields_index = fields.getIndex("fields").?; - const decls_index = fields.getIndex("decls").?; - const is_exhaustive_index = fields.getIndex("is_exhaustive").?; - - const tag_type_val = try union_val.val.fieldValue(fields.values()[tag_type_index].ty, mod, tag_type_index); - const fields_val = try union_val.val.fieldValue(fields.values()[fields_index].ty, mod, fields_index); - const decls_val = try union_val.val.fieldValue(fields.values()[decls_index].ty, mod, decls_index); - const is_exhaustive_val = try union_val.val.fieldValue(fields.values()[is_exhaustive_index].ty, mod, is_exhaustive_index); + const tag_type_val = try union_val.val.fieldValue(mod, fields.getIndex("tag_type").?); + const fields_val = try union_val.val.fieldValue(mod, fields.getIndex("fields").?); + const decls_val = try union_val.val.fieldValue(mod, fields.getIndex("decls").?); + const is_exhaustive_val = try union_val.val.fieldValue(mod, fields.getIndex("is_exhaustive").?); // Decls if (decls_val.sliceLen(mod) > 0) { @@ -19195,7 +19231,7 @@ fn zirReify( const value_val = field_struct_val[1]; const field_name = try name_val.toAllocatedBytes( - Type.const_slice_u8, + Type.slice_const_u8, sema.arena, mod, ); @@ -19237,9 +19273,7 @@ fn 
zirReify( }, .Opaque => { const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod); - const decls_index = fields.getIndex("decls").?; - - const decls_val = try union_val.val.fieldValue(fields.values()[decls_index].ty, mod, decls_index); + const decls_val = try union_val.val.fieldValue(mod, fields.getIndex("decls").?); // Decls if (decls_val.sliceLen(mod) > 0) { @@ -19283,15 +19317,10 @@ fn zirReify( }, .Union => { const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod); - const layout_index = fields.getIndex("layout").?; - const tag_type_index = fields.getIndex("tag_type").?; - const fields_index = fields.getIndex("fields").?; - const decls_index = fields.getIndex("decls").?; - - const layout_val = try union_val.val.fieldValue(fields.values()[layout_index].ty, mod, layout_index); - const tag_type_val = try union_val.val.fieldValue(fields.values()[tag_type_index].ty, mod, tag_type_index); - const fields_val = try union_val.val.fieldValue(fields.values()[fields_index].ty, mod, fields_index); - const decls_val = try union_val.val.fieldValue(fields.values()[decls_index].ty, mod, decls_index); + const layout_val = try union_val.val.fieldValue(mod, fields.getIndex("layout").?); + const tag_type_val = try union_val.val.fieldValue(mod, fields.getIndex("tag_type").?); + const fields_val = try union_val.val.fieldValue(mod, fields.getIndex("fields").?); + const decls_val = try union_val.val.fieldValue(mod, fields.getIndex("decls").?); // Decls if (decls_val.sliceLen(mod) > 0) { @@ -19386,7 +19415,7 @@ fn zirReify( const alignment_val = field_struct_val[2]; const field_name = try name_val.toAllocatedBytes( - Type.const_slice_u8, + Type.slice_const_u8, new_decl_arena_allocator, mod, ); @@ -19489,19 +19518,12 @@ fn zirReify( }, .Fn => { const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod); - const calling_convention_index = fields.getIndex("calling_convention").?; - const alignment_index = fields.getIndex("alignment").?; - const is_generic_index = fields.getIndex("is_generic").?; - const is_var_args_index = fields.getIndex("is_var_args").?; - const return_type_index = fields.getIndex("return_type").?; - const params_index = fields.getIndex("params").?; - - const calling_convention_val = try union_val.val.fieldValue(fields.values()[calling_convention_index].ty, mod, calling_convention_index); - const alignment_val = try union_val.val.fieldValue(fields.values()[alignment_index].ty, mod, alignment_index); - const is_generic_val = try union_val.val.fieldValue(fields.values()[is_generic_index].ty, mod, is_generic_index); - const is_var_args_val = try union_val.val.fieldValue(fields.values()[is_var_args_index].ty, mod, is_var_args_index); - const return_type_val = try union_val.val.fieldValue(fields.values()[return_type_index].ty, mod, return_type_index); - const params_val = try union_val.val.fieldValue(fields.values()[params_index].ty, mod, params_index); + const calling_convention_val = try union_val.val.fieldValue(mod, fields.getIndex("calling_convention").?); + const alignment_val = try union_val.val.fieldValue(mod, fields.getIndex("alignment").?); + const is_generic_val = try union_val.val.fieldValue(mod, fields.getIndex("is_generic").?); + const is_var_args_val = try union_val.val.fieldValue(mod, fields.getIndex("is_var_args").?); + const return_type_val = try union_val.val.fieldValue(mod, fields.getIndex("return_type").?); + const params_val = try union_val.val.fieldValue(mod, fields.getIndex("params").?); const is_generic = 
is_generic_val.toBool(mod); if (is_generic) { @@ -19528,14 +19550,12 @@ fn zirReify( const return_type = return_type_val.optionalValue(mod) orelse return sema.fail(block, src, "Type.Fn.return_type must be non-null for @Type", .{}); - const args_slice_val = params_val.castTag(.slice).?.data; - const args_len = try sema.usizeCast(block, src, args_slice_val.len.toUnsignedInt(mod)); - + const args_len = try sema.usizeCast(block, src, params_val.sliceLen(mod)); const param_types = try sema.arena.alloc(InternPool.Index, args_len); var noalias_bits: u32 = 0; for (param_types, 0..) |*param_type, i| { - const arg = try args_slice_val.ptr.elemValue(mod, i); + const arg = try params_val.elemValue(mod, i); const arg_val = arg.castTag(.aggregate).?.data; // TODO use reflection instead of magic numbers here // is_generic: bool, @@ -19676,7 +19696,7 @@ fn reifyStruct( } const field_name = try name_val.toAllocatedBytes( - Type.const_slice_u8, + Type.slice_const_u8, new_decl_arena_allocator, mod, ); @@ -19707,7 +19727,7 @@ fn reifyStruct( } const default_val = if (default_value_val.optionalValue(mod)) |opt_val| blk: { - const payload_val = if (opt_val.pointerDecl()) |opt_decl| + const payload_val = if (opt_val.pointerDecl(mod)) |opt_decl| mod.declPtr(opt_decl).val else opt_val; @@ -20137,7 +20157,7 @@ fn zirErrSetCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat if (maybe_operand_val) |val| { if (!dest_ty.isAnyError(mod)) { - const error_name = val.castTag(.@"error").?.data.name; + const error_name = mod.intern_pool.stringToSlice(mod.intern_pool.indexToKey(val.ip_index).err.name); if (!dest_ty.errorSetHasField(error_name, mod)) { const msg = msg: { const msg = try sema.errMsg( @@ -20279,7 +20299,10 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air return sema.fail(block, operand_src, "null pointer casted to type '{}'", .{dest_ty.fmt(sema.mod)}); } if (dest_ty.zigTypeTag(mod) == .Optional and sema.typeOf(ptr).zigTypeTag(mod) != .Optional) { - return sema.addConstant(dest_ty, try Value.Tag.opt_payload.create(sema.arena, operand_val)); + return sema.addConstant(dest_ty, (try mod.intern(.{ .opt = .{ + .ty = dest_ty.ip_index, + .val = operand_val.toIntern(), + } })).toValue()); } return sema.addConstant(aligned_dest_ty, operand_val); } @@ -20944,7 +20967,7 @@ fn checkPtrIsNotComptimeMutable( operand_src: LazySrcLoc, ) CompileError!void { _ = operand_src; - if (ptr_val.isComptimeMutablePtr()) { + if (ptr_val.isComptimeMutablePtr(sema.mod)) { return sema.fail(block, ptr_src, "cannot store runtime value in compile time variable", .{}); } } @@ -20953,7 +20976,7 @@ fn checkComptimeVarStore( sema: *Sema, block: *Block, src: LazySrcLoc, - decl_ref_mut: Value.Payload.DeclRefMut.Data, + decl_ref_mut: InternPool.Key.Ptr.Addr.MutDecl, ) CompileError!void { if (@enumToInt(decl_ref_mut.runtime_index) < @enumToInt(block.runtime_index)) { if (block.runtime_cond) |cond_src| { @@ -21159,7 +21182,7 @@ fn resolveExportOptions( const name_operand = try sema.fieldVal(block, src, options, "name", name_src); const name_val = try sema.resolveConstValue(block, name_src, name_operand, "name of exported value must be comptime-known"); - const name_ty = Type.const_slice_u8; + const name_ty = Type.slice_const_u8; const name = try name_val.toAllocatedBytes(name_ty, sema.arena, mod); const linkage_operand = try sema.fieldVal(block, src, options, "linkage", linkage_src); @@ -21168,7 +21191,7 @@ fn resolveExportOptions( const section_operand = try sema.fieldVal(block, src, options, 
"section", section_src); const section_opt_val = try sema.resolveConstValue(block, section_src, section_operand, "linksection of exported value must be comptime-known"); - const section_ty = Type.const_slice_u8; + const section_ty = Type.slice_const_u8; const section = if (section_opt_val.optionalValue(mod)) |section_val| try section_val.toAllocatedBytes(section_ty, sema.arena, mod) else @@ -21298,12 +21321,14 @@ fn zirCmpxchg( } const ptr_ty = sema.typeOf(ptr); const stored_val = (try sema.pointerDeref(block, ptr_src, ptr_val, ptr_ty)) orelse break :rs ptr_src; - const result_val = if (stored_val.eql(expected_val, elem_ty, sema.mod)) blk: { - try sema.storePtr(block, src, ptr, new_value); - break :blk Value.null; - } else try Value.Tag.opt_payload.create(sema.arena, stored_val); - - return sema.addConstant(result_ty, result_val); + const result_val = try mod.intern(.{ .opt = .{ + .ty = result_ty.ip_index, + .val = if (stored_val.eql(expected_val, elem_ty, sema.mod)) blk: { + try sema.storePtr(block, src, ptr, new_value); + break :blk .none; + } else stored_val.toIntern(), + } }); + return sema.addConstant(result_ty, result_val.toValue()); } else break :rs new_value_src; } else break :rs expected_src; } else ptr_src; @@ -21342,11 +21367,7 @@ fn zirSplat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I }); if (try sema.resolveMaybeUndefVal(scalar)) |scalar_val| { if (scalar_val.isUndef(mod)) return sema.addConstUndef(vector_ty); - - return sema.addConstant( - vector_ty, - try Value.Tag.repeated.create(sema.arena, scalar_val), - ); + return sema.addConstant(vector_ty, try sema.splat(vector_ty, scalar_val)); } try sema.requireRuntimeBlock(block, inst_data.src(), scalar_src); @@ -21800,7 +21821,7 @@ fn zirAtomicRmw(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A try sema.checkPtrIsNotComptimeMutable(block, ptr_val, ptr_src, operand_src); break :rs operand_src; }; - if (ptr_val.isComptimeMutablePtr()) { + if (ptr_val.isComptimeMutablePtr(mod)) { const ptr_ty = sema.typeOf(ptr); const stored_val = (try sema.pointerDeref(block, ptr_src, ptr_val, ptr_ty)) orelse break :rs ptr_src; const new_val = switch (op) { @@ -22081,10 +22102,15 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr const result_ptr = try Type.ptr(sema.arena, sema.mod, ptr_ty_data); if (try sema.resolveDefinedValue(block, src, casted_field_ptr)) |field_ptr_val| { - const payload = field_ptr_val.castTag(.field_ptr) orelse { - return sema.fail(block, ptr_src, "pointer value not based on parent struct", .{}); - }; - if (payload.data.field_index != field_index) { + const field = switch (mod.intern_pool.indexToKey(field_ptr_val.ip_index)) { + .ptr => |ptr| switch (ptr.addr) { + .field => |field| field, + else => null, + }, + else => null, + } orelse return sema.fail(block, ptr_src, "pointer value not based on parent struct", .{}); + + if (field.index != field_index) { const msg = msg: { const msg = try sema.errMsg( block, @@ -22093,7 +22119,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr .{ field_name, field_index, - payload.data.field_index, + field.index, parent_ty.fmt(sema.mod), }, ); @@ -22103,7 +22129,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr }; return sema.failWithOwnedErrorMsg(msg); } - return sema.addConstant(result_ptr, payload.data.container_ptr); + return sema.addConstant(result_ptr, field.base.toValue()); } try sema.requireRuntimeBlock(block, src, ptr_src); @@ -22335,13 
+22361,13 @@ fn analyzeMinMax( // Compute the final bounds based on the runtime type and the comptime-known bound type const min_val = switch (air_tag) { - .min => try unrefined_elem_ty.minInt(sema.arena, mod), - .max => try comptime_elem_ty.minInt(sema.arena, mod), // @max(ct, rt) >= ct + .min => try unrefined_elem_ty.minInt(mod), + .max => try comptime_elem_ty.minInt(mod), // @max(ct, rt) >= ct else => unreachable, }; const max_val = switch (air_tag) { - .min => try comptime_elem_ty.maxInt(sema.arena, mod, Type.comptime_int), // @min(ct, rt) <= ct - .max => try unrefined_elem_ty.maxInt(sema.arena, mod, Type.comptime_int), + .min => try comptime_elem_ty.maxInt(mod, Type.comptime_int), // @min(ct, rt) <= ct + .max => try unrefined_elem_ty.maxInt(mod, Type.comptime_int), else => unreachable, }; @@ -22464,7 +22490,7 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void } const runtime_src = if (try sema.resolveDefinedValue(block, dest_src, dest_ptr)) |dest_ptr_val| rs: { - if (!dest_ptr_val.isComptimeMutablePtr()) break :rs dest_src; + if (!dest_ptr_val.isComptimeMutablePtr(mod)) break :rs dest_src; if (try sema.resolveDefinedValue(block, src_src, src_ptr)) |_| { const len_u64 = (try len_val.?.getUnsignedIntAdvanced(mod, sema)).?; const len = try sema.usizeCast(block, dest_src, len_u64); @@ -22618,7 +22644,7 @@ fn zirMemset(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void return; } - if (!ptr_val.isComptimeMutablePtr()) break :rs dest_src; + if (!ptr_val.isComptimeMutablePtr(mod)) break :rs dest_src; if (try sema.resolveMaybeUndefVal(uncoerced_elem)) |_| { for (0..len) |i| { const elem_index = try sema.addIntUnsigned(Type.usize, i); @@ -22696,6 +22722,7 @@ fn zirVarExtended( block: *Block, extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const extra = sema.code.extraData(Zir.Inst.ExtendedVar, extended.operand); const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = 0 }; const init_src: LazySrcLoc = .{ .node_offset_var_decl_init = 0 }; @@ -22737,32 +22764,17 @@ fn zirVarExtended( try sema.validateVarType(block, ty_src, var_ty, small.is_extern); - const new_var = try sema.gpa.create(Module.Var); - errdefer sema.gpa.destroy(new_var); - - log.debug("created variable {*} owner_decl: {*} ({s})", .{ - new_var, sema.owner_decl, sema.owner_decl.name, - }); - - new_var.* = .{ - .owner_decl = sema.owner_decl_index, - .init = init_val, + return sema.addConstant(var_ty, (try mod.intern(.{ .variable = .{ + .ty = var_ty.ip_index, + .init = init_val.toIntern(), + .decl = sema.owner_decl_index, + .lib_name = if (lib_name) |lname| (try mod.intern_pool.getOrPutString( + sema.gpa, + try sema.handleExternLibName(block, ty_src, lname), + )).toOptional() else .none, .is_extern = small.is_extern, - .is_mutable = true, .is_threadlocal = small.is_threadlocal, - .is_weak_linkage = false, - .lib_name = null, - }; - - if (lib_name) |lname| { - new_var.lib_name = try sema.handleExternLibName(block, ty_src, lname); - } - - const result = try sema.addConstant( - var_ty, - try Value.Tag.variable.create(sema.arena, new_var), - ); - return result; + } })).toValue()); } fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -22861,7 +22873,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A const body = sema.code.extra[extra_index..][0..body_len]; extra_index += body.len; - const ty = Type.const_slice_u8; + const ty = Type.slice_const_u8; const val = try 
sema.resolveGenericBody(block, section_src, body, inst, ty, "linksection must be comptime-known"); if (val.isGenericPoison()) { break :blk FuncLinkSection{ .generic = {} }; @@ -23133,10 +23145,10 @@ fn resolveExternOptions( src: LazySrcLoc, zir_ref: Zir.Inst.Ref, ) CompileError!std.builtin.ExternOptions { + const mod = sema.mod; const options_inst = try sema.resolveInst(zir_ref); const extern_options_ty = try sema.getBuiltinType("ExternOptions"); const options = try sema.coerce(block, extern_options_ty, options_inst, src); - const mod = sema.mod; const name_src = sema.maybeOptionsSrc(block, src, "name"); const library_src = sema.maybeOptionsSrc(block, src, "library"); @@ -23145,7 +23157,7 @@ fn resolveExternOptions( const name_ref = try sema.fieldVal(block, src, options, "name", name_src); const name_val = try sema.resolveConstValue(block, name_src, name_ref, "name of the extern symbol must be comptime-known"); - const name = try name_val.toAllocatedBytes(Type.const_slice_u8, sema.arena, mod); + const name = try name_val.toAllocatedBytes(Type.slice_const_u8, sema.arena, mod); const library_name_inst = try sema.fieldVal(block, src, options, "library_name", library_src); const library_name_val = try sema.resolveConstValue(block, library_src, library_name_inst, "library in which extern symbol is must be comptime-known"); @@ -23157,9 +23169,8 @@ fn resolveExternOptions( const is_thread_local = try sema.fieldVal(block, src, options, "is_thread_local", thread_local_src); const is_thread_local_val = try sema.resolveConstValue(block, thread_local_src, is_thread_local, "threadlocality of the extern symbol must be comptime-known"); - const library_name = if (!library_name_val.isNull(mod)) blk: { - const payload = library_name_val.castTag(.opt_payload).?.data; - const library_name = try payload.toAllocatedBytes(Type.const_slice_u8, sema.arena, mod); + const library_name = if (library_name_val.optionalValue(mod)) |payload| blk: { + const library_name = try payload.toAllocatedBytes(Type.slice_const_u8, sema.arena, mod); if (library_name.len == 0) { return sema.fail(block, library_src, "library name cannot be empty", .{}); } @@ -23227,40 +23238,36 @@ fn zirBuiltinExtern( new_decl.name = try sema.gpa.dupeZ(u8, options.name); { - var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa); - errdefer new_decl_arena.deinit(); - const new_decl_arena_allocator = new_decl_arena.allocator(); - - const new_var = try new_decl_arena_allocator.create(Module.Var); - new_var.* = .{ - .owner_decl = sema.owner_decl_index, - .init = Value.@"unreachable", + const new_var = try mod.intern(.{ .variable = .{ + .ty = ty.ip_index, + .init = .none, + .decl = sema.owner_decl_index, .is_extern = true, - .is_mutable = false, + .is_const = true, .is_threadlocal = options.is_thread_local, .is_weak_linkage = options.linkage == .Weak, - .lib_name = null, - }; + } }); new_decl.src_line = sema.owner_decl.src_line; // We only access this decl through the decl_ref with the correct type created // below, so this type doesn't matter - new_decl.ty = Type.anyopaque; - new_decl.val = try Value.Tag.variable.create(new_decl_arena_allocator, new_var); + new_decl.ty = ty; + new_decl.val = new_var.toValue(); new_decl.@"align" = 0; new_decl.@"linksection" = null; new_decl.has_tv = true; new_decl.analysis = .complete; new_decl.generation = mod.generation; - - try new_decl.finalizeNewArena(&new_decl_arena); } try mod.declareDeclDependency(sema.owner_decl_index, new_decl_index); try sema.ensureDeclAnalyzed(new_decl_index); - const ref = try 
Value.Tag.decl_ref.create(sema.arena, new_decl_index); - return sema.addConstant(ty, ref); + const ref = try mod.intern(.{ .ptr = .{ + .ty = (try mod.singleConstPtrType(ty)).ip_index, + .addr = .{ .decl = new_decl_index }, + } }); + return sema.addConstant(ty, ref.toValue()); } fn zirWorkItem( @@ -24117,7 +24124,6 @@ fn fieldVal( const mod = sema.mod; const gpa = sema.gpa; - const arena = sema.arena; const ip = &mod.intern_pool; const object_src = src; // TODO better source location const object_ty = sema.typeOf(object); @@ -24221,13 +24227,14 @@ fn fieldVal( else => unreachable, } - return sema.addConstant( - if (!child_type.isAnyError(mod)) - child_type - else - try mod.singleErrorSetTypeNts(name), - try Value.Tag.@"error".create(arena, .{ .name = ip.stringToSlice(name) }), - ); + const error_set_type = if (!child_type.isAnyError(mod)) + child_type + else + try mod.singleErrorSetTypeNts(name); + return sema.addConstant(error_set_type, (try mod.intern(.{ .err = .{ + .ty = error_set_type.ip_index, + .name = name, + } })).toValue()); }, .Union => { if (child_type.getNamespaceIndex(mod).unwrap()) |namespace| { @@ -24368,14 +24375,13 @@ fn fieldPtr( }); if (try sema.resolveDefinedValue(block, object_ptr_src, inner_ptr)) |val| { - return sema.addConstant( - result_ty, - try Value.Tag.field_ptr.create(sema.arena, .{ - .container_ptr = val, - .container_ty = inner_ty, - .field_index = Value.Payload.Slice.ptr_index, - }), - ); + return sema.addConstant(result_ty, (try mod.intern(.{ .ptr = .{ + .ty = result_ty.ip_index, + .addr = .{ .field = .{ + .base = val.ip_index, + .index = Value.slice_ptr_index, + } }, + } })).toValue()); } try sema.requireRuntimeBlock(block, src, null); @@ -24389,14 +24395,13 @@ fn fieldPtr( }); if (try sema.resolveDefinedValue(block, object_ptr_src, inner_ptr)) |val| { - return sema.addConstant( - result_ty, - try Value.Tag.field_ptr.create(sema.arena, .{ - .container_ptr = val, - .container_ty = inner_ty, - .field_index = Value.Payload.Slice.len_index, - }), - ); + return sema.addConstant(result_ty, (try mod.intern(.{ .ptr = .{ + .ty = result_ty.ip_index, + .addr = .{ .field = .{ + .base = val.ip_index, + .index = Value.slice_len_index, + } }, + } })).toValue()); } try sema.requireRuntimeBlock(block, src, null); @@ -24442,14 +24447,16 @@ fn fieldPtr( var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); + const error_set_type = if (!child_type.isAnyError(mod)) + child_type + else + try mod.singleErrorSetTypeNts(name); return sema.analyzeDeclRef(try anon_decl.finish( - if (!child_type.isAnyError(mod)) - child_type - else - try mod.singleErrorSetTypeNts(name), - try Value.Tag.@"error".create(anon_decl.arena(), .{ - .name = ip.stringToSlice(name), - }), + error_set_type, + (try mod.intern(.{ .err = .{ + .ty = error_set_type.ip_index, + .name = name, + } })).toValue(), 0, // default alignment )); }, @@ -24714,14 +24721,13 @@ fn finishFieldCallBind( } if (try sema.resolveDefinedValue(block, src, object_ptr)) |struct_ptr_val| { - const pointer = try sema.addConstant( - ptr_field_ty, - try Value.Tag.field_ptr.create(arena, .{ - .container_ptr = struct_ptr_val, - .container_ty = container_ty, - .field_index = field_index, - }), - ); + const pointer = try sema.addConstant(ptr_field_ty, (try mod.intern(.{ .ptr = .{ + .ty = ptr_field_ty.ip_index, + .addr = .{ .field = .{ + .base = struct_ptr_val.ip_index, + .index = field_index, + } }, + } })).toValue()); return .{ .direct = try sema.analyzeLoad(block, src, pointer, src) }; } @@ -24901,22 +24907,22 @@ fn 
structFieldPtrByIndex( const ptr_field_ty = try Type.ptr(sema.arena, mod, ptr_ty_data); if (field.is_comptime) { - const val = try Value.Tag.comptime_field_ptr.create(sema.arena, .{ - .field_ty = field.ty, - .field_val = try field.default_val.copy(sema.arena), - }); - return sema.addConstant(ptr_field_ty, val); + const val = try mod.intern(.{ .ptr = .{ + .ty = ptr_field_ty.ip_index, + .addr = .{ .comptime_field = try field.default_val.intern(field.ty, mod) }, + } }); + return sema.addConstant(ptr_field_ty, val.toValue()); } if (try sema.resolveDefinedValue(block, src, struct_ptr)) |struct_ptr_val| { - return sema.addConstant( - ptr_field_ty, - try Value.Tag.field_ptr.create(sema.arena, .{ - .container_ptr = struct_ptr_val, - .container_ty = struct_ptr_ty.childType(mod), - .field_index = field_index, - }), - ); + const val = try mod.intern(.{ .ptr = .{ + .ty = ptr_field_ty.ip_index, + .addr = .{ .field = .{ + .base = try struct_ptr_val.intern(struct_ptr_ty, mod), + .index = field_index, + } }, + } }); + return sema.addConstant(ptr_field_ty, val.toValue()); } try sema.requireRuntimeBlock(block, src, null); @@ -24955,7 +24961,7 @@ fn structFieldVal( if ((try sema.typeHasOnePossibleValue(field.ty))) |opv| { return sema.addConstant(field.ty, opv); } - return sema.addConstant(field.ty, try struct_val.fieldValue(field.ty, mod, field_index)); + return sema.addConstant(field.ty, try struct_val.fieldValue(mod, field_index)); } try sema.requireRuntimeBlock(block, src, null); @@ -24999,7 +25005,7 @@ fn tupleFieldIndex( field_name_src: LazySrcLoc, ) CompileError!u32 { const mod = sema.mod; - assert(!std.mem.eql(u8, field_name, "len")); + assert(!mem.eql(u8, field_name, "len")); if (std.fmt.parseUnsigned(u32, field_name, 10)) |field_index| { if (field_index < tuple_ty.structFieldCount(mod)) return field_index; return sema.fail(block, field_name_src, "index '{s}' out of bounds of tuple '{}'", .{ @@ -25109,14 +25115,13 @@ fn unionFieldPtr( }, .Packed, .Extern => {}, } - return sema.addConstant( - ptr_field_ty, - try Value.Tag.field_ptr.create(arena, .{ - .container_ptr = union_ptr_val, - .container_ty = union_ty, - .field_index = field_index, - }), - ); + return sema.addConstant(ptr_field_ty, (try mod.intern(.{ .ptr = .{ + .ty = ptr_field_ty.ip_index, + .addr = .{ .field = .{ + .base = union_ptr_val.ip_index, + .index = field_index, + } }, + } })).toValue()); } try sema.requireRuntimeBlock(block, src, null); @@ -25267,7 +25272,7 @@ fn elemPtrOneLayerOnly( const ptr_val = maybe_ptr_val orelse break :rs indexable_src; const index_val = maybe_index_val orelse break :rs elem_index_src; const index = @intCast(usize, index_val.toUnsignedInt(mod)); - const elem_ptr = try ptr_val.elemPtr(indexable_ty, sema.arena, index, mod); + const elem_ptr = try ptr_val.elemPtr(indexable_ty, index, mod); const result_ty = try sema.elemPtrType(indexable_ty, index); return sema.addConstant(result_ty, elem_ptr); }; @@ -25313,7 +25318,7 @@ fn elemVal( const indexable_val = maybe_indexable_val orelse break :rs indexable_src; const index_val = maybe_index_val orelse break :rs elem_index_src; const index = @intCast(usize, index_val.toUnsignedInt(mod)); - const elem_ptr_val = try indexable_val.elemPtr(indexable_ty, sema.arena, index, mod); + const elem_ptr_val = try indexable_val.elemPtr(indexable_ty, index, mod); if (try sema.pointerDeref(block, indexable_src, elem_ptr_val, indexable_ty)) |elem_val| { return sema.addConstant(indexable_ty.elemType2(mod), elem_val); } @@ -25407,22 +25412,20 @@ fn tupleFieldPtr( }); if (try 
tuple_ty.structFieldValueComptime(mod, field_index)) |default_val| { - const val = try Value.Tag.comptime_field_ptr.create(sema.arena, .{ - .field_ty = field_ty, - .field_val = default_val, - }); - return sema.addConstant(ptr_field_ty, val); + return sema.addConstant(ptr_field_ty, (try mod.intern(.{ .ptr = .{ + .ty = ptr_field_ty.ip_index, + .addr = .{ .comptime_field = default_val.ip_index }, + } })).toValue()); } if (try sema.resolveMaybeUndefVal(tuple_ptr)) |tuple_ptr_val| { - return sema.addConstant( - ptr_field_ty, - try Value.Tag.field_ptr.create(sema.arena, .{ - .container_ptr = tuple_ptr_val, - .container_ty = tuple_ty, - .field_index = field_index, - }), - ); + return sema.addConstant(ptr_field_ty, (try mod.intern(.{ .ptr = .{ + .ty = ptr_field_ty.ip_index, + .addr = .{ .field = .{ + .base = tuple_ptr_val.ip_index, + .index = field_index, + } }, + } })).toValue()); } if (!init) { @@ -25463,7 +25466,7 @@ fn tupleField( if (try sema.resolveMaybeUndefVal(tuple)) |tuple_val| { if (tuple_val.isUndef(mod)) return sema.addConstUndef(field_ty); - return sema.addConstant(field_ty, try tuple_val.fieldValue(tuple_ty, mod, field_index)); + return sema.addConstant(field_ty, try tuple_val.fieldValue(mod, field_index)); } try sema.validateRuntimeElemAccess(block, field_index_src, field_ty, tuple_ty, tuple_src); @@ -25575,7 +25578,7 @@ fn elemPtrArray( return sema.addConstUndef(elem_ptr_ty); } if (offset) |index| { - const elem_ptr = try array_ptr_val.elemPtr(array_ptr_ty, sema.arena, index, mod); + const elem_ptr = try array_ptr_val.elemPtr(array_ptr_ty, index, mod); return sema.addConstant(elem_ptr_ty, elem_ptr); } } @@ -25631,7 +25634,7 @@ fn elemValSlice( const sentinel_label: []const u8 = if (slice_sent) " +1 (sentinel)" else ""; return sema.fail(block, elem_index_src, "index {d} outside slice of length {d}{s}", .{ index, slice_len, sentinel_label }); } - const elem_ptr_val = try slice_val.elemPtr(slice_ty, sema.arena, index, mod); + const elem_ptr_val = try slice_val.elemPtr(slice_ty, index, mod); if (try sema.pointerDeref(block, slice_src, elem_ptr_val, slice_ty)) |elem_val| { return sema.addConstant(elem_ty, elem_val); } @@ -25691,7 +25694,7 @@ fn elemPtrSlice( const sentinel_label: []const u8 = if (slice_sent) " +1 (sentinel)" else ""; return sema.fail(block, elem_index_src, "index {d} outside slice of length {d}{s}", .{ index, slice_len, sentinel_label }); } - const elem_ptr_val = try slice_val.elemPtr(slice_ty, sema.arena, index, mod); + const elem_ptr_val = try slice_val.elemPtr(slice_ty, index, mod); return sema.addConstant(elem_ptr_ty, elem_ptr_val); } } @@ -25851,7 +25854,7 @@ fn coerceExtra( // Function body to function pointer. if (inst_ty.zigTypeTag(mod) == .Fn) { const fn_val = try sema.resolveConstValue(block, .unneeded, inst, ""); - const fn_decl = fn_val.pointerDecl().?; + const fn_decl = fn_val.pointerDecl(mod).?; const inst_as_ptr = try sema.analyzeDeclRef(fn_decl); return sema.coerce(block, dest_ty, inst_as_ptr, inst_src); } @@ -26080,14 +26083,14 @@ fn coerceExtra( if (inst_child_ty.structFieldCount(mod) == 0) { // Optional slice is represented with a null pointer so // we use a dummy pointer value with the required alignment. 
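                    // [Editor's note] The change just below replaces the old
                    // Value.Tag.slice payload with a single mod.intern call.
                    // Hedged sketch of the interning pattern this patch uses for
                    // slice constants (shapes taken from the surrounding hunk,
                    // not a definitive API reference):
                    //
                    //     const slice = try mod.intern(.{ .ptr = .{
                    //         .ty = dest_ty.ip_index, // the destination slice type
                    //         .addr = .{ .int = align_val.ip_index }, // dummy aligned address
                    //         .len = (try mod.intValue(Type.usize, 0)).ip_index,
                    //     } });
                    //     return sema.addConstant(dest_ty, slice.toValue());
                    //
                    // where `align_val` stands for the alignment-derived integer
                    // value computed in the hunk below.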
- const slice_val = try Value.Tag.slice.create(sema.arena, .{ - .ptr = if (dest_info.@"align" != 0) + return sema.addConstant(dest_ty, (try mod.intern(.{ .ptr = .{ + .ty = dest_ty.ip_index, + .addr = .{ .int = (if (dest_info.@"align" != 0) try mod.intValue(Type.usize, dest_info.@"align") else - try dest_info.pointee_type.lazyAbiAlignment(mod, sema.arena), - .len = try mod.intValue(Type.usize, 0), - }); - return sema.addConstant(dest_ty, slice_val); + try dest_info.pointee_type.lazyAbiAlignment(mod)).ip_index }, + .len = (try mod.intValue(Type.usize, 0)).ip_index, + } })).toValue()); } // pointer to tuple to slice @@ -26255,7 +26258,8 @@ fn coerceExtra( .EnumLiteral => { // enum literal to enum const val = try sema.resolveConstValue(block, .unneeded, inst, ""); - const bytes = val.castTag(.enum_literal).?.data; + const string = mod.intern_pool.indexToKey(val.ip_index).enum_literal; + const bytes = mod.intern_pool.stringToSlice(string); const field_index = dest_ty.enumFieldIndex(bytes, mod) orelse { const msg = msg: { const msg = try sema.errMsg( @@ -26292,26 +26296,30 @@ fn coerceExtra( if (maybe_inst_val) |inst_val| { switch (inst_val.ip_index) { .undef => return sema.addConstUndef(dest_ty), - .none => switch (inst_val.tag()) { - .eu_payload => { - const payload = try sema.addConstant( - inst_ty.errorUnionPayload(mod), - inst_val.castTag(.eu_payload).?.data, - ); - return sema.wrapErrorUnionPayload(block, dest_ty, payload, inst_src) catch |err| switch (err) { - error.NotCoercible => break :eu, - else => |e| return e, - }; + else => switch (mod.intern_pool.indexToKey(inst_val.ip_index)) { + .error_union => |error_union| switch (error_union.val) { + .err_name => |err_name| { + const error_set_ty = inst_ty.errorUnionSet(mod); + const error_set_val = try sema.addConstant(error_set_ty, (try mod.intern(.{ .err = .{ + .ty = error_set_ty.ip_index, + .name = err_name, + } })).toValue()); + return sema.wrapErrorUnionSet(block, dest_ty, error_set_val, inst_src); + }, + .payload => |payload| { + const payload_val = try sema.addConstant( + inst_ty.errorUnionPayload(mod), + payload.toValue(), + ); + return sema.wrapErrorUnionPayload(block, dest_ty, payload_val, inst_src) catch |err| switch (err) { + error.NotCoercible => break :eu, + else => |e| return e, + }; + }, }, - else => {}, + else => unreachable, }, - else => {}, } - const error_set = try sema.addConstant( - inst_ty.errorUnionSet(mod), - inst_val, - ); - return sema.wrapErrorUnionSet(block, dest_ty, error_set, inst_src); } }, .ErrorSet => { @@ -27029,7 +27037,7 @@ fn coerceInMemoryAllowedErrorSets( }, } - if (dst_ies.func == sema.owner_func) { + if (dst_ies.func == sema.owner_func_index.unwrap()) { // We are trying to coerce an error set to the current function's // inferred error set. 
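        // [Editor's note] Here and in analyzeIsNonErrComptimeOnly further
        // below, the comparison `dst_ies.func == sema.owner_func` becomes
        // `dst_ies.func == sema.owner_func_index.unwrap()`: the owner function
        // appears to be tracked as an optional index rather than a pointer in
        // this patch, so identity is checked by comparing unwrapped indices.
        // Illustrative shape only:
        //
        //     if (dst_ies.func == sema.owner_func_index.unwrap()) {
        //         // coercion target is this function's own inferred error set
        //     }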
try dst_ies.addErrorSet(src_ty, ip, gpa); @@ -27323,7 +27331,7 @@ fn coerceVarArgParam( ), .Fn => blk: { const fn_val = try sema.resolveConstValue(block, .unneeded, inst, ""); - const fn_decl = fn_val.pointerDecl().?; + const fn_decl = fn_val.pointerDecl(mod).?; break :blk try sema.analyzeDeclRef(fn_decl); }, .Array => return sema.fail(block, inst_src, "arrays must be passed by reference to variadic function", .{}), @@ -27441,7 +27449,7 @@ fn storePtr2( try sema.checkPtrIsNotComptimeMutable(block, ptr_val, ptr_src, operand_src); break :rs operand_src; }; - if (ptr_val.isComptimeMutablePtr()) { + if (ptr_val.isComptimeMutablePtr(mod)) { try sema.storePtrVal(block, src, ptr_val, operand_val, elem_ty); return; } else break :rs ptr_src; @@ -27593,7 +27601,7 @@ fn storePtrVal( } const ComptimePtrMutationKit = struct { - decl_ref_mut: Value.Payload.DeclRefMut.Data, + decl_ref_mut: InternPool.Key.Ptr.Addr.MutDecl, pointee: union(enum) { /// The pointer type matches the actual comptime Value so a direct /// modification is possible. @@ -27619,12 +27627,12 @@ const ComptimePtrMutationKit = struct { decl_arena: std.heap.ArenaAllocator = undefined, fn beginArena(self: *ComptimePtrMutationKit, mod: *Module) Allocator { - const decl = mod.declPtr(self.decl_ref_mut.decl_index); + const decl = mod.declPtr(self.decl_ref_mut.decl); return decl.value_arena.?.acquire(mod.gpa, &self.decl_arena); } fn finishArena(self: *ComptimePtrMutationKit, mod: *Module) void { - const decl = mod.declPtr(self.decl_ref_mut.decl_index); + const decl = mod.declPtr(self.decl_ref_mut.decl); decl.value_arena.?.release(&self.decl_arena); self.decl_arena = undefined; } @@ -27637,6 +27645,7 @@ fn beginComptimePtrMutation( ptr_val: Value, ptr_elem_ty: Type, ) CompileError!ComptimePtrMutationKit { + if (true) unreachable; const mod = sema.mod; switch (ptr_val.tag()) { .decl_ref_mut => { @@ -28169,7 +28178,7 @@ fn beginComptimePtrMutation( }, } }, - .decl_ref => unreachable, // isComptimeMutablePtr() has been checked already + .decl_ref => unreachable, // isComptimeMutablePtr has been checked already else => unreachable, } } @@ -28189,7 +28198,7 @@ fn beginComptimePtrMutationInner( const decl = mod.declPtr(decl_ref_mut.decl_index); var decl_arena: std.heap.ArenaAllocator = undefined; - const allocator = decl.value_arena.?.acquire(mod.gpa, &decl_arena); + const allocator = decl.value_arena.?.acquire(sema.gpa, &decl_arena); defer decl.value_arena.?.release(&decl_arena); decl_val.* = try decl_val.unintern(allocator, mod); @@ -28273,44 +28282,83 @@ fn beginComptimePtrLoad( const mod = sema.mod; const target = mod.getTarget(); - var deref: ComptimePtrLoadKit = switch (ptr_val.ip_index) { - .null_value => { - return sema.fail(block, src, "attempt to use null value", .{}); - }, - - .none => switch (ptr_val.tag()) { - .decl_ref, - .decl_ref_mut, - => blk: { - const decl_index = switch (ptr_val.tag()) { - .decl_ref => ptr_val.castTag(.decl_ref).?.data, - .decl_ref_mut => ptr_val.castTag(.decl_ref_mut).?.data.decl_index, + var deref: ComptimePtrLoadKit = switch (mod.intern_pool.indexToKey(ptr_val.ip_index)) { + .ptr => |ptr| switch (ptr.addr) { + .decl, .mut_decl => blk: { + const decl_index = switch (ptr.addr) { + .decl => |decl| decl, + .mut_decl => |mut_decl| mut_decl.decl, else => unreachable, }; - const is_mutable = ptr_val.tag() == .decl_ref_mut; const decl = mod.declPtr(decl_index); const decl_tv = try decl.typedValue(); - if (decl_tv.val.tagIsVariable()) return error.RuntimeLoad; + if (decl.getVariable(mod) != null) return 
error.RuntimeLoad; const layout_defined = decl.ty.hasWellDefinedLayout(mod); break :blk ComptimePtrLoadKit{ .parent = if (layout_defined) .{ .tv = decl_tv, .byte_offset = 0 } else null, .pointee = decl_tv, - .is_mutable = is_mutable, + .is_mutable = false, .ty_without_well_defined_layout = if (!layout_defined) decl.ty else null, }; }, + .int => return error.RuntimeLoad, + .eu_payload, .opt_payload => |container_ptr| blk: { + const container_ty = mod.intern_pool.typeOf(container_ptr).toType().childType(mod); + const payload_ty = ptr.ty.toType().childType(mod); + var deref = try sema.beginComptimePtrLoad(block, src, container_ptr.toValue(), container_ty); - .elem_ptr => blk: { - const elem_ptr = ptr_val.castTag(.elem_ptr).?.data; - const elem_ty = elem_ptr.elem_ty; - var deref = try sema.beginComptimePtrLoad(block, src, elem_ptr.array_ptr, null); + // eu_payload and opt_payload never have a well-defined layout + if (deref.parent != null) { + deref.parent = null; + deref.ty_without_well_defined_layout = container_ty; + } + + if (deref.pointee) |*tv| { + const coerce_in_mem_ok = + (try sema.coerceInMemoryAllowed(block, container_ty, tv.ty, false, target, src, src)) == .ok or + (try sema.coerceInMemoryAllowed(block, tv.ty, container_ty, false, target, src, src)) == .ok; + if (coerce_in_mem_ok) { + const payload_val = switch (mod.intern_pool.indexToKey(tv.val.ip_index)) { + .error_union => |error_union| switch (error_union.val) { + .err_name => |err_name| return sema.fail(block, src, "attempt to unwrap error: {s}", .{mod.intern_pool.stringToSlice(err_name)}), + .payload => |payload| payload, + }, + .opt => |opt| switch (opt.val) { + .none => return sema.fail(block, src, "attempt to use null value", .{}), + else => opt.val, + }, + else => unreachable, + }; + tv.* = TypedValue{ .ty = payload_ty, .val = payload_val.toValue() }; + break :blk deref; + } + } + deref.pointee = null; + break :blk deref; + }, + .comptime_field => |comptime_field| blk: { + const field_ty = mod.intern_pool.typeOf(comptime_field).toType(); + break :blk ComptimePtrLoadKit{ + .parent = null, + .pointee = .{ .ty = field_ty, .val = comptime_field.toValue() }, + .is_mutable = false, + .ty_without_well_defined_layout = field_ty, + }; + }, + .elem => |elem_ptr| blk: { + const elem_ty = ptr.ty.toType().childType(mod); + var deref = try sema.beginComptimePtrLoad(block, src, elem_ptr.base.toValue(), null); // This code assumes that elem_ptrs have been "flattened" in order for direct dereference // to succeed, meaning that elem ptrs of the same elem_ty are coalesced. 
Here we check that // our parent is not an elem_ptr with the same elem_ty, since that would be "unflattened" - if (elem_ptr.array_ptr.castTag(.elem_ptr)) |parent_elem_ptr| { - assert(!(parent_elem_ptr.data.elem_ty.eql(elem_ty, mod))); + switch (mod.intern_pool.indexToKey(elem_ptr.base)) { + .ptr => |base_ptr| switch (base_ptr.addr) { + .elem => |base_elem| assert(!mod.intern_pool.typeOf(base_elem.base).toType().elemType2(mod).eql(elem_ty, mod)), + else => {}, + }, + else => {}, } if (elem_ptr.index != 0) { @@ -28327,7 +28375,7 @@ fn beginComptimePtrLoad( } } - // If we're loading an elem_ptr that was derived from a different type + // If we're loading an elem that was derived from a different type // than the true type of the underlying decl, we cannot deref directly const ty_matches = if (deref.pointee != null and deref.pointee.?.ty.isArrayOrVector(mod)) x: { const deref_elem_ty = deref.pointee.?.ty.childType(mod); @@ -28373,31 +28421,25 @@ fn beginComptimePtrLoad( }; break :blk deref; }, + .field => |field_ptr| blk: { + const field_index = @intCast(u32, field_ptr.index); + const container_ty = mod.intern_pool.typeOf(field_ptr.base).toType().childType(mod); + var deref = try sema.beginComptimePtrLoad(block, src, field_ptr.base.toValue(), container_ty); - .slice => blk: { - const slice = ptr_val.castTag(.slice).?.data; - break :blk try sema.beginComptimePtrLoad(block, src, slice.ptr, null); - }, - - .field_ptr => blk: { - const field_ptr = ptr_val.castTag(.field_ptr).?.data; - const field_index = @intCast(u32, field_ptr.field_index); - var deref = try sema.beginComptimePtrLoad(block, src, field_ptr.container_ptr, field_ptr.container_ty); - - if (field_ptr.container_ty.hasWellDefinedLayout(mod)) { - const struct_obj = mod.typeToStruct(field_ptr.container_ty); + if (container_ty.hasWellDefinedLayout(mod)) { + const struct_obj = mod.typeToStruct(container_ty); if (struct_obj != null and struct_obj.?.layout == .Packed) { // packed structs are not byte addressable deref.parent = null; } else if (deref.parent) |*parent| { // Update the byte offset (in-place) - try sema.resolveTypeLayout(field_ptr.container_ty); - const field_offset = field_ptr.container_ty.structFieldOffset(field_index, mod); + try sema.resolveTypeLayout(container_ty); + const field_offset = container_ty.structFieldOffset(field_index, mod); parent.byte_offset = try sema.usizeCast(block, src, parent.byte_offset + field_offset); } } else { deref.parent = null; - deref.ty_without_well_defined_layout = field_ptr.container_ty; + deref.ty_without_well_defined_layout = container_ty; } const tv = deref.pointee orelse { @@ -28405,294 +28447,40 @@ fn beginComptimePtrLoad( break :blk deref; }; const coerce_in_mem_ok = - (try sema.coerceInMemoryAllowed(block, field_ptr.container_ty, tv.ty, false, target, src, src)) == .ok or - (try sema.coerceInMemoryAllowed(block, tv.ty, field_ptr.container_ty, false, target, src, src)) == .ok; + (try sema.coerceInMemoryAllowed(block, container_ty, tv.ty, false, target, src, src)) == .ok or + (try sema.coerceInMemoryAllowed(block, tv.ty, container_ty, false, target, src, src)) == .ok; if (!coerce_in_mem_ok) { deref.pointee = null; break :blk deref; } - if (field_ptr.container_ty.isSlice(mod)) { - const slice_val = tv.val.castTag(.slice).?.data; + if (container_ty.isSlice(mod)) { deref.pointee = switch (field_index) { - Value.Payload.Slice.ptr_index => TypedValue{ - .ty = field_ptr.container_ty.slicePtrFieldType(mod), - .val = slice_val.ptr, + Value.slice_ptr_index => TypedValue{ + .ty = 
container_ty.slicePtrFieldType(mod), + .val = tv.val.slicePtr(mod), }, - Value.Payload.Slice.len_index => TypedValue{ + Value.slice_len_index => TypedValue{ .ty = Type.usize, - .val = slice_val.len, + .val = mod.intern_pool.indexToKey(tv.val.ip_index).ptr.len.toValue(), }, else => unreachable, }; } else { - const field_ty = field_ptr.container_ty.structFieldType(field_index, mod); + const field_ty = container_ty.structFieldType(field_index, mod); deref.pointee = TypedValue{ .ty = field_ty, - .val = try tv.val.fieldValue(tv.ty, mod, field_index), + .val = try tv.val.fieldValue(mod, field_index), }; } break :blk deref; }, - - .comptime_field_ptr => blk: { - const comptime_field_ptr = ptr_val.castTag(.comptime_field_ptr).?.data; - break :blk ComptimePtrLoadKit{ - .parent = null, - .pointee = .{ .ty = comptime_field_ptr.field_ty, .val = comptime_field_ptr.field_val }, - .is_mutable = false, - .ty_without_well_defined_layout = comptime_field_ptr.field_ty, - }; - }, - - .opt_payload_ptr, - .eu_payload_ptr, - => blk: { - const payload_ptr = ptr_val.cast(Value.Payload.PayloadPtr).?.data; - const payload_ty = switch (ptr_val.tag()) { - .eu_payload_ptr => payload_ptr.container_ty.errorUnionPayload(mod), - .opt_payload_ptr => payload_ptr.container_ty.optionalChild(mod), - else => unreachable, - }; - var deref = try sema.beginComptimePtrLoad(block, src, payload_ptr.container_ptr, payload_ptr.container_ty); - - // eu_payload_ptr and opt_payload_ptr never have a well-defined layout - if (deref.parent != null) { - deref.parent = null; - deref.ty_without_well_defined_layout = payload_ptr.container_ty; - } - - if (deref.pointee) |*tv| { - const coerce_in_mem_ok = - (try sema.coerceInMemoryAllowed(block, payload_ptr.container_ty, tv.ty, false, target, src, src)) == .ok or - (try sema.coerceInMemoryAllowed(block, tv.ty, payload_ptr.container_ty, false, target, src, src)) == .ok; - if (coerce_in_mem_ok) { - const payload_val = switch (ptr_val.tag()) { - .eu_payload_ptr => if (tv.val.castTag(.eu_payload)) |some| some.data else { - return sema.fail(block, src, "attempt to unwrap error: {s}", .{tv.val.castTag(.@"error").?.data.name}); - }, - .opt_payload_ptr => if (tv.val.castTag(.opt_payload)) |some| some.data else opt: { - if (tv.val.isNull(mod)) return sema.fail(block, src, "attempt to use null value", .{}); - break :opt tv.val; - }, - else => unreachable, - }; - tv.* = TypedValue{ .ty = payload_ty, .val = payload_val }; - break :blk deref; - } - } - deref.pointee = null; - break :blk deref; - }, - .opt_payload => blk: { - const opt_payload = ptr_val.castTag(.opt_payload).?.data; - break :blk try sema.beginComptimePtrLoad(block, src, opt_payload, null); - }, - - .variable, - .extern_fn, - .function, - => return error.RuntimeLoad, - - else => unreachable, }, - else => switch (mod.intern_pool.indexToKey(ptr_val.ip_index)) { - .int => return error.RuntimeLoad, - .ptr => |ptr| switch (ptr.addr) { - .@"var", .int => return error.RuntimeLoad, - .decl, .mut_decl => blk: { - const decl_index = switch (ptr.addr) { - .decl => |decl| decl, - .mut_decl => |mut_decl| mut_decl.decl, - else => unreachable, - }; - const decl = mod.declPtr(decl_index); - const decl_tv = try decl.typedValue(); - if (decl_tv.val.tagIsVariable()) return error.RuntimeLoad; - - const layout_defined = decl.ty.hasWellDefinedLayout(mod); - break :blk ComptimePtrLoadKit{ - .parent = if (layout_defined) .{ .tv = decl_tv, .byte_offset = 0 } else null, - .pointee = decl_tv, - .is_mutable = false, - .ty_without_well_defined_layout = if (!layout_defined) 
decl.ty else null, - }; - }, - .eu_payload, .opt_payload => |container_ptr| blk: { - const container_ty = mod.intern_pool.typeOf(container_ptr).toType().childType(mod); - const payload_ty = ptr.ty.toType().childType(mod); - var deref = try sema.beginComptimePtrLoad(block, src, container_ptr.toValue(), container_ty); - - // eu_payload_ptr and opt_payload_ptr never have a well-defined layout - if (deref.parent != null) { - deref.parent = null; - deref.ty_without_well_defined_layout = container_ty; - } - - if (deref.pointee) |*tv| { - const coerce_in_mem_ok = - (try sema.coerceInMemoryAllowed(block, container_ty, tv.ty, false, target, src, src)) == .ok or - (try sema.coerceInMemoryAllowed(block, tv.ty, container_ty, false, target, src, src)) == .ok; - if (coerce_in_mem_ok) { - const payload_val = switch (ptr_val.tag()) { - .eu_payload_ptr => if (tv.val.castTag(.eu_payload)) |some| some.data else { - return sema.fail(block, src, "attempt to unwrap error: {s}", .{tv.val.castTag(.@"error").?.data.name}); - }, - .opt_payload_ptr => if (tv.val.castTag(.opt_payload)) |some| some.data else opt: { - if (tv.val.isNull(mod)) return sema.fail(block, src, "attempt to use null value", .{}); - break :opt tv.val; - }, - else => unreachable, - }; - tv.* = TypedValue{ .ty = payload_ty, .val = payload_val }; - break :blk deref; - } - } - deref.pointee = null; - break :blk deref; - }, - .comptime_field => |comptime_field| blk: { - const field_ty = mod.intern_pool.typeOf(comptime_field).toType(); - break :blk ComptimePtrLoadKit{ - .parent = null, - .pointee = .{ .ty = field_ty, .val = comptime_field.toValue() }, - .is_mutable = false, - .ty_without_well_defined_layout = field_ty, - }; - }, - .elem => |elem_ptr| blk: { - const elem_ty = ptr.ty.toType().childType(mod); - var deref = try sema.beginComptimePtrLoad(block, src, elem_ptr.base.toValue(), null); - - // This code assumes that elem_ptrs have been "flattened" in order for direct dereference - // to succeed, meaning that elem ptrs of the same elem_ty are coalesced. 
Here we check that - // our parent is not an elem_ptr with the same elem_ty, since that would be "unflattened" - switch (mod.intern_pool.indexToKey(elem_ptr.base)) { - .ptr => |base_ptr| switch (base_ptr.addr) { - .elem => |base_elem| assert(!mod.intern_pool.typeOf(base_elem.base).toType().elemType2(mod).eql(elem_ty, mod)), - else => {}, - }, - else => {}, - } - - if (elem_ptr.index != 0) { - if (elem_ty.hasWellDefinedLayout(mod)) { - if (deref.parent) |*parent| { - // Update the byte offset (in-place) - const elem_size = try sema.typeAbiSize(elem_ty); - const offset = parent.byte_offset + elem_size * elem_ptr.index; - parent.byte_offset = try sema.usizeCast(block, src, offset); - } - } else { - deref.parent = null; - deref.ty_without_well_defined_layout = elem_ty; - } - } - - // If we're loading an elem that was derived from a different type - // than the true type of the underlying decl, we cannot deref directly - const ty_matches = if (deref.pointee != null and deref.pointee.?.ty.isArrayOrVector(mod)) x: { - const deref_elem_ty = deref.pointee.?.ty.childType(mod); - break :x (try sema.coerceInMemoryAllowed(block, deref_elem_ty, elem_ty, false, target, src, src)) == .ok or - (try sema.coerceInMemoryAllowed(block, elem_ty, deref_elem_ty, false, target, src, src)) == .ok; - } else false; - if (!ty_matches) { - deref.pointee = null; - break :blk deref; - } - - var array_tv = deref.pointee.?; - const check_len = array_tv.ty.arrayLenIncludingSentinel(mod); - if (maybe_array_ty) |load_ty| { - // It's possible that we're loading a [N]T, in which case we'd like to slice - // the pointee array directly from our parent array. - if (load_ty.isArrayOrVector(mod) and load_ty.childType(mod).eql(elem_ty, mod)) { - const N = try sema.usizeCast(block, src, load_ty.arrayLenIncludingSentinel(mod)); - deref.pointee = if (elem_ptr.index + N <= check_len) TypedValue{ - .ty = try Type.array(sema.arena, N, null, elem_ty, mod), - .val = try array_tv.val.sliceArray(mod, sema.arena, elem_ptr.index, elem_ptr.index + N), - } else null; - break :blk deref; - } - } - - if (elem_ptr.index >= check_len) { - deref.pointee = null; - break :blk deref; - } - if (elem_ptr.index == check_len - 1) { - if (array_tv.ty.sentinel(mod)) |sent| { - deref.pointee = TypedValue{ - .ty = elem_ty, - .val = sent, - }; - break :blk deref; - } - } - deref.pointee = TypedValue{ - .ty = elem_ty, - .val = try array_tv.val.elemValue(mod, elem_ptr.index), - }; - break :blk deref; - }, - .field => |field_ptr| blk: { - const field_index = @intCast(u32, field_ptr.index); - const container_ty = mod.intern_pool.typeOf(field_ptr.base).toType().childType(mod); - var deref = try sema.beginComptimePtrLoad(block, src, field_ptr.base.toValue(), container_ty); - - if (container_ty.hasWellDefinedLayout(mod)) { - const struct_obj = mod.typeToStruct(container_ty); - if (struct_obj != null and struct_obj.?.layout == .Packed) { - // packed structs are not byte addressable - deref.parent = null; - } else if (deref.parent) |*parent| { - // Update the byte offset (in-place) - try sema.resolveTypeLayout(container_ty); - const field_offset = container_ty.structFieldOffset(field_index, mod); - parent.byte_offset = try sema.usizeCast(block, src, parent.byte_offset + field_offset); - } - } else { - deref.parent = null; - deref.ty_without_well_defined_layout = container_ty; - } - - const tv = deref.pointee orelse { - deref.pointee = null; - break :blk deref; - }; - const coerce_in_mem_ok = - (try sema.coerceInMemoryAllowed(block, container_ty, tv.ty, false, target, src, 
src)) == .ok or - (try sema.coerceInMemoryAllowed(block, tv.ty, container_ty, false, target, src, src)) == .ok; - if (!coerce_in_mem_ok) { - deref.pointee = null; - break :blk deref; - } - - if (container_ty.isSlice(mod)) { - const slice_val = tv.val.castTag(.slice).?.data; - deref.pointee = switch (field_index) { - Value.Payload.Slice.ptr_index => TypedValue{ - .ty = container_ty.slicePtrFieldType(mod), - .val = slice_val.ptr, - }, - Value.Payload.Slice.len_index => TypedValue{ - .ty = Type.usize, - .val = slice_val.len, - }, - else => unreachable, - }; - } else { - const field_ty = container_ty.structFieldType(field_index, mod); - deref.pointee = TypedValue{ - .ty = field_ty, - .val = try tv.val.fieldValue(tv.ty, mod, field_index), - }; - } - break :blk deref; - }, - }, - else => unreachable, + .opt => |opt| switch (opt.val) { + .none => return sema.fail(block, src, "attempt to use null value", .{}), + else => try sema.beginComptimePtrLoad(block, src, opt.val.toValue(), null), }, + else => unreachable, }; if (deref.pointee) |tv| { @@ -28853,7 +28641,7 @@ fn coerceCompatiblePtrs( } // The comptime Value representation is compatible with both types. return sema.addConstant(dest_ty, (try mod.intern_pool.getCoerced( - mod.gpa, + sema.gpa, try val.intern(inst_ty, mod), dest_ty.ip_index, )).toValue()); @@ -29538,7 +29326,7 @@ fn ensureDeclAnalyzed(sema: *Sema, decl_index: Decl.Index) CompileError!void { }; } -fn ensureFuncBodyAnalyzed(sema: *Sema, func: *Module.Fn) CompileError!void { +fn ensureFuncBodyAnalyzed(sema: *Sema, func: Module.Fn.Index) CompileError!void { sema.mod.ensureFuncBodyAnalyzed(func) catch |err| { if (sema.owner_func) |owner_func| { owner_func.state = .dependency_failure; @@ -29550,6 +29338,7 @@ fn ensureFuncBodyAnalyzed(sema: *Sema, func: *Module.Fn) CompileError!void { } fn refValue(sema: *Sema, block: *Block, ty: Type, val: Value) !Value { + const mod = sema.mod; var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); const decl = try anon_decl.finish( @@ -29558,15 +29347,23 @@ fn refValue(sema: *Sema, block: *Block, ty: Type, val: Value) !Value { 0, // default alignment ); try sema.maybeQueueFuncBodyAnalysis(decl); - try sema.mod.declareDeclDependency(sema.owner_decl_index, decl); - return try Value.Tag.decl_ref.create(sema.arena, decl); + try mod.declareDeclDependency(sema.owner_decl_index, decl); + const result = try mod.intern(.{ .ptr = .{ + .ty = (try mod.singleConstPtrType(ty)).ip_index, + .addr = .{ .decl = decl }, + } }); + return result.toValue(); } fn optRefValue(sema: *Sema, block: *Block, ty: Type, opt_val: ?Value) !Value { + const mod = sema.mod; const val = opt_val orelse return Value.null; const ptr_val = try sema.refValue(block, ty, val); - const result = try Value.Tag.opt_payload.create(sema.arena, ptr_val); - return result; + const result = try mod.intern(.{ .opt = .{ + .ty = (try mod.optionalType((try mod.singleConstPtrType(ty)).ip_index)).ip_index, + .val = ptr_val.ip_index, + } }); + return result.toValue(); } fn analyzeDeclRef(sema: *Sema, decl_index: Decl.Index) CompileError!Air.Inst.Ref { @@ -29587,10 +29384,7 @@ fn analyzeDeclRefInner(sema: *Sema, decl_index: Decl.Index, analyze_fn_body: boo const ptr_ty = try mod.ptrType(.{ .elem_type = decl_tv.ty.ip_index, .alignment = InternPool.Alignment.fromByteUnits(decl.@"align"), - .is_const = if (decl_tv.val.castTag(.variable)) |payload| - !payload.data.is_mutable - else - false, + .is_const = if (decl.getVariable(mod)) |variable| variable.is_const else false, .address_space = 
decl.@"addrspace", }); if (analyze_fn_body) { @@ -29608,8 +29402,8 @@ fn maybeQueueFuncBodyAnalysis(sema: *Sema, decl_index: Decl.Index) !void { const tv = try decl.typedValue(); if (tv.ty.zigTypeTag(mod) != .Fn) return; if (!try sema.fnHasRuntimeBits(tv.ty)) return; - const func = tv.val.castTag(.function) orelse return; // undef or extern_fn - try mod.ensureFuncBodyAnalysisQueued(func.data); + const func_index = mod.intern_pool.indexToFunc(tv.val.toIntern()).unwrap() orelse return; // undef or extern_fn + try mod.ensureFuncBodyAnalysisQueued(func_index); } fn analyzeRef( @@ -29622,14 +29416,12 @@ fn analyzeRef( if (try sema.resolveMaybeUndefVal(operand)) |val| { switch (val.ip_index) { - .none => switch (val.tag()) { - .extern_fn, .function => { - const decl_index = val.pointerDecl().?; - return sema.analyzeDeclRef(decl_index); - }, + .none => {}, + else => switch (sema.mod.intern_pool.indexToKey(val.ip_index)) { + .extern_func => |extern_func| return sema.analyzeDeclRef(extern_func.decl), + .func => |func| return sema.analyzeDeclRef(sema.mod.funcPtr(func.index).owner_decl), else => {}, }, - else => {}, } var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); @@ -29854,7 +29646,7 @@ fn analyzeIsNonErrComptimeOnly( if (other_ies.errors.count() != 0) break :blk; } - if (ies.func == sema.owner_func) { + if (ies.func == sema.owner_func_index.unwrap()) { // We're checking the inferred errorset of the current function and none of // its child inferred error sets contained any errors meaning that any value // so far with this type can't contain errors either. @@ -29873,7 +29665,7 @@ fn analyzeIsNonErrComptimeOnly( if (err_union.isUndef(mod)) { return sema.addConstUndef(Type.bool); } - if (err_union.getError() == null) { + if (err_union.getError(mod) == null) { return Air.Inst.Ref.bool_true; } else { return Air.Inst.Ref.bool_false; @@ -30137,7 +29929,7 @@ fn analyzeSlice( const end_int = end_val.getUnsignedInt(mod).?; const sentinel_index = try sema.usizeCast(block, end_src, end_int - start_int); - const elem_ptr = try ptr_val.elemPtr(new_ptr_ty, sema.arena, sentinel_index, sema.mod); + const elem_ptr = try ptr_val.elemPtr(new_ptr_ty, sentinel_index, sema.mod); const res = try sema.pointerDerefExtra(block, src, elem_ptr, elem_ty, false); const actual_sentinel = switch (res) { .runtime_load => break :sentinel_check, @@ -30233,7 +30025,7 @@ fn analyzeSlice( if (!new_ptr_val.isUndef(mod)) { return sema.addConstant(return_ty, (try mod.intern_pool.getCoerced( - mod.gpa, + sema.gpa, try new_ptr_val.intern(new_ptr_ty, mod), return_ty.ip_index, )).toValue()); @@ -30753,7 +30545,10 @@ fn wrapOptional( inst_src: LazySrcLoc, ) !Air.Inst.Ref { if (try sema.resolveMaybeUndefVal(inst)) |val| { - return sema.addConstant(dest_ty, try Value.Tag.opt_payload.create(sema.arena, val)); + return sema.addConstant(dest_ty, (try sema.mod.intern(.{ .opt = .{ + .ty = dest_ty.ip_index, + .val = val.ip_index, + } })).toValue()); } try sema.requireRuntimeBlock(block, inst_src, null); @@ -30771,7 +30566,10 @@ fn wrapErrorUnionPayload( const dest_payload_ty = dest_ty.errorUnionPayload(mod); const coerced = try sema.coerceExtra(block, dest_payload_ty, inst, inst_src, .{ .report_err = false }); if (try sema.resolveMaybeUndefVal(coerced)) |val| { - return sema.addConstant(dest_ty, try Value.Tag.eu_payload.create(sema.arena, val)); + return sema.addConstant(dest_ty, (try mod.intern(.{ .error_union = .{ + .ty = dest_ty.ip_index, + .val = .{ .payload = val.ip_index }, + } })).toValue()); } try 
sema.requireRuntimeBlock(block, inst_src, null); try sema.queueFullTypeResolution(dest_payload_ty); @@ -30794,27 +30592,20 @@ fn wrapErrorUnionSet( .anyerror_type => {}, else => switch (ip.indexToKey(dest_err_set_ty.ip_index)) { .error_set_type => |error_set_type| ok: { - const expected_name = val.castTag(.@"error").?.data.name; - if (ip.getString(expected_name).unwrap()) |expected_name_interned| { - if (error_set_type.nameIndex(ip, expected_name_interned) != null) - break :ok; - } + const expected_name = mod.intern_pool.indexToKey(val.ip_index).err.name; + if (error_set_type.nameIndex(ip, expected_name) != null) break :ok; return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty); }, .inferred_error_set_type => |ies_index| ok: { const ies = mod.inferredErrorSetPtr(ies_index); - const expected_name = val.castTag(.@"error").?.data.name; + const expected_name = mod.intern_pool.indexToKey(val.ip_index).err.name; // We carefully do this in an order that avoids unnecessarily // resolving the destination error set type. if (ies.is_anyerror) break :ok; - if (ip.getString(expected_name).unwrap()) |expected_name_interned| { - if (ies.errors.contains(expected_name_interned)) break :ok; - } - if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, dest_err_set_ty, inst_ty, inst_src, inst_src)) { - break :ok; - } + if (ies.errors.contains(expected_name)) break :ok; + if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, dest_err_set_ty, inst_ty, inst_src, inst_src)) break :ok; return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty); }, @@ -31462,43 +31253,33 @@ pub fn resolveFnTypes(sema: *Sema, fn_info: InternPool.Key.FuncType) CompileErro /// to a type not having its layout resolved. fn resolveLazyValue(sema: *Sema, val: Value) CompileError!void { switch (val.ip_index) { - .none => switch (val.tag()) { - .lazy_align => { - const ty = val.castTag(.lazy_align).?.data; - return sema.resolveTypeLayout(ty); - }, - .lazy_size => { - const ty = val.castTag(.lazy_size).?.data; - return sema.resolveTypeLayout(ty); - }, - .comptime_field_ptr => { - const field_ptr = val.castTag(.comptime_field_ptr).?.data; - return sema.resolveLazyValue(field_ptr.field_val); - }, - .eu_payload, - .opt_payload, - => { - const sub_val = val.cast(Value.Payload.SubValue).?.data; - return sema.resolveLazyValue(sub_val); - }, - .@"union" => { - const union_val = val.castTag(.@"union").?.data; - return sema.resolveLazyValue(union_val.val); - }, - .aggregate => { - const aggregate = val.castTag(.aggregate).?.data; - for (aggregate) |elem_val| { - try sema.resolveLazyValue(elem_val); - } - }, - .slice => { - const slice = val.castTag(.slice).?.data; - try sema.resolveLazyValue(slice.ptr); - return sema.resolveLazyValue(slice.len); + .none => {}, + else => switch (sema.mod.intern_pool.indexToKey(val.ip_index)) { + .int => |int| switch (int.storage) { + .u64, .i64, .big_int => {}, + .lazy_align, .lazy_size => |lazy_ty| try sema.resolveTypeLayout(lazy_ty.toType()), + }, + .ptr => |ptr| { + switch (ptr.addr) { + .decl, .mut_decl => {}, + .int => |int| try sema.resolveLazyValue(int.toValue()), + .eu_payload, .opt_payload => |base| try sema.resolveLazyValue(base.toValue()), + .comptime_field => |comptime_field| try sema.resolveLazyValue(comptime_field.toValue()), + .elem, .field => |base_index| try sema.resolveLazyValue(base_index.base.toValue()), + } + if (ptr.len != .none) try sema.resolveLazyValue(ptr.len.toValue()); + }, + .aggregate => |aggregate| switch (aggregate.storage) { + 
.bytes => {}, + .elems => |elems| for (elems) |elem| try sema.resolveLazyValue(elem.toValue()), + .repeated_elem => |elem| try sema.resolveLazyValue(elem.toValue()), + }, + .un => |un| { + try sema.resolveLazyValue(un.tag.toValue()); + try sema.resolveLazyValue(un.val.toValue()); }, - else => return, }, - else => return, } } @@ -31597,7 +31378,7 @@ fn resolveStructLayout(sema: *Sema, ty: Type) CompileError!void { else blk: { const decl = mod.declPtr(struct_obj.owner_decl); var decl_arena: std.heap.ArenaAllocator = undefined; - const decl_arena_allocator = decl.value_arena.?.acquire(mod.gpa, &decl_arena); + const decl_arena_allocator = decl.value_arena.?.acquire(sema.gpa, &decl_arena); defer decl.value_arena.?.release(&decl_arena); break :blk try decl_arena_allocator.alloc(u32, struct_obj.fields.count()); }; @@ -31662,18 +31443,7 @@ fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!voi var analysis_arena = std.heap.ArenaAllocator.init(gpa); defer analysis_arena.deinit(); - var sema: Sema = .{ - .mod = mod, - .gpa = gpa, - .arena = analysis_arena.allocator(), - .perm_arena = decl_arena_allocator, - .code = zir, - .owner_decl = decl, - .owner_decl_index = decl_index, - .func = null, - .fn_ret_ty = Type.void, - .owner_func = null, - }; + var sema: Sema = .{ .mod = mod, .gpa = gpa, .arena = analysis_arena.allocator(), .perm_arena = decl_arena_allocator, .code = zir, .owner_decl = decl, .owner_decl_index = decl_index, .func = null, .func_index = .none, .fn_ret_ty = Type.void, .owner_func = null, .owner_func_index = .none }; defer sema.deinit(); var wip_captures = try WipCaptureScope.init(gpa, decl_arena_allocator, decl.src_scope); @@ -31720,8 +31490,10 @@ fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!voi .owner_decl = decl, .owner_decl_index = decl_index, .func = null, + .func_index = .none, .fn_ret_ty = Type.void, .owner_func = null, + .owner_func_index = .none, }; defer sema.deinit(); @@ -31974,16 +31746,23 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .enum_type => |enum_type| try sema.resolveTypeRequiresComptime(enum_type.tag_ty.toType()), // values, not types - .undef => unreachable, - .un => unreachable, - .simple_value => unreachable, - .extern_func => unreachable, - .int => unreachable, - .float => unreachable, - .ptr => unreachable, - .opt => unreachable, - .enum_tag => unreachable, - .aggregate => unreachable, + .undef, + .runtime_value, + .simple_value, + .variable, + .extern_func, + .func, + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .float, + .ptr, + .opt, + .aggregate, + .un, + => unreachable, }, }; } @@ -32141,8 +31920,8 @@ pub fn resolveTypeFields(sema: *Sema, ty: Type) CompileError!Type { .manyptr_const_u8_type, .manyptr_const_u8_sentinel_0_type, .single_const_pointer_to_comptime_int_type, - .const_slice_u8_type, - .const_slice_u8_sentinel_0_type, + .slice_const_u8_type, + .slice_const_u8_sentinel_0_type, .anyerror_void_error_union_type, .generic_poison_type, .empty_struct_type, @@ -32288,18 +32067,19 @@ fn resolveInferredErrorSet( if (ies.is_resolved) return; - if (ies.func.state == .in_progress) { + const func = mod.funcPtr(ies.func); + if (func.state == .in_progress) { return sema.fail(block, src, "unable to resolve inferred error set", .{}); } // In order to ensure that all dependencies are properly added to the set, we // need to ensure that the function body of the inferred error set is analyzed. 
// However, in the case of comptime/inline function calls with inferred error sets, - each call gets a new InferredErrorSet object, which points to the same - `*Module.Fn`. Not only is the function not relevant to the inferred error set + each call gets a new InferredErrorSet object, which contains the same + `Module.Fn.Index`. Not only is the function not relevant to the inferred error set in this case, it may be a generic function which would cause an assertion failure if we called `ensureFuncBodyAnalyzed` on it here. - const ies_func_owner_decl = mod.declPtr(ies.func.owner_decl); + const ies_func_owner_decl = mod.declPtr(func.owner_decl); const ies_func_info = mod.typeToFunc(ies_func_owner_decl.ty).?; // if the ies is declared by an inline function with a generic return type, the return_type should be generic_poison, // because an inline function does not create a new declaration, and the ies has been filled in by analyzeCall, @@ -32414,8 +32194,10 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void .owner_decl = decl, .owner_decl_index = decl_index, .func = null, + .func_index = .none, .fn_ret_ty = Type.void, .owner_func = null, + .owner_func_index = .none, }; defer sema.deinit(); @@ -32754,8 +32536,10 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { .owner_decl = decl, .owner_decl_index = decl_index, .func = null, + .func_index = .none, .fn_ret_ty = Type.void, .owner_func = null, + .owner_func_index = .none, }; defer sema.deinit(); @@ -33111,7 +32895,7 @@ fn generateUnionTagTypeNumbered( const name = name: { const fqn = try union_obj.getFullyQualifiedName(mod); defer sema.gpa.free(fqn); - break :name try std.fmt.allocPrintZ(mod.gpa, "@typeInfo({s}).Union.tag_type.?", .{fqn}); + break :name try std.fmt.allocPrintZ(sema.gpa, "@typeInfo({s}).Union.tag_type.?", .{fqn}); }; try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, block.namespace, .{ .ty = Type.type, @@ -33160,7 +32944,7 @@ fn generateUnionTagTypeSimple( const name = name: { const fqn = try union_obj.getFullyQualifiedName(mod); defer sema.gpa.free(fqn); - break :name try std.fmt.allocPrintZ(mod.gpa, "@typeInfo({s}).Union.tag_type.?", .{fqn}); + break :name try std.fmt.allocPrintZ(sema.gpa, "@typeInfo({s}).Union.tag_type.?", .{fqn}); }; try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, block.namespace, .{ .ty = Type.type, @@ -33288,19 +33072,19 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .inferred_error_set_type, => null, - .array_type => |array_type| { - if (array_type.len == 0) - return Value.initTag(.empty_array); - if ((try sema.typeHasOnePossibleValue(array_type.child.toType())) != null) { - return Value.initTag(.the_only_possible_value); + inline .array_type, .vector_type => |seq_type| { + if (seq_type.len == 0) return (try mod.intern(.{ .aggregate = .{ + .ty = ty.ip_index, + .storage = .{ .elems = &.{} }, + } })).toValue(); + if (try sema.typeHasOnePossibleValue(seq_type.child.toType())) |opv| { + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.ip_index, + .storage = .{ .repeated_elem = opv.ip_index }, + } })).toValue(); } return null; }, - .vector_type => |vector_type| { - if (vector_type.len == 0) return Value.initTag(.empty_array); - if (try sema.typeHasOnePossibleValue(vector_type.child.toType())) |v| return v; - return null; - }, .opt_type => |child| { if (child == .noreturn_type) { return try mod.nullValue(ty); }, @@ -33466,16 +33250,23 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) 
CompileError!?Value { }, // values, not types - .undef => unreachable, - .un => unreachable, - .simple_value => unreachable, - .extern_func => unreachable, - .int => unreachable, - .float => unreachable, - .ptr => unreachable, - .opt => unreachable, - .enum_tag => unreachable, - .aggregate => unreachable, + .undef, + .runtime_value, + .simple_value, + .variable, + .extern_func, + .func, + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .float, + .ptr, + .opt, + .aggregate, + .un, + => unreachable, }, }; } @@ -33625,10 +33416,13 @@ fn analyzeComptimeAlloc( decl.@"align" = alignment; try sema.mod.declareDeclDependency(sema.owner_decl_index, decl_index); - return sema.addConstant(ptr_type, try Value.Tag.decl_ref_mut.create(sema.arena, .{ - .runtime_index = block.runtime_index, - .decl_index = decl_index, - })); + return sema.addConstant(ptr_type, (try sema.mod.intern(.{ .ptr = .{ + .ty = ptr_type.ip_index, + .addr = .{ .mut_decl = .{ + .decl = decl_index, + .runtime_index = block.runtime_index, + } }, + } })).toValue()); } /// The places where a user can specify an address space attribute @@ -33969,16 +33763,23 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .enum_type => |enum_type| try sema.typeRequiresComptime(enum_type.tag_ty.toType()), // values, not types - .undef => unreachable, - .un => unreachable, - .simple_value => unreachable, - .extern_func => unreachable, - .int => unreachable, - .float => unreachable, - .ptr => unreachable, - .opt => unreachable, - .enum_tag => unreachable, - .aggregate => unreachable, + .undef, + .runtime_value, + .simple_value, + .variable, + .extern_func, + .func, + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .float, + .ptr, + .opt, + .aggregate, + .un, + => unreachable, }, }; } @@ -34337,8 +34138,9 @@ fn intFitsInType( ty: Type, vector_index: ?*usize, ) CompileError!bool { - if (ty.ip_index == .comptime_int_type) return true; const mod = sema.mod; + if (ty.ip_index == .comptime_int_type) return true; + const info = ty.intInfo(mod); switch (val.ip_index) { .undef, .zero, @@ -34346,40 +34148,8 @@ fn intFitsInType( .zero_u8, => return true, - .none => switch (val.tag()) { - .lazy_align => { - const info = ty.intInfo(mod); - const max_needed_bits = @as(u16, 16) + @boolToInt(info.signedness == .signed); - // If it is u16 or bigger we know the alignment fits without resolving it. - if (info.bits >= max_needed_bits) return true; - const x = try sema.typeAbiAlignment(val.castTag(.lazy_align).?.data); - if (x == 0) return true; - const actual_needed_bits = std.math.log2(x) + 1 + @boolToInt(info.signedness == .signed); - return info.bits >= actual_needed_bits; - }, - .lazy_size => { - const info = ty.intInfo(mod); - const max_needed_bits = @as(u16, 64) + @boolToInt(info.signedness == .signed); - // If it is u64 or bigger we know the size fits without resolving it. 
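The lazy_align/lazy_size branches that move below follow one rule: skip resolving the queried type whenever the destination integer type is already wide enough for any possible alignment or size. A minimal standalone sketch of that rule, with assumed scalar parameters standing in for the compiler's Type and Module (not part of the patch):

    const std = @import("std");

    // `bits` and `signed` describe the destination integer type; `alignment`
    // is the ABI alignment that the real code would have to resolve lazily.
    fn lazyAlignFits(bits: u16, signed: bool, alignment: u64) bool {
        // Alignments never need more than 16 bits (plus a sign bit).
        const max_needed_bits = @as(u16, 16) + @boolToInt(signed);
        if (bits >= max_needed_bits) return true;
        if (alignment == 0) return true;
        const actual_needed_bits = @intCast(u16, std.math.log2(alignment)) + 1 + @boolToInt(signed);
        return bits >= actual_needed_bits;
    }

    test "alignment bit-fitting" {
        try std.testing.expect(lazyAlignFits(16, false, 4096));
        try std.testing.expect(!lazyAlignFits(8, false, 4096)); // needs log2(4096) + 1 = 13 bits
        try std.testing.expect(lazyAlignFits(13, false, 4096));
    }
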
- if (info.bits >= max_needed_bits) return true; - const x = try sema.typeAbiSize(val.castTag(.lazy_size).?.data); - if (x == 0) return true; - const actual_needed_bits = std.math.log2(x) + 1 + @boolToInt(info.signedness == .signed); - return info.bits >= actual_needed_bits; - }, - - .the_only_possible_value => { - assert(ty.intInfo(mod).bits == 0); - return true; - }, - - .decl_ref_mut, - .extern_fn, - .decl_ref, - .function, - .variable, - => { - const info = ty.intInfo(mod); + else => switch (mod.intern_pool.indexToKey(val.ip_index)) { + .variable, .extern_func, .func, .ptr => { const target = mod.getTarget(); const ptr_bits = target.ptrBitWidth(); return switch (info.signedness) { @@ -34387,27 +34157,51 @@ fn intFitsInType( .unsigned => info.bits >= ptr_bits, }; }, - - .aggregate => { - assert(ty.zigTypeTag(mod) == .Vector); - for (val.castTag(.aggregate).?.data, 0..) |elem, i| { - if (!(try sema.intFitsInType(elem, ty.scalarType(mod), null))) { - if (vector_index) |some| some.* = i; - return false; - } - } - return true; + .int => |int| switch (int.storage) { + .u64, .i64, .big_int => { + var buffer: InternPool.Key.Int.Storage.BigIntSpace = undefined; + const big_int = int.storage.toBigInt(&buffer); + return big_int.fitsInTwosComp(info.signedness, info.bits); + }, + .lazy_align => |lazy_ty| { + const max_needed_bits = @as(u16, 16) + @boolToInt(info.signedness == .signed); + // If it is u16 or bigger we know the alignment fits without resolving it. + if (info.bits >= max_needed_bits) return true; + const x = try sema.typeAbiAlignment(lazy_ty.toType()); + if (x == 0) return true; + const actual_needed_bits = std.math.log2(x) + 1 + @boolToInt(info.signedness == .signed); + return info.bits >= actual_needed_bits; + }, + .lazy_size => |lazy_ty| { + const max_needed_bits = @as(u16, 64) + @boolToInt(info.signedness == .signed); + // If it is u64 or bigger we know the size fits without resolving it. + if (info.bits >= max_needed_bits) return true; + const x = try sema.typeAbiSize(lazy_ty.toType()); + if (x == 0) return true; + const actual_needed_bits = std.math.log2(x) + 1 + @boolToInt(info.signedness == .signed); + return info.bits >= actual_needed_bits; + }, }, - - else => unreachable, - }, - - else => switch (mod.intern_pool.indexToKey(val.ip_index)) { - .int => |int| { - const info = ty.intInfo(mod); - var buffer: InternPool.Key.Int.Storage.BigIntSpace = undefined; - const big_int = int.storage.toBigInt(&buffer); - return big_int.fitsInTwosComp(info.signedness, info.bits); + .aggregate => |aggregate| { + assert(ty.zigTypeTag(mod) == .Vector); + return switch (aggregate.storage) { + .bytes => |bytes| for (bytes, 0..) |byte, i| { + if (byte == 0) continue; + const actual_needed_bits = std.math.log2(byte) + 1 + @boolToInt(info.signedness == .signed); + if (info.bits >= actual_needed_bits) continue; + if (vector_index) |vi| vi.* = i; + break false; + } else true, + .elems, .repeated_elem => for (switch (aggregate.storage) { + .bytes => unreachable, + .elems => |elems| elems, + .repeated_elem => |elem| @as(*const [1]InternPool.Index, &elem), + }, 0..) 
|elem, i| { + if (try sema.intFitsInType(elem.toValue(), ty.scalarType(mod), null)) continue; + if (vector_index) |vi| vi.* = i; + break false; + } else true, + }; }, else => unreachable, }, diff --git a/src/TypedValue.zig b/src/TypedValue.zig index 569c1430d5..2222c1060e 100644 --- a/src/TypedValue.zig +++ b/src/TypedValue.zig @@ -102,248 +102,15 @@ pub fn print( return writer.writeAll(" }"); }, - .the_only_possible_value => return writer.writeAll("0"), - .lazy_align => { - const sub_ty = val.castTag(.lazy_align).?.data; - const x = sub_ty.abiAlignment(mod); - return writer.print("{d}", .{x}); - }, - .lazy_size => { - const sub_ty = val.castTag(.lazy_size).?.data; - const x = sub_ty.abiSize(mod); - return writer.print("{d}", .{x}); - }, - .function => return writer.print("(function '{s}')", .{ - mod.declPtr(val.castTag(.function).?.data.owner_decl).name, - }), - .extern_fn => return writer.writeAll("(extern function)"), - .variable => unreachable, - .decl_ref_mut => { - const decl_index = val.castTag(.decl_ref_mut).?.data.decl_index; - const decl = mod.declPtr(decl_index); - if (level == 0) { - return writer.print("(decl ref mut '{s}')", .{decl.name}); - } - return print(.{ - .ty = decl.ty, - .val = decl.val, - }, writer, level - 1, mod); - }, - .decl_ref => { - const decl_index = val.castTag(.decl_ref).?.data; - const decl = mod.declPtr(decl_index); - if (level == 0) { - return writer.print("(decl ref '{s}')", .{decl.name}); - } - return print(.{ - .ty = decl.ty, - .val = decl.val, - }, writer, level - 1, mod); - }, - .comptime_field_ptr => { - const payload = val.castTag(.comptime_field_ptr).?.data; - if (level == 0) { - return writer.writeAll("(comptime field ptr)"); - } - return print(.{ - .ty = payload.field_ty, - .val = payload.field_val, - }, writer, level - 1, mod); - }, - .elem_ptr => { - const elem_ptr = val.castTag(.elem_ptr).?.data; - try writer.writeAll("&"); - if (level == 0) { - try writer.writeAll("(ptr)"); - } else { - try print(.{ - .ty = elem_ptr.elem_ty, - .val = elem_ptr.array_ptr, - }, writer, level - 1, mod); - } - return writer.print("[{}]", .{elem_ptr.index}); - }, - .field_ptr => { - const field_ptr = val.castTag(.field_ptr).?.data; - try writer.writeAll("&"); - if (level == 0) { - try writer.writeAll("(ptr)"); - } else { - try print(.{ - .ty = field_ptr.container_ty, - .val = field_ptr.container_ptr, - }, writer, level - 1, mod); - } - - if (field_ptr.container_ty.zigTypeTag(mod) == .Struct) { - switch (mod.intern_pool.indexToKey(field_ptr.container_ty.ip_index)) { - .anon_struct_type => |anon_struct| { - if (anon_struct.names.len == 0) { - return writer.print(".@\"{d}\"", .{field_ptr.field_index}); - } - }, - else => {}, - } - const field_name = field_ptr.container_ty.structFieldName(field_ptr.field_index, mod); - return writer.print(".{s}", .{field_name}); - } else if (field_ptr.container_ty.zigTypeTag(mod) == .Union) { - const field_name = field_ptr.container_ty.unionFields(mod).keys()[field_ptr.field_index]; - return writer.print(".{s}", .{field_name}); - } else if (field_ptr.container_ty.isSlice(mod)) { - switch (field_ptr.field_index) { - Value.Payload.Slice.ptr_index => return writer.writeAll(".ptr"), - Value.Payload.Slice.len_index => return writer.writeAll(".len"), - else => unreachable, - } - } - }, - .empty_array => return writer.writeAll(".{}"), - .enum_literal => return writer.print(".{}", .{std.zig.fmtId(val.castTag(.enum_literal).?.data)}), .bytes => return writer.print("\"{}\"", .{std.zig.fmtEscapes(val.castTag(.bytes).?.data)}), .str_lit => { 
const str_lit = val.castTag(.str_lit).?.data; const bytes = mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; return writer.print("\"{}\"", .{std.zig.fmtEscapes(bytes)}); }, - .repeated => { - if (level == 0) { - return writer.writeAll(".{ ... }"); - } - var i: u32 = 0; - try writer.writeAll(".{ "); - const elem_tv = TypedValue{ - .ty = ty.elemType2(mod), - .val = val.castTag(.repeated).?.data, - }; - const len = ty.arrayLen(mod); - const max_len = std.math.min(len, max_aggregate_items); - while (i < max_len) : (i += 1) { - if (i != 0) try writer.writeAll(", "); - try print(elem_tv, writer, level - 1, mod); - } - if (len > max_aggregate_items) { - try writer.writeAll(", ..."); - } - return writer.writeAll(" }"); - }, - .empty_array_sentinel => { - if (level == 0) { - return writer.writeAll(".{ (sentinel) }"); - } - try writer.writeAll(".{ "); - try print(.{ - .ty = ty.elemType2(mod), - .val = ty.sentinel(mod).?, - }, writer, level - 1, mod); - return writer.writeAll(" }"); - }, - .slice => { - if (level == 0) { - return writer.writeAll(".{ ... }"); - } - const payload = val.castTag(.slice).?.data; - const elem_ty = ty.elemType2(mod); - const len = payload.len.toUnsignedInt(mod); - - if (elem_ty.eql(Type.u8, mod)) str: { - const max_len = @intCast(usize, std.math.min(len, max_string_len)); - var buf: [max_string_len]u8 = undefined; - - var i: u32 = 0; - while (i < max_len) : (i += 1) { - const elem_val = payload.ptr.elemValue(mod, i) catch |err| switch (err) { - error.OutOfMemory => @panic("OOM"), // TODO: eliminate this panic - }; - if (elem_val.isUndef(mod)) break :str; - buf[i] = std.math.cast(u8, elem_val.toUnsignedInt(mod)) orelse break :str; - } - - // TODO would be nice if this had a bit of unicode awareness. - const truncated = if (len > max_string_len) " (truncated)" else ""; - return writer.print("\"{}{s}\"", .{ std.zig.fmtEscapes(buf[0..max_len]), truncated }); - } - - try writer.writeAll(".{ "); - - const max_len = std.math.min(len, max_aggregate_items); - var i: u32 = 0; - while (i < max_len) : (i += 1) { - if (i != 0) try writer.writeAll(", "); - const elem_val = payload.ptr.elemValue(mod, i) catch |err| switch (err) { - error.OutOfMemory => @panic("OOM"), // TODO: eliminate this panic - }; - try print(.{ - .ty = elem_ty, - .val = elem_val, - }, writer, level - 1, mod); - } - if (len > max_aggregate_items) { - try writer.writeAll(", ..."); - } - return writer.writeAll(" }"); - }, - .@"error" => return writer.print("error.{s}", .{val.castTag(.@"error").?.data.name}), - .eu_payload => { - val = val.castTag(.eu_payload).?.data; - ty = ty.errorUnionPayload(mod); - }, - .opt_payload => { - val = val.castTag(.opt_payload).?.data; - ty = ty.optionalChild(mod); - return print(.{ .ty = ty, .val = val }, writer, level, mod); - }, - .eu_payload_ptr => { - try writer.writeAll("&"); - if (level == 0) { - return writer.writeAll("(ptr)"); - } - - const data = val.castTag(.eu_payload_ptr).?.data; - - try writer.writeAll("@as("); - try print(.{ - .ty = Type.type, - .val = ty.toValue(), - }, writer, level - 1, mod); - - try writer.writeAll(", &(payload of "); - - try print(.{ - .ty = mod.singleMutPtrType(data.container_ty) catch @panic("OOM"), - .val = data.container_ptr, - }, writer, level - 1, mod); - - try writer.writeAll("))"); - return; - }, - .opt_payload_ptr => { - if (level == 0) { - return writer.writeAll("&(ptr)"); - } - - const data = val.castTag(.opt_payload_ptr).?.data; - - try writer.writeAll("@as("); - try print(.{ - .ty = Type.type, - .val = ty.toValue(), - }, 
writer, level - 1, mod); - - try writer.writeAll(", &(payload of "); - - try print(.{ - .ty = mod.singleMutPtrType(data.container_ty) catch @panic("OOM"), - .val = data.container_ptr, - }, writer, level - 1, mod); - - try writer.writeAll("))"); - return; - }, - // TODO these should not appear in this function .inferred_alloc => return writer.writeAll("(inferred allocation value)"), .inferred_alloc_comptime => return writer.writeAll("(inferred comptime allocation value)"), - .runtime_value => return writer.writeAll("[runtime value]"), }, else => { const key = mod.intern_pool.indexToKey(val.ip_index); @@ -353,6 +120,12 @@ pub fn print( switch (key) { .int => |int| switch (int.storage) { inline .u64, .i64, .big_int => |x| return writer.print("{}", .{x}), + .lazy_align => |lazy_ty| return writer.print("{d}", .{ + lazy_ty.toType().abiAlignment(mod), + }), + .lazy_size => |lazy_ty| return writer.print("{d}", .{ + lazy_ty.toType().abiSize(mod), + }), }, .enum_tag => |enum_tag| { if (level == 0) { @@ -407,7 +180,7 @@ fn printAggregate( } try print(.{ .ty = ty.structFieldType(i, mod), - .val = try val.fieldValue(ty, mod, i), + .val = try val.fieldValue(mod, i), }, writer, level - 1, mod); } if (ty.structFieldCount(mod) > max_aggregate_items) { @@ -424,7 +197,7 @@ fn printAggregate( var i: u32 = 0; while (i < max_len) : (i += 1) { - const elem = try val.fieldValue(ty, mod, i); + const elem = try val.fieldValue(mod, i); if (elem.isUndef(mod)) break :str; buf[i] = std.math.cast(u8, elem.toUnsignedInt(mod)) orelse break :str; } @@ -441,7 +214,7 @@ fn printAggregate( if (i != 0) try writer.writeAll(", "); try print(.{ .ty = elem_ty, - .val = try val.fieldValue(ty, mod, i), + .val = try val.fieldValue(mod, i), }, writer, level - 1, mod); } if (len > max_aggregate_items) { diff --git a/src/Zir.zig b/src/Zir.zig index 45a6fae90b..3afff5ba6a 100644 --- a/src/Zir.zig +++ b/src/Zir.zig @@ -2108,8 +2108,8 @@ pub const Inst = struct { manyptr_const_u8_type = @enumToInt(InternPool.Index.manyptr_const_u8_type), manyptr_const_u8_sentinel_0_type = @enumToInt(InternPool.Index.manyptr_const_u8_sentinel_0_type), single_const_pointer_to_comptime_int_type = @enumToInt(InternPool.Index.single_const_pointer_to_comptime_int_type), - const_slice_u8_type = @enumToInt(InternPool.Index.const_slice_u8_type), - const_slice_u8_sentinel_0_type = @enumToInt(InternPool.Index.const_slice_u8_sentinel_0_type), + slice_const_u8_type = @enumToInt(InternPool.Index.slice_const_u8_type), + slice_const_u8_sentinel_0_type = @enumToInt(InternPool.Index.slice_const_u8_sentinel_0_type), anyerror_void_error_union_type = @enumToInt(InternPool.Index.anyerror_void_error_union_type), generic_poison_type = @enumToInt(InternPool.Index.generic_poison_type), inferred_alloc_const_type = @enumToInt(InternPool.Index.inferred_alloc_const_type), diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index c9126747da..faf158e2a4 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -328,7 +328,7 @@ const Self = @This(); pub fn generate( bin_file: *link.File, src_loc: Module.SrcLoc, - module_fn: *Module.Fn, + module_fn_index: Module.Fn.Index, air: Air, liveness: Liveness, code: *std.ArrayList(u8), @@ -339,6 +339,7 @@ pub fn generate( } const mod = bin_file.options.module.?; + const module_fn = mod.funcPtr(module_fn_index); const fn_owner_decl = mod.declPtr(module_fn.owner_decl); assert(fn_owner_decl.has_tv); const fn_type = fn_owner_decl.ty; @@ -4311,9 +4312,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: 
std.builtin.CallModifier // Due to incremental compilation, how function calls are generated depends // on linking. if (try self.air.value(callee, mod)) |func_value| { - if (func_value.castTag(.function)) |func_payload| { - const func = func_payload.data; - + if (func_value.getFunction(mod)) |func| { if (self.bin_file.cast(link.File.Elf)) |elf_file| { const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl); const atom = elf_file.getAtom(atom_index); @@ -4353,10 +4352,9 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier .tag = .blr, .data = .{ .reg = .x30 }, }); - } else if (func_value.castTag(.extern_fn)) |func_payload| { - const extern_fn = func_payload.data; - const decl_name = mem.sliceTo(mod.declPtr(extern_fn.owner_decl).name, 0); - const lib_name = mem.sliceTo(extern_fn.lib_name, 0); + } else if (func_value.getExternFunc(mod)) |extern_func| { + const decl_name = mem.sliceTo(mod.declPtr(extern_func.decl).name, 0); + const lib_name = mod.intern_pool.stringToSliceUnwrap(extern_func.lib_name); if (self.bin_file.cast(link.File.MachO)) |macho_file| { const sym_index = try macho_file.getGlobalSymbol(decl_name, lib_name); const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl); @@ -4627,7 +4625,8 @@ fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void { fn airDbgInline(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; - const function = self.air.values[ty_pl.payload].castTag(.function).?.data; + const mod = self.bin_file.options.module.?; + const function = self.air.values[ty_pl.payload].getFunction(mod).?; // TODO emit debug info for function change _ = function; return self.finishAir(inst, .dead, .{ .none, .none, .none }); diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index fa8646be43..778662fe86 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -334,7 +334,7 @@ const Self = @This(); pub fn generate( bin_file: *link.File, src_loc: Module.SrcLoc, - module_fn: *Module.Fn, + module_fn_index: Module.Fn.Index, air: Air, liveness: Liveness, code: *std.ArrayList(u8), @@ -345,6 +345,7 @@ pub fn generate( } const mod = bin_file.options.module.?; + const module_fn = mod.funcPtr(module_fn_index); const fn_owner_decl = mod.declPtr(module_fn.owner_decl); assert(fn_owner_decl.has_tv); const fn_type = fn_owner_decl.ty; @@ -4291,9 +4292,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier // Due to incremental compilation, how function calls are generated depends // on linking. 
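This hunk and the matching ones in the other backends share one migration pattern: callee resolution stops casting legacy value tags (castTag(.function), castTag(.extern_fn)) and instead switches on the interned value's key. A simplified sketch of that dispatch, using mock stand-in types rather than the real InternPool.Key (all names here are illustrative only):

    const std = @import("std");

    const DeclIndex = u32;

    // Mock keys; the real code switches on mod.intern_pool.indexToKey(...).
    const Key = union(enum) {
        func: struct { owner_decl: DeclIndex },
        extern_func: struct { decl: DeclIndex },
        ptr: union(enum) { decl: DeclIndex, other: void },
    };

    // Resolve the declaration a call should target, or null when the callee
    // is only known at runtime (e.g. a bitcasted function pointer).
    fn calleeDecl(key: Key) ?DeclIndex {
        return switch (key) {
            .func => |f| f.owner_decl,
            .extern_func => |e| e.decl,
            .ptr => |p| switch (p) {
                .decl => |d| d,
                .other => null,
            },
        };
    }

    test "callee resolution" {
        try std.testing.expectEqual(@as(?DeclIndex, 7), calleeDecl(.{ .func = .{ .owner_decl = 7 } }));
        try std.testing.expectEqual(@as(?DeclIndex, null), calleeDecl(.{ .ptr = .{ .other = {} } }));
    }
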
if (try self.air.value(callee, mod)) |func_value| { - if (func_value.castTag(.function)) |func_payload| { - const func = func_payload.data; - + if (func_value.getFunction(mod)) |func| { if (self.bin_file.cast(link.File.Elf)) |elf_file| { const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl); const atom = elf_file.getAtom(atom_index); @@ -4308,7 +4307,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier @tagName(self.target.cpu.arch), }); } - } else if (func_value.castTag(.extern_fn)) |_| { + } else if (func_value.getExternFunc(mod)) |_| { return self.fail("TODO implement calling extern functions", .{}); } else { return self.fail("TODO implement calling bitcasted functions", .{}); @@ -4573,7 +4572,8 @@ fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void { fn airDbgInline(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; - const function = self.air.values[ty_pl.payload].castTag(.function).?.data; + const mod = self.bin_file.options.module.?; + const function = self.air.values[ty_pl.payload].getFunction(mod).?; // TODO emit debug info for function change _ = function; return self.finishAir(inst, .dead, .{ .none, .none, .none }); diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index faa2b2b7d0..a9cd130fa8 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -217,7 +217,7 @@ const Self = @This(); pub fn generate( bin_file: *link.File, src_loc: Module.SrcLoc, - module_fn: *Module.Fn, + module_fn_index: Module.Fn.Index, air: Air, liveness: Liveness, code: *std.ArrayList(u8), @@ -228,6 +228,7 @@ pub fn generate( } const mod = bin_file.options.module.?; + const module_fn = mod.funcPtr(module_fn_index); const fn_owner_decl = mod.declPtr(module_fn.owner_decl); assert(fn_owner_decl.has_tv); const fn_type = fn_owner_decl.ty; @@ -1745,8 +1746,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier } if (try self.air.value(callee, mod)) |func_value| { - if (func_value.castTag(.function)) |func_payload| { - const func = func_payload.data; + if (mod.funcPtrUnwrap(mod.intern_pool.indexToFunc(func_value.ip_index))) |func| { const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl); const atom = elf_file.getAtom(atom_index); _ = try atom.getOrCreateOffsetTableEntry(elf_file); @@ -1760,7 +1760,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier .imm12 = 0, } }, }); - } else if (func_value.castTag(.extern_fn)) |_| { + } else if (mod.intern_pool.indexToKey(func_value.ip_index) == .extern_func) { return self.fail("TODO implement calling extern functions", .{}); } else { return self.fail("TODO implement calling bitcasted functions", .{}); @@ -1879,7 +1879,8 @@ fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void { fn airDbgInline(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; - const function = self.air.values[ty_pl.payload].castTag(.function).?.data; + const mod = self.bin_file.options.module.?; + const function = self.air.values[ty_pl.payload].getFunction(mod).?; // TODO emit debug info for function change _ = function; return self.finishAir(inst, .dead, .{ .none, .none, .none }); diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index 13f129f87b..dc086dc00f 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -260,7 +260,7 @@ const BigTomb = struct { pub fn generate( 
bin_file: *link.File, src_loc: Module.SrcLoc, - module_fn: *Module.Fn, + module_fn_index: Module.Fn.Index, air: Air, liveness: Liveness, code: *std.ArrayList(u8), @@ -271,6 +271,7 @@ pub fn generate( } const mod = bin_file.options.module.?; + const module_fn = mod.funcPtr(module_fn_index); const fn_owner_decl = mod.declPtr(module_fn.owner_decl); assert(fn_owner_decl.has_tv); const fn_type = fn_owner_decl.ty; @@ -1346,8 +1347,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier // on linking. if (try self.air.value(callee, mod)) |func_value| { if (self.bin_file.tag == link.File.Elf.base_tag) { - if (func_value.castTag(.function)) |func_payload| { - const func = func_payload.data; + if (mod.funcPtrUnwrap(mod.intern_pool.indexToFunc(func_value.ip_index))) |func| { const got_addr = if (self.bin_file.cast(link.File.Elf)) |elf_file| blk: { const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl); const atom = elf_file.getAtom(atom_index); @@ -1374,7 +1374,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier .tag = .nop, .data = .{ .nop = {} }, }); - } else if (func_value.castTag(.extern_fn)) |_| { + } else if (mod.intern_pool.indexToKey(func_value.ip_index) == .extern_func) { return self.fail("TODO implement calling extern functions", .{}); } else { return self.fail("TODO implement calling bitcasted functions", .{}); @@ -1663,7 +1663,8 @@ fn airDbgBlock(self: *Self, inst: Air.Inst.Index) !void { fn airDbgInline(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; - const function = self.air.values[ty_pl.payload].castTag(.function).?.data; + const mod = self.bin_file.options.module.?; + const function = self.air.values[ty_pl.payload].getFunction(mod).?; // TODO emit debug info for function change _ = function; return self.finishAir(inst, .dead, .{ .none, .none, .none }); diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index 2d7e4a8585..66c0399343 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -1203,20 +1203,22 @@ fn genFunctype( pub fn generate( bin_file: *link.File, src_loc: Module.SrcLoc, - func: *Module.Fn, + func_index: Module.Fn.Index, air: Air, liveness: Liveness, code: *std.ArrayList(u8), debug_output: codegen.DebugInfoOutput, ) codegen.CodeGenError!codegen.Result { _ = src_loc; + const mod = bin_file.options.module.?; + const func = mod.funcPtr(func_index); var code_gen: CodeGen = .{ .gpa = bin_file.allocator, .air = air, .liveness = liveness, .code = code, .decl_index = func.owner_decl, - .decl = bin_file.options.module.?.declPtr(func.owner_decl), + .decl = mod.declPtr(func.owner_decl), .err_msg = undefined, .locals = .{}, .target = bin_file.options.target, @@ -2196,27 +2198,33 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif const callee: ?Decl.Index = blk: { const func_val = (try func.air.value(pl_op.operand, mod)) orelse break :blk null; - if (func_val.castTag(.function)) |function| { - _ = try func.bin_file.getOrCreateAtomForDecl(function.data.owner_decl); - break :blk function.data.owner_decl; - } else if (func_val.castTag(.extern_fn)) |extern_fn| { - const ext_decl = mod.declPtr(extern_fn.data.owner_decl); + if (func_val.getFunction(mod)) |function| { + _ = try func.bin_file.getOrCreateAtomForDecl(function.owner_decl); + break :blk function.owner_decl; + } else if (func_val.getExternFunc(mod)) |extern_func| { + const ext_decl = mod.declPtr(extern_func.decl); const 
ext_info = mod.typeToFunc(ext_decl.ty).?; var func_type = try genFunctype(func.gpa, ext_info.cc, ext_info.param_types, ext_info.return_type.toType(), mod); defer func_type.deinit(func.gpa); - const atom_index = try func.bin_file.getOrCreateAtomForDecl(extern_fn.data.owner_decl); + const atom_index = try func.bin_file.getOrCreateAtomForDecl(extern_func.decl); const atom = func.bin_file.getAtomPtr(atom_index); - const type_index = try func.bin_file.storeDeclType(extern_fn.data.owner_decl, func_type); + const type_index = try func.bin_file.storeDeclType(extern_func.decl, func_type); try func.bin_file.addOrUpdateImport( mem.sliceTo(ext_decl.name, 0), atom.getSymbolIndex().?, - ext_decl.getExternFn().?.lib_name, + mod.intern_pool.stringToSliceUnwrap(ext_decl.getExternFunc(mod).?.lib_name), type_index, ); - break :blk extern_fn.data.owner_decl; - } else if (func_val.castTag(.decl_ref)) |decl_ref| { - _ = try func.bin_file.getOrCreateAtomForDecl(decl_ref.data); - break :blk decl_ref.data; + break :blk extern_func.decl; + } else switch (mod.intern_pool.indexToKey(func_val.ip_index)) { + .ptr => |ptr| switch (ptr.addr) { + .decl => |decl| { + _ = try func.bin_file.getOrCreateAtomForDecl(decl); + break :blk decl; + }, + else => {}, + }, + else => {}, } return func.fail("Expected a function, but instead found type '{}'", .{func_val.tag()}); }; @@ -2932,29 +2940,41 @@ fn wrapOperand(func: *CodeGen, operand: WValue, ty: Type) InnerError!WValue { return WValue{ .stack = {} }; } -fn lowerParentPtr(func: *CodeGen, ptr_val: Value, offset: u32) InnerError!WValue { +fn lowerParentPtr(func: *CodeGen, ptr_val: Value) InnerError!WValue { const mod = func.bin_file.base.options.module.?; - switch (ptr_val.tag()) { - .decl_ref_mut => { - const decl_index = ptr_val.castTag(.decl_ref_mut).?.data.decl_index; - return func.lowerParentPtrDecl(ptr_val, decl_index, offset); + const ptr = mod.intern_pool.indexToKey(ptr_val.ip_index).ptr; + switch (ptr.addr) { + .decl => |decl_index| { + return func.lowerParentPtrDecl(ptr_val, decl_index, 0); + }, + .mut_decl => |mut_decl| { + const decl_index = mut_decl.decl; + return func.lowerParentPtrDecl(ptr_val, decl_index, 0); }, - .decl_ref => { - const decl_index = ptr_val.castTag(.decl_ref).?.data; - return func.lowerParentPtrDecl(ptr_val, decl_index, offset); + .int, .eu_payload => |tag| return func.fail("TODO: Implement lowerParentPtr for {}", .{tag}), + .opt_payload => |base_ptr| { + return func.lowerParentPtr(base_ptr.toValue()); }, - .variable => { - const decl_index = ptr_val.castTag(.variable).?.data.owner_decl; - return func.lowerParentPtrDecl(ptr_val, decl_index, offset); + .comptime_field => unreachable, + .elem => |elem| { + const index = elem.index; + const elem_type = mod.intern_pool.typeOf(elem.base).toType().elemType2(mod); + const offset = index * elem_type.abiSize(mod); + const array_ptr = try func.lowerParentPtr(elem.base.toValue()); + + return WValue{ .memory_offset = .{ + .pointer = array_ptr.memory, + .offset = @intCast(u32, offset), + } }; }, - .field_ptr => { - const field_ptr = ptr_val.castTag(.field_ptr).?.data; - const parent_ty = field_ptr.container_ty; + .field => |field| { + const parent_ty = mod.intern_pool.typeOf(field.base).toType().childType(mod); + const parent_ptr = try func.lowerParentPtr(field.base.toValue()); - const field_offset = switch (parent_ty.zigTypeTag(mod)) { + const offset = switch (parent_ty.zigTypeTag(mod)) { .Struct => switch (parent_ty.containerLayout(mod)) { - .Packed => 
parent_ty.packedStructFieldByteOffset(field_ptr.field_index, mod), - else => parent_ty.structFieldOffset(field_ptr.field_index, mod), + .Packed => parent_ty.packedStructFieldByteOffset(field.index, mod), + else => parent_ty.structFieldOffset(field.index, mod), }, .Union => switch (parent_ty.containerLayout(mod)) { .Packed => 0, @@ -2964,12 +2984,12 @@ fn lowerParentPtr(func: *CodeGen, ptr_val: Value, offset: u32) InnerError!WValue if (layout.payload_align > layout.tag_align) break :blk 0; // tag is stored first so calculate offset from where payload starts - const field_offset = @intCast(u32, std.mem.alignForwardGeneric(u64, layout.tag_size, layout.tag_align)); - break :blk field_offset; + const offset = @intCast(u32, std.mem.alignForwardGeneric(u64, layout.tag_size, layout.tag_align)); + break :blk offset; }, }, .Pointer => switch (parent_ty.ptrSize(mod)) { - .Slice => switch (field_ptr.field_index) { + .Slice => switch (field.index) { 0 => 0, 1 => func.ptrSize(), else => unreachable, @@ -2978,19 +2998,23 @@ fn lowerParentPtr(func: *CodeGen, ptr_val: Value, offset: u32) InnerError!WValue }, else => unreachable, }; - return func.lowerParentPtr(field_ptr.container_ptr, offset + @intCast(u32, field_offset)); - }, - .elem_ptr => { - const elem_ptr = ptr_val.castTag(.elem_ptr).?.data; - const index = elem_ptr.index; - const elem_offset = index * elem_ptr.elem_ty.abiSize(mod); - return func.lowerParentPtr(elem_ptr.array_ptr, offset + @intCast(u32, elem_offset)); - }, - .opt_payload_ptr => { - const payload_ptr = ptr_val.castTag(.opt_payload_ptr).?.data; - return func.lowerParentPtr(payload_ptr.container_ptr, offset); + + return switch (parent_ptr) { + .memory => |ptr_| WValue{ + .memory_offset = .{ + .pointer = ptr_, + .offset = @intCast(u32, offset), + }, + }, + .memory_offset => |mem_off| WValue{ + .memory_offset = .{ + .pointer = mem_off.pointer, + .offset = @intCast(u32, offset) + mem_off.offset, + }, + }, + else => unreachable, + }; }, - else => |tag| return func.fail("TODO: Implement lowerParentPtr for tag: {}", .{tag}), } } @@ -3045,21 +3069,97 @@ fn toTwosComplement(value: anytype, bits: u7) std.meta.Int(.unsigned, @typeInfo( fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue { const mod = func.bin_file.base.options.module.?; var val = arg_val; - if (val.castTag(.runtime_value)) |rt| { - val = rt.data; + switch (mod.intern_pool.indexToKey(val.ip_index)) { + .runtime_value => |rt| val = rt.val.toValue(), + else => {}, } if (val.isUndefDeep(mod)) return func.emitUndefined(ty); - if (val.castTag(.decl_ref)) |decl_ref| { - const decl_index = decl_ref.data; - return func.lowerDeclRefValue(.{ .ty = ty, .val = val }, decl_index, 0); - } - if (val.castTag(.decl_ref_mut)) |decl_ref_mut| { - const decl_index = decl_ref_mut.data.decl_index; - return func.lowerDeclRefValue(.{ .ty = ty, .val = val }, decl_index, 0); - } - switch (ty.zigTypeTag(mod)) { - .Void => return WValue{ .none = {} }, - .Int => { + + if (val.ip_index == .none) switch (ty.zigTypeTag(mod)) { + .Array => |zig_type| return func.fail("Wasm TODO: LowerConstant for zigTypeTag {}", .{zig_type}), + .Struct => { + const struct_obj = mod.typeToStruct(ty).?; + assert(struct_obj.layout == .Packed); + var buf: [8]u8 = .{0} ** 8; // zero the buffer so we do not read 0xaa as integer + val.writeToPackedMemory(ty, func.bin_file.base.options.module.?, &buf, 0) catch unreachable; + const int_val = try mod.intValue( + struct_obj.backing_int_ty, + std.mem.readIntLittle(u64, &buf), + ); + return func.lowerConstant(int_val, 
struct_obj.backing_int_ty); + }, + .Vector => { + assert(determineSimdStoreStrategy(ty, mod) == .direct); + var buf: [16]u8 = undefined; + val.writeToMemory(ty, mod, &buf) catch unreachable; + return func.storeSimdImmd(buf); + }, + .Frame, + .AnyFrame, + => return func.fail("Wasm TODO: LowerConstant for type {}", .{ty.fmt(mod)}), + .Float, + .Union, + .Optional, + .ErrorUnion, + .ErrorSet, + .Int, + .Enum, + .Bool, + .Pointer, + => unreachable, // handled below + .Type, + .Void, + .NoReturn, + .ComptimeFloat, + .ComptimeInt, + .Undefined, + .Null, + .Opaque, + .EnumLiteral, + .Fn, + => unreachable, // comptime-only types + }; + + switch (mod.intern_pool.indexToKey(val.ip_index)) { + .int_type, + .ptr_type, + .array_type, + .vector_type, + .opt_type, + .anyframe_type, + .error_union_type, + .simple_type, + .struct_type, + .anon_struct_type, + .union_type, + .opaque_type, + .enum_type, + .func_type, + .error_set_type, + .inferred_error_set_type, + => unreachable, // types, not values + + .undef, .runtime_value => unreachable, // handled above + .simple_value => |simple_value| switch (simple_value) { + .undefined, + .void, + .null, + .empty_struct, + .@"unreachable", + .generic_poison, + => unreachable, // non-runtime values + .false, .true => return WValue{ .imm32 = switch (simple_value) { + .false => 0, + .true => 1, + else => unreachable, + } }, + }, + .variable, + .extern_func, + .func, + .enum_literal, + => unreachable, // non-runtime values + .int => { const int_info = ty.intInfo(mod); switch (int_info.signedness) { .signed => switch (int_info.bits) { @@ -3080,86 +3180,71 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue { }, } }, - .Bool => return WValue{ .imm32 = @intCast(u32, val.toUnsignedInt(mod)) }, - .Float => switch (ty.floatBits(func.target)) { - 16 => return WValue{ .imm32 = @bitCast(u16, val.toFloat(f16, mod)) }, - 32 => return WValue{ .float32 = val.toFloat(f32, mod) }, - 64 => return WValue{ .float64 = val.toFloat(f64, mod) }, - else => unreachable, - }, - .Pointer => return switch (val.ip_index) { - .null_value => WValue{ .imm32 = 0 }, - .none => switch (val.tag()) { - .field_ptr, .elem_ptr, .opt_payload_ptr => func.lowerParentPtr(val, 0), - else => return func.fail("Wasm TODO: lowerConstant for other const pointer tag {}", .{val.tag()}), - }, - else => switch (mod.intern_pool.indexToKey(val.ip_index)) { - .int => |int| WValue{ .imm32 = @intCast(u32, int.storage.u64) }, - else => unreachable, - }, - }, - .Enum => { - const enum_tag = mod.intern_pool.indexToKey(val.ip_index).enum_tag; - const int_tag_ty = mod.intern_pool.typeOf(enum_tag.int); - return func.lowerConstant(enum_tag.int.toValue(), int_tag_ty.toType()); - }, - .ErrorSet => switch (val.tag()) { - .@"error" => { - const kv = try func.bin_file.base.options.module.?.getErrorValue(val.getError().?); - return WValue{ .imm32 = kv.value }; - }, - else => return WValue{ .imm32 = 0 }, + .err => |err| { + const name = mod.intern_pool.stringToSlice(err.name); + const kv = try mod.getErrorValue(name); + return WValue{ .imm32 = kv.value }; }, - .ErrorUnion => { + .error_union => { const error_type = ty.errorUnionSet(mod); const payload_type = ty.errorUnionPayload(mod); if (!payload_type.hasRuntimeBitsIgnoreComptime(mod)) { // We use the error type directly as the type. 
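The error-union branch below encodes the rule named in its comment: when the payload type has no runtime bits, the whole error union is represented by the error value alone, with integer 0 standing for "has a payload". A tiny sketch of that encoding, with u32 standing in for the wasm immediate and the error-name lookup elided:

    const std = @import("std");

    // With a zero-bit payload, lowering reduces to "0 if payload, else the
    // error's integer value" (the real code obtains it via mod.getErrorValue).
    fn lowerZeroBitErrorUnion(is_payload: bool, err_value: u32) u32 {
        return if (is_payload) 0 else err_value;
    }

    test "zero-bit payload error union" {
        try std.testing.expectEqual(@as(u32, 0), lowerZeroBitErrorUnion(true, 5));
        try std.testing.expectEqual(@as(u32, 5), lowerZeroBitErrorUnion(false, 5));
    }
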
- const is_pl = val.errorUnionIsPayload(); + const is_pl = val.errorUnionIsPayload(mod); const err_val = if (!is_pl) val else try mod.intValue(error_type, 0); return func.lowerConstant(err_val, error_type); } return func.fail("Wasm TODO: lowerConstant error union with non-zero-bit payload type", .{}); }, - .Optional => if (ty.optionalReprIsPayload(mod)) { + .enum_tag => |enum_tag| { + const int_tag_ty = mod.intern_pool.typeOf(enum_tag.int); + return func.lowerConstant(enum_tag.int.toValue(), int_tag_ty.toType()); + }, + .float => |float| switch (float.storage) { + .f16 => |f16_val| return WValue{ .imm32 = @bitCast(u16, f16_val) }, + .f32 => |f32_val| return WValue{ .float32 = f32_val }, + .f64 => |f64_val| return WValue{ .float64 = f64_val }, + else => unreachable, + }, + .ptr => |ptr| switch (ptr.addr) { + .decl => |decl| return func.lowerDeclRefValue(.{ .ty = ty, .val = val }, decl, 0), + .mut_decl => |mut_decl| return func.lowerDeclRefValue(.{ .ty = ty, .val = val }, mut_decl.decl, 0), + .int => |int| return func.lowerConstant(int.toValue(), mod.intern_pool.typeOf(int).toType()), + .opt_payload, .elem, .field => return func.lowerParentPtr(val), + else => return func.fail("Wasm TODO: lowerConstant for other const addr tag {}", .{ptr.addr}), + }, + .opt => if (ty.optionalReprIsPayload(mod)) { const pl_ty = ty.optionalChild(mod); - if (val.castTag(.opt_payload)) |payload| { - return func.lowerConstant(payload.data, pl_ty); - } else if (val.isNull(mod)) { - return WValue{ .imm32 = 0 }; + if (val.optionalValue(mod)) |payload| { + return func.lowerConstant(payload, pl_ty); } else { - return func.lowerConstant(val, pl_ty); + return WValue{ .imm32 = 0 }; } } else { - const is_pl = val.tag() == .opt_payload; - return WValue{ .imm32 = @boolToInt(is_pl) }; - }, - .Struct => { - const struct_obj = mod.typeToStruct(ty).?; - assert(struct_obj.layout == .Packed); - var buf: [8]u8 = .{0} ** 8; // zero the buffer so we do not read 0xaa as integer - val.writeToPackedMemory(ty, func.bin_file.base.options.module.?, &buf, 0) catch unreachable; - const int_val = try mod.intValue( - struct_obj.backing_int_ty, - std.mem.readIntLittle(u64, &buf), - ); - return func.lowerConstant(int_val, struct_obj.backing_int_ty); + return WValue{ .imm32 = @boolToInt(!val.isNull(mod)) }; }, - .Vector => { - assert(determineSimdStoreStrategy(ty, mod) == .direct); - var buf: [16]u8 = undefined; - val.writeToMemory(ty, func.bin_file.base.options.module.?, &buf) catch unreachable; - return func.storeSimdImmd(buf); - }, - .Union => { - // in this case we have a packed union which will not be passed by reference. 
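The rewritten .opt branch above distinguishes two optional representations: when the representation is the payload itself (optionalReprIsPayload, e.g. ?*T), the payload or 0 is lowered; otherwise a 0/1 non-null flag is. A compact sketch of that split, with u32 modeling the payload (an assumed simplification, not the patch's code):

    const std = @import("std");

    fn lowerOptional(repr_is_payload: bool, payload: ?u32) u32 {
        // Pointer-like optionals reuse the payload slot; 0 means null.
        if (repr_is_payload) return payload orelse 0;
        // All other optionals lower to a boolean "has value" flag.
        return @boolToInt(payload != null);
    }

    test "optional lowering" {
        try std.testing.expectEqual(@as(u32, 42), lowerOptional(true, 42));
        try std.testing.expectEqual(@as(u32, 0), lowerOptional(true, null));
        try std.testing.expectEqual(@as(u32, 1), lowerOptional(false, 42));
    }
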
- const union_ty = mod.typeToUnion(ty).?; - const union_obj = val.castTag(.@"union").?.data; - const field_index = ty.unionTagFieldIndex(union_obj.tag, func.bin_file.base.options.module.?).?; - const field_ty = union_ty.fields.values()[field_index].ty; - return func.lowerConstant(union_obj.val, field_ty); + .aggregate => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .array_type => return func.fail("Wasm TODO: LowerConstant for {}", .{ty.fmt(mod)}), + .vector_type => { + assert(determineSimdStoreStrategy(ty, mod) == .direct); + var buf: [16]u8 = undefined; + val.writeToMemory(ty, mod, &buf) catch unreachable; + return func.storeSimdImmd(buf); + }, + .struct_type, .anon_struct_type => { + const struct_obj = mod.typeToStruct(ty).?; + assert(struct_obj.layout == .Packed); + var buf: [8]u8 = .{0} ** 8; // zero the buffer so we do not read 0xaa as integer + val.writeToPackedMemory(ty, func.bin_file.base.options.module.?, &buf, 0) catch unreachable; + const int_val = try mod.intValue( + struct_obj.backing_int_ty, + std.mem.readIntLittle(u64, &buf), + ); + return func.lowerConstant(int_val, struct_obj.backing_int_ty); + }, + else => unreachable, }, - else => |zig_type| return func.fail("Wasm TODO: LowerConstant for zigTypeTag {}", .{zig_type}), + .un => return func.fail("Wasm TODO: LowerConstant for {}", .{ty.fmt(mod)}), } } @@ -3221,31 +3306,33 @@ fn valueAsI32(func: *const CodeGen, val: Value, ty: Type) i32 { .bool_true => return 1, .bool_false => return 0, else => return switch (mod.intern_pool.indexToKey(val.ip_index)) { - .enum_tag => |enum_tag| intIndexAsI32(&mod.intern_pool, enum_tag.int), - .int => |int| intStorageAsI32(int.storage), - .ptr => |ptr| intIndexAsI32(&mod.intern_pool, ptr.addr.int), + .enum_tag => |enum_tag| intIndexAsI32(&mod.intern_pool, enum_tag.int, mod), + .int => |int| intStorageAsI32(int.storage, mod), + .ptr => |ptr| intIndexAsI32(&mod.intern_pool, ptr.addr.int, mod), else => unreachable, }, } switch (ty.zigTypeTag(mod)) { .ErrorSet => { - const kv = func.bin_file.base.options.module.?.getErrorValue(val.getError().?) catch unreachable; // passed invalid `Value` to function + const kv = func.bin_file.base.options.module.?.getErrorValue(val.getError(mod).?) catch unreachable; // passed invalid `Value` to function return @bitCast(i32, kv.value); }, else => unreachable, // Programmer called this function for an illegal type } } -fn intIndexAsI32(ip: *const InternPool, int: InternPool.Index) i32 { - return intStorageAsI32(ip.indexToKey(int).int.storage); +fn intIndexAsI32(ip: *const InternPool, int: InternPool.Index, mod: *Module) i32 { + return intStorageAsI32(ip.indexToKey(int).int.storage, mod); } -fn intStorageAsI32(storage: InternPool.Key.Int.Storage) i32 { +fn intStorageAsI32(storage: InternPool.Key.Int.Storage, mod: *Module) i32 { return switch (storage) { .i64 => |x| @intCast(i32, x), .u64 => |x| @bitCast(i32, @intCast(u32, x)), .big_int => unreachable, + .lazy_align => |ty| @bitCast(i32, ty.toType().abiAlignment(mod)), + .lazy_size => |ty| @bitCast(i32, @intCast(u32, ty.toType().abiSize(mod))), }; } @@ -5514,7 +5601,7 @@ fn airErrorName(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { // As the names are global and the slice elements are constant, we do not have // to make a copy of the ptr+value but can point towards them directly. 
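The intStorageAsI32 hunk above gains lazy_align/lazy_size arms, which is why the helper now needs a *Module. The non-lazy arms reduce to plain integer narrowing; a standalone sketch of just those (the lazy cases are omitted because they require resolving a type's ABI layout):

    const std = @import("std");

    const Storage = union(enum) { i64: i64, u64: u64 };

    fn storageAsI32(storage: Storage) i32 {
        return switch (storage) {
            // Signed values must already fit in i32.
            .i64 => |x| @intCast(i32, x),
            // Unsigned values re-interpret their low 32 bits.
            .u64 => |x| @bitCast(i32, @intCast(u32, x)),
        };
    }

    test "storage as i32" {
        try std.testing.expectEqual(@as(i32, -1), storageAsI32(.{ .i64 = -1 }));
        try std.testing.expectEqual(@as(i32, -1), storageAsI32(.{ .u64 = 0xFFFF_FFFF }));
    }
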
const error_table_symbol = try func.bin_file.getErrorTableSymbol(); - const name_ty = Type.const_slice_u8_sentinel_0; + const name_ty = Type.slice_const_u8_sentinel_0; const mod = func.bin_file.base.options.module.?; const abi_size = name_ty.abiSize(mod); @@ -6935,7 +7022,7 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 { // finish function body try writer.writeByte(std.wasm.opcode(.end)); - const slice_ty = Type.const_slice_u8_sentinel_0; + const slice_ty = Type.slice_const_u8_sentinel_0; const func_type = try genFunctype(arena, .Unspecified, &.{int_tag_ty.ip_index}, slice_ty, mod); return func.bin_file.createFunction(func_name, func_type, &body_list, &relocs); } diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 77b4e6d425..4a5532a239 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -632,7 +632,7 @@ const Self = @This(); pub fn generate( bin_file: *link.File, src_loc: Module.SrcLoc, - module_fn: *Module.Fn, + module_fn_index: Module.Fn.Index, air: Air, liveness: Liveness, code: *std.ArrayList(u8), @@ -643,6 +643,7 @@ pub fn generate( } const mod = bin_file.options.module.?; + const module_fn = mod.funcPtr(module_fn_index); const fn_owner_decl = mod.declPtr(module_fn.owner_decl); assert(fn_owner_decl.has_tv); const fn_type = fn_owner_decl.ty; @@ -687,7 +688,7 @@ pub fn generate( @enumToInt(FrameIndex.stack_frame), FrameAlloc.init(.{ .size = 0, - .alignment = if (mod.align_stack_fns.get(module_fn)) |set_align_stack| + .alignment = if (mod.align_stack_fns.get(module_fn_index)) |set_align_stack| set_align_stack.alignment else 1, @@ -2760,19 +2761,18 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void { const elem_ty = src_ty.childType(mod); const mask_val = try mod.intValue(elem_ty, @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - dst_info.bits)); - var splat_pl = Value.Payload.SubValue{ - .base = .{ .tag = .repeated }, - .data = mask_val, - }; - const splat_val = Value.initPayload(&splat_pl.base); - - const full_ty = try mod.vectorType(.{ + const splat_ty = try mod.vectorType(.{ .len = @intCast(u32, @divExact(@as(u64, if (src_abi_size > 16) 256 else 128), src_info.bits)), .child = elem_ty.ip_index, }); - const full_abi_size = @intCast(u32, full_ty.abiSize(mod)); + const splat_abi_size = @intCast(u32, splat_ty.abiSize(mod)); + + const splat_val = try mod.intern(.{ .aggregate = .{ + .ty = splat_ty.ip_index, + .storage = .{ .repeated_elem = mask_val.ip_index }, + } }); - const splat_mcv = try self.genTypedValue(.{ .ty = full_ty, .val = splat_val }); + const splat_mcv = try self.genTypedValue(.{ .ty = splat_ty, .val = splat_val.toValue() }); const splat_addr_mcv: MCValue = switch (splat_mcv) { .memory, .indirect, .load_frame => splat_mcv.address(), else => .{ .register = try self.copyToTmpRegister(Type.usize, splat_mcv.address()) }, @@ -2784,14 +2784,14 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void { .{ .vp_, .@"and" }, dst_reg, dst_reg, - splat_addr_mcv.deref().mem(Memory.PtrSize.fromSize(full_abi_size)), + splat_addr_mcv.deref().mem(Memory.PtrSize.fromSize(splat_abi_size)), ); try self.asmRegisterRegisterRegister(mir_tag, dst_reg, dst_reg, dst_reg); } else { try self.asmRegisterMemory( .{ .p_, .@"and" }, dst_reg, - splat_addr_mcv.deref().mem(Memory.PtrSize.fromSize(full_abi_size)), + splat_addr_mcv.deref().mem(Memory.PtrSize.fromSize(splat_abi_size)), ); try self.asmRegisterRegister(mir_tag, dst_reg, dst_reg); } @@ -4893,23 +4893,14 @@ fn airFloatSign(self: *Self, inst: Air.Inst.Index) !void { const 
dst_lock = self.register_manager.lockReg(dst_reg); defer if (dst_lock) |lock| self.register_manager.unlockReg(lock); - var arena = std.heap.ArenaAllocator.init(self.gpa); - defer arena.deinit(); - - const ExpectedContents = struct { - repeated: Value.Payload.SubValue, - }; - var stack align(@alignOf(ExpectedContents)) = - std.heap.stackFallback(@sizeOf(ExpectedContents), arena.allocator()); - const vec_ty = try mod.vectorType(.{ .len = @divExact(abi_size * 8, scalar_bits), .child = (try mod.intType(.signed, scalar_bits)).ip_index, }); const sign_val = switch (tag) { - .neg => try vec_ty.minInt(stack.get(), mod), - .fabs => try vec_ty.maxInt(stack.get(), mod, vec_ty), + .neg => try vec_ty.minInt(mod), + .fabs => try vec_ty.maxInt(mod, vec_ty), else => unreachable, }; @@ -8106,13 +8097,15 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier // Due to incremental compilation, how function calls are generated depends // on linking. if (try self.air.value(callee, mod)) |func_value| { - if (if (func_value.castTag(.function)) |func_payload| - func_payload.data.owner_decl - else if (func_value.castTag(.decl_ref)) |decl_ref_payload| - decl_ref_payload.data - else - null) |owner_decl| - { + const func_key = mod.intern_pool.indexToKey(func_value.ip_index); + if (switch (func_key) { + .func => |func| mod.funcPtr(func.index).owner_decl, + .ptr => |ptr| switch (ptr.addr) { + .decl => |decl| decl, + else => null, + }, + else => null, + }) |owner_decl| { if (self.bin_file.cast(link.File.Elf)) |elf_file| { const atom_index = try elf_file.getOrCreateAtomForDecl(owner_decl); const atom = elf_file.getAtom(atom_index); @@ -8145,10 +8138,9 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier .disp = @intCast(i32, fn_got_addr), })); } else unreachable; - } else if (func_value.castTag(.extern_fn)) |func_payload| { - const extern_fn = func_payload.data; - const decl_name = mem.sliceTo(mod.declPtr(extern_fn.owner_decl).name, 0); - const lib_name = mem.sliceTo(extern_fn.lib_name, 0); + } else if (func_value.getExternFunc(mod)) |extern_func| { + const decl_name = mem.sliceTo(mod.declPtr(extern_func.decl).name, 0); + const lib_name = mod.intern_pool.stringToSliceUnwrap(extern_func.lib_name); if (self.bin_file.cast(link.File.Coff)) |coff_file| { const atom_index = try self.owner.getSymbolIndex(self); const sym_index = try coff_file.getGlobalSymbol(decl_name, lib_name); @@ -8554,7 +8546,8 @@ fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void { fn airDbgInline(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; - const function = self.air.values[ty_pl.payload].castTag(.function).?.data; + const mod = self.bin_file.options.module.?; + const function = self.air.values[ty_pl.payload].getFunction(mod).?; // TODO emit debug info for function change _ = function; return self.finishAir(inst, .unreach, .{ .none, .none, .none }); diff --git a/src/codegen.zig b/src/codegen.zig index 775eb09ab0..b9b7dac90f 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -14,6 +14,7 @@ const Air = @import("Air.zig"); const Allocator = mem.Allocator; const Compilation = @import("Compilation.zig"); const ErrorMsg = Module.ErrorMsg; +const InternPool = @import("InternPool.zig"); const Liveness = @import("Liveness.zig"); const Module = @import("Module.zig"); const Target = std.Target; @@ -66,7 +67,7 @@ pub const DebugInfoOutput = union(enum) { pub fn generateFunction( bin_file: *link.File, src_loc: Module.SrcLoc, - func: 
*Module.Fn, + func_index: Module.Fn.Index, air: Air, liveness: Liveness, code: *std.ArrayList(u8), @@ -75,17 +76,17 @@ pub fn generateFunction( switch (bin_file.options.target.cpu.arch) { .arm, .armeb, - => return @import("arch/arm/CodeGen.zig").generate(bin_file, src_loc, func, air, liveness, code, debug_output), + => return @import("arch/arm/CodeGen.zig").generate(bin_file, src_loc, func_index, air, liveness, code, debug_output), .aarch64, .aarch64_be, .aarch64_32, - => return @import("arch/aarch64/CodeGen.zig").generate(bin_file, src_loc, func, air, liveness, code, debug_output), - .riscv64 => return @import("arch/riscv64/CodeGen.zig").generate(bin_file, src_loc, func, air, liveness, code, debug_output), - .sparc64 => return @import("arch/sparc64/CodeGen.zig").generate(bin_file, src_loc, func, air, liveness, code, debug_output), - .x86_64 => return @import("arch/x86_64/CodeGen.zig").generate(bin_file, src_loc, func, air, liveness, code, debug_output), + => return @import("arch/aarch64/CodeGen.zig").generate(bin_file, src_loc, func_index, air, liveness, code, debug_output), + .riscv64 => return @import("arch/riscv64/CodeGen.zig").generate(bin_file, src_loc, func_index, air, liveness, code, debug_output), + .sparc64 => return @import("arch/sparc64/CodeGen.zig").generate(bin_file, src_loc, func_index, air, liveness, code, debug_output), + .x86_64 => return @import("arch/x86_64/CodeGen.zig").generate(bin_file, src_loc, func_index, air, liveness, code, debug_output), .wasm32, .wasm64, - => return @import("arch/wasm/CodeGen.zig").generate(bin_file, src_loc, func, air, liveness, code, debug_output), + => return @import("arch/wasm/CodeGen.zig").generate(bin_file, src_loc, func_index, air, liveness, code, debug_output), else => unreachable, } } @@ -182,12 +183,13 @@ pub fn generateSymbol( const tracy = trace(@src()); defer tracy.end(); + const mod = bin_file.options.module.?; var typed_value = arg_tv; - if (arg_tv.val.castTag(.runtime_value)) |rt| { - typed_value.val = rt.data; + switch (mod.intern_pool.indexToKey(typed_value.val.ip_index)) { + .runtime_value => |rt| typed_value.val = rt.val.toValue(), + else => {}, } - const mod = bin_file.options.module.?; const target = mod.getTarget(); const endian = target.cpu.arch.endian(); @@ -199,35 +201,10 @@ pub fn generateSymbol( if (typed_value.val.isUndefDeep(mod)) { const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse return error.Overflow; try code.appendNTimes(0xaa, abi_size); - return Result.ok; + return .ok; } - switch (typed_value.ty.zigTypeTag(mod)) { - .Fn => { - return Result{ - .fail = try ErrorMsg.create( - bin_file.allocator, - src_loc, - "TODO implement generateSymbol function pointers", - .{}, - ), - }; - }, - .Float => { - switch (typed_value.ty.floatBits(target)) { - 16 => writeFloat(f16, typed_value.val.toFloat(f16, mod), target, endian, try code.addManyAsArray(2)), - 32 => writeFloat(f32, typed_value.val.toFloat(f32, mod), target, endian, try code.addManyAsArray(4)), - 64 => writeFloat(f64, typed_value.val.toFloat(f64, mod), target, endian, try code.addManyAsArray(8)), - 80 => { - writeFloat(f80, typed_value.val.toFloat(f80, mod), target, endian, try code.addManyAsArray(10)); - const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse return error.Overflow; - try code.appendNTimes(0, abi_size - 10); - }, - 128 => writeFloat(f128, typed_value.val.toFloat(f128, mod), target, endian, try code.addManyAsArray(16)), - else => unreachable, - } - return Result.ok; - }, + if (typed_value.val.ip_index == .none) 
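+    // Values not yet migrated to the InternPool (`ip_index == .none`) still
+    // use the legacy tagged representation handled by this switch; interned
+    // values fall through to the `indexToKey` switch further below.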
switch (typed_value.ty.zigTypeTag(mod)) { .Array => switch (typed_value.val.tag()) { .bytes => { const bytes = typed_value.val.castTag(.bytes).?.data; @@ -248,62 +225,6 @@ pub fn generateSymbol( } return Result.ok; }, - .aggregate => { - const elem_vals = typed_value.val.castTag(.aggregate).?.data; - const elem_ty = typed_value.ty.childType(mod); - const len = @intCast(usize, typed_value.ty.arrayLenIncludingSentinel(mod)); - for (elem_vals[0..len]) |elem_val| { - switch (try generateSymbol(bin_file, src_loc, .{ - .ty = elem_ty, - .val = elem_val, - }, code, debug_output, reloc_info)) { - .ok => {}, - .fail => |em| return Result{ .fail = em }, - } - } - return Result.ok; - }, - .repeated => { - const array = typed_value.val.castTag(.repeated).?.data; - const elem_ty = typed_value.ty.childType(mod); - const sentinel = typed_value.ty.sentinel(mod); - const len = typed_value.ty.arrayLen(mod); - - var index: u64 = 0; - while (index < len) : (index += 1) { - switch (try generateSymbol(bin_file, src_loc, .{ - .ty = elem_ty, - .val = array, - }, code, debug_output, reloc_info)) { - .ok => {}, - .fail => |em| return Result{ .fail = em }, - } - } - - if (sentinel) |sentinel_val| { - switch (try generateSymbol(bin_file, src_loc, .{ - .ty = elem_ty, - .val = sentinel_val, - }, code, debug_output, reloc_info)) { - .ok => {}, - .fail => |em| return Result{ .fail = em }, - } - } - - return Result.ok; - }, - .empty_array_sentinel => { - const elem_ty = typed_value.ty.childType(mod); - const sentinel_val = typed_value.ty.sentinel(mod).?; - switch (try generateSymbol(bin_file, src_loc, .{ - .ty = elem_ty, - .val = sentinel_val, - }, code, debug_output, reloc_info)) { - .ok => {}, - .fail => |em| return Result{ .fail = em }, - } - return Result.ok; - }, else => return Result{ .fail = try ErrorMsg.create( bin_file.allocator, @@ -313,195 +234,6 @@ pub fn generateSymbol( ), }, }, - .Pointer => switch (typed_value.val.ip_index) { - .null_value => { - switch (target.ptrBitWidth()) { - 32 => { - mem.writeInt(u32, try code.addManyAsArray(4), 0, endian); - if (typed_value.ty.isSlice(mod)) try code.appendNTimes(0xaa, 4); - }, - 64 => { - mem.writeInt(u64, try code.addManyAsArray(8), 0, endian); - if (typed_value.ty.isSlice(mod)) try code.appendNTimes(0xaa, 8); - }, - else => unreachable, - } - return Result.ok; - }, - .none => switch (typed_value.val.tag()) { - .variable, .decl_ref, .decl_ref_mut => |tag| return lowerDeclRef( - bin_file, - src_loc, - typed_value, - switch (tag) { - .variable => typed_value.val.castTag(.variable).?.data.owner_decl, - .decl_ref => typed_value.val.castTag(.decl_ref).?.data, - .decl_ref_mut => typed_value.val.castTag(.decl_ref_mut).?.data.decl_index, - else => unreachable, - }, - code, - debug_output, - reloc_info, - ), - .slice => { - const slice = typed_value.val.castTag(.slice).?.data; - - // generate ptr - const slice_ptr_field_type = typed_value.ty.slicePtrFieldType(mod); - switch (try generateSymbol(bin_file, src_loc, .{ - .ty = slice_ptr_field_type, - .val = slice.ptr, - }, code, debug_output, reloc_info)) { - .ok => {}, - .fail => |em| return Result{ .fail = em }, - } - - // generate length - switch (try generateSymbol(bin_file, src_loc, .{ - .ty = Type.usize, - .val = slice.len, - }, code, debug_output, reloc_info)) { - .ok => {}, - .fail => |em| return Result{ .fail = em }, - } - - return Result.ok; - }, - .field_ptr, .elem_ptr, .opt_payload_ptr => return lowerParentPtr( - bin_file, - src_loc, - typed_value, - typed_value.val, - code, - debug_output, - reloc_info, - ), - else 
=> return Result{ - .fail = try ErrorMsg.create( - bin_file.allocator, - src_loc, - "TODO implement generateSymbol for pointer type value: '{s}'", - .{@tagName(typed_value.val.tag())}, - ), - }, - }, - else => switch (mod.intern_pool.indexToKey(typed_value.val.ip_index)) { - .int => { - switch (target.ptrBitWidth()) { - 32 => { - const x = typed_value.val.toUnsignedInt(mod); - mem.writeInt(u32, try code.addManyAsArray(4), @intCast(u32, x), endian); - }, - 64 => { - const x = typed_value.val.toUnsignedInt(mod); - mem.writeInt(u64, try code.addManyAsArray(8), x, endian); - }, - else => unreachable, - } - return Result.ok; - }, - else => unreachable, - }, - }, - .Int => { - const info = typed_value.ty.intInfo(mod); - if (info.bits <= 8) { - const x: u8 = switch (info.signedness) { - .unsigned => @intCast(u8, typed_value.val.toUnsignedInt(mod)), - .signed => @bitCast(u8, @intCast(i8, typed_value.val.toSignedInt(mod))), - }; - try code.append(x); - return Result.ok; - } - if (info.bits > 64) { - var bigint_buffer: Value.BigIntSpace = undefined; - const bigint = typed_value.val.toBigInt(&bigint_buffer, mod); - const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse return error.Overflow; - const start = code.items.len; - try code.resize(start + abi_size); - bigint.writeTwosComplement(code.items[start..][0..abi_size], endian); - return Result.ok; - } - switch (info.signedness) { - .unsigned => { - if (info.bits <= 16) { - const x = @intCast(u16, typed_value.val.toUnsignedInt(mod)); - mem.writeInt(u16, try code.addManyAsArray(2), x, endian); - } else if (info.bits <= 32) { - const x = @intCast(u32, typed_value.val.toUnsignedInt(mod)); - mem.writeInt(u32, try code.addManyAsArray(4), x, endian); - } else { - const x = typed_value.val.toUnsignedInt(mod); - mem.writeInt(u64, try code.addManyAsArray(8), x, endian); - } - }, - .signed => { - if (info.bits <= 16) { - const x = @intCast(i16, typed_value.val.toSignedInt(mod)); - mem.writeInt(i16, try code.addManyAsArray(2), x, endian); - } else if (info.bits <= 32) { - const x = @intCast(i32, typed_value.val.toSignedInt(mod)); - mem.writeInt(i32, try code.addManyAsArray(4), x, endian); - } else { - const x = typed_value.val.toSignedInt(mod); - mem.writeInt(i64, try code.addManyAsArray(8), x, endian); - } - }, - } - return Result.ok; - }, - .Enum => { - const int_val = try typed_value.enumToInt(mod); - - const info = typed_value.ty.intInfo(mod); - if (info.bits <= 8) { - const x = @intCast(u8, int_val.toUnsignedInt(mod)); - try code.append(x); - return Result.ok; - } - if (info.bits > 64) { - return Result{ - .fail = try ErrorMsg.create( - bin_file.allocator, - src_loc, - "TODO implement generateSymbol for big int enums ('{}')", - .{typed_value.ty.fmt(mod)}, - ), - }; - } - switch (info.signedness) { - .unsigned => { - if (info.bits <= 16) { - const x = @intCast(u16, int_val.toUnsignedInt(mod)); - mem.writeInt(u16, try code.addManyAsArray(2), x, endian); - } else if (info.bits <= 32) { - const x = @intCast(u32, int_val.toUnsignedInt(mod)); - mem.writeInt(u32, try code.addManyAsArray(4), x, endian); - } else { - const x = int_val.toUnsignedInt(mod); - mem.writeInt(u64, try code.addManyAsArray(8), x, endian); - } - }, - .signed => { - if (info.bits <= 16) { - const x = @intCast(i16, int_val.toSignedInt(mod)); - mem.writeInt(i16, try code.addManyAsArray(2), x, endian); - } else if (info.bits <= 32) { - const x = @intCast(i32, int_val.toSignedInt(mod)); - mem.writeInt(i32, try code.addManyAsArray(4), x, endian); - } else { - const x = 
int_val.toSignedInt(mod); - mem.writeInt(i64, try code.addManyAsArray(8), x, endian); - } - }, - } - return Result.ok; - }, - .Bool => { - const x: u8 = @boolToInt(typed_value.val.toBool(mod)); - try code.append(x); - return Result.ok; - }, .Struct => { if (typed_value.ty.containerLayout(mod) == .Packed) { const struct_obj = mod.typeToStruct(typed_value.ty).?; @@ -562,370 +294,497 @@ pub fn generateSymbol( return Result.ok; }, - .Union => { - const union_obj = typed_value.val.castTag(.@"union").?.data; - const layout = typed_value.ty.unionGetLayout(mod); + .Vector => switch (typed_value.val.tag()) { + .bytes => { + const bytes = typed_value.val.castTag(.bytes).?.data; + const len = math.cast(usize, typed_value.ty.arrayLen(mod)) orelse return error.Overflow; + const padding = math.cast(usize, typed_value.ty.abiSize(mod) - len) orelse + return error.Overflow; + try code.ensureUnusedCapacity(len + padding); + code.appendSliceAssumeCapacity(bytes[0..len]); + if (padding > 0) try code.writer().writeByteNTimes(0, padding); + return Result.ok; + }, + .str_lit => { + const str_lit = typed_value.val.castTag(.str_lit).?.data; + const bytes = mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; + const padding = math.cast(usize, typed_value.ty.abiSize(mod) - str_lit.len) orelse + return error.Overflow; + try code.ensureUnusedCapacity(str_lit.len + padding); + code.appendSliceAssumeCapacity(bytes); + if (padding > 0) try code.writer().writeByteNTimes(0, padding); + return Result.ok; + }, + else => unreachable, + }, + .Frame, + .AnyFrame, + => return .{ .fail = try ErrorMsg.create( + bin_file.allocator, + src_loc, + "TODO generateSymbol for type {}", + .{typed_value.ty.fmt(mod)}, + ) }, + .Float, + .Union, + .Optional, + .ErrorUnion, + .ErrorSet, + .Int, + .Enum, + .Bool, + .Pointer, + => unreachable, // handled below + .Type, + .Void, + .NoReturn, + .ComptimeFloat, + .ComptimeInt, + .Undefined, + .Null, + .Opaque, + .EnumLiteral, + .Fn, + => unreachable, // comptime-only types + }; - if (layout.payload_size == 0) { - return generateSymbol(bin_file, src_loc, .{ - .ty = typed_value.ty.unionTagType(mod).?, - .val = union_obj.tag, - }, code, debug_output, reloc_info); + switch (mod.intern_pool.indexToKey(typed_value.val.ip_index)) { + .int_type, + .ptr_type, + .array_type, + .vector_type, + .opt_type, + .anyframe_type, + .error_union_type, + .simple_type, + .struct_type, + .anon_struct_type, + .union_type, + .opaque_type, + .enum_type, + .func_type, + .error_set_type, + .inferred_error_set_type, + => unreachable, // types, not values + + .undef, .runtime_value => unreachable, // handled above + .simple_value => |simple_value| switch (simple_value) { + .undefined, + .void, + .null, + .empty_struct, + .@"unreachable", + .generic_poison, + => unreachable, // non-runtime values + .false, .true => try code.append(switch (simple_value) { + .false => 0, + .true => 1, + else => unreachable, + }), + }, + .variable, + .extern_func, + .func, + .enum_literal, + => unreachable, // non-runtime values + .int => { + const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse return error.Overflow; + var space: Value.BigIntSpace = undefined; + const val = typed_value.val.toBigInt(&space, mod); + val.writeTwosComplement(try code.addManyAsSlice(abi_size), endian); + }, + .err => |err| { + const name = mod.intern_pool.stringToSlice(err.name); + const kv = try mod.getErrorValue(name); + try code.writer().writeInt(u16, @intCast(u16, kv.value), endian); + }, + .error_union => |error_union| { + const 
payload_ty = typed_value.ty.errorUnionPayload(mod); + + const err_val = switch (error_union.val) { + .err_name => |err_name| @intCast(u16, (try mod.getErrorValue(mod.intern_pool.stringToSlice(err_name))).value), + .payload => @as(u16, 0), + }; + + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + try code.writer().writeInt(u16, err_val, endian); + return .ok; } - // Check if we should store the tag first. - if (layout.tag_align >= layout.payload_align) { - switch (try generateSymbol(bin_file, src_loc, .{ - .ty = typed_value.ty.unionTagType(mod).?, - .val = union_obj.tag, - }, code, debug_output, reloc_info)) { - .ok => {}, - .fail => |em| return Result{ .fail = em }, - } + const payload_align = payload_ty.abiAlignment(mod); + const error_align = Type.anyerror.abiAlignment(mod); + const abi_align = typed_value.ty.abiAlignment(mod); + + // error value first when its type is larger than the error union's payload + if (error_align > payload_align) { + try code.writer().writeInt(u16, err_val, endian); } - const union_ty = mod.typeToUnion(typed_value.ty).?; - const field_index = typed_value.ty.unionTagFieldIndex(union_obj.tag, mod).?; - assert(union_ty.haveFieldTypes()); - const field_ty = union_ty.fields.values()[field_index].ty; - if (!field_ty.hasRuntimeBits(mod)) { - try code.writer().writeByteNTimes(0xaa, math.cast(usize, layout.payload_size) orelse return error.Overflow); - } else { + // emit payload part of the error union + { + const begin = code.items.len; switch (try generateSymbol(bin_file, src_loc, .{ - .ty = field_ty, - .val = union_obj.val, + .ty = payload_ty, + .val = switch (error_union.val) { + .err_name => try mod.intern(.{ .undef = payload_ty.ip_index }), + .payload => |payload| payload, + }.toValue(), }, code, debug_output, reloc_info)) { .ok => {}, - .fail => |em| return Result{ .fail = em }, + .fail => |em| return .{ .fail = em }, } + const unpadded_end = code.items.len - begin; + const padded_end = mem.alignForwardGeneric(u64, unpadded_end, abi_align); + const padding = math.cast(usize, padded_end - unpadded_end) orelse return error.Overflow; - const padding = math.cast(usize, layout.payload_size - field_ty.abiSize(mod)) orelse return error.Overflow; if (padding > 0) { try code.writer().writeByteNTimes(0, padding); } } - if (layout.tag_size > 0) { + // Payload size is larger than error set, so emit our error set last + if (error_align <= payload_align) { + const begin = code.items.len; + try code.writer().writeInt(u16, err_val, endian); + const unpadded_end = code.items.len - begin; + const padded_end = mem.alignForwardGeneric(u64, unpadded_end, abi_align); + const padding = math.cast(usize, padded_end - unpadded_end) orelse return error.Overflow; + + if (padding > 0) { + try code.writer().writeByteNTimes(0, padding); + } + } + }, + .enum_tag => |enum_tag| { + const int_tag_ty = try typed_value.ty.intTagType(mod); + switch (try generateSymbol(bin_file, src_loc, .{ + .ty = int_tag_ty, + .val = (try mod.intern_pool.getCoerced(mod.gpa, enum_tag.int, int_tag_ty.ip_index)).toValue(), + }, code, debug_output, reloc_info)) { + .ok => {}, + .fail => |em| return .{ .fail = em }, + } + }, + .float => |float| switch (float.storage) { + .f16 => |f16_val| writeFloat(f16, f16_val, target, endian, try code.addManyAsArray(2)), + .f32 => |f32_val| writeFloat(f32, f32_val, target, endian, try code.addManyAsArray(4)), + .f64 => |f64_val| writeFloat(f64, f64_val, target, endian, try code.addManyAsArray(8)), + .f80 => |f80_val| { + writeFloat(f80, f80_val, target, endian, try 
code.addManyAsArray(10)); + const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse return error.Overflow; + try code.appendNTimes(0, abi_size - 10); + }, + .f128 => |f128_val| writeFloat(f128, f128_val, target, endian, try code.addManyAsArray(16)), + }, + .ptr => |ptr| { + // generate ptr + switch (try lowerParentPtr(bin_file, src_loc, switch (ptr.len) { + .none => typed_value.val, + else => typed_value.val.slicePtr(mod), + }.ip_index, code, debug_output, reloc_info)) { + .ok => {}, + .fail => |em| return .{ .fail = em }, + } + if (ptr.len != .none) { + // generate len switch (try generateSymbol(bin_file, src_loc, .{ - .ty = union_ty.tag_ty, - .val = union_obj.tag, + .ty = Type.usize, + .val = ptr.len.toValue(), }, code, debug_output, reloc_info)) { .ok => {}, .fail => |em| return Result{ .fail = em }, } } - - if (layout.padding > 0) { - try code.writer().writeByteNTimes(0, layout.padding); - } - - return Result.ok; }, - .Optional => { + .opt => { const payload_type = typed_value.ty.optionalChild(mod); - const is_pl = !typed_value.val.isNull(mod); + const payload_val = typed_value.val.optionalValue(mod); const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse return error.Overflow; - if (!payload_type.hasRuntimeBits(mod)) { - try code.writer().writeByteNTimes(@boolToInt(is_pl), abi_size); - return Result.ok; - } - if (typed_value.ty.optionalReprIsPayload(mod)) { - if (typed_value.val.castTag(.opt_payload)) |payload| { + if (payload_val) |value| { switch (try generateSymbol(bin_file, src_loc, .{ .ty = payload_type, - .val = payload.data, + .val = value, }, code, debug_output, reloc_info)) { .ok => {}, .fail => |em| return Result{ .fail = em }, } - } else if (!typed_value.val.isNull(mod)) { + } else { + try code.writer().writeByteNTimes(0, abi_size); + } + } else { + const padding = abi_size - (math.cast(usize, payload_type.abiSize(mod)) orelse return error.Overflow) - 1; + if (payload_type.hasRuntimeBits(mod)) { + const value = payload_val orelse (try mod.intern(.{ .undef = payload_type.ip_index })).toValue(); switch (try generateSymbol(bin_file, src_loc, .{ .ty = payload_type, - .val = typed_value.val, + .val = value, }, code, debug_output, reloc_info)) { .ok => {}, .fail => |em| return Result{ .fail = em }, } - } else { - try code.writer().writeByteNTimes(0, abi_size); } - - return Result.ok; + try code.writer().writeByte(@boolToInt(payload_val != null)); + try code.writer().writeByteNTimes(0, padding); } + }, + .aggregate => |aggregate| switch (mod.intern_pool.indexToKey(typed_value.ty.ip_index)) { + .array_type => |array_type| { + var index: u64 = 0; + while (index < array_type.len) : (index += 1) { + switch (aggregate.storage) { + .bytes => |bytes| try code.appendSlice(bytes), + .elems, .repeated_elem => switch (try generateSymbol(bin_file, src_loc, .{ + .ty = array_type.child.toType(), + .val = switch (aggregate.storage) { + .bytes => unreachable, + .elems => |elems| elems[@intCast(usize, index)], + .repeated_elem => |elem| elem, + }.toValue(), + }, code, debug_output, reloc_info)) { + .ok => {}, + .fail => |em| return .{ .fail = em }, + }, + } + } - const padding = abi_size - (math.cast(usize, payload_type.abiSize(mod)) orelse return error.Overflow) - 1; - const value = if (typed_value.val.castTag(.opt_payload)) |payload| payload.data else Value.undef; - switch (try generateSymbol(bin_file, src_loc, .{ - .ty = payload_type, - .val = value, - }, code, debug_output, reloc_info)) { - .ok => {}, - .fail => |em| return Result{ .fail = em }, - } - try 
code.writer().writeByte(@boolToInt(is_pl));
- try code.writer().writeByteNTimes(0, padding);

+ if (array_type.sentinel != .none) {
+ switch (try generateSymbol(bin_file, src_loc, .{
+ .ty = array_type.child.toType(),
+ .val = array_type.sentinel.toValue(),
+ }, code, debug_output, reloc_info)) {
+ .ok => {},
+ .fail => |em| return .{ .fail = em },
+ }
+ }
+ },
+ .vector_type => |vector_type| {
+ var index: u32 = 0;
+ while (index < vector_type.len) : (index += 1) {
+ switch (aggregate.storage) {
+ .bytes => |bytes| try code.appendSlice(bytes),
+ .elems, .repeated_elem => switch (try generateSymbol(bin_file, src_loc, .{
+ .ty = vector_type.child.toType(),
+ .val = switch (aggregate.storage) {
+ .bytes => unreachable,
+ .elems => |elems| elems[@intCast(usize, index)],
+ .repeated_elem => |elem| elem,
+ }.toValue(),
+ }, code, debug_output, reloc_info)) {
+ .ok => {},
+ .fail => |em| return .{ .fail = em },
+ },
+ }
+ }

- return Result.ok;
+ const padding = math.cast(usize, typed_value.ty.abiSize(mod) -
+ (math.divCeil(u64, vector_type.child.toType().bitSize(mod) * vector_type.len, 8) catch |err| switch (err) {
+ error.DivisionByZero => unreachable,
+ else => |e| return e,
+ })) orelse return error.Overflow;
+ if (padding > 0) try code.writer().writeByteNTimes(0, padding);
+ },
+ .struct_type, .anon_struct_type => {
+ if (typed_value.ty.containerLayout(mod) == .Packed) {
+ const struct_obj = mod.typeToStruct(typed_value.ty).?;
+ const fields = struct_obj.fields.values();
+ const field_vals = typed_value.val.castTag(.aggregate).?.data;
+ const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse return error.Overflow;
+ const current_pos = code.items.len;
+ try code.resize(current_pos + abi_size);
+ var bits: u16 = 0;
+
+ for (field_vals, 0..) |field_val, index| {
+ const field_ty = fields[index].ty;
+ // A pointer may point to a decl which must be marked used,
+ // but it can also result in a relocation. Therefore we handle those separately.
+ if (field_ty.zigTypeTag(mod) == .Pointer) {
+ const field_size = math.cast(usize, field_ty.abiSize(mod)) orelse return error.Overflow;
+ var tmp_list = try std.ArrayList(u8).initCapacity(code.allocator, field_size);
+ defer tmp_list.deinit();
+ switch (try generateSymbol(bin_file, src_loc, .{
+ .ty = field_ty,
+ .val = field_val,
+ }, &tmp_list, debug_output, reloc_info)) {
+ .ok => @memcpy(code.items[current_pos..][0..tmp_list.items.len], tmp_list.items),
+ .fail => |em| return Result{ .fail = em },
+ }
+ } else {
+ field_val.writeToPackedMemory(field_ty, mod, code.items[current_pos..], bits) catch unreachable;
+ }
+ bits += @intCast(u16, field_ty.bitSize(mod));
+ }
+ } else {
+ const struct_begin = code.items.len;
+ const field_vals = typed_value.val.castTag(.aggregate).?.data;
+ for (field_vals, 0..) 
|field_val, index| { + const field_ty = typed_value.ty.structFieldType(index, mod); + if (!field_ty.hasRuntimeBits(mod)) continue; + + switch (try generateSymbol(bin_file, src_loc, .{ + .ty = field_ty, + .val = field_val, + }, code, debug_output, reloc_info)) { + .ok => {}, + .fail => |em| return Result{ .fail = em }, + } + const unpadded_field_end = code.items.len - struct_begin; + + // Pad struct members if required + const padded_field_end = typed_value.ty.structFieldOffset(index + 1, mod); + const padding = math.cast(usize, padded_field_end - unpadded_field_end) orelse return error.Overflow; + + if (padding > 0) { + try code.writer().writeByteNTimes(0, padding); + } + } + } + }, + else => unreachable, }, - .ErrorUnion => { - const error_ty = typed_value.ty.errorUnionSet(mod); - const payload_ty = typed_value.ty.errorUnionPayload(mod); - const is_payload = typed_value.val.errorUnionIsPayload(); + .un => |un| { + const layout = typed_value.ty.unionGetLayout(mod); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { - const err_val = if (is_payload) try mod.intValue(error_ty, 0) else typed_value.val; + if (layout.payload_size == 0) { return generateSymbol(bin_file, src_loc, .{ - .ty = error_ty, - .val = err_val, + .ty = typed_value.ty.unionTagType(mod).?, + .val = un.tag.toValue(), }, code, debug_output, reloc_info); } - const payload_align = payload_ty.abiAlignment(mod); - const error_align = Type.anyerror.abiAlignment(mod); - const abi_align = typed_value.ty.abiAlignment(mod); - - // error value first when its type is larger than the error union's payload - if (error_align > payload_align) { + // Check if we should store the tag first. + if (layout.tag_align >= layout.payload_align) { switch (try generateSymbol(bin_file, src_loc, .{ - .ty = error_ty, - .val = if (is_payload) try mod.intValue(error_ty, 0) else typed_value.val, + .ty = typed_value.ty.unionTagType(mod).?, + .val = un.tag.toValue(), }, code, debug_output, reloc_info)) { .ok => {}, .fail => |em| return Result{ .fail = em }, } } - // emit payload part of the error union - { - const begin = code.items.len; - const payload_val = if (typed_value.val.castTag(.eu_payload)) |val| val.data else Value.undef; + const union_ty = mod.typeToUnion(typed_value.ty).?; + const field_index = typed_value.ty.unionTagFieldIndex(un.tag.toValue(), mod).?; + assert(union_ty.haveFieldTypes()); + const field_ty = union_ty.fields.values()[field_index].ty; + if (!field_ty.hasRuntimeBits(mod)) { + try code.writer().writeByteNTimes(0xaa, math.cast(usize, layout.payload_size) orelse return error.Overflow); + } else { switch (try generateSymbol(bin_file, src_loc, .{ - .ty = payload_ty, - .val = payload_val, + .ty = field_ty, + .val = un.val.toValue(), }, code, debug_output, reloc_info)) { .ok => {}, .fail => |em| return Result{ .fail = em }, } - const unpadded_end = code.items.len - begin; - const padded_end = mem.alignForwardGeneric(u64, unpadded_end, abi_align); - const padding = math.cast(usize, padded_end - unpadded_end) orelse return error.Overflow; + const padding = math.cast(usize, layout.payload_size - field_ty.abiSize(mod)) orelse return error.Overflow; if (padding > 0) { try code.writer().writeByteNTimes(0, padding); } } - // Payload size is larger than error set, so emit our error set last - if (error_align <= payload_align) { - const begin = code.items.len; + if (layout.tag_size > 0) { switch (try generateSymbol(bin_file, src_loc, .{ - .ty = error_ty, - .val = if (is_payload) try mod.intValue(error_ty, 0) else typed_value.val, + .ty = 
union_ty.tag_ty, + .val = un.tag.toValue(), }, code, debug_output, reloc_info)) { .ok => {}, .fail => |em| return Result{ .fail = em }, } - const unpadded_end = code.items.len - begin; - const padded_end = mem.alignForwardGeneric(u64, unpadded_end, abi_align); - const padding = math.cast(usize, padded_end - unpadded_end) orelse return error.Overflow; - - if (padding > 0) { - try code.writer().writeByteNTimes(0, padding); - } - } - - return Result.ok; - }, - .ErrorSet => { - switch (typed_value.val.tag()) { - .@"error" => { - const name = typed_value.val.getError().?; - const kv = try bin_file.options.module.?.getErrorValue(name); - try code.writer().writeInt(u32, kv.value, endian); - }, - else => { - try code.writer().writeByteNTimes(0, @intCast(usize, Type.anyerror.abiSize(mod))); - }, } - return Result.ok; }, - .Vector => switch (typed_value.val.tag()) { - .bytes => { - const bytes = typed_value.val.castTag(.bytes).?.data; - const len = math.cast(usize, typed_value.ty.arrayLen(mod)) orelse return error.Overflow; - const padding = math.cast(usize, typed_value.ty.abiSize(mod) - len) orelse - return error.Overflow; - try code.ensureUnusedCapacity(len + padding); - code.appendSliceAssumeCapacity(bytes[0..len]); - if (padding > 0) try code.writer().writeByteNTimes(0, padding); - return Result.ok; - }, - .aggregate => { - const elem_vals = typed_value.val.castTag(.aggregate).?.data; - const elem_ty = typed_value.ty.childType(mod); - const len = math.cast(usize, typed_value.ty.arrayLen(mod)) orelse return error.Overflow; - const padding = math.cast(usize, typed_value.ty.abiSize(mod) - - (math.divCeil(u64, elem_ty.bitSize(mod) * len, 8) catch |err| switch (err) { - error.DivisionByZero => unreachable, - else => |e| return e, - })) orelse return error.Overflow; - for (elem_vals[0..len]) |elem_val| { - switch (try generateSymbol(bin_file, src_loc, .{ - .ty = elem_ty, - .val = elem_val, - }, code, debug_output, reloc_info)) { - .ok => {}, - .fail => |em| return Result{ .fail = em }, - } - } - if (padding > 0) try code.writer().writeByteNTimes(0, padding); - return Result.ok; - }, - .repeated => { - const array = typed_value.val.castTag(.repeated).?.data; - const elem_ty = typed_value.ty.childType(mod); - const len = typed_value.ty.arrayLen(mod); - const padding = math.cast(usize, typed_value.ty.abiSize(mod) - - (math.divCeil(u64, elem_ty.bitSize(mod) * len, 8) catch |err| switch (err) { - error.DivisionByZero => unreachable, - else => |e| return e, - })) orelse return error.Overflow; - var index: u64 = 0; - while (index < len) : (index += 1) { - switch (try generateSymbol(bin_file, src_loc, .{ - .ty = elem_ty, - .val = array, - }, code, debug_output, reloc_info)) { - .ok => {}, - .fail => |em| return Result{ .fail = em }, - } - } - if (padding > 0) try code.writer().writeByteNTimes(0, padding); - return Result.ok; - }, - .str_lit => { - const str_lit = typed_value.val.castTag(.str_lit).?.data; - const bytes = mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; - const padding = math.cast(usize, typed_value.ty.abiSize(mod) - str_lit.len) orelse - return error.Overflow; - try code.ensureUnusedCapacity(str_lit.len + padding); - code.appendSliceAssumeCapacity(bytes); - if (padding > 0) try code.writer().writeByteNTimes(0, padding); - return Result.ok; - }, - else => unreachable, - }, - else => |tag| return Result{ .fail = try ErrorMsg.create( - bin_file.allocator, - src_loc, - "TODO implement generateSymbol for type '{s}'", - .{@tagName(tag)}, - ) }, } + return .ok; } fn lowerParentPtr( 
bin_file: *link.File, src_loc: Module.SrcLoc, - typed_value: TypedValue, - parent_ptr: Value, + parent_ptr: InternPool.Index, code: *std.ArrayList(u8), debug_output: DebugInfoOutput, reloc_info: RelocInfo, ) CodeGenError!Result { const mod = bin_file.options.module.?; - switch (parent_ptr.tag()) { - .field_ptr => { - const field_ptr = parent_ptr.castTag(.field_ptr).?.data; + const ptr = mod.intern_pool.indexToKey(parent_ptr).ptr; + assert(ptr.len == .none); + return switch (ptr.addr) { + .decl, .mut_decl => try lowerDeclRef( + bin_file, + src_loc, + switch (ptr.addr) { + .decl => |decl| decl, + .mut_decl => |mut_decl| mut_decl.decl, + else => unreachable, + }, + code, + debug_output, + reloc_info, + ), + .int => |int| try generateSymbol(bin_file, src_loc, .{ + .ty = Type.usize, + .val = int.toValue(), + }, code, debug_output, reloc_info), + .eu_payload => |eu_payload| try lowerParentPtr( + bin_file, + src_loc, + eu_payload, + code, + debug_output, + reloc_info.offset(@intCast(u32, errUnionPayloadOffset( + mod.intern_pool.typeOf(eu_payload).toType(), + mod, + ))), + ), + .opt_payload => |opt_payload| try lowerParentPtr( + bin_file, + src_loc, + opt_payload, + code, + debug_output, + reloc_info, + ), + .elem => |elem| try lowerParentPtr( + bin_file, + src_loc, + elem.base, + code, + debug_output, + reloc_info.offset(@intCast(u32, elem.index * + mod.intern_pool.typeOf(elem.base).toType().elemType2(mod).abiSize(mod))), + ), + .field => |field| { + const base_type = mod.intern_pool.typeOf(field.base); return lowerParentPtr( bin_file, src_loc, - typed_value, - field_ptr.container_ptr, + field.base, code, debug_output, - reloc_info.offset(@intCast(u32, switch (field_ptr.container_ty.zigTypeTag(mod)) { - .Pointer => offset: { - assert(field_ptr.container_ty.isSlice(mod)); - break :offset switch (field_ptr.field_index) { + reloc_info.offset(switch (mod.intern_pool.indexToKey(base_type)) { + .ptr_type => |ptr_type| switch (ptr_type.size) { + .One, .Many, .C => unreachable, + .Slice => switch (field.index) { 0 => 0, - 1 => field_ptr.container_ty.slicePtrFieldType(mod).abiSize(mod), + 1 => @divExact(mod.getTarget().ptrBitWidth(), 8), else => unreachable, - }; + }, }, - .Struct, .Union => field_ptr.container_ty.structFieldOffset( - field_ptr.field_index, + .struct_type, + .anon_struct_type, + .union_type, + => @intCast(u32, base_type.toType().childType(mod).structFieldOffset( + @intCast(u32, field.index), mod, - ), - else => return Result{ .fail = try ErrorMsg.create( - bin_file.allocator, - src_loc, - "TODO implement lowerParentPtr for field_ptr with a container of type {}", - .{field_ptr.container_ty.fmt(bin_file.options.module.?)}, - ) }, - })), - ); - }, - .elem_ptr => { - const elem_ptr = parent_ptr.castTag(.elem_ptr).?.data; - return lowerParentPtr( - bin_file, - src_loc, - typed_value, - elem_ptr.array_ptr, - code, - debug_output, - reloc_info.offset(@intCast(u32, elem_ptr.index * elem_ptr.elem_ty.abiSize(mod))), - ); - }, - .opt_payload_ptr => { - const opt_payload_ptr = parent_ptr.castTag(.opt_payload_ptr).?.data; - return lowerParentPtr( - bin_file, - src_loc, - typed_value, - opt_payload_ptr.container_ptr, - code, - debug_output, - reloc_info, - ); - }, - .eu_payload_ptr => { - const eu_payload_ptr = parent_ptr.castTag(.eu_payload_ptr).?.data; - const pl_ty = eu_payload_ptr.container_ty.errorUnionPayload(mod); - return lowerParentPtr( - bin_file, - src_loc, - typed_value, - eu_payload_ptr.container_ptr, - code, - debug_output, - reloc_info.offset(@intCast(u32, errUnionPayloadOffset(pl_ty, 
mod))), + )), + else => unreachable, + }), ); }, - .variable, .decl_ref, .decl_ref_mut => |tag| return lowerDeclRef( - bin_file, - src_loc, - typed_value, - switch (tag) { - .variable => parent_ptr.castTag(.variable).?.data.owner_decl, - .decl_ref => parent_ptr.castTag(.decl_ref).?.data, - .decl_ref_mut => parent_ptr.castTag(.decl_ref_mut).?.data.decl_index, - else => unreachable, - }, - code, - debug_output, - reloc_info, - ), - else => |tag| return Result{ .fail = try ErrorMsg.create( - bin_file.allocator, - src_loc, - "TODO implement lowerParentPtr for type '{s}'", - .{@tagName(tag)}, - ) }, - } + .comptime_field => unreachable, + }; } const RelocInfo = struct { @@ -940,36 +799,15 @@ const RelocInfo = struct { fn lowerDeclRef( bin_file: *link.File, src_loc: Module.SrcLoc, - typed_value: TypedValue, decl_index: Module.Decl.Index, code: *std.ArrayList(u8), debug_output: DebugInfoOutput, reloc_info: RelocInfo, ) CodeGenError!Result { + _ = src_loc; + _ = debug_output; const target = bin_file.options.target; const mod = bin_file.options.module.?; - if (typed_value.ty.isSlice(mod)) { - // generate ptr - const slice_ptr_field_type = typed_value.ty.slicePtrFieldType(mod); - switch (try generateSymbol(bin_file, src_loc, .{ - .ty = slice_ptr_field_type, - .val = typed_value.val, - }, code, debug_output, reloc_info)) { - .ok => {}, - .fail => |em| return Result{ .fail = em }, - } - - // generate length - switch (try generateSymbol(bin_file, src_loc, .{ - .ty = Type.usize, - .val = try mod.intValue(Type.usize, typed_value.val.sliceLen(mod)), - }, code, debug_output, reloc_info)) { - .ok => {}, - .fail => |em| return Result{ .fail = em }, - } - - return Result.ok; - } const ptr_width = target.ptrBitWidth(); const decl = mod.declPtr(decl_index); @@ -1154,12 +992,13 @@ pub fn genTypedValue( arg_tv: TypedValue, owner_decl_index: Module.Decl.Index, ) CodeGenError!GenResult { + const mod = bin_file.options.module.?; var typed_value = arg_tv; - if (typed_value.val.castTag(.runtime_value)) |rt| { - typed_value.val = rt.data; + switch (mod.intern_pool.indexToKey(typed_value.val.ip_index)) { + .runtime_value => |rt| typed_value.val = rt.val.toValue(), + else => {}, } - const mod = bin_file.options.module.?; log.debug("genTypedValue: ty = {}, val = {}", .{ typed_value.ty.fmt(mod), typed_value.val.fmtValue(typed_value.ty, mod), @@ -1171,17 +1010,14 @@ pub fn genTypedValue( const target = bin_file.options.target; const ptr_bits = target.ptrBitWidth(); - if (!typed_value.ty.isSlice(mod)) { - if (typed_value.val.castTag(.variable)) |payload| { - return genDeclRef(bin_file, src_loc, typed_value, payload.data.owner_decl); - } - if (typed_value.val.castTag(.decl_ref)) |payload| { - return genDeclRef(bin_file, src_loc, typed_value, payload.data); - } - if (typed_value.val.castTag(.decl_ref_mut)) |payload| { - return genDeclRef(bin_file, src_loc, typed_value, payload.data.decl_index); - } - } + if (!typed_value.ty.isSlice(mod)) switch (mod.intern_pool.indexToKey(typed_value.val.ip_index)) { + .ptr => |ptr| switch (ptr.addr) { + .decl => |decl| return genDeclRef(bin_file, src_loc, typed_value, decl), + .mut_decl => |mut_decl| return genDeclRef(bin_file, src_loc, typed_value, mut_decl.decl), + else => {}, + }, + else => {}, + }; switch (typed_value.ty.zigTypeTag(mod)) { .Void => return GenResult.mcv(.none), @@ -1215,11 +1051,9 @@ pub fn genTypedValue( }, .Optional => { if (typed_value.ty.isPtrLikeOptional(mod)) { - if (typed_value.val.ip_index == .null_value) return GenResult.mcv(.{ .immediate = 0 }); - return 
genTypedValue(bin_file, src_loc, .{ .ty = typed_value.ty.optionalChild(mod), - .val = if (typed_value.val.castTag(.opt_payload)) |pl| pl.data else typed_value.val, + .val = typed_value.val.optionalValue(mod) orelse return GenResult.mcv(.{ .immediate = 0 }), }, owner_decl_index); } else if (typed_value.ty.abiSize(mod) == 1) { return GenResult.mcv(.{ .immediate = @boolToInt(!typed_value.val.isNull(mod)) }); @@ -1234,24 +1068,15 @@ pub fn genTypedValue( }, owner_decl_index); }, .ErrorSet => { - switch (typed_value.val.tag()) { - .@"error" => { - const err_name = typed_value.val.castTag(.@"error").?.data.name; - const module = bin_file.options.module.?; - const global_error_set = module.global_error_set; - const error_index = global_error_set.get(err_name).?; - return GenResult.mcv(.{ .immediate = error_index }); - }, - else => { - // In this case we are rendering an error union which has a 0 bits payload. - return GenResult.mcv(.{ .immediate = 0 }); - }, - } + const err_name = mod.intern_pool.stringToSlice(mod.intern_pool.indexToKey(typed_value.val.ip_index).err.name); + const global_error_set = mod.global_error_set; + const error_index = global_error_set.get(err_name).?; + return GenResult.mcv(.{ .immediate = error_index }); }, .ErrorUnion => { const error_type = typed_value.ty.errorUnionSet(mod); const payload_type = typed_value.ty.errorUnionPayload(mod); - const is_pl = typed_value.val.errorUnionIsPayload(); + const is_pl = typed_value.val.errorUnionIsPayload(mod); if (!payload_type.hasRuntimeBitsIgnoreComptime(mod)) { // We use the error type directly as the type. diff --git a/src/codegen/c.zig b/src/codegen/c.zig index d3b8e06e5d..1bb8130b1f 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -257,7 +257,7 @@ pub fn fmtIdent(ident: []const u8) std.fmt.Formatter(formatIdent) { return .{ .data = ident }; } -/// This data is available when outputting .c code for a `*Module.Fn`. +/// This data is available when outputting .c code for a `Module.Fn.Index`. /// It is not available when generating .h file. pub const Function = struct { air: Air, @@ -268,7 +268,7 @@ pub const Function = struct { next_block_index: usize = 0, object: Object, lazy_fns: LazyFnMap, - func: *Module.Fn, + func_index: Module.Fn.Index, /// All the locals, to be emitted at the top of the function. locals: std.ArrayListUnmanaged(Local) = .{}, /// Which locals are available for reuse, based on Type. @@ -549,33 +549,12 @@ pub const DeclGen = struct { } // Chase function values in order to be able to reference the original function. 
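 // For example, given `const alias = foo;` where `foo` is a function, the
 // decl value for `alias` is the function owned by `foo`'s decl, so we emit
 // a reference to `foo` instead of duplicating its definition here.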
- inline for (.{ .function, .extern_fn }) |tag| - if (decl.val.castTag(tag)) |func| - if (func.data.owner_decl != decl_index) - return dg.renderDeclValue(writer, ty, val, func.data.owner_decl, location); + if (decl.getFunction(mod)) |func| if (func.owner_decl != decl_index) + return dg.renderDeclValue(writer, ty, val, func.owner_decl, location); + if (decl.getExternFunc(mod)) |extern_func| if (extern_func.decl != decl_index) + return dg.renderDeclValue(writer, ty, val, extern_func.decl, location); - if (decl.val.castTag(.variable)) |var_payload| - try dg.renderFwdDecl(decl_index, var_payload.data); - - if (ty.isSlice(mod)) { - if (location == .StaticInitializer) { - try writer.writeByte('{'); - } else { - try writer.writeByte('('); - try dg.renderType(writer, ty); - try writer.writeAll("){ .ptr = "); - } - - try dg.renderValue(writer, ty.slicePtrFieldType(mod), val.slicePtr(mod), .Initializer); - - const len_val = try mod.intValue(Type.usize, val.sliceLen(mod)); - - if (location == .StaticInitializer) { - return writer.print(", {} }}", .{try dg.fmtIntLiteral(Type.usize, len_val, .Other)}); - } else { - return writer.print(", .len = {} }}", .{try dg.fmtIntLiteral(Type.usize, len_val, .Other)}); - } - } + if (decl.getVariable(mod)) |variable| try dg.renderFwdDecl(decl_index, variable); // We shouldn't cast C function pointers as this is UB (when you call // them). The analysis until now should ensure that the C function @@ -594,125 +573,77 @@ pub const DeclGen = struct { /// Renders a "parent" pointer by recursing to the root decl/variable /// that its contents are defined with respect to. - /// - /// Used for .elem_ptr, .field_ptr, .opt_payload_ptr, .eu_payload_ptr fn renderParentPtr( dg: *DeclGen, writer: anytype, - ptr_val: Value, - ptr_ty: Type, + ptr_val: InternPool.Index, location: ValueRenderLocation, ) error{ OutOfMemory, AnalysisFail }!void { const mod = dg.module; - - if (!ptr_ty.isSlice(mod)) { - try writer.writeByte('('); - try dg.renderType(writer, ptr_ty); - try writer.writeByte(')'); - } - if (ptr_val.ip_index != .none) switch (mod.intern_pool.indexToKey(ptr_val.ip_index)) { - .int => try writer.print("{x}", .{try dg.fmtIntLiteral(Type.usize, ptr_val, .Other)}), - else => unreachable, - }; - switch (ptr_val.tag()) { - .decl_ref_mut, .decl_ref, .variable => { - const decl_index = switch (ptr_val.tag()) { - .decl_ref => ptr_val.castTag(.decl_ref).?.data, - .decl_ref_mut => ptr_val.castTag(.decl_ref_mut).?.data.decl_index, - .variable => ptr_val.castTag(.variable).?.data.owner_decl, + const ptr_ty = mod.intern_pool.typeOf(ptr_val).toType(); + const ptr = mod.intern_pool.indexToKey(ptr_val).ptr; + switch (ptr.addr) { + .decl, .mut_decl => try dg.renderDeclValue( + writer, + ptr_ty, + ptr_val.toValue(), + switch (ptr.addr) { + .decl => |decl| decl, + .mut_decl => |mut_decl| mut_decl.decl, else => unreachable, - }; - try dg.renderDeclValue(writer, ptr_ty, ptr_val, decl_index, location); + }, + location, + ), + .int => |int| try writer.print("{x}", .{ + try dg.fmtIntLiteral(Type.usize, int.toValue(), .Other), + }), + .eu_payload, .opt_payload => |base| { + const base_ty = mod.intern_pool.typeOf(base).toType().childType(mod); + // Ensure complete type definition is visible before accessing fields. 
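+            // (C forbids member access through an incomplete type, so the
+            // `->payload` access below needs the full definition emitted first.)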
+ _ = try dg.typeToIndex(base_ty, .complete); + try writer.writeAll("&("); + try dg.renderParentPtr(writer, base, location); + try writer.writeAll(")->payload"); }, - .field_ptr => { - const field_ptr = ptr_val.castTag(.field_ptr).?.data; - + .elem => |elem| { + try writer.writeAll("&("); + try dg.renderParentPtr(writer, elem.base, location); + try writer.print(")[{d}]", .{elem.index}); + }, + .field => |field| { + const base_ty = mod.intern_pool.typeOf(field.base).toType().childType(mod); // Ensure complete type definition is visible before accessing fields. - _ = try dg.typeToIndex(field_ptr.container_ty, .complete); - - const container_ptr_ty = try mod.adjustPtrTypeChild(ptr_ty, field_ptr.container_ty); - - switch (fieldLocation( - field_ptr.container_ty, - ptr_ty, - @intCast(u32, field_ptr.field_index), - mod, - )) { - .begin => try dg.renderParentPtr( - writer, - field_ptr.container_ptr, - container_ptr_ty, - location, - ), - .field => |field| { + _ = try dg.typeToIndex(base_ty, .complete); + switch (fieldLocation(base_ty, ptr_ty, @intCast(u32, field.index), mod)) { + .begin => try dg.renderParentPtr(writer, field.base, location), + .field => |name| { try writer.writeAll("&("); - try dg.renderParentPtr( - writer, - field_ptr.container_ptr, - container_ptr_ty, - location, - ); + try dg.renderParentPtr(writer, field.base, location); try writer.writeAll(")->"); - try dg.writeCValue(writer, field); + try dg.writeCValue(writer, name); }, .byte_offset => |byte_offset| { const u8_ptr_ty = try mod.adjustPtrTypeChild(ptr_ty, Type.u8); - const byte_offset_val = try mod.intValue(Type.usize, byte_offset); try writer.writeAll("(("); try dg.renderType(writer, u8_ptr_ty); try writer.writeByte(')'); - try dg.renderParentPtr( - writer, - field_ptr.container_ptr, - container_ptr_ty, - location, - ); + try dg.renderParentPtr(writer, field.base, location); try writer.print(" + {})", .{ try dg.fmtIntLiteral(Type.usize, byte_offset_val, .Other), }); }, .end => { try writer.writeAll("(("); - try dg.renderParentPtr( - writer, - field_ptr.container_ptr, - container_ptr_ty, - location, - ); + try dg.renderParentPtr(writer, field.base, location); try writer.print(") + {})", .{ try dg.fmtIntLiteral(Type.usize, try mod.intValue(Type.usize, 1), .Other), }); }, } }, - .elem_ptr => { - const elem_ptr = ptr_val.castTag(.elem_ptr).?.data; - const elem_ptr_ty = try mod.ptrType(.{ - .size = .C, - .elem_type = elem_ptr.elem_ty.ip_index, - }); - - try writer.writeAll("&("); - try dg.renderParentPtr(writer, elem_ptr.array_ptr, elem_ptr_ty, location); - try writer.print(")[{d}]", .{elem_ptr.index}); - }, - .opt_payload_ptr, .eu_payload_ptr => { - const payload_ptr = ptr_val.cast(Value.Payload.PayloadPtr).?.data; - const container_ptr_ty = try mod.ptrType(.{ - .elem_type = payload_ptr.container_ty.ip_index, - .size = .C, - }); - - // Ensure complete type definition is visible before accessing fields. 
- _ = try dg.typeToIndex(payload_ptr.container_ty, .complete); - - try writer.writeAll("&("); - try dg.renderParentPtr(writer, payload_ptr.container_ptr, container_ptr_ty, location); - try writer.writeAll(")->payload"); - }, - else => unreachable, + .comptime_field => unreachable, } } @@ -723,11 +654,12 @@ pub const DeclGen = struct { arg_val: Value, location: ValueRenderLocation, ) error{ OutOfMemory, AnalysisFail }!void { + const mod = dg.module; var val = arg_val; - if (val.castTag(.runtime_value)) |rt| { - val = rt.data; + switch (mod.intern_pool.indexToKey(val.ip_index)) { + .runtime_value => |rt| val = rt.val.toValue(), + else => {}, } - const mod = dg.module; const target = mod.getTarget(); const initializer_type: ValueRenderLocation = switch (location) { .StaticInitializer => .StaticInitializer, @@ -928,175 +860,8 @@ pub const DeclGen = struct { } unreachable; } - switch (ty.zigTypeTag(mod)) { - .Int => switch (val.tag()) { - .field_ptr, - .elem_ptr, - .opt_payload_ptr, - .eu_payload_ptr, - .decl_ref_mut, - .decl_ref, - => try dg.renderParentPtr(writer, val, ty, location), - else => try writer.print("{}", .{try dg.fmtIntLiteral(ty, val, location)}), - }, - .Float => { - const bits = ty.floatBits(target); - const f128_val = val.toFloat(f128, mod); - - // All unsigned ints matching float types are pre-allocated. - const repr_ty = mod.intType(.unsigned, bits) catch unreachable; - - assert(bits <= 128); - var repr_val_limbs: [BigInt.calcTwosCompLimbCount(128)]BigIntLimb = undefined; - var repr_val_big = BigInt.Mutable{ - .limbs = &repr_val_limbs, - .len = undefined, - .positive = undefined, - }; - switch (bits) { - 16 => repr_val_big.set(@bitCast(u16, val.toFloat(f16, mod))), - 32 => repr_val_big.set(@bitCast(u32, val.toFloat(f32, mod))), - 64 => repr_val_big.set(@bitCast(u64, val.toFloat(f64, mod))), - 80 => repr_val_big.set(@bitCast(u80, val.toFloat(f80, mod))), - 128 => repr_val_big.set(@bitCast(u128, f128_val)), - else => unreachable, - } - - const repr_val = try mod.intValue_big(repr_ty, repr_val_big.toConst()); - - try writer.writeAll("zig_cast_"); - try dg.renderTypeForBuiltinFnName(writer, ty); - try writer.writeByte(' '); - var empty = true; - if (std.math.isFinite(f128_val)) { - try writer.writeAll("zig_make_"); - try dg.renderTypeForBuiltinFnName(writer, ty); - try writer.writeByte('('); - switch (bits) { - 16 => try writer.print("{x}", .{val.toFloat(f16, mod)}), - 32 => try writer.print("{x}", .{val.toFloat(f32, mod)}), - 64 => try writer.print("{x}", .{val.toFloat(f64, mod)}), - 80 => try writer.print("{x}", .{val.toFloat(f80, mod)}), - 128 => try writer.print("{x}", .{f128_val}), - else => unreachable, - } - try writer.writeAll(", "); - empty = false; - } else { - // isSignalNan is equivalent to isNan currently, and MSVC doens't have nans, so prefer nan - const operation = if (std.math.isNan(f128_val)) - "nan" - else if (std.math.isSignalNan(f128_val)) - "nans" - else if (std.math.isInf(f128_val)) - "inf" - else - unreachable; - - if (location == .StaticInitializer) { - if (!std.math.isNan(f128_val) and std.math.isSignalNan(f128_val)) - return dg.fail("TODO: C backend: implement nans rendering in static initializers", .{}); - - // MSVC doesn't have a way to define a custom or signaling NaN value in a constant expression - - // TODO: Re-enable this check, otherwise we're writing qnan bit patterns on msvc incorrectly - // if (std.math.isNan(f128_val) and f128_val != std.math.qnan_f128) - // return dg.fail("Only quiet nans are supported in global variable initializers", 
.{}); - } - - try writer.writeAll("zig_"); - try writer.writeAll(if (location == .StaticInitializer) "init" else "make"); - try writer.writeAll("_special_"); - try dg.renderTypeForBuiltinFnName(writer, ty); - try writer.writeByte('('); - if (std.math.signbit(f128_val)) try writer.writeByte('-'); - try writer.writeAll(", "); - try writer.writeAll(operation); - try writer.writeAll(", "); - if (std.math.isNan(f128_val)) switch (bits) { - // We only actually need to pass the significand, but it will get - // properly masked anyway, so just pass the whole value. - 16 => try writer.print("\"0x{x}\"", .{@bitCast(u16, val.toFloat(f16, mod))}), - 32 => try writer.print("\"0x{x}\"", .{@bitCast(u32, val.toFloat(f32, mod))}), - 64 => try writer.print("\"0x{x}\"", .{@bitCast(u64, val.toFloat(f64, mod))}), - 80 => try writer.print("\"0x{x}\"", .{@bitCast(u80, val.toFloat(f80, mod))}), - 128 => try writer.print("\"0x{x}\"", .{@bitCast(u128, f128_val)}), - else => unreachable, - }; - try writer.writeAll(", "); - empty = false; - } - try writer.print("{x}", .{try dg.fmtIntLiteral(repr_ty, repr_val, location)}); - if (!empty) try writer.writeByte(')'); - return; - }, - .Pointer => switch (val.ip_index) { - .null_value => if (ty.isSlice(mod)) { - var slice_pl = Value.Payload.Slice{ - .base = .{ .tag = .slice }, - .data = .{ .ptr = val, .len = Value.undef }, - }; - const slice_val = Value.initPayload(&slice_pl.base); - - return dg.renderValue(writer, ty, slice_val, location); - } else { - try writer.writeAll("(("); - try dg.renderType(writer, ty); - try writer.writeAll(")NULL)"); - }, - .none => switch (val.tag()) { - .variable => { - const decl = val.castTag(.variable).?.data.owner_decl; - return dg.renderDeclValue(writer, ty, val, decl, location); - }, - .slice => { - if (!location.isInitializer()) { - try writer.writeByte('('); - try dg.renderType(writer, ty); - try writer.writeByte(')'); - } - - const slice = val.castTag(.slice).?.data; - - try writer.writeByte('{'); - try dg.renderValue(writer, ty.slicePtrFieldType(mod), slice.ptr, initializer_type); - try writer.writeAll(", "); - try dg.renderValue(writer, Type.usize, slice.len, initializer_type); - try writer.writeByte('}'); - }, - .function => { - const func = val.castTag(.function).?.data; - try dg.renderDeclName(writer, func.owner_decl, 0); - }, - .extern_fn => { - const extern_fn = val.castTag(.extern_fn).?.data; - try dg.renderDeclName(writer, extern_fn.owner_decl, 0); - }, - .lazy_align, .lazy_size => { - try writer.writeAll("(("); - try dg.renderType(writer, ty); - return writer.print("){x})", .{try dg.fmtIntLiteral(Type.usize, val, .Other)}); - }, - .field_ptr, - .elem_ptr, - .opt_payload_ptr, - .eu_payload_ptr, - .decl_ref_mut, - .decl_ref, - => try dg.renderParentPtr(writer, val, ty, location), - - else => unreachable, - }, - else => switch (mod.intern_pool.indexToKey(val.ip_index)) { - .int => { - try writer.writeAll("(("); - try dg.renderType(writer, ty); - return writer.print("){x})", .{try dg.fmtIntLiteral(Type.usize, val, .Other)}); - }, - else => unreachable, - }, - }, + if (val.ip_index == .none) switch (ty.zigTypeTag(mod)) { .Array, .Vector => { if (location == .FunctionArgument) { try writer.writeByte('('); @@ -1129,17 +894,6 @@ pub const DeclGen = struct { return; }, .none => switch (val.tag()) { - .empty_array => { - const ai = ty.arrayInfo(mod); - try writer.writeByte('{'); - if (ai.sentinel) |s| { - try dg.renderValue(writer, ai.elem_type, s, initializer_type); - } else { - try writer.writeByte('0'); - } - try 
writer.writeByte('}'); - return; - }, .bytes, .str_lit => |t| { const bytes = switch (t) { .bytes => val.castTag(.bytes).?.data, @@ -1210,91 +964,6 @@ pub const DeclGen = struct { try writer.writeByte('}'); } }, - .Bool => { - if (val.toBool(mod)) { - return writer.writeAll("true"); - } else { - return writer.writeAll("false"); - } - }, - .Optional => { - const payload_ty = ty.optionalChild(mod); - - const is_null_val = Value.makeBool(val.ip_index == .null_value); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) - return dg.renderValue(writer, Type.bool, is_null_val, location); - - if (ty.optionalReprIsPayload(mod)) { - const payload_val = if (val.castTag(.opt_payload)) |pl| pl.data else val; - return dg.renderValue(writer, payload_ty, payload_val, location); - } - - if (!location.isInitializer()) { - try writer.writeByte('('); - try dg.renderType(writer, ty); - try writer.writeByte(')'); - } - - const payload_val = if (val.castTag(.opt_payload)) |pl| pl.data else Value.undef; - - try writer.writeAll("{ .payload = "); - try dg.renderValue(writer, payload_ty, payload_val, initializer_type); - try writer.writeAll(", .is_null = "); - try dg.renderValue(writer, Type.bool, is_null_val, initializer_type); - try writer.writeAll(" }"); - }, - .ErrorSet => { - if (val.castTag(.@"error")) |error_pl| { - // Error values are already defined by genErrDecls. - try writer.print("zig_error_{}", .{fmtIdent(error_pl.data.name)}); - } else { - try writer.print("{}", .{try dg.fmtIntLiteral(ty, val, .Other)}); - } - }, - .ErrorUnion => { - const payload_ty = ty.errorUnionPayload(mod); - const error_ty = ty.errorUnionSet(mod); - const error_val = if (val.errorUnionIsPayload()) try mod.intValue(Type.anyerror, 0) else val; - - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { - return dg.renderValue(writer, error_ty, error_val, location); - } - - if (!location.isInitializer()) { - try writer.writeByte('('); - try dg.renderType(writer, ty); - try writer.writeByte(')'); - } - - const payload_val = if (val.castTag(.eu_payload)) |pl| pl.data else Value.undef; - try writer.writeAll("{ .payload = "); - try dg.renderValue(writer, payload_ty, payload_val, initializer_type); - try writer.writeAll(", .error = "); - try dg.renderValue(writer, error_ty, error_val, initializer_type); - try writer.writeAll(" }"); - }, - .Enum => switch (val.ip_index) { - .none => { - const int_tag_ty = try ty.intTagType(mod); - return dg.renderValue(writer, int_tag_ty, val, location); - }, - else => { - const enum_tag = mod.intern_pool.indexToKey(val.ip_index).enum_tag; - const int_tag_ty = mod.intern_pool.typeOf(enum_tag.int); - return dg.renderValue(writer, int_tag_ty.toType(), enum_tag.int.toValue(), location); - }, - }, - .Fn => switch (val.tag()) { - .function => { - const decl = val.castTag(.function).?.data.owner_decl; - return dg.renderDeclValue(writer, ty, val, decl, location); - }, - .extern_fn => { - const decl = val.castTag(.extern_fn).?.data.owner_decl; - return dg.renderDeclValue(writer, ty, val, decl, location); - }, - else => unreachable, - }, .Struct => switch (ty.containerLayout(mod)) { .Auto, .Extern => { const field_vals = val.castTag(.aggregate).?.data; @@ -1408,7 +1077,448 @@ pub const DeclGen = struct { } }, }, - .Union => { + + .Frame, + .AnyFrame, + => |tag| return dg.fail("TODO: C backend: implement value of type {s}", .{ + @tagName(tag), + }), + + .Float, + .Union, + .Optional, + .ErrorUnion, + .ErrorSet, + .Int, + .Enum, + .Bool, + .Pointer, + => unreachable, // handled below + .Type, + .Void, + .NoReturn, + 
.ComptimeFloat, + .ComptimeInt, + .Undefined, + .Null, + .Opaque, + .EnumLiteral, + .Fn, + => unreachable, // comptime-only types + }; + + switch (mod.intern_pool.indexToKey(val.ip_index)) { + .int_type, + .ptr_type, + .array_type, + .vector_type, + .opt_type, + .anyframe_type, + .error_union_type, + .simple_type, + .struct_type, + .anon_struct_type, + .union_type, + .opaque_type, + .enum_type, + .func_type, + .error_set_type, + .inferred_error_set_type, + => unreachable, // types, not values + + .undef, .runtime_value => unreachable, // handled above + .simple_value => |simple_value| switch (simple_value) { + .undefined, + .void, + .null, + .empty_struct, + .@"unreachable", + .generic_poison, + => unreachable, // non-runtime values + .false, .true => try writer.writeAll(@tagName(simple_value)), + }, + .variable, + .extern_func, + .func, + .enum_literal, + => unreachable, // non-runtime values + .int => |int| switch (int.storage) { + .u64, .i64, .big_int => try writer.print("{}", .{try dg.fmtIntLiteral(ty, val, location)}), + .lazy_align, .lazy_size => { + try writer.writeAll("(("); + try dg.renderType(writer, ty); + return writer.print("){x})", .{try dg.fmtIntLiteral(Type.usize, val, .Other)}); + }, + }, + .err => |err| try writer.print("zig_error_{}", .{ + fmtIdent(mod.intern_pool.stringToSlice(err.name)), + }), + .error_union => |error_union| { + const payload_ty = ty.errorUnionPayload(mod); + const error_ty = ty.errorUnionSet(mod); + const error_val = if (val.errorUnionIsPayload(mod)) try mod.intValue(Type.anyerror, 0) else val; + + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + return dg.renderValue(writer, error_ty, error_val, location); + } + + if (!location.isInitializer()) { + try writer.writeByte('('); + try dg.renderType(writer, ty); + try writer.writeByte(')'); + } + + const payload_val = switch (error_union.val) { + .err_name => try mod.intern(.{ .undef = payload_ty.ip_index }), + .payload => |payload| payload, + }.toValue(); + + try writer.writeAll("{ .payload = "); + try dg.renderValue(writer, payload_ty, payload_val, initializer_type); + try writer.writeAll(", .error = "); + try dg.renderValue(writer, error_ty, error_val, initializer_type); + try writer.writeAll(" }"); + }, + .enum_tag => { + const enum_tag = mod.intern_pool.indexToKey(val.ip_index).enum_tag; + const int_tag_ty = mod.intern_pool.typeOf(enum_tag.int); + try dg.renderValue(writer, int_tag_ty.toType(), enum_tag.int.toValue(), location); + }, + .float => { + const bits = ty.floatBits(target); + const f128_val = val.toFloat(f128, mod); + + // All unsigned ints matching float types are pre-allocated. 
+
+            const repr_ty = mod.intType(.unsigned, bits) catch unreachable;
+
+            assert(bits <= 128);
+            var repr_val_limbs: [BigInt.calcTwosCompLimbCount(128)]BigIntLimb = undefined;
+            var repr_val_big = BigInt.Mutable{
+                .limbs = &repr_val_limbs,
+                .len = undefined,
+                .positive = undefined,
+            };
+
+            switch (bits) {
+                16 => repr_val_big.set(@bitCast(u16, val.toFloat(f16, mod))),
+                32 => repr_val_big.set(@bitCast(u32, val.toFloat(f32, mod))),
+                64 => repr_val_big.set(@bitCast(u64, val.toFloat(f64, mod))),
+                80 => repr_val_big.set(@bitCast(u80, val.toFloat(f80, mod))),
+                128 => repr_val_big.set(@bitCast(u128, f128_val)),
+                else => unreachable,
+            }
+
+            const repr_val = try mod.intValue_big(repr_ty, repr_val_big.toConst());
+
+            try writer.writeAll("zig_cast_");
+            try dg.renderTypeForBuiltinFnName(writer, ty);
+            try writer.writeByte(' ');
+            var empty = true;
+            if (std.math.isFinite(f128_val)) {
+                try writer.writeAll("zig_make_");
+                try dg.renderTypeForBuiltinFnName(writer, ty);
+                try writer.writeByte('(');
+                switch (bits) {
+                    16 => try writer.print("{x}", .{val.toFloat(f16, mod)}),
+                    32 => try writer.print("{x}", .{val.toFloat(f32, mod)}),
+                    64 => try writer.print("{x}", .{val.toFloat(f64, mod)}),
+                    80 => try writer.print("{x}", .{val.toFloat(f80, mod)}),
+                    128 => try writer.print("{x}", .{f128_val}),
+                    else => unreachable,
+                }
+                try writer.writeAll(", ");
+                empty = false;
+            } else {
+                // isSignalNan is equivalent to isNan currently, and MSVC doesn't have nans, so prefer nan
+                const operation = if (std.math.isNan(f128_val))
+                    "nan"
+                else if (std.math.isSignalNan(f128_val))
+                    "nans"
+                else if (std.math.isInf(f128_val))
+                    "inf"
+                else
+                    unreachable;
+
+                if (location == .StaticInitializer) {
+                    if (!std.math.isNan(f128_val) and std.math.isSignalNan(f128_val))
+                        return dg.fail("TODO: C backend: implement nans rendering in static initializers", .{});
+
+                    // MSVC doesn't have a way to define a custom or signaling NaN value in a constant expression
+
+                    // TODO: Re-enable this check, otherwise we're writing qnan bit patterns on msvc incorrectly
+                    // if (std.math.isNan(f128_val) and f128_val != std.math.qnan_f128)
+                    //     return dg.fail("Only quiet nans are supported in global variable initializers", .{});
+                }
+
+                try writer.writeAll("zig_");
+                try writer.writeAll(if (location == .StaticInitializer) "init" else "make");
+                try writer.writeAll("_special_");
+                try dg.renderTypeForBuiltinFnName(writer, ty);
+                try writer.writeByte('(');
+                if (std.math.signbit(f128_val)) try writer.writeByte('-');
+                try writer.writeAll(", ");
+                try writer.writeAll(operation);
+                try writer.writeAll(", ");
+                if (std.math.isNan(f128_val)) switch (bits) {
+                    // We only actually need to pass the significand, but it will get
+                    // properly masked anyway, so just pass the whole value.
+ 16 => try writer.print("\"0x{x}\"", .{@bitCast(u16, val.toFloat(f16, mod))}), + 32 => try writer.print("\"0x{x}\"", .{@bitCast(u32, val.toFloat(f32, mod))}), + 64 => try writer.print("\"0x{x}\"", .{@bitCast(u64, val.toFloat(f64, mod))}), + 80 => try writer.print("\"0x{x}\"", .{@bitCast(u80, val.toFloat(f80, mod))}), + 128 => try writer.print("\"0x{x}\"", .{@bitCast(u128, f128_val)}), + else => unreachable, + }; + try writer.writeAll(", "); + empty = false; + } + try writer.print("{x}", .{try dg.fmtIntLiteral(repr_ty, repr_val, location)}); + if (!empty) try writer.writeByte(')'); + }, + .ptr => |ptr| { + if (ptr.len != .none) { + if (!location.isInitializer()) { + try writer.writeByte('('); + try dg.renderType(writer, ty); + try writer.writeByte(')'); + } + try writer.writeByte('{'); + } + switch (ptr.addr) { + .decl, .mut_decl => try dg.renderDeclValue( + writer, + ty, + val, + switch (ptr.addr) { + .decl => |decl| decl, + .mut_decl => |mut_decl| mut_decl.decl, + else => unreachable, + }, + location, + ), + .int => |int| { + try writer.writeAll("(("); + try dg.renderType(writer, ty); + try writer.print("){x})", .{ + try dg.fmtIntLiteral(Type.usize, int.toValue(), .Other), + }); + }, + .eu_payload, + .opt_payload, + .elem, + .field, + => try dg.renderParentPtr(writer, val.ip_index, location), + .comptime_field => unreachable, + } + if (ptr.len != .none) { + try writer.writeAll(", "); + try dg.renderValue(writer, Type.usize, ptr.len.toValue(), initializer_type); + try writer.writeByte('}'); + } + }, + .opt => |opt| { + const payload_ty = ty.optionalChild(mod); + + const is_null_val = Value.makeBool(opt.val == .none); + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) + return dg.renderValue(writer, Type.bool, is_null_val, location); + + if (ty.optionalReprIsPayload(mod)) { + return dg.renderValue(writer, payload_ty, switch (opt.val) { + .none => try mod.intValue(payload_ty, 0), + else => opt.val.toValue(), + }, location); + } + + if (!location.isInitializer()) { + try writer.writeByte('('); + try dg.renderType(writer, ty); + try writer.writeByte(')'); + } + + try writer.writeAll("{ .payload = "); + try dg.renderValue(writer, payload_ty, switch (opt.val) { + .none => try mod.intern(.{ .undef = payload_ty.ip_index }), + else => opt.val, + }.toValue(), initializer_type); + try writer.writeAll(", .is_null = "); + try dg.renderValue(writer, Type.bool, is_null_val, initializer_type); + try writer.writeAll(" }"); + }, + .aggregate => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .array_type, .vector_type => { + if (location == .FunctionArgument) { + try writer.writeByte('('); + try dg.renderType(writer, ty); + try writer.writeByte(')'); + } + // Fall back to generic implementation. 
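
// The code below picks between two C initializer forms for byte arrays. A
// minimal standalone sketch of that decision (the helper name here is
// hypothetical, not part of this file), with illustrative output shapes:
//
//   [5]u8 "hello"    ->  "hello"                (string literal)
//   [70000]u8 ...    ->  {'\x68','\x65', ...}   (per-byte brace initializer)
fn useStringInitializer(elem_is_u8: bool, len: u64) bool {
    // MSVC rejects string-literal initializers for arrays of 65536 or more
    // bytes (error C2078), hence the 65535 cutoff used below.
    return elem_is_u8 and len <= 65535;
}
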
+ + // MSVC throws C2078 if an array of size 65536 or greater is initialized with a string literal + const max_string_initializer_len = 65535; + + const ai = ty.arrayInfo(mod); + if (ai.elem_type.eql(Type.u8, mod)) { + if (ai.len <= max_string_initializer_len) { + var literal = stringLiteral(writer); + try literal.start(); + var index: usize = 0; + while (index < ai.len) : (index += 1) { + const elem_val = try val.elemValue(mod, index); + const elem_val_u8 = if (elem_val.isUndef(mod)) undefPattern(u8) else @intCast(u8, elem_val.toUnsignedInt(mod)); + try literal.writeChar(elem_val_u8); + } + if (ai.sentinel) |s| { + const s_u8 = @intCast(u8, s.toUnsignedInt(mod)); + if (s_u8 != 0) try literal.writeChar(s_u8); + } + try literal.end(); + } else { + try writer.writeByte('{'); + var index: usize = 0; + while (index < ai.len) : (index += 1) { + if (index != 0) try writer.writeByte(','); + const elem_val = try val.elemValue(mod, index); + const elem_val_u8 = if (elem_val.isUndef(mod)) undefPattern(u8) else @intCast(u8, elem_val.toUnsignedInt(mod)); + try writer.print("'\\x{x}'", .{elem_val_u8}); + } + if (ai.sentinel) |s| { + if (index != 0) try writer.writeByte(','); + try dg.renderValue(writer, ai.elem_type, s, initializer_type); + } + try writer.writeByte('}'); + } + } else { + try writer.writeByte('{'); + var index: usize = 0; + while (index < ai.len) : (index += 1) { + if (index != 0) try writer.writeByte(','); + const elem_val = try val.elemValue(mod, index); + try dg.renderValue(writer, ai.elem_type, elem_val, initializer_type); + } + if (ai.sentinel) |s| { + if (index != 0) try writer.writeByte(','); + try dg.renderValue(writer, ai.elem_type, s, initializer_type); + } + try writer.writeByte('}'); + } + }, + .struct_type, .anon_struct_type => switch (ty.containerLayout(mod)) { + .Auto, .Extern => { + const field_vals = val.castTag(.aggregate).?.data; + + if (!location.isInitializer()) { + try writer.writeByte('('); + try dg.renderType(writer, ty); + try writer.writeByte(')'); + } + + try writer.writeByte('{'); + var empty = true; + for (field_vals, 0..) |field_val, field_i| { + if (ty.structFieldIsComptime(field_i, mod)) continue; + const field_ty = ty.structFieldType(field_i, mod); + if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; + + if (!empty) try writer.writeByte(','); + try dg.renderValue(writer, field_ty, field_val, initializer_type); + + empty = false; + } + try writer.writeByte('}'); + }, + .Packed => { + const field_vals = val.castTag(.aggregate).?.data; + const int_info = ty.intInfo(mod); + + const bits = Type.smallestUnsignedBits(int_info.bits - 1); + const bit_offset_ty = try mod.intType(.unsigned, bits); + + var bit_offset: u64 = 0; + + var eff_num_fields: usize = 0; + for (0..field_vals.len) |field_i| { + if (ty.structFieldIsComptime(field_i, mod)) continue; + const field_ty = ty.structFieldType(field_i, mod); + if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; + + eff_num_fields += 1; + } + + if (eff_num_fields == 0) { + try writer.writeByte('('); + try dg.renderValue(writer, ty, Value.undef, initializer_type); + try writer.writeByte(')'); + } else if (ty.bitSize(mod) > 64) { + // zig_or_u128(zig_or_u128(zig_shl_u128(a, a_off), zig_shl_u128(b, b_off)), zig_shl_u128(c, c_off)) + var num_or = eff_num_fields - 1; + while (num_or > 0) : (num_or -= 1) { + try writer.writeAll("zig_or_"); + try dg.renderTypeForBuiltinFnName(writer, ty); + try writer.writeByte('('); + } + + var eff_index: usize = 0; + var needs_closing_paren = false; + for (field_vals, 0..) 
|field_val, field_i| { + if (ty.structFieldIsComptime(field_i, mod)) continue; + const field_ty = ty.structFieldType(field_i, mod); + if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; + + const cast_context = IntCastContext{ .value = .{ .value = field_val } }; + if (bit_offset != 0) { + try writer.writeAll("zig_shl_"); + try dg.renderTypeForBuiltinFnName(writer, ty); + try writer.writeByte('('); + try dg.renderIntCast(writer, ty, cast_context, field_ty, .FunctionArgument); + try writer.writeAll(", "); + const bit_offset_val = try mod.intValue(bit_offset_ty, bit_offset); + try dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument); + try writer.writeByte(')'); + } else { + try dg.renderIntCast(writer, ty, cast_context, field_ty, .FunctionArgument); + } + + if (needs_closing_paren) try writer.writeByte(')'); + if (eff_index != eff_num_fields - 1) try writer.writeAll(", "); + + bit_offset += field_ty.bitSize(mod); + needs_closing_paren = true; + eff_index += 1; + } + } else { + try writer.writeByte('('); + // a << a_off | b << b_off | c << c_off + var empty = true; + for (field_vals, 0..) |field_val, field_i| { + if (ty.structFieldIsComptime(field_i, mod)) continue; + const field_ty = ty.structFieldType(field_i, mod); + if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; + + if (!empty) try writer.writeAll(" | "); + try writer.writeByte('('); + try dg.renderType(writer, ty); + try writer.writeByte(')'); + + if (bit_offset != 0) { + try dg.renderValue(writer, field_ty, field_val, .Other); + try writer.writeAll(" << "); + const bit_offset_val = try mod.intValue(bit_offset_ty, bit_offset); + try dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument); + } else { + try dg.renderValue(writer, field_ty, field_val, .Other); + } + + bit_offset += field_ty.bitSize(mod); + empty = false; + } + try writer.writeByte(')'); + } + }, + }, + else => unreachable, + }, + .un => { const union_obj = val.castTag(.@"union").?.data; if (!location.isInitializer()) { @@ -1461,22 +1571,6 @@ pub const DeclGen = struct { if (ty.unionTagTypeSafety(mod)) |_| try writer.writeByte('}'); try writer.writeByte('}'); }, - - .ComptimeInt => unreachable, - .ComptimeFloat => unreachable, - .Type => unreachable, - .EnumLiteral => unreachable, - .Void => unreachable, - .NoReturn => unreachable, - .Undefined => unreachable, - .Null => unreachable, - .Opaque => unreachable, - - .Frame, - .AnyFrame, - => |tag| return dg.fail("TODO: C backend: implement value of type {s}", .{ - @tagName(tag), - }), } } @@ -1504,8 +1598,7 @@ pub const DeclGen = struct { else => unreachable, } } - if (fn_decl.val.castTag(.function)) |func_payload| - if (func_payload.data.is_cold) try w.writeAll("zig_cold "); + if (fn_decl.getFunction(mod)) |func| if (func.is_cold) try w.writeAll("zig_cold "); if (fn_info.return_type == .noreturn_type) try w.writeAll("zig_noreturn "); const trailing = try renderTypePrefix( @@ -1747,18 +1840,12 @@ pub const DeclGen = struct { fn declIsGlobal(dg: *DeclGen, tv: TypedValue) bool { const mod = dg.module; - switch (tv.val.tag()) { - .extern_fn => return true, - .function => { - const func = tv.val.castTag(.function).?.data; - return mod.decl_exports.contains(func.owner_decl); - }, - .variable => { - const variable = tv.val.castTag(.variable).?.data; - return mod.decl_exports.contains(variable.owner_decl); - }, + return switch (mod.intern_pool.indexToKey(tv.val.ip_index)) { + .variable => |variable| mod.decl_exports.contains(variable.decl), + .extern_func => true, + .func => 
|func| mod.decl_exports.contains(mod.funcPtr(func.index).owner_decl), else => unreachable, - } + }; } fn writeCValue(dg: *DeclGen, w: anytype, c_value: CValue) !void { @@ -1833,7 +1920,7 @@ pub const DeclGen = struct { try dg.writeCValue(writer, member); } - fn renderFwdDecl(dg: *DeclGen, decl_index: Decl.Index, variable: *Module.Var) !void { + fn renderFwdDecl(dg: *DeclGen, decl_index: Decl.Index, variable: InternPool.Key.Variable) !void { const decl = dg.module.declPtr(decl_index); const fwd_decl_writer = dg.fwd_decl.writer(); const is_global = dg.declIsGlobal(.{ .ty = decl.ty, .val = decl.val }) or variable.is_extern; @@ -1844,7 +1931,7 @@ pub const DeclGen = struct { fwd_decl_writer, decl.ty, .{ .decl = decl_index }, - CQualifiers.init(.{ .@"const" = !variable.is_mutable }), + CQualifiers.init(.{ .@"const" = variable.is_const }), decl.@"align", .complete, ); @@ -1858,7 +1945,7 @@ pub const DeclGen = struct { if (mod.decl_exports.get(decl_index)) |exports| { try writer.writeAll(exports.items[export_index].options.name); - } else if (decl.isExtern()) { + } else if (decl.isExtern(mod)) { try writer.writeAll(mem.span(decl.name)); } else { // MSVC has a limit of 4095 character token length limit, and fmtIdent can (worst case), @@ -2416,8 +2503,11 @@ pub fn genErrDecls(o: *Object) !void { var max_name_len: usize = 0; for (mod.error_name_list.items, 0..) |name, value| { max_name_len = std.math.max(name.len, max_name_len); - var err_pl = Value.Payload.Error{ .data = .{ .name = name } }; - try o.dg.renderValue(writer, Type.anyerror, Value.initPayload(&err_pl.base), .Other); + const err_val = try mod.intern(.{ .err = .{ + .ty = .anyerror_type, + .name = mod.intern_pool.getString(name).unwrap().?, + } }); + try o.dg.renderValue(writer, Type.anyerror, err_val.toValue(), .Other); try writer.print(" = {d}u,\n", .{value}); } o.indent_writer.popIndent(); @@ -2451,7 +2541,7 @@ pub fn genErrDecls(o: *Object) !void { const name_array_ty = try mod.arrayType(.{ .len = mod.error_name_list.items.len, - .child = .const_slice_u8_sentinel_0_type, + .child = .slice_const_u8_sentinel_0_type, .sentinel = .zero_u8, }); @@ -2497,7 +2587,7 @@ pub fn genLazyFn(o: *Object, lazy_fn: LazyFnMap.Entry) !void { .tag_name => { const enum_ty = val.data.tag_name; - const name_slice_ty = Type.const_slice_u8_sentinel_0; + const name_slice_ty = Type.slice_const_u8_sentinel_0; try w.writeAll("static "); try o.dg.renderType(w, name_slice_ty); @@ -2668,14 +2758,13 @@ pub fn genDecl(o: *Object) !void { const tv: TypedValue = .{ .ty = decl.ty, .val = decl.val }; if (!tv.ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return; - if (tv.val.tag() == .extern_fn) { + if (decl.getExternFunc(mod)) |_| { const fwd_decl_writer = o.dg.fwd_decl.writer(); try fwd_decl_writer.writeAll("zig_extern "); try o.dg.renderFunctionSignature(fwd_decl_writer, decl_c_value.decl, .forward, .{ .export_index = 0 }); try fwd_decl_writer.writeAll(";\n"); try genExports(o); - } else if (tv.val.castTag(.variable)) |var_payload| { - const variable: *Module.Var = var_payload.data; + } else if (decl.getVariable(mod)) |variable| { try o.dg.renderFwdDecl(decl_c_value.decl, variable); try genExports(o); @@ -2690,7 +2779,7 @@ pub fn genDecl(o: *Object) !void { try o.dg.renderTypeAndName(w, tv.ty, decl_c_value, .{}, decl.@"align", .complete); if (decl.@"linksection" != null) try w.writeAll(", read, write)"); try w.writeAll(" = "); - try o.dg.renderValue(w, tv.ty, variable.init, .StaticInitializer); + try o.dg.renderValue(w, tv.ty, variable.init.toValue(), .StaticInitializer); 
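
// For orientation, a plausible shape of the C emitted by this branch for a
// simple global such as `var g: u32 = 42;` (exact type and literal spellings
// depend on the target and render options, so treat this as a sketch only):
//
//   uint32_t g = 42u;
//
// renderValue above writes the initializer expression; the terminating ';'
// is written immediately below.
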
try w.writeByte(';'); try o.indent_writer.insertNewline(); } else { @@ -4157,10 +4246,13 @@ fn airCall( known: { const fn_decl = fn_decl: { const callee_val = (try f.air.value(pl_op.operand, mod)) orelse break :known; - break :fn_decl switch (callee_val.tag()) { - .extern_fn => callee_val.castTag(.extern_fn).?.data.owner_decl, - .function => callee_val.castTag(.function).?.data.owner_decl, - .decl_ref => callee_val.castTag(.decl_ref).?.data, + break :fn_decl switch (mod.intern_pool.indexToKey(callee_val.ip_index)) { + .extern_func => |extern_func| extern_func.decl, + .func => |func| mod.funcPtr(func.index).owner_decl, + .ptr => |ptr| switch (ptr.addr) { + .decl => |decl| decl, + else => break :known, + }, else => break :known, }; }; @@ -4231,9 +4323,9 @@ fn airDbgStmt(f: *Function, inst: Air.Inst.Index) !CValue { fn airDbgInline(f: *Function, inst: Air.Inst.Index) !CValue { const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; - const writer = f.object.writer(); - const function = f.air.values[ty_pl.payload].castTag(.function).?.data; const mod = f.object.dg.module; + const writer = f.object.writer(); + const function = f.air.values[ty_pl.payload].getFunction(mod).?; try writer.print("/* dbg func:{s} */\n", .{mod.declPtr(function.owner_decl).name}); return .none; } @@ -6634,9 +6726,6 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue { try f.writeCValue(writer, accum, .Other); try writer.writeAll(" = "); - var arena = std.heap.ArenaAllocator.init(f.object.dg.gpa); - defer arena.deinit(); - try f.object.dg.renderValue(writer, scalar_ty, switch (reduce.operation) { .Or, .Xor, .Add => try mod.intValue(scalar_ty, 0), .And => switch (scalar_ty.zigTypeTag(mod)) { @@ -6654,7 +6743,7 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue { }, .Max => switch (scalar_ty.zigTypeTag(mod)) { .Bool => try mod.intValue(scalar_ty, 0), - .Int => try scalar_ty.minInt(arena.allocator(), mod), + .Int => try scalar_ty.minInt(mod), .Float => try mod.floatValue(scalar_ty, std.math.nan_f128), else => unreachable, }, diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 8dec958806..f8ddddad1c 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -582,7 +582,7 @@ pub const Object = struct { llvm_usize_ty, }; const llvm_slice_ty = self.context.structType(&type_fields, type_fields.len, .False); - const slice_ty = Type.const_slice_u8_sentinel_0; + const slice_ty = Type.slice_const_u8_sentinel_0; const slice_alignment = slice_ty.abiAlignment(mod); const error_name_list = mod.error_name_list.items; @@ -866,10 +866,11 @@ pub const Object = struct { pub fn updateFunc( o: *Object, mod: *Module, - func: *Module.Fn, + func_index: Module.Fn.Index, air: Air, liveness: Liveness, ) !void { + const func = mod.funcPtr(func_index); const decl_index = func.owner_decl; const decl = mod.declPtr(decl_index); const target = mod.getTarget(); @@ -886,7 +887,7 @@ pub const Object = struct { const llvm_func = try dg.resolveLlvmFunction(decl_index); - if (mod.align_stack_fns.get(func)) |align_info| { + if (mod.align_stack_fns.get(func_index)) |align_info| { dg.addFnAttrInt(llvm_func, "alignstack", align_info.alignment); dg.addFnAttr(llvm_func, "noinline"); } else { @@ -1164,7 +1165,7 @@ pub const Object = struct { di_file = try dg.object.getDIFile(gpa, mod.namespacePtr(decl.src_namespace).file_scope); const line_number = decl.src_line + 1; - const is_internal_linkage = decl.val.tag() != .extern_fn and + const is_internal_linkage = decl.getExternFunc(mod) == null and 
!mod.decl_exports.contains(decl_index); const noret_bit: c_uint = if (fn_info.return_type == .noreturn_type) llvm.DIFlags.NoReturn @@ -1269,18 +1270,20 @@ pub const Object = struct { // because we call `updateDeclExports` at the end of `updateFunc` and `updateDecl`. const llvm_global = self.decl_map.get(decl_index) orelse return; const decl = mod.declPtr(decl_index); - if (decl.isExtern()) { - const is_wasm_fn = mod.getTarget().isWasm() and try decl.isFunction(mod); - const mangle_name = is_wasm_fn and - decl.getExternFn().?.lib_name != null and - !std.mem.eql(u8, std.mem.sliceTo(decl.getExternFn().?.lib_name.?, 0), "c"); - const decl_name = if (mangle_name) name: { - const tmp = try std.fmt.allocPrintZ(gpa, "{s}|{s}", .{ - decl.name, decl.getExternFn().?.lib_name.?, - }); - break :name tmp.ptr; - } else decl.name; - defer if (mangle_name) gpa.free(std.mem.sliceTo(decl_name, 0)); + if (decl.isExtern(mod)) { + var free_decl_name = false; + const decl_name = decl_name: { + if (mod.getTarget().isWasm() and try decl.isFunction(mod)) { + if (mod.intern_pool.stringToSliceUnwrap(decl.getExternFunc(mod).?.lib_name)) |lib_name| { + if (!std.mem.eql(u8, lib_name, "c")) { + free_decl_name = true; + break :decl_name try std.fmt.allocPrintZ(gpa, "{s}|{s}", .{ decl.name, lib_name }); + } + } + } + break :decl_name std.mem.span(decl.name); + }; + defer if (free_decl_name) gpa.free(decl_name); llvm_global.setValueName(decl_name); if (self.getLlvmGlobal(decl_name)) |other_global| { @@ -1303,13 +1306,13 @@ pub const Object = struct { di_global.replaceLinkageName(linkage_name); } } - if (decl.val.castTag(.variable)) |variable| { - if (variable.data.is_threadlocal) { + if (decl.getVariable(mod)) |variable| { + if (variable.is_threadlocal) { llvm_global.setThreadLocalMode(.GeneralDynamicTLSModel); } else { llvm_global.setThreadLocalMode(.NotThreadLocal); } - if (variable.data.is_weak_linkage) { + if (variable.is_weak_linkage) { llvm_global.setLinkage(.ExternalWeak); } } @@ -1345,8 +1348,8 @@ pub const Object = struct { defer gpa.free(section_z); llvm_global.setSection(section_z); } - if (decl.val.castTag(.variable)) |variable| { - if (variable.data.is_threadlocal) { + if (decl.getVariable(mod)) |variable| { + if (variable.is_threadlocal) { llvm_global.setThreadLocalMode(.GeneralDynamicTLSModel); } } @@ -1379,9 +1382,9 @@ pub const Object = struct { llvm_global.setLinkage(.Internal); if (mod.wantDllExports()) llvm_global.setDLLStorageClass(.Default); llvm_global.setUnnamedAddr(.True); - if (decl.val.castTag(.variable)) |variable| { + if (decl.getVariable(mod)) |variable| { const single_threaded = mod.comp.bin_file.options.single_threaded; - if (variable.data.is_threadlocal and !single_threaded) { + if (variable.is_threadlocal and !single_threaded) { llvm_global.setThreadLocalMode(.GeneralDynamicTLSModel); } else { llvm_global.setThreadLocalMode(.NotThreadLocal); @@ -1510,12 +1513,11 @@ pub const Object = struct { for (enum_type.names, 0..) 
|field_name_ip, i| { const field_name_z = ip.stringToSlice(field_name_ip); - var bigint_space: InternPool.Key.Int.Storage.BigIntSpace = undefined; - const storage = if (enum_type.values.len != 0) - ip.indexToKey(enum_type.values[i]).int.storage + var bigint_space: Value.BigIntSpace = undefined; + const bigint = if (enum_type.values.len != 0) + enum_type.values[i].toValue().toBigInt(&bigint_space, mod) else - InternPool.Key.Int.Storage{ .u64 = i }; - const bigint = storage.toBigInt(&bigint_space); + std.math.big.int.Mutable.init(&bigint_space.limbs, i).toConst(); if (bigint.limbs.len == 1) { enumerators[i] = dib.createEnumerator(field_name_z, bigint.limbs[0], int_info.signedness == .unsigned); @@ -2442,6 +2444,7 @@ pub const DeclGen = struct { } fn genDecl(dg: *DeclGen) !void { + const mod = dg.module; const decl = dg.decl; const decl_index = dg.decl_index; assert(decl.has_tv); @@ -2449,19 +2452,16 @@ pub const DeclGen = struct { log.debug("gen: {s} type: {}, value: {}", .{ decl.name, decl.ty.fmtDebug(), decl.val.fmtDebug(), }); - assert(decl.val.ip_index != .none or decl.val.tag() != .function); - if (decl.val.castTag(.extern_fn)) |extern_fn| { - _ = try dg.resolveLlvmFunction(extern_fn.data.owner_decl); + if (decl.getExternFunc(mod)) |extern_func| { + _ = try dg.resolveLlvmFunction(extern_func.decl); } else { - const mod = dg.module; const target = mod.getTarget(); var global = try dg.resolveGlobalDecl(decl_index); global.setAlignment(decl.getAlignment(mod)); if (decl.@"linksection") |section| global.setSection(section); assert(decl.has_tv); - const init_val = if (decl.val.castTag(.variable)) |payload| init_val: { - const variable = payload.data; - break :init_val variable.init; + const init_val = if (decl.getVariable(mod)) |variable| init_val: { + break :init_val variable.init.toValue(); } else init_val: { global.setGlobalConstant(.True); break :init_val decl.val; @@ -2519,7 +2519,7 @@ pub const DeclGen = struct { ); try dg.object.di_map.put(dg.gpa, dg.decl, di_global.getVariable().toNode()); - if (!is_internal_linkage or decl.isExtern()) global.attachMetaData(di_global); + if (!is_internal_linkage or decl.isExtern(mod)) global.attachMetaData(di_global); } } } @@ -2548,17 +2548,16 @@ pub const DeclGen = struct { const llvm_fn = dg.llvmModule().addFunctionInAddressSpace(fqn, fn_type, llvm_addrspace); gop.value_ptr.* = llvm_fn; - const is_extern = decl.isExtern(); + const is_extern = decl.isExtern(mod); if (!is_extern) { llvm_fn.setLinkage(.Internal); llvm_fn.setUnnamedAddr(.True); } else { if (target.isWasm()) { dg.addFnAttrString(llvm_fn, "wasm-import-name", std.mem.sliceTo(decl.name, 0)); - if (decl.getExternFn().?.lib_name) |lib_name| { - const module_name = std.mem.sliceTo(lib_name, 0); - if (!std.mem.eql(u8, module_name, "c")) { - dg.addFnAttrString(llvm_fn, "wasm-import-module", module_name); + if (mod.intern_pool.stringToSliceUnwrap(decl.getExternFunc(mod).?.lib_name)) |lib_name| { + if (!std.mem.eql(u8, lib_name, "c")) { + dg.addFnAttrString(llvm_fn, "wasm-import-module", lib_name); } } } @@ -2695,11 +2694,12 @@ pub const DeclGen = struct { if (gop.found_existing) return gop.value_ptr.*; errdefer assert(dg.object.decl_map.remove(decl_index)); - const decl = dg.module.declPtr(decl_index); - const fqn = try decl.getFullyQualifiedName(dg.module); + const mod = dg.module; + const decl = mod.declPtr(decl_index); + const fqn = try decl.getFullyQualifiedName(mod); defer dg.gpa.free(fqn); - const target = dg.module.getTarget(); + const target = mod.getTarget(); const llvm_type = try 
dg.lowerType(decl.ty); const llvm_actual_addrspace = toLlvmGlobalAddressSpace(decl.@"addrspace", target); @@ -2712,18 +2712,18 @@ pub const DeclGen = struct { gop.value_ptr.* = llvm_global; // This is needed for declarations created by `@extern`. - if (decl.isExtern()) { + if (decl.isExtern(mod)) { llvm_global.setValueName(decl.name); llvm_global.setUnnamedAddr(.False); llvm_global.setLinkage(.External); - if (decl.val.castTag(.variable)) |variable| { - const single_threaded = dg.module.comp.bin_file.options.single_threaded; - if (variable.data.is_threadlocal and !single_threaded) { + if (decl.getVariable(mod)) |variable| { + const single_threaded = mod.comp.bin_file.options.single_threaded; + if (variable.is_threadlocal and !single_threaded) { llvm_global.setThreadLocalMode(.GeneralDynamicTLSModel); } else { llvm_global.setThreadLocalMode(.NotThreadLocal); } - if (variable.data.is_weak_linkage) llvm_global.setLinkage(.ExternalWeak); + if (variable.is_weak_linkage) llvm_global.setLinkage(.ExternalWeak); } } else { llvm_global.setLinkage(.Internal); @@ -3199,468 +3199,344 @@ pub const DeclGen = struct { const mod = dg.module; const target = mod.getTarget(); var tv = arg_tv; - if (tv.val.castTag(.runtime_value)) |rt| { - tv.val = rt.data; + switch (mod.intern_pool.indexToKey(tv.val.ip_index)) { + .runtime_value => |rt| tv.val = rt.val.toValue(), + else => {}, } - if (tv.val.isUndef(mod)) { + if (tv.val.isUndefDeep(mod)) { const llvm_type = try dg.lowerType(tv.ty); return llvm_type.getUndef(); } - switch (tv.ty.zigTypeTag(mod)) { - .Bool => { - const llvm_type = try dg.lowerType(tv.ty); - return if (tv.val.toBool(mod)) llvm_type.constAllOnes() else llvm_type.constNull(); - }, - .Int => switch (tv.val.ip_index) { - .none => switch (tv.val.tag()) { - .decl_ref_mut => return lowerDeclRefValue(dg, tv, tv.val.castTag(.decl_ref_mut).?.data.decl_index), - .decl_ref => return lowerDeclRefValue(dg, tv, tv.val.castTag(.decl_ref).?.data), - else => { - var bigint_space: Value.BigIntSpace = undefined; - const bigint = tv.val.toBigInt(&bigint_space, mod); - return lowerBigInt(dg, tv.ty, bigint); - }, - }, - else => switch (mod.intern_pool.indexToKey(tv.val.ip_index)) { - .int => |int| { - var bigint_space: Value.BigIntSpace = undefined; - const bigint = int.storage.toBigInt(&bigint_space); - return lowerBigInt(dg, tv.ty, bigint); - }, - else => unreachable, - }, - }, - .Enum => { - const int_val = try tv.enumToInt(mod); - var bigint_space: Value.BigIntSpace = undefined; - const bigint = int_val.toBigInt(&bigint_space, mod); - - const int_info = tv.ty.intInfo(mod); - const llvm_type = dg.context.intType(int_info.bits); - - const unsigned_val = v: { - if (bigint.limbs.len == 1) { - break :v llvm_type.constInt(bigint.limbs[0], .False); - } - if (@sizeOf(usize) == @sizeOf(u64)) { - break :v llvm_type.constIntOfArbitraryPrecision( - @intCast(c_uint, bigint.limbs.len), - bigint.limbs.ptr, - ); - } - @panic("TODO implement bigint to llvm int for 32-bit compiler builds"); - }; - if (!bigint.positive) { - return llvm.constNeg(unsigned_val); - } - return unsigned_val; - }, - .Float => { - const llvm_ty = try dg.lowerType(tv.ty); - switch (tv.ty.floatBits(target)) { - 16 => { - const repr = @bitCast(u16, tv.val.toFloat(f16, mod)); - const llvm_i16 = dg.context.intType(16); - const int = llvm_i16.constInt(repr, .False); - return int.constBitCast(llvm_ty); - }, - 32 => { - const repr = @bitCast(u32, tv.val.toFloat(f32, mod)); - const llvm_i32 = dg.context.intType(32); - const int = llvm_i32.constInt(repr, .False); - 
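
// Each float width is lowered by reinterpreting its bits as a same-sized
// integer constant and bitcasting back to the float type. A self-contained
// sketch of that identity, written in the two-argument @bitCast style this
// code uses (assumes it is compiled as its own test file):
const std = @import("std");

test "f32 bit pattern round-trips through u32" {
    const f: f32 = 1.5;
    const bits = @bitCast(u32, f); // 0x3FC00000: sign 0, biased exp 127, top mantissa bit set
    try std.testing.expectEqual(@as(u32, 0x3FC00000), bits);
    try std.testing.expectEqual(f, @bitCast(f32, bits));
}
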
return int.constBitCast(llvm_ty); - }, - 64 => { - const repr = @bitCast(u64, tv.val.toFloat(f64, mod)); - const llvm_i64 = dg.context.intType(64); - const int = llvm_i64.constInt(repr, .False); - return int.constBitCast(llvm_ty); - }, - 80 => { - const float = tv.val.toFloat(f80, mod); - const repr = std.math.break_f80(float); - const llvm_i80 = dg.context.intType(80); - var x = llvm_i80.constInt(repr.exp, .False); - x = x.constShl(llvm_i80.constInt(64, .False)); - x = x.constOr(llvm_i80.constInt(repr.fraction, .False)); - if (backendSupportsF80(target)) { - return x.constBitCast(llvm_ty); - } else { - return x; - } - }, - 128 => { - var buf: [2]u64 = @bitCast([2]u64, tv.val.toFloat(f128, mod)); - // LLVM seems to require that the lower half of the f128 be placed first - // in the buffer. - if (native_endian == .Big) { - std.mem.swap(u64, &buf[0], &buf[1]); - } - const int = dg.context.intType(128).constIntOfArbitraryPrecision(buf.len, &buf); - return int.constBitCast(llvm_ty); - }, - else => unreachable, - } - }, - .Pointer => switch (tv.val.ip_index) { - .null_value => { - const llvm_type = try dg.lowerType(tv.ty); - return llvm_type.constNull(); - }, - .none => switch (tv.val.tag()) { - .decl_ref_mut => return lowerDeclRefValue(dg, tv, tv.val.castTag(.decl_ref_mut).?.data.decl_index), - .decl_ref => return lowerDeclRefValue(dg, tv, tv.val.castTag(.decl_ref).?.data), - .variable => { - const decl_index = tv.val.castTag(.variable).?.data.owner_decl; - const decl = dg.module.declPtr(decl_index); - dg.module.markDeclAlive(decl); - - const llvm_wanted_addrspace = toLlvmAddressSpace(decl.@"addrspace", target); - const llvm_actual_addrspace = toLlvmGlobalAddressSpace(decl.@"addrspace", target); - - const val = try dg.resolveGlobalDecl(decl_index); - const addrspace_casted_ptr = if (llvm_actual_addrspace != llvm_wanted_addrspace) - val.constAddrSpaceCast(dg.context.pointerType(llvm_wanted_addrspace)) - else - val; - return addrspace_casted_ptr; - }, - .slice => { - const slice = tv.val.castTag(.slice).?.data; - const fields: [2]*llvm.Value = .{ - try dg.lowerValue(.{ - .ty = tv.ty.slicePtrFieldType(mod), - .val = slice.ptr, - }), - try dg.lowerValue(.{ - .ty = Type.usize, - .val = slice.len, - }), - }; - return dg.context.constStruct(&fields, fields.len, .False); - }, - .lazy_align, .lazy_size => { - const llvm_usize = try dg.lowerType(Type.usize); - const llvm_int = llvm_usize.constInt(tv.val.toUnsignedInt(mod), .False); - return llvm_int.constIntToPtr(try dg.lowerType(tv.ty)); - }, - .field_ptr, .opt_payload_ptr, .eu_payload_ptr, .elem_ptr => { - return dg.lowerParentPtr(tv.val, tv.ty.ptrInfo(mod).bit_offset % 8 == 0); - }, - .opt_payload => { - const payload = tv.val.castTag(.opt_payload).?.data; - return dg.lowerParentPtr(payload, tv.ty.ptrInfo(mod).bit_offset % 8 == 0); - }, - else => |tag| return dg.todo("implement const of pointer type '{}' ({})", .{ - tv.ty.fmtDebug(), tag, - }), - }, - else => switch (mod.intern_pool.indexToKey(tv.val.ip_index)) { - .int => |int| return dg.lowerIntAsPtr(int), - .ptr => |ptr| { - const ptr_tv: TypedValue = switch (ptr.len) { - .none => tv, - else => .{ .ty = tv.ty.slicePtrFieldType(mod), .val = tv.val.slicePtr(mod) }, - }; - const llvm_ptr_val = switch (ptr.addr) { - .@"var" => |@"var"| ptr: { - const decl = dg.module.declPtr(@"var".owner_decl); - dg.module.markDeclAlive(decl); - - const llvm_wanted_addrspace = toLlvmAddressSpace(decl.@"addrspace", target); - const llvm_actual_addrspace = toLlvmGlobalAddressSpace(decl.@"addrspace", target); - - const 
val = try dg.resolveGlobalDecl(@"var".owner_decl); - const addrspace_casted_ptr = if (llvm_actual_addrspace != llvm_wanted_addrspace) - val.constAddrSpaceCast(dg.context.pointerType(llvm_wanted_addrspace)) - else - val; - break :ptr addrspace_casted_ptr; - }, - .decl => |decl| try dg.lowerDeclRefValue(ptr_tv, decl), - .mut_decl => |mut_decl| try dg.lowerDeclRefValue(ptr_tv, mut_decl.decl), - .int => |int| dg.lowerIntAsPtr(mod.intern_pool.indexToKey(int).int), - .eu_payload, - .opt_payload, - .elem, - .field, - => try dg.lowerParentPtr(ptr_tv.val, ptr_tv.ty.ptrInfo(mod).bit_offset % 8 == 0), - .comptime_field => unreachable, - }; - switch (ptr.len) { - .none => return llvm_ptr_val, - else => { - const fields: [2]*llvm.Value = .{ - llvm_ptr_val, - try dg.lowerValue(.{ .ty = Type.usize, .val = ptr.len.toValue() }), - }; - return dg.context.constStruct(&fields, fields.len, .False); - }, - } - }, - else => unreachable, + if (tv.val.ip_index == .none) switch (tv.ty.zigTypeTag(mod)) { + .Array => switch (tv.val.tag()) { + .bytes => { + const bytes = tv.val.castTag(.bytes).?.data; + return dg.context.constString( + bytes.ptr, + @intCast(c_uint, tv.ty.arrayLenIncludingSentinel(mod)), + .True, // Don't null terminate. Bytes has the sentinel, if any. + ); }, - }, - .Array => switch (tv.val.ip_index) { - .none => switch (tv.val.tag()) { - .bytes => { - const bytes = tv.val.castTag(.bytes).?.data; - return dg.context.constString( - bytes.ptr, - @intCast(c_uint, tv.ty.arrayLenIncludingSentinel(mod)), - .True, // Don't null terminate. Bytes has the sentinel, if any. - ); - }, - .str_lit => { - const str_lit = tv.val.castTag(.str_lit).?.data; - const bytes = dg.module.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; - if (tv.ty.sentinel(mod)) |sent_val| { - const byte = @intCast(u8, sent_val.toUnsignedInt(mod)); - if (byte == 0 and bytes.len > 0) { - return dg.context.constString( - bytes.ptr, - @intCast(c_uint, bytes.len), - .False, // Yes, null terminate. - ); - } - var array = std.ArrayList(u8).init(dg.gpa); - defer array.deinit(); - try array.ensureUnusedCapacity(bytes.len + 1); - array.appendSliceAssumeCapacity(bytes); - array.appendAssumeCapacity(byte); - return dg.context.constString( - array.items.ptr, - @intCast(c_uint, array.items.len), - .True, // Don't null terminate. - ); - } else { + .str_lit => { + const str_lit = tv.val.castTag(.str_lit).?.data; + const bytes = dg.module.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; + if (tv.ty.sentinel(mod)) |sent_val| { + const byte = @intCast(u8, sent_val.toUnsignedInt(mod)); + if (byte == 0 and bytes.len > 0) { return dg.context.constString( bytes.ptr, @intCast(c_uint, bytes.len), - .True, // Don't null terminate. `bytes` has the sentinel, if any. - ); - } - }, - .aggregate => { - const elem_vals = tv.val.castTag(.aggregate).?.data; - const elem_ty = tv.ty.childType(mod); - const gpa = dg.gpa; - const len = @intCast(usize, tv.ty.arrayLenIncludingSentinel(mod)); - const llvm_elems = try gpa.alloc(*llvm.Value, len); - defer gpa.free(llvm_elems); - var need_unnamed = false; - for (elem_vals[0..len], 0..) 
|elem_val, i| { - llvm_elems[i] = try dg.lowerValue(.{ .ty = elem_ty, .val = elem_val }); - need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[i]); - } - if (need_unnamed) { - return dg.context.constStruct( - llvm_elems.ptr, - @intCast(c_uint, llvm_elems.len), - .True, - ); - } else { - const llvm_elem_ty = try dg.lowerType(elem_ty); - return llvm_elem_ty.constArray( - llvm_elems.ptr, - @intCast(c_uint, llvm_elems.len), + .False, // Yes, null terminate. ); } - }, - .repeated => { - const val = tv.val.castTag(.repeated).?.data; - const elem_ty = tv.ty.childType(mod); - const sentinel = tv.ty.sentinel(mod); - const len = @intCast(usize, tv.ty.arrayLen(mod)); - const len_including_sent = len + @boolToInt(sentinel != null); - const gpa = dg.gpa; - const llvm_elems = try gpa.alloc(*llvm.Value, len_including_sent); - defer gpa.free(llvm_elems); + var array = std.ArrayList(u8).init(dg.gpa); + defer array.deinit(); + try array.ensureUnusedCapacity(bytes.len + 1); + array.appendSliceAssumeCapacity(bytes); + array.appendAssumeCapacity(byte); + return dg.context.constString( + array.items.ptr, + @intCast(c_uint, array.items.len), + .True, // Don't null terminate. + ); + } else { + return dg.context.constString( + bytes.ptr, + @intCast(c_uint, bytes.len), + .True, // Don't null terminate. `bytes` has the sentinel, if any. + ); + } + }, + else => unreachable, + }, + .Struct => { + const llvm_struct_ty = try dg.lowerType(tv.ty); + const gpa = dg.gpa; + + const struct_type = switch (mod.intern_pool.indexToKey(tv.ty.ip_index)) { + .anon_struct_type => |tuple| { + var llvm_fields: std.ArrayListUnmanaged(*llvm.Value) = .{}; + defer llvm_fields.deinit(gpa); + try llvm_fields.ensureUnusedCapacity(gpa, tuple.types.len); + + comptime assert(struct_layout_version == 2); + var offset: u64 = 0; + var big_align: u32 = 0; var need_unnamed = false; - if (len != 0) { - for (llvm_elems[0..len]) |*elem| { - elem.* = try dg.lowerValue(.{ .ty = elem_ty, .val = val }); + + for (tuple.types, tuple.values, 0..) |field_ty, field_val, i| { + if (field_val != .none) continue; + if (!field_ty.toType().hasRuntimeBitsIgnoreComptime(mod)) continue; + + const field_align = field_ty.toType().abiAlignment(mod); + big_align = @max(big_align, field_align); + const prev_offset = offset; + offset = std.mem.alignForwardGeneric(u64, offset, field_align); + + const padding_len = offset - prev_offset; + if (padding_len > 0) { + const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); + // TODO make this and all other padding elsewhere in debug + // builds be 0xaa not undef. 
+ llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); } - need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[0]); - } - if (sentinel) |sent| { - llvm_elems[len] = try dg.lowerValue(.{ .ty = elem_ty, .val = sent }); - need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[len]); + const field_llvm_val = try dg.lowerValue(.{ + .ty = field_ty.toType(), + .val = try tv.val.fieldValue(mod, i), + }); + + need_unnamed = need_unnamed or dg.isUnnamedType(field_ty.toType(), field_llvm_val); + + llvm_fields.appendAssumeCapacity(field_llvm_val); + + offset += field_ty.toType().abiSize(mod); + } + { + const prev_offset = offset; + offset = std.mem.alignForwardGeneric(u64, offset, big_align); + const padding_len = offset - prev_offset; + if (padding_len > 0) { + const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); + llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); + } } if (need_unnamed) { return dg.context.constStruct( - llvm_elems.ptr, - @intCast(c_uint, llvm_elems.len), - .True, + llvm_fields.items.ptr, + @intCast(c_uint, llvm_fields.items.len), + .False, ); } else { - const llvm_elem_ty = try dg.lowerType(elem_ty); - return llvm_elem_ty.constArray( - llvm_elems.ptr, - @intCast(c_uint, llvm_elems.len), + return llvm_struct_ty.constNamedStruct( + llvm_fields.items.ptr, + @intCast(c_uint, llvm_fields.items.len), ); } }, - .empty_array_sentinel => { - const elem_ty = tv.ty.childType(mod); - const sent_val = tv.ty.sentinel(mod).?; - const sentinel = try dg.lowerValue(.{ .ty = elem_ty, .val = sent_val }); - const llvm_elems: [1]*llvm.Value = .{sentinel}; - const need_unnamed = dg.isUnnamedType(elem_ty, llvm_elems[0]); - if (need_unnamed) { - return dg.context.constStruct(&llvm_elems, llvm_elems.len, .True); - } else { - const llvm_elem_ty = try dg.lowerType(elem_ty); - return llvm_elem_ty.constArray(&llvm_elems, llvm_elems.len); - } - }, + .struct_type => |struct_type| struct_type, else => unreachable, - }, - else => switch (mod.intern_pool.indexToKey(tv.val.ip_index)) { - .aggregate => |aggregate| switch (aggregate.storage) { - .elems => |elem_vals| { - const elem_ty = tv.ty.childType(mod); - const gpa = dg.gpa; - const llvm_elems = try gpa.alloc(*llvm.Value, elem_vals.len); - defer gpa.free(llvm_elems); - var need_unnamed = false; - for (elem_vals, 0..) 
|elem_val, i| { - llvm_elems[i] = try dg.lowerValue(.{ .ty = elem_ty, .val = elem_val.toValue() }); - need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[i]); - } - if (need_unnamed) { - return dg.context.constStruct( - llvm_elems.ptr, - @intCast(c_uint, llvm_elems.len), - .True, - ); - } else { - const llvm_elem_ty = try dg.lowerType(elem_ty); - return llvm_elem_ty.constArray( - llvm_elems.ptr, - @intCast(c_uint, llvm_elems.len), - ); - } - }, - .repeated_elem => |val| { - const elem_ty = tv.ty.childType(mod); - const sentinel = tv.ty.sentinel(mod); - const len = @intCast(usize, tv.ty.arrayLen(mod)); - const len_including_sent = len + @boolToInt(sentinel != null); - const gpa = dg.gpa; - const llvm_elems = try gpa.alloc(*llvm.Value, len_including_sent); - defer gpa.free(llvm_elems); + }; - var need_unnamed = false; - if (len != 0) { - for (llvm_elems[0..len]) |*elem| { - elem.* = try dg.lowerValue(.{ .ty = elem_ty, .val = val.toValue() }); - } - need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[0]); - } + const struct_obj = mod.structPtrUnwrap(struct_type.index).?; - if (sentinel) |sent| { - llvm_elems[len] = try dg.lowerValue(.{ .ty = elem_ty, .val = sent }); - need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[len]); - } + if (struct_obj.layout == .Packed) { + assert(struct_obj.haveLayout()); + const big_bits = struct_obj.backing_int_ty.bitSize(mod); + const int_llvm_ty = dg.context.intType(@intCast(c_uint, big_bits)); + const fields = struct_obj.fields.values(); + comptime assert(Type.packed_struct_layout_version == 2); + var running_int: *llvm.Value = int_llvm_ty.constNull(); + var running_bits: u16 = 0; + for (fields, 0..) |field, i| { + if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue; - if (need_unnamed) { - return dg.context.constStruct( - llvm_elems.ptr, - @intCast(c_uint, llvm_elems.len), - .True, - ); - } else { - const llvm_elem_ty = try dg.lowerType(elem_ty); - return llvm_elem_ty.constArray( - llvm_elems.ptr, - @intCast(c_uint, llvm_elems.len), - ); - } - }, - }, - else => unreachable, - }, - }, - .Optional => { - comptime assert(optional_layout_version == 3); - const payload_ty = tv.ty.optionalChild(mod); + const non_int_val = try dg.lowerValue(.{ + .ty = field.ty, + .val = try tv.val.fieldValue(mod, i), + }); + const ty_bit_size = @intCast(u16, field.ty.bitSize(mod)); + const small_int_ty = dg.context.intType(ty_bit_size); + const small_int_val = if (field.ty.isPtrAtRuntime(mod)) + non_int_val.constPtrToInt(small_int_ty) + else + non_int_val.constBitCast(small_int_ty); + const shift_rhs = int_llvm_ty.constInt(running_bits, .False); + // If the field is as large as the entire packed struct, this + // zext would go from, e.g. i16 to i16. This is legal with + // constZExtOrBitCast but not legal with constZExt. 
+ const extended_int_val = small_int_val.constZExtOrBitCast(int_llvm_ty); + const shifted = extended_int_val.constShl(shift_rhs); + running_int = running_int.constOr(shifted); + running_bits += ty_bit_size; + } + return running_int; + } - const llvm_i8 = dg.context.intType(8); - const is_pl = !tv.val.isNull(mod); - const non_null_bit = if (is_pl) llvm_i8.constInt(1, .False) else llvm_i8.constNull(); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { - return non_null_bit; + const llvm_field_count = llvm_struct_ty.countStructElementTypes(); + var llvm_fields = try std.ArrayListUnmanaged(*llvm.Value).initCapacity(gpa, llvm_field_count); + defer llvm_fields.deinit(gpa); + + comptime assert(struct_layout_version == 2); + var offset: u64 = 0; + var big_align: u32 = 0; + var need_unnamed = false; + + var it = struct_obj.runtimeFieldIterator(mod); + while (it.next()) |field_and_index| { + const field = field_and_index.field; + const field_align = field.alignment(mod, struct_obj.layout); + big_align = @max(big_align, field_align); + const prev_offset = offset; + offset = std.mem.alignForwardGeneric(u64, offset, field_align); + + const padding_len = offset - prev_offset; + if (padding_len > 0) { + const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); + // TODO make this and all other padding elsewhere in debug + // builds be 0xaa not undef. + llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); + } + + const field_llvm_val = try dg.lowerValue(.{ + .ty = field.ty, + .val = try tv.val.fieldValue(mod, field_and_index.index), + }); + + need_unnamed = need_unnamed or dg.isUnnamedType(field.ty, field_llvm_val); + + llvm_fields.appendAssumeCapacity(field_llvm_val); + + offset += field.ty.abiSize(mod); + } + { + const prev_offset = offset; + offset = std.mem.alignForwardGeneric(u64, offset, big_align); + const padding_len = offset - prev_offset; + if (padding_len > 0) { + const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); + llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); + } } - const llvm_ty = try dg.lowerType(tv.ty); - if (tv.ty.optionalReprIsPayload(mod)) return switch (tv.val.ip_index) { - .none => if (tv.val.castTag(.opt_payload)) |payload| - try dg.lowerValue(.{ .ty = payload_ty, .val = payload.data }) - else if (is_pl) - try dg.lowerValue(.{ .ty = payload_ty, .val = tv.val }) - else - llvm_ty.constNull(), - .null_value => llvm_ty.constNull(), - else => switch (mod.intern_pool.indexToKey(tv.val.ip_index)) { - .opt => |opt| switch (opt.val) { - .none => llvm_ty.constNull(), - else => dg.lowerValue(.{ .ty = payload_ty, .val = opt.val.toValue() }), - }, - else => unreachable, - }, - }; - assert(payload_ty.zigTypeTag(mod) != .Fn); - const llvm_field_count = llvm_ty.countStructElementTypes(); - var fields_buf: [3]*llvm.Value = undefined; - fields_buf[0] = try dg.lowerValue(.{ - .ty = payload_ty, - .val = if (tv.val.castTag(.opt_payload)) |pl| pl.data else Value.undef, - }); - fields_buf[1] = non_null_bit; - if (llvm_field_count > 2) { - assert(llvm_field_count == 3); - fields_buf[2] = llvm_ty.structGetTypeAtIndex(2).getUndef(); + if (need_unnamed) { + return dg.context.constStruct( + llvm_fields.items.ptr, + @intCast(c_uint, llvm_fields.items.len), + .False, + ); + } else { + return llvm_struct_ty.constNamedStruct( + llvm_fields.items.ptr, + @intCast(c_uint, llvm_fields.items.len), + ); } - return dg.context.constStruct(&fields_buf, llvm_field_count, .False); }, - .Fn => { - const fn_decl_index = switch 
(tv.val.tag()) { - .extern_fn => tv.val.castTag(.extern_fn).?.data.owner_decl, - .function => tv.val.castTag(.function).?.data.owner_decl, - else => unreachable, - }; - const fn_decl = dg.module.declPtr(fn_decl_index); - dg.module.markDeclAlive(fn_decl); - return dg.resolveLlvmFunction(fn_decl_index); + .Vector => switch (tv.val.tag()) { + .bytes => { + // Note, sentinel is not stored even if the type has a sentinel. + const bytes = tv.val.castTag(.bytes).?.data; + const vector_len = @intCast(usize, tv.ty.arrayLen(mod)); + assert(vector_len == bytes.len or vector_len + 1 == bytes.len); + + const elem_ty = tv.ty.childType(mod); + const llvm_elems = try dg.gpa.alloc(*llvm.Value, vector_len); + defer dg.gpa.free(llvm_elems); + for (llvm_elems, 0..) |*elem, i| { + elem.* = try dg.lowerValue(.{ + .ty = elem_ty, + .val = try mod.intValue(elem_ty, bytes[i]), + }); + } + return llvm.constVector( + llvm_elems.ptr, + @intCast(c_uint, llvm_elems.len), + ); + }, + .str_lit => { + // Note, sentinel is not stored + const str_lit = tv.val.castTag(.str_lit).?.data; + const bytes = dg.module.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; + const vector_len = @intCast(usize, tv.ty.arrayLen(mod)); + assert(vector_len == bytes.len); + + const elem_ty = tv.ty.childType(mod); + const llvm_elems = try dg.gpa.alloc(*llvm.Value, vector_len); + defer dg.gpa.free(llvm_elems); + for (llvm_elems, 0..) |*elem, i| { + elem.* = try dg.lowerValue(.{ + .ty = elem_ty, + .val = try mod.intValue(elem_ty, bytes[i]), + }); + } + return llvm.constVector( + llvm_elems.ptr, + @intCast(c_uint, llvm_elems.len), + ); + }, + else => unreachable, }, - .ErrorSet => { + .Float, + .Union, + .Optional, + .ErrorUnion, + .ErrorSet, + .Int, + .Enum, + .Bool, + .Pointer, + => unreachable, // handled below + .Frame, + .AnyFrame, + => return dg.todo("implement const of type '{}'", .{tv.ty.fmtDebug()}), + .Type, + .Void, + .NoReturn, + .ComptimeFloat, + .ComptimeInt, + .Undefined, + .Null, + .Opaque, + .EnumLiteral, + .Fn, + => unreachable, // comptime-only types + }; + + switch (mod.intern_pool.indexToKey(tv.val.ip_index)) { + .int_type, + .ptr_type, + .array_type, + .vector_type, + .opt_type, + .anyframe_type, + .error_union_type, + .simple_type, + .struct_type, + .anon_struct_type, + .union_type, + .opaque_type, + .enum_type, + .func_type, + .error_set_type, + .inferred_error_set_type, + => unreachable, // types, not values + + .undef, .runtime_value => unreachable, // handled above + .simple_value => |simple_value| switch (simple_value) { + .undefined, + .void, + .null, + .empty_struct, + .@"unreachable", + .generic_poison, + => unreachable, // non-runtime values + .false, .true => { + const llvm_type = try dg.lowerType(tv.ty); + return if (tv.val.toBool(mod)) llvm_type.constAllOnes() else llvm_type.constNull(); + }, + }, + .variable, + .extern_func, + .func, + .enum_literal, + => unreachable, // non-runtime values + .int => |int| { + var bigint_space: Value.BigIntSpace = undefined; + const bigint = int.storage.toBigInt(&bigint_space); + return lowerBigInt(dg, tv.ty, bigint); + }, + .err => |err| { const llvm_ty = try dg.lowerType(Type.anyerror); - switch (tv.val.ip_index) { - .none => switch (tv.val.tag()) { - .@"error" => { - const err_name = tv.val.castTag(.@"error").?.data.name; - const kv = try dg.module.getErrorValue(err_name); - return llvm_ty.constInt(kv.value, .False); - }, - else => { - // In this case we are rendering an error union which has a 0 bits payload. 
- return llvm_ty.constNull(); - }, - }, - else => switch (mod.intern_pool.indexToKey(tv.val.ip_index)) { - .int => |int| return llvm_ty.constInt(int.storage.u64, .False), - else => unreachable, - }, - } + const name = mod.intern_pool.stringToSlice(err.name); + const kv = try mod.getErrorValue(name); + return llvm_ty.constInt(kv.value, .False); }, - .ErrorUnion => { + .error_union => |error_union| { const payload_type = tv.ty.errorUnionPayload(mod); - const is_pl = tv.val.errorUnionIsPayload(); + const is_pl = tv.val.errorUnionIsPayload(mod); if (!payload_type.hasRuntimeBitsIgnoreComptime(mod)) { // We use the error type directly as the type. @@ -3676,7 +3552,10 @@ pub const DeclGen = struct { }); const llvm_payload_value = try dg.lowerValue(.{ .ty = payload_type, - .val = if (tv.val.castTag(.eu_payload)) |pl| pl.data else Value.undef, + .val = switch (error_union.val) { + .err_name => try mod.intern(.{ .undef = payload_type.ip_index }), + .payload => |payload| payload, + }.toValue(), }); var fields_buf: [3]*llvm.Value = undefined; @@ -3697,172 +3576,396 @@ pub const DeclGen = struct { return dg.context.constStruct(&fields_buf, llvm_field_count, .False); } }, - .Struct => { - const llvm_struct_ty = try dg.lowerType(tv.ty); - const gpa = dg.gpa; + .enum_tag => { + const int_val = try tv.enumToInt(mod); - const struct_type = switch (mod.intern_pool.indexToKey(tv.ty.ip_index)) { - .anon_struct_type => |tuple| { - var llvm_fields: std.ArrayListUnmanaged(*llvm.Value) = .{}; - defer llvm_fields.deinit(gpa); + var bigint_space: Value.BigIntSpace = undefined; + const bigint = int_val.toBigInt(&bigint_space, mod); - try llvm_fields.ensureUnusedCapacity(gpa, tuple.types.len); + const int_info = tv.ty.intInfo(mod); + const llvm_type = dg.context.intType(int_info.bits); - comptime assert(struct_layout_version == 2); - var offset: u64 = 0; - var big_align: u32 = 0; - var need_unnamed = false; + const unsigned_val = v: { + if (bigint.limbs.len == 1) { + break :v llvm_type.constInt(bigint.limbs[0], .False); + } + if (@sizeOf(usize) == @sizeOf(u64)) { + break :v llvm_type.constIntOfArbitraryPrecision( + @intCast(c_uint, bigint.limbs.len), + bigint.limbs.ptr, + ); + } + @panic("TODO implement bigint to llvm int for 32-bit compiler builds"); + }; + if (!bigint.positive) { + return llvm.constNeg(unsigned_val); + } + return unsigned_val; + }, + .float => { + const llvm_ty = try dg.lowerType(tv.ty); + switch (tv.ty.floatBits(target)) { + 16 => { + const repr = @bitCast(u16, tv.val.toFloat(f16, mod)); + const llvm_i16 = dg.context.intType(16); + const int = llvm_i16.constInt(repr, .False); + return int.constBitCast(llvm_ty); + }, + 32 => { + const repr = @bitCast(u32, tv.val.toFloat(f32, mod)); + const llvm_i32 = dg.context.intType(32); + const int = llvm_i32.constInt(repr, .False); + return int.constBitCast(llvm_ty); + }, + 64 => { + const repr = @bitCast(u64, tv.val.toFloat(f64, mod)); + const llvm_i64 = dg.context.intType(64); + const int = llvm_i64.constInt(repr, .False); + return int.constBitCast(llvm_ty); + }, + 80 => { + const float = tv.val.toFloat(f80, mod); + const repr = std.math.break_f80(float); + const llvm_i80 = dg.context.intType(80); + var x = llvm_i80.constInt(repr.exp, .False); + x = x.constShl(llvm_i80.constInt(64, .False)); + x = x.constOr(llvm_i80.constInt(repr.fraction, .False)); + if (backendSupportsF80(target)) { + return x.constBitCast(llvm_ty); + } else { + return x; + } + }, + 128 => { + var buf: [2]u64 = @bitCast([2]u64, tv.val.toFloat(f128, mod)); + // LLVM seems to require that the 
lower half of the f128 be placed first + // in the buffer. + if (native_endian == .Big) { + std.mem.swap(u64, &buf[0], &buf[1]); + } + const int = dg.context.intType(128).constIntOfArbitraryPrecision(buf.len, &buf); + return int.constBitCast(llvm_ty); + }, + else => unreachable, + } + }, + .ptr => |ptr| { + const ptr_tv: TypedValue = switch (ptr.len) { + .none => tv, + else => .{ .ty = tv.ty.slicePtrFieldType(mod), .val = tv.val.slicePtr(mod) }, + }; + const llvm_ptr_val = switch (ptr.addr) { + .decl => |decl| try dg.lowerDeclRefValue(ptr_tv, decl), + .mut_decl => |mut_decl| try dg.lowerDeclRefValue(ptr_tv, mut_decl.decl), + .int => |int| dg.lowerIntAsPtr(mod.intern_pool.indexToKey(int).int), + .eu_payload, + .opt_payload, + .elem, + .field, + => try dg.lowerParentPtr(ptr_tv.val, ptr_tv.ty.ptrInfo(mod).bit_offset % 8 == 0), + .comptime_field => unreachable, + }; + switch (ptr.len) { + .none => return llvm_ptr_val, + else => { + const fields: [2]*llvm.Value = .{ + llvm_ptr_val, + try dg.lowerValue(.{ .ty = Type.usize, .val = ptr.len.toValue() }), + }; + return dg.context.constStruct(&fields, fields.len, .False); + }, + } + }, + .opt => |opt| { + comptime assert(optional_layout_version == 3); + const payload_ty = tv.ty.optionalChild(mod); - for (tuple.types, tuple.values, 0..) |field_ty, field_val, i| { - if (field_val != .none) continue; - if (!field_ty.toType().hasRuntimeBitsIgnoreComptime(mod)) continue; + const llvm_i8 = dg.context.intType(8); + const non_null_bit = switch (opt.val) { + .none => llvm_i8.constNull(), + else => llvm_i8.constInt(1, .False), + }; + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + return non_null_bit; + } + const llvm_ty = try dg.lowerType(tv.ty); + if (tv.ty.optionalReprIsPayload(mod)) return switch (opt.val) { + .none => llvm_ty.constNull(), + else => dg.lowerValue(.{ .ty = payload_ty, .val = opt.val.toValue() }), + }; + assert(payload_ty.zigTypeTag(mod) != .Fn); - const field_align = field_ty.toType().abiAlignment(mod); - big_align = @max(big_align, field_align); - const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, field_align); + const llvm_field_count = llvm_ty.countStructElementTypes(); + var fields_buf: [3]*llvm.Value = undefined; + fields_buf[0] = try dg.lowerValue(.{ + .ty = payload_ty, + .val = switch (opt.val) { + .none => try mod.intern(.{ .undef = payload_ty.ip_index }), + else => |payload| payload, + }.toValue(), + }); + fields_buf[1] = non_null_bit; + if (llvm_field_count > 2) { + assert(llvm_field_count == 3); + fields_buf[2] = llvm_ty.structGetTypeAtIndex(2).getUndef(); + } + return dg.context.constStruct(&fields_buf, llvm_field_count, .False); + }, + .aggregate => |aggregate| switch (mod.intern_pool.indexToKey(tv.ty.ip_index)) { + .array_type => switch (aggregate.storage) { + .bytes => |bytes| return dg.context.constString( + bytes.ptr, + @intCast(c_uint, tv.ty.arrayLenIncludingSentinel(mod)), + .True, // Don't null terminate. Bytes has the sentinel, if any. + ), + .elems => |elem_vals| { + const elem_ty = tv.ty.childType(mod); + const gpa = dg.gpa; + const llvm_elems = try gpa.alloc(*llvm.Value, elem_vals.len); + defer gpa.free(llvm_elems); + var need_unnamed = false; + for (elem_vals, 0..) 
|elem_val, i| { + llvm_elems[i] = try dg.lowerValue(.{ .ty = elem_ty, .val = elem_val.toValue() }); + need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[i]); + } + if (need_unnamed) { + return dg.context.constStruct( + llvm_elems.ptr, + @intCast(c_uint, llvm_elems.len), + .True, + ); + } else { + const llvm_elem_ty = try dg.lowerType(elem_ty); + return llvm_elem_ty.constArray( + llvm_elems.ptr, + @intCast(c_uint, llvm_elems.len), + ); + } + }, + .repeated_elem => |val| { + const elem_ty = tv.ty.childType(mod); + const sentinel = tv.ty.sentinel(mod); + const len = @intCast(usize, tv.ty.arrayLen(mod)); + const len_including_sent = len + @boolToInt(sentinel != null); + const gpa = dg.gpa; + const llvm_elems = try gpa.alloc(*llvm.Value, len_including_sent); + defer gpa.free(llvm_elems); - const padding_len = offset - prev_offset; - if (padding_len > 0) { - const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); - // TODO make this and all other padding elsewhere in debug - // builds be 0xaa not undef. - llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); + var need_unnamed = false; + if (len != 0) { + for (llvm_elems[0..len]) |*elem| { + elem.* = try dg.lowerValue(.{ .ty = elem_ty, .val = val.toValue() }); } + need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[0]); + } - const field_llvm_val = try dg.lowerValue(.{ - .ty = field_ty.toType(), - .val = try tv.val.fieldValue(field_ty.toType(), mod, i), - }); + if (sentinel) |sent| { + llvm_elems[len] = try dg.lowerValue(.{ .ty = elem_ty, .val = sent }); + need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[len]); + } - need_unnamed = need_unnamed or dg.isUnnamedType(field_ty.toType(), field_llvm_val); + if (need_unnamed) { + return dg.context.constStruct( + llvm_elems.ptr, + @intCast(c_uint, llvm_elems.len), + .True, + ); + } else { + const llvm_elem_ty = try dg.lowerType(elem_ty); + return llvm_elem_ty.constArray( + llvm_elems.ptr, + @intCast(c_uint, llvm_elems.len), + ); + } + }, + }, + .vector_type => |vector_type| { + const elem_ty = vector_type.child.toType(); + const llvm_elems = try dg.gpa.alloc(*llvm.Value, vector_type.len); + defer dg.gpa.free(llvm_elems); + for (llvm_elems, 0..) 
|*llvm_elem, i| { + llvm_elem.* = try dg.lowerValue(.{ + .ty = elem_ty, + .val = switch (aggregate.storage) { + .bytes => unreachable, + .elems => |elems| elems[i], + .repeated_elem => |elem| elem, + }.toValue(), + }); + } + return llvm.constVector( + llvm_elems.ptr, + @intCast(c_uint, llvm_elems.len), + ); + }, + .struct_type, .anon_struct_type => { + const llvm_struct_ty = try dg.lowerType(tv.ty); + const gpa = dg.gpa; - llvm_fields.appendAssumeCapacity(field_llvm_val); + const struct_type = switch (mod.intern_pool.indexToKey(tv.ty.ip_index)) { + .anon_struct_type => |tuple| { + var llvm_fields: std.ArrayListUnmanaged(*llvm.Value) = .{}; + defer llvm_fields.deinit(gpa); - offset += field_ty.toType().abiSize(mod); - } - { - const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, big_align); - const padding_len = offset - prev_offset; - if (padding_len > 0) { - const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); - llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); - } - } + try llvm_fields.ensureUnusedCapacity(gpa, tuple.types.len); + + comptime assert(struct_layout_version == 2); + var offset: u64 = 0; + var big_align: u32 = 0; + var need_unnamed = false; - if (need_unnamed) { - return dg.context.constStruct( - llvm_fields.items.ptr, - @intCast(c_uint, llvm_fields.items.len), - .False, - ); - } else { - return llvm_struct_ty.constNamedStruct( - llvm_fields.items.ptr, - @intCast(c_uint, llvm_fields.items.len), - ); - } - }, - .struct_type => |struct_type| struct_type, - else => unreachable, - }; + for (tuple.types, tuple.values, 0..) |field_ty, field_val, i| { + if (field_val != .none) continue; + if (!field_ty.toType().hasRuntimeBitsIgnoreComptime(mod)) continue; + + const field_align = field_ty.toType().abiAlignment(mod); + big_align = @max(big_align, field_align); + const prev_offset = offset; + offset = std.mem.alignForwardGeneric(u64, offset, field_align); + + const padding_len = offset - prev_offset; + if (padding_len > 0) { + const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); + // TODO make this and all other padding elsewhere in debug + // builds be 0xaa not undef. + llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); + } - const struct_obj = mod.structPtrUnwrap(struct_type.index).?; + const field_llvm_val = try dg.lowerValue(.{ + .ty = field_ty.toType(), + .val = try tv.val.fieldValue(mod, i), + }); - if (struct_obj.layout == .Packed) { - assert(struct_obj.haveLayout()); - const big_bits = struct_obj.backing_int_ty.bitSize(mod); - const int_llvm_ty = dg.context.intType(@intCast(c_uint, big_bits)); - const fields = struct_obj.fields.values(); - comptime assert(Type.packed_struct_layout_version == 2); - var running_int: *llvm.Value = int_llvm_ty.constNull(); - var running_bits: u16 = 0; - for (fields, 0..) 
|field, i| { - if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue; + need_unnamed = need_unnamed or dg.isUnnamedType(field_ty.toType(), field_llvm_val); - const non_int_val = try dg.lowerValue(.{ - .ty = field.ty, - .val = try tv.val.fieldValue(field.ty, mod, i), - }); - const ty_bit_size = @intCast(u16, field.ty.bitSize(mod)); - const small_int_ty = dg.context.intType(ty_bit_size); - const small_int_val = if (field.ty.isPtrAtRuntime(mod)) - non_int_val.constPtrToInt(small_int_ty) - else - non_int_val.constBitCast(small_int_ty); - const shift_rhs = int_llvm_ty.constInt(running_bits, .False); - // If the field is as large as the entire packed struct, this - // zext would go from, e.g. i16 to i16. This is legal with - // constZExtOrBitCast but not legal with constZExt. - const extended_int_val = small_int_val.constZExtOrBitCast(int_llvm_ty); - const shifted = extended_int_val.constShl(shift_rhs); - running_int = running_int.constOr(shifted); - running_bits += ty_bit_size; - } - return running_int; - } + llvm_fields.appendAssumeCapacity(field_llvm_val); - const llvm_field_count = llvm_struct_ty.countStructElementTypes(); - var llvm_fields = try std.ArrayListUnmanaged(*llvm.Value).initCapacity(gpa, llvm_field_count); - defer llvm_fields.deinit(gpa); + offset += field_ty.toType().abiSize(mod); + } + { + const prev_offset = offset; + offset = std.mem.alignForwardGeneric(u64, offset, big_align); + const padding_len = offset - prev_offset; + if (padding_len > 0) { + const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); + llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); + } + } - comptime assert(struct_layout_version == 2); - var offset: u64 = 0; - var big_align: u32 = 0; - var need_unnamed = false; + if (need_unnamed) { + return dg.context.constStruct( + llvm_fields.items.ptr, + @intCast(c_uint, llvm_fields.items.len), + .False, + ); + } else { + return llvm_struct_ty.constNamedStruct( + llvm_fields.items.ptr, + @intCast(c_uint, llvm_fields.items.len), + ); + } + }, + .struct_type => |struct_type| struct_type, + else => unreachable, + }; - var it = struct_obj.runtimeFieldIterator(mod); - while (it.next()) |field_and_index| { - const field = field_and_index.field; - const field_align = field.alignment(mod, struct_obj.layout); - big_align = @max(big_align, field_align); - const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, field_align); + const struct_obj = mod.structPtrUnwrap(struct_type.index).?; - const padding_len = offset - prev_offset; - if (padding_len > 0) { - const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); - // TODO make this and all other padding elsewhere in debug - // builds be 0xaa not undef. - llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); + if (struct_obj.layout == .Packed) { + assert(struct_obj.haveLayout()); + const big_bits = struct_obj.backing_int_ty.bitSize(mod); + const int_llvm_ty = dg.context.intType(@intCast(c_uint, big_bits)); + const fields = struct_obj.fields.values(); + comptime assert(Type.packed_struct_layout_version == 2); + var running_int: *llvm.Value = int_llvm_ty.constNull(); + var running_bits: u16 = 0; + for (fields, 0..) 
|field, i| { + if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue; + + const non_int_val = try dg.lowerValue(.{ + .ty = field.ty, + .val = try tv.val.fieldValue(mod, i), + }); + const ty_bit_size = @intCast(u16, field.ty.bitSize(mod)); + const small_int_ty = dg.context.intType(ty_bit_size); + const small_int_val = if (field.ty.isPtrAtRuntime(mod)) + non_int_val.constPtrToInt(small_int_ty) + else + non_int_val.constBitCast(small_int_ty); + const shift_rhs = int_llvm_ty.constInt(running_bits, .False); + // If the field is as large as the entire packed struct, this + // zext would go from, e.g. i16 to i16. This is legal with + // constZExtOrBitCast but not legal with constZExt. + const extended_int_val = small_int_val.constZExtOrBitCast(int_llvm_ty); + const shifted = extended_int_val.constShl(shift_rhs); + running_int = running_int.constOr(shifted); + running_bits += ty_bit_size; + } + return running_int; } - const field_llvm_val = try dg.lowerValue(.{ - .ty = field.ty, - .val = try tv.val.fieldValue(field.ty, mod, field_and_index.index), - }); + const llvm_field_count = llvm_struct_ty.countStructElementTypes(); + var llvm_fields = try std.ArrayListUnmanaged(*llvm.Value).initCapacity(gpa, llvm_field_count); + defer llvm_fields.deinit(gpa); - need_unnamed = need_unnamed or dg.isUnnamedType(field.ty, field_llvm_val); + comptime assert(struct_layout_version == 2); + var offset: u64 = 0; + var big_align: u32 = 0; + var need_unnamed = false; + + var it = struct_obj.runtimeFieldIterator(mod); + while (it.next()) |field_and_index| { + const field = field_and_index.field; + const field_align = field.alignment(mod, struct_obj.layout); + big_align = @max(big_align, field_align); + const prev_offset = offset; + offset = std.mem.alignForwardGeneric(u64, offset, field_align); + + const padding_len = offset - prev_offset; + if (padding_len > 0) { + const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); + // TODO make this and all other padding elsewhere in debug + // builds be 0xaa not undef. 
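+ // Worked example with illustrative offsets: if the previous field ends at
+ // byte 5 and this one must start at byte 8, padding_len = 8 - 5 = 3, so an
+ // undef `[3 x i8]` element is appended to keep the constant's layout in
+ // step with Zig's computed field offsets.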
+ llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); + } - llvm_fields.appendAssumeCapacity(field_llvm_val); + const field_llvm_val = try dg.lowerValue(.{ + .ty = field.ty, + .val = try tv.val.fieldValue(mod, field_and_index.index), + }); - offset += field.ty.abiSize(mod); - } - { - const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, big_align); - const padding_len = offset - prev_offset; - if (padding_len > 0) { - const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); - llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); + need_unnamed = need_unnamed or dg.isUnnamedType(field.ty, field_llvm_val); + + llvm_fields.appendAssumeCapacity(field_llvm_val); + + offset += field.ty.abiSize(mod); + } + { + const prev_offset = offset; + offset = std.mem.alignForwardGeneric(u64, offset, big_align); + const padding_len = offset - prev_offset; + if (padding_len > 0) { + const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); + llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); + } } - } - if (need_unnamed) { - return dg.context.constStruct( - llvm_fields.items.ptr, - @intCast(c_uint, llvm_fields.items.len), - .False, - ); - } else { - return llvm_struct_ty.constNamedStruct( - llvm_fields.items.ptr, - @intCast(c_uint, llvm_fields.items.len), - ); - } + if (need_unnamed) { + return dg.context.constStruct( + llvm_fields.items.ptr, + @intCast(c_uint, llvm_fields.items.len), + .False, + ); + } else { + return llvm_struct_ty.constNamedStruct( + llvm_fields.items.ptr, + @intCast(c_uint, llvm_fields.items.len), + ); + } + }, + else => unreachable, }, - .Union => { + .un => { const llvm_union_ty = try dg.lowerType(tv.ty); const tag_and_val: Value.Payload.Union.Data = switch (tv.val.ip_index) { .none => tv.val.castTag(.@"union").?.data, @@ -3950,96 +4053,6 @@ pub const DeclGen = struct { return llvm_union_ty.constNamedStruct(&fields, fields_len); } }, - .Vector => switch (tv.val.tag()) { - .bytes => { - // Note, sentinel is not stored even if the type has a sentinel. - const bytes = tv.val.castTag(.bytes).?.data; - const vector_len = @intCast(usize, tv.ty.arrayLen(mod)); - assert(vector_len == bytes.len or vector_len + 1 == bytes.len); - - const elem_ty = tv.ty.childType(mod); - const llvm_elems = try dg.gpa.alloc(*llvm.Value, vector_len); - defer dg.gpa.free(llvm_elems); - for (llvm_elems, 0..) |*elem, i| { - elem.* = try dg.lowerValue(.{ - .ty = elem_ty, - .val = try mod.intValue(elem_ty, bytes[i]), - }); - } - return llvm.constVector( - llvm_elems.ptr, - @intCast(c_uint, llvm_elems.len), - ); - }, - .aggregate => { - // Note, sentinel is not stored even if the type has a sentinel. - // The value includes the sentinel in those cases. - const elem_vals = tv.val.castTag(.aggregate).?.data; - const vector_len = @intCast(usize, tv.ty.arrayLen(mod)); - assert(vector_len == elem_vals.len or vector_len + 1 == elem_vals.len); - const elem_ty = tv.ty.childType(mod); - const llvm_elems = try dg.gpa.alloc(*llvm.Value, vector_len); - defer dg.gpa.free(llvm_elems); - for (llvm_elems, 0..) |*elem, i| { - elem.* = try dg.lowerValue(.{ .ty = elem_ty, .val = elem_vals[i] }); - } - return llvm.constVector( - llvm_elems.ptr, - @intCast(c_uint, llvm_elems.len), - ); - }, - .repeated => { - // Note, sentinel is not stored even if the type has a sentinel. 
- const val = tv.val.castTag(.repeated).?.data; - const elem_ty = tv.ty.childType(mod); - const len = @intCast(usize, tv.ty.arrayLen(mod)); - const llvm_elems = try dg.gpa.alloc(*llvm.Value, len); - defer dg.gpa.free(llvm_elems); - for (llvm_elems) |*elem| { - elem.* = try dg.lowerValue(.{ .ty = elem_ty, .val = val }); - } - return llvm.constVector( - llvm_elems.ptr, - @intCast(c_uint, llvm_elems.len), - ); - }, - .str_lit => { - // Note, sentinel is not stored - const str_lit = tv.val.castTag(.str_lit).?.data; - const bytes = dg.module.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; - const vector_len = @intCast(usize, tv.ty.arrayLen(mod)); - assert(vector_len == bytes.len); - - const elem_ty = tv.ty.childType(mod); - const llvm_elems = try dg.gpa.alloc(*llvm.Value, vector_len); - defer dg.gpa.free(llvm_elems); - for (llvm_elems, 0..) |*elem, i| { - elem.* = try dg.lowerValue(.{ - .ty = elem_ty, - .val = try mod.intValue(elem_ty, bytes[i]), - }); - } - return llvm.constVector( - llvm_elems.ptr, - @intCast(c_uint, llvm_elems.len), - ); - }, - else => unreachable, - }, - - .ComptimeInt => unreachable, - .ComptimeFloat => unreachable, - .Type => unreachable, - .EnumLiteral => unreachable, - .Void => unreachable, - .NoReturn => unreachable, - .Undefined => unreachable, - .Null => unreachable, - .Opaque => unreachable, - - .Frame, - .AnyFrame, - => return dg.todo("implement const of type '{}'", .{tv.ty.fmtDebug()}), } } @@ -4094,10 +4107,9 @@ pub const DeclGen = struct { fn lowerParentPtr(dg: *DeclGen, ptr_val: Value, byte_aligned: bool) Error!*llvm.Value { const mod = dg.module; const target = mod.getTarget(); - if (ptr_val.ip_index != .none) return switch (mod.intern_pool.indexToKey(ptr_val.ip_index)) { + return switch (mod.intern_pool.indexToKey(ptr_val.ip_index)) { .int => |int| dg.lowerIntAsPtr(int), .ptr => |ptr| switch (ptr.addr) { - .@"var" => |@"var"| dg.lowerParentPtrDecl(ptr_val, @"var".owner_decl), .decl => |decl| dg.lowerParentPtrDecl(ptr_val, decl), .mut_decl => |mut_decl| dg.lowerParentPtrDecl(ptr_val, mut_decl.decl), .int => |int| dg.lowerIntAsPtr(mod.intern_pool.indexToKey(int).int), @@ -4150,7 +4162,7 @@ pub const DeclGen = struct { const indices: [1]*llvm.Value = .{ llvm_usize.constInt(elem_ptr.index, .False), }; - const elem_llvm_ty = try dg.lowerType(ptr.ty.toType().childType(mod)); + const elem_llvm_ty = try dg.lowerType(ptr.ty.toType().elemType2(mod)); return elem_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); }, .field => |field_ptr| { @@ -4185,7 +4197,7 @@ pub const DeclGen = struct { .Struct => { if (parent_ty.containerLayout(mod) == .Packed) { if (!byte_aligned) return parent_llvm_ptr; - const llvm_usize = dg.context.intType(target.cpu.arch.ptrBitWidth()); + const llvm_usize = dg.context.intType(target.ptrBitWidth()); const base_addr = parent_llvm_ptr.constPtrToInt(llvm_usize); // count bits of fields before this one const prev_bits = b: { @@ -4230,148 +4242,6 @@ pub const DeclGen = struct { }, else => unreachable, }; - switch (ptr_val.tag()) { - .decl_ref_mut => { - const decl = ptr_val.castTag(.decl_ref_mut).?.data.decl_index; - return dg.lowerParentPtrDecl(ptr_val, decl); - }, - .decl_ref => { - const decl = ptr_val.castTag(.decl_ref).?.data; - return dg.lowerParentPtrDecl(ptr_val, decl); - }, - .variable => { - const decl = ptr_val.castTag(.variable).?.data.owner_decl; - return dg.lowerParentPtrDecl(ptr_val, decl); - }, - .field_ptr => { - const field_ptr = ptr_val.castTag(.field_ptr).?.data; - const parent_llvm_ptr = try 
dg.lowerParentPtr(field_ptr.container_ptr, byte_aligned); - const parent_ty = field_ptr.container_ty; - - const field_index = @intCast(u32, field_ptr.field_index); - const llvm_u32 = dg.context.intType(32); - switch (parent_ty.zigTypeTag(mod)) { - .Union => { - if (parent_ty.containerLayout(mod) == .Packed) { - return parent_llvm_ptr; - } - - const layout = parent_ty.unionGetLayout(mod); - if (layout.payload_size == 0) { - // In this case a pointer to the union and a pointer to any - // (void) payload is the same. - return parent_llvm_ptr; - } - const llvm_pl_index = if (layout.tag_size == 0) - 0 - else - @boolToInt(layout.tag_align >= layout.payload_align); - const indices: [2]*llvm.Value = .{ - llvm_u32.constInt(0, .False), - llvm_u32.constInt(llvm_pl_index, .False), - }; - const parent_llvm_ty = try dg.lowerType(parent_ty); - return parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); - }, - .Struct => { - if (parent_ty.containerLayout(mod) == .Packed) { - if (!byte_aligned) return parent_llvm_ptr; - const llvm_usize = dg.context.intType(target.ptrBitWidth()); - const base_addr = parent_llvm_ptr.constPtrToInt(llvm_usize); - // count bits of fields before this one - const prev_bits = b: { - var b: usize = 0; - for (parent_ty.structFields(mod).values()[0..field_index]) |field| { - if (field.is_comptime or !field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue; - b += @intCast(usize, field.ty.bitSize(mod)); - } - break :b b; - }; - const byte_offset = llvm_usize.constInt(prev_bits / 8, .False); - const field_addr = base_addr.constAdd(byte_offset); - const final_llvm_ty = dg.context.pointerType(0); - return field_addr.constIntToPtr(final_llvm_ty); - } - - const parent_llvm_ty = try dg.lowerType(parent_ty); - if (llvmField(parent_ty, field_index, mod)) |llvm_field| { - const indices: [2]*llvm.Value = .{ - llvm_u32.constInt(0, .False), - llvm_u32.constInt(llvm_field.index, .False), - }; - return parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); - } else { - const llvm_index = llvm_u32.constInt(@boolToInt(parent_ty.hasRuntimeBitsIgnoreComptime(mod)), .False); - const indices: [1]*llvm.Value = .{llvm_index}; - return parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); - } - }, - .Pointer => { - assert(parent_ty.isSlice(mod)); - const indices: [2]*llvm.Value = .{ - llvm_u32.constInt(0, .False), - llvm_u32.constInt(field_index, .False), - }; - const parent_llvm_ty = try dg.lowerType(parent_ty); - return parent_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); - }, - else => unreachable, - } - }, - .elem_ptr => { - const elem_ptr = ptr_val.castTag(.elem_ptr).?.data; - const parent_llvm_ptr = try dg.lowerParentPtr(elem_ptr.array_ptr, true); - - const llvm_usize = try dg.lowerType(Type.usize); - const indices: [1]*llvm.Value = .{ - llvm_usize.constInt(elem_ptr.index, .False), - }; - const elem_llvm_ty = try dg.lowerType(elem_ptr.elem_ty); - return elem_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); - }, - .opt_payload_ptr => { - const opt_payload_ptr = ptr_val.castTag(.opt_payload_ptr).?.data; - const parent_llvm_ptr = try dg.lowerParentPtr(opt_payload_ptr.container_ptr, true); - - const payload_ty = opt_payload_ptr.container_ty.optionalChild(mod); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod) or - payload_ty.optionalReprIsPayload(mod)) - { - // In this case, we represent pointer to optional the same as pointer - // to the payload. 
- return parent_llvm_ptr; - } - - const llvm_u32 = dg.context.intType(32); - const indices: [2]*llvm.Value = .{ - llvm_u32.constInt(0, .False), - llvm_u32.constInt(0, .False), - }; - const opt_llvm_ty = try dg.lowerType(opt_payload_ptr.container_ty); - return opt_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); - }, - .eu_payload_ptr => { - const eu_payload_ptr = ptr_val.castTag(.eu_payload_ptr).?.data; - const parent_llvm_ptr = try dg.lowerParentPtr(eu_payload_ptr.container_ptr, true); - - const payload_ty = eu_payload_ptr.container_ty.errorUnionPayload(mod); - if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { - // In this case, we represent pointer to error union the same as pointer - // to the payload. - return parent_llvm_ptr; - } - - const payload_offset: u8 = if (payload_ty.abiAlignment(mod) > Type.anyerror.abiSize(mod)) 2 else 1; - const llvm_u32 = dg.context.intType(32); - const indices: [2]*llvm.Value = .{ - llvm_u32.constInt(0, .False), - llvm_u32.constInt(payload_offset, .False), - }; - const eu_llvm_ty = try dg.lowerType(eu_payload_ptr.container_ty); - return eu_llvm_ty.constInBoundsGEP(parent_llvm_ptr, &indices, indices.len); - }, - else => unreachable, - } } fn lowerDeclRefValue( @@ -4380,20 +4250,6 @@ pub const DeclGen = struct { decl_index: Module.Decl.Index, ) Error!*llvm.Value { const mod = self.module; - if (tv.ty.isSlice(mod)) { - const ptr_ty = tv.ty.slicePtrFieldType(mod); - const fields: [2]*llvm.Value = .{ - try self.lowerValue(.{ - .ty = ptr_ty, - .val = tv.val, - }), - try self.lowerValue(.{ - .ty = Type.usize, - .val = try mod.intValue(Type.usize, tv.val.sliceLen(mod)), - }), - }; - return self.context.constStruct(&fields, fields.len, .False); - } // In the case of something like: // fn foo() void {} @@ -4401,13 +4257,13 @@ pub const DeclGen = struct { // ... &bar; // `bar` is just an alias and we actually want to lower a reference to `foo`. 
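+ // The recursion below terminates because the owner decl's own value is the
+ // function itself, so the second call sees owner_decl == decl_index and
+ // falls through to the direct lowering path.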
const decl = mod.declPtr(decl_index); - if (decl.val.castTag(.function)) |func| { - if (func.data.owner_decl != decl_index) { - return self.lowerDeclRefValue(tv, func.data.owner_decl); + if (decl.getFunction(mod)) |func| { + if (func.owner_decl != decl_index) { + return self.lowerDeclRefValue(tv, func.owner_decl); } - } else if (decl.val.castTag(.extern_fn)) |func| { - if (func.data.owner_decl != decl_index) { - return self.lowerDeclRefValue(tv, func.data.owner_decl); + } else if (decl.getExternFunc(mod)) |func| { + if (func.decl != decl_index) { + return self.lowerDeclRefValue(tv, func.decl); } } @@ -6333,11 +6189,11 @@ pub const FuncGen = struct { } fn airDbgInlineBegin(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { - const mod = self.dg.module; const dib = self.dg.object.di_builder orelse return null; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; - const func = self.air.values[ty_pl.payload].castTag(.function).?.data; + const mod = self.dg.module; + const func = self.air.values[ty_pl.payload].getFunction(mod).?; const decl_index = func.owner_decl; const decl = mod.declPtr(decl_index); const di_file = try self.dg.object.getDIFile(self.gpa, mod.namespacePtr(decl.src_namespace).file_scope); @@ -6395,8 +6251,8 @@ pub const FuncGen = struct { if (self.dg.object.di_builder == null) return null; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; - const func = self.air.values[ty_pl.payload].castTag(.function).?.data; const mod = self.dg.module; + const func = self.air.values[ty_pl.payload].getFunction(mod).?; const decl = mod.declPtr(func.owner_decl); const di_file = try self.dg.object.getDIFile(self.gpa, mod.namespacePtr(decl.src_namespace).file_scope); self.di_file = di_file; @@ -8349,7 +8205,7 @@ pub const FuncGen = struct { } const src_index = self.air.instructions.items(.data)[inst].arg.src_index; - const func = self.dg.decl.getFunction().?; + const func = self.dg.decl.getFunction(mod).?; const lbrace_line = mod.declPtr(func.owner_decl).src_line + func.lbrace_line + 1; const lbrace_col = func.lbrace_column + 1; const di_local_var = dib.createParameterVariable( @@ -9147,7 +9003,7 @@ pub const FuncGen = struct { defer self.gpa.free(fqn); const llvm_fn_name = try std.fmt.allocPrintZ(arena, "__zig_tag_name_{s}", .{fqn}); - const slice_ty = Type.const_slice_u8_sentinel_0; + const slice_ty = Type.slice_const_u8_sentinel_0; const llvm_ret_ty = try self.dg.lowerType(slice_ty); const usize_llvm_ty = try self.dg.lowerType(Type.usize); const slice_alignment = slice_ty.abiAlignment(mod); @@ -9861,7 +9717,7 @@ pub const FuncGen = struct { } const mod = self.dg.module; - const slice_ty = Type.const_slice_u8_sentinel_0; + const slice_ty = Type.slice_const_u8_sentinel_0; const slice_alignment = slice_ty.abiAlignment(mod); const llvm_slice_ptr_ty = self.context.pointerType(0); // TODO: Address space diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 612ac1f252..96c723989a 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -236,9 +236,9 @@ pub const DeclGen = struct { if (try self.air.value(inst, mod)) |val| { const ty = self.typeOf(inst); if (ty.zigTypeTag(mod) == .Fn) { - const fn_decl_index = switch (val.tag()) { - .extern_fn => val.castTag(.extern_fn).?.data.owner_decl, - .function => val.castTag(.function).?.data.owner_decl, + const fn_decl_index = switch (mod.intern_pool.indexToKey(val.ip_index)) { + .extern_func => |extern_func| extern_func.decl, + .func => |func| mod.funcPtr(func.index).owner_decl, else => unreachable, }; const 
spv_decl_index = try self.resolveDecl(fn_decl_index); @@ -261,7 +261,7 @@ pub const DeclGen = struct { const entry = try self.decl_link.getOrPut(decl_index); if (!entry.found_existing) { // TODO: Extern fn? - const kind: SpvModule.DeclKind = if (decl.val.tag() == .function) + const kind: SpvModule.DeclKind = if (decl.getFunctionIndex(self.module) != .none) .func else .global; @@ -573,6 +573,7 @@ pub const DeclGen = struct { fn addDeclRef(self: *@This(), ty: Type, decl_index: Decl.Index) !void { const dg = self.dg; + const mod = dg.module; const ty_ref = try self.dg.resolveType(ty, .indirect); const ty_id = dg.typeId(ty_ref); @@ -580,8 +581,8 @@ pub const DeclGen = struct { const decl = dg.module.declPtr(decl_index); const spv_decl_index = try dg.resolveDecl(decl_index); - switch (decl.val.tag()) { - .function => { + switch (mod.intern_pool.indexToKey(decl.val.ip_index)) { + .func => { // TODO: Properly lower function pointers. For now we are going to hack around it and // just generate an empty pointer. Function pointers are represented by usize for now, // though. @@ -589,7 +590,7 @@ pub const DeclGen = struct { // TODO: Add dependency return; }, - .extern_fn => unreachable, // TODO + .extern_func => unreachable, // TODO else => { const result_id = dg.spv.allocId(); log.debug("addDeclRef: id = {}, index = {}, name = {s}", .{ result_id.id, @enumToInt(spv_decl_index), decl.name }); @@ -610,39 +611,23 @@ pub const DeclGen = struct { } } - fn lower(self: *@This(), ty: Type, val: Value) !void { + fn lower(self: *@This(), ty: Type, arg_val: Value) !void { const dg = self.dg; const mod = dg.module; - if (val.isUndef(mod)) { + var val = arg_val; + switch (mod.intern_pool.indexToKey(val.ip_index)) { + .runtime_value => |rt| val = rt.val.toValue(), + else => {}, + } + + if (val.isUndefDeep(mod)) { const size = ty.abiSize(mod); return try self.addUndef(size); } - switch (ty.zigTypeTag(mod)) { - .Int => try self.addInt(ty, val), - .Float => try self.addFloat(ty, val), - .Bool => try self.addConstBool(val.toBool(mod)), + if (val.ip_index == .none) switch (ty.zigTypeTag(mod)) { .Array => switch (val.tag()) { - .aggregate => { - const elem_vals = val.castTag(.aggregate).?.data; - const elem_ty = ty.childType(mod); - const len = @intCast(u32, ty.arrayLenIncludingSentinel(mod)); // TODO: limit spir-v to 32 bit arrays in a more elegant way. 
- for (elem_vals[0..len]) |elem_val| { - try self.lower(elem_ty, elem_val); - } - }, - .repeated => { - const elem_val = val.castTag(.repeated).?.data; - const elem_ty = ty.childType(mod); - const len = @intCast(u32, ty.arrayLen(mod)); - for (0..len) |_| { - try self.lower(elem_ty, elem_val); - } - if (ty.sentinel(mod)) |sentinel| { - try self.lower(elem_ty, sentinel); - } - }, .str_lit => { const str_lit = val.castTag(.str_lit).?.data; const bytes = dg.module.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; @@ -657,29 +642,6 @@ pub const DeclGen = struct { }, else => |tag| return dg.todo("indirect array constant with tag {s}", .{@tagName(tag)}), }, - .Pointer => switch (val.tag()) { - .decl_ref_mut => { - const decl_index = val.castTag(.decl_ref_mut).?.data.decl_index; - try self.addDeclRef(ty, decl_index); - }, - .decl_ref => { - const decl_index = val.castTag(.decl_ref).?.data; - try self.addDeclRef(ty, decl_index); - }, - .slice => { - const slice = val.castTag(.slice).?.data; - - const ptr_ty = ty.slicePtrFieldType(mod); - - try self.lower(ptr_ty, slice.ptr); - try self.addInt(Type.usize, slice.len); - }, - .zero => try self.addNullPtr(try dg.resolveType(ty, .indirect)), - .int_u64, .one, .int_big_positive, .lazy_align, .lazy_size => { - try self.addInt(Type.usize, val); - }, - else => |tag| return dg.todo("pointer value of type {s}", .{@tagName(tag)}), - }, .Struct => { if (ty.isSimpleTupleOrAnonStruct(mod)) { unreachable; // TODO @@ -705,20 +667,134 @@ pub const DeclGen = struct { } } }, - .Optional => { + .Vector, + .Frame, + .AnyFrame, + => return dg.todo("indirect constant of type {}", .{ty.fmt(mod)}), + .Float, + .Union, + .Optional, + .ErrorUnion, + .ErrorSet, + .Int, + .Enum, + .Bool, + .Pointer, + => unreachable, // handled below + .Type, + .Void, + .NoReturn, + .ComptimeFloat, + .ComptimeInt, + .Undefined, + .Null, + .Opaque, + .EnumLiteral, + .Fn, + => unreachable, // comptime-only types + }; + + switch (mod.intern_pool.indexToKey(val.ip_index)) { + .int_type, + .ptr_type, + .array_type, + .vector_type, + .opt_type, + .anyframe_type, + .error_union_type, + .simple_type, + .struct_type, + .anon_struct_type, + .union_type, + .opaque_type, + .enum_type, + .func_type, + .error_set_type, + .inferred_error_set_type, + => unreachable, // types, not values + + .undef, .runtime_value => unreachable, // handled above + .simple_value => |simple_value| switch (simple_value) { + .undefined, + .void, + .null, + .empty_struct, + .@"unreachable", + .generic_poison, + => unreachable, // non-runtime values + .false, .true => try self.addConstBool(val.toBool(mod)), + }, + .variable, + .extern_func, + .func, + .enum_literal, + => unreachable, // non-runtime values + .int => try self.addInt(ty, val), + .err => |err| { + const name = mod.intern_pool.stringToSlice(err.name); + const kv = try mod.getErrorValue(name); + try self.addConstInt(u16, @intCast(u16, kv.value)); + }, + .error_union => |error_union| { + const payload_ty = ty.errorUnionPayload(mod); + const is_pl = val.errorUnionIsPayload(mod); + const error_val = if (!is_pl) val else try mod.intValue(Type.anyerror, 0); + + const eu_layout = dg.errorUnionLayout(payload_ty); + if (!eu_layout.payload_has_bits) { + return try self.lower(Type.anyerror, error_val); + } + + const payload_size = payload_ty.abiSize(mod); + const error_size = Type.anyerror.abiAlignment(mod); + const ty_size = ty.abiSize(mod); + const padding = ty_size - payload_size - error_size; + + const payload_val = switch (error_union.val) { + .err_name => try 
mod.intern(.{ .undef = payload_ty.ip_index }), + .payload => |payload| payload, + }.toValue(); + + if (eu_layout.error_first) { + try self.lower(Type.anyerror, error_val); + try self.lower(payload_ty, payload_val); + } else { + try self.lower(payload_ty, payload_val); + try self.lower(Type.anyerror, error_val); + } + + try self.addUndef(padding); + }, + .enum_tag => { + const int_val = try val.enumToInt(ty, mod); + + const int_ty = try ty.intTagType(mod); + + try self.lower(int_ty, int_val); + }, + .float => try self.addFloat(ty, val), + .ptr => |ptr| { + switch (ptr.addr) { + .decl => |decl| try self.addDeclRef(ty, decl), + .mut_decl => |mut_decl| try self.addDeclRef(ty, mut_decl.decl), + else => |tag| return dg.todo("pointer value of type {s}", .{@tagName(tag)}), + } + if (ptr.len != .none) { + try self.addInt(Type.usize, ptr.len.toValue()); + } + }, + .opt => { const payload_ty = ty.optionalChild(mod); - const has_payload = !val.isNull(mod); + const payload_val = val.optionalValue(mod); const abi_size = ty.abiSize(mod); if (!payload_ty.hasRuntimeBits(mod)) { - try self.addConstBool(has_payload); + try self.addConstBool(payload_val != null); return; } else if (ty.optionalReprIsPayload(mod)) { // Optional representation is a nullable pointer or slice. - if (val.castTag(.opt_payload)) |payload| { - try self.lower(payload_ty, payload.data); - } else if (has_payload) { - try self.lower(payload_ty, val); + if (payload_val) |pl_val| { + try self.lower(payload_ty, pl_val); } else { const ptr_ty_ref = try dg.resolveType(ty, .indirect); try self.addNullPtr(ptr_ty_ref); @@ -734,27 +810,63 @@ pub const DeclGen = struct { const payload_size = payload_ty.abiSize(mod); const padding = abi_size - payload_size - 1; - if (val.castTag(.opt_payload)) |payload| { - try self.lower(payload_ty, payload.data); + if (payload_val) |pl_val| { + try self.lower(payload_ty, pl_val); } else { try self.addUndef(payload_size); } - try self.addConstBool(has_payload); + try self.addConstBool(payload_val != null); try self.addUndef(padding); }, - .Enum => { - const int_val = try val.enumToInt(ty, mod); + .aggregate => |aggregate| switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .array_type => |array_type| { + const elem_ty = array_type.child.toType(); + switch (aggregate.storage) { + .bytes => |bytes| try self.addBytes(bytes), + .elems, .repeated_elem => { + for (0..array_type.len) |i| { + try self.lower(elem_ty, switch (aggregate.storage) { + .bytes => unreachable, + .elems => |elem_vals| elem_vals[@intCast(usize, i)].toValue(), + .repeated_elem => |elem_val| elem_val.toValue(), + }); + } + }, + } + if (array_type.sentinel != .none) { + try self.lower(elem_ty, array_type.sentinel.toValue()); + } + }, + .vector_type => return dg.todo("indirect constant of type {}", .{ty.fmt(mod)}), + .struct_type => { + const struct_ty = mod.typeToStruct(ty).?; - const int_ty = try ty.intTagType(mod); + if (struct_ty.layout == .Packed) { + return dg.todo("packed struct constants", .{}); + } - try self.lower(int_ty, int_val); + const struct_begin = self.size; + const field_vals = val.castTag(.aggregate).?.data; + for (struct_ty.fields.values(), 0..) |field, i| { + if (field.is_comptime or !field.ty.hasRuntimeBits(mod)) continue; + try self.lower(field.ty, field_vals[i]); + + // Add padding if required. + // TODO: Add to type generation as well? 
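+ // Illustrative numbers: if this field's bytes end 5 bytes into the struct
+ // but structFieldOffset reports the next field at offset 8, then
+ // padding = 8 - 5 = 3 undef bytes are emitted before the next field.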
+ const unpadded_field_end = self.size - struct_begin; + const padded_field_end = ty.structFieldOffset(i + 1, mod); + const padding = padded_field_end - unpadded_field_end; + try self.addUndef(padding); + } + }, + .anon_struct_type => unreachable, // TODO + else => unreachable, }, - .Union => { - const tag_and_val = val.castTag(.@"union").?.data; + .un => |un| { const layout = ty.unionGetLayout(mod); if (layout.payload_size == 0) { - return try self.lower(ty.unionTagTypeSafety(mod).?, tag_and_val.tag); + return try self.lower(ty.unionTagTypeSafety(mod).?, un.tag.toValue()); } const union_ty = mod.typeToUnion(ty).?; @@ -762,18 +874,18 @@ pub const DeclGen = struct { return dg.todo("packed union constants", .{}); } - const active_field = ty.unionTagFieldIndex(tag_and_val.tag, dg.module).?; + const active_field = ty.unionTagFieldIndex(un.tag.toValue(), dg.module).?; const active_field_ty = union_ty.fields.values()[active_field].ty; const has_tag = layout.tag_size != 0; const tag_first = layout.tag_align >= layout.payload_align; if (has_tag and tag_first) { - try self.lower(ty.unionTagTypeSafety(mod).?, tag_and_val.tag); + try self.lower(ty.unionTagTypeSafety(mod).?, un.tag.toValue()); } const active_field_size = if (active_field_ty.hasRuntimeBitsIgnoreComptime(mod)) blk: { - try self.lower(active_field_ty, tag_and_val.val); + try self.lower(active_field_ty, un.val.toValue()); break :blk active_field_ty.abiSize(mod); } else 0; @@ -781,53 +893,11 @@ pub const DeclGen = struct { try self.addUndef(payload_padding_len); if (has_tag and !tag_first) { - try self.lower(ty.unionTagTypeSafety(mod).?, tag_and_val.tag); + try self.lower(ty.unionTagTypeSafety(mod).?, un.tag.toValue()); } try self.addUndef(layout.padding); }, - .ErrorSet => switch (val.ip_index) { - .none => switch (val.tag()) { - .@"error" => { - const err_name = val.castTag(.@"error").?.data.name; - const kv = try dg.module.getErrorValue(err_name); - try self.addConstInt(u16, @intCast(u16, kv.value)); - }, - else => unreachable, - }, - else => switch (mod.intern_pool.indexToKey(val.ip_index)) { - .int => |int| try self.addConstInt(u16, @intCast(u16, int.storage.u64)), - else => unreachable, - }, - }, - .ErrorUnion => { - const payload_ty = ty.errorUnionPayload(mod); - const is_pl = val.errorUnionIsPayload(); - const error_val = if (!is_pl) val else try mod.intValue(Type.anyerror, 0); - - const eu_layout = dg.errorUnionLayout(payload_ty); - if (!eu_layout.payload_has_bits) { - return try self.lower(Type.anyerror, error_val); - } - - const payload_size = payload_ty.abiSize(mod); - const error_size = Type.anyerror.abiAlignment(mod); - const ty_size = ty.abiSize(mod); - const padding = ty_size - payload_size - error_size; - - const payload_val = if (val.castTag(.eu_payload)) |pl| pl.data else Value.undef; - - if (eu_layout.error_first) { - try self.lower(Type.anyerror, error_val); - try self.lower(payload_ty, payload_val); - } else { - try self.lower(payload_ty, payload_val); - try self.lower(Type.anyerror, error_val); - } - - try self.addUndef(padding); - }, - else => |tag| return dg.todo("indirect constant of type {s}", .{@tagName(tag)}), } } }; @@ -1542,7 +1612,7 @@ pub const DeclGen = struct { const decl_id = self.spv.declPtr(spv_decl_index).result_id; log.debug("genDecl: id = {}, index = {}, name = {s}", .{ decl_id.id, @enumToInt(spv_decl_index), decl.name }); - if (decl.val.castTag(.function)) |_| { + if (decl.getFunction(mod)) |_| { assert(decl.ty.zigTypeTag(mod) == .Fn); const prototype_id = try self.resolveTypeId(decl.ty); try 
self.func.prologue.emit(self.spv.gpa, .OpFunction, .{ @@ -1595,8 +1665,8 @@ pub const DeclGen = struct { try self.generateTestEntryPoint(fqn, spv_decl_index); } } else { - const init_val = if (decl.val.castTag(.variable)) |payload| - payload.data.init + const init_val = if (decl.getVariable(mod)) |payload| + payload.init.toValue() else decl.val; diff --git a/src/link.zig b/src/link.zig index 1f34b0f760..a44a7387e9 100644 --- a/src/link.zig +++ b/src/link.zig @@ -564,7 +564,8 @@ pub const File = struct { } /// May be called before or after updateDeclExports for any given Decl. - pub fn updateFunc(base: *File, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) UpdateDeclError!void { + pub fn updateFunc(base: *File, module: *Module, func_index: Module.Fn.Index, air: Air, liveness: Liveness) UpdateDeclError!void { + const func = module.funcPtr(func_index); const owner_decl = module.declPtr(func.owner_decl); log.debug("updateFunc {*} ({s}), type={}", .{ owner_decl, owner_decl.name, owner_decl.ty.fmt(module), @@ -575,14 +576,14 @@ pub const File = struct { } switch (base.tag) { // zig fmt: off - .coff => return @fieldParentPtr(Coff, "base", base).updateFunc(module, func, air, liveness), - .elf => return @fieldParentPtr(Elf, "base", base).updateFunc(module, func, air, liveness), - .macho => return @fieldParentPtr(MachO, "base", base).updateFunc(module, func, air, liveness), - .c => return @fieldParentPtr(C, "base", base).updateFunc(module, func, air, liveness), - .wasm => return @fieldParentPtr(Wasm, "base", base).updateFunc(module, func, air, liveness), - .spirv => return @fieldParentPtr(SpirV, "base", base).updateFunc(module, func, air, liveness), - .plan9 => return @fieldParentPtr(Plan9, "base", base).updateFunc(module, func, air, liveness), - .nvptx => return @fieldParentPtr(NvPtx, "base", base).updateFunc(module, func, air, liveness), + .coff => return @fieldParentPtr(Coff, "base", base).updateFunc(module, func_index, air, liveness), + .elf => return @fieldParentPtr(Elf, "base", base).updateFunc(module, func_index, air, liveness), + .macho => return @fieldParentPtr(MachO, "base", base).updateFunc(module, func_index, air, liveness), + .c => return @fieldParentPtr(C, "base", base).updateFunc(module, func_index, air, liveness), + .wasm => return @fieldParentPtr(Wasm, "base", base).updateFunc(module, func_index, air, liveness), + .spirv => return @fieldParentPtr(SpirV, "base", base).updateFunc(module, func_index, air, liveness), + .plan9 => return @fieldParentPtr(Plan9, "base", base).updateFunc(module, func_index, air, liveness), + .nvptx => return @fieldParentPtr(NvPtx, "base", base).updateFunc(module, func_index, air, liveness), // zig fmt: on } } diff --git a/src/link/C.zig b/src/link/C.zig index 1a25bfe231..c871d8a02a 100644 --- a/src/link/C.zig +++ b/src/link/C.zig @@ -87,12 +87,13 @@ pub fn freeDecl(self: *C, decl_index: Module.Decl.Index) void { } } -pub fn updateFunc(self: *C, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { +pub fn updateFunc(self: *C, module: *Module, func_index: Module.Fn.Index, air: Air, liveness: Liveness) !void { const tracy = trace(@src()); defer tracy.end(); const gpa = self.base.allocator; + const func = module.funcPtr(func_index); const decl_index = func.owner_decl; const gop = try self.decl_table.getOrPut(gpa, decl_index); if (!gop.found_existing) { @@ -111,7 +112,7 @@ pub fn updateFunc(self: *C, module: *Module, func: *Module.Fn, air: Air, livenes .value_map = codegen.CValueMap.init(gpa), .air = air, .liveness = liveness, 
- .func = func, + .func_index = func_index, .object = .{ .dg = .{ .gpa = gpa, @@ -555,7 +556,8 @@ fn flushDecl( export_names: std.StringHashMapUnmanaged(void), ) FlushDeclError!void { const gpa = self.base.allocator; - const decl = self.base.options.module.?.declPtr(decl_index); + const mod = self.base.options.module.?; + const decl = mod.declPtr(decl_index); // Before flushing any particular Decl we must ensure its // dependencies are already flushed, so that the order in the .c // file comes out correctly. @@ -569,7 +571,7 @@ fn flushDecl( try self.flushLazyFns(f, decl_block.lazy_fns); try f.all_buffers.ensureUnusedCapacity(gpa, 1); - if (!(decl.isExtern() and export_names.contains(mem.span(decl.name)))) + if (!(decl.isExtern(mod) and export_names.contains(mem.span(decl.name)))) f.appendBufAssumeCapacity(decl_block.fwd_decl.items); } diff --git a/src/link/Coff.zig b/src/link/Coff.zig index efaeebc62e..f4ee2fde97 100644 --- a/src/link/Coff.zig +++ b/src/link/Coff.zig @@ -1032,18 +1032,19 @@ fn freeAtom(self: *Coff, atom_index: Atom.Index) void { self.getAtomPtr(atom_index).sym_index = 0; } -pub fn updateFunc(self: *Coff, mod: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { +pub fn updateFunc(self: *Coff, mod: *Module, func_index: Module.Fn.Index, air: Air, liveness: Liveness) !void { if (build_options.skip_non_native and builtin.object_format != .coff) { @panic("Attempted to compile for object format that was disabled by build configuration"); } if (build_options.have_llvm) { if (self.llvm_object) |llvm_object| { - return llvm_object.updateFunc(mod, func, air, liveness); + return llvm_object.updateFunc(mod, func_index, air, liveness); } } const tracy = trace(@src()); defer tracy.end(); + const func = mod.funcPtr(func_index); const decl_index = func.owner_decl; const decl = mod.declPtr(decl_index); @@ -1057,7 +1058,7 @@ pub fn updateFunc(self: *Coff, mod: *Module, func: *Module.Fn, air: Air, livenes const res = try codegen.generateFunction( &self.base, decl.srcLoc(mod), - func, + func_index, air, liveness, &code_buffer, @@ -1155,11 +1156,10 @@ pub fn updateDecl( const decl = mod.declPtr(decl_index); - if (decl.val.tag() == .extern_fn) { + if (decl.getExternFunc(mod)) |_| { return; // TODO Should we do more when front-end analyzed extern decl? } - if (decl.val.castTag(.variable)) |payload| { - const variable = payload.data; + if (decl.getVariable(mod)) |variable| { if (variable.is_extern) { return; // TODO Should we do more when front-end analyzed extern decl? } @@ -1172,7 +1172,7 @@ pub fn updateDecl( var code_buffer = std.ArrayList(u8).init(self.base.allocator); defer code_buffer.deinit(); - const decl_val = if (decl.val.castTag(.variable)) |payload| payload.data.init else decl.val; + const decl_val = if (decl.getVariable(mod)) |variable| variable.init.toValue() else decl.val; const res = try codegen.generateSymbol(&self.base, decl.srcLoc(mod), .{ .ty = decl.ty, .val = decl_val, @@ -1313,7 +1313,7 @@ fn getDeclOutputSection(self: *Coff, decl_index: Module.Decl.Index) u16 { // TODO: what if this is a function pointer? .Fn => break :blk self.text_section_index.?, else => { - if (val.castTag(.variable)) |_| { + if (decl.getVariable(mod)) |_| { break :blk self.data_section_index.?; } break :blk self.rdata_section_index.?; @@ -1425,7 +1425,7 @@ pub fn updateDeclExports( // detect the default subsystem. 
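+ // Only exported functions participate in subsystem detection; with the
+ // intern pool this is now checked via getFunctionIndex(mod) == .none
+ // rather than by inspecting the legacy value tag.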
for (exports) |exp| { const exported_decl = mod.declPtr(exp.exported_decl); - if (exported_decl.getFunction() == null) continue; + if (exported_decl.getFunctionIndex(mod) == .none) continue; const winapi_cc = switch (self.base.options.target.cpu.arch) { .x86 => std.builtin.CallingConvention.Stdcall, else => std.builtin.CallingConvention.C, diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig index ed2883f4da..d6dd6979ea 100644 --- a/src/link/Dwarf.zig +++ b/src/link/Dwarf.zig @@ -971,7 +971,7 @@ pub fn initDeclState(self: *Dwarf, mod: *Module, decl_index: Module.Decl.Index) // For functions we need to add a prologue to the debug line program. try dbg_line_buffer.ensureTotalCapacity(26); - const func = decl.val.castTag(.function).?.data; + const func = decl.getFunction(mod).?; log.debug("decl.src_line={d}, func.lbrace_line={d}, func.rbrace_line={d}", .{ decl.src_line, func.lbrace_line, @@ -1514,7 +1514,7 @@ fn writeDeclDebugInfo(self: *Dwarf, atom_index: Atom.Index, dbg_info_buf: []cons } } -pub fn updateDeclLineNumber(self: *Dwarf, module: *Module, decl_index: Module.Decl.Index) !void { +pub fn updateDeclLineNumber(self: *Dwarf, mod: *Module, decl_index: Module.Decl.Index) !void { const tracy = trace(@src()); defer tracy.end(); @@ -1522,8 +1522,8 @@ pub fn updateDeclLineNumber(self: *Dwarf, module: *Module, decl_index: Module.De const atom = self.getAtom(.src_fn, atom_index); if (atom.len == 0) return; - const decl = module.declPtr(decl_index); - const func = decl.val.castTag(.function).?.data; + const decl = mod.declPtr(decl_index); + const func = decl.getFunction(mod).?; log.debug("decl.src_line={d}, func.lbrace_line={d}, func.rbrace_line={d}", .{ decl.src_line, func.lbrace_line, diff --git a/src/link/Elf.zig b/src/link/Elf.zig index b27967884e..476b939038 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -2465,7 +2465,7 @@ fn getDeclShdrIndex(self: *Elf, decl_index: Module.Decl.Index) u16 { // TODO: what if this is a function pointer? 
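+ // Section choice in brief: function bodies land in .text, mutable `var`
+ // decls in .data, and remaining constants in .rodata; the variable check
+ // now goes through decl.getVariable(mod) instead of a value-tag cast.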
.Fn => break :blk self.text_section_index.?, else => { - if (val.castTag(.variable)) |_| { + if (decl.getVariable(mod)) |_| { break :blk self.data_section_index.?; } break :blk self.rodata_section_index.?; @@ -2574,17 +2574,18 @@ fn updateDeclCode(self: *Elf, decl_index: Module.Decl.Index, code: []const u8, s return local_sym; } -pub fn updateFunc(self: *Elf, mod: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { +pub fn updateFunc(self: *Elf, mod: *Module, func_index: Module.Fn.Index, air: Air, liveness: Liveness) !void { if (build_options.skip_non_native and builtin.object_format != .elf) { @panic("Attempted to compile for object format that was disabled by build configuration"); } if (build_options.have_llvm) { - if (self.llvm_object) |llvm_object| return llvm_object.updateFunc(mod, func, air, liveness); + if (self.llvm_object) |llvm_object| return llvm_object.updateFunc(mod, func_index, air, liveness); } const tracy = trace(@src()); defer tracy.end(); + const func = mod.funcPtr(func_index); const decl_index = func.owner_decl; const decl = mod.declPtr(decl_index); @@ -2599,11 +2600,11 @@ pub fn updateFunc(self: *Elf, mod: *Module, func: *Module.Fn, air: Air, liveness defer if (decl_state) |*ds| ds.deinit(); const res = if (decl_state) |*ds| - try codegen.generateFunction(&self.base, decl.srcLoc(mod), func, air, liveness, &code_buffer, .{ + try codegen.generateFunction(&self.base, decl.srcLoc(mod), func_index, air, liveness, &code_buffer, .{ .dwarf = ds, }) else - try codegen.generateFunction(&self.base, decl.srcLoc(mod), func, air, liveness, &code_buffer, .none); + try codegen.generateFunction(&self.base, decl.srcLoc(mod), func_index, air, liveness, &code_buffer, .none); const code = switch (res) { .ok => code_buffer.items, @@ -2646,11 +2647,10 @@ pub fn updateDecl( const decl = mod.declPtr(decl_index); - if (decl.val.tag() == .extern_fn) { + if (decl.getExternFunc(mod)) |_| { return; // TODO Should we do more when front-end analyzed extern decl? } - if (decl.val.castTag(.variable)) |payload| { - const variable = payload.data; + if (decl.getVariable(mod)) |variable| { if (variable.is_extern) { return; // TODO Should we do more when front-end analyzed extern decl? 
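+ // Extern variables have no initializer to emit here; the early return is
+ // unchanged, only the lookup moved from castTag(.variable) to the
+ // intern-pool accessor decl.getVariable(mod).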
} @@ -2667,7 +2667,7 @@ pub fn updateDecl( defer if (decl_state) |*ds| ds.deinit(); // TODO implement .debug_info for global variables - const decl_val = if (decl.val.castTag(.variable)) |payload| payload.data.init else decl.val; + const decl_val = if (decl.getVariable(mod)) |variable| variable.init.toValue() else decl.val; const res = if (decl_state) |*ds| try codegen.generateSymbol(&self.base, decl.srcLoc(mod), .{ .ty = decl.ty, diff --git a/src/link/MachO.zig b/src/link/MachO.zig index e7723595db..ffbdcdb91f 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -1847,16 +1847,17 @@ fn addStubEntry(self: *MachO, target: SymbolWithLoc) !void { self.markRelocsDirtyByTarget(target); } -pub fn updateFunc(self: *MachO, mod: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { +pub fn updateFunc(self: *MachO, mod: *Module, func_index: Module.Fn.Index, air: Air, liveness: Liveness) !void { if (build_options.skip_non_native and builtin.object_format != .macho) { @panic("Attempted to compile for object format that was disabled by build configuration"); } if (build_options.have_llvm) { - if (self.llvm_object) |llvm_object| return llvm_object.updateFunc(mod, func, air, liveness); + if (self.llvm_object) |llvm_object| return llvm_object.updateFunc(mod, func_index, air, liveness); } const tracy = trace(@src()); defer tracy.end(); + const func = mod.funcPtr(func_index); const decl_index = func.owner_decl; const decl = mod.declPtr(decl_index); @@ -1874,11 +1875,11 @@ pub fn updateFunc(self: *MachO, mod: *Module, func: *Module.Fn, air: Air, livene defer if (decl_state) |*ds| ds.deinit(); const res = if (decl_state) |*ds| - try codegen.generateFunction(&self.base, decl.srcLoc(mod), func, air, liveness, &code_buffer, .{ + try codegen.generateFunction(&self.base, decl.srcLoc(mod), func_index, air, liveness, &code_buffer, .{ .dwarf = ds, }) else - try codegen.generateFunction(&self.base, decl.srcLoc(mod), func, air, liveness, &code_buffer, .none); + try codegen.generateFunction(&self.base, decl.srcLoc(mod), func_index, air, liveness, &code_buffer, .none); var code = switch (res) { .ok => code_buffer.items, @@ -1983,18 +1984,17 @@ pub fn updateDecl(self: *MachO, mod: *Module, decl_index: Module.Decl.Index) !vo const decl = mod.declPtr(decl_index); - if (decl.val.tag() == .extern_fn) { + if (decl.getExternFunc(mod)) |_| { return; // TODO Should we do more when front-end analyzed extern decl? } - if (decl.val.castTag(.variable)) |payload| { - const variable = payload.data; + if (decl.getVariable(mod)) |variable| { if (variable.is_extern) { return; // TODO Should we do more when front-end analyzed extern decl? 
} } - const is_threadlocal = if (decl.val.castTag(.variable)) |payload| - payload.data.is_threadlocal and !self.base.options.single_threaded + const is_threadlocal = if (decl.getVariable(mod)) |variable| + variable.is_threadlocal and !self.base.options.single_threaded else false; if (is_threadlocal) return self.updateThreadlocalVariable(mod, decl_index); @@ -2012,7 +2012,7 @@ pub fn updateDecl(self: *MachO, mod: *Module, decl_index: Module.Decl.Index) !vo null; defer if (decl_state) |*ds| ds.deinit(); - const decl_val = if (decl.val.castTag(.variable)) |payload| payload.data.init else decl.val; + const decl_val = if (decl.getVariable(mod)) |variable| variable.init.toValue() else decl.val; const res = if (decl_state) |*ds| try codegen.generateSymbol(&self.base, decl.srcLoc(mod), .{ .ty = decl.ty, @@ -2177,7 +2177,7 @@ fn updateThreadlocalVariable(self: *MachO, module: *Module, decl_index: Module.D const decl = module.declPtr(decl_index); const decl_metadata = self.decls.get(decl_index).?; - const decl_val = decl.val.castTag(.variable).?.data.init; + const decl_val = decl.getVariable(mod).?.init.toValue(); const res = if (decl_state) |*ds| try codegen.generateSymbol(&self.base, decl.srcLoc(mod), .{ .ty = decl.ty, @@ -2278,8 +2278,8 @@ fn getDeclOutputSection(self: *MachO, decl_index: Module.Decl.Index) u8 { } } - if (val.castTag(.variable)) |variable| { - if (variable.data.is_threadlocal and !single_threaded) { + if (decl.getVariable(mod)) |variable| { + if (variable.is_threadlocal and !single_threaded) { break :blk self.thread_data_section_index.?; } break :blk self.data_section_index.?; @@ -2289,7 +2289,7 @@ fn getDeclOutputSection(self: *MachO, decl_index: Module.Decl.Index) u8 { // TODO: what if this is a function pointer? .Fn => break :blk self.text_section_index.?, else => { - if (val.castTag(.variable)) |_| { + if (decl.getVariable(mod)) |_| { break :blk self.data_section_index.?; } break :blk self.data_const_section_index.?; diff --git a/src/link/NvPtx.zig b/src/link/NvPtx.zig index 69cd73a602..b74518d930 100644 --- a/src/link/NvPtx.zig +++ b/src/link/NvPtx.zig @@ -68,9 +68,9 @@ pub fn deinit(self: *NvPtx) void { self.base.allocator.free(self.ptx_file_name); } -pub fn updateFunc(self: *NvPtx, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { +pub fn updateFunc(self: *NvPtx, module: *Module, func_index: Module.Fn.Index, air: Air, liveness: Liveness) !void { if (!build_options.have_llvm) return; - try self.llvm_object.updateFunc(module, func, air, liveness); + try self.llvm_object.updateFunc(module, func_index, air, liveness); } pub fn updateDecl(self: *NvPtx, module: *Module, decl_index: Module.Decl.Index) !void { diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig index 968cbb0e7e..2071833b93 100644 --- a/src/link/Plan9.zig +++ b/src/link/Plan9.zig @@ -276,11 +276,12 @@ fn addPathComponents(self: *Plan9, path: []const u8, a: *std.ArrayList(u8)) !voi } } -pub fn updateFunc(self: *Plan9, mod: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { +pub fn updateFunc(self: *Plan9, mod: *Module, func_index: Module.Fn.Index, air: Air, liveness: Liveness) !void { if (build_options.skip_non_native and builtin.object_format != .plan9) { @panic("Attempted to compile for object format that was disabled by build configuration"); } + const func = mod.funcPtr(func_index); const decl_index = func.owner_decl; const decl = mod.declPtr(decl_index); self.freeUnnamedConsts(decl_index); @@ -299,7 +300,7 @@ pub fn updateFunc(self: *Plan9, mod: *Module, func: *Module.Fn, 
air: Air, livene const res = try codegen.generateFunction( &self.base, decl.srcLoc(mod), - func, + func_index, air, liveness, &code_buffer, @@ -391,11 +392,10 @@ pub fn lowerUnnamedConst(self: *Plan9, tv: TypedValue, decl_index: Module.Decl.I pub fn updateDecl(self: *Plan9, mod: *Module, decl_index: Module.Decl.Index) !void { const decl = mod.declPtr(decl_index); - if (decl.val.tag() == .extern_fn) { + if (decl.getExternFunc(mod)) |_| { return; // TODO Should we do more when front-end analyzed extern decl? } - if (decl.val.castTag(.variable)) |payload| { - const variable = payload.data; + if (decl.getVariable(mod)) |variable| { if (variable.is_extern) { return; // TODO Should we do more when front-end analyzed extern decl? } @@ -407,7 +407,7 @@ pub fn updateDecl(self: *Plan9, mod: *Module, decl_index: Module.Decl.Index) !vo var code_buffer = std.ArrayList(u8).init(self.base.allocator); defer code_buffer.deinit(); - const decl_val = if (decl.val.castTag(.variable)) |payload| payload.data.init else decl.val; + const decl_val = if (decl.getVariable(mod)) |variable| variable.init.toValue() else decl.val; // TODO we need the symbol index for symbol in the table of locals for the containing atom const res = try codegen.generateSymbol(&self.base, decl.srcLoc(mod), .{ .ty = decl.ty, @@ -771,7 +771,7 @@ pub fn freeDecl(self: *Plan9, decl_index: Module.Decl.Index) void { // in the deleteUnusedDecl function. const mod = self.base.options.module.?; const decl = mod.declPtr(decl_index); - const is_fn = (decl.val.tag() == .function); + const is_fn = decl.getFunctionIndex(mod) != .none; if (is_fn) { var symidx_and_submap = self.fn_decl_table.get(decl.getFileScope(mod)).?; var submap = symidx_and_submap.functions; diff --git a/src/link/SpirV.zig b/src/link/SpirV.zig index da25753b95..0a6608303e 100644 --- a/src/link/SpirV.zig +++ b/src/link/SpirV.zig @@ -103,11 +103,13 @@ pub fn deinit(self: *SpirV) void { self.decl_link.deinit(); } -pub fn updateFunc(self: *SpirV, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { +pub fn updateFunc(self: *SpirV, module: *Module, func_index: Module.Fn.Index, air: Air, liveness: Liveness) !void { if (build_options.skip_non_native) { @panic("Attempted to compile for architecture that was disabled by build configuration"); } + const func = module.funcPtr(func_index); + var decl_gen = codegen.DeclGen.init(self.base.allocator, module, &self.spv, &self.decl_link); defer decl_gen.deinit(); @@ -136,7 +138,7 @@ pub fn updateDeclExports( exports: []const *Module.Export, ) !void { const decl = mod.declPtr(decl_index); - if (decl.val.tag() == .function and decl.ty.fnCallingConvention(mod) == .Kernel) { + if (decl.getFunctionIndex(mod) != .none and decl.ty.fnCallingConvention(mod) == .Kernel) { // TODO: Unify with resolveDecl in spirv.zig. 
const entry = try self.decl_link.getOrPut(decl_index); if (!entry.found_existing) { diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig index ef97a7fa7f..78d1be978b 100644 --- a/src/link/Wasm.zig +++ b/src/link/Wasm.zig @@ -1324,17 +1324,18 @@ pub fn allocateSymbol(wasm: *Wasm) !u32 { return index; } -pub fn updateFunc(wasm: *Wasm, mod: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { +pub fn updateFunc(wasm: *Wasm, mod: *Module, func_index: Module.Fn.Index, air: Air, liveness: Liveness) !void { if (build_options.skip_non_native and builtin.object_format != .wasm) { @panic("Attempted to compile for object format that was disabled by build configuration"); } if (build_options.have_llvm) { - if (wasm.llvm_object) |llvm_object| return llvm_object.updateFunc(mod, func, air, liveness); + if (wasm.llvm_object) |llvm_object| return llvm_object.updateFunc(mod, func_index, air, liveness); } const tracy = trace(@src()); defer tracy.end(); + const func = mod.funcPtr(func_index); const decl_index = func.owner_decl; const decl = mod.declPtr(decl_index); const atom_index = try wasm.getOrCreateAtomForDecl(decl_index); @@ -1358,7 +1359,7 @@ pub fn updateFunc(wasm: *Wasm, mod: *Module, func: *Module.Fn, air: Air, livenes const result = try codegen.generateFunction( &wasm.base, decl.srcLoc(mod), - func, + func_index, air, liveness, &code_writer, @@ -1403,9 +1404,9 @@ pub fn updateDecl(wasm: *Wasm, mod: *Module, decl_index: Module.Decl.Index) !voi defer tracy.end(); const decl = mod.declPtr(decl_index); - if (decl.val.castTag(.function)) |_| { + if (decl.getFunction(mod)) |_| { return; - } else if (decl.val.castTag(.extern_fn)) |_| { + } else if (decl.getExternFunc(mod)) |_| { return; } @@ -1413,12 +1414,13 @@ pub fn updateDecl(wasm: *Wasm, mod: *Module, decl_index: Module.Decl.Index) !voi const atom = wasm.getAtomPtr(atom_index); atom.clear(); - if (decl.isExtern()) { - const variable = decl.getVariable().?; + if (decl.isExtern(mod)) { + const variable = decl.getVariable(mod).?; const name = mem.sliceTo(decl.name, 0); - return wasm.addOrUpdateImport(name, atom.sym_index, variable.lib_name, null); + const lib_name = mod.intern_pool.stringToSliceUnwrap(variable.lib_name); + return wasm.addOrUpdateImport(name, atom.sym_index, lib_name, null); } - const val = if (decl.val.castTag(.variable)) |payload| payload.data.init else decl.val; + const val = if (decl.getVariable(mod)) |variable| variable.init.toValue() else decl.val; var code_writer = std.ArrayList(u8).init(wasm.base.allocator); defer code_writer.deinit(); @@ -1791,7 +1793,7 @@ pub fn freeDecl(wasm: *Wasm, decl_index: Module.Decl.Index) void { assert(wasm.symbol_atom.remove(local_atom.symbolLoc())); } - if (decl.isExtern()) { + if (decl.isExtern(mod)) { _ = wasm.imports.remove(atom.symbolLoc()); } _ = wasm.resolved_symbols.swapRemove(atom.symbolLoc()); @@ -1852,7 +1854,7 @@ pub fn addOrUpdateImport( /// Symbol index that is external symbol_index: u32, /// Optional library name (i.e. `extern "c" fn foo() void` - lib_name: ?[*:0]const u8, + lib_name: ?[:0]const u8, /// The index of the type that represents the function signature /// when the extern is a function. When this is null, a data-symbol /// is asserted instead. @@ -1863,7 +1865,7 @@ pub fn addOrUpdateImport( // Also mangle the name when the lib name is set and not equal to "C" so imports with the same // name but different module can be resolved correctly. 
const mangle_name = lib_name != null and - !std.mem.eql(u8, std.mem.sliceTo(lib_name.?, 0), "c"); + !std.mem.eql(u8, lib_name.?, "c"); const full_name = if (mangle_name) full_name: { break :full_name try std.fmt.allocPrint(wasm.base.allocator, "{s}|{s}", .{ name, lib_name.? }); } else name; @@ -1889,7 +1891,7 @@ pub fn addOrUpdateImport( if (type_index) |ty_index| { const gop = try wasm.imports.getOrPut(wasm.base.allocator, .{ .index = symbol_index, .file = null }); const module_name = if (lib_name) |l_name| blk: { - break :blk mem.sliceTo(l_name, 0); + break :blk l_name; } else wasm.host_name; if (!gop.found_existing) { gop.value_ptr.* = .{ @@ -2931,7 +2933,7 @@ pub fn getErrorTableSymbol(wasm: *Wasm) !u32 { const atom_index = try wasm.createAtom(); const atom = wasm.getAtomPtr(atom_index); - const slice_ty = Type.const_slice_u8_sentinel_0; + const slice_ty = Type.slice_const_u8_sentinel_0; const mod = wasm.base.options.module.?; atom.alignment = slice_ty.abiAlignment(mod); const sym_index = atom.sym_index; @@ -2988,7 +2990,7 @@ fn populateErrorNameTable(wasm: *Wasm) !void { for (mod.error_name_list.items) |error_name| { const len = @intCast(u32, error_name.len + 1); // names are 0-terminated - const slice_ty = Type.const_slice_u8_sentinel_0; + const slice_ty = Type.slice_const_u8_sentinel_0; const offset = @intCast(u32, atom.code.items.len); // first we create the data for the slice of the name try atom.code.appendNTimes(wasm.base.allocator, 0, 4); // ptr to name, will be relocated @@ -3366,15 +3368,15 @@ pub fn flushModule(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Nod var decl_it = wasm.decls.iterator(); while (decl_it.next()) |entry| { const decl = mod.declPtr(entry.key_ptr.*); - if (decl.isExtern()) continue; + if (decl.isExtern(mod)) continue; const atom_index = entry.value_ptr.*; const atom = wasm.getAtomPtr(atom_index); if (decl.ty.zigTypeTag(mod) == .Fn) { try wasm.parseAtom(atom_index, .function); - } else if (decl.getVariable()) |variable| { - if (!variable.is_mutable) { + } else if (decl.getVariable(mod)) |variable| { + if (variable.is_const) { try wasm.parseAtom(atom_index, .{ .data = .read_only }); - } else if (variable.init.isUndefDeep(mod)) { + } else if (variable.init.toValue().isUndefDeep(mod)) { // for safe build modes, we store the atom in the data segment, // whereas for unsafe build modes we store it in bss.
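// ---------------------------------------------------------------------------
// Illustrative aside, not part of the patch: the comment above encodes a small
// decision — an undef-initialized global still gets real bytes in the data
// segment when the build mode is safety-checked, and only falls back to
// zero-filled bss in the unsafe release modes. A hedged sketch of that rule
// (the local enum stands in for the compiler's optimize-mode type):
const std = @import("std");

const OptimizeMode = enum { Debug, ReleaseSafe, ReleaseFast, ReleaseSmall };

fn undefGoesInData(mode: OptimizeMode) bool {
    return switch (mode) {
        .Debug, .ReleaseSafe => true, // keep the atom in the data segment
        .ReleaseFast, .ReleaseSmall => false, // bss suffices; contents are undefined
    };
}

test "safe modes materialize undef globals" {
    try std.testing.expect(undefGoesInData(.Debug));
    try std.testing.expect(!undefGoesInData(.ReleaseFast));
}
// ---------------------------------------------------------------------------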
const is_initialized = wasm.base.options.optimize_mode == .Debug or diff --git a/src/print_air.zig b/src/print_air.zig index ef52b4c085..9169a88bbc 100644 --- a/src/print_air.zig +++ b/src/print_air.zig @@ -699,8 +699,8 @@ const Writer = struct { fn writeDbgInline(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { const ty_pl = w.air.instructions.items(.data)[inst].ty_pl; - const function = w.air.values[ty_pl.payload].castTag(.function).?.data; - const owner_decl = w.module.declPtr(function.owner_decl); + const func_index = w.module.intern_pool.indexToFunc(w.air.values[ty_pl.payload].ip_index); + const owner_decl = w.module.declPtr(w.module.funcPtrUnwrap(func_index).?.owner_decl); try s.print("{s}", .{owner_decl.name}); } diff --git a/src/type.zig b/src/type.zig index f2fad91eba..087dc88c30 100644 --- a/src/type.zig +++ b/src/type.zig @@ -93,16 +93,23 @@ pub const Type = struct { }, // values, not types - .undef => unreachable, - .un => unreachable, - .extern_func => unreachable, - .int => unreachable, - .float => unreachable, - .ptr => unreachable, - .opt => unreachable, - .enum_tag => unreachable, - .simple_value => unreachable, - .aggregate => unreachable, + .undef, + .runtime_value, + .simple_value, + .variable, + .extern_func, + .func, + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .float, + .ptr, + .opt, + .aggregate, + .un, + => unreachable, }; } @@ -358,7 +365,7 @@ pub const Type = struct { const func = ies.func; try writer.writeAll("@typeInfo(@typeInfo(@TypeOf("); - const owner_decl = mod.declPtr(func.owner_decl); + const owner_decl = mod.declPtr(mod.funcPtr(func).owner_decl); try owner_decl.renderFullyQualifiedName(mod, writer); try writer.writeAll(")).Fn.return_type.?).ErrorUnion.error_set"); }, @@ -467,16 +474,23 @@ pub const Type = struct { }, // values, not types - .undef => unreachable, - .un => unreachable, - .simple_value => unreachable, - .extern_func => unreachable, - .int => unreachable, - .float => unreachable, - .ptr => unreachable, - .opt => unreachable, - .enum_tag => unreachable, - .aggregate => unreachable, + .undef, + .runtime_value, + .simple_value, + .variable, + .extern_func, + .func, + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .float, + .ptr, + .opt, + .aggregate, + .un, + => unreachable, } } @@ -675,16 +689,23 @@ pub const Type = struct { .enum_type => |enum_type| enum_type.tag_ty.toType().hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat), // values, not types - .undef => unreachable, - .un => unreachable, - .simple_value => unreachable, - .extern_func => unreachable, - .int => unreachable, - .float => unreachable, - .ptr => unreachable, - .opt => unreachable, - .enum_tag => unreachable, - .aggregate => unreachable, + .undef, + .runtime_value, + .simple_value, + .variable, + .extern_func, + .func, + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .float, + .ptr, + .opt, + .aggregate, + .un, + => unreachable, }, }; } @@ -777,16 +798,23 @@ pub const Type = struct { }, // values, not types - .undef => unreachable, - .un => unreachable, - .simple_value => unreachable, - .extern_func => unreachable, - .int => unreachable, - .float => unreachable, - .ptr => unreachable, - .opt => unreachable, - .enum_tag => unreachable, - .aggregate => unreachable, + .undef, + .runtime_value, + .simple_value, + .variable, + .extern_func, + .func, + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .float, + .ptr, + .opt, + .aggregate, + .un, + => unreachable, }; } @@ -866,8 +894,8 @@ pub const 
Type = struct { /// May capture a reference to `ty`. /// Returned value has type `comptime_int`. - pub fn lazyAbiAlignment(ty: Type, mod: *Module, arena: Allocator) !Value { - switch (try ty.abiAlignmentAdvanced(mod, .{ .lazy = arena })) { + pub fn lazyAbiAlignment(ty: Type, mod: *Module) !Value { + switch (try ty.abiAlignmentAdvanced(mod, .lazy)) { .val => |val| return val, .scalar => |x| return mod.intValue(Type.comptime_int, x), } @@ -880,7 +908,7 @@ pub const Type = struct { pub const AbiAlignmentAdvancedStrat = union(enum) { eager, - lazy: Allocator, + lazy, sema: *Sema, }; @@ -1019,16 +1047,18 @@ pub const Type = struct { if (!struct_obj.haveFieldTypes()) switch (strat) { .eager => unreachable, // struct layout not resolved .sema => unreachable, // handled above - .lazy => |arena| return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) }, + .lazy => return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_align = ty.ip_index }, + } })).toValue() }, }; if (struct_obj.layout == .Packed) { switch (strat) { .sema => |sema| try sema.resolveTypeLayout(ty), - .lazy => |arena| { - if (!struct_obj.haveLayout()) { - return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) }; - } - }, + .lazy => if (!struct_obj.haveLayout()) return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_align = ty.ip_index }, + } })).toValue() }, .eager => {}, } assert(struct_obj.haveLayout()); @@ -1039,7 +1069,10 @@ pub const Type = struct { var big_align: u32 = 0; for (fields.values()) |field| { if (!(field.ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { - error.NeedLazy => return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(strat.lazy, ty) }, + error.NeedLazy => return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_align = ty.ip_index }, + } })).toValue() }, else => |e| return e, })) continue; @@ -1050,7 +1083,10 @@ pub const Type = struct { .val => switch (strat) { .eager => unreachable, // struct layout not resolved .sema => unreachable, // handled above - .lazy => |arena| return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) }, + .lazy => return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_align = ty.ip_index }, + } })).toValue() }, }, }; big_align = @max(big_align, field_align); @@ -1077,7 +1113,10 @@ pub const Type = struct { .val => switch (strat) { .eager => unreachable, // field type alignment not resolved .sema => unreachable, // passed to abiAlignmentAdvanced above - .lazy => |arena| return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) }, + .lazy => return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_align = ty.ip_index }, + } })).toValue() }, }, } } @@ -1092,16 +1131,23 @@ pub const Type = struct { .enum_type => |enum_type| return AbiAlignmentAdvanced{ .scalar = enum_type.tag_ty.toType().abiAlignment(mod) }, // values, not types - .undef => unreachable, - .un => unreachable, - .simple_value => unreachable, - .extern_func => unreachable, - .int => unreachable, - .float => unreachable, - .ptr => unreachable, - .opt => unreachable, - .enum_tag => unreachable, - .aggregate => unreachable, + .undef, + .runtime_value, + .simple_value, + .variable, + .extern_func, + .func, + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .float, + .ptr, + .opt, + .aggregate, 
+ .un, + => unreachable, }, } } @@ -1118,7 +1164,10 @@ pub const Type = struct { switch (strat) { .eager, .sema => { if (!(payload_ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { - error.NeedLazy => return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(strat.lazy, ty) }, + error.NeedLazy => return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_align = ty.ip_index }, + } })).toValue() }, else => |e| return e, })) { return AbiAlignmentAdvanced{ .scalar = code_align }; @@ -1128,7 +1177,7 @@ pub const Type = struct { (try payload_ty.abiAlignmentAdvanced(mod, strat)).scalar, ) }; }, - .lazy => |arena| { + .lazy => { switch (try payload_ty.abiAlignmentAdvanced(mod, strat)) { .scalar => |payload_align| { return AbiAlignmentAdvanced{ @@ -1137,7 +1186,10 @@ pub const Type = struct { }, .val => {}, } - return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) }; + return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_align = ty.ip_index }, + } })).toValue() }; }, } } @@ -1160,16 +1212,22 @@ pub const Type = struct { switch (strat) { .eager, .sema => { if (!(child_type.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { - error.NeedLazy => return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(strat.lazy, ty) }, + error.NeedLazy => return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_align = ty.ip_index }, + } })).toValue() }, else => |e| return e, })) { return AbiAlignmentAdvanced{ .scalar = 1 }; } return child_type.abiAlignmentAdvanced(mod, strat); }, - .lazy => |arena| switch (try child_type.abiAlignmentAdvanced(mod, strat)) { + .lazy => switch (try child_type.abiAlignmentAdvanced(mod, strat)) { .scalar => |x| return AbiAlignmentAdvanced{ .scalar = @max(x, 1) }, - .val => return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) }, + .val => return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_align = ty.ip_index }, + } })).toValue() }, }, } } @@ -1198,7 +1256,10 @@ pub const Type = struct { if (!union_obj.haveFieldTypes()) switch (strat) { .eager => unreachable, // union layout not resolved .sema => unreachable, // handled above - .lazy => |arena| return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) }, + .lazy => return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_align = ty.ip_index }, + } })).toValue() }, }; if (union_obj.fields.count() == 0) { if (have_tag) { @@ -1212,7 +1273,10 @@ pub const Type = struct { if (have_tag) max_align = union_obj.tag_ty.abiAlignment(mod); for (union_obj.fields.values()) |field| { if (!(field.ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { - error.NeedLazy => return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(strat.lazy, ty) }, + error.NeedLazy => return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_align = ty.ip_index }, + } })).toValue() }, else => |e| return e, })) continue; @@ -1223,7 +1287,10 @@ pub const Type = struct { .val => switch (strat) { .eager => unreachable, // struct layout not resolved .sema => unreachable, // handled above - .lazy => |arena| return AbiAlignmentAdvanced{ .val = try Value.Tag.lazy_align.create(arena, ty) }, + .lazy => return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ 
.lazy_align = ty.ip_index }, + } })).toValue() }, }, }; max_align = @max(max_align, field_align); @@ -1232,8 +1299,8 @@ pub const Type = struct { } /// May capture a reference to `ty`. - pub fn lazyAbiSize(ty: Type, mod: *Module, arena: Allocator) !Value { - switch (try ty.abiSizeAdvanced(mod, .{ .lazy = arena })) { + pub fn lazyAbiSize(ty: Type, mod: *Module) !Value { + switch (try ty.abiSizeAdvanced(mod, .lazy)) { .val => |val| return val, .scalar => |x| return mod.intValue(Type.comptime_int, x), } @@ -1283,7 +1350,10 @@ pub const Type = struct { .scalar => |elem_size| return .{ .scalar = len * elem_size }, .val => switch (strat) { .sema, .eager => unreachable, - .lazy => |arena| return .{ .val = try Value.Tag.lazy_size.create(arena, ty) }, + .lazy => return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_size = ty.ip_index }, + } })).toValue() }, }, } }, @@ -1291,9 +1361,10 @@ pub const Type = struct { const opt_sema = switch (strat) { .sema => |sema| sema, .eager => null, - .lazy => |arena| return AbiSizeAdvanced{ - .val = try Value.Tag.lazy_size.create(arena, ty), - }, + .lazy => return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_size = ty.ip_index }, + } })).toValue() }, }; const elem_bits_u64 = try vector_type.child.toType().bitSizeAdvanced(mod, opt_sema); const elem_bits = @intCast(u32, elem_bits_u64); @@ -1301,9 +1372,10 @@ pub const Type = struct { const total_bytes = (total_bits + 7) / 8; const alignment = switch (try ty.abiAlignmentAdvanced(mod, strat)) { .scalar => |x| x, - .val => return AbiSizeAdvanced{ - .val = try Value.Tag.lazy_size.create(strat.lazy, ty), - }, + .val => return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_size = ty.ip_index }, + } })).toValue() }, }; const result = std.mem.alignForwardGeneric(u32, total_bytes, alignment); return AbiSizeAdvanced{ .scalar = result }; @@ -1320,7 +1392,10 @@ pub const Type = struct { // in abiAlignmentAdvanced. const code_size = abiSize(Type.anyerror, mod); if (!(payload_ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { - error.NeedLazy => return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(strat.lazy, ty) }, + error.NeedLazy => return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_size = ty.ip_index }, + } })).toValue() }, else => |e| return e, })) { // Same as anyerror. 
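// ---------------------------------------------------------------------------
// Illustrative aside, not part of the patch: the repeated rewrite in these
// hunks drops the arena-allocated `Value.Tag.lazy_align` / `lazy_size`
// payloads in favor of interning an integer whose *storage* records the type
// it is lazily derived from, which is why `lazyAbiAlignment` no longer takes
// an arena. A hypothetical mini-model of such a storage union (the real
// InternPool field types differ):
const std = @import("std");

const IntStorage = union(enum) {
    u64: u64,
    i64: i64,
    lazy_align: u32, // interned index of the type whose ABI alignment this is
    lazy_size: u32, // interned index of the type whose ABI size this is
};

test "lazy storage carries only the type index until layout is resolved" {
    const v: IntStorage = .{ .lazy_align = 42 };
    try std.testing.expect(v == .lazy_align);
}
// ---------------------------------------------------------------------------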
@@ -1333,7 +1408,10 @@ pub const Type = struct { .val => switch (strat) { .sema => unreachable, .eager => unreachable, - .lazy => |arena| return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(arena, ty) }, + .lazy => return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_size = ty.ip_index }, + } })).toValue() }, }, }; @@ -1420,11 +1498,10 @@ pub const Type = struct { switch (strat) { .sema => |sema| try sema.resolveTypeLayout(ty), - .lazy => |arena| { - if (!struct_obj.haveLayout()) { - return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(arena, ty) }; - } - }, + .lazy => if (!struct_obj.haveLayout()) return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_size = ty.ip_index }, + } })).toValue() }, .eager => {}, } assert(struct_obj.haveLayout()); @@ -1433,12 +1510,13 @@ pub const Type = struct { else => { switch (strat) { .sema => |sema| try sema.resolveTypeLayout(ty), - .lazy => |arena| { + .lazy => { const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return AbiSizeAdvanced{ .scalar = 0 }; - if (!struct_obj.haveLayout()) { - return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(arena, ty) }; - } + if (!struct_obj.haveLayout()) return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_size = ty.ip_index }, + } })).toValue() }; }, .eager => {}, } @@ -1469,16 +1547,23 @@ pub const Type = struct { .enum_type => |enum_type| return AbiSizeAdvanced{ .scalar = enum_type.tag_ty.toType().abiSize(mod) }, // values, not types - .undef => unreachable, - .un => unreachable, - .simple_value => unreachable, - .extern_func => unreachable, - .int => unreachable, - .float => unreachable, - .ptr => unreachable, - .opt => unreachable, - .enum_tag => unreachable, - .aggregate => unreachable, + .undef, + .runtime_value, + .simple_value, + .variable, + .extern_func, + .func, + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .float, + .ptr, + .opt, + .aggregate, + .un, + => unreachable, }, } } @@ -1492,11 +1577,10 @@ pub const Type = struct { ) Module.CompileError!AbiSizeAdvanced { switch (strat) { .sema => |sema| try sema.resolveTypeLayout(ty), - .lazy => |arena| { - if (!union_obj.haveLayout()) { - return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(arena, ty) }; - } - }, + .lazy => if (!union_obj.haveLayout()) return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_size = ty.ip_index }, + } })).toValue() }, .eager => {}, } return AbiSizeAdvanced{ .scalar = union_obj.abiSize(mod, have_tag) }; @@ -1514,7 +1598,10 @@ pub const Type = struct { } if (!(child_ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { - error.NeedLazy => return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(strat.lazy, ty) }, + error.NeedLazy => return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_size = ty.ip_index }, + } })).toValue() }, else => |e| return e, })) return AbiSizeAdvanced{ .scalar = 1 }; @@ -1527,7 +1614,10 @@ pub const Type = struct { .val => switch (strat) { .sema => unreachable, .eager => unreachable, - .lazy => |arena| return AbiSizeAdvanced{ .val = try Value.Tag.lazy_size.create(arena, ty) }, + .lazy => return .{ .val = (try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .lazy_size = ty.ip_index }, + } })).toValue() }, }, }; @@ -1690,16 +1780,23 @@ pub const Type = struct { .enum_type => |enum_type| return 
bitSizeAdvanced(enum_type.tag_ty.toType(), mod, opt_sema), // values, not types - .undef => unreachable, - .un => unreachable, - .simple_value => unreachable, - .extern_func => unreachable, - .int => unreachable, - .float => unreachable, - .ptr => unreachable, - .opt => unreachable, - .enum_tag => unreachable, - .aggregate => unreachable, + .undef, + .runtime_value, + .simple_value, + .variable, + .extern_func, + .func, + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .float, + .ptr, + .opt, + .aggregate, + .un, + => unreachable, } } @@ -2270,16 +2367,23 @@ pub const Type = struct { .opaque_type => unreachable, // values, not types - .undef => unreachable, - .un => unreachable, - .simple_value => unreachable, - .extern_func => unreachable, - .int => unreachable, - .float => unreachable, - .ptr => unreachable, - .opt => unreachable, - .enum_tag => unreachable, - .aggregate => unreachable, + .undef, + .runtime_value, + .simple_value, + .variable, + .extern_func, + .func, + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .float, + .ptr, + .opt, + .aggregate, + .un, + => unreachable, }, }; } @@ -2443,16 +2547,17 @@ pub const Type = struct { .inferred_error_set_type, => return null, - .array_type => |array_type| { - if (array_type.len == 0) - return Value.initTag(.empty_array); - if ((try array_type.child.toType().onePossibleValue(mod)) != null) - return Value.initTag(.the_only_possible_value); - return null; - }, - .vector_type => |vector_type| { - if (vector_type.len == 0) return Value.initTag(.empty_array); - if (try vector_type.child.toType().onePossibleValue(mod)) |v| return v; + inline .array_type, .vector_type => |seq_type| { + if (seq_type.len == 0) return (try mod.intern(.{ .aggregate = .{ + .ty = ty.ip_index, + .storage = .{ .elems = &.{} }, + } })).toValue(); + if (try seq_type.child.toType().onePossibleValue(mod)) |opv| { + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.ip_index, + .storage = .{ .repeated_elem = opv.ip_index }, + } })).toValue(); + } return null; }, .opt_type => |child| { @@ -2595,16 +2700,23 @@ pub const Type = struct { }, // values, not types - .undef => unreachable, - .un => unreachable, - .simple_value => unreachable, - .extern_func => unreachable, - .int => unreachable, - .float => unreachable, - .ptr => unreachable, - .opt => unreachable, - .enum_tag => unreachable, - .aggregate => unreachable, + .undef, + .runtime_value, + .simple_value, + .variable, + .extern_func, + .func, + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .float, + .ptr, + .opt, + .aggregate, + .un, + => unreachable, }, }; } @@ -2733,16 +2845,23 @@ pub const Type = struct { .enum_type => |enum_type| enum_type.tag_ty.toType().comptimeOnly(mod), // values, not types - .undef => unreachable, - .un => unreachable, - .simple_value => unreachable, - .extern_func => unreachable, - .int => unreachable, - .float => unreachable, - .ptr => unreachable, - .opt => unreachable, - .enum_tag => unreachable, - .aggregate => unreachable, + .undef, + .runtime_value, + .simple_value, + .variable, + .extern_func, + .func, + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .float, + .ptr, + .opt, + .aggregate, + .un, + => unreachable, }, }; } @@ -2802,13 +2921,12 @@ pub const Type = struct { } // Works for vectors and vectors of integers. 
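// ---------------------------------------------------------------------------
// Illustrative aside, not part of the patch: `minInt`/`maxInt` below now splat
// the scalar bound across a vector type by interning an aggregate whose
// storage is a single `repeated_elem`, instead of building the old
// `Value.Tag.repeated` payload. A hypothetical mini-model of that storage
// choice (u32 indices are stand-ins for interned values):
const std = @import("std");

const Aggregate = struct {
    len: usize,
    storage: union(enum) {
        elems: []const u32, // one interned value index per element
        repeated_elem: u32, // a single index, logically repeated `len` times
    },

    fn elem(agg: Aggregate, i: usize) u32 {
        std.debug.assert(i < agg.len);
        return switch (agg.storage) {
            .elems => |e| e[i],
            .repeated_elem => |r| r,
        };
    }
};

test "a splat answers every index with the same interned value" {
    const splat = Aggregate{ .len = 4, .storage = .{ .repeated_elem = 7 } };
    try std.testing.expectEqual(@as(u32, 7), splat.elem(3));
}
// ---------------------------------------------------------------------------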
- pub fn minInt(ty: Type, arena: Allocator, mod: *Module) !Value { + pub fn minInt(ty: Type, mod: *Module) !Value { const scalar = try minIntScalar(ty.scalarType(mod), mod); - if (ty.zigTypeTag(mod) == .Vector and scalar.tag() != .the_only_possible_value) { - return Value.Tag.repeated.create(arena, scalar); - } else { - return scalar; - } + return if (ty.zigTypeTag(mod) == .Vector) (try mod.intern(.{ .aggregate = .{ + .ty = ty.ip_index, + .storage = .{ .repeated_elem = scalar.ip_index }, + } })).toValue() else scalar; } /// Asserts that the type is an integer. @@ -2832,13 +2950,12 @@ pub const Type = struct { // Works for vectors and vectors of integers. /// The returned Value will have type dest_ty. - pub fn maxInt(ty: Type, arena: Allocator, mod: *Module, dest_ty: Type) !Value { + pub fn maxInt(ty: Type, mod: *Module, dest_ty: Type) !Value { const scalar = try maxIntScalar(ty.scalarType(mod), mod, dest_ty); - if (ty.zigTypeTag(mod) == .Vector and scalar.tag() != .the_only_possible_value) { - return Value.Tag.repeated.create(arena, scalar); - } else { - return scalar; - } + return if (ty.zigTypeTag(mod) == .Vector) (try mod.intern(.{ .aggregate = .{ + .ty = ty.ip_index, + .storage = .{ .repeated_elem = scalar.ip_index }, + } })).toValue() else scalar; } /// The returned Value will have type dest_ty. @@ -3386,12 +3503,12 @@ pub const Type = struct { pub const @"c_ulonglong": Type = .{ .ip_index = .c_ulonglong_type }; pub const @"c_longdouble": Type = .{ .ip_index = .c_longdouble_type }; - pub const const_slice_u8: Type = .{ .ip_index = .const_slice_u8_type }; + pub const slice_const_u8: Type = .{ .ip_index = .slice_const_u8_type }; pub const manyptr_u8: Type = .{ .ip_index = .manyptr_u8_type }; pub const single_const_pointer_to_comptime_int: Type = .{ .ip_index = .single_const_pointer_to_comptime_int_type, }; - pub const const_slice_u8_sentinel_0: Type = .{ .ip_index = .const_slice_u8_sentinel_0_type }; + pub const slice_const_u8_sentinel_0: Type = .{ .ip_index = .slice_const_u8_sentinel_0_type }; pub const empty_struct_literal: Type = .{ .ip_index = .empty_struct_type }; pub const generic_poison: Type = .{ .ip_index = .generic_poison_type }; diff --git a/src/value.zig b/src/value.zig index b1c94d46b5..47215e588c 100644 --- a/src/value.zig +++ b/src/value.zig @@ -33,64 +33,12 @@ pub const Value = struct { // Keep in sync with tools/stage2_pretty_printers_common.py pub const Tag = enum(usize) { // The first section of this enum are tags that require no payload. - /// The only possible value for a particular type, which is stored externally. - the_only_possible_value, - - empty_array, // See last_no_payload_tag below. // After this, the tag requires a payload. - function, - extern_fn, - /// A comptime-known pointer can point to the address of a global - /// variable. The child element value in this case will have this tag. - variable, - /// A wrapper for values which are comptime-known but should - /// semantically be runtime-known. - runtime_value, - /// Represents a pointer to a Decl. - /// When machine codegen backend sees this, it must set the Decl's `alive` field to true. - decl_ref, - /// Pointer to a Decl, but allows comptime code to mutate the Decl's Value. - /// This Tag will never be seen by machine codegen backends. It is changed into a - /// `decl_ref` when a comptime variable goes out of scope. - decl_ref_mut, - /// Behaves like `decl_ref_mut` but validates that the stored value matches the field value. 
- comptime_field_ptr, - /// Pointer to a specific element of an array, vector or slice. - elem_ptr, - /// Pointer to a specific field of a struct or union. - field_ptr, /// A slice of u8 whose memory is managed externally. bytes, /// Similar to bytes however it stores an index relative to `Module.string_literal_bytes`. str_lit, - /// This value is repeated some number of times. The amount of times to repeat - /// is stored externally. - repeated, - /// An array with length 0 but it has a sentinel. - empty_array_sentinel, - /// Pointer and length as sub `Value` objects. - slice, - enum_literal, - @"error", - /// When the type is error union: - /// * If the tag is `.@"error"`, the error union is an error. - /// * If the tag is `.eu_payload`, the error union is a payload. - /// * A nested error such as `anyerror!(anyerror!T)` in which the the outer error union - /// is non-error, but the inner error union is an error, is represented as - /// a tag of `.eu_payload`, with a sub-tag of `.@"error"`. - eu_payload, - /// A pointer to the payload of an error union, based on a pointer to an error union. - eu_payload_ptr, - /// When the type is optional: - /// * If the tag is `.null_value`, the optional is null. - /// * If the tag is `.opt_payload`, the optional is a payload. - /// * A nested optional such as `??T` in which the the outer optional - /// is non-null, but the inner optional is null, is represented as - /// a tag of `.opt_payload`, with a sub-tag of `.null_value`. - opt_payload, - /// A pointer to the payload of an optional, based on a pointer to an optional. - opt_payload_ptr, /// An instance of a struct, array, or vector. /// Each element/field stored as a `Value`. /// In the case of sentinel-terminated arrays, the sentinel value *is* stored, @@ -104,57 +52,19 @@ pub const Value = struct { /// Used to coordinate alloc_inferred, store_to_inferred_ptr, and resolve_inferred_alloc /// instructions for comptime code. inferred_alloc_comptime, - /// The ABI alignment of the payload type. - lazy_align, - /// The ABI size of the payload type. 
- lazy_size, - pub const last_no_payload_tag = Tag.empty_array; - pub const no_payload_count = @enumToInt(last_no_payload_tag) + 1; + pub const no_payload_count = 0; pub fn Type(comptime t: Tag) type { return switch (t) { - .the_only_possible_value, - .empty_array, - => @compileError("Value Tag " ++ @tagName(t) ++ " has no payload"), - - .extern_fn => Payload.ExternFn, - - .decl_ref => Payload.Decl, - - .repeated, - .eu_payload, - .opt_payload, - .empty_array_sentinel, - .runtime_value, - => Payload.SubValue, - - .eu_payload_ptr, - .opt_payload_ptr, - => Payload.PayloadPtr, - - .bytes, - .enum_literal, - => Payload.Bytes, + .bytes => Payload.Bytes, .str_lit => Payload.StrLit, - .slice => Payload.Slice, - - .lazy_align, - .lazy_size, - => Payload.Ty, - - .function => Payload.Function, - .variable => Payload.Variable, - .decl_ref_mut => Payload.DeclRefMut, - .elem_ptr => Payload.ElemPtr, - .field_ptr => Payload.FieldPtr, - .@"error" => Payload.Error, + .inferred_alloc => Payload.InferredAlloc, .inferred_alloc_comptime => Payload.InferredAllocComptime, .aggregate => Payload.Aggregate, .@"union" => Payload.Union, - .comptime_field_ptr => Payload.ComptimeFieldPtr, }; } @@ -249,91 +159,6 @@ pub const Value = struct { .legacy = .{ .tag_if_small_enough = self.legacy.tag_if_small_enough }, }; } else switch (self.legacy.ptr_otherwise.tag) { - .the_only_possible_value, - .empty_array, - => unreachable, - - .lazy_align, .lazy_size => { - const payload = self.cast(Payload.Ty).?; - const new_payload = try arena.create(Payload.Ty); - new_payload.* = .{ - .base = payload.base, - .data = payload.data, - }; - return Value{ - .ip_index = .none, - .legacy = .{ .ptr_otherwise = &new_payload.base }, - }; - }, - .function => return self.copyPayloadShallow(arena, Payload.Function), - .extern_fn => return self.copyPayloadShallow(arena, Payload.ExternFn), - .variable => return self.copyPayloadShallow(arena, Payload.Variable), - .decl_ref => return self.copyPayloadShallow(arena, Payload.Decl), - .decl_ref_mut => return self.copyPayloadShallow(arena, Payload.DeclRefMut), - .eu_payload_ptr, - .opt_payload_ptr, - => { - const payload = self.cast(Payload.PayloadPtr).?; - const new_payload = try arena.create(Payload.PayloadPtr); - new_payload.* = .{ - .base = payload.base, - .data = .{ - .container_ptr = try payload.data.container_ptr.copy(arena), - .container_ty = payload.data.container_ty, - }, - }; - return Value{ - .ip_index = .none, - .legacy = .{ .ptr_otherwise = &new_payload.base }, - }; - }, - .comptime_field_ptr => { - const payload = self.cast(Payload.ComptimeFieldPtr).?; - const new_payload = try arena.create(Payload.ComptimeFieldPtr); - new_payload.* = .{ - .base = payload.base, - .data = .{ - .field_val = try payload.data.field_val.copy(arena), - .field_ty = payload.data.field_ty, - }, - }; - return Value{ - .ip_index = .none, - .legacy = .{ .ptr_otherwise = &new_payload.base }, - }; - }, - .elem_ptr => { - const payload = self.castTag(.elem_ptr).?; - const new_payload = try arena.create(Payload.ElemPtr); - new_payload.* = .{ - .base = payload.base, - .data = .{ - .array_ptr = try payload.data.array_ptr.copy(arena), - .elem_ty = payload.data.elem_ty, - .index = payload.data.index, - }, - }; - return Value{ - .ip_index = .none, - .legacy = .{ .ptr_otherwise = &new_payload.base }, - }; - }, - .field_ptr => { - const payload = self.castTag(.field_ptr).?; - const new_payload = try arena.create(Payload.FieldPtr); - new_payload.* = .{ - .base = payload.base, - .data = .{ - .container_ptr = try 
payload.data.container_ptr.copy(arena), - .container_ty = payload.data.container_ty, - .field_index = payload.data.field_index, - }, - }; - return Value{ - .ip_index = .none, - .legacy = .{ .ptr_otherwise = &new_payload.base }, - }; - }, .bytes => { const bytes = self.castTag(.bytes).?.data; const new_payload = try arena.create(Payload.Bytes); @@ -347,52 +172,6 @@ pub const Value = struct { }; }, .str_lit => return self.copyPayloadShallow(arena, Payload.StrLit), - .repeated, - .eu_payload, - .opt_payload, - .empty_array_sentinel, - .runtime_value, - => { - const payload = self.cast(Payload.SubValue).?; - const new_payload = try arena.create(Payload.SubValue); - new_payload.* = .{ - .base = payload.base, - .data = try payload.data.copy(arena), - }; - return Value{ - .ip_index = .none, - .legacy = .{ .ptr_otherwise = &new_payload.base }, - }; - }, - .slice => { - const payload = self.castTag(.slice).?; - const new_payload = try arena.create(Payload.Slice); - new_payload.* = .{ - .base = payload.base, - .data = .{ - .ptr = try payload.data.ptr.copy(arena), - .len = try payload.data.len.copy(arena), - }, - }; - return Value{ - .ip_index = .none, - .legacy = .{ .ptr_otherwise = &new_payload.base }, - }; - }, - .enum_literal => { - const payload = self.castTag(.enum_literal).?; - const new_payload = try arena.create(Payload.Bytes); - new_payload.* = .{ - .base = payload.base, - .data = try arena.dupe(u8, payload.data), - }; - return Value{ - .ip_index = .none, - .legacy = .{ .ptr_otherwise = &new_payload.base }, - }; - }, - .@"error" => return self.copyPayloadShallow(arena, Payload.Error), - .aggregate => { const payload = self.castTag(.aggregate).?; const new_payload = try arena.create(Payload.Aggregate); @@ -453,7 +232,7 @@ pub const Value = struct { pub fn dump( start_val: Value, comptime fmt: []const u8, - options: std.fmt.FormatOptions, + _: std.fmt.FormatOptions, out_stream: anytype, ) !void { comptime assert(fmt.len == 0); @@ -469,44 +248,6 @@ pub const Value = struct { .@"union" => { return out_stream.writeAll("(union value)"); }, - .the_only_possible_value => return out_stream.writeAll("(the only possible value)"), - .lazy_align => { - try out_stream.writeAll("@alignOf("); - try val.castTag(.lazy_align).?.data.dump("", options, out_stream); - return try out_stream.writeAll(")"); - }, - .lazy_size => { - try out_stream.writeAll("@sizeOf("); - try val.castTag(.lazy_size).?.data.dump("", options, out_stream); - return try out_stream.writeAll(")"); - }, - .runtime_value => return out_stream.writeAll("[runtime value]"), - .function => return out_stream.print("(function decl={d})", .{val.castTag(.function).?.data.owner_decl}), - .extern_fn => return out_stream.writeAll("(extern function)"), - .variable => return out_stream.writeAll("(variable)"), - .decl_ref_mut => { - const decl_index = val.castTag(.decl_ref_mut).?.data.decl_index; - return out_stream.print("(decl_ref_mut {d})", .{decl_index}); - }, - .decl_ref => { - const decl_index = val.castTag(.decl_ref).?.data; - return out_stream.print("(decl_ref {d})", .{decl_index}); - }, - .comptime_field_ptr => { - return out_stream.writeAll("(comptime_field_ptr)"); - }, - .elem_ptr => { - const elem_ptr = val.castTag(.elem_ptr).?.data; - try out_stream.print("&[{}] ", .{elem_ptr.index}); - val = elem_ptr.array_ptr; - }, - .field_ptr => { - const field_ptr = val.castTag(.field_ptr).?.data; - try out_stream.print("fieldptr({d}) ", .{field_ptr.field_index}); - val = field_ptr.container_ptr; - }, - .empty_array => return out_stream.writeAll(".{}"), - 
.enum_literal => return out_stream.print(".{}", .{std.zig.fmtId(val.castTag(.enum_literal).?.data)}), .bytes => return out_stream.print("\"{}\"", .{std.zig.fmtEscapes(val.castTag(.bytes).?.data)}), .str_lit => { const str_lit = val.castTag(.str_lit).?.data; @@ -514,31 +255,8 @@ pub const Value = struct { str_lit.index, str_lit.len, }); }, - .repeated => { - try out_stream.writeAll("(repeated) "); - val = val.castTag(.repeated).?.data; - }, - .empty_array_sentinel => return out_stream.writeAll("(empty array with sentinel)"), - .slice => return out_stream.writeAll("(slice)"), - .@"error" => return out_stream.print("error.{s}", .{val.castTag(.@"error").?.data.name}), - .eu_payload => { - try out_stream.writeAll("(eu_payload) "); - val = val.castTag(.eu_payload).?.data; - }, - .opt_payload => { - try out_stream.writeAll("(opt_payload) "); - val = val.castTag(.opt_payload).?.data; - }, .inferred_alloc => return out_stream.writeAll("(inferred allocation value)"), .inferred_alloc_comptime => return out_stream.writeAll("(inferred comptime allocation value)"), - .eu_payload_ptr => { - try out_stream.writeAll("(eu_payload_ptr)"); - val = val.castTag(.eu_payload_ptr).?.data.container_ptr; - }, - .opt_payload_ptr => { - try out_stream.writeAll("(opt_payload_ptr)"); - val = val.castTag(.opt_payload_ptr).?.data.container_ptr; - }, }; } @@ -569,30 +287,23 @@ pub const Value = struct { const bytes = mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; return allocator.dupe(u8, bytes); }, - .enum_literal => return allocator.dupe(u8, val.castTag(.enum_literal).?.data), - .repeated => { - const byte = @intCast(u8, val.castTag(.repeated).?.data.toUnsignedInt(mod)); - const result = try allocator.alloc(u8, @intCast(usize, ty.arrayLen(mod))); - @memset(result, byte); - return result; - }, - .decl_ref => { - const decl_index = val.castTag(.decl_ref).?.data; - const decl = mod.declPtr(decl_index); - const decl_val = try decl.value(); - return decl_val.toAllocatedBytes(decl.ty, allocator, mod); - }, - .the_only_possible_value => return &[_]u8{}, - .slice => { - const slice = val.castTag(.slice).?.data; - return arrayToAllocatedBytes(slice.ptr, slice.len.toUnsignedInt(mod), allocator, mod); - }, else => return arrayToAllocatedBytes(val, ty.arrayLen(mod), allocator, mod), }, - else => switch (mod.intern_pool.indexToKey(val.ip_index)) { + else => return switch (mod.intern_pool.indexToKey(val.ip_index)) { + .enum_literal => |enum_literal| allocator.dupe(u8, mod.intern_pool.stringToSlice(enum_literal)), .ptr => |ptr| switch (ptr.len) { .none => unreachable, - else => return arrayToAllocatedBytes(val, ptr.len.toValue().toUnsignedInt(mod), allocator, mod), + else => arrayToAllocatedBytes(val, ptr.len.toValue().toUnsignedInt(mod), allocator, mod), + }, + .aggregate => |aggregate| switch (aggregate.storage) { + .bytes => |bytes| try allocator.dupe(u8, bytes), + .elems => arrayToAllocatedBytes(val, ty.arrayLen(mod), allocator, mod), + .repeated_elem => |elem| { + const byte = @intCast(u8, elem.toValue().toUnsignedInt(mod)); + const result = try allocator.alloc(u8, @intCast(usize, ty.arrayLen(mod))); + @memset(result, byte); + return result; + }, }, else => unreachable, }, @@ -611,29 +322,6 @@ pub const Value = struct { pub fn intern(val: Value, ty: Type, mod: *Module) Allocator.Error!InternPool.Index { if (val.ip_index != .none) return mod.intern_pool.getCoerced(mod.gpa, val.ip_index, ty.ip_index); switch (val.tag()) { - .elem_ptr => { - const pl = val.castTag(.elem_ptr).?.data; - return mod.intern(.{ .ptr = .{ 
- .ty = ty.ip_index, - .addr = .{ .elem = .{ - .base = pl.array_ptr.ip_index, - .index = pl.index, - } }, - } }); - }, - .slice => { - const pl = val.castTag(.slice).?.data; - const ptr = try pl.ptr.intern(ty.slicePtrFieldType(mod), mod); - return mod.intern(.{ .ptr = .{ - .ty = ty.ip_index, - .addr = mod.intern_pool.indexToKey(ptr).ptr.addr, - .len = try pl.len.intern(Type.usize, mod), - } }); - }, - .opt_payload => return mod.intern(.{ .opt = .{ - .ty = ty.ip_index, - .val = try val.castTag(.opt_payload).?.data.intern(ty.childType(mod), mod), - } }), .aggregate => { const old_elems = val.castTag(.aggregate).?.data; const new_elems = try mod.gpa.alloc(InternPool.Index, old_elems.len); @@ -651,13 +339,6 @@ pub const Value = struct { .storage = .{ .elems = new_elems }, } }); }, - .repeated => return mod.intern(.{ .aggregate = .{ - .ty = ty.ip_index, - .storage = .{ .repeated_elem = try val.castTag(.repeated).?.data.intern( - ty.structFieldType(0, mod), - mod, - ) }, - } }), .@"union" => { const pl = val.castTag(.@"union").?.data; return mod.intern(.{ .un = .{ @@ -679,7 +360,6 @@ pub const Value = struct { for (new_elems, old_elems) |*new_elem, old_elem| new_elem.* = old_elem.toValue(); return Tag.aggregate.create(arena, new_elems); }, - .repeated_elem => |elem| return Tag.repeated.create(arena, elem.toValue()), }, else => return val, } @@ -698,31 +378,21 @@ pub const Value = struct { pub fn enumToInt(val: Value, ty: Type, mod: *Module) Allocator.Error!Value { const ip = &mod.intern_pool; switch (val.ip_index) { - .none => { - const field_index = switch (val.tag()) { - .the_only_possible_value => blk: { - assert(ty.enumFieldCount(mod) == 1); - break :blk 0; - }, - .enum_literal => i: { - const name = val.castTag(.enum_literal).?.data; - break :i ty.enumFieldIndex(name, mod).?; - }, - else => unreachable, - }; - return switch (ip.indexToKey(ty.ip_index)) { - // Assume it is already an integer and return it directly. - .simple_type, .int_type => val, - .enum_type => |enum_type| if (enum_type.values.len != 0) - enum_type.values[field_index].toValue() - else // Field index and integer values are the same. - mod.intValue(enum_type.tag_ty.toType(), field_index), - else => unreachable, - }; - }, else => return switch (ip.indexToKey(ip.typeOf(val.ip_index))) { // Assume it is already an integer and return it directly. .simple_type, .int_type => val, + .enum_literal => |enum_literal| { + const field_index = ty.enumFieldIndex(ip.stringToSlice(enum_literal), mod).?; + return switch (ip.indexToKey(ty.ip_index)) { + // Assume it is already an integer and return it directly. + .simple_type, .int_type => val, + .enum_type => |enum_type| if (enum_type.values.len != 0) + enum_type.values[field_index].toValue() + else // Field index and integer values are the same. 
+ mod.intValue(enum_type.tag_ty.toType(), field_index), + else => unreachable, + }; + }, .enum_type => |enum_type| (try ip.getCoerced( mod.gpa, val.ip_index, @@ -733,18 +403,12 @@ pub const Value = struct { } } - pub fn tagName(val: Value, ty: Type, mod: *Module) []const u8 { - _ = ty; // TODO: remove this parameter now that we use InternPool - - if (val.castTag(.enum_literal)) |payload| { - return payload.data; - } - + pub fn tagName(val: Value, mod: *Module) []const u8 { const ip = &mod.intern_pool; - const enum_tag = switch (ip.indexToKey(val.ip_index)) { .un => |un| ip.indexToKey(un.tag).enum_tag, .enum_tag => |x| x, + .enum_literal => |name| return ip.stringToSlice(name), else => unreachable, }; const enum_type = ip.indexToKey(enum_tag.ty).enum_type; @@ -773,49 +437,61 @@ pub const Value = struct { .bool_true => BigIntMutable.init(&space.limbs, 1).toConst(), .undef => unreachable, .null_value => BigIntMutable.init(&space.limbs, 0).toConst(), - .none => switch (val.tag()) { - .the_only_possible_value, // i0, u0 - => BigIntMutable.init(&space.limbs, 0).toConst(), - - .runtime_value => { - const sub_val = val.castTag(.runtime_value).?.data; - return sub_val.toBigIntAdvanced(space, mod, opt_sema); - }, - .lazy_align => { - const ty = val.castTag(.lazy_align).?.data; - if (opt_sema) |sema| { - try sema.resolveTypeLayout(ty); - } - const x = ty.abiAlignment(mod); - return BigIntMutable.init(&space.limbs, x).toConst(); - }, - .lazy_size => { - const ty = val.castTag(.lazy_size).?.data; - if (opt_sema) |sema| { - try sema.resolveTypeLayout(ty); - } - const x = ty.abiSize(mod); - return BigIntMutable.init(&space.limbs, x).toConst(); + else => switch (mod.intern_pool.indexToKey(val.ip_index)) { + .runtime_value => |runtime_value| runtime_value.val.toValue().toBigIntAdvanced(space, mod, opt_sema), + .int => |int| switch (int.storage) { + .u64, .i64, .big_int => int.storage.toBigInt(space), + .lazy_align, .lazy_size => |ty| { + if (opt_sema) |sema| try sema.resolveTypeLayout(ty.toType()); + const x = switch (int.storage) { + else => unreachable, + .lazy_align => ty.toType().abiAlignment(mod), + .lazy_size => ty.toType().abiSize(mod), + }; + return BigIntMutable.init(&space.limbs, x).toConst(); + }, }, - - .elem_ptr => { - const elem_ptr = val.castTag(.elem_ptr).?.data; - const array_addr = (try elem_ptr.array_ptr.getUnsignedIntAdvanced(mod, opt_sema)).?; - const elem_size = elem_ptr.elem_ty.abiSize(mod); - const new_addr = array_addr + elem_size * elem_ptr.index; - return BigIntMutable.init(&space.limbs, new_addr).toConst(); + .enum_tag => |enum_tag| enum_tag.int.toValue().toBigIntAdvanced(space, mod, opt_sema), + .ptr => |ptr| switch (ptr.len) { + .none => switch (ptr.addr) { + .int => |int| int.toValue().toBigIntAdvanced(space, mod, opt_sema), + .elem => |elem| { + const base_addr = (try elem.base.toValue().getUnsignedIntAdvanced(mod, opt_sema)).?; + const elem_size = ptr.ty.toType().elemType2(mod).abiSize(mod); + const new_addr = base_addr + elem.index * elem_size; + return BigIntMutable.init(&space.limbs, new_addr).toConst(); + }, + else => unreachable, + }, + else => unreachable, }, - - else => unreachable, - }, - else => switch (mod.intern_pool.indexToKey(val.ip_index)) { - .int => |int| int.storage.toBigInt(space), - .enum_tag => |enum_tag| mod.intern_pool.indexToKey(enum_tag.int).int.storage.toBigInt(space), else => unreachable, }, }; } + pub fn getFunction(val: Value, mod: *Module) ?*Module.Fn { + return mod.funcPtrUnwrap(val.getFunctionIndex(mod)); + } + + pub fn getFunctionIndex(val: 
Value, mod: *Module) Module.Fn.OptionalIndex { + return if (val.ip_index != .none) mod.intern_pool.indexToFunc(val.ip_index) else .none; + } + + pub fn getExternFunc(val: Value, mod: *Module) ?InternPool.Key.ExternFunc { + return if (val.ip_index != .none) switch (mod.intern_pool.indexToKey(val.ip_index)) { + .extern_func => |extern_func| extern_func, + else => null, + } else null; + } + + pub fn getVariable(val: Value, mod: *Module) ?InternPool.Key.Variable { + return if (val.ip_index != .none) switch (mod.intern_pool.indexToKey(val.ip_index)) { + .variable => |variable| variable, + else => null, + } else null; + } + /// If the value fits in a u64, return it, otherwise null. /// Asserts not undefined. pub fn getUnsignedInt(val: Value, mod: *Module) ?u64 { @@ -825,42 +501,27 @@ pub const Value = struct { /// If the value fits in a u64, return it, otherwise null. /// Asserts not undefined. pub fn getUnsignedIntAdvanced(val: Value, mod: *Module, opt_sema: ?*Sema) !?u64 { - switch (val.ip_index) { - .bool_false => return 0, - .bool_true => return 1, + return switch (val.ip_index) { + .bool_false => 0, + .bool_true => 1, .undef => unreachable, - .none => switch (val.tag()) { - .the_only_possible_value, // i0, u0 - => return 0, - - .lazy_align => { - const ty = val.castTag(.lazy_align).?.data; - if (opt_sema) |sema| { - return (try ty.abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar; - } else { - return ty.abiAlignment(mod); - } - }, - .lazy_size => { - const ty = val.castTag(.lazy_size).?.data; - if (opt_sema) |sema| { - return (try ty.abiSizeAdvanced(mod, .{ .sema = sema })).scalar; - } else { - return ty.abiSize(mod); - } - }, - - else => return null, - }, - else => return switch (mod.intern_pool.indexToKey(val.ip_index)) { + else => switch (mod.intern_pool.indexToKey(val.ip_index)) { .int => |int| switch (int.storage) { .big_int => |big_int| big_int.to(u64) catch null, .u64 => |x| x, .i64 => |x| std.math.cast(u64, x), + .lazy_align => |ty| if (opt_sema) |sema| + (try ty.toType().abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar + else + ty.toType().abiAlignment(mod), + .lazy_size => |ty| if (opt_sema) |sema| + (try ty.toType().abiSizeAdvanced(mod, .{ .sema = sema })).scalar + else + ty.toType().abiSize(mod), }, else => null, }, - } + }; } /// Asserts the value is an integer and it fits in a u64 @@ -870,58 +531,40 @@ pub const Value = struct { /// Asserts the value is an integer and it fits in a i64 pub fn toSignedInt(val: Value, mod: *Module) i64 { - switch (val.ip_index) { - .bool_false => return 0, - .bool_true => return 1, + return switch (val.ip_index) { + .bool_false => 0, + .bool_true => 1, .undef => unreachable, - .none => switch (val.tag()) { - .the_only_possible_value, // i0, u0 - => return 0, - - .lazy_align => { - const ty = val.castTag(.lazy_align).?.data; - return @intCast(i64, ty.abiAlignment(mod)); - }, - .lazy_size => { - const ty = val.castTag(.lazy_size).?.data; - return @intCast(i64, ty.abiSize(mod)); - }, - - else => unreachable, - }, - else => return switch (mod.intern_pool.indexToKey(val.ip_index)) { + else => switch (mod.intern_pool.indexToKey(val.ip_index)) { .int => |int| switch (int.storage) { .big_int => |big_int| big_int.to(i64) catch unreachable, .i64 => |x| x, .u64 => |x| @intCast(i64, x), + .lazy_align => |ty| @intCast(i64, ty.toType().abiAlignment(mod)), + .lazy_size => |ty| @intCast(i64, ty.toType().abiSize(mod)), }, else => unreachable, }, - } + }; } - pub fn toBool(val: Value, mod: *const Module) bool { + pub fn toBool(val: Value, _: *const Module) 
bool { return switch (val.ip_index) { .bool_true => true, .bool_false => false, - .none => unreachable, - else => switch (mod.intern_pool.indexToKey(val.ip_index)) { - .int => |int| switch (int.storage) { - .big_int => |big_int| !big_int.eqZero(), - inline .u64, .i64 => |x| x != 0, - }, - else => unreachable, - }, + else => unreachable, }; } - fn isDeclRef(val: Value) bool { + fn isDeclRef(val: Value, mod: *Module) bool { var check = val; - while (true) switch (check.tag()) { - .variable, .decl_ref, .decl_ref_mut, .comptime_field_ptr => return true, - .field_ptr => check = check.castTag(.field_ptr).?.data.container_ptr, - .elem_ptr => check = check.castTag(.elem_ptr).?.data.array_ptr, - .eu_payload_ptr, .opt_payload_ptr => check = check.cast(Value.Payload.PayloadPtr).?.data.container_ptr, + while (true) switch (mod.intern_pool.indexToKey(check.ip_index)) { + .ptr => |ptr| switch (ptr.addr) { + .decl, .mut_decl, .comptime_field => return true, + .eu_payload, .opt_payload => |index| check = index.toValue(), + .elem, .field => |base_index| check = base_index.base.toValue(), + else => return false, + }, else => return false, }; } @@ -953,24 +596,9 @@ pub const Value = struct { const bits = int_info.bits; const byte_count = (bits + 7) / 8; - const int_val = try val.enumToInt(ty, mod); - - if (byte_count <= @sizeOf(u64)) { - const ip_key = mod.intern_pool.indexToKey(int_val.ip_index); - const int: u64 = switch (ip_key.int.storage) { - .u64 => |x| x, - .i64 => |x| @bitCast(u64, x), - .big_int => unreachable, - }; - for (buffer[0..byte_count], 0..) |_, i| switch (endian) { - .Little => buffer[i] = @truncate(u8, (int >> @intCast(u6, (8 * i)))), - .Big => buffer[byte_count - i - 1] = @truncate(u8, (int >> @intCast(u6, (8 * i)))), - }; - } else { - var bigint_buffer: BigIntSpace = undefined; - const bigint = int_val.toBigInt(&bigint_buffer, mod); - bigint.writeTwosComplement(buffer[0..byte_count], endian); - } + var bigint_buffer: BigIntSpace = undefined; + const bigint = val.toBigInt(&bigint_buffer, mod); + bigint.writeTwosComplement(buffer[0..byte_count], endian); }, .Float => switch (ty.floatBits(target)) { 16 => std.mem.writeInt(u16, buffer[0..2], @bitCast(u16, val.toFloat(f16, mod)), endian), @@ -1016,7 +644,12 @@ pub const Value = struct { .ErrorSet => { // TODO revisit this when we have the concept of the error tag type const Int = u16; - const int = mod.global_error_set.get(val.castTag(.@"error").?.data.name).?; + const name = switch (mod.intern_pool.indexToKey(val.ip_index)) { + .err => |err| err.name, + .error_union => |error_union| error_union.val.err_name, + else => unreachable, + }; + const int = mod.global_error_set.get(mod.intern_pool.stringToSlice(name)).?; std.mem.writeInt(Int, buffer[0..@sizeOf(Int)], @intCast(Int, int), endian); }, .Union => switch (ty.containerLayout(mod)) { @@ -1029,7 +662,7 @@ pub const Value = struct { }, .Pointer => { if (ty.isSlice(mod)) return error.IllDefinedMemoryLayout; - if (val.isDeclRef()) return error.ReinterpretDeclRef; + if (val.isDeclRef(mod)) return error.ReinterpretDeclRef; return val.writeToMemory(Type.usize, mod, buffer); }, .Optional => { @@ -1141,14 +774,14 @@ pub const Value = struct { .Packed => { const field_index = ty.unionTagFieldIndex(val.unionTag(mod), mod); const field_type = ty.unionFields(mod).values()[field_index.?].ty; - const field_val = try val.fieldValue(field_type, mod, field_index.?); + const field_val = try val.fieldValue(mod, field_index.?); return field_val.writeToPackedMemory(field_type, mod, buffer, bit_offset); }, }, 
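// Standalone sketch: the rewritten integer writeToMemory path above drops
// the old u64 fast path and funnels every int value, lazy or not, through
// toBigInt followed by big-int two's-complement serialization. A minimal
// illustration of that same std API, assuming only the Zig standard
// library (none of the compiler's Module/Type machinery is needed for it):
const std = @import("std");

test "two's-complement big-int write produces the expected bytes" {
    var big = try std.math.big.int.Managed.initSet(std.testing.allocator, -2);
    defer big.deinit();
    var buffer: [2]u8 = undefined; // the ABI size of an i16
    big.toConst().writeTwosComplement(&buffer, .Little);
    try std.testing.expectEqualSlices(u8, &.{ 0xfe, 0xff }, &buffer);
}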
.Pointer => { assert(!ty.isSlice(mod)); // No well defined layout. - if (val.isDeclRef()) return error.ReinterpretDeclRef; + if (val.isDeclRef(mod)) return error.ReinterpretDeclRef; return val.writeToPackedMemory(Type.usize, mod, buffer, bit_offset); }, .Optional => { @@ -1262,13 +895,11 @@ pub const Value = struct { // TODO revisit this when we have the concept of the error tag type const Int = u16; const int = std.mem.readInt(Int, buffer[0..@sizeOf(Int)], endian); - - const payload = try arena.create(Value.Payload.Error); - payload.* = .{ - .base = .{ .tag = .@"error" }, - .data = .{ .name = mod.error_name_list.items[@intCast(usize, int)] }, - }; - return Value.initPayload(&payload.base); + const name = mod.error_name_list.items[@intCast(usize, int)]; + return (try mod.intern(.{ .err = .{ + .ty = ty.ip_index, + .name = mod.intern_pool.getString(name).unwrap().?, + } })).toValue(); }, .Pointer => { assert(!ty.isSlice(mod)); // No well defined layout. @@ -1383,7 +1014,7 @@ pub const Value = struct { } /// Asserts that the value is a float or an integer. - pub fn toFloat(val: Value, comptime T: type, mod: *const Module) T { + pub fn toFloat(val: Value, comptime T: type, mod: *Module) T { return switch (mod.intern_pool.indexToKey(val.ip_index)) { .int => |int| switch (int.storage) { .big_int => |big_int| @floatCast(T, bigIntToFloat(big_int.limbs, big_int.positive)), @@ -1393,6 +1024,8 @@ pub const Value = struct { } return @intToFloat(T, x); }, + .lazy_align => |ty| @intToFloat(T, ty.toType().abiAlignment(mod)), + .lazy_size => |ty| @intToFloat(T, ty.toType().abiSize(mod)), }, .float => |float| switch (float.storage) { inline else => |x| @floatCast(T, x), @@ -1421,89 +1054,24 @@ pub const Value = struct { } pub fn clz(val: Value, ty: Type, mod: *Module) u64 { - const ty_bits = ty.intInfo(mod).bits; - return switch (val.ip_index) { - .bool_false => ty_bits, - .bool_true => ty_bits - 1, - .none => switch (val.tag()) { - .the_only_possible_value => { - assert(ty_bits == 0); - return ty_bits; - }, - - .lazy_align, .lazy_size => { - var bigint_buf: BigIntSpace = undefined; - const bigint = val.toBigIntAdvanced(&bigint_buf, mod, null) catch unreachable; - return bigint.clz(ty_bits); - }, - - else => unreachable, - }, - else => switch (mod.intern_pool.indexToKey(val.ip_index)) { - .int => |int| switch (int.storage) { - .big_int => |big_int| big_int.clz(ty_bits), - .u64 => |x| @clz(x) + ty_bits - 64, - .i64 => @panic("TODO implement i64 Value clz"), - }, - else => unreachable, - }, - }; + var bigint_buf: BigIntSpace = undefined; + const bigint = val.toBigInt(&bigint_buf, mod); + return bigint.clz(ty.intInfo(mod).bits); } - pub fn ctz(val: Value, ty: Type, mod: *Module) u64 { - const ty_bits = ty.intInfo(mod).bits; - return switch (val.ip_index) { - .bool_false => ty_bits, - .bool_true => 0, - .none => switch (val.tag()) { - .the_only_possible_value => { - assert(ty_bits == 0); - return ty_bits; - }, - - .lazy_align, .lazy_size => { - var bigint_buf: BigIntSpace = undefined; - const bigint = val.toBigIntAdvanced(&bigint_buf, mod, null) catch unreachable; - return bigint.ctz(); - }, - - else => unreachable, - }, - else => switch (mod.intern_pool.indexToKey(val.ip_index)) { - .int => |int| switch (int.storage) { - .big_int => |big_int| big_int.ctz(), - .u64 => |x| { - const big = @ctz(x); - return if (big == 64) ty_bits else big; - }, - .i64 => @panic("TODO implement i64 Value ctz"), - }, - else => unreachable, - }, - }; + pub fn ctz(val: Value, _: Type, mod: *Module) u64 { + var bigint_buf: 
BigIntSpace = undefined; + const bigint = val.toBigInt(&bigint_buf, mod); + return bigint.ctz(); } pub fn popCount(val: Value, ty: Type, mod: *Module) u64 { - assert(!val.isUndef(mod)); - switch (val.ip_index) { - .bool_false => return 0, - .bool_true => return 1, - .none => unreachable, - else => switch (mod.intern_pool.indexToKey(val.ip_index)) { - .int => |int| { - const info = ty.intInfo(mod); - var buffer: Value.BigIntSpace = undefined; - const big_int = int.storage.toBigInt(&buffer); - return @intCast(u64, big_int.popCount(info.bits)); - }, - else => unreachable, - }, - } + var bigint_buf: BigIntSpace = undefined; + const bigint = val.toBigInt(&bigint_buf, mod); + return @intCast(u64, bigint.popCount(ty.intInfo(mod).bits)); } pub fn bitReverse(val: Value, ty: Type, mod: *Module, arena: Allocator) !Value { - assert(!val.isUndef(mod)); - const info = ty.intInfo(mod); var buffer: Value.BigIntSpace = undefined; @@ -1520,8 +1088,6 @@ pub const Value = struct { } pub fn byteSwap(val: Value, ty: Type, mod: *Module, arena: Allocator) !Value { - assert(!val.isUndef(mod)); - const info = ty.intInfo(mod); // Bit count must be evenly divisible by 8 @@ -1543,41 +1109,9 @@ pub const Value = struct { /// Asserts the value is an integer and not undefined. /// Returns the number of bits the value requires to represent stored in twos complement form. pub fn intBitCountTwosComp(self: Value, mod: *Module) usize { - const target = mod.getTarget(); - return switch (self.ip_index) { - .bool_false => 0, - .bool_true => 1, - .none => switch (self.tag()) { - .the_only_possible_value => 0, - - .decl_ref_mut, - .comptime_field_ptr, - .extern_fn, - .decl_ref, - .function, - .variable, - .eu_payload_ptr, - .opt_payload_ptr, - => target.ptrBitWidth(), - - else => { - var buffer: BigIntSpace = undefined; - return self.toBigInt(&buffer, mod).bitCountTwosComp(); - }, - }, - else => switch (mod.intern_pool.indexToKey(self.ip_index)) { - .int => |int| switch (int.storage) { - .big_int => |big_int| big_int.bitCountTwosComp(), - .u64 => |x| if (x == 0) 0 else @intCast(usize, std.math.log2(x) + 1), - .i64 => { - var buffer: Value.BigIntSpace = undefined; - const big_int = int.storage.toBigInt(&buffer); - return big_int.bitCountTwosComp(); - }, - }, - else => unreachable, - }, - }; + var buffer: BigIntSpace = undefined; + const big_int = self.toBigInt(&buffer, mod); + return big_int.bitCountTwosComp(); } /// Converts an integer or a float to a float. May result in a loss of information. @@ -1616,84 +1150,39 @@ pub const Value = struct { mod: *Module, opt_sema: ?*Sema, ) Module.CompileError!std.math.Order { - switch (lhs.ip_index) { - .bool_false => return .eq, - .bool_true => return .gt, - .none => return switch (lhs.tag()) { - .the_only_possible_value => .eq, - - .decl_ref, - .decl_ref_mut, - .comptime_field_ptr, - .extern_fn, - .function, - .variable, - => .gt, - - .runtime_value => { - // This is needed to correctly handle hashing the value. - // Checks in Sema should prevent direct comparisons from reaching here. 
- const val = lhs.castTag(.runtime_value).?.data; - return val.orderAgainstZeroAdvanced(mod, opt_sema); - }, - - .lazy_align => { - const ty = lhs.castTag(.lazy_align).?.data; - const strat: Type.AbiAlignmentAdvancedStrat = if (opt_sema) |sema| .{ .sema = sema } else .eager; - if (ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { - error.NeedLazy => unreachable, - else => |e| return e, - }) { - return .gt; - } else { - return .eq; - } - }, - .lazy_size => { - const ty = lhs.castTag(.lazy_size).?.data; - const strat: Type.AbiAlignmentAdvancedStrat = if (opt_sema) |sema| .{ .sema = sema } else .eager; - if (ty.hasRuntimeBitsAdvanced(mod, false, strat) catch |err| switch (err) { - error.NeedLazy => unreachable, - else => |e| return e, - }) { - return .gt; - } else { - return .eq; - } - }, - - .elem_ptr => { - const elem_ptr = lhs.castTag(.elem_ptr).?.data; - switch (try elem_ptr.array_ptr.orderAgainstZeroAdvanced(mod, opt_sema)) { + return switch (lhs.ip_index) { + .bool_false => .eq, + .bool_true => .gt, + else => switch (mod.intern_pool.indexToKey(lhs.ip_index)) { + .ptr => |ptr| switch (ptr.addr) { + .decl, .mut_decl, .comptime_field => .gt, + .int => |int| int.toValue().orderAgainstZeroAdvanced(mod, opt_sema), + .elem => |elem| switch (try elem.base.toValue().orderAgainstZeroAdvanced(mod, opt_sema)) { .lt => unreachable, - .gt => return .gt, - .eq => { - if (elem_ptr.index == 0) { - return .eq; - } else { - return .gt; - } - }, - } + .gt => .gt, + .eq => if (elem.index == 0) .eq else .gt, + }, + else => unreachable, }, - - else => unreachable, - }, - else => return switch (mod.intern_pool.indexToKey(lhs.ip_index)) { .int => |int| switch (int.storage) { .big_int => |big_int| big_int.orderAgainstScalar(0), inline .u64, .i64 => |x| std.math.order(x, 0), + .lazy_align, .lazy_size => |ty| return if (ty.toType().hasRuntimeBitsAdvanced( + mod, + false, + if (opt_sema) |sema| .{ .sema = sema } else .eager, + ) catch |err| switch (err) { + error.NeedLazy => unreachable, + else => |e| return e, + }) .gt else .eq, }, - .enum_tag => |enum_tag| switch (mod.intern_pool.indexToKey(enum_tag.int).int.storage) { - .big_int => |big_int| big_int.orderAgainstScalar(0), - inline .u64, .i64 => |x| std.math.order(x, 0), - }, + .enum_tag => |enum_tag| enum_tag.int.toValue().orderAgainstZeroAdvanced(mod, opt_sema), .float => |float| switch (float.storage) { inline else => |x| std.math.order(x, 0), }, else => unreachable, }, - } + }; } /// Asserts the value is comparable. 
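// Standalone sketch: with lazy_align and lazy_size folded into the interned
// int's storage, the hunk above reduces orderAgainstZeroAdvanced to uniform
// cases: u64/i64 storages go through std.math.order, big_int storage goes
// through orderAgainstScalar, and the lazy storages answer .gt exactly when
// the type has runtime bits (the diff treats an ABI size or alignment as
// zero only for zero-bit types, so the exact value never has to be
// computed). A minimal illustration of the two value-carrying paths,
// assuming only the Zig standard library:
const std = @import("std");

test "ordering the int storages against zero" {
    try std.testing.expectEqual(std.math.Order.gt, std.math.order(@as(u64, 7), 0));
    try std.testing.expectEqual(std.math.Order.lt, std.math.order(@as(i64, -3), 0));

    var big = try std.math.big.int.Managed.initSet(std.testing.allocator, -12345);
    defer big.deinit();
    try std.testing.expectEqual(std.math.Order.lt, big.toConst().orderAgainstScalar(0));
}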
@@ -1760,8 +1249,8 @@ pub const Value = struct { mod: *Module, opt_sema: ?*Sema, ) !bool { - if (lhs.pointerDecl()) |lhs_decl| { - if (rhs.pointerDecl()) |rhs_decl| { + if (lhs.pointerDecl(mod)) |lhs_decl| { + if (rhs.pointerDecl(mod)) |rhs_decl| { switch (op) { .eq => return lhs_decl == rhs_decl, .neq => return lhs_decl != rhs_decl, @@ -1774,7 +1263,7 @@ pub const Value = struct { else => {}, } } - } else if (rhs.pointerDecl()) |_| { + } else if (rhs.pointerDecl(mod)) |_| { switch (op) { .eq => return false, .neq => return true, @@ -1849,7 +1338,6 @@ pub const Value = struct { switch (lhs.ip_index) { .none => switch (lhs.tag()) { - .repeated => return lhs.castTag(.repeated).?.data.compareAllWithZeroAdvancedExtra(op, mod, opt_sema), .aggregate => { for (lhs.castTag(.aggregate).?.data) |elem_val| { if (!(try elem_val.compareAllWithZeroAdvancedExtra(op, mod, opt_sema))) return false; @@ -1877,6 +1365,15 @@ pub const Value = struct { .float => |float| switch (float.storage) { inline else => |x| if (std.math.isNan(x)) return op == .neq, }, + .aggregate => |aggregate| return switch (aggregate.storage) { + .bytes => |bytes| for (bytes) |byte| { + if (!std.math.order(byte, 0).compare(op)) break false; + } else true, + .elems => |elems| for (elems) |elem| { + if (!try elem.toValue().compareAllWithZeroAdvancedExtra(op, mod, opt_sema)) break false; + } else true, + .repeated_elem => |elem| elem.toValue().compareAllWithZeroAdvancedExtra(op, mod, opt_sema), + }, else => {}, }, } @@ -1910,69 +1407,6 @@ pub const Value = struct { const a_tag = a.tag(); const b_tag = b.tag(); if (a_tag == b_tag) switch (a_tag) { - .the_only_possible_value => return true, - .enum_literal => { - const a_name = a.castTag(.enum_literal).?.data; - const b_name = b.castTag(.enum_literal).?.data; - return std.mem.eql(u8, a_name, b_name); - }, - .opt_payload => { - const a_payload = a.castTag(.opt_payload).?.data; - const b_payload = b.castTag(.opt_payload).?.data; - const payload_ty = ty.optionalChild(mod); - return eqlAdvanced(a_payload, payload_ty, b_payload, payload_ty, mod, opt_sema); - }, - .slice => { - const a_payload = a.castTag(.slice).?.data; - const b_payload = b.castTag(.slice).?.data; - if (!(try eqlAdvanced(a_payload.len, Type.usize, b_payload.len, Type.usize, mod, opt_sema))) { - return false; - } - - const ptr_ty = ty.slicePtrFieldType(mod); - - return eqlAdvanced(a_payload.ptr, ptr_ty, b_payload.ptr, ptr_ty, mod, opt_sema); - }, - .elem_ptr => { - const a_payload = a.castTag(.elem_ptr).?.data; - const b_payload = b.castTag(.elem_ptr).?.data; - if (a_payload.index != b_payload.index) return false; - - return eqlAdvanced(a_payload.array_ptr, ty, b_payload.array_ptr, ty, mod, opt_sema); - }, - .field_ptr => { - const a_payload = a.castTag(.field_ptr).?.data; - const b_payload = b.castTag(.field_ptr).?.data; - if (a_payload.field_index != b_payload.field_index) return false; - - return eqlAdvanced(a_payload.container_ptr, ty, b_payload.container_ptr, ty, mod, opt_sema); - }, - .@"error" => { - const a_name = a.castTag(.@"error").?.data.name; - const b_name = b.castTag(.@"error").?.data.name; - return std.mem.eql(u8, a_name, b_name); - }, - .eu_payload => { - const a_payload = a.castTag(.eu_payload).?.data; - const b_payload = b.castTag(.eu_payload).?.data; - const payload_ty = ty.errorUnionPayload(mod); - return eqlAdvanced(a_payload, payload_ty, b_payload, payload_ty, mod, opt_sema); - }, - .eu_payload_ptr => { - const a_payload = a.castTag(.eu_payload_ptr).?.data; - const b_payload = 
b.castTag(.eu_payload_ptr).?.data; - return eqlAdvanced(a_payload.container_ptr, ty, b_payload.container_ptr, ty, mod, opt_sema); - }, - .opt_payload_ptr => { - const a_payload = a.castTag(.opt_payload_ptr).?.data; - const b_payload = b.castTag(.opt_payload_ptr).?.data; - return eqlAdvanced(a_payload.container_ptr, ty, b_payload.container_ptr, ty, mod, opt_sema); - }, - .function => { - const a_payload = a.castTag(.function).?.data; - const b_payload = b.castTag(.function).?.data; - return a_payload == b_payload; - }, .aggregate => { const a_field_vals = a.castTag(.aggregate).?.data; const b_field_vals = b.castTag(.aggregate).?.data; @@ -2035,17 +1469,15 @@ pub const Value = struct { return eqlAdvanced(a_union.val, active_field_ty, b_union.val, active_field_ty, mod, opt_sema); }, else => {}, - } else if (b_tag == .@"error") { - return false; - } + }; - if (a.pointerDecl()) |a_decl| { - if (b.pointerDecl()) |b_decl| { + if (a.pointerDecl(mod)) |a_decl| { + if (b.pointerDecl(mod)) |b_decl| { return a_decl == b_decl; } else { return false; } - } else if (b.pointerDecl()) |_| { + } else if (b.pointerDecl(mod)) |_| { return false; } @@ -2130,25 +1562,11 @@ pub const Value = struct { if (a_nan) return true; return a_float == b_float; }, - .Optional => if (b_tag == .opt_payload) { - var sub_pl: Payload.SubValue = .{ - .base = .{ .tag = b.tag() }, - .data = a, - }; - const sub_val = Value.initPayload(&sub_pl.base); - return eqlAdvanced(sub_val, ty, b, ty, mod, opt_sema); - }, - .ErrorUnion => if (a_tag != .@"error" and b_tag == .eu_payload) { - var sub_pl: Payload.SubValue = .{ - .base = .{ .tag = b.tag() }, - .data = a, - }; - const sub_val = Value.initPayload(&sub_pl.base); - return eqlAdvanced(sub_val, ty, b, ty, mod, opt_sema); - }, + .Optional, + .ErrorUnion, + => unreachable, // handled by InternPool else => {}, } - if (a_tag == .@"error") return false; return (try orderAdvanced(a, b, mod, opt_sema)).compare(.eq); } @@ -2166,7 +1584,7 @@ pub const Value = struct { std.hash.autoHash(hasher, zig_ty_tag); if (val.isUndef(mod)) return; // The value is runtime-known and shouldn't affect the hash. - if (val.isRuntimeValue()) return; + if (val.isRuntimeValue(mod)) return; switch (zig_ty_tag) { .Opaque => unreachable, // Cannot hash opaque types @@ -2177,38 +1595,20 @@ pub const Value = struct { .Null, => {}, - .Type => unreachable, // handled via ip_index check above - .Float => { - // For hash/eql purposes, we treat floats as their IEEE integer representation. 
- switch (ty.floatBits(mod.getTarget())) { - 16 => std.hash.autoHash(hasher, @bitCast(u16, val.toFloat(f16, mod))), - 32 => std.hash.autoHash(hasher, @bitCast(u32, val.toFloat(f32, mod))), - 64 => std.hash.autoHash(hasher, @bitCast(u64, val.toFloat(f64, mod))), - 80 => std.hash.autoHash(hasher, @bitCast(u80, val.toFloat(f80, mod))), - 128 => std.hash.autoHash(hasher, @bitCast(u128, val.toFloat(f128, mod))), - else => unreachable, - } - }, - .ComptimeFloat => { - const float = val.toFloat(f128, mod); - const is_nan = std.math.isNan(float); - std.hash.autoHash(hasher, is_nan); - if (!is_nan) { - std.hash.autoHash(hasher, @bitCast(u128, float)); - } else { - std.hash.autoHash(hasher, std.math.signbit(float)); - } - }, - .Bool, .Int, .ComptimeInt, .Pointer => switch (val.tag()) { - .slice => { - const slice = val.castTag(.slice).?.data; - const ptr_ty = ty.slicePtrFieldType(mod); - hash(slice.ptr, ptr_ty, hasher, mod); - hash(slice.len, Type.usize, hasher, mod); - }, - - else => return hashPtr(val, hasher, mod), - }, + .Type, + .Float, + .ComptimeFloat, + .Bool, + .Int, + .ComptimeInt, + .Pointer, + .Optional, + .ErrorUnion, + .ErrorSet, + .Enum, + .EnumLiteral, + .Fn, + => unreachable, // handled via ip_index check above .Array, .Vector => { const len = ty.arrayLen(mod); const elem_ty = ty.childType(mod); @@ -2233,42 +1633,6 @@ pub const Value = struct { else => unreachable, } }, - .Optional => { - if (val.castTag(.opt_payload)) |payload| { - std.hash.autoHash(hasher, true); // non-null - const sub_val = payload.data; - const sub_ty = ty.optionalChild(mod); - sub_val.hash(sub_ty, hasher, mod); - } else { - std.hash.autoHash(hasher, false); // null - } - }, - .ErrorUnion => { - if (val.tag() == .@"error") { - std.hash.autoHash(hasher, false); // error - const sub_ty = ty.errorUnionSet(mod); - val.hash(sub_ty, hasher, mod); - return; - } - - if (val.castTag(.eu_payload)) |payload| { - std.hash.autoHash(hasher, true); // payload - const sub_ty = ty.errorUnionPayload(mod); - payload.data.hash(sub_ty, hasher, mod); - return; - } else unreachable; - }, - .ErrorSet => { - // just hash the literal error value. this is the most stable - // thing between compiler invocations. we can't use the error - // int cause (1) its not stable and (2) we don't have access to mod. - hasher.update(val.getError().?); - }, - .Enum => { - // This panic will go away when enum values move to be stored in the intern pool. - const int_val = val.enumToInt(ty, mod) catch @panic("OOM"); - hashInt(int_val, hasher, mod); - }, .Union => { const union_obj = val.cast(Payload.Union).?.data; if (ty.unionTagType(mod)) |tag_ty| { @@ -2277,27 +1641,12 @@ pub const Value = struct { const active_field_ty = ty.unionFieldType(union_obj.tag, mod); union_obj.val.hash(active_field_ty, hasher, mod); }, - .Fn => { - // Note that this hashes the *Fn/*ExternFn rather than the *Decl. - // This is to differentiate function bodies from function pointers. - // This is currently redundant since we already hash the zig type tag - // at the top of this function. 
- if (val.castTag(.function)) |func| { - std.hash.autoHash(hasher, func.data); - } else if (val.castTag(.extern_fn)) |func| { - std.hash.autoHash(hasher, func.data); - } else unreachable; - }, .Frame => { @panic("TODO implement hashing frame values"); }, .AnyFrame => { @panic("TODO implement hashing anyframe values"); }, - .EnumLiteral => { - const bytes = val.castTag(.enum_literal).?.data; - hasher.update(bytes); - }, } } @@ -2308,7 +1657,7 @@ pub const Value = struct { pub fn hashUncoerced(val: Value, ty: Type, hasher: *std.hash.Wyhash, mod: *Module) void { if (val.isUndef(mod)) return; // The value is runtime-known and shouldn't affect the hash. - if (val.isRuntimeValue()) return; + if (val.isRuntimeValue(mod)) return; if (val.ip_index != .none) { // The InternPool data structure hashes based on Key to make interned objects @@ -2326,16 +1675,20 @@ pub const Value = struct { .Null, .Struct, // It sure would be nice to do something clever with structs. => |zig_type_tag| std.hash.autoHash(hasher, zig_type_tag), - .Type => unreachable, // handled above with the ip_index check - .Float, .ComptimeFloat => std.hash.autoHash(hasher, @bitCast(u128, val.toFloat(f128, mod))), - .Bool, .Int, .ComptimeInt, .Pointer, .Fn => switch (val.tag()) { - .slice => { - const slice = val.castTag(.slice).?.data; - const ptr_ty = ty.slicePtrFieldType(mod); - slice.ptr.hashUncoerced(ptr_ty, hasher, mod); - }, - else => val.hashPtr(hasher, mod), - }, + .Type, + .Float, + .ComptimeFloat, + .Bool, + .Int, + .ComptimeInt, + .Pointer, + .Fn, + .Optional, + .ErrorSet, + .ErrorUnion, + .Enum, + .EnumLiteral, + => unreachable, // handled above with the ip_index check .Array, .Vector => { const len = ty.arrayLen(mod); const elem_ty = ty.childType(mod); @@ -2348,21 +1701,16 @@ pub const Value = struct { elem_val.hashUncoerced(elem_ty, hasher, mod); } }, - .Optional => if (val.castTag(.opt_payload)) |payload| { - const child_ty = ty.optionalChild(mod); - payload.data.hashUncoerced(child_ty, hasher, mod); - } else std.hash.autoHash(hasher, std.builtin.TypeId.Null), - .ErrorSet, .ErrorUnion => if (val.getError()) |err| hasher.update(err) else { - const pl_ty = ty.errorUnionPayload(mod); - val.castTag(.eu_payload).?.data.hashUncoerced(pl_ty, hasher, mod); - }, - .Enum, .EnumLiteral, .Union => { - hasher.update(val.tagName(ty, mod)); - if (val.cast(Payload.Union)) |union_obj| { - const active_field_ty = ty.unionFieldType(union_obj.data.tag, mod); - union_obj.data.val.hashUncoerced(active_field_ty, hasher, mod); - } else std.hash.autoHash(hasher, std.builtin.TypeId.Void); - }, + .Union => { + hasher.update(val.tagName(mod)); + switch (mod.intern_pool.indexToKey(val.ip_index)) { + .un => |un| { + const active_field_ty = ty.unionFieldType(un.tag.toValue(), mod); + un.val.toValue().hashUncoerced(active_field_ty, hasher, mod); + }, + else => std.hash.autoHash(hasher, std.builtin.TypeId.Void), + } + }, .Frame => @panic("TODO implement hashing frame values"), .AnyFrame => @panic("TODO implement hashing anyframe values"), } @@ -2397,57 +1745,53 @@ pub const Value = struct { } }; - pub fn isComptimeMutablePtr(val: Value) bool { - return switch (val.ip_index) { - .none => switch (val.tag()) { - .decl_ref_mut, .comptime_field_ptr => true, - .elem_ptr => isComptimeMutablePtr(val.castTag(.elem_ptr).?.data.array_ptr), - .field_ptr => isComptimeMutablePtr(val.castTag(.field_ptr).?.data.container_ptr), - .eu_payload_ptr => isComptimeMutablePtr(val.castTag(.eu_payload_ptr).?.data.container_ptr), - .opt_payload_ptr => 
isComptimeMutablePtr(val.castTag(.opt_payload_ptr).?.data.container_ptr), - .slice => isComptimeMutablePtr(val.castTag(.slice).?.data.ptr), - + pub fn isComptimeMutablePtr(val: Value, mod: *Module) bool { + return switch (mod.intern_pool.indexToKey(val.ip_index)) { + .ptr => |ptr| switch (ptr.addr) { + .mut_decl, .comptime_field => true, + .eu_payload, .opt_payload => |base_ptr| base_ptr.toValue().isComptimeMutablePtr(mod), + .elem, .field => |base_index| base_index.base.toValue().isComptimeMutablePtr(mod), else => false, }, else => false, }; } - pub fn canMutateComptimeVarState(val: Value) bool { - if (val.isComptimeMutablePtr()) return true; - return switch (val.ip_index) { - .none => switch (val.tag()) { - .repeated => return val.castTag(.repeated).?.data.canMutateComptimeVarState(), - .eu_payload => return val.castTag(.eu_payload).?.data.canMutateComptimeVarState(), - .eu_payload_ptr => return val.castTag(.eu_payload_ptr).?.data.container_ptr.canMutateComptimeVarState(), - .opt_payload => return val.castTag(.opt_payload).?.data.canMutateComptimeVarState(), - .opt_payload_ptr => return val.castTag(.opt_payload_ptr).?.data.container_ptr.canMutateComptimeVarState(), - .aggregate => { - const fields = val.castTag(.aggregate).?.data; - for (fields) |field| { - if (field.canMutateComptimeVarState()) return true; - } - return false; + pub fn canMutateComptimeVarState(val: Value, mod: *Module) bool { + return val.isComptimeMutablePtr(mod) or switch (val.ip_index) { + else => switch (mod.intern_pool.indexToKey(val.ip_index)) { + .error_union => |error_union| switch (error_union.val) { + .err_name => false, + .payload => |payload| payload.toValue().canMutateComptimeVarState(mod), }, - .@"union" => return val.cast(Payload.Union).?.data.val.canMutateComptimeVarState(), - .slice => return val.castTag(.slice).?.data.ptr.canMutateComptimeVarState(), - else => return false, + .ptr => |ptr| switch (ptr.addr) { + .eu_payload, .opt_payload => |base| base.toValue().canMutateComptimeVarState(mod), + else => false, + }, + .opt => |opt| switch (opt.val) { + .none => false, + else => opt.val.toValue().canMutateComptimeVarState(mod), + }, + .aggregate => |aggregate| for (aggregate.storage.values()) |elem| { + if (elem.toValue().canMutateComptimeVarState(mod)) break true; + } else false, + .un => |un| un.val.toValue().canMutateComptimeVarState(mod), + else => false, }, - else => return false, }; } /// Gets the decl referenced by this pointer. If the pointer does not point /// to a decl, or if it points to some part of a decl (like field_ptr or element_ptr), /// this function returns null. 
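// Standalone sketch: the new isComptimeMutablePtr/canMutateComptimeVarState
// above no longer inspect legacy value tags; they walk the interned pointer
// chain, where only the base address kind decides mutability while elem,
// field, and payload projections simply recurse to their base. Addr and Ptr
// below are hypothetical stand-ins for InternPool.Key.Ptr.Addr, reduced to
// the shape the recursion needs:
const std = @import("std");

const Addr = union(enum) {
    decl,
    mut_decl,
    comptime_field,
    int,
    elem: *const Ptr,
    field: *const Ptr,
};
const Ptr = struct { addr: Addr };

fn isComptimeMutable(ptr: *const Ptr) bool {
    return switch (ptr.addr) {
        // Only these base kinds make a pointer comptime-mutable.
        .mut_decl, .comptime_field => true,
        // Projections inherit mutability from whatever they point into.
        .elem, .field => |base| isComptimeMutable(base),
        else => false,
    };
}

test "projections inherit comptime mutability from their base" {
    const base = Ptr{ .addr = .mut_decl };
    const elem_ptr = Ptr{ .addr = .{ .elem = &base } };
    const int_ptr = Ptr{ .addr = .int };
    try std.testing.expect(isComptimeMutable(&elem_ptr));
    try std.testing.expect(!isComptimeMutable(&int_ptr));
}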
- pub fn pointerDecl(val: Value) ?Module.Decl.Index { - return switch (val.ip_index) { - .none => switch (val.tag()) { - .decl_ref_mut => val.castTag(.decl_ref_mut).?.data.decl_index, - .extern_fn => val.castTag(.extern_fn).?.data.owner_decl, - .function => val.castTag(.function).?.data.owner_decl, - .variable => val.castTag(.variable).?.data.owner_decl, - .decl_ref => val.cast(Payload.Decl).?.data, + pub fn pointerDecl(val: Value, mod: *Module) ?Module.Decl.Index { + return switch (mod.intern_pool.indexToKey(val.ip_index)) { + .variable => |variable| variable.decl, + .extern_func => |extern_func| extern_func.decl, + .func => |func| mod.funcPtr(func.index).owner_decl, + .ptr => |ptr| switch (ptr.addr) { + .decl => |decl| decl, + .mut_decl => |mut_decl| mut_decl.decl, else => null, }, else => null, @@ -2463,95 +1807,15 @@ pub const Value = struct { } } - fn hashPtr(ptr_val: Value, hasher: *std.hash.Wyhash, mod: *Module) void { - switch (ptr_val.tag()) { - .decl_ref, - .decl_ref_mut, - .extern_fn, - .function, - .variable, - => { - const decl: Module.Decl.Index = ptr_val.pointerDecl().?; - std.hash.autoHash(hasher, decl); - }, - .comptime_field_ptr => { - std.hash.autoHash(hasher, Value.Tag.comptime_field_ptr); - }, - - .elem_ptr => { - const elem_ptr = ptr_val.castTag(.elem_ptr).?.data; - hashPtr(elem_ptr.array_ptr, hasher, mod); - std.hash.autoHash(hasher, Value.Tag.elem_ptr); - std.hash.autoHash(hasher, elem_ptr.index); - }, - .field_ptr => { - const field_ptr = ptr_val.castTag(.field_ptr).?.data; - std.hash.autoHash(hasher, Value.Tag.field_ptr); - hashPtr(field_ptr.container_ptr, hasher, mod); - std.hash.autoHash(hasher, field_ptr.field_index); - }, - .eu_payload_ptr => { - const err_union_ptr = ptr_val.castTag(.eu_payload_ptr).?.data; - std.hash.autoHash(hasher, Value.Tag.eu_payload_ptr); - hashPtr(err_union_ptr.container_ptr, hasher, mod); - }, - .opt_payload_ptr => { - const opt_ptr = ptr_val.castTag(.opt_payload_ptr).?.data; - std.hash.autoHash(hasher, Value.Tag.opt_payload_ptr); - hashPtr(opt_ptr.container_ptr, hasher, mod); - }, - - .the_only_possible_value, - .lazy_align, - .lazy_size, - => return hashInt(ptr_val, hasher, mod), - - else => unreachable, - } - } + pub const slice_ptr_index = 0; + pub const slice_len_index = 1; pub fn slicePtr(val: Value, mod: *Module) Value { - if (val.ip_index != .none) return mod.intern_pool.slicePtr(val.ip_index).toValue(); - return switch (val.tag()) { - .slice => val.castTag(.slice).?.data.ptr, - // TODO this should require being a slice tag, and not allow decl_ref, field_ptr, etc. 
- .decl_ref, .decl_ref_mut, .field_ptr, .elem_ptr, .comptime_field_ptr => val, - else => unreachable, - }; + return mod.intern_pool.slicePtr(val.ip_index).toValue(); } pub fn sliceLen(val: Value, mod: *Module) u64 { - if (val.ip_index != .none) return mod.intern_pool.sliceLen(val.ip_index).toValue().toUnsignedInt(mod); - return switch (val.tag()) { - .slice => val.castTag(.slice).?.data.len.toUnsignedInt(mod), - .decl_ref => { - const decl_index = val.castTag(.decl_ref).?.data; - const decl = mod.declPtr(decl_index); - if (decl.ty.zigTypeTag(mod) == .Array) { - return decl.ty.arrayLen(mod); - } else { - return 1; - } - }, - .decl_ref_mut => { - const decl_index = val.castTag(.decl_ref_mut).?.data.decl_index; - const decl = mod.declPtr(decl_index); - if (decl.ty.zigTypeTag(mod) == .Array) { - return decl.ty.arrayLen(mod); - } else { - return 1; - } - }, - .comptime_field_ptr => { - const payload = val.castTag(.comptime_field_ptr).?.data; - if (payload.field_ty.zigTypeTag(mod) == .Array) { - return payload.field_ty.arrayLen(mod); - } else { - return 1; - } - }, - else => unreachable, - }; + return mod.intern_pool.sliceLen(val.ip_index).toValue().toUnsignedInt(mod); } /// Asserts the value is a single-item pointer to an array, or an array, @@ -2560,14 +1824,6 @@ pub const Value = struct { switch (val.ip_index) { .undef => return Value.undef, .none => switch (val.tag()) { - // This is the case of accessing an element of an undef array. - .empty_array => unreachable, // out of bounds array index - - .empty_array_sentinel => { - assert(index == 0); // The only valid index for an empty array with sentinel. - return val.castTag(.empty_array_sentinel).?.data; - }, - .bytes => { const byte = val.castTag(.bytes).?.data[index]; return mod.intValue(Type.u8, byte); @@ -2579,128 +1835,101 @@ pub const Value = struct { return mod.intValue(Type.u8, byte); }, - // No matter the index; all the elements are the same! - .repeated => return val.castTag(.repeated).?.data, - .aggregate => return val.castTag(.aggregate).?.data[index], - .slice => return val.castTag(.slice).?.data.ptr.elemValue(mod, index), - - .decl_ref => return mod.declPtr(val.castTag(.decl_ref).?.data).val.elemValue(mod, index), - .decl_ref_mut => return mod.declPtr(val.castTag(.decl_ref_mut).?.data.decl_index).val.elemValue(mod, index), - .comptime_field_ptr => return val.castTag(.comptime_field_ptr).?.data.field_val.elemValue(mod, index), - .elem_ptr => { - const data = val.castTag(.elem_ptr).?.data; - return data.array_ptr.elemValue(mod, index + data.index); - }, - .field_ptr => { - const data = val.castTag(.field_ptr).?.data; - if (data.container_ptr.pointerDecl()) |decl_index| { - const container_decl = mod.declPtr(decl_index); - const field_type = data.container_ty.structFieldType(data.field_index, mod); - const field_val = try container_decl.val.fieldValue(field_type, mod, data.field_index); - return field_val.elemValue(mod, index); - } else unreachable; - }, - - // The child type of arrays which have only one possible value need - // to have only one possible value itself. 
- .the_only_possible_value => return val, - - .opt_payload_ptr => return val.castTag(.opt_payload_ptr).?.data.container_ptr.elemValue(mod, index), - .eu_payload_ptr => return val.castTag(.eu_payload_ptr).?.data.container_ptr.elemValue(mod, index), - - .opt_payload => return val.castTag(.opt_payload).?.data.elemValue(mod, index), - .eu_payload => return val.castTag(.eu_payload).?.data.elemValue(mod, index), else => unreachable, }, else => return switch (mod.intern_pool.indexToKey(val.ip_index)) { .ptr => |ptr| switch (ptr.addr) { - .@"var" => unreachable, .decl => |decl| mod.declPtr(decl).val.elemValue(mod, index), .mut_decl => |mut_decl| mod.declPtr(mut_decl.decl).val.elemValue(mod, index), .int, .eu_payload, .opt_payload => unreachable, .comptime_field => |field_val| field_val.toValue().elemValue(mod, index), .elem => |elem| elem.base.toValue().elemValue(mod, index + elem.index), - .field => unreachable, - }, - .aggregate => |aggregate| switch (aggregate.storage) { - .elems => |elems| elems[index].toValue(), - .repeated_elem => |elem| elem.toValue(), + .field => |field| if (field.base.toValue().pointerDecl(mod)) |decl_index| { + const base_decl = mod.declPtr(decl_index); + const field_val = try base_decl.val.fieldValue(mod, field.index); + return field_val.elemValue(mod, index); + } else unreachable, + }, + .aggregate => |aggregate| { + const len = mod.intern_pool.aggregateTypeLen(aggregate.ty); + if (index < len) return switch (aggregate.storage) { + .bytes => |bytes| try mod.intern(.{ .int = .{ + .ty = .u8_type, + .storage = .{ .u64 = bytes[index] }, + } }), + .elems => |elems| elems[index], + .repeated_elem => |elem| elem, + }.toValue(); + assert(index == len); + return mod.intern_pool.indexToKey(aggregate.ty).array_type.sentinel.toValue(); }, else => unreachable, }, } } - pub fn isLazyAlign(val: Value) bool { - return val.ip_index == .none and val.tag() == .lazy_align; - } - - pub fn isLazySize(val: Value) bool { - return val.ip_index == .none and val.tag() == .lazy_size; + pub fn isLazyAlign(val: Value, mod: *Module) bool { + return switch (mod.intern_pool.indexToKey(val.ip_index)) { + .int => |int| int.storage == .lazy_align, + else => false, + }; } - pub fn isRuntimeValue(val: Value) bool { - return val.ip_index == .none and val.tag() == .runtime_value; + pub fn isLazySize(val: Value, mod: *Module) bool { + return switch (mod.intern_pool.indexToKey(val.ip_index)) { + .int => |int| int.storage == .lazy_size, + else => false, + }; } - pub fn tagIsVariable(val: Value) bool { - return val.ip_index == .none and val.tag() == .variable; + pub fn isRuntimeValue(val: Value, mod: *Module) bool { + return mod.intern_pool.indexToKey(val.ip_index) == .runtime_value; } /// Returns true if a Value is backed by a variable pub fn isVariable(val: Value, mod: *Module) bool { - return switch (val.ip_index) { - .none => switch (val.tag()) { - .slice => val.castTag(.slice).?.data.ptr.isVariable(mod), - .comptime_field_ptr => val.castTag(.comptime_field_ptr).?.data.field_val.isVariable(mod), - .elem_ptr => val.castTag(.elem_ptr).?.data.array_ptr.isVariable(mod), - .field_ptr => val.castTag(.field_ptr).?.data.container_ptr.isVariable(mod), - .eu_payload_ptr => val.castTag(.eu_payload_ptr).?.data.container_ptr.isVariable(mod), - .opt_payload_ptr => val.castTag(.opt_payload_ptr).?.data.container_ptr.isVariable(mod), - .decl_ref => { - const decl = mod.declPtr(val.castTag(.decl_ref).?.data); + return switch (mod.intern_pool.indexToKey(val.ip_index)) { + .variable => true, + .ptr => |ptr| switch (ptr.addr) { 
+ .decl => |decl_index| { + const decl = mod.declPtr(decl_index); assert(decl.has_tv); return decl.val.isVariable(mod); }, - .decl_ref_mut => { - const decl = mod.declPtr(val.castTag(.decl_ref_mut).?.data.decl_index); + .mut_decl => |mut_decl| { + const decl = mod.declPtr(mut_decl.decl); assert(decl.has_tv); return decl.val.isVariable(mod); }, - - .variable => true, - else => false, + .int => false, + .eu_payload, .opt_payload => |base_ptr| base_ptr.toValue().isVariable(mod), + .comptime_field => |comptime_field| comptime_field.toValue().isVariable(mod), + .elem, .field => |base_index| base_index.base.toValue().isVariable(mod), }, else => false, }; } pub fn isPtrToThreadLocal(val: Value, mod: *Module) bool { - return switch (val.ip_index) { - .none => switch (val.tag()) { - .variable => false, - else => val.isPtrToThreadLocalInner(mod), - }, - else => val.isPtrToThreadLocalInner(mod), - }; - } - - fn isPtrToThreadLocalInner(val: Value, mod: *Module) bool { - return switch (val.ip_index) { - .none => switch (val.tag()) { - .slice => val.castTag(.slice).?.data.ptr.isPtrToThreadLocalInner(mod), - .comptime_field_ptr => val.castTag(.comptime_field_ptr).?.data.field_val.isPtrToThreadLocalInner(mod), - .elem_ptr => val.castTag(.elem_ptr).?.data.array_ptr.isPtrToThreadLocalInner(mod), - .field_ptr => val.castTag(.field_ptr).?.data.container_ptr.isPtrToThreadLocalInner(mod), - .eu_payload_ptr => val.castTag(.eu_payload_ptr).?.data.container_ptr.isPtrToThreadLocalInner(mod), - .opt_payload_ptr => val.castTag(.opt_payload_ptr).?.data.container_ptr.isPtrToThreadLocalInner(mod), - .decl_ref => mod.declPtr(val.castTag(.decl_ref).?.data).val.isPtrToThreadLocalInner(mod), - .decl_ref_mut => mod.declPtr(val.castTag(.decl_ref_mut).?.data.decl_index).val.isPtrToThreadLocalInner(mod), - - .variable => val.castTag(.variable).?.data.is_threadlocal, - else => false, + return switch (mod.intern_pool.indexToKey(val.ip_index)) { + .variable => |variable| variable.is_threadlocal, + .ptr => |ptr| switch (ptr.addr) { + .decl => |decl_index| { + const decl = mod.declPtr(decl_index); + assert(decl.has_tv); + return decl.val.isPtrToThreadLocal(mod); + }, + .mut_decl => |mut_decl| { + const decl = mod.declPtr(mut_decl.decl); + assert(decl.has_tv); + return decl.val.isPtrToThreadLocal(mod); + }, + .int => false, + .eu_payload, .opt_payload => |base_ptr| base_ptr.toValue().isPtrToThreadLocal(mod), + .comptime_field => |comptime_field| comptime_field.toValue().isPtrToThreadLocal(mod), + .elem, .field => |base_index| base_index.base.toValue().isPtrToThreadLocal(mod), }, else => false, }; @@ -2714,39 +1943,42 @@ pub const Value = struct { start: usize, end: usize, ) error{OutOfMemory}!Value { - return switch (val.tag()) { - .empty_array_sentinel => if (start == 0 and end == 1) val else Value.initTag(.empty_array), - .bytes => Tag.bytes.create(arena, val.castTag(.bytes).?.data[start..end]), - .str_lit => { - const str_lit = val.castTag(.str_lit).?.data; - return Tag.str_lit.create(arena, .{ - .index = @intCast(u32, str_lit.index + start), - .len = @intCast(u32, end - start), - }); + return switch (val.ip_index) { + .none => switch (val.tag()) { + .bytes => Tag.bytes.create(arena, val.castTag(.bytes).?.data[start..end]), + .str_lit => { + const str_lit = val.castTag(.str_lit).?.data; + return Tag.str_lit.create(arena, .{ + .index = @intCast(u32, str_lit.index + start), + .len = @intCast(u32, end - start), + }); + }, + else => unreachable, }, - .aggregate => Tag.aggregate.create(arena, 
val.castTag(.aggregate).?.data[start..end]), - .slice => sliceArray(val.castTag(.slice).?.data.ptr, mod, arena, start, end), - - .decl_ref => sliceArray(mod.declPtr(val.castTag(.decl_ref).?.data).val, mod, arena, start, end), - .decl_ref_mut => sliceArray(mod.declPtr(val.castTag(.decl_ref_mut).?.data.decl_index).val, mod, arena, start, end), - .comptime_field_ptr => sliceArray(val.castTag(.comptime_field_ptr).?.data.field_val, mod, arena, start, end), - .elem_ptr => blk: { - const elem_ptr = val.castTag(.elem_ptr).?.data; - break :blk sliceArray(elem_ptr.array_ptr, mod, arena, start + elem_ptr.index, end + elem_ptr.index); + else => switch (mod.intern_pool.indexToKey(val.ip_index)) { + .ptr => |ptr| switch (ptr.addr) { + .decl => |decl| try mod.declPtr(decl).val.sliceArray(mod, arena, start, end), + .mut_decl => |mut_decl| try mod.declPtr(mut_decl.decl).val.sliceArray(mod, arena, start, end), + .comptime_field => |comptime_field| try comptime_field.toValue().sliceArray(mod, arena, start, end), + .elem => |elem| try elem.base.toValue().sliceArray(mod, arena, start + elem.index, end + elem.index), + else => unreachable, + }, + .aggregate => |aggregate| (try mod.intern(.{ .aggregate = .{ + .ty = mod.intern_pool.typeOf(val.ip_index), + .storage = switch (aggregate.storage) { + .bytes => |bytes| .{ .bytes = bytes[start..end] }, + .elems => |elems| .{ .elems = elems[start..end] }, + .repeated_elem => |elem| .{ .repeated_elem = elem }, + }, + } })).toValue(), + else => unreachable, }, - - .repeated, - .the_only_possible_value, - => val, - - else => unreachable, }; } - pub fn fieldValue(val: Value, ty: Type, mod: *Module, index: usize) !Value { + pub fn fieldValue(val: Value, mod: *Module, index: usize) !Value { switch (val.ip_index) { .undef => return Value.undef, - .none => switch (val.tag()) { .aggregate => { const field_values = val.castTag(.aggregate).?.data; @@ -2757,13 +1989,14 @@ pub const Value = struct { // TODO assert the tag is correct return payload.val; }, - - .the_only_possible_value => return (try ty.onePossibleValue(mod)).?, - else => unreachable, }, else => return switch (mod.intern_pool.indexToKey(val.ip_index)) { .aggregate => |aggregate| switch (aggregate.storage) { + .bytes => |bytes| try mod.intern(.{ .int = .{ + .ty = .u8_type, + .storage = .{ .u64 = bytes[index] }, + } }), .elems => |elems| elems[index], .repeated_elem => |elem| elem, }.toValue(), @@ -2785,40 +2018,37 @@ pub const Value = struct { pub fn elemPtr( val: Value, ty: Type, - arena: Allocator, index: usize, mod: *Module, ) Allocator.Error!Value { const elem_ty = ty.elemType2(mod); - const ptr_val = switch (val.ip_index) { - .none => switch (val.tag()) { - .slice => val.castTag(.slice).?.data.ptr, - else => val, - }, - else => switch (mod.intern_pool.indexToKey(val.ip_index)) { - .ptr => |ptr| switch (ptr.len) { + const ptr_val = switch (mod.intern_pool.indexToKey(val.ip_index)) { + .ptr => |ptr| ptr: { + switch (ptr.addr) { + .elem => |elem| if (mod.intern_pool.typeOf(elem.base).toType().elemType2(mod).eql(elem_ty, mod)) + return (try mod.intern(.{ .ptr = .{ + .ty = ty.ip_index, + .addr = .{ .elem = .{ + .base = elem.base, + .index = elem.index + index, + } }, + } })).toValue(), + else => {}, + } + break :ptr switch (ptr.len) { .none => val, else => val.slicePtr(mod), - }, - else => val, + }; }, + else => val, }; - - if (ptr_val.ip_index == .none and ptr_val.tag() == .elem_ptr) { - const elem_ptr = ptr_val.castTag(.elem_ptr).?.data; - if (elem_ptr.elem_ty.eql(elem_ty, mod)) { - return Tag.elem_ptr.create(arena, 
.{ - .array_ptr = elem_ptr.array_ptr, - .elem_ty = elem_ptr.elem_ty, - .index = elem_ptr.index + index, - }); - } - } - return Tag.elem_ptr.create(arena, .{ - .array_ptr = ptr_val, - .elem_ty = elem_ty, - .index = index, - }); + return (try mod.intern(.{ .ptr = .{ + .ty = ty.ip_index, + .addr = .{ .elem = .{ + .base = ptr_val.ip_index, + .index = index, + } }, + } })).toValue(); } pub fn isUndef(val: Value, mod: *Module) bool { @@ -2840,69 +2070,44 @@ pub const Value = struct { /// Returns true if any value contained in `self` is undefined. pub fn anyUndef(val: Value, mod: *Module) !bool { if (val.ip_index == .none) return false; - switch (val.ip_index) { - .undef => return true, + return switch (val.ip_index) { + .undef => true, .none => switch (val.tag()) { - .slice => { - const payload = val.castTag(.slice).?; - const len = payload.data.len.toUnsignedInt(mod); - - for (0..len) |i| { - const elem_val = try payload.data.ptr.elemValue(mod, i); - if (try elem_val.anyUndef(mod)) return true; - } - }, - - .aggregate => { - const payload = val.castTag(.aggregate).?; - for (payload.data) |field| { - if (try field.anyUndef(mod)) return true; - } - }, - else => {}, + .aggregate => for (val.castTag(.aggregate).?.data) |field| { + if (try field.anyUndef(mod)) break true; + } else false, + else => false, }, else => switch (mod.intern_pool.indexToKey(val.ip_index)) { - .undef => return true, - .simple_value => |v| if (v == .undefined) return true, - .aggregate => |aggregate| switch (aggregate.storage) { - .elems => |elems| for (elems) |elem| { - if (try anyUndef(elem.toValue(), mod)) return true; - }, - .repeated_elem => |elem| if (try anyUndef(elem.toValue(), mod)) return true, - }, - else => {}, + .undef => true, + .simple_value => |v| v == .undefined, + .ptr => |ptr| switch (ptr.len) { + .none => false, + else => for (0..@intCast(usize, ptr.len.toValue().toUnsignedInt(mod))) |index| { + if (try (try val.elemValue(mod, index)).anyUndef(mod)) break true; + } else false, + }, + .aggregate => |aggregate| for (aggregate.storage.values()) |elem| { + if (try anyUndef(elem.toValue(), mod)) break true; + } else false, + else => false, }, - } - - return false; + }; } /// Asserts the value is not undefined and not unreachable. /// Integer value 0 is considered null because of C pointers. - pub fn isNull(val: Value, mod: *const Module) bool { + pub fn isNull(val: Value, mod: *Module) bool { return switch (val.ip_index) { .undef => unreachable, .unreachable_value => unreachable, .null_value => true, - .none => switch (val.tag()) { - .opt_payload => false, - - // If it's not one of those two tags then it must be a C pointer value, - // in which case the value 0 is null and other values are non-null. - - .the_only_possible_value => true, - - .inferred_alloc => unreachable, - .inferred_alloc_comptime => unreachable, - - else => false, - }, else => return switch (mod.intern_pool.indexToKey(val.ip_index)) { - .int => |int| switch (int.storage) { - .big_int => |big_int| big_int.eqZero(), - inline .u64, .i64 => |x| x == 0, + .int => { + var buf: BigIntSpace = undefined; + return val.toBigInt(&buf, mod).eqZero(); }, .opt => |opt| opt.val == .none, else => false, @@ -2914,53 +2119,28 @@ pub const Value = struct { /// unreachable. For error unions, prefer `errorUnionIsPayload` to find out whether /// something is an error or not because it works without having to figure out the /// string. 
- pub fn getError(self: Value) ?[]const u8 { - return switch (self.ip_index) { - .undef => unreachable, - .unreachable_value => unreachable, - .none => switch (self.tag()) { - .@"error" => self.castTag(.@"error").?.data.name, - .eu_payload => null, - - .inferred_alloc => unreachable, - .inferred_alloc_comptime => unreachable, - else => unreachable, + pub fn getError(self: Value, mod: *const Module) ?[]const u8 { + return mod.intern_pool.stringToSliceUnwrap(switch (mod.intern_pool.indexToKey(self.ip_index)) { + .err => |err| err.name.toOptional(), + .error_union => |error_union| switch (error_union.val) { + .err_name => |err_name| err_name.toOptional(), + .payload => .none, }, else => unreachable, - }; + }); } /// Assumes the type is an error union. Returns true if and only if the value is /// the error union payload, not an error. - pub fn errorUnionIsPayload(val: Value) bool { - return switch (val.ip_index) { - .undef => unreachable, - .none => switch (val.tag()) { - .eu_payload => true, - else => false, - - .inferred_alloc => unreachable, - .inferred_alloc_comptime => unreachable, - }, - else => false, - }; + pub fn errorUnionIsPayload(val: Value, mod: *const Module) bool { + return mod.intern_pool.indexToKey(val.ip_index).error_union.val == .payload; } /// Value of the optional, null if optional has no payload. pub fn optionalValue(val: Value, mod: *const Module) ?Value { - return switch (val.ip_index) { - .none => if (val.isNull(mod)) null - // Valid for optional representation to be the direct value - // and not use opt_payload. - else if (val.castTag(.opt_payload)) |p| p.data else val, - .null_value => null, - else => switch (mod.intern_pool.indexToKey(val.ip_index)) { - .opt => |opt| switch (opt.val) { - .none => null, - else => opt.val.toValue(), - }, - else => unreachable, - }, + return switch (mod.intern_pool.indexToKey(val.ip_index).opt.val) { + .none => null, + else => |index| index.toValue(), }; } @@ -3001,28 +2181,8 @@ pub const Value = struct { } pub fn intToFloatScalar(val: Value, float_ty: Type, mod: *Module, opt_sema: ?*Sema) !Value { - switch (val.ip_index) { - .undef => return val, - .none => switch (val.tag()) { - .the_only_possible_value => return mod.floatValue(float_ty, 0), // for i0, u0 - .lazy_align => { - const ty = val.castTag(.lazy_align).?.data; - if (opt_sema) |sema| { - return intToFloatInner((try ty.abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar, float_ty, mod); - } else { - return intToFloatInner(ty.abiAlignment(mod), float_ty, mod); - } - }, - .lazy_size => { - const ty = val.castTag(.lazy_size).?.data; - if (opt_sema) |sema| { - return intToFloatInner((try ty.abiSizeAdvanced(mod, .{ .sema = sema })).scalar, float_ty, mod); - } else { - return intToFloatInner(ty.abiSize(mod), float_ty, mod); - } - }, - else => unreachable, - }, + return switch (val.ip_index) { + .undef => val, else => return switch (mod.intern_pool.indexToKey(val.ip_index)) { .int => |int| switch (int.storage) { .big_int => |big_int| { @@ -3030,10 +2190,20 @@ pub const Value = struct { return mod.floatValue(float_ty, float); }, inline .u64, .i64 => |x| intToFloatInner(x, float_ty, mod), + .lazy_align => |ty| if (opt_sema) |sema| { + return intToFloatInner((try ty.toType().abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar, float_ty, mod); + } else { + return intToFloatInner(ty.toType().abiAlignment(mod), float_ty, mod); + }, + .lazy_size => |ty| if (opt_sema) |sema| { + return intToFloatInner((try ty.toType().abiSizeAdvanced(mod, .{ .sema = sema })).scalar, float_ty, mod); + } 
else { + return intToFloatInner(ty.toType().abiSize(mod), float_ty, mod); + }, }, else => unreachable, }, - } + }; } fn intToFloatInner(x: anytype, dest_ty: Type, mod: *Module) !Value { @@ -4768,81 +3938,6 @@ pub const Value = struct { pub const Payload = struct { tag: Tag, - pub const Function = struct { - base: Payload, - data: *Module.Fn, - }; - - pub const ExternFn = struct { - base: Payload, - data: *Module.ExternFn, - }; - - pub const Decl = struct { - base: Payload, - data: Module.Decl.Index, - }; - - pub const Variable = struct { - base: Payload, - data: *Module.Var, - }; - - pub const SubValue = struct { - base: Payload, - data: Value, - }; - - pub const DeclRefMut = struct { - pub const base_tag = Tag.decl_ref_mut; - - base: Payload = Payload{ .tag = base_tag }, - data: Data, - - pub const Data = struct { - decl_index: Module.Decl.Index, - runtime_index: RuntimeIndex, - }; - }; - - pub const PayloadPtr = struct { - base: Payload, - data: struct { - container_ptr: Value, - container_ty: Type, - }, - }; - - pub const ComptimeFieldPtr = struct { - base: Payload, - data: struct { - field_val: Value, - field_ty: Type, - }, - }; - - pub const ElemPtr = struct { - pub const base_tag = Tag.elem_ptr; - - base: Payload = Payload{ .tag = base_tag }, - data: struct { - array_ptr: Value, - elem_ty: Type, - index: usize, - }, - }; - - pub const FieldPtr = struct { - pub const base_tag = Tag.field_ptr; - - base: Payload = Payload{ .tag = base_tag }, - data: struct { - container_ptr: Value, - container_ty: Type, - field_index: usize, - }, - }; - pub const Bytes = struct { base: Payload, /// Includes the sentinel, if any. @@ -4861,32 +3956,6 @@ pub const Value = struct { data: []Value, }; - pub const Slice = struct { - base: Payload, - data: struct { - ptr: Value, - len: Value, - }, - - pub const ptr_index = 0; - pub const len_index = 1; - }; - - pub const Ty = struct { - base: Payload, - data: Type, - }; - - pub const Error = struct { - base: Payload = .{ .tag = .@"error" }, - data: struct { - /// `name` is owned by `Module` and will be valid for the entire - /// duration of the compilation. 
- /// TODO revisit this when we have the concept of the error tag type - name: []const u8, - }, - }; - pub const InferredAlloc = struct { pub const base_tag = Tag.inferred_alloc; diff --git a/tools/lldb_pretty_printers.py b/tools/lldb_pretty_printers.py index 6cccf77ee0..555cda135d 100644 --- a/tools/lldb_pretty_printers.py +++ b/tools/lldb_pretty_printers.py @@ -533,8 +533,8 @@ type_tag_handlers = { 'empty_struct_literal': lambda payload: '@TypeOf(.{})', 'anyerror_void_error_union': lambda payload: 'anyerror!void', - 'const_slice_u8': lambda payload: '[]const u8', - 'const_slice_u8_sentinel_0': lambda payload: '[:0]const u8', + 'slice_const_u8': lambda payload: '[]const u8', + 'slice_const_u8_sentinel_0': lambda payload: '[:0]const u8', 'fn_noreturn_no_args': lambda payload: 'fn() noreturn', 'fn_void_no_args': lambda payload: 'fn() void', 'fn_naked_noreturn_no_args': lambda payload: 'fn() callconv(.Naked) noreturn', @@ -560,7 +560,7 @@ type_tag_handlers = { 'many_mut_pointer': lambda payload: '[*]%s' % type_Type_SummaryProvider(payload), 'c_const_pointer': lambda payload: '[*c]const %s' % type_Type_SummaryProvider(payload), 'c_mut_pointer': lambda payload: '[*c]%s' % type_Type_SummaryProvider(payload), - 'const_slice': lambda payload: '[]const %s' % type_Type_SummaryProvider(payload), + 'slice_const': lambda payload: '[]const %s' % type_Type_SummaryProvider(payload), 'mut_slice': lambda payload: '[]%s' % type_Type_SummaryProvider(payload), 'int_signed': lambda payload: 'i%d' % payload.unsigned, 'int_unsigned': lambda payload: 'u%d' % payload.unsigned, diff --git a/tools/stage2_gdb_pretty_printers.py b/tools/stage2_gdb_pretty_printers.py index bd64916536..f10e924855 100644 --- a/tools/stage2_gdb_pretty_printers.py +++ b/tools/stage2_gdb_pretty_printers.py @@ -18,7 +18,7 @@ class TypePrinter: 'many_mut_pointer': 'Type.Payload.ElemType', 'c_const_pointer': 'Type.Payload.ElemType', 'c_mut_pointer': 'Type.Payload.ElemType', - 'const_slice': 'Type.Payload.ElemType', + 'slice_const': 'Type.Payload.ElemType', 'mut_slice': 'Type.Payload.ElemType', 'optional': 'Type.Payload.ElemType', 'optional_single_mut_pointer': 'Type.Payload.ElemType', -- cgit v1.2.3 From 1a4626d2cf8b9985833f97b6fea6ea03011ada4e Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Thu, 25 May 2023 05:47:25 -0400 Subject: InternPool: remove more legacy values Reinstate some tags that will be needed for comptime init. --- src/Air.zig | 7 - src/InternPool.zig | 60 +- src/Liveness.zig | 8 +- src/Liveness/Verify.zig | 5 +- src/Module.zig | 266 ++--- src/Sema.zig | 2498 ++++++++++++++++++++++-------------------- src/TypedValue.zig | 24 +- src/arch/aarch64/CodeGen.zig | 2 - src/arch/arm/CodeGen.zig | 2 - src/arch/riscv64/CodeGen.zig | 2 - src/arch/sparc64/CodeGen.zig | 2 - src/arch/wasm/CodeGen.zig | 27 +- src/arch/x86_64/CodeGen.zig | 5 +- src/codegen.zig | 144 --- src/codegen/c.zig | 21 +- src/codegen/llvm.zig | 326 +----- src/codegen/spirv.zig | 73 +- src/print_air.zig | 3 +- src/value.zig | 979 +++++++++-------- 19 files changed, 2063 insertions(+), 2391 deletions(-) (limited to 'src/arch') diff --git a/src/Air.zig b/src/Air.zig index 9dcbe174ec..4f36cf8bc1 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -400,8 +400,6 @@ pub const Inst = struct { /// A comptime-known value. Uses the `ty_pl` field, payload is index of /// `values` array. constant, - /// A comptime-known type. Uses the `ty` field. - const_ty, /// A comptime-known value via an index into the InternPool. /// Uses the `interned` field. 
interned, @@ -1257,8 +1255,6 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index, ip: InternPool) Type { .error_set_has_value, => return Type.bool, - .const_ty => return Type.type, - .alloc, .ret_ptr, .err_return_trace, @@ -1435,7 +1431,6 @@ pub fn getRefType(air: Air, ref: Air.Inst.Ref) Type { const air_tags = air.instructions.items(.tag); const air_datas = air.instructions.items(.data); return switch (air_tags[inst_index]) { - .const_ty => air_datas[inst_index].ty, .interned => air_datas[inst_index].interned.toType(), else => unreachable, }; @@ -1501,7 +1496,6 @@ pub fn value(air: Air, inst: Inst.Ref, mod: *Module) !?Value { const air_datas = air.instructions.items(.data); switch (air.instructions.items(.tag)[inst_index]) { .constant => return air.values[air_datas[inst_index].ty_pl.payload], - .const_ty => unreachable, .interned => return air_datas[inst_index].interned.toValue(), else => return air.typeOfIndex(inst_index, mod.intern_pool).onePossibleValue(mod), } @@ -1658,7 +1652,6 @@ pub fn mustLower(air: Air, inst: Air.Inst.Index, ip: InternPool) bool { .cmp_vector, .cmp_vector_optimized, .constant, - .const_ty, .interned, .is_null, .is_non_null, diff --git a/src/InternPool.zig b/src/InternPool.zig index ec4d1df45f..1dc43a467d 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -553,10 +553,10 @@ pub const Key = union(enum) { pub const Addr = union(enum) { decl: Module.Decl.Index, mut_decl: MutDecl, + comptime_field: Index, int: Index, eu_payload: Index, opt_payload: Index, - comptime_field: Index, elem: BaseIndex, field: BaseIndex, @@ -703,24 +703,27 @@ pub const Key = union(enum) { .aggregate => |aggregate| { std.hash.autoHash(hasher, aggregate.ty); switch (ip.indexToKey(aggregate.ty)) { - .array_type => |array_type| if (array_type.child == .u8_type) switch (aggregate.storage) { - .bytes => |bytes| for (bytes) |byte| std.hash.autoHash(hasher, byte), - .elems => |elems| { - var buffer: Key.Int.Storage.BigIntSpace = undefined; - for (elems) |elem| std.hash.autoHash( - hasher, - ip.indexToKey(elem).int.storage.toBigInt(&buffer).to(u8) catch - unreachable, - ); - }, - .repeated_elem => |elem| { - const len = ip.aggregateTypeLen(aggregate.ty); - var buffer: Key.Int.Storage.BigIntSpace = undefined; - const byte = ip.indexToKey(elem).int.storage.toBigInt(&buffer).to(u8) catch - unreachable; - var i: u64 = 0; - while (i < len) : (i += 1) std.hash.autoHash(hasher, byte); - }, + .array_type => |array_type| if (array_type.child == .u8_type) { + switch (aggregate.storage) { + .bytes => |bytes| for (bytes) |byte| std.hash.autoHash(hasher, byte), + .elems => |elems| { + var buffer: Key.Int.Storage.BigIntSpace = undefined; + for (elems) |elem| std.hash.autoHash( + hasher, + ip.indexToKey(elem).int.storage.toBigInt(&buffer).to(u8) catch + unreachable, + ); + }, + .repeated_elem => |elem| { + const len = ip.aggregateTypeLen(aggregate.ty); + var buffer: Key.Int.Storage.BigIntSpace = undefined; + const byte = ip.indexToKey(elem).int.storage.toBigInt(&buffer).to(u8) catch + unreachable; + var i: u64 = 0; + while (i < len) : (i += 1) std.hash.autoHash(hasher, byte); + }, + } + return; }, else => {}, } @@ -2860,6 +2863,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { }, .array_type => |array_type| { assert(array_type.child != .none); + assert(array_type.sentinel == .none or ip.typeOf(array_type.sentinel) == array_type.child); if (std.math.cast(u32, array_type.len)) |len| { if (array_type.sentinel == .none) { @@ -3230,7 +3234,23 @@ pub fn get(ip: *InternPool, gpa: 
Allocator, key: Key) Allocator.Error!Index { }, .int => |int| b: { - assert(int.ty == .comptime_int_type or ip.indexToKey(int.ty) == .int_type); + switch (int.ty) { + .usize_type, + .isize_type, + .c_char_type, + .c_short_type, + .c_ushort_type, + .c_int_type, + .c_uint_type, + .c_long_type, + .c_ulong_type, + .c_longlong_type, + .c_ulonglong_type, + .c_longdouble_type, + .comptime_int_type, + => {}, + else => assert(ip.indexToKey(int.ty) == .int_type), + } switch (int.storage) { .u64, .i64, .big_int => {}, .lazy_align, .lazy_size => |lazy_ty| { diff --git a/src/Liveness.zig b/src/Liveness.zig index 856123fa9d..c30708e140 100644 --- a/src/Liveness.zig +++ b/src/Liveness.zig @@ -323,7 +323,6 @@ pub fn categorizeOperand( .alloc, .ret_ptr, .constant, - .const_ty, .interned, .trap, .breakpoint, @@ -975,7 +974,6 @@ fn analyzeInst( => return analyzeOperands(a, pass, data, inst, .{ .none, .none, .none }), .constant, - .const_ty, .interned, => unreachable, @@ -1272,7 +1270,7 @@ fn analyzeOperands( // Don't compute any liveness for constants switch (inst_tags[operand]) { - .constant, .const_ty, .interned => continue, + .constant, .interned => continue, else => {}, } @@ -1308,7 +1306,7 @@ fn analyzeOperands( // Don't compute any liveness for constants switch (inst_tags[operand]) { - .constant, .const_ty, .interned => continue, + .constant, .interned => continue, else => {}, } @@ -1842,7 +1840,7 @@ fn AnalyzeBigOperands(comptime pass: LivenessPass) type { // Don't compute any liveness for constants const inst_tags = big.a.air.instructions.items(.tag); switch (inst_tags[operand]) { - .constant, .const_ty, .interned => return, + .constant, .interned => return, else => {}, } diff --git a/src/Liveness/Verify.zig b/src/Liveness/Verify.zig index 923e6f5658..703d561559 100644 --- a/src/Liveness/Verify.zig +++ b/src/Liveness/Verify.zig @@ -43,7 +43,6 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void { .alloc, .ret_ptr, .constant, - .const_ty, .interned, .breakpoint, .dbg_stmt, @@ -557,7 +556,7 @@ fn verifyDeath(self: *Verify, inst: Air.Inst.Index, operand: Air.Inst.Index) Err fn verifyOperand(self: *Verify, inst: Air.Inst.Index, op_ref: Air.Inst.Ref, dies: bool) Error!void { const operand = Air.refToIndexAllowNone(op_ref) orelse return; switch (self.air.instructions.items(.tag)[operand]) { - .constant, .const_ty, .interned => {}, + .constant, .interned => {}, else => { if (dies) { if (!self.live.remove(operand)) return invalid("%{}: dead operand %{} reused and killed again", .{ inst, operand }); @@ -579,7 +578,7 @@ fn verifyInst( } const tag = self.air.instructions.items(.tag); switch (tag[inst]) { - .constant, .const_ty, .interned => unreachable, + .constant, .interned => unreachable, else => { if (self.liveness.isUnused(inst)) { assert(!self.live.contains(inst)); diff --git a/src/Module.zig b/src/Module.zig index fa24c237b4..47f7643b9f 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -85,20 +85,13 @@ import_table: std.StringArrayHashMapUnmanaged(*File) = .{}, /// Keys are fully resolved file paths. This table owns the keys and values. embed_table: std.StringHashMapUnmanaged(*EmbedFile) = .{}, -/// This is a temporary addition to stage2 in order to match legacy behavior, -/// however the end-game once the lang spec is settled will be to use a global -/// InternPool for comptime memoized objects, making this behavior consistent across all types, -/// not only string literals. 
Or, we might decide to not guarantee string literals -/// to have equal comptime pointers, in which case this field can be deleted (perhaps -/// the commit that introduced it can simply be reverted). -/// This table uses an optional index so that when a Decl is destroyed, the string literal -/// is still reclaimable by a future Decl. -string_literal_table: std.HashMapUnmanaged(StringLiteralContext.Key, Decl.OptionalIndex, StringLiteralContext, std.hash_map.default_max_load_percentage) = .{}, -string_literal_bytes: ArrayListUnmanaged(u8) = .{}, - /// Stores all Type and Value objects; periodically garbage collected. intern_pool: InternPool = .{}, +/// This is currently only used for string literals, however the end-game once the lang spec +/// is settled will be to make this behavior consistent across all types. +memoized_decls: std.AutoHashMapUnmanaged(InternPool.Index, Decl.Index) = .{}, + /// The set of all the generic function instantiations. This is used so that when a generic /// function is called twice with the same comptime parameter arguments, both calls dispatch /// to the same function. @@ -208,39 +201,6 @@ pub const CImportError = struct { } }; -pub const StringLiteralContext = struct { - bytes: *ArrayListUnmanaged(u8), - - pub const Key = struct { - index: u32, - len: u32, - }; - - pub fn eql(self: @This(), a: Key, b: Key) bool { - _ = self; - return a.index == b.index and a.len == b.len; - } - - pub fn hash(self: @This(), x: Key) u64 { - const x_slice = self.bytes.items[x.index..][0..x.len]; - return std.hash_map.hashString(x_slice); - } -}; - -pub const StringLiteralAdapter = struct { - bytes: *ArrayListUnmanaged(u8), - - pub fn eql(self: @This(), a_slice: []const u8, b: StringLiteralContext.Key) bool { - const b_slice = self.bytes.items[b.index..][0..b.len]; - return mem.eql(u8, a_slice, b_slice); - } - - pub fn hash(self: @This(), adapted_key: []const u8) u64 { - _ = self; - return std.hash_map.hashString(adapted_key); - } -}; - const MonomorphedFuncsSet = std.HashMapUnmanaged( Fn.Index, void, @@ -660,14 +620,8 @@ pub const Decl = struct { } mod.destroyFunc(func); } + _ = mod.memoized_decls.remove(decl.val.ip_index); if (decl.value_arena) |value_arena| { - if (decl.owns_tv) { - if (decl.val.castTag(.str_lit)) |str_lit| { - mod.string_literal_table.getPtrContext(str_lit.data, .{ - .bytes = &mod.string_literal_bytes, - }).?.* = .none; - } - } value_arena.deinit(gpa); decl.value_arena = null; decl.has_tv = false; @@ -834,7 +788,7 @@ pub const Decl = struct { pub fn getStructIndex(decl: Decl, mod: *Module) Struct.OptionalIndex { if (!decl.owns_tv) return .none; if (decl.val.ip_index == .none) return .none; - return mod.intern_pool.indexToStructType(decl.val.ip_index); + return mod.intern_pool.indexToStructType(decl.val.toIntern()); } /// If the Decl has a value and it is a union, return it, @@ -875,7 +829,7 @@ pub const Decl = struct { return switch (decl.val.ip_index) { .empty_struct_type => .none, .none => .none, - else => switch (mod.intern_pool.indexToKey(decl.val.ip_index)) { + else => switch (mod.intern_pool.indexToKey(decl.val.toIntern())) { .opaque_type => |opaque_type| opaque_type.namespace.toOptional(), .struct_type => |struct_type| struct_type.namespace, .union_type => |union_type| mod.unionPtr(union_type.index).namespace.toOptional(), @@ -919,7 +873,7 @@ pub const Decl = struct { pub fn isExtern(decl: Decl, mod: *Module) bool { assert(decl.has_tv); - return switch (mod.intern_pool.indexToKey(decl.val.ip_index)) { + return switch 
(mod.intern_pool.indexToKey(decl.val.toIntern())) { .variable => |variable| variable.is_extern, .extern_func => true, else => false, @@ -1577,11 +1531,11 @@ pub const Fn = struct { ip: *InternPool, gpa: Allocator, ) !void { - switch (err_set_ty.ip_index) { + switch (err_set_ty.toIntern()) { .anyerror_type => { self.is_anyerror = true; }, - else => switch (ip.indexToKey(err_set_ty.ip_index)) { + else => switch (ip.indexToKey(err_set_ty.toIntern())) { .error_set_type => |error_set_type| { for (error_set_type.names) |name| { try self.errors.put(gpa, name, {}); @@ -3396,8 +3350,7 @@ pub fn deinit(mod: *Module) void { mod.namespaces_free_list.deinit(gpa); mod.allocated_namespaces.deinit(gpa); - mod.string_literal_table.deinit(gpa); - mod.string_literal_bytes.deinit(gpa); + mod.memoized_decls.deinit(gpa); mod.intern_pool.deinit(gpa); } @@ -4702,7 +4655,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { return true; } - if (mod.intern_pool.indexToFunc(decl_tv.val.ip_index).unwrap()) |func_index| { + if (mod.intern_pool.indexToFunc(decl_tv.val.toIntern()).unwrap()) |func_index| { const func = mod.funcPtr(func_index); const owns_tv = func.owner_decl == decl_index; if (owns_tv) { @@ -4749,10 +4702,10 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { decl.owns_tv = false; var queue_linker_work = false; var is_extern = false; - switch (decl_tv.val.ip_index) { + switch (decl_tv.val.toIntern()) { .generic_poison => unreachable, .unreachable_value => unreachable, - else => switch (mod.intern_pool.indexToKey(decl_tv.val.ip_index)) { + else => switch (mod.intern_pool.indexToKey(decl_tv.val.toIntern())) { .variable => |variable| if (variable.decl == decl_index) { decl.owns_tv = true; queue_linker_work = true; @@ -4792,7 +4745,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { break :blk (try decl_arena_allocator.dupeZ(u8, bytes)).ptr; }; decl.@"addrspace" = blk: { - const addrspace_ctx: Sema.AddressSpaceContext = switch (mod.intern_pool.indexToKey(decl_tv.val.ip_index)) { + const addrspace_ctx: Sema.AddressSpaceContext = switch (mod.intern_pool.indexToKey(decl_tv.val.toIntern())) { .variable => .variable, .extern_func, .func => .function, else => .constant, @@ -6497,40 +6450,33 @@ pub fn populateTestFunctions( const array_decl_index = d: { // Add mod.test_functions to an array decl then make the test_functions // decl reference it as a slice. - var new_decl_arena = std.heap.ArenaAllocator.init(gpa); - errdefer new_decl_arena.deinit(); - const arena = new_decl_arena.allocator(); - - const test_fn_vals = try arena.alloc(Value, mod.test_functions.count()); - const array_decl_index = try mod.createAnonymousDeclFromDecl(decl, decl.src_namespace, null, .{ - .ty = try mod.arrayType(.{ - .len = test_fn_vals.len, - .child = test_fn_ty.ip_index, - .sentinel = .none, - }), - .val = try Value.Tag.aggregate.create(arena, test_fn_vals), - }); - const array_decl = mod.declPtr(array_decl_index); + const test_fn_vals = try gpa.alloc(InternPool.Index, mod.test_functions.count()); + defer gpa.free(test_fn_vals); // Add a dependency on each test name and function pointer. - try array_decl.dependencies.ensureUnusedCapacity(gpa, test_fn_vals.len * 2); + var array_decl_dependencies = std.ArrayListUnmanaged(Decl.Index){}; + defer array_decl_dependencies.deinit(gpa); + try array_decl_dependencies.ensureUnusedCapacity(gpa, test_fn_vals.len * 2); - for (mod.test_functions.keys(), 0..) 
|test_decl_index, i| { + for (test_fn_vals, mod.test_functions.keys()) |*test_fn_val, test_decl_index| { const test_decl = mod.declPtr(test_decl_index); - const test_name_slice = mem.sliceTo(test_decl.name, 0); const test_name_decl_index = n: { - var name_decl_arena = std.heap.ArenaAllocator.init(gpa); - errdefer name_decl_arena.deinit(); - const bytes = try name_decl_arena.allocator().dupe(u8, test_name_slice); - const test_name_decl_index = try mod.createAnonymousDeclFromDecl(array_decl, array_decl.src_namespace, null, .{ - .ty = try mod.arrayType(.{ .len = bytes.len, .child = .u8_type }), - .val = try Value.Tag.bytes.create(name_decl_arena.allocator(), bytes), + const test_decl_name = mem.span(test_decl.name); + const test_name_decl_ty = try mod.arrayType(.{ + .len = test_decl_name.len, + .child = .u8_type, + }); + const test_name_decl_index = try mod.createAnonymousDeclFromDecl(decl, decl.src_namespace, null, .{ + .ty = test_name_decl_ty, + .val = (try mod.intern(.{ .aggregate = .{ + .ty = test_name_decl_ty.toIntern(), + .storage = .{ .bytes = test_decl_name }, + } })).toValue(), }); - try mod.declPtr(test_name_decl_index).finalizeNewArena(&name_decl_arena); break :n test_name_decl_index; }; - array_decl.dependencies.putAssumeCapacityNoClobber(test_decl_index, .normal); - array_decl.dependencies.putAssumeCapacityNoClobber(test_name_decl_index, .normal); + array_decl_dependencies.appendAssumeCapacity(test_decl_index); + array_decl_dependencies.appendAssumeCapacity(test_name_decl_index); try mod.linkerUpdateDecl(test_name_decl_index); const test_fn_fields = .{ @@ -6541,36 +6487,51 @@ pub fn populateTestFunctions( } }), // func try mod.intern(.{ .ptr = .{ - .ty = test_decl.ty.ip_index, + .ty = test_decl.ty.toIntern(), .addr = .{ .decl = test_decl_index }, } }), // async_frame_size null_usize, }; - test_fn_vals[i] = (try mod.intern(.{ .aggregate = .{ - .ty = test_fn_ty.ip_index, + test_fn_val.* = try mod.intern(.{ .aggregate = .{ + .ty = test_fn_ty.toIntern(), .storage = .{ .elems = &test_fn_fields }, - } })).toValue(); + } }); + } + + const array_decl_ty = try mod.arrayType(.{ + .len = test_fn_vals.len, + .child = test_fn_ty.toIntern(), + .sentinel = .none, + }); + const array_decl_index = try mod.createAnonymousDeclFromDecl(decl, decl.src_namespace, null, .{ + .ty = array_decl_ty, + .val = (try mod.intern(.{ .aggregate = .{ + .ty = array_decl_ty.toIntern(), + .storage = .{ .elems = test_fn_vals }, + } })).toValue(), + }); + for (array_decl_dependencies.items) |array_decl_dependency| { + try mod.declareDeclDependency(array_decl_index, array_decl_dependency); } - try array_decl.finalizeNewArena(&new_decl_arena); break :d array_decl_index; }; try mod.linkerUpdateDecl(array_decl_index); { const new_ty = try mod.ptrType(.{ - .elem_type = test_fn_ty.ip_index, + .elem_type = test_fn_ty.toIntern(), .is_const = true, .size = .Slice, }); const new_val = decl.val; const new_init = try mod.intern(.{ .ptr = .{ - .ty = new_ty.ip_index, + .ty = new_ty.toIntern(), .addr = .{ .decl = array_decl_index }, - .len = (try mod.intValue(Type.usize, mod.test_functions.count())).ip_index, + .len = (try mod.intValue(Type.usize, mod.test_functions.count())).toIntern(), } }); - mod.intern_pool.mutateVarInit(decl.val.ip_index, new_init); + mod.intern_pool.mutateVarInit(decl.val.toIntern(), new_init); // Since we are replacing the Decl's value we must perform cleanup on the // previous value. 
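The recurring pattern in the hunks above (`try mod.intern(...)` yielding an `InternPool.Index`, `.toValue()` wrapping it, and `.toIntern()` recovering it) is what replaces the deleted arena-backed `Value.Tag.*` payloads. A minimal sketch of that round trip, reusing only helpers already visible in this diff (`Module.arrayType`, `Module.intern`); this is illustrative, not code from the tree:

    // Sketch: builds the same kind of interned byte-array value that
    // populateTestFunctions now creates for each test name.
    fn internedTestName(mod: *Module, name: []const u8) !Value {
        const ty = try mod.arrayType(.{
            .len = name.len,
            .child = .u8_type,
        });
        const index = try mod.intern(.{ .aggregate = .{
            .ty = ty.toIntern(),
            .storage = .{ .bytes = name },
        } });
        // `toValue` wraps the InternPool.Index; `toIntern` recovers it.
        return index.toValue();
    }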
@@ -6650,47 +6611,32 @@ fn reportRetryableFileError( } pub fn markReferencedDeclsAlive(mod: *Module, val: Value) void { - switch (val.ip_index) { - .none => switch (val.tag()) { - .aggregate => { - for (val.castTag(.aggregate).?.data) |field_val| { - mod.markReferencedDeclsAlive(field_val); - } - }, - .@"union" => { - const data = val.castTag(.@"union").?.data; - mod.markReferencedDeclsAlive(data.tag); - mod.markReferencedDeclsAlive(data.val); - }, - else => {}, + switch (mod.intern_pool.indexToKey(val.toIntern())) { + .variable => |variable| mod.markDeclIndexAlive(variable.decl), + .extern_func => |extern_func| mod.markDeclIndexAlive(extern_func.decl), + .func => |func| mod.markDeclIndexAlive(mod.funcPtr(func.index).owner_decl), + .error_union => |error_union| switch (error_union.val) { + .err_name => {}, + .payload => |payload| mod.markReferencedDeclsAlive(payload.toValue()), }, - else => switch (mod.intern_pool.indexToKey(val.ip_index)) { - .variable => |variable| mod.markDeclIndexAlive(variable.decl), - .extern_func => |extern_func| mod.markDeclIndexAlive(extern_func.decl), - .func => |func| mod.markDeclIndexAlive(mod.funcPtr(func.index).owner_decl), - .error_union => |error_union| switch (error_union.val) { - .err_name => {}, - .payload => |payload| mod.markReferencedDeclsAlive(payload.toValue()), - }, - .ptr => |ptr| { - switch (ptr.addr) { - .decl => |decl| mod.markDeclIndexAlive(decl), - .mut_decl => |mut_decl| mod.markDeclIndexAlive(mut_decl.decl), - .int, .comptime_field => {}, - .eu_payload, .opt_payload => |parent| mod.markReferencedDeclsAlive(parent.toValue()), - .elem, .field => |base_index| mod.markReferencedDeclsAlive(base_index.base.toValue()), - } - if (ptr.len != .none) mod.markReferencedDeclsAlive(ptr.len.toValue()); - }, - .opt => |opt| if (opt.val != .none) mod.markReferencedDeclsAlive(opt.val.toValue()), - .aggregate => |aggregate| for (aggregate.storage.values()) |elem| - mod.markReferencedDeclsAlive(elem.toValue()), - .un => |un| { - mod.markReferencedDeclsAlive(un.tag.toValue()); - mod.markReferencedDeclsAlive(un.val.toValue()); - }, - else => {}, + .ptr => |ptr| { + switch (ptr.addr) { + .decl => |decl| mod.markDeclIndexAlive(decl), + .mut_decl => |mut_decl| mod.markDeclIndexAlive(mut_decl.decl), + .int, .comptime_field => {}, + .eu_payload, .opt_payload => |parent| mod.markReferencedDeclsAlive(parent.toValue()), + .elem, .field => |base_index| mod.markReferencedDeclsAlive(base_index.base.toValue()), + } + if (ptr.len != .none) mod.markReferencedDeclsAlive(ptr.len.toValue()); + }, + .opt => |opt| if (opt.val != .none) mod.markReferencedDeclsAlive(opt.val.toValue()), + .aggregate => |aggregate| for (aggregate.storage.values()) |elem| + mod.markReferencedDeclsAlive(elem.toValue()), + .un => |un| { + mod.markReferencedDeclsAlive(un.tag.toValue()); + mod.markReferencedDeclsAlive(un.val.toValue()); }, + else => {}, } } @@ -6796,11 +6742,11 @@ pub fn ptrType(mod: *Module, info: InternPool.Key.PtrType) Allocator.Error!Type } pub fn singleMutPtrType(mod: *Module, child_type: Type) Allocator.Error!Type { - return ptrType(mod, .{ .elem_type = child_type.ip_index }); + return ptrType(mod, .{ .elem_type = child_type.toIntern() }); } pub fn singleConstPtrType(mod: *Module, child_type: Type) Allocator.Error!Type { - return ptrType(mod, .{ .elem_type = child_type.ip_index, .is_const = true }); + return ptrType(mod, .{ .elem_type = child_type.toIntern(), .is_const = true }); } pub fn adjustPtrTypeChild(mod: *Module, ptr_ty: Type, new_child: Type) Allocator.Error!Type { @@ 
-6871,9 +6817,9 @@ pub fn errorSetFromUnsortedNames( pub fn ptrIntValue(mod: *Module, ty: Type, x: u64) Allocator.Error!Value { if (ty.isPtrLikeOptional(mod)) { const i = try intern(mod, .{ .opt = .{ - .ty = ty.ip_index, + .ty = ty.toIntern(), .val = try intern(mod, .{ .ptr = .{ - .ty = ty.childType(mod).ip_index, + .ty = ty.childType(mod).toIntern(), .addr = .{ .int = try intern(mod, .{ .int = .{ .ty = .usize_type, .storage = .{ .u64 = x }, @@ -6890,7 +6836,7 @@ pub fn ptrIntValue(mod: *Module, ty: Type, x: u64) Allocator.Error!Value { pub fn ptrIntValue_ptronly(mod: *Module, ty: Type, x: u64) Allocator.Error!Value { assert(ty.zigTypeTag(mod) == .Pointer); const i = try intern(mod, .{ .ptr = .{ - .ty = ty.ip_index, + .ty = ty.toIntern(), .addr = .{ .int = try intern(mod, .{ .int = .{ .ty = .usize_type, .storage = .{ .u64 = x }, @@ -6906,7 +6852,7 @@ pub fn enumValue(mod: *Module, ty: Type, tag_int: InternPool.Index) Allocator.Er assert(tag == .Enum); } const i = try intern(mod, .{ .enum_tag = .{ - .ty = ty.ip_index, + .ty = ty.toIntern(), .int = tag_int, } }); return i.toValue(); @@ -6917,12 +6863,12 @@ pub fn enumValue(mod: *Module, ty: Type, tag_int: InternPool.Index) Allocator.Er pub fn enumValueFieldIndex(mod: *Module, ty: Type, field_index: u32) Allocator.Error!Value { const ip = &mod.intern_pool; const gpa = mod.gpa; - const enum_type = ip.indexToKey(ty.ip_index).enum_type; + const enum_type = ip.indexToKey(ty.toIntern()).enum_type; if (enum_type.values.len == 0) { // Auto-numbered fields. return (try ip.get(gpa, .{ .enum_tag = .{ - .ty = ty.ip_index, + .ty = ty.toIntern(), .int = try ip.get(gpa, .{ .int = .{ .ty = enum_type.tag_ty, .storage = .{ .u64 = field_index }, @@ -6931,7 +6877,7 @@ pub fn enumValueFieldIndex(mod: *Module, ty: Type, field_index: u32) Allocator.E } return (try ip.get(gpa, .{ .enum_tag = .{ - .ty = ty.ip_index, + .ty = ty.toIntern(), .int = enum_type.values[field_index], } })).toValue(); } @@ -6950,7 +6896,7 @@ pub fn intValue(mod: *Module, ty: Type, x: anytype) Allocator.Error!Value { pub fn intValue_big(mod: *Module, ty: Type, x: BigIntConst) Allocator.Error!Value { const i = try intern(mod, .{ .int = .{ - .ty = ty.ip_index, + .ty = ty.toIntern(), .storage = .{ .big_int = x }, } }); return i.toValue(); @@ -6958,7 +6904,7 @@ pub fn intValue_big(mod: *Module, ty: Type, x: BigIntConst) Allocator.Error!Valu pub fn intValue_u64(mod: *Module, ty: Type, x: u64) Allocator.Error!Value { const i = try intern(mod, .{ .int = .{ - .ty = ty.ip_index, + .ty = ty.toIntern(), .storage = .{ .u64 = x }, } }); return i.toValue(); @@ -6966,7 +6912,7 @@ pub fn intValue_u64(mod: *Module, ty: Type, x: u64) Allocator.Error!Value { pub fn intValue_i64(mod: *Module, ty: Type, x: i64) Allocator.Error!Value { const i = try intern(mod, .{ .int = .{ - .ty = ty.ip_index, + .ty = ty.toIntern(), .storage = .{ .i64 = x }, } }); return i.toValue(); @@ -6974,9 +6920,9 @@ pub fn intValue_i64(mod: *Module, ty: Type, x: i64) Allocator.Error!Value { pub fn unionValue(mod: *Module, union_ty: Type, tag: Value, val: Value) Allocator.Error!Value { const i = try intern(mod, .{ .un = .{ - .ty = union_ty.ip_index, - .tag = tag.ip_index, - .val = val.ip_index, + .ty = union_ty.toIntern(), + .tag = tag.toIntern(), + .val = val.toIntern(), } }); return i.toValue(); } @@ -6993,7 +6939,7 @@ pub fn floatValue(mod: *Module, ty: Type, x: anytype) Allocator.Error!Value { else => unreachable, }; const i = try intern(mod, .{ .float = .{ - .ty = ty.ip_index, + .ty = ty.toIntern(), .storage = storage, } }); return 
i.toValue(); @@ -7001,9 +6947,9 @@ pub fn floatValue(mod: *Module, ty: Type, x: anytype) Allocator.Error!Value { pub fn nullValue(mod: *Module, opt_ty: Type) Allocator.Error!Value { const ip = &mod.intern_pool; - assert(ip.isOptionalType(opt_ty.ip_index)); + assert(ip.isOptionalType(opt_ty.toIntern())); const result = try ip.get(mod.gpa, .{ .opt = .{ - .ty = opt_ty.ip_index, + .ty = opt_ty.toIntern(), .val = .none, } }); return result.toValue(); @@ -7042,7 +6988,7 @@ pub fn intFittingRange(mod: *Module, min: Value, max: Value) !Type { pub fn intBitsForValue(mod: *Module, val: Value, sign: bool) u16 { assert(!val.isUndef(mod)); - const key = mod.intern_pool.indexToKey(val.ip_index); + const key = mod.intern_pool.indexToKey(val.toIntern()); switch (key.int.storage) { .i64 => |x| { if (std.math.cast(u64, x)) |casted| return Type.smallestUnsignedBits(casted); @@ -7221,19 +7167,19 @@ pub fn namespaceDeclIndex(mod: *Module, namespace_index: Namespace.Index) Decl.I /// * Not a struct. pub fn typeToStruct(mod: *Module, ty: Type) ?*Struct { if (ty.ip_index == .none) return null; - const struct_index = mod.intern_pool.indexToStructType(ty.ip_index).unwrap() orelse return null; + const struct_index = mod.intern_pool.indexToStructType(ty.toIntern()).unwrap() orelse return null; return mod.structPtr(struct_index); } pub fn typeToUnion(mod: *Module, ty: Type) ?*Union { if (ty.ip_index == .none) return null; - const union_index = mod.intern_pool.indexToUnionType(ty.ip_index).unwrap() orelse return null; + const union_index = mod.intern_pool.indexToUnionType(ty.toIntern()).unwrap() orelse return null; return mod.unionPtr(union_index); } pub fn typeToFunc(mod: *Module, ty: Type) ?InternPool.Key.FuncType { if (ty.ip_index == .none) return null; - return mod.intern_pool.indexToFuncType(ty.ip_index); + return mod.intern_pool.indexToFuncType(ty.toIntern()); } pub fn typeToInferredErrorSet(mod: *Module, ty: Type) ?*Fn.InferredErrorSet { @@ -7243,7 +7189,7 @@ pub fn typeToInferredErrorSet(mod: *Module, ty: Type) ?*Fn.InferredErrorSet { pub fn typeToInferredErrorSetIndex(mod: *Module, ty: Type) Fn.InferredErrorSet.OptionalIndex { if (ty.ip_index == .none) return .none; - return mod.intern_pool.indexToInferredErrorSetType(ty.ip_index); + return mod.intern_pool.indexToInferredErrorSetType(ty.toIntern()); } pub fn fieldSrcLoc(mod: *Module, owner_decl_index: Decl.Index, query: FieldSrcQuery) SrcLoc { @@ -7268,5 +7214,5 @@ pub fn fieldSrcLoc(mod: *Module, owner_decl_index: Decl.Index, query: FieldSrcQu } pub fn toEnum(mod: *Module, comptime E: type, val: Value) E { - return mod.intern_pool.toEnum(E, val.ip_index); + return mod.intern_pool.toEnum(E, val.toIntern()); } diff --git a/src/Sema.zig b/src/Sema.zig index d9b346e638..4478f26bf4 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -1866,9 +1866,9 @@ fn resolveConstMaybeUndefVal( reason: []const u8, ) CompileError!Value { if (try sema.resolveMaybeUndefValAllowVariables(inst)) |val| { - switch (val.ip_index) { + switch (val.toIntern()) { .generic_poison => return error.GenericPoison, - else => switch (sema.mod.intern_pool.indexToKey(val.ip_index)) { + else => switch (sema.mod.intern_pool.indexToKey(val.toIntern())) { .variable => return sema.failWithNeededComptime(block, src, reason), else => return val, }, @@ -1887,10 +1887,10 @@ fn resolveConstValue( reason: []const u8, ) CompileError!Value { if (try sema.resolveMaybeUndefValAllowVariables(air_ref)) |val| { - switch (val.ip_index) { + switch (val.toIntern()) { .generic_poison => return error.GenericPoison, .undef => 
return sema.failWithUseOfUndef(block, src), - else => switch (sema.mod.intern_pool.indexToKey(val.ip_index)) { + else => switch (sema.mod.intern_pool.indexToKey(val.toIntern())) { .undef => return sema.failWithUseOfUndef(block, src), .variable => return sema.failWithNeededComptime(block, src, reason), else => return val, @@ -1930,7 +1930,7 @@ fn resolveMaybeUndefVal( switch (val.ip_index) { .generic_poison => return error.GenericPoison, .none => return val, - else => switch (sema.mod.intern_pool.indexToKey(val.ip_index)) { + else => switch (sema.mod.intern_pool.indexToKey(val.toIntern())) { .variable => return null, else => return val, }, @@ -1950,7 +1950,7 @@ fn resolveMaybeUndefValIntable( while (true) switch (check.ip_index) { .generic_poison => return error.GenericPoison, .none => break, - else => switch (sema.mod.intern_pool.indexToKey(check.ip_index)) { + else => switch (sema.mod.intern_pool.indexToKey(check.toIntern())) { .variable => return null, .ptr => |ptr| switch (ptr.addr) { .decl, .mut_decl, .comptime_field => return null, @@ -2007,7 +2007,6 @@ fn resolveMaybeUndefValAllowVariablesMaybeRuntime( if (val.isPtrToThreadLocal(sema.mod)) make_runtime.* = true; return val; }, - .const_ty => return air_datas[i].ty.toValue(), .interned => return air_datas[i].interned.toValue(), else => return null, } @@ -2490,7 +2489,7 @@ fn zirCoerceResultPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE }); try sema.maybeQueueFuncBodyAnalysis(iac.data.decl_index); return sema.addConstant(ptr_ty, (try sema.mod.intern(.{ .ptr = .{ - .ty = ptr_ty.ip_index, + .ty = ptr_ty.toIntern(), .addr = .{ .mut_decl = .{ .decl = iac.data.decl_index, .runtime_index = block.runtime_index, @@ -2988,7 +2987,7 @@ fn zirEnumDecl( if (ty.zigTypeTag(mod) != .Int and ty.zigTypeTag(mod) != .ComptimeInt) { return sema.fail(block, tag_ty_src, "expected integer tag type, found '{}'", .{ty.fmt(sema.mod)}); } - incomplete_enum.setTagType(&mod.intern_pool, ty.ip_index); + incomplete_enum.setTagType(&mod.intern_pool, ty.toIntern()); break :ty ty; } else if (fields_len == 0) { break :ty try mod.intType(.unsigned, 0); @@ -2998,7 +2997,7 @@ fn zirEnumDecl( } }; - if (small.nonexhaustive and int_tag_ty.ip_index != .comptime_int_type) { + if (small.nonexhaustive and int_tag_ty.toIntern() != .comptime_int_type) { if (fields_len > 1 and std.math.log2_int(u64, fields_len) == int_tag_ty.bitSize(mod)) { return sema.fail(block, src, "non-exhaustive enum specifies every value", .{}); } @@ -3051,7 +3050,7 @@ fn zirEnumDecl( else => |e| return e, }; last_tag_val = tag_val; - if (try incomplete_enum.addFieldValue(&mod.intern_pool, gpa, tag_val.ip_index)) |other_index| { + if (try incomplete_enum.addFieldValue(&mod.intern_pool, gpa, tag_val.toIntern())) |other_index| { const value_src = mod.fieldSrcLoc(new_decl_index, .{ .index = field_i, .range = .value, @@ -3071,7 +3070,7 @@ fn zirEnumDecl( else try mod.intValue(int_tag_ty, 0); last_tag_val = tag_val; - if (try incomplete_enum.addFieldValue(&mod.intern_pool, gpa, tag_val.ip_index)) |other_index| { + if (try incomplete_enum.addFieldValue(&mod.intern_pool, gpa, tag_val.toIntern())) |other_index| { const field_src = mod.fieldSrcLoc(new_decl_index, .{ .index = field_i }).lazy; const other_field_src = mod.fieldSrcLoc(new_decl_index, .{ .index = other_index }).lazy; const msg = msg: { @@ -3742,7 +3741,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com try sema.maybeQueueFuncBodyAnalysis(decl_index); sema.air_values.items[value_index] = (try 
sema.mod.intern(.{ .ptr = .{ - .ty = final_ptr_ty.ip_index, + .ty = final_ptr_ty.toIntern(), .addr = if (var_is_mut) .{ .mut_decl = .{ .decl = decl_index, .runtime_index = block.runtime_index, @@ -3842,7 +3841,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com block.instructions.shrinkRetainingCapacity(search_index); try sema.maybeQueueFuncBodyAnalysis(new_decl_index); sema.air_values.items[value_index] = (try sema.mod.intern(.{ .ptr = .{ - .ty = final_elem_ty.ip_index, + .ty = final_elem_ty.toIntern(), .addr = .{ .decl = new_decl_index }, } })).toValue(); // if bitcast ty ref needs to be made const, make_ptr_const @@ -4341,12 +4340,12 @@ fn validateUnionInit( block.instructions.shrinkRetainingCapacity(first_block_index); var union_val = try mod.intern(.{ .un = .{ - .ty = union_ty.ip_index, - .tag = tag_val.ip_index, - .val = val.ip_index, + .ty = union_ty.toIntern(), + .tag = tag_val.toIntern(), + .val = val.toIntern(), } }); if (make_runtime) union_val = try mod.intern(.{ .runtime_value = .{ - .ty = union_ty.ip_index, + .ty = union_ty.toIntern(), .val = union_val, } }); const union_init = try sema.addConstant(union_ty, union_val.toValue()); @@ -4417,7 +4416,7 @@ fn validateStructInit( if (field_ptr != 0) continue; const default_val = struct_ty.structFieldDefaultValue(i, mod); - if (default_val.ip_index == .unreachable_value) { + if (default_val.toIntern() == .unreachable_value) { if (struct_ty.isTuple(mod)) { const template = "missing tuple field with index {d}"; if (root_msg) |msg| { @@ -4476,15 +4475,14 @@ fn validateStructInit( // We collect the comptime field values in case the struct initialization // ends up being comptime-known. - const field_values = try sema.gpa.alloc(InternPool.Index, struct_ty.structFieldCount(mod)); - defer sema.gpa.free(field_values); + const field_values = try sema.arena.alloc(InternPool.Index, struct_ty.structFieldCount(mod)); field: for (found_fields, 0..) |field_ptr, i| { if (field_ptr != 0) { // Determine whether the value stored to this pointer is comptime-known. 
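                // (Note, inferred from this hunk: `field_values` now holds
                // InternPool indices, and if every field proves comptime-known
                // they are interned below as one `.aggregate`, so arena
                // allocation suffices and the old `gpa.free` cleanup is gone.)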
const field_ty = struct_ty.structFieldType(i, mod); if (try sema.typeHasOnePossibleValue(field_ty)) |opv| { - field_values[i] = opv.ip_index; + field_values[i] = opv.toIntern(); continue; } @@ -4549,7 +4547,7 @@ fn validateStructInit( first_block_index = @min(first_block_index, block_index); } if (try sema.resolveMaybeUndefValAllowVariablesMaybeRuntime(bin_op.rhs, &make_runtime)) |val| { - field_values[i] = val.ip_index; + field_values[i] = val.toIntern(); } else if (require_comptime) { const field_ptr_data = sema.code.instructions.items(.data)[field_ptr].pl_node; return sema.failWithNeededComptime(block, field_ptr_data.src(), "initializer of comptime only struct must be comptime-known"); @@ -4563,7 +4561,7 @@ fn validateStructInit( } const default_val = struct_ty.structFieldDefaultValue(i, mod); - if (default_val.ip_index == .unreachable_value) { + if (default_val.toIntern() == .unreachable_value) { if (struct_ty.isTuple(mod)) { const template = "missing tuple field with index {d}"; if (root_msg) |msg| { @@ -4583,7 +4581,7 @@ fn validateStructInit( } continue; } - field_values[i] = default_val.ip_index; + field_values[i] = default_val.toIntern(); } if (root_msg) |msg| { @@ -4607,11 +4605,11 @@ fn validateStructInit( block.instructions.shrinkRetainingCapacity(first_block_index); var struct_val = try mod.intern(.{ .aggregate = .{ - .ty = struct_ty.ip_index, + .ty = struct_ty.toIntern(), .storage = .{ .elems = field_values }, } }); if (make_runtime) struct_val = try mod.intern(.{ .runtime_value = .{ - .ty = struct_ty.ip_index, + .ty = struct_ty.toIntern(), .val = struct_val, } }); const struct_init = try sema.addConstant(struct_ty, struct_val.toValue()); @@ -4659,7 +4657,7 @@ fn zirValidateArrayInit( var i = instrs.len; while (i < array_len) : (i += 1) { const default_val = array_ty.structFieldDefaultValue(i, mod); - if (default_val.ip_index == .unreachable_value) { + if (default_val.toIntern() == .unreachable_value) { const template = "missing tuple field with index {d}"; if (root_msg) |msg| { try sema.errNote(block, init_src, msg, template, .{i}); @@ -4710,8 +4708,7 @@ fn zirValidateArrayInit( // Collect the comptime element values in case the array literal ends up // being comptime-known. 
const array_len_s = try sema.usizeCast(block, init_src, array_ty.arrayLenIncludingSentinel(mod)); - const element_vals = try sema.gpa.alloc(InternPool.Index, array_len_s); - defer sema.gpa.free(element_vals); + const element_vals = try sema.arena.alloc(InternPool.Index, array_len_s); const opt_opv = try sema.typeHasOnePossibleValue(array_ty); const air_tags = sema.air_instructions.items(.tag); const air_datas = sema.air_instructions.items(.data); @@ -4721,13 +4718,13 @@ fn zirValidateArrayInit( if (array_ty.isTuple(mod)) { if (try array_ty.structFieldValueComptime(mod, i)) |opv| { - element_vals[i] = opv.ip_index; + element_vals[i] = opv.toIntern(); continue; } } else { // Array has one possible value, so value is always comptime-known if (opt_opv) |opv| { - element_vals[i] = opv.ip_index; + element_vals[i] = opv.toIntern(); continue; } } @@ -4788,7 +4785,7 @@ fn zirValidateArrayInit( first_block_index = @min(first_block_index, block_index); } if (try sema.resolveMaybeUndefValAllowVariablesMaybeRuntime(bin_op.rhs, &make_runtime)) |val| { - element_vals[i] = val.ip_index; + element_vals[i] = val.toIntern(); } else { array_is_comptime = false; } @@ -4800,7 +4797,7 @@ fn zirValidateArrayInit( if (array_is_comptime) { if (try sema.resolveDefinedValue(block, init_src, array_ptr)) |ptr_val| { - switch (mod.intern_pool.indexToKey(ptr_val.ip_index)) { + switch (mod.intern_pool.indexToKey(ptr_val.toIntern())) { .ptr => |ptr| switch (ptr.addr) { .comptime_field => return, // This store was validated by the individual elem ptrs. else => {}, @@ -4813,17 +4810,17 @@ fn zirValidateArrayInit( // instead a single `store` to the array_ptr with a comptime struct value. // Also to populate the sentinel value, if any. if (array_ty.sentinel(mod)) |sentinel_val| { - element_vals[instrs.len] = sentinel_val.ip_index; + element_vals[instrs.len] = sentinel_val.toIntern(); } block.instructions.shrinkRetainingCapacity(first_block_index); var array_val = try mod.intern(.{ .aggregate = .{ - .ty = array_ty.ip_index, + .ty = array_ty.toIntern(), .storage = .{ .elems = element_vals }, } }); if (make_runtime) array_val = try mod.intern(.{ .runtime_value = .{ - .ty = array_ty.ip_index, + .ty = array_ty.toIntern(), .val = array_val, } }); const array_init = try sema.addConstant(array_ty, array_val.toValue()); @@ -5144,41 +5141,26 @@ fn addStrLit(sema: *Sema, block: *Block, zir_bytes: []const u8) CompileError!Air // expression of a variable declaration. 
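    // (Flow of the rewritten body below, using only names from this hunk:
    // the literal is interned as a sentinel-terminated [N:0]u8 aggregate,
    // and `memoized_decls` maps that InternPool.Index to a single anonymous
    // Decl, so identical literals keep sharing one comptime address. This
    // replaces the old `string_literal_table`/`string_literal_bytes` pair.)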
const mod = sema.mod; const gpa = sema.gpa; - const string_bytes = &mod.string_literal_bytes; - const StringLiteralAdapter = Module.StringLiteralAdapter; - const StringLiteralContext = Module.StringLiteralContext; - try string_bytes.ensureUnusedCapacity(gpa, zir_bytes.len); - const gop = try mod.string_literal_table.getOrPutContextAdapted(gpa, zir_bytes, StringLiteralAdapter{ - .bytes = string_bytes, - }, StringLiteralContext{ - .bytes = string_bytes, + const ty = try mod.arrayType(.{ + .len = zir_bytes.len, + .child = .u8_type, + .sentinel = .zero_u8, }); + const val = try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .bytes = zir_bytes }, + } }); + const gop = try mod.memoized_decls.getOrPut(gpa, val); if (!gop.found_existing) { - gop.key_ptr.* = .{ - .index = @intCast(u32, string_bytes.items.len), - .len = @intCast(u32, zir_bytes.len), - }; - string_bytes.appendSliceAssumeCapacity(zir_bytes); - gop.value_ptr.* = .none; - } - const decl_index = gop.value_ptr.unwrap() orelse di: { var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); - const decl_index = try anon_decl.finish( - try Type.array(anon_decl.arena(), gop.key_ptr.len, try mod.intValue(Type.u8, 0), Type.u8, mod), - try Value.Tag.str_lit.create(anon_decl.arena(), gop.key_ptr.*), - 0, // default alignment - ); - - // Needed so that `Decl.clearValues` will additionally set the corresponding - // string literal table value back to `Decl.OptionalIndex.none`. - mod.declPtr(decl_index).owns_tv = true; + const decl_index = try anon_decl.finish(ty, val.toValue(), 0); - gop.value_ptr.* = decl_index.toOptional(); - break :di decl_index; - }; - return sema.analyzeDeclRef(decl_index); + gop.key_ptr.* = val; + gop.value_ptr.* = decl_index; + } + return sema.analyzeDeclRef(gop.value_ptr.*); } fn zirInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -6218,7 +6200,7 @@ fn funcDeclSrc(sema: *Sema, func_inst: Air.Inst.Ref) !?*Decl { const mod = sema.mod; const func_val = (try sema.resolveMaybeUndefVal(func_inst)) orelse return null; if (func_val.isUndef(mod)) return null; - const owner_decl_index = switch (mod.intern_pool.indexToKey(func_val.ip_index)) { + const owner_decl_index = switch (mod.intern_pool.indexToKey(func_val.toIntern())) { .extern_func => |extern_func| extern_func.decl, .func => |func| mod.funcPtr(func.index).owner_decl, .ptr => |ptr| switch (ptr.addr) { @@ -6792,7 +6774,7 @@ fn analyzeCall( if (err == error.AnalysisFail and comptime_reason != null) try comptime_reason.?.explain(sema, sema.err); return err; }; - const module_fn_index = switch (mod.intern_pool.indexToKey(func_val.ip_index)) { + const module_fn_index = switch (mod.intern_pool.indexToKey(func_val.toIntern())) { .extern_func => return sema.fail(block, call_src, "{s} call of extern function", .{ @as([]const u8, if (is_comptime_call) "comptime" else "inline"), }), @@ -6996,7 +6978,7 @@ fn analyzeCall( } break :blk bare_return_type; }; - new_fn_info.return_type = fn_ret_ty.ip_index; + new_fn_info.return_type = fn_ret_ty.toIntern(); const parent_fn_ret_ty = sema.fn_ret_ty; sema.fn_ret_ty = fn_ret_ty; defer sema.fn_ret_ty = parent_fn_ret_ty; @@ -7289,7 +7271,7 @@ fn analyzeInlineCallArg( if (err == error.AnalysisFail and param_block.comptime_reason != null) try param_block.comptime_reason.?.explain(sema, sema.err); return err; }; - switch (arg_val.ip_index) { + switch (arg_val.toIntern()) { .generic_poison, .generic_poison_type => { // This function is currently evaluated as part of an as-of-yet 
unresolvable // parameter or return type. @@ -7328,7 +7310,7 @@ fn analyzeInlineCallArg( if (err == error.AnalysisFail and param_block.comptime_reason != null) try param_block.comptime_reason.?.explain(sema, sema.err); return err; }; - switch (arg_val.ip_index) { + switch (arg_val.toIntern()) { .generic_poison, .generic_poison_type => { // This function is currently evaluated as part of an as-of-yet unresolvable // parameter or return type. @@ -7426,7 +7408,7 @@ fn instantiateGenericCall( const gpa = sema.gpa; const func_val = try sema.resolveConstValue(block, func_src, func, "generic function being called must be comptime-known"); - const module_fn = mod.funcPtr(switch (mod.intern_pool.indexToKey(func_val.ip_index)) { + const module_fn = mod.funcPtr(switch (mod.intern_pool.indexToKey(func_val.toIntern())) { .func => |function| function.index, .ptr => |ptr| mod.declPtr(ptr.addr.decl).getFunctionIndex(mod).unwrap().?, else => unreachable, @@ -7911,7 +7893,7 @@ fn resolveGenericInstantiationType( } new_decl.val = (try mod.intern(.{ .func = .{ - .ty = new_decl.ty.ip_index, + .ty = new_decl.ty.toIntern(), .index = new_func, } })).toValue(); new_decl.@"align" = 0; @@ -7932,7 +7914,7 @@ fn resolveGenericInstantiationType( fn resolveTupleLazyValues(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!void { const mod = sema.mod; - const tuple = switch (mod.intern_pool.indexToKey(ty.ip_index)) { + const tuple = switch (mod.intern_pool.indexToKey(ty.toIntern())) { .anon_struct_type => |tuple| tuple, else => return, }; @@ -7957,7 +7939,7 @@ fn emitDbgInline( if (old_func == new_func) return; try sema.air_values.append(sema.gpa, (try sema.mod.intern(.{ .func = .{ - .ty = new_func_ty.ip_index, + .ty = new_func_ty.toIntern(), .index = new_func, } })).toValue()); _ = try block.addInst(.{ @@ -8019,7 +8001,7 @@ fn zirVectorType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! try sema.checkVectorElemType(block, elem_type_src, elem_type); const vector_type = try mod.vectorType(.{ .len = len, - .child = elem_type.ip_index, + .child = elem_type.toIntern(), }); return sema.addType(vector_type); } @@ -8129,7 +8111,7 @@ fn zirErrorValue(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! const kv = try sema.mod.getErrorValue(name); const error_set_type = try mod.singleErrorSetType(kv.key); return sema.addConstant(error_set_type, (try mod.intern(.{ .err = .{ - .ty = error_set_type.ip_index, + .ty = error_set_type.toIntern(), .name = try mod.intern_pool.getOrPutString(sema.gpa, kv.key), } })).toValue()); } @@ -8149,7 +8131,7 @@ fn zirErrorToInt(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat if (val.isUndef(mod)) { return sema.addConstUndef(Type.err_int); } - const err_name = mod.intern_pool.indexToKey(val.ip_index).err.name; + const err_name = mod.intern_pool.indexToKey(val.toIntern()).err.name; return sema.addConstant(Type.err_int, try mod.intValue( Type.err_int, (try mod.getErrorValue(mod.intern_pool.stringToSlice(err_name))).value, @@ -8240,7 +8222,7 @@ fn zirMergeErrorSets(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr return sema.fail(block, rhs_src, "expected error set type, found '{}'", .{rhs_ty.fmt(sema.mod)}); // Anything merged with anyerror is anyerror. 
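    // (Since anyerror is the superset of every error set, the merge can
    // short-circuit here without computing a set union.)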
- if (lhs_ty.ip_index == .anyerror_type or rhs_ty.ip_index == .anyerror_type) { + if (lhs_ty.toIntern() == .anyerror_type or rhs_ty.toIntern() == .anyerror_type) { return Air.Inst.Ref.anyerror_type; } @@ -8445,8 +8427,8 @@ fn analyzeOptionalPayloadPtr( _ = try block.addTyOp(.optional_payload_ptr_set, child_pointer, optional_ptr); } return sema.addConstant(child_pointer, (try mod.intern(.{ .ptr = .{ - .ty = child_pointer.ip_index, - .addr = .{ .opt_payload = ptr_val.ip_index }, + .ty = child_pointer.toIntern(), + .addr = .{ .opt_payload = ptr_val.toIntern() }, } })).toValue()); } if (try sema.pointerDeref(block, src, ptr_val, optional_ptr_ty)) |val| { @@ -8455,8 +8437,8 @@ fn analyzeOptionalPayloadPtr( } // The same Value represents the pointer to the optional and the payload. return sema.addConstant(child_pointer, (try mod.intern(.{ .ptr = .{ - .ty = child_pointer.ip_index, - .addr = .{ .opt_payload = ptr_val.ip_index }, + .ty = child_pointer.toIntern(), + .addr = .{ .opt_payload = ptr_val.toIntern() }, } })).toValue()); } } @@ -8565,7 +8547,7 @@ fn analyzeErrUnionPayload( } return sema.addConstant( payload_ty, - mod.intern_pool.indexToKey(val.ip_index).error_union.val.payload.toValue(), + mod.intern_pool.indexToKey(val.toIntern()).error_union.val.payload.toValue(), ); } @@ -8633,8 +8615,8 @@ fn analyzeErrUnionPayloadPtr( _ = try block.addTyOp(.errunion_payload_ptr_set, operand_pointer_ty, operand); } return sema.addConstant(operand_pointer_ty, (try mod.intern(.{ .ptr = .{ - .ty = operand_pointer_ty.ip_index, - .addr = .{ .eu_payload = ptr_val.ip_index }, + .ty = operand_pointer_ty.toIntern(), + .addr = .{ .eu_payload = ptr_val.toIntern() }, } })).toValue()); } if (try sema.pointerDeref(block, src, ptr_val, operand_ty)) |val| { @@ -8642,8 +8624,8 @@ fn analyzeErrUnionPayloadPtr( return sema.fail(block, src, "caught unexpected error '{s}'", .{name}); } return sema.addConstant(operand_pointer_ty, (try mod.intern(.{ .ptr = .{ - .ty = operand_pointer_ty.ip_index, - .addr = .{ .eu_payload = ptr_val.ip_index }, + .ty = operand_pointer_ty.toIntern(), + .addr = .{ .eu_payload = ptr_val.toIntern() }, } })).toValue()); } } @@ -8828,7 +8810,7 @@ fn resolveGenericBody( }; switch (err) { error.GenericPoison => { - if (dest_ty.ip_index == .type_type) { + if (dest_ty.toIntern() == .type_type) { return Value.generic_poison_type; } else { return Value.generic_poison; @@ -9183,7 +9165,7 @@ fn funcCommon( if (is_extern) { return sema.addConstant(fn_ty, (try mod.intern(.{ .extern_func = .{ - .ty = fn_ty.ip_index, + .ty = fn_ty.toIntern(), .decl = sema.owner_decl_index, .lib_name = if (opt_lib_name) |lib_name| (try mod.intern_pool.getOrPutString( gpa, @@ -9223,7 +9205,7 @@ fn funcCommon( .is_noinline = is_noinline, }; return sema.addConstant(fn_ty, (try mod.intern(.{ .func = .{ - .ty = fn_ty.ip_index, + .ty = fn_ty.toIntern(), .index = new_func_index, } })).toValue()); } @@ -10151,16 +10133,16 @@ fn zirSwitchCapture( .@"addrspace" = operand_ptr_ty.ptrAddressSpace(mod), }); return sema.addConstant(ptr_field_ty, (try mod.intern(.{ .ptr = .{ - .ty = ptr_field_ty.ip_index, + .ty = ptr_field_ty.toIntern(), .addr = .{ .field = .{ - .base = union_val.ip_index, + .base = union_val.toIntern(), .index = field_index, } }, } })).toValue()); } return sema.addConstant( field_ty, - mod.intern_pool.indexToKey(union_val.ip_index).un.val.toValue(), + mod.intern_pool.indexToKey(union_val.toIntern()).un.val.toValue(), ); } if (is_ref) { @@ -10256,9 +10238,9 @@ fn zirSwitchCapture( if (try sema.resolveDefinedValue(block, 
operand_src, operand_ptr)) |op_ptr_val| { return sema.addConstant(field_ty_ptr, (try mod.intern(.{ .ptr = .{ - .ty = field_ty_ptr.ip_index, + .ty = field_ty_ptr.toIntern(), .addr = .{ .field = .{ - .base = op_ptr_val.ip_index, + .base = op_ptr_val.toIntern(), .index = first_field_index, } }, } })).toValue()); @@ -11502,7 +11484,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError cases_len += 1; const item_val = try mod.intern(.{ .err = .{ - .ty = operand_ty.ip_index, + .ty = operand_ty.toIntern(), .name = error_name_ip, } }); const item_ref = try sema.addConstant(operand_ty, item_val.toValue()); @@ -11802,7 +11784,7 @@ fn validateSwitchItemError( const ip = &sema.mod.intern_pool; const item_tv = try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none); // TODO: Do i need to typecheck here? - const error_name = ip.stringToSlice(ip.indexToKey(item_tv.val.ip_index).err.name); + const error_name = ip.stringToSlice(ip.indexToKey(item_tv.val.toIntern()).err.name); const maybe_prev_src = if (try seen_errors.fetchPut(error_name, switch_prong_src)) |prev| prev.value else @@ -12035,7 +12017,7 @@ fn zirHasField(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const ip = &mod.intern_pool; const has_field = hf: { - switch (ip.indexToKey(ty.ip_index)) { + switch (ip.indexToKey(ty.toIntern())) { .ptr_type => |ptr_type| switch (ptr_type.size) { .Slice => { if (mem.eql(u8, field_name, "ptr")) break :hf true; @@ -12160,17 +12142,23 @@ fn zirEmbedFile(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); - const bytes_including_null = embed_file.bytes[0 .. embed_file.bytes.len + 1]; - - // TODO instead of using `Value.Tag.bytes`, create a new value tag for pointing at + // TODO instead of using `.bytes`, create a new value tag for pointing at // a `*Module.EmbedFile`. The purpose of this would be: // - If only the length is read and the bytes are not inspected by comptime code, // there can be an optimization where the codegen backend does a copy_file_range // into the final binary, and never loads the data into memory. // - When a Decl is destroyed, it can free the `*Module.EmbedFile`. + const ty = try mod.arrayType(.{ + .len = embed_file.bytes.len, + .child = .u8_type, + .sentinel = .zero_u8, + }); embed_file.owner_decl = try anon_decl.finish( - try Type.array(anon_decl.arena(), embed_file.bytes.len, try mod.intValue(Type.u8, 0), Type.u8, mod), - try Value.Tag.bytes.create(anon_decl.arena(), bytes_including_null), + ty, + (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .bytes = embed_file.bytes }, + } })).toValue(), 0, // default alignment ); @@ -12186,7 +12174,7 @@ fn zirRetErrValueCode(sema: *Sema, inst: Zir.Inst.Index) CompileError!Air.Inst.R const kv = try mod.getErrorValue(err_name); const error_set_type = try mod.singleErrorSetType(kv.key); return sema.addConstant(error_set_type, (try mod.intern(.{ .err = .{ - .ty = error_set_type.ip_index, + .ty = error_set_type.toIntern(), .name = mod.intern_pool.getString(kv.key).unwrap().?, } })).toValue()); } @@ -12597,15 +12585,15 @@ fn zirBitNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. 
return sema.addConstUndef(operand_type); } else if (operand_type.zigTypeTag(mod) == .Vector) { const vec_len = try sema.usizeCast(block, operand_src, operand_type.vectorLen(mod)); - const elems = try sema.arena.alloc(Value, vec_len); + const elems = try sema.arena.alloc(InternPool.Index, vec_len); for (elems, 0..) |*elem, i| { const elem_val = try val.elemValue(sema.mod, i); - elem.* = try elem_val.bitwiseNot(scalar_type, sema.arena, sema.mod); + elem.* = try (try elem_val.bitwiseNot(scalar_type, sema.arena, sema.mod)).intern(scalar_type, mod); } - return sema.addConstant( - operand_type, - try Value.Tag.aggregate.create(sema.arena, elems), - ); + return sema.addConstant(operand_type, (try mod.intern(.{ .aggregate = .{ + .ty = operand_type.toIntern(), + .storage = .{ .elems = elems }, + } })).toValue()); } else { const result_val = try val.bitwiseNot(operand_type, sema.arena, sema.mod); return sema.addConstant(operand_type, result_val); @@ -12652,22 +12640,22 @@ fn analyzeTupleCat( var runtime_src: ?LazySrcLoc = null; var i: u32 = 0; while (i < lhs_len) : (i += 1) { - types[i] = lhs_ty.structFieldType(i, mod).ip_index; + types[i] = lhs_ty.structFieldType(i, mod).toIntern(); const default_val = lhs_ty.structFieldDefaultValue(i, mod); - values[i] = default_val.ip_index; + values[i] = default_val.toIntern(); const operand_src = lhs_src; // TODO better source location - if (default_val.ip_index == .unreachable_value) { + if (default_val.toIntern() == .unreachable_value) { runtime_src = operand_src; values[i] = .none; } } i = 0; while (i < rhs_len) : (i += 1) { - types[i + lhs_len] = rhs_ty.structFieldType(i, mod).ip_index; + types[i + lhs_len] = rhs_ty.structFieldType(i, mod).toIntern(); const default_val = rhs_ty.structFieldDefaultValue(i, mod); - values[i + lhs_len] = default_val.ip_index; + values[i + lhs_len] = default_val.toIntern(); const operand_src = rhs_src; // TODO better source location - if (default_val.ip_index == .unreachable_value) { + if (default_val.toIntern() == .unreachable_value) { runtime_src = operand_src; values[i + lhs_len] = .none; } @@ -12824,34 +12812,32 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai else rhs_val; - const final_len_including_sent = result_len + @boolToInt(res_sent_val != null); - const element_vals = try sema.arena.alloc(Value, final_len_including_sent); + const element_vals = try sema.arena.alloc(InternPool.Index, result_len); var elem_i: usize = 0; while (elem_i < lhs_len) : (elem_i += 1) { const lhs_elem_i = elem_i; const elem_ty = if (lhs_is_tuple) lhs_ty.structFieldType(lhs_elem_i, mod) else lhs_info.elem_type; const elem_default_val = if (lhs_is_tuple) lhs_ty.structFieldDefaultValue(lhs_elem_i, mod) else Value.@"unreachable"; - const elem_val = if (elem_default_val.ip_index == .unreachable_value) try lhs_sub_val.elemValue(mod, lhs_elem_i) else elem_default_val; + const elem_val = if (elem_default_val.toIntern() == .unreachable_value) try lhs_sub_val.elemValue(mod, lhs_elem_i) else elem_default_val; const elem_val_inst = try sema.addConstant(elem_ty, elem_val); const coerced_elem_val_inst = try sema.coerce(block, resolved_elem_ty, elem_val_inst, .unneeded); const coerced_elem_val = try sema.resolveConstMaybeUndefVal(block, .unneeded, coerced_elem_val_inst, ""); - element_vals[elem_i] = coerced_elem_val; + element_vals[elem_i] = try coerced_elem_val.intern(resolved_elem_ty, mod); } while (elem_i < result_len) : (elem_i += 1) { const rhs_elem_i = elem_i - lhs_len; const elem_ty = if (rhs_is_tuple) 
rhs_ty.structFieldType(rhs_elem_i, mod) else rhs_info.elem_type; const elem_default_val = if (rhs_is_tuple) rhs_ty.structFieldDefaultValue(rhs_elem_i, mod) else Value.@"unreachable"; - const elem_val = if (elem_default_val.ip_index == .unreachable_value) try rhs_sub_val.elemValue(mod, rhs_elem_i) else elem_default_val; + const elem_val = if (elem_default_val.toIntern() == .unreachable_value) try rhs_sub_val.elemValue(mod, rhs_elem_i) else elem_default_val; const elem_val_inst = try sema.addConstant(elem_ty, elem_val); const coerced_elem_val_inst = try sema.coerce(block, resolved_elem_ty, elem_val_inst, .unneeded); const coerced_elem_val = try sema.resolveConstMaybeUndefVal(block, .unneeded, coerced_elem_val_inst, ""); - element_vals[elem_i] = coerced_elem_val; - } - if (res_sent_val) |sent_val| { - element_vals[result_len] = sent_val; + element_vals[elem_i] = try coerced_elem_val.intern(resolved_elem_ty, mod); } - const val = try Value.Tag.aggregate.create(sema.arena, element_vals); - return sema.addConstantMaybeRef(block, result_ty, val, ptr_addrspace != null); + return sema.addConstantMaybeRef(block, result_ty, (try mod.intern(.{ .aggregate = .{ + .ty = result_ty.toIntern(), + .storage = .{ .elems = element_vals }, + } })).toValue(), ptr_addrspace != null); } else break :rs rhs_src; } else lhs_src; @@ -12978,8 +12964,8 @@ fn analyzeTupleMul( const opt_runtime_src = rs: { var runtime_src: ?LazySrcLoc = null; for (0..tuple_len) |i| { - types[i] = operand_ty.structFieldType(i, mod).ip_index; - values[i] = operand_ty.structFieldDefaultValue(i, mod).ip_index; + types[i] = operand_ty.structFieldType(i, mod).toIntern(); + values[i] = operand_ty.structFieldDefaultValue(i, mod).toIntern(); const operand_src = lhs_src; // TODO better source location if (values[i] == .unreachable_value) { runtime_src = operand_src; @@ -13086,8 +13072,8 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai if (lhs_len == 1) { const elem_val = try lhs_sub_val.elemValue(mod, 0); break :v try mod.intern(.{ .aggregate = .{ - .ty = result_ty.ip_index, - .storage = .{ .repeated_elem = elem_val.ip_index }, + .ty = result_ty.toIntern(), + .storage = .{ .repeated_elem = elem_val.toIntern() }, } }); } @@ -13097,16 +13083,15 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai var lhs_i: usize = 0; while (lhs_i < lhs_len) : (lhs_i += 1) { const elem_val = try lhs_sub_val.elemValue(mod, lhs_i); - assert(elem_val.ip_index != .none); - element_vals[elem_i] = elem_val.ip_index; + element_vals[elem_i] = elem_val.toIntern(); elem_i += 1; } } if (lhs_info.sentinel) |sent_val| { - element_vals[result_len] = sent_val.ip_index; + element_vals[result_len] = sent_val.toIntern(); } break :v try mod.intern(.{ .aggregate = .{ - .ty = result_ty.ip_index, + .ty = result_ty.toIntern(), .storage = .{ .elems = element_vals }, } }); }; @@ -13998,8 +13983,8 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. 
else => unreachable, }; const zero_val = if (is_vector) (try mod.intern(.{ .aggregate = .{ - .ty = resolved_type.ip_index, - .storage = .{ .repeated_elem = scalar_zero.ip_index }, + .ty = resolved_type.toIntern(), + .storage = .{ .repeated_elem = scalar_zero.toIntern() }, } })).toValue() else scalar_zero; return sema.addConstant(resolved_type, zero_val); } @@ -14079,14 +14064,17 @@ fn intRem( ) CompileError!Value { const mod = sema.mod; if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try sema.arena.alloc(Value, ty.vectorLen(mod)); + const result_data = try sema.arena.alloc(InternPool.Index, ty.vectorLen(mod)); const scalar_ty = ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { const lhs_elem = try lhs.elemValue(sema.mod, i); const rhs_elem = try rhs.elemValue(sema.mod, i); - scalar.* = try sema.intRemScalar(lhs_elem, rhs_elem, scalar_ty); + scalar.* = try (try sema.intRemScalar(lhs_elem, rhs_elem, scalar_ty)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(sema.arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return sema.intRemScalar(lhs, rhs, ty); } @@ -14517,11 +14505,13 @@ fn zirOverflowArithmetic( } if (result.inst == .none) { - const values = try sema.arena.alloc(Value, 2); - values[0] = result.wrapped; - values[1] = result.overflow_bit; - const tuple_val = try Value.Tag.aggregate.create(sema.arena, values); - return sema.addConstant(tuple_ty, tuple_val); + return sema.addConstant(tuple_ty, (try mod.intern(.{ .aggregate = .{ + .ty = tuple_ty.toIntern(), + .storage = .{ .elems = &.{ + result.wrapped.toIntern(), + result.overflow_bit.toIntern(), + } }, + } })).toValue()); } const element_refs = try sema.arena.alloc(Air.Inst.Ref, 2); @@ -14534,8 +14524,8 @@ fn splat(sema: *Sema, ty: Type, val: Value) !Value { const mod = sema.mod; if (ty.zigTypeTag(mod) != .Vector) return val; const repeated = try mod.intern(.{ .aggregate = .{ - .ty = ty.ip_index, - .storage = .{ .repeated_elem = val.ip_index }, + .ty = ty.toIntern(), + .storage = .{ .repeated_elem = val.toIntern() }, } }); return repeated.toValue(); } @@ -14547,7 +14537,7 @@ fn overflowArithmeticTupleType(sema: *Sema, ty: Type) !Type { .child = .u1_type, }) else Type.u1; - const types = [2]InternPool.Index{ ty.ip_index, ov_ty.ip_index }; + const types = [2]InternPool.Index{ ty.toIntern(), ov_ty.toIntern() }; const values = [2]InternPool.Index{ .none, .none }; const tuple_ty = try mod.intern(.{ .anon_struct_type = .{ .types = &types, @@ -15731,7 +15721,7 @@ fn zirClosureGet( scope = scope.parent.?; }; - if (tv.val.ip_index == .unreachable_value and !block.is_typeof and sema.func_index == .none) { + if (tv.val.toIntern() == .unreachable_value and !block.is_typeof and sema.func_index == .none) { const msg = msg: { const name = name: { const file = sema.owner_decl.getFileScope(mod); @@ -15759,7 +15749,7 @@ fn zirClosureGet( return sema.failWithOwnedErrorMsg(msg); } - if (tv.val.ip_index == .unreachable_value and !block.is_typeof and !block.is_comptime and sema.func_index != .none) { + if (tv.val.toIntern() == .unreachable_value and !block.is_typeof and !block.is_comptime and sema.func_index != .none) { const msg = msg: { const name = name: { const file = sema.owner_decl.getFileScope(mod); @@ -15789,7 +15779,7 @@ fn zirClosureGet( return sema.failWithOwnedErrorMsg(msg); } - if (tv.val.ip_index == .unreachable_value) { + if (tv.val.toIntern() == .unreachable_value) { assert(block.is_typeof); // We need a dummy 
runtime instruction with the correct type. return block.addTy(.alloc, tv.ty); @@ -15840,10 +15830,17 @@ fn zirBuiltinSrc( var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); const name = mem.span(fn_owner_decl.name); - const bytes = try anon_decl.arena().dupe(u8, name[0 .. name.len + 1]); + const new_decl_ty = try mod.arrayType(.{ + .len = name.len, + .child = .u8_type, + .sentinel = .zero_u8, + }); const new_decl = try anon_decl.finish( - try Type.array(anon_decl.arena(), bytes.len - 1, try mod.intValue(Type.u8, 0), Type.u8, mod), - try Value.Tag.bytes.create(anon_decl.arena(), bytes), + new_decl_ty, + (try mod.intern(.{ .aggregate = .{ + .ty = new_decl_ty.toIntern(), + .storage = .{ .bytes = name }, + } })).toValue(), 0, // default alignment ); break :blk try mod.intern(.{ .ptr = .{ @@ -15857,9 +15854,17 @@ fn zirBuiltinSrc( defer anon_decl.deinit(); // The compiler must not call realpath anywhere. const name = try fn_owner_decl.getFileScope(mod).fullPathZ(anon_decl.arena()); + const new_decl_ty = try mod.arrayType(.{ + .len = name.len, + .child = .u8_type, + .sentinel = .zero_u8, + }); const new_decl = try anon_decl.finish( - try Type.array(anon_decl.arena(), name.len, try mod.intValue(Type.u8, 0), Type.u8, mod), - try Value.Tag.bytes.create(anon_decl.arena(), name[0 .. name.len + 1]), + new_decl_ty, + (try mod.intern(.{ .aggregate = .{ + .ty = new_decl_ty.toIntern(), + .storage = .{ .bytes = name }, + } })).toValue(), 0, // default alignment ); break :blk try mod.intern(.{ .ptr = .{ @@ -15877,13 +15882,13 @@ fn zirBuiltinSrc( // line: u32, try mod.intern(.{ .runtime_value = .{ .ty = .u32_type, - .val = (try mod.intValue(Type.u32, extra.line + 1)).ip_index, + .val = (try mod.intValue(Type.u32, extra.line + 1)).toIntern(), } }), // column: u32, - (try mod.intValue(Type.u32, extra.column + 1)).ip_index, + (try mod.intValue(Type.u32, extra.column + 1)).toIntern(), }; return sema.addConstant(src_loc_ty, (try mod.intern(.{ .aggregate = .{ - .ty = src_loc_ty.ip_index, + .ty = src_loc_ty.toIntern(), .storage = .{ .elems = &fields }, } })).toValue()); } @@ -15908,8 +15913,8 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai .Null, .EnumLiteral, => |type_info_tag| return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ - .ty = type_info_ty.ip_index, - .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(type_info_tag))).ip_index, + .ty = type_info_ty.toIntern(), + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(type_info_tag))).toIntern(), .val = .void_value, } })).toValue()), .Fn => { @@ -15941,8 +15946,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const param_info_decl = mod.declPtr(param_info_decl_index); const param_info_ty = param_info_decl.val.toType(); - const param_vals = try gpa.alloc(InternPool.Index, info.param_types.len); - defer gpa.free(param_vals); + const param_vals = try sema.arena.alloc(InternPool.Index, info.param_types.len); for (param_vals, info.param_types, 0..) 
|*param_val, param_ty, i| {
const is_generic = param_ty == .generic_poison_type;
const param_ty_val = try mod.intern_pool.get(gpa, .{ .opt = .{
@@ -15957,40 +15961,40 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const param_fields = .{
// is_generic: bool,
- Value.makeBool(is_generic).ip_index,
+ Value.makeBool(is_generic).toIntern(),
// is_noalias: bool,
- Value.makeBool(is_noalias).ip_index,
+ Value.makeBool(is_noalias).toIntern(),
// type: ?type,
param_ty_val,
};
param_val.* = try mod.intern(.{ .aggregate = .{
- .ty = param_info_ty.ip_index,
+ .ty = param_info_ty.toIntern(),
.storage = .{ .elems = &param_fields },
} });
}
const args_val = v: {
const args_slice_ty = try mod.ptrType(.{
- .elem_type = param_info_ty.ip_index,
+ .elem_type = param_info_ty.toIntern(),
.size = .Slice,
.is_const = true,
});
const new_decl = try params_anon_decl.finish(
try mod.arrayType(.{
.len = param_vals.len,
- .child = param_info_ty.ip_index,
+ .child = param_info_ty.toIntern(),
.sentinel = .none,
}),
(try mod.intern(.{ .aggregate = .{
- .ty = args_slice_ty.ip_index,
+ .ty = args_slice_ty.toIntern(),
.storage = .{ .elems = param_vals },
} })).toValue(),
0, // default alignment
);
break :v try mod.intern(.{ .ptr = .{
- .ty = args_slice_ty.ip_index,
+ .ty = args_slice_ty.toIntern(),
.addr = .{ .decl = new_decl },
- .len = (try mod.intValue(Type.usize, param_vals.len)).ip_index,
+ .len = (try mod.intValue(Type.usize, param_vals.len)).toIntern(),
} });
};
@@ -16003,43 +16007,55 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const field_values = .{
// calling_convention: CallingConvention,
- (try mod.enumValueFieldIndex(callconv_ty, @enumToInt(info.cc))).ip_index,
+ (try mod.enumValueFieldIndex(callconv_ty, @enumToInt(info.cc))).toIntern(),
// alignment: comptime_int,
- (try mod.intValue(Type.comptime_int, ty.abiAlignment(mod))).ip_index,
+ (try mod.intValue(Type.comptime_int, ty.abiAlignment(mod))).toIntern(),
// is_generic: bool,
- Value.makeBool(info.is_generic).ip_index,
+ Value.makeBool(info.is_generic).toIntern(),
// is_var_args: bool,
- Value.makeBool(info.is_var_args).ip_index,
+ Value.makeBool(info.is_var_args).toIntern(),
// return_type: ?type,
ret_ty_opt,
// args: []const Fn.Param,
args_val,
};
return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{
- .ty = type_info_ty.ip_index,
- .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Fn))).ip_index,
+ .ty = type_info_ty.toIntern(),
+ .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Fn))).toIntern(),
.val = try mod.intern(.{ .aggregate = .{
- .ty = fn_info_ty.ip_index,
+ .ty = fn_info_ty.toIntern(),
.storage = .{ .elems = &field_values },
} }),
} })).toValue());
},
.Int => {
+ const int_info_decl_index = (try sema.namespaceLookup(
+ block,
+ src,
+ type_info_ty.getNamespaceIndex(mod).unwrap().?,
+ "Int",
+ )).?;
+ try mod.declareDeclDependency(sema.owner_decl_index, int_info_decl_index);
+ try sema.ensureDeclAnalyzed(int_info_decl_index);
+ const int_info_decl = mod.declPtr(int_info_decl_index);
+ const int_info_ty = int_info_decl.val.toType();
+
const signedness_ty = try sema.getBuiltinType("Signedness");
const info = ty.intInfo(mod);
- const field_values = try sema.arena.alloc(Value, 2);
- // signedness: Signedness,
- field_values[0] = try mod.enumValueFieldIndex(signedness_ty, @enumToInt(info.signedness));
- // bits: u16,
- field_values[1] = try mod.intValue(Type.u16, info.bits);
-
- return 
sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Int)), - .val = try Value.Tag.aggregate.create(sema.arena, field_values), - }), - ); + const field_values = .{ + // signedness: Signedness, + try (try mod.enumValueFieldIndex(signedness_ty, @enumToInt(info.signedness))).intern(signedness_ty, mod), + // bits: u16, + (try mod.intValue(Type.u16, info.bits)).toIntern(), + }; + return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ + .ty = type_info_ty.toIntern(), + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Int))).toIntern(), + .val = try mod.intern(.{ .aggregate = .{ + .ty = int_info_ty.toIntern(), + .storage = .{ .elems = &field_values }, + } }), + } })).toValue()); }, .Float => { const float_info_decl_index = (try sema.namespaceLookup( @@ -16051,17 +16067,17 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai try mod.declareDeclDependency(sema.owner_decl_index, float_info_decl_index); try sema.ensureDeclAnalyzed(float_info_decl_index); const float_info_decl = mod.declPtr(float_info_decl_index); - const float_ty = float_info_decl.val.toType(); + const float_info_ty = float_info_decl.val.toType(); const field_vals = .{ // bits: u16, - (try mod.intValue(Type.u16, ty.bitSize(mod))).ip_index, + (try mod.intValue(Type.u16, ty.bitSize(mod))).toIntern(), }; return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ - .ty = type_info_ty.ip_index, - .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Float))).ip_index, + .ty = type_info_ty.toIntern(), + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Float))).toIntern(), .val = try mod.intern(.{ .aggregate = .{ - .ty = float_ty.ip_index, + .ty = float_info_ty.toIntern(), .storage = .{ .elems = &field_vals }, } }), } })).toValue()); @@ -16099,80 +16115,121 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai break :t decl.val.toType(); }; - const field_values = try sema.arena.create([8]Value); - field_values.* = .{ + const field_values = .{ // size: Size, - try mod.enumValueFieldIndex(ptr_size_ty, @enumToInt(info.size)), + try (try mod.enumValueFieldIndex(ptr_size_ty, @enumToInt(info.size))).intern(ptr_size_ty, mod), // is_const: bool, - Value.makeBool(!info.mutable), + Value.makeBool(!info.mutable).toIntern(), // is_volatile: bool, - Value.makeBool(info.@"volatile"), + Value.makeBool(info.@"volatile").toIntern(), // alignment: comptime_int, - alignment, + alignment.toIntern(), // address_space: AddressSpace - try mod.enumValueFieldIndex(addrspace_ty, @enumToInt(info.@"addrspace")), + try (try mod.enumValueFieldIndex(addrspace_ty, @enumToInt(info.@"addrspace"))).intern(addrspace_ty, mod), // child: type, - info.pointee_type.toValue(), + info.pointee_type.toIntern(), // is_allowzero: bool, - Value.makeBool(info.@"allowzero"), + Value.makeBool(info.@"allowzero").toIntern(), // sentinel: ?*const anyopaque, - try sema.optRefValue(block, info.pointee_type, info.sentinel), + (try sema.optRefValue(block, info.pointee_type, info.sentinel)).toIntern(), }; - - return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Pointer)), - .val = try Value.Tag.aggregate.create(sema.arena, field_values), - }), - ); + return sema.addConstant(type_info_ty, (try 
mod.intern(.{ .un = .{ + .ty = type_info_ty.toIntern(), + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Pointer))).toIntern(), + .val = try mod.intern(.{ .aggregate = .{ + .ty = pointer_ty.toIntern(), + .storage = .{ .elems = &field_values }, + } }), + } })).toValue()); }, .Array => { - const info = ty.arrayInfo(mod); - const field_values = try sema.arena.alloc(Value, 3); - // len: comptime_int, - field_values[0] = try mod.intValue(Type.comptime_int, info.len); - // child: type, - field_values[1] = info.elem_type.toValue(); - // sentinel: ?*const anyopaque, - field_values[2] = try sema.optRefValue(block, info.elem_type, info.sentinel); + const array_field_ty = t: { + const array_field_ty_decl_index = (try sema.namespaceLookup( + block, + src, + type_info_ty.getNamespaceIndex(mod).unwrap().?, + "Array", + )).?; + try mod.declareDeclDependency(sema.owner_decl_index, array_field_ty_decl_index); + try sema.ensureDeclAnalyzed(array_field_ty_decl_index); + const array_field_ty_decl = mod.declPtr(array_field_ty_decl_index); + break :t array_field_ty_decl.val.toType(); + }; - return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Array)), - .val = try Value.Tag.aggregate.create(sema.arena, field_values), - }), - ); + const info = ty.arrayInfo(mod); + const field_values = .{ + // len: comptime_int, + (try mod.intValue(Type.comptime_int, info.len)).toIntern(), + // child: type, + info.elem_type.toIntern(), + // sentinel: ?*const anyopaque, + (try sema.optRefValue(block, info.elem_type, info.sentinel)).toIntern(), + }; + return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ + .ty = type_info_ty.toIntern(), + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Array))).toIntern(), + .val = try mod.intern(.{ .aggregate = .{ + .ty = array_field_ty.toIntern(), + .storage = .{ .elems = &field_values }, + } }), + } })).toValue()); }, .Vector => { - const info = ty.arrayInfo(mod); - const field_values = try sema.arena.alloc(Value, 2); - // len: comptime_int, - field_values[0] = try mod.intValue(Type.comptime_int, info.len); - // child: type, - field_values[1] = info.elem_type.toValue(); + const vector_field_ty = t: { + const vector_field_ty_decl_index = (try sema.namespaceLookup( + block, + src, + type_info_ty.getNamespaceIndex(mod).unwrap().?, + "Vector", + )).?; + try mod.declareDeclDependency(sema.owner_decl_index, vector_field_ty_decl_index); + try sema.ensureDeclAnalyzed(vector_field_ty_decl_index); + const vector_field_ty_decl = mod.declPtr(vector_field_ty_decl_index); + break :t vector_field_ty_decl.val.toType(); + }; - return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Vector)), - .val = try Value.Tag.aggregate.create(sema.arena, field_values), - }), - ); + const info = ty.arrayInfo(mod); + const field_values = .{ + // len: comptime_int, + (try mod.intValue(Type.comptime_int, info.len)).toIntern(), + // child: type, + info.elem_type.toIntern(), + }; + return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ + .ty = type_info_ty.toIntern(), + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Vector))).toIntern(), + .val = try mod.intern(.{ .aggregate = .{ + .ty = vector_field_ty.toIntern(), + .storage = .{ .elems = &field_values }, + } }), + 
} })).toValue());
},
.Optional => {
- const field_values = try sema.arena.alloc(Value, 1);
- // child: type,
- field_values[0] = ty.optionalChild(mod).toValue();
+ const optional_field_ty = t: {
+ const optional_field_ty_decl_index = (try sema.namespaceLookup(
+ block,
+ src,
+ type_info_ty.getNamespaceIndex(mod).unwrap().?,
+ "Optional",
+ )).?;
+ try mod.declareDeclDependency(sema.owner_decl_index, optional_field_ty_decl_index);
+ try sema.ensureDeclAnalyzed(optional_field_ty_decl_index);
+ const optional_field_ty_decl = mod.declPtr(optional_field_ty_decl_index);
+ break :t optional_field_ty_decl.val.toType();
+ };
- return sema.addConstant(
- type_info_ty,
- try Value.Tag.@"union".create(sema.arena, .{
- .tag = try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Optional)),
- .val = try Value.Tag.aggregate.create(sema.arena, field_values),
- }),
- );
+ const field_values = .{
+ // child: type,
+ ty.optionalChild(mod).toIntern(),
+ };
+ return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{
+ .ty = type_info_ty.toIntern(),
+ .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Optional))).toIntern(),
+ .val = try mod.intern(.{ .aggregate = .{
+ .ty = optional_field_ty.toIntern(),
+ .storage = .{ .elems = &field_values },
+ } }),
+ } })).toValue());
},
.ErrorSet => {
var fields_anon_decl = try block.startAnonDecl();
@@ -16202,21 +16259,27 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
// Value can be zero-length slice otherwise
const error_field_vals = if (ty.isAnyError(mod)) null else blk: {
const names = ty.errorSetNames(mod);
- const vals = try gpa.alloc(InternPool.Index, names.len);
- defer gpa.free(vals);
+ const vals = try sema.arena.alloc(InternPool.Index, names.len);
for (vals, names) |*field_val, name_ip| {
const name = mod.intern_pool.stringToSlice(name_ip);
const name_val = v: {
var anon_decl = try block.startAnonDecl();
defer anon_decl.deinit();
- const bytes = try anon_decl.arena().dupeZ(u8, name);
+ const new_decl_ty = try mod.arrayType(.{
+ .len = name.len,
+ .child = .u8_type,
+ .sentinel = .zero_u8,
+ });
const new_decl = try anon_decl.finish(
- try Type.array(anon_decl.arena(), bytes.len, try mod.intValue(Type.u8, 0), Type.u8, mod),
- try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. 
bytes.len + 1]), + new_decl_ty, + (try mod.intern(.{ .aggregate = .{ + .ty = new_decl_ty.toIntern(), + .storage = .{ .bytes = name }, + } })).toValue(), 0, // default alignment ); break :v try mod.intern(.{ .ptr = .{ - .ty = .slice_const_u8_type, + .ty = .slice_const_u8_sentinel_0_type, .addr = .{ .decl = new_decl }, } }); }; @@ -16226,7 +16289,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai name_val, }; field_val.* = try mod.intern(.{ .aggregate = .{ - .ty = error_field_ty.ip_index, + .ty = error_field_ty.toIntern(), .storage = .{ .elems = &error_field_fields }, } }); } @@ -16236,39 +16299,39 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // Build our ?[]const Error value const slice_errors_ty = try mod.ptrType(.{ - .elem_type = error_field_ty.ip_index, + .elem_type = error_field_ty.toIntern(), .size = .Slice, .is_const = true, }); - const opt_slice_errors_ty = try mod.optionalType(slice_errors_ty.ip_index); + const opt_slice_errors_ty = try mod.optionalType(slice_errors_ty.toIntern()); const errors_payload_val: InternPool.Index = if (error_field_vals) |vals| v: { const array_errors_ty = try mod.arrayType(.{ .len = vals.len, - .child = error_field_ty.ip_index, + .child = error_field_ty.toIntern(), .sentinel = .none, }); const new_decl = try fields_anon_decl.finish( array_errors_ty, (try mod.intern(.{ .aggregate = .{ - .ty = array_errors_ty.ip_index, + .ty = array_errors_ty.toIntern(), .storage = .{ .elems = vals }, } })).toValue(), 0, // default alignment ); break :v try mod.intern(.{ .ptr = .{ - .ty = slice_errors_ty.ip_index, + .ty = slice_errors_ty.toIntern(), .addr = .{ .decl = new_decl }, } }); } else .none; const errors_val = try mod.intern(.{ .opt = .{ - .ty = opt_slice_errors_ty.ip_index, + .ty = opt_slice_errors_ty.toIntern(), .val = errors_payload_val, } }); // Construct Type{ .ErrorSet = errors_val } return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ - .ty = type_info_ty.ip_index, - .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.ErrorSet))).ip_index, + .ty = type_info_ty.toIntern(), + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.ErrorSet))).toIntern(), .val = errors_val, } })).toValue()); }, @@ -16288,22 +16351,22 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const field_values = .{ // error_set: type, - ty.errorUnionSet(mod).ip_index, + ty.errorUnionSet(mod).toIntern(), // payload: type, - ty.errorUnionPayload(mod).ip_index, + ty.errorUnionPayload(mod).toIntern(), }; return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ - .ty = type_info_ty.ip_index, - .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.ErrorUnion))).ip_index, + .ty = type_info_ty.toIntern(), + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.ErrorUnion))).toIntern(), .val = try mod.intern(.{ .aggregate = .{ - .ty = error_union_field_ty.ip_index, + .ty = error_union_field_ty.toIntern(), .storage = .{ .elems = &field_values }, } }), } })).toValue()); }, .Enum => { // TODO: look into memoizing this result. 
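// Every case below follows the same conversion shape: allocate a slice of
// `InternPool.Index` (now on `sema.arena`, which is why the old `gpa.free`
// cleanups can be dropped), intern each element `Value`, then intern the
// aggregate and turn the resulting index back into a `Value`. Sketch, with
// names taken from the surrounding code:
//
//     const elems = try sema.arena.alloc(InternPool.Index, vec_len);
//     for (elems, 0..) |*elem, i| {
//         const elem_val = try val.elemValue(mod, i);
//         elem.* = try elem_val.intern(scalar_ty, mod);
//     }
//     return sema.addConstant(result_ty, (try mod.intern(.{ .aggregate = .{
//         .ty = result_ty.toIntern(),
//         .storage = .{ .elems = elems },
//     } })).toValue());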
- const enum_type = mod.intern_pool.indexToKey(ty.ip_index).enum_type; + const enum_type = mod.intern_pool.indexToKey(ty.toIntern()).enum_type; const is_exhaustive = Value.makeBool(enum_type.tag_mode != .nonexhaustive); @@ -16323,23 +16386,28 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai break :t enum_field_ty_decl.val.toType(); }; - const enum_field_vals = try gpa.alloc(InternPool.Index, enum_type.names.len); - defer gpa.free(enum_field_vals); - + const enum_field_vals = try sema.arena.alloc(InternPool.Index, enum_type.names.len); for (enum_field_vals, 0..) |*field_val, i| { const name_ip = enum_type.names[i]; const name = mod.intern_pool.stringToSlice(name_ip); const name_val = v: { var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); - const bytes = try anon_decl.arena().dupeZ(u8, name); + const new_decl_ty = try mod.arrayType(.{ + .len = name.len, + .child = .u8_type, + .sentinel = .zero_u8, + }); const new_decl = try anon_decl.finish( - try Type.array(anon_decl.arena(), bytes.len, Value.zero_u8, Type.u8, mod), - try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]), + new_decl_ty, + (try mod.intern(.{ .aggregate = .{ + .ty = new_decl_ty.toIntern(), + .storage = .{ .bytes = name }, + } })).toValue(), 0, // default alignment ); break :v try mod.intern(.{ .ptr = .{ - .ty = .slice_const_u8_type, + .ty = .slice_const_u8_sentinel_0_type, .addr = .{ .decl = new_decl }, } }); }; @@ -16348,10 +16416,10 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // name: []const u8, name_val, // value: comptime_int, - (try mod.intValue(Type.comptime_int, i)).ip_index, + (try mod.intValue(Type.comptime_int, i)).toIntern(), }; field_val.* = try mod.intern(.{ .aggregate = .{ - .ty = enum_field_ty.ip_index, + .ty = enum_field_ty.toIntern(), .storage = .{ .elems = &enum_field_fields }, } }); } @@ -16359,23 +16427,23 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const fields_val = v: { const fields_array_ty = try mod.arrayType(.{ .len = enum_field_vals.len, - .child = enum_field_ty.ip_index, + .child = enum_field_ty.toIntern(), .sentinel = .none, }); const new_decl = try fields_anon_decl.finish( fields_array_ty, (try mod.intern(.{ .aggregate = .{ - .ty = fields_array_ty.ip_index, + .ty = fields_array_ty.toIntern(), .storage = .{ .elems = enum_field_vals }, } })).toValue(), 0, // default alignment ); break :v try mod.intern(.{ .ptr = .{ .ty = (try mod.ptrType(.{ - .elem_type = enum_field_ty.ip_index, + .elem_type = enum_field_ty.toIntern(), .size = .Slice, .is_const = true, - })).ip_index, + })).toIntern(), .addr = .{ .decl = new_decl }, } }); }; @@ -16403,13 +16471,13 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // decls: []const Declaration, decls_val, // is_exhaustive: bool, - is_exhaustive.ip_index, + is_exhaustive.toIntern(), }; return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ - .ty = type_info_ty.ip_index, - .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Enum))).ip_index, + .ty = type_info_ty.toIntern(), + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Enum))).toIntern(), .val = try mod.intern(.{ .aggregate = .{ - .ty = type_enum_ty.ip_index, + .ty = type_enum_ty.toIntern(), .storage = .{ .elems = &field_values }, } }), } })).toValue()); @@ -16460,14 +16528,21 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) 
CompileError!Ai const name_val = v: { var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); - const bytes = try anon_decl.arena().dupeZ(u8, name); + const new_decl_ty = try mod.arrayType(.{ + .len = name.len, + .child = .u8_type, + .sentinel = .zero_u8, + }); const new_decl = try anon_decl.finish( - try Type.array(anon_decl.arena(), bytes.len, try mod.intValue(Type.u8, 0), Type.u8, mod), - try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]), + new_decl_ty, + (try mod.intern(.{ .aggregate = .{ + .ty = new_decl_ty.toIntern(), + .storage = .{ .bytes = name }, + } })).toValue(), 0, // default alignment ); break :v try mod.intern(.{ .ptr = .{ - .ty = .slice_const_u8_type, + .ty = .slice_const_u8_sentinel_0_type, .addr = .{ .decl = new_decl }, } }); }; @@ -16481,12 +16556,12 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // name: []const u8, name_val, // type: type, - field.ty.ip_index, + field.ty.toIntern(), // alignment: comptime_int, - (try mod.intValue(Type.comptime_int, alignment)).ip_index, + (try mod.intValue(Type.comptime_int, alignment)).toIntern(), }; field_val.* = try mod.intern(.{ .aggregate = .{ - .ty = union_field_ty.ip_index, + .ty = union_field_ty.toIntern(), .storage = .{ .elems = &union_field_fields }, } }); } @@ -16494,33 +16569,33 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const fields_val = v: { const array_fields_ty = try mod.arrayType(.{ .len = union_field_vals.len, - .child = union_field_ty.ip_index, + .child = union_field_ty.toIntern(), .sentinel = .none, }); const new_decl = try fields_anon_decl.finish( array_fields_ty, (try mod.intern(.{ .aggregate = .{ - .ty = array_fields_ty.ip_index, + .ty = array_fields_ty.toIntern(), .storage = .{ .elems = union_field_vals }, } })).toValue(), 0, // default alignment ); break :v try mod.intern(.{ .ptr = .{ .ty = (try mod.ptrType(.{ - .elem_type = union_field_ty.ip_index, + .elem_type = union_field_ty.toIntern(), .size = .Slice, .is_const = true, - })).ip_index, + })).toIntern(), .addr = .{ .decl = new_decl }, - .len = (try mod.intValue(Type.usize, union_field_vals.len)).ip_index, + .len = (try mod.intValue(Type.usize, union_field_vals.len)).toIntern(), } }); }; const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, union_ty.getNamespaceIndex(mod)); const enum_tag_ty_val = try mod.intern(.{ .opt = .{ - .ty = (try mod.optionalType(.type_type)).ip_index, - .val = if (union_ty.unionTagType(mod)) |tag_ty| tag_ty.ip_index else .none, + .ty = (try mod.optionalType(.type_type)).toIntern(), + .val = if (union_ty.unionTagType(mod)) |tag_ty| tag_ty.toIntern() else .none, } }); const container_layout_ty = t: { @@ -16538,7 +16613,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const field_values = .{ // layout: ContainerLayout, - (try mod.enumValueFieldIndex(container_layout_ty, @enumToInt(layout))).ip_index, + (try mod.enumValueFieldIndex(container_layout_ty, @enumToInt(layout))).toIntern(), // tag_type: ?type, enum_tag_ty_val, @@ -16548,10 +16623,10 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai decls_val, }; return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ - .ty = type_info_ty.ip_index, - .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Union))).ip_index, + .ty = type_info_ty.toIntern(), + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Union))).toIntern(), 
.val = try mod.intern(.{ .aggregate = .{ - .ty = type_union_ty.ip_index, + .ty = type_union_ty.toIntern(), .storage = .{ .elems = &field_values }, } }), } })).toValue()); @@ -16595,7 +16670,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai var struct_field_vals: []InternPool.Index = &.{}; defer gpa.free(struct_field_vals); fv: { - const struct_type = switch (mod.intern_pool.indexToKey(struct_ty.ip_index)) { + const struct_type = switch (mod.intern_pool.indexToKey(struct_ty.toIntern())) { .anon_struct_type => |tuple| { struct_field_vals = try gpa.alloc(InternPool.Index, tuple.types.len); for ( @@ -16611,16 +16686,24 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // https://github.com/ziglang/zig/issues/15709 @as([]const u8, mod.intern_pool.stringToSlice(tuple.names[i])) else - try std.fmt.allocPrintZ(anon_decl.arena(), "{d}", .{i}); + try std.fmt.allocPrint(sema.arena, "{d}", .{i}); + const new_decl_ty = try mod.arrayType(.{ + .len = bytes.len, + .child = .u8_type, + .sentinel = .zero_u8, + }); const new_decl = try anon_decl.finish( - try Type.array(anon_decl.arena(), bytes.len, Value.zero_u8, Type.u8, mod), - try Value.Tag.bytes.create(anon_decl.arena(), bytes.ptr[0 .. bytes.len + 1]), + new_decl_ty, + (try mod.intern(.{ .aggregate = .{ + .ty = new_decl_ty.toIntern(), + .storage = .{ .bytes = bytes }, + } })).toValue(), 0, // default alignment ); break :v try mod.intern(.{ .ptr = .{ - .ty = .slice_const_u8_type, + .ty = .slice_const_u8_sentinel_0_type, .addr = .{ .decl = new_decl }, - .len = (try mod.intValue(Type.usize, bytes.len)).ip_index, + .len = (try mod.intValue(Type.usize, bytes.len)).toIntern(), } }); }; @@ -16633,14 +16716,14 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // type: type, field_ty, // default_value: ?*const anyopaque, - default_val_ptr.ip_index, + default_val_ptr.toIntern(), // is_comptime: bool, - Value.makeBool(is_comptime).ip_index, + Value.makeBool(is_comptime).toIntern(), // alignment: comptime_int, - (try mod.intValue(Type.comptime_int, field_ty.toType().abiAlignment(mod))).ip_index, + (try mod.intValue(Type.comptime_int, field_ty.toType().abiAlignment(mod))).toIntern(), }; struct_field_val.* = try mod.intern(.{ .aggregate = .{ - .ty = struct_field_ty.ip_index, + .ty = struct_field_ty.toIntern(), .storage = .{ .elems = &struct_field_fields }, } }); } @@ -16660,20 +16743,27 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const name_val = v: { var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); - const bytes = try anon_decl.arena().dupeZ(u8, name); + const new_decl_ty = try mod.arrayType(.{ + .len = name.len, + .child = .u8_type, + .sentinel = .zero_u8, + }); const new_decl = try anon_decl.finish( - try Type.array(anon_decl.arena(), bytes.len, try mod.intValue(Type.u8, 0), Type.u8, mod), - try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. 
bytes.len + 1]), + new_decl_ty, + (try mod.intern(.{ .aggregate = .{ + .ty = new_decl_ty.toIntern(), + .storage = .{ .bytes = name }, + } })).toValue(), 0, // default alignment ); break :v try mod.intern(.{ .ptr = .{ - .ty = .slice_const_u8_type, + .ty = .slice_const_u8_sentinel_0_type, .addr = .{ .decl = new_decl }, - .len = (try mod.intValue(Type.usize, bytes.len)).ip_index, + .len = (try mod.intValue(Type.usize, name.len)).toIntern(), } }); }; - const opt_default_val = if (field.default_val.ip_index == .unreachable_value) + const opt_default_val = if (field.default_val.toIntern() == .unreachable_value) null else field.default_val; @@ -16684,16 +16774,16 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // name: []const u8, name_val, // type: type, - field.ty.ip_index, + field.ty.toIntern(), // default_value: ?*const anyopaque, - default_val_ptr.ip_index, + default_val_ptr.toIntern(), // is_comptime: bool, - Value.makeBool(field.is_comptime).ip_index, + Value.makeBool(field.is_comptime).toIntern(), // alignment: comptime_int, - (try mod.intValue(Type.comptime_int, alignment)).ip_index, + (try mod.intValue(Type.comptime_int, alignment)).toIntern(), }; field_val.* = try mod.intern(.{ .aggregate = .{ - .ty = struct_field_ty.ip_index, + .ty = struct_field_ty.toIntern(), .storage = .{ .elems = &struct_field_fields }, } }); } @@ -16702,37 +16792,37 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const fields_val = v: { const array_fields_ty = try mod.arrayType(.{ .len = struct_field_vals.len, - .child = struct_field_ty.ip_index, + .child = struct_field_ty.toIntern(), .sentinel = .none, }); const new_decl = try fields_anon_decl.finish( array_fields_ty, (try mod.intern(.{ .aggregate = .{ - .ty = array_fields_ty.ip_index, + .ty = array_fields_ty.toIntern(), .storage = .{ .elems = struct_field_vals }, } })).toValue(), 0, // default alignment ); break :v try mod.intern(.{ .ptr = .{ .ty = (try mod.ptrType(.{ - .elem_type = struct_field_ty.ip_index, + .elem_type = struct_field_ty.toIntern(), .size = .Slice, .is_const = true, - })).ip_index, + })).toIntern(), .addr = .{ .decl = new_decl }, - .len = (try mod.intValue(Type.usize, struct_field_vals.len)).ip_index, + .len = (try mod.intValue(Type.usize, struct_field_vals.len)).toIntern(), } }); }; const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, struct_ty.getNamespaceIndex(mod)); const backing_integer_val = try mod.intern(.{ .opt = .{ - .ty = (try mod.optionalType(.type_type)).ip_index, + .ty = (try mod.optionalType(.type_type)).toIntern(), .val = if (layout == .Packed) val: { const struct_obj = mod.typeToStruct(struct_ty).?; assert(struct_obj.haveLayout()); assert(struct_obj.backing_int_ty.isInt(mod)); - break :val struct_obj.backing_int_ty.ip_index; + break :val struct_obj.backing_int_ty.toIntern(); } else .none, } }); @@ -16751,7 +16841,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const field_values = [_]InternPool.Index{ // layout: ContainerLayout, - (try mod.enumValueFieldIndex(container_layout_ty, @enumToInt(layout))).ip_index, + (try mod.enumValueFieldIndex(container_layout_ty, @enumToInt(layout))).toIntern(), // backing_integer: ?type, backing_integer_val, // fields: []const StructField, @@ -16759,13 +16849,13 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // decls: []const Declaration, decls_val, // is_tuple: bool, - Value.makeBool(struct_ty.isTuple(mod)).ip_index, + 
Value.makeBool(struct_ty.isTuple(mod)).toIntern(), }; return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ - .ty = type_info_ty.ip_index, - .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Struct))).ip_index, + .ty = type_info_ty.toIntern(), + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Struct))).toIntern(), .val = try mod.intern(.{ .aggregate = .{ - .ty = type_struct_ty.ip_index, + .ty = type_struct_ty.toIntern(), .storage = .{ .elems = &field_values }, } }), } })).toValue()); @@ -16794,10 +16884,10 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai decls_val, }; return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ - .ty = type_info_ty.ip_index, - .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Opaque))).ip_index, + .ty = type_info_ty.toIntern(), + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Opaque))).toIntern(), .val = try mod.intern(.{ .aggregate = .{ - .ty = type_opaque_ty.ip_index, + .ty = type_opaque_ty.toIntern(), .storage = .{ .elems = &field_values }, } }), } })).toValue()); @@ -16845,25 +16935,25 @@ fn typeInfoDecls( const array_decl_ty = try mod.arrayType(.{ .len = decl_vals.items.len, - .child = declaration_ty.ip_index, + .child = declaration_ty.toIntern(), .sentinel = .none, }); const new_decl = try decls_anon_decl.finish( array_decl_ty, (try mod.intern(.{ .aggregate = .{ - .ty = array_decl_ty.ip_index, + .ty = array_decl_ty.toIntern(), .storage = .{ .elems = decl_vals.items }, } })).toValue(), 0, // default alignment ); return try mod.intern(.{ .ptr = .{ .ty = (try mod.ptrType(.{ - .elem_type = declaration_ty.ip_index, + .elem_type = declaration_ty.toIntern(), .size = .Slice, .is_const = true, - })).ip_index, + })).toIntern(), .addr = .{ .decl = new_decl }, - .len = (try mod.intValue(Type.usize, decl_vals.items.len)).ip_index, + .len = (try mod.intValue(Type.usize, decl_vals.items.len)).toIntern(), } }); } @@ -16892,16 +16982,24 @@ fn typeInfoNamespaceDecls( const name_val = v: { var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); - const bytes = try anon_decl.arena().dupeZ(u8, mem.sliceTo(decl.name, 0)); + const name = mem.span(decl.name); + const new_decl_ty = try mod.arrayType(.{ + .len = name.len, + .child = .u8_type, + .sentinel = .zero_u8, + }); const new_decl = try anon_decl.finish( - try Type.array(anon_decl.arena(), bytes.len, try mod.intValue(Type.u8, 0), Type.u8, mod), - try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. 
bytes.len + 1]), + new_decl_ty, + (try mod.intern(.{ .aggregate = .{ + .ty = new_decl_ty.toIntern(), + .storage = .{ .bytes = name }, + } })).toValue(), 0, // default alignment ); break :v try mod.intern(.{ .ptr = .{ - .ty = .slice_const_u8_type, + .ty = .slice_const_u8_sentinel_0_type, .addr = .{ .decl = new_decl }, - .len = (try mod.intValue(Type.usize, bytes.len)).ip_index, + .len = (try mod.intValue(Type.usize, name.len)).toIntern(), } }); }; @@ -16909,10 +17007,10 @@ fn typeInfoNamespaceDecls( //name: []const u8, name_val, //is_pub: bool, - Value.makeBool(decl.is_pub).ip_index, + Value.makeBool(decl.is_pub).toIntern(), }; try decl_vals.append(try mod.intern(.{ .aggregate = .{ - .ty = declaration_ty.ip_index, + .ty = declaration_ty.toIntern(), .storage = .{ .elems = &fields }, } })); } @@ -16985,7 +17083,7 @@ fn log2IntType(sema: *Sema, block: *Block, operand: Type, src: LazySrcLoc) Compi const log2_elem_ty = try sema.log2IntType(block, elem_ty, src); return mod.vectorType(.{ .len = operand.vectorLen(mod), - .child = log2_elem_ty.ip_index, + .child = log2_elem_ty.toIntern(), }); }, else => {}, @@ -17527,7 +17625,7 @@ fn zirRetErrValue( const kv = try mod.getErrorValue(err_name); const error_set_type = try mod.singleErrorSetType(err_name); const result_inst = try sema.addConstant(error_set_type, (try mod.intern(.{ .err = .{ - .ty = error_set_type.ip_index, + .ty = error_set_type.toIntern(), .name = try mod.intern_pool.getOrPutString(sema.gpa, kv.key), } })).toValue()); return sema.analyzeRet(block, result_inst, src); @@ -17854,9 +17952,9 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const val = try sema.resolveConstValue(block, align_src, coerced, "pointer alignment must be comptime-known"); // Check if this happens to be the lazy alignment of our element type, in // which case we can make this 0 without resolving it. 
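// `toIntern()` replaces raw `ip_index` field reads here and throughout this
// diff. Unlike the bare field, the accessor can assert that the Value or Type
// is actually interned, which is presumably why the explicit
// `assert(elem_val.ip_index != .none)` earlier in this patch became redundant.
// A plausible shape for the accessor (an assumption, not copied from the
// tree):
//
//     pub fn toIntern(val: Value) InternPool.Index {
//         assert(val.ip_index != .none); // legacy, non-interned Values trip this
//         return val.ip_index;
//     }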
- switch (mod.intern_pool.indexToKey(val.ip_index)) { + switch (mod.intern_pool.indexToKey(val.toIntern())) { .int => |int| switch (int.storage) { - .lazy_align => |lazy_ty| if (lazy_ty == elem_ty.ip_index) break :blk .none, + .lazy_align => |lazy_ty| if (lazy_ty == elem_ty.toIntern()) break :blk .none, else => {}, }, else => {}, @@ -17985,7 +18083,7 @@ fn arrayInitEmpty(sema: *Sema, block: *Block, src: LazySrcLoc, obj_ty: Type) Com } } return sema.addConstant(obj_ty, (try mod.intern(.{ .aggregate = .{ - .ty = obj_ty.ip_index, + .ty = obj_ty.toIntern(), .storage = .{ .elems = &.{} }, } })).toValue()); } @@ -18021,10 +18119,11 @@ fn unionInit( const tag_ty = union_ty.unionTagTypeHypothetical(mod); const enum_field_index = @intCast(u32, tag_ty.enumFieldIndex(field_name, mod).?); const tag_val = try mod.enumValueFieldIndex(tag_ty, enum_field_index); - return sema.addConstant(union_ty, try Value.Tag.@"union".create(sema.arena, .{ - .tag = tag_val, - .val = init_val, - })); + return sema.addConstant(union_ty, (try mod.intern(.{ .un = .{ + .ty = union_ty.toIntern(), + .tag = try tag_val.intern(tag_ty, mod), + .val = try init_val.intern(field.ty, mod), + } })).toValue()); } try sema.requireRuntimeBlock(block, init_src, null); @@ -18125,12 +18224,12 @@ fn zirStructInit( const init_inst = try sema.resolveInst(item.data.init); if (try sema.resolveMaybeUndefVal(init_inst)) |val| { - return sema.addConstantMaybeRef( - block, - resolved_ty, - try Value.Tag.@"union".create(sema.arena, .{ .tag = tag_val, .val = val }), - is_ref, - ); + const field = resolved_ty.unionFields(mod).values()[field_index]; + return sema.addConstantMaybeRef(block, resolved_ty, (try mod.intern(.{ .un = .{ + .ty = resolved_ty.toIntern(), + .tag = try tag_val.intern(tag_ty, mod), + .val = try val.intern(field.ty, mod), + } })).toValue(), is_ref); } if (is_ref) { @@ -18171,7 +18270,7 @@ fn finishStructInit( var root_msg: ?*Module.ErrorMsg = null; errdefer if (root_msg) |msg| msg.destroy(sema.gpa); - switch (mod.intern_pool.indexToKey(struct_ty.ip_index)) { + switch (mod.intern_pool.indexToKey(struct_ty.toIntern())) { .anon_struct_type => |anon_struct| { for (anon_struct.types, anon_struct.values, 0..) |field_ty, default_val, i| { if (field_inits[i] != .none) continue; @@ -18204,7 +18303,7 @@ fn finishStructInit( for (struct_obj.fields.values(), 0..) |field, i| { if (field_inits[i] != .none) continue; - if (field.default_val.ip_index == .unreachable_value) { + if (field.default_val.toIntern() == .unreachable_value) { const field_name = struct_obj.fields.keys()[i]; const template = "missing struct field: {s}"; const args = .{field_name}; @@ -18250,7 +18349,7 @@ fn finishStructInit( .intern(struct_ty.structFieldType(field_i, mod), mod); } const struct_val = try mod.intern(.{ .aggregate = .{ - .ty = struct_ty.ip_index, + .ty = struct_ty.toIntern(), .storage = .{ .elems = elems }, } }); return sema.addConstantMaybeRef(block, struct_ty, struct_val.toValue(), is_ref); @@ -18331,7 +18430,7 @@ fn zirStructInitAnon( gop.value_ptr.* = i; const init = try sema.resolveInst(item.data.init); - field_ty.* = sema.typeOf(init).ip_index; + field_ty.* = sema.typeOf(init).toIntern(); if (field_ty.toType().zigTypeTag(mod) == .Opaque) { const msg = msg: { const decl = sema.mod.declPtr(block.src_decl); @@ -18464,15 +18563,19 @@ fn zirArrayInit( } else null; const runtime_index = opt_runtime_index orelse { - const elem_vals = try sema.arena.alloc(Value, resolved_args.len); - - for (resolved_args, 0..) 
|arg, i| { + const elem_vals = try sema.arena.alloc(InternPool.Index, resolved_args.len); + for (elem_vals, resolved_args, 0..) |*val, arg, i| { + const elem_ty = if (array_ty.zigTypeTag(mod) == .Struct) + array_ty.structFieldType(i, mod) + else + array_ty.elemType2(mod); // We checked that all args are comptime above. - elem_vals[i] = (sema.resolveMaybeUndefVal(arg) catch unreachable).?; + val.* = try ((sema.resolveMaybeUndefVal(arg) catch unreachable).?).intern(elem_ty, mod); } - - const array_val = try Value.Tag.aggregate.create(sema.arena, elem_vals); - return sema.addConstantMaybeRef(block, array_ty, array_val, is_ref); + return sema.addConstantMaybeRef(block, array_ty, (try mod.intern(.{ .aggregate = .{ + .ty = array_ty.toIntern(), + .storage = .{ .elems = elem_vals }, + } })).toValue(), is_ref); }; sema.requireRuntimeBlock(block, .unneeded, null) catch |err| switch (err) { @@ -18548,7 +18651,7 @@ fn zirArrayInitAnon( for (operands, 0..) |operand, i| { const operand_src = src; // TODO better source location const elem = try sema.resolveInst(operand); - types[i] = sema.typeOf(elem).ip_index; + types[i] = sema.typeOf(elem).toIntern(); if (types[i].toType().zigTypeTag(mod) == .Opaque) { const msg = msg: { const msg = try sema.errMsg(block, operand_src, "opaque types have unknown size and therefore cannot be directly embedded in structs", .{}); @@ -18560,7 +18663,7 @@ fn zirArrayInitAnon( return sema.failWithOwnedErrorMsg(msg); } if (try sema.resolveMaybeUndefVal(elem)) |val| { - values[i] = val.ip_index; + values[i] = val.toIntern(); } else { values[i] = .none; runtime_src = operand_src; @@ -18676,7 +18779,7 @@ fn fieldType( const resolved_ty = try sema.resolveTypeFields(cur_ty); cur_ty = resolved_ty; switch (cur_ty.zigTypeTag(mod)) { - .Struct => switch (mod.intern_pool.indexToKey(cur_ty.ip_index)) { + .Struct => switch (mod.intern_pool.indexToKey(cur_ty.toIntern())) { .anon_struct_type => |anon_struct| { const field_index = try sema.anonStructFieldIndex(block, cur_ty, field_name, field_src); return sema.addType(anon_struct.types[field_index].toType()); @@ -18698,7 +18801,7 @@ fn fieldType( .Optional => { // Struct/array init through optional requires the child type to not be a pointer. // If the child of .optional is a pointer it'll error on the next loop. - cur_ty = mod.intern_pool.indexToKey(cur_ty.ip_index).opt_type.toType(); + cur_ty = mod.intern_pool.indexToKey(cur_ty.toIntern()).opt_type.toType(); continue; }, .ErrorUnion => { @@ -18776,7 +18879,7 @@ fn zirErrorName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; if (try sema.resolveDefinedValue(block, operand_src, operand)) |val| { - const err_name = sema.mod.intern_pool.indexToKey(val.ip_index).err.name; + const err_name = sema.mod.intern_pool.indexToKey(val.toIntern()).err.name; const bytes = sema.mod.intern_pool.stringToSlice(err_name); return sema.addStrLit(block, bytes); } @@ -18820,21 +18923,21 @@ fn zirUnaryMath( const vec_len = operand_ty.vectorLen(mod); const result_ty = try mod.vectorType(.{ .len = vec_len, - .child = scalar_ty.ip_index, + .child = scalar_ty.toIntern(), }); if (try sema.resolveMaybeUndefVal(operand)) |val| { if (val.isUndef(mod)) return sema.addConstUndef(result_ty); - const elems = try sema.arena.alloc(Value, vec_len); + const elems = try sema.arena.alloc(InternPool.Index, vec_len); for (elems, 0..) 
|*elem, i| { const elem_val = try val.elemValue(sema.mod, i); - elem.* = try eval(elem_val, scalar_ty, sema.arena, sema.mod); + elem.* = try (try eval(elem_val, scalar_ty, sema.arena, sema.mod)).intern(scalar_ty, mod); } - return sema.addConstant( - result_ty, - try Value.Tag.aggregate.create(sema.arena, elems), - ); + return sema.addConstant(result_ty, (try mod.intern(.{ .aggregate = .{ + .ty = result_ty.toIntern(), + .storage = .{ .elems = elems }, + } })).toValue()); } try sema.requireRuntimeBlock(block, operand_src, null); @@ -18867,7 +18970,7 @@ fn zirTagName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const enum_ty = switch (operand_ty.zigTypeTag(mod)) { .EnumLiteral => { const val = try sema.resolveConstValue(block, .unneeded, operand, ""); - const tag_name = mod.intern_pool.indexToKey(val.ip_index).enum_literal; + const tag_name = mod.intern_pool.indexToKey(val.toIntern()).enum_literal; const bytes = mod.intern_pool.stringToSlice(tag_name); return sema.addStrLit(block, bytes); }, @@ -18956,7 +19059,7 @@ fn zirReify( .AnyFrame => return sema.failWithUseOfAsync(block, src), .EnumLiteral => return Air.Inst.Ref.enum_literal_type, .Int => { - const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod); + const fields = ip.typeOf(union_val.val.toIntern()).toType().structFields(mod); const signedness_val = try union_val.val.fieldValue(mod, fields.getIndex("signedness").?); const bits_val = try union_val.val.fieldValue(mod, fields.getIndex("bits").?); @@ -18966,7 +19069,7 @@ fn zirReify( return sema.addType(ty); }, .Vector => { - const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod); + const fields = ip.typeOf(union_val.val.toIntern()).toType().structFields(mod); const len_val = try union_val.val.fieldValue(mod, fields.getIndex("len").?); const child_val = try union_val.val.fieldValue(mod, fields.getIndex("child").?); @@ -18977,12 +19080,12 @@ fn zirReify( const ty = try mod.vectorType(.{ .len = len, - .child = child_ty.ip_index, + .child = child_ty.toIntern(), }); return sema.addType(ty); }, .Float => { - const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod); + const fields = ip.typeOf(union_val.val.toIntern()).toType().structFields(mod); const bits_val = try union_val.val.fieldValue(mod, fields.getIndex("bits").?); const bits = @intCast(u16, bits_val.toUnsignedInt(mod)); @@ -18997,7 +19100,7 @@ fn zirReify( return sema.addType(ty); }, .Pointer => { - const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod); + const fields = ip.typeOf(union_val.val.toIntern()).toType().structFields(mod); const size_val = try union_val.val.fieldValue(mod, fields.getIndex("size").?); const is_const_val = try union_val.val.fieldValue(mod, fields.getIndex("is_const").?); const is_volatile_val = try union_val.val.fieldValue(mod, fields.getIndex("is_volatile").?); @@ -19088,7 +19191,7 @@ fn zirReify( return sema.addType(ty); }, .Array => { - const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod); + const fields = ip.typeOf(union_val.val.toIntern()).toType().structFields(mod); const len_val = try union_val.val.fieldValue(mod, fields.getIndex("len").?); const child_val = try union_val.val.fieldValue(mod, fields.getIndex("child").?); const sentinel_val = try union_val.val.fieldValue(mod, fields.getIndex("sentinel").?); @@ -19107,7 +19210,7 @@ fn zirReify( return sema.addType(ty); }, .Optional => { - const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod); + const fields = 
ip.typeOf(union_val.val.toIntern()).toType().structFields(mod); const child_val = try union_val.val.fieldValue(mod, fields.getIndex("child").?); const child_ty = child_val.toType(); @@ -19116,7 +19219,7 @@ fn zirReify( return sema.addType(ty); }, .ErrorUnion => { - const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod); + const fields = ip.typeOf(union_val.val.toIntern()).toType().structFields(mod); const error_set_val = try union_val.val.fieldValue(mod, fields.getIndex("error_set").?); const payload_val = try union_val.val.fieldValue(mod, fields.getIndex("payload").?); @@ -19155,7 +19258,7 @@ fn zirReify( return sema.addType(ty); }, .Struct => { - const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod); + const fields = ip.typeOf(union_val.val.toIntern()).toType().structFields(mod); const layout_val = try union_val.val.fieldValue(mod, fields.getIndex("layout").?); const backing_integer_val = try union_val.val.fieldValue(mod, fields.getIndex("backing_integer").?); const fields_val = try union_val.val.fieldValue(mod, fields.getIndex("fields").?); @@ -19176,7 +19279,7 @@ fn zirReify( return try sema.reifyStruct(block, inst, src, layout, backing_integer_val, fields_val, name_strategy, is_tuple_val.toBool(mod)); }, .Enum => { - const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod); + const fields = ip.typeOf(union_val.val.toIntern()).toType().structFields(mod); const tag_type_val = try union_val.val.fieldValue(mod, fields.getIndex("tag_type").?); const fields_val = try union_val.val.fieldValue(mod, fields.getIndex("fields").?); const decls_val = try union_val.val.fieldValue(mod, fields.getIndex("decls").?); @@ -19517,7 +19620,7 @@ fn zirReify( return sema.analyzeDeclVal(block, src, new_decl_index); }, .Fn => { - const fields = ip.typeOf(union_val.val.ip_index).toType().structFields(mod); + const fields = ip.typeOf(union_val.val.toIntern()).toType().structFields(mod); const calling_convention_val = try union_val.val.fieldValue(mod, fields.getIndex("calling_convention").?); const alignment_val = try union_val.val.fieldValue(mod, fields.getIndex("alignment").?); const is_generic_val = try union_val.val.fieldValue(mod, fields.getIndex("is_generic").?); @@ -19571,7 +19674,7 @@ fn zirReify( const param_type_val = param_type_opt_val.optionalValue(mod) orelse return sema.fail(block, src, "Type.Fn.Param.arg_type must be non-null for @Type", .{}); - param_type.* = param_type_val.ip_index; + param_type.* = param_type_val.toIntern(); if (arg_is_noalias) { if (!param_type.toType().isPtrAtRuntime(mod)) { @@ -19733,7 +19836,7 @@ fn reifyStruct( opt_val; break :blk try payload_val.copy(new_decl_arena_allocator); } else Value.@"unreachable"; - if (is_comptime_val.toBool(mod) and default_val.ip_index == .unreachable_value) { + if (is_comptime_val.toBool(mod) and default_val.toIntern() == .unreachable_value) { return sema.fail(block, src, "comptime field without default initialization value", .{}); } @@ -19956,9 +20059,17 @@ fn zirTypeName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const bytes = try ty.nameAllocArena(anon_decl.arena(), mod); + const decl_ty = try mod.arrayType(.{ + .len = bytes.len, + .child = .u8_type, + .sentinel = .zero_u8, + }); const new_decl = try anon_decl.finish( - try Type.array(anon_decl.arena(), bytes.len, try mod.intValue(Type.u8, 0), Type.u8, mod), - try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. 
bytes.len + 1]), + decl_ty, + (try mod.intern(.{ .aggregate = .{ + .ty = decl_ty.toIntern(), + .storage = .{ .bytes = bytes }, + } })).toValue(), 0, // default alignment ); @@ -20125,8 +20236,8 @@ fn zirErrSetCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat break :disjoint false; } - if (!ip.isInferredErrorSetType(dest_ty.ip_index) and - !ip.isInferredErrorSetType(operand_ty.ip_index)) + if (!ip.isInferredErrorSetType(dest_ty.toIntern()) and + !ip.isInferredErrorSetType(operand_ty.toIntern())) { break :disjoint true; } @@ -20157,7 +20268,7 @@ fn zirErrSetCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat if (maybe_operand_val) |val| { if (!dest_ty.isAnyError(mod)) { - const error_name = mod.intern_pool.stringToSlice(mod.intern_pool.indexToKey(val.ip_index).err.name); + const error_name = mod.intern_pool.stringToSlice(mod.intern_pool.indexToKey(val.toIntern()).err.name); if (!dest_ty.errorSetHasField(error_name, mod)) { const msg = msg: { const msg = try sema.errMsg( @@ -20257,11 +20368,11 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air if (dest_ty.zigTypeTag(mod) == .Optional) { var dest_ptr_info = dest_ty.optionalChild(mod).ptrInfo(mod); dest_ptr_info.@"align" = operand_align; - break :blk try Type.optional(sema.arena, try Type.ptr(sema.arena, sema.mod, dest_ptr_info), mod); + break :blk try Type.optional(sema.arena, try Type.ptr(sema.arena, mod, dest_ptr_info), mod); } else { var dest_ptr_info = dest_ty.ptrInfo(mod); dest_ptr_info.@"align" = operand_align; - break :blk try Type.ptr(sema.arena, sema.mod, dest_ptr_info); + break :blk try Type.ptr(sema.arena, mod, dest_ptr_info); } }; @@ -20279,10 +20390,10 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air errdefer msg.destroy(sema.gpa); try sema.errNote(block, operand_src, msg, "'{}' has alignment '{d}'", .{ - operand_ty.fmt(sema.mod), operand_align, + operand_ty.fmt(mod), operand_align, }); try sema.errNote(block, dest_ty_src, msg, "'{}' has alignment '{d}'", .{ - dest_ty.fmt(sema.mod), dest_align, + dest_ty.fmt(mod), dest_align, }); try sema.errNote(block, src, msg, "consider using '@alignCast'", .{}); @@ -20296,11 +20407,11 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air return sema.failWithUseOfUndef(block, operand_src); } if (!dest_ty.ptrAllowsZero(mod) and operand_val.isNull(mod)) { - return sema.fail(block, operand_src, "null pointer casted to type '{}'", .{dest_ty.fmt(sema.mod)}); + return sema.fail(block, operand_src, "null pointer casted to type '{}'", .{dest_ty.fmt(mod)}); } if (dest_ty.zigTypeTag(mod) == .Optional and sema.typeOf(ptr).zigTypeTag(mod) != .Optional) { return sema.addConstant(dest_ty, (try mod.intern(.{ .opt = .{ - .ty = dest_ty.ip_index, + .ty = dest_ty.toIntern(), .val = operand_val.toIntern(), } })).toValue()); } @@ -20335,7 +20446,7 @@ fn zirConstCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData var ptr_info = operand_ty.ptrInfo(mod); ptr_info.mutable = true; - const dest_ty = try Type.ptr(sema.arena, sema.mod, ptr_info); + const dest_ty = try Type.ptr(sema.arena, mod, ptr_info); if (try sema.resolveMaybeUndefVal(operand)) |operand_val| { return sema.addConstant(dest_ty, operand_val); @@ -20356,7 +20467,7 @@ fn zirVolatileCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstD var ptr_info = operand_ty.ptrInfo(mod); ptr_info.@"volatile" = false; - const dest_ty = try Type.ptr(sema.arena, sema.mod, ptr_info); + const dest_ty = try 
Type.ptr(sema.arena, mod, ptr_info); if (try sema.resolveMaybeUndefVal(operand)) |operand_val| { return sema.addConstant(dest_ty, operand_val); @@ -20382,7 +20493,7 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const dest_ty = if (is_vector) try mod.vectorType(.{ .len = operand_ty.vectorLen(mod), - .child = dest_scalar_ty.ip_index, + .child = dest_scalar_ty.toIntern(), }) else dest_scalar_ty; @@ -20405,7 +20516,7 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai if (operand_info.signedness != dest_info.signedness) { return sema.fail(block, operand_src, "expected {s} integer type, found '{}'", .{ - @tagName(dest_info.signedness), operand_ty.fmt(sema.mod), + @tagName(dest_info.signedness), operand_ty.fmt(mod), }); } if (operand_info.bits < dest_info.bits) { @@ -20414,7 +20525,7 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, "destination type '{}' has more bits than source type '{}'", - .{ dest_ty.fmt(sema.mod), operand_ty.fmt(sema.mod) }, + .{ dest_ty.fmt(mod), operand_ty.fmt(mod) }, ); errdefer msg.destroy(sema.gpa); try sema.errNote(block, dest_ty_src, msg, "destination type has {d} bits", .{ @@ -20434,18 +20545,18 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai if (!is_vector) { return sema.addConstant( dest_ty, - try val.intTrunc(operand_ty, sema.arena, dest_info.signedness, dest_info.bits, sema.mod), + try val.intTrunc(operand_ty, sema.arena, dest_info.signedness, dest_info.bits, mod), ); } - const elems = try sema.arena.alloc(Value, operand_ty.vectorLen(mod)); + const elems = try sema.arena.alloc(InternPool.Index, operand_ty.vectorLen(mod)); for (elems, 0..) |*elem, i| { - const elem_val = try val.elemValue(sema.mod, i); - elem.* = try elem_val.intTrunc(operand_scalar_ty, sema.arena, dest_info.signedness, dest_info.bits, sema.mod); + const elem_val = try val.elemValue(mod, i); + elem.* = try (try elem_val.intTrunc(operand_scalar_ty, sema.arena, dest_info.signedness, dest_info.bits, mod)).intern(dest_scalar_ty, mod); } - return sema.addConstant( - dest_ty, - try Value.Tag.aggregate.create(sema.arena, elems), - ); + return sema.addConstant(dest_ty, (try mod.intern(.{ .aggregate = .{ + .ty = dest_ty.toIntern(), + .storage = .{ .elems = elems }, + } })).toValue()); } try sema.requireRuntimeBlock(block, src, operand_src); @@ -20466,7 +20577,7 @@ fn zirAlignCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A var ptr_info = ptr_ty.ptrInfo(mod); ptr_info.@"align" = dest_align; - var dest_ty = try Type.ptr(sema.arena, sema.mod, ptr_info); + var dest_ty = try Type.ptr(sema.arena, mod, ptr_info); if (ptr_ty.zigTypeTag(mod) == .Optional) { dest_ty = try mod.optionalType(dest_ty.toIntern()); } @@ -20531,22 +20642,22 @@ fn zirBitCount( const vec_len = operand_ty.vectorLen(mod); const result_ty = try mod.vectorType(.{ .len = vec_len, - .child = result_scalar_ty.ip_index, + .child = result_scalar_ty.toIntern(), }); if (try sema.resolveMaybeUndefVal(operand)) |val| { if (val.isUndef(mod)) return sema.addConstUndef(result_ty); - const elems = try sema.arena.alloc(Value, vec_len); + const elems = try sema.arena.alloc(InternPool.Index, vec_len); const scalar_ty = operand_ty.scalarType(mod); for (elems, 0..) 
|*elem, i| { - const elem_val = try val.elemValue(sema.mod, i); + const elem_val = try val.elemValue(mod, i); const count = comptimeOp(elem_val, scalar_ty, mod); - elem.* = try mod.intValue(scalar_ty, count); + elem.* = (try mod.intValue(scalar_ty, count)).toIntern(); } - return sema.addConstant( - result_ty, - try Value.Tag.aggregate.create(sema.arena, elems), - ); + return sema.addConstant(result_ty, (try mod.intern(.{ .aggregate = .{ + .ty = result_ty.toIntern(), + .storage = .{ .elems = elems }, + } })).toValue()); } else { try sema.requireRuntimeBlock(block, src, operand_src); return block.addTyOp(air_tag, result_ty, operand); @@ -20580,7 +20691,7 @@ fn zirByteSwap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, operand_src, "@byteSwap requires the number of bits to be evenly divisible by 8, but {} has {} bits", - .{ scalar_ty.fmt(sema.mod), bits }, + .{ scalar_ty.fmt(mod), bits }, ); } @@ -20605,15 +20716,15 @@ fn zirByteSwap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai return sema.addConstUndef(operand_ty); const vec_len = operand_ty.vectorLen(mod); - const elems = try sema.arena.alloc(Value, vec_len); + const elems = try sema.arena.alloc(InternPool.Index, vec_len); for (elems, 0..) |*elem, i| { - const elem_val = try val.elemValue(sema.mod, i); - elem.* = try elem_val.byteSwap(operand_ty, mod, sema.arena); + const elem_val = try val.elemValue(mod, i); + elem.* = try (try elem_val.byteSwap(operand_ty, mod, sema.arena)).intern(scalar_ty, mod); } - return sema.addConstant( - operand_ty, - try Value.Tag.aggregate.create(sema.arena, elems), - ); + return sema.addConstant(operand_ty, (try mod.intern(.{ .aggregate = .{ + .ty = operand_ty.toIntern(), + .storage = .{ .elems = elems }, + } })).toValue()); } else operand_src; try sema.requireRuntimeBlock(block, src, runtime_src); @@ -20653,15 +20764,15 @@ fn zirBitReverse(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! return sema.addConstUndef(operand_ty); const vec_len = operand_ty.vectorLen(mod); - const elems = try sema.arena.alloc(Value, vec_len); + const elems = try sema.arena.alloc(InternPool.Index, vec_len); for (elems, 0..) 
|*elem, i| { - const elem_val = try val.elemValue(sema.mod, i); - elem.* = try elem_val.bitReverse(scalar_ty, mod, sema.arena); + const elem_val = try val.elemValue(mod, i); + elem.* = try (try elem_val.bitReverse(scalar_ty, mod, sema.arena)).intern(scalar_ty, mod); } - return sema.addConstant( - operand_ty, - try Value.Tag.aggregate.create(sema.arena, elems), - ); + return sema.addConstant(operand_ty, (try mod.intern(.{ .aggregate = .{ + .ty = operand_ty.toIntern(), + .storage = .{ .elems = elems }, + } })).toValue()); } else operand_src; try sema.requireRuntimeBlock(block, src, runtime_src); @@ -20699,7 +20810,7 @@ fn bitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!u6 .Struct => {}, else => { const msg = msg: { - const msg = try sema.errMsg(block, lhs_src, "expected struct type, found '{}'", .{ty.fmt(sema.mod)}); + const msg = try sema.errMsg(block, lhs_src, "expected struct type, found '{}'", .{ty.fmt(mod)}); errdefer msg.destroy(sema.gpa); try sema.addDeclaredHereNote(msg, ty); break :msg msg; @@ -20738,7 +20849,7 @@ fn checkNamespaceType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) Com const mod = sema.mod; switch (ty.zigTypeTag(mod)) { .Struct, .Enum, .Union, .Opaque => return, - else => return sema.fail(block, src, "expected struct, enum, union, or opaque; found '{}'", .{ty.fmt(sema.mod)}), + else => return sema.fail(block, src, "expected struct, enum, union, or opaque; found '{}'", .{ty.fmt(mod)}), } } @@ -20748,7 +20859,7 @@ fn checkIntType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileEr switch (try ty.zigTypeTagOrPoison(mod)) { .ComptimeInt => return true, .Int => return false, - else => return sema.fail(block, src, "expected integer type, found '{}'", .{ty.fmt(sema.mod)}), + else => return sema.fail(block, src, "expected integer type, found '{}'", .{ty.fmt(mod)}), } } @@ -20807,7 +20918,7 @@ fn checkPtrOperand( block, ty_src, "expected pointer, found '{}'", - .{ty.fmt(sema.mod)}, + .{ty.fmt(mod)}, ); errdefer msg.destroy(sema.gpa); @@ -20820,7 +20931,7 @@ fn checkPtrOperand( .Optional => if (ty.isPtrLikeOptional(mod)) return, else => {}, } - return sema.fail(block, ty_src, "expected pointer type, found '{}'", .{ty.fmt(sema.mod)}); + return sema.fail(block, ty_src, "expected pointer type, found '{}'", .{ty.fmt(mod)}); } fn checkPtrType( @@ -20838,7 +20949,7 @@ fn checkPtrType( block, ty_src, "expected pointer type, found '{}'", - .{ty.fmt(sema.mod)}, + .{ty.fmt(mod)}, ); errdefer msg.destroy(sema.gpa); @@ -20851,7 +20962,7 @@ fn checkPtrType( .Optional => if (ty.isPtrLikeOptional(mod)) return, else => {}, } - return sema.fail(block, ty_src, "expected pointer type, found '{}'", .{ty.fmt(sema.mod)}); + return sema.fail(block, ty_src, "expected pointer type, found '{}'", .{ty.fmt(mod)}); } fn checkVectorElemType( @@ -20865,7 +20976,7 @@ fn checkVectorElemType( .Int, .Float, .Bool => return, else => if (ty.isPtrAtRuntime(mod)) return, } - return sema.fail(block, ty_src, "expected integer, float, bool, or pointer for the vector element type; found '{}'", .{ty.fmt(sema.mod)}); + return sema.fail(block, ty_src, "expected integer, float, bool, or pointer for the vector element type; found '{}'", .{ty.fmt(mod)}); } fn checkFloatType( @@ -20877,7 +20988,7 @@ fn checkFloatType( const mod = sema.mod; switch (ty.zigTypeTag(mod)) { .ComptimeInt, .ComptimeFloat, .Float => {}, - else => return sema.fail(block, ty_src, "expected float type, found '{}'", .{ty.fmt(sema.mod)}), + else => return sema.fail(block, ty_src, "expected float type, found 
'{}'", .{ty.fmt(mod)}), } } @@ -20894,7 +21005,7 @@ fn checkNumericType( .ComptimeFloat, .Float, .ComptimeInt, .Int => {}, else => |t| return sema.fail(block, ty_src, "expected number, found '{}'", .{t}), }, - else => return sema.fail(block, ty_src, "expected number, found '{}'", .{ty.fmt(sema.mod)}), + else => return sema.fail(block, ty_src, "expected number, found '{}'", .{ty.fmt(mod)}), } } @@ -20928,7 +21039,7 @@ fn checkAtomicPtrOperand( block, elem_ty_src, "expected bool, integer, float, enum, or pointer type; found '{}'", - .{elem_ty.fmt(sema.mod)}, + .{elem_ty.fmt(mod)}, ), }; @@ -20943,7 +21054,7 @@ fn checkAtomicPtrOperand( const ptr_data = switch (try ptr_ty.zigTypeTagOrPoison(mod)) { .Pointer => ptr_ty.ptrInfo(mod), else => { - const wanted_ptr_ty = try Type.ptr(sema.arena, sema.mod, wanted_ptr_data); + const wanted_ptr_ty = try Type.ptr(sema.arena, mod, wanted_ptr_data); _ = try sema.coerce(block, wanted_ptr_ty, ptr, ptr_src); unreachable; }, @@ -20953,7 +21064,7 @@ fn checkAtomicPtrOperand( wanted_ptr_data.@"allowzero" = ptr_data.@"allowzero"; wanted_ptr_data.@"volatile" = ptr_data.@"volatile"; - const wanted_ptr_ty = try Type.ptr(sema.arena, sema.mod, wanted_ptr_data); + const wanted_ptr_ty = try Type.ptr(sema.arena, mod, wanted_ptr_data); const casted_ptr = try sema.coerce(block, wanted_ptr_ty, ptr, ptr_src); return casted_ptr; @@ -21016,12 +21127,12 @@ fn checkIntOrVector( switch (try elem_ty.zigTypeTagOrPoison(mod)) { .Int => return elem_ty, else => return sema.fail(block, operand_src, "expected vector of integers; found vector of '{}'", .{ - elem_ty.fmt(sema.mod), + elem_ty.fmt(mod), }), } }, else => return sema.fail(block, operand_src, "expected integer or vector, found '{}'", .{ - operand_ty.fmt(sema.mod), + operand_ty.fmt(mod), }), } } @@ -21040,12 +21151,12 @@ fn checkIntOrVectorAllowComptime( switch (try elem_ty.zigTypeTagOrPoison(mod)) { .Int, .ComptimeInt => return elem_ty, else => return sema.fail(block, operand_src, "expected vector of integers; found vector of '{}'", .{ - elem_ty.fmt(sema.mod), + elem_ty.fmt(mod), }), } }, else => return sema.fail(block, operand_src, "expected integer or vector, found '{}'", .{ - operand_ty.fmt(sema.mod), + operand_ty.fmt(mod), }), } } @@ -21054,7 +21165,7 @@ fn checkErrorSetType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) Comp const mod = sema.mod; switch (ty.zigTypeTag(mod)) { .ErrorSet => return, - else => return sema.fail(block, src, "expected error set type, found '{}'", .{ty.fmt(sema.mod)}), + else => return sema.fail(block, src, "expected error set type, found '{}'", .{ty.fmt(mod)}), } } @@ -21142,7 +21253,7 @@ fn checkVectorizableBinaryOperands( } else { const msg = msg: { const msg = try sema.errMsg(block, src, "mixed scalar and vector operands: '{}' and '{}'", .{ - lhs_ty.fmt(sema.mod), rhs_ty.fmt(sema.mod), + lhs_ty.fmt(mod), rhs_ty.fmt(mod), }); errdefer msg.destroy(sema.gpa); if (lhs_is_vector) { @@ -21161,7 +21272,7 @@ fn checkVectorizableBinaryOperands( fn maybeOptionsSrc(sema: *Sema, block: *Block, base_src: LazySrcLoc, wanted: []const u8) LazySrcLoc { if (base_src == .unneeded) return .unneeded; const mod = sema.mod; - return mod.optionsSrc(sema.mod.declPtr(block.src_decl), base_src, wanted); + return mod.optionsSrc(mod.declPtr(block.src_decl), base_src, wanted); } fn resolveExportOptions( @@ -21282,7 +21393,7 @@ fn zirCmpxchg( block, elem_ty_src, "expected bool, integer, enum, or pointer type; found '{}'", - .{elem_ty.fmt(sema.mod)}, + .{elem_ty.fmt(mod)}, ); } const uncasted_ptr = try 
sema.resolveInst(extra.ptr); @@ -21322,8 +21433,8 @@ fn zirCmpxchg( const ptr_ty = sema.typeOf(ptr); const stored_val = (try sema.pointerDeref(block, ptr_src, ptr_val, ptr_ty)) orelse break :rs ptr_src; const result_val = try mod.intern(.{ .opt = .{ - .ty = result_ty.ip_index, - .val = if (stored_val.eql(expected_val, elem_ty, sema.mod)) blk: { + .ty = result_ty.toIntern(), + .val = if (stored_val.eql(expected_val, elem_ty, mod)) blk: { try sema.storePtr(block, src, ptr, new_value); break :blk .none; } else stored_val.toIntern(), @@ -21363,7 +21474,7 @@ fn zirSplat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I try sema.checkVectorElemType(block, scalar_src, scalar_ty); const vector_ty = try mod.vectorType(.{ .len = len, - .child = scalar_ty.ip_index, + .child = scalar_ty.toIntern(), }); if (try sema.resolveMaybeUndefVal(scalar)) |scalar_val| { if (scalar_val.isUndef(mod)) return sema.addConstUndef(vector_ty); @@ -21489,7 +21600,7 @@ fn analyzeShuffle( const res_ty = try mod.vectorType(.{ .len = mask_len, - .child = elem_ty.ip_index, + .child = elem_ty.toIntern(), }); var maybe_a_len = switch (sema.typeOf(a).zigTypeTag(mod)) { @@ -21516,11 +21627,11 @@ fn analyzeShuffle( const a_ty = try mod.vectorType(.{ .len = a_len, - .child = elem_ty.ip_index, + .child = elem_ty.toIntern(), }); const b_ty = try mod.vectorType(.{ .len = b_len, - .child = elem_ty.ip_index, + .child = elem_ty.toIntern(), }); if (maybe_a_len == null) a = try sema.addConstUndef(a_ty) else a = try sema.coerce(block, a_ty, a, a_src); @@ -21567,25 +21678,21 @@ fn analyzeShuffle( if (try sema.resolveMaybeUndefVal(a)) |a_val| { if (try sema.resolveMaybeUndefVal(b)) |b_val| { - const values = try sema.arena.alloc(Value, mask_len); - - i = 0; - while (i < mask_len) : (i += 1) { + const values = try sema.arena.alloc(InternPool.Index, mask_len); + for (values) |*value| { const mask_elem_val = try mask.elemValue(sema.mod, i); if (mask_elem_val.isUndef(mod)) { - values[i] = Value.undef; + value.* = try mod.intern(.{ .undef = elem_ty.toIntern() }); continue; } const int = mask_elem_val.toSignedInt(mod); const unsigned = if (int >= 0) @intCast(u32, int) else @intCast(u32, ~int); - if (int >= 0) { - values[i] = try a_val.elemValue(sema.mod, unsigned); - } else { - values[i] = try b_val.elemValue(sema.mod, unsigned); - } + values[i] = try (try (if (int >= 0) a_val else b_val).elemValue(mod, unsigned)).intern(elem_ty, mod); } - const res_val = try Value.Tag.aggregate.create(sema.arena, values); - return sema.addConstant(res_ty, res_val); + return sema.addConstant(res_ty, (try mod.intern(.{ .aggregate = .{ + .ty = res_ty.toIntern(), + .storage = .{ .elems = values }, + } })).toValue()); } } @@ -21599,22 +21706,25 @@ fn analyzeShuffle( const max_src = if (a_len > b_len) a_src else b_src; const max_len = try sema.usizeCast(block, max_src, std.math.max(a_len, b_len)); - const expand_mask_values = try sema.arena.alloc(Value, max_len); + const expand_mask_values = try sema.arena.alloc(InternPool.Index, max_len); i = 0; while (i < min_len) : (i += 1) { - expand_mask_values[i] = try mod.intValue(Type.comptime_int, i); + expand_mask_values[i] = (try mod.intValue(Type.comptime_int, i)).toIntern(); } while (i < max_len) : (i += 1) { - expand_mask_values[i] = try mod.intValue(Type.comptime_int, -1); + expand_mask_values[i] = (try mod.intValue(Type.comptime_int, -1)).toIntern(); } - const expand_mask = try Value.Tag.aggregate.create(sema.arena, expand_mask_values); + const expand_mask = try mod.intern(.{ .aggregate = .{ + .ty = 
(try mod.vectorType(.{ .len = @intCast(u32, max_len), .child = .comptime_int_type })).toIntern(), + .storage = .{ .elems = expand_mask_values }, + } }); if (a_len < b_len) { const undef = try sema.addConstUndef(a_ty); - a = try sema.analyzeShuffle(block, src_node, elem_ty, a, undef, expand_mask, @intCast(u32, max_len)); + a = try sema.analyzeShuffle(block, src_node, elem_ty, a, undef, expand_mask.toValue(), @intCast(u32, max_len)); } else { const undef = try sema.addConstUndef(b_ty); - b = try sema.analyzeShuffle(block, src_node, elem_ty, b, undef, expand_mask, @intCast(u32, max_len)); + b = try sema.analyzeShuffle(block, src_node, elem_ty, b, undef, expand_mask.toValue(), @intCast(u32, max_len)); } } @@ -21651,7 +21761,7 @@ fn zirSelect(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) C const vec_len_u64 = switch (try pred_ty.zigTypeTagOrPoison(mod)) { .Vector, .Array => pred_ty.arrayLen(mod), - else => return sema.fail(block, pred_src, "expected vector or array, found '{}'", .{pred_ty.fmt(sema.mod)}), + else => return sema.fail(block, pred_src, "expected vector or array, found '{}'", .{pred_ty.fmt(mod)}), }; const vec_len = @intCast(u32, try sema.usizeCast(block, pred_src, vec_len_u64)); @@ -21663,7 +21773,7 @@ fn zirSelect(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) C const vec_ty = try mod.vectorType(.{ .len = vec_len, - .child = elem_ty.ip_index, + .child = elem_ty.toIntern(), }); const a = try sema.coerce(block, vec_ty, try sema.resolveInst(extra.a), a_src); const b = try sema.coerce(block, vec_ty, try sema.resolveInst(extra.b), b_src); @@ -21681,21 +21791,17 @@ fn zirSelect(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) C if (maybe_b) |b_val| { if (b_val.isUndef(mod)) return sema.addConstUndef(vec_ty); - const elems = try sema.gpa.alloc(Value, vec_len); + const elems = try sema.gpa.alloc(InternPool.Index, vec_len); for (elems, 0..) 
|*elem, i| { - const pred_elem_val = try pred_val.elemValue(sema.mod, i); + const pred_elem_val = try pred_val.elemValue(mod, i); const should_choose_a = pred_elem_val.toBool(mod); - if (should_choose_a) { - elem.* = try a_val.elemValue(sema.mod, i); - } else { - elem.* = try b_val.elemValue(sema.mod, i); - } + elem.* = try (try (if (should_choose_a) a_val else b_val).elemValue(mod, i)).intern(elem_ty, mod); } - return sema.addConstant( - vec_ty, - try Value.Tag.aggregate.create(sema.arena, elems), - ); + return sema.addConstant(vec_ty, (try mod.intern(.{ .aggregate = .{ + .ty = vec_ty.toIntern(), + .storage = .{ .elems = elems }, + } })).toValue()); } else { break :rs b_src; } @@ -22019,7 +22125,7 @@ fn zirBuiltinCall(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const args = try sema.resolveInst(extra.args); const args_ty = sema.typeOf(args); - if (!args_ty.isTuple(mod) and args_ty.ip_index != .empty_struct_type) { + if (!args_ty.isTuple(mod) and args_ty.toIntern() != .empty_struct_type) { return sema.fail(block, args_src, "expected a tuple, found '{}'", .{args_ty.fmt(sema.mod)}); } @@ -22102,7 +22208,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr const result_ptr = try Type.ptr(sema.arena, sema.mod, ptr_ty_data); if (try sema.resolveDefinedValue(block, src, casted_field_ptr)) |field_ptr_val| { - const field = switch (mod.intern_pool.indexToKey(field_ptr_val.ip_index)) { + const field = switch (mod.intern_pool.indexToKey(field_ptr_val.toIntern())) { .ptr => |ptr| switch (ptr.addr) { .field => |field| field, else => null, @@ -22244,16 +22350,16 @@ fn analyzeMinMax( cur_minmax = try sema.addConstant(simd_op.result_ty, result_val); continue; }; - const elems = try sema.arena.alloc(Value, vec_len); + const elems = try sema.arena.alloc(InternPool.Index, vec_len); for (elems, 0..) 
|*elem, i| { const lhs_elem_val = try cur_val.elemValue(mod, i); const rhs_elem_val = try operand_val.elemValue(mod, i); - elem.* = opFunc(lhs_elem_val, rhs_elem_val, mod); + elem.* = try opFunc(lhs_elem_val, rhs_elem_val, mod).intern(simd_op.scalar_ty, mod); } - cur_minmax = try sema.addConstant( - simd_op.result_ty, - try Value.Tag.aggregate.create(sema.arena, elems), - ); + cur_minmax = try sema.addConstant(simd_op.result_ty, (try mod.intern(.{ .aggregate = .{ + .ty = simd_op.result_ty.toIntern(), + .storage = .{ .elems = elems }, + } })).toValue()); } else { runtime_known.unset(operand_idx); cur_minmax = try sema.addConstant(sema.typeOf(operand), uncasted_operand_val); @@ -22292,7 +22398,7 @@ fn analyzeMinMax( const refined_elem_ty = try mod.intFittingRange(cur_min, cur_max); break :blk try mod.vectorType(.{ .len = len, - .child = refined_elem_ty.ip_index, + .child = refined_elem_ty.toIntern(), }); } else blk: { if (orig_ty.isAnyFloat()) break :blk orig_ty; // can't refine floats @@ -22377,7 +22483,7 @@ fn analyzeMinMax( const final_ty = if (is_vector) try mod.vectorType(.{ .len = unrefined_ty.vectorLen(mod), - .child = final_elem_ty.ip_index, + .child = final_elem_ty.toIntern(), }) else final_elem_ty; @@ -22765,7 +22871,7 @@ fn zirVarExtended( try sema.validateVarType(block, ty_src, var_ty, small.is_extern); return sema.addConstant(var_ty, (try mod.intern(.{ .variable = .{ - .ty = var_ty.ip_index, + .ty = var_ty.toIntern(), .init = init_val.toIntern(), .decl = sema.owner_decl_index, .lib_name = if (lib_name) |lname| (try mod.intern_pool.getOrPutString( @@ -23239,7 +23345,7 @@ fn zirBuiltinExtern( { const new_var = try mod.intern(.{ .variable = .{ - .ty = ty.ip_index, + .ty = ty.toIntern(), .init = .none, .decl = sema.owner_decl_index, .is_extern = true, @@ -23264,7 +23370,7 @@ fn zirBuiltinExtern( try sema.ensureDeclAnalyzed(new_decl_index); const ref = try mod.intern(.{ .ptr = .{ - .ty = (try mod.singleConstPtrType(ty)).ip_index, + .ty = (try mod.singleConstPtrType(ty)).toIntern(), .addr = .{ .decl = new_decl_index }, } }); return sema.addConstant(ty, ref.toValue()); @@ -24207,7 +24313,7 @@ fn fieldVal( switch (try child_type.zigTypeTagOrPoison(mod)) { .ErrorSet => { const name = try ip.getOrPutString(gpa, field_name); - switch (ip.indexToKey(child_type.ip_index)) { + switch (ip.indexToKey(child_type.toIntern())) { .error_set_type => |error_set_type| blk: { if (error_set_type.nameIndex(ip, name) != null) break :blk; const msg = msg: { @@ -24232,7 +24338,7 @@ fn fieldVal( else try mod.singleErrorSetTypeNts(name); return sema.addConstant(error_set_type, (try mod.intern(.{ .err = .{ - .ty = error_set_type.ip_index, + .ty = error_set_type.toIntern(), .name = name, } })).toValue()); }, @@ -24376,9 +24482,9 @@ fn fieldPtr( if (try sema.resolveDefinedValue(block, object_ptr_src, inner_ptr)) |val| { return sema.addConstant(result_ty, (try mod.intern(.{ .ptr = .{ - .ty = result_ty.ip_index, + .ty = result_ty.toIntern(), .addr = .{ .field = .{ - .base = val.ip_index, + .base = val.toIntern(), .index = Value.slice_ptr_index, } }, } })).toValue()); @@ -24396,9 +24502,9 @@ fn fieldPtr( if (try sema.resolveDefinedValue(block, object_ptr_src, inner_ptr)) |val| { return sema.addConstant(result_ty, (try mod.intern(.{ .ptr = .{ - .ty = result_ty.ip_index, + .ty = result_ty.toIntern(), .addr = .{ .field = .{ - .base = val.ip_index, + .base = val.toIntern(), .index = Value.slice_len_index, } }, } })).toValue()); @@ -24429,7 +24535,7 @@ fn fieldPtr( switch (child_type.zigTypeTag(mod)) { .ErrorSet => { 
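
One caveat in the constant folds above: the rewritten `analyzeShuffle` loop drops the old `i = 0` reset while its body still indexes the mask with the outer `i`, so that loop needs its own capture index. Apart from that, the folding hunks all converge on one shape: allocate `InternPool.Index` elements, intern each folded scalar, then intern the whole aggregate. A sketch of that shape, with `scalarOp` standing in for the per-element operation (hypothetical name):

    // Fold an element-wise vector operation into one interned aggregate.
    const elems = try sema.arena.alloc(InternPool.Index, vec_len);
    for (elems, 0..) |*elem, i| {
        const elem_val = try val.elemValue(mod, i);
        // Intern each folded scalar at its scalar type before storing it.
        elem.* = try (try scalarOp(elem_val)).intern(scalar_ty, mod);
    }
    return sema.addConstant(result_ty, (try mod.intern(.{ .aggregate = .{
        .ty = result_ty.toIntern(),
        .storage = .{ .elems = elems },
    } })).toValue());
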
const name = try ip.getOrPutString(gpa, field_name); - switch (ip.indexToKey(child_type.ip_index)) { + switch (ip.indexToKey(child_type.toIntern())) { .error_set_type => |error_set_type| blk: { if (error_set_type.nameIndex(ip, name) != null) { break :blk; @@ -24454,7 +24560,7 @@ fn fieldPtr( return sema.analyzeDeclRef(try anon_decl.finish( error_set_type, (try mod.intern(.{ .err = .{ - .ty = error_set_type.ip_index, + .ty = error_set_type.toIntern(), .name = name, } })).toValue(), 0, // default alignment @@ -24722,9 +24828,9 @@ fn finishFieldCallBind( if (try sema.resolveDefinedValue(block, src, object_ptr)) |struct_ptr_val| { const pointer = try sema.addConstant(ptr_field_ty, (try mod.intern(.{ .ptr = .{ - .ty = ptr_field_ty.ip_index, + .ty = ptr_field_ty.toIntern(), .addr = .{ .field = .{ - .base = struct_ptr_val.ip_index, + .base = struct_ptr_val.toIntern(), .index = field_index, } }, } })).toValue()); @@ -24908,7 +25014,7 @@ fn structFieldPtrByIndex( if (field.is_comptime) { const val = try mod.intern(.{ .ptr = .{ - .ty = ptr_field_ty.ip_index, + .ty = ptr_field_ty.toIntern(), .addr = .{ .comptime_field = try field.default_val.intern(field.ty, mod) }, } }); return sema.addConstant(ptr_field_ty, val.toValue()); @@ -24916,7 +25022,7 @@ fn structFieldPtrByIndex( if (try sema.resolveDefinedValue(block, src, struct_ptr)) |struct_ptr_val| { const val = try mod.intern(.{ .ptr = .{ - .ty = ptr_field_ty.ip_index, + .ty = ptr_field_ty.toIntern(), .addr = .{ .field = .{ .base = try struct_ptr_val.intern(struct_ptr_ty, mod), .index = field_index, @@ -24942,7 +25048,7 @@ fn structFieldVal( assert(unresolved_struct_ty.zigTypeTag(mod) == .Struct); const struct_ty = try sema.resolveTypeFields(unresolved_struct_ty); - switch (mod.intern_pool.indexToKey(struct_ty.ip_index)) { + switch (mod.intern_pool.indexToKey(struct_ty.toIntern())) { .struct_type => |struct_type| { const struct_obj = mod.structPtrUnwrap(struct_type.index).?; if (struct_obj.is_tuple) return sema.tupleFieldVal(block, src, struct_byval, field_name, field_name_src, struct_ty); @@ -25116,9 +25222,9 @@ fn unionFieldPtr( .Packed, .Extern => {}, } return sema.addConstant(ptr_field_ty, (try mod.intern(.{ .ptr = .{ - .ty = ptr_field_ty.ip_index, + .ty = ptr_field_ty.toIntern(), .addr = .{ .field = .{ - .base = union_ptr_val.ip_index, + .base = union_ptr_val.toIntern(), .index = field_index, } }, } })).toValue()); @@ -25413,16 +25519,16 @@ fn tupleFieldPtr( if (try tuple_ty.structFieldValueComptime(mod, field_index)) |default_val| { return sema.addConstant(ptr_field_ty, (try mod.intern(.{ .ptr = .{ - .ty = ptr_field_ty.ip_index, - .addr = .{ .comptime_field = default_val.ip_index }, + .ty = ptr_field_ty.toIntern(), + .addr = .{ .comptime_field = default_val.toIntern() }, } })).toValue()); } if (try sema.resolveMaybeUndefVal(tuple_ptr)) |tuple_ptr_val| { return sema.addConstant(ptr_field_ty, (try mod.intern(.{ .ptr = .{ - .ty = ptr_field_ty.ip_index, + .ty = ptr_field_ty.toIntern(), .addr = .{ .field = .{ - .base = tuple_ptr_val.ip_index, + .base = tuple_ptr_val.toIntern(), .index = field_index, } }, } })).toValue()); @@ -25787,11 +25893,11 @@ fn coerceExtra( var in_memory_result = try sema.coerceInMemoryAllowed(block, dest_ty, inst_ty, false, target, dest_ty_src, inst_src); if (in_memory_result == .ok) { if (maybe_inst_val) |val| { - if (val.ip_index == .none or val.ip_index == .null_value) { + if (val.ip_index == .none) { // Keep the comptime Value representation; take the new type. 
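
The field-access hunks above all migrate comptime field pointers to a single interned representation: the pointer's type and its `.field` address are interned together instead of living behind an arena-allocated `Value` tag. A sketch of the recurring shape, with `base_ptr_val` as a stand-in for the resolved container pointer (hypothetical name):

    // A comptime-known pointer to field `field_index` of the container
    // behind `base_ptr_val`, expressed as an InternPool key.
    return sema.addConstant(ptr_field_ty, (try mod.intern(.{ .ptr = .{
        .ty = ptr_field_ty.toIntern(),
        .addr = .{ .field = .{
            .base = base_ptr_val.toIntern(),
            .index = field_index,
        } },
    } })).toValue());
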
return sema.addConstant(dest_ty, val); } else { - const new_val = try mod.intern_pool.getCoerced(sema.gpa, val.ip_index, dest_ty.ip_index); + const new_val = try mod.intern_pool.getCoerced(sema.gpa, val.toIntern(), dest_ty.toIntern()); return sema.addConstant(dest_ty, new_val.toValue()); } } @@ -25816,7 +25922,7 @@ fn coerceExtra( // cast from ?*T and ?[*]T to ?*anyopaque // but don't do it if the source type is a double pointer if (dest_ty.isPtrLikeOptional(mod) and - dest_ty.elemType2(mod).ip_index == .anyopaque_type and + dest_ty.elemType2(mod).toIntern() == .anyopaque_type and inst_ty.isPtrAtRuntime(mod)) anyopaque_check: { if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :optional; @@ -25954,7 +26060,7 @@ fn coerceExtra( // cast from *T and [*]T to *anyopaque // but don't do it if the source type is a double pointer - if (dest_info.pointee_type.ip_index == .anyopaque_type and inst_ty.zigTypeTag(mod) == .Pointer) to_anyopaque: { + if (dest_info.pointee_type.toIntern() == .anyopaque_type and inst_ty.zigTypeTag(mod) == .Pointer) to_anyopaque: { if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :pointer; const elem_ty = inst_ty.elemType2(mod); if (elem_ty.zigTypeTag(mod) == .Pointer or elem_ty.isPtrLikeOptional(mod)) { @@ -26084,12 +26190,12 @@ fn coerceExtra( // Optional slice is represented with a null pointer so // we use a dummy pointer value with the required alignment. return sema.addConstant(dest_ty, (try mod.intern(.{ .ptr = .{ - .ty = dest_ty.ip_index, + .ty = dest_ty.toIntern(), .addr = .{ .int = (if (dest_info.@"align" != 0) try mod.intValue(Type.usize, dest_info.@"align") else - try dest_info.pointee_type.lazyAbiAlignment(mod)).ip_index }, - .len = (try mod.intValue(Type.usize, 0)).ip_index, + try dest_info.pointee_type.lazyAbiAlignment(mod)).toIntern() }, + .len = (try mod.intValue(Type.usize, 0)).toIntern(), } })).toValue()); } @@ -26166,7 +26272,7 @@ fn coerceExtra( if (!opts.report_err) return error.NotCoercible; return sema.fail(block, inst_src, "type '{}' cannot represent integer value '{}'", .{ dest_ty.fmt(sema.mod), val.fmtValue(inst_ty, sema.mod) }); } - const new_val = try mod.intern_pool.getCoerced(sema.gpa, val.ip_index, dest_ty.ip_index); + const new_val = try mod.intern_pool.getCoerced(sema.gpa, val.toIntern(), dest_ty.toIntern()); return try sema.addConstant(dest_ty, new_val.toValue()); } if (dest_ty.zigTypeTag(mod) == .ComptimeInt) { @@ -26258,7 +26364,7 @@ fn coerceExtra( .EnumLiteral => { // enum literal to enum const val = try sema.resolveConstValue(block, .unneeded, inst, ""); - const string = mod.intern_pool.indexToKey(val.ip_index).enum_literal; + const string = mod.intern_pool.indexToKey(val.toIntern()).enum_literal; const bytes = mod.intern_pool.stringToSlice(string); const field_index = dest_ty.enumFieldIndex(bytes, mod) orelse { const msg = msg: { @@ -26294,14 +26400,14 @@ fn coerceExtra( .ErrorUnion => switch (inst_ty.zigTypeTag(mod)) { .ErrorUnion => eu: { if (maybe_inst_val) |inst_val| { - switch (inst_val.ip_index) { + switch (inst_val.toIntern()) { .undef => return sema.addConstUndef(dest_ty), - else => switch (mod.intern_pool.indexToKey(inst_val.ip_index)) { + else => switch (mod.intern_pool.indexToKey(inst_val.toIntern())) { .error_union => |error_union| switch (error_union.val) { .err_name => |err_name| { const error_set_ty = inst_ty.errorUnionSet(mod); const error_set_val = try sema.addConstant(error_set_ty, (try mod.intern(.{ .err = .{ - .ty = error_set_ty.ip_index, + .ty = 
error_set_ty.toIntern(), .name = err_name, } })).toValue()); return sema.wrapErrorUnionSet(block, dest_ty, error_set_val, inst_src); @@ -26597,7 +26703,7 @@ const InMemoryCoercionResult = union(enum) { break; }, .array_sentinel => |sentinel| { - if (sentinel.actual.ip_index != .unreachable_value) { + if (sentinel.actual.toIntern() != .unreachable_value) { try sema.errNote(block, src, msg, "array sentinel '{}' cannot cast into array sentinel '{}'", .{ sentinel.actual.fmtValue(sentinel.ty, sema.mod), sentinel.wanted.fmtValue(sentinel.ty, sema.mod), }); @@ -26724,7 +26830,7 @@ const InMemoryCoercionResult = union(enum) { break; }, .ptr_sentinel => |sentinel| { - if (sentinel.actual.ip_index != .unreachable_value) { + if (sentinel.actual.toIntern() != .unreachable_value) { try sema.errNote(block, src, msg, "pointer sentinel '{}' cannot cast into pointer sentinel '{}'", .{ sentinel.actual.fmtValue(sentinel.ty, sema.mod), sentinel.wanted.fmtValue(sentinel.ty, sema.mod), }); @@ -27016,9 +27122,9 @@ fn coerceInMemoryAllowedErrorSets( const dst_ies = mod.inferredErrorSetPtr(dst_ies_index); // We will make an effort to return `ok` without resolving either error set, to // avoid unnecessary "unable to resolve error set" dependency loop errors. - switch (src_ty.ip_index) { + switch (src_ty.toIntern()) { .anyerror_type => {}, - else => switch (ip.indexToKey(src_ty.ip_index)) { + else => switch (ip.indexToKey(src_ty.toIntern())) { .inferred_error_set_type => |src_index| { // If both are inferred error sets of functions, and // the dest includes the source function, the coercion is OK. @@ -27054,15 +27160,15 @@ fn coerceInMemoryAllowedErrorSets( var missing_error_buf = std.ArrayList(InternPool.NullTerminatedString).init(gpa); defer missing_error_buf.deinit(); - switch (src_ty.ip_index) { - .anyerror_type => switch (ip.indexToKey(dest_ty.ip_index)) { + switch (src_ty.toIntern()) { + .anyerror_type => switch (ip.indexToKey(dest_ty.toIntern())) { .inferred_error_set_type => unreachable, // Caught by dest_ty.isAnyError(mod) above. .simple_type => unreachable, // filtered out above .error_set_type => return .from_anyerror, else => unreachable, }, - else => switch (ip.indexToKey(src_ty.ip_index)) { + else => switch (ip.indexToKey(src_ty.toIntern())) { .inferred_error_set_type => |src_index| { const src_data = mod.inferredErrorSetPtr(src_index); @@ -27520,9 +27626,9 @@ fn obtainBitCastedVectorPtr(sema: *Sema, ptr: Air.Inst.Ref) ?Air.Inst.Ref { // allocations is relevant to this function, or why it would have // different behavior depending on whether the types were inferred. // Something seems wrong here. 
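
Most of this patch is the mechanical `ip_index` to `toIntern()` rename. The rename buys a checked accessor: `toIntern()` can assert that the value has actually been migrated into the InternPool, whereas a raw `ip_index` read silently passes `.none` through. A minimal sketch of the accessor this migration assumes, consistent with the `.none` checks that `coerceExtra` above still performs by hand:

    /// Interned values carry their pool index; legacy arena-allocated
    /// values still report `.none` and must keep using `ip_index` directly.
    pub fn toIntern(val: Value) InternPool.Index {
        assert(val.ip_index != .none);
        return val.ip_index;
    }
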
- if (prev_ptr_ty.ip_index == .none) { - if (prev_ptr_ty.ip_index == .inferred_alloc_mut_type) return null; - if (prev_ptr_ty.ip_index == .inferred_alloc_const_type) return null; + switch (prev_ptr_ty.ip_index) { + .inferred_alloc_mut_type, .inferred_alloc_const_type => return null, + else => {}, } const prev_ptr_child_ty = prev_ptr_ty.childType(mod); @@ -27554,11 +27660,11 @@ fn storePtrVal( ) !void { const mod = sema.mod; var mut_kit = try sema.beginComptimePtrMutation(block, src, ptr_val, operand_ty); - try sema.checkComptimeVarStore(block, src, mut_kit.decl_ref_mut); + try sema.checkComptimeVarStore(block, src, mut_kit.mut_decl); switch (mut_kit.pointee) { .direct => |val_ptr| { - if (mut_kit.decl_ref_mut.runtime_index == .comptime_field_ptr) { + if (mut_kit.mut_decl.runtime_index == .comptime_field_ptr) { if (!operand_val.eql(val_ptr.*, operand_ty, sema.mod)) { // TODO use failWithInvalidComptimeFieldStore return sema.fail(block, src, "value stored in comptime field does not match the default value of the field", .{}); @@ -27601,7 +27707,7 @@ fn storePtrVal( } const ComptimePtrMutationKit = struct { - decl_ref_mut: InternPool.Key.Ptr.Addr.MutDecl, + mut_decl: InternPool.Key.Ptr.Addr.MutDecl, pointee: union(enum) { /// The pointer type matches the actual comptime Value so a direct /// modification is possible. @@ -27627,12 +27733,12 @@ const ComptimePtrMutationKit = struct { decl_arena: std.heap.ArenaAllocator = undefined, fn beginArena(self: *ComptimePtrMutationKit, mod: *Module) Allocator { - const decl = mod.declPtr(self.decl_ref_mut.decl); + const decl = mod.declPtr(self.mut_decl.decl); return decl.value_arena.?.acquire(mod.gpa, &self.decl_arena); } fn finishArena(self: *ComptimePtrMutationKit, mod: *Module) void { - const decl = mod.declPtr(self.decl_ref_mut.decl); + const decl = mod.declPtr(self.mut_decl.decl); decl.value_arena.?.release(&self.decl_arena); self.decl_arena = undefined; } @@ -27645,99 +27751,85 @@ fn beginComptimePtrMutation( ptr_val: Value, ptr_elem_ty: Type, ) CompileError!ComptimePtrMutationKit { - if (true) unreachable; const mod = sema.mod; - switch (ptr_val.tag()) { - .decl_ref_mut => { - const decl_ref_mut = ptr_val.castTag(.decl_ref_mut).?.data; - const decl = sema.mod.declPtr(decl_ref_mut.decl_index); - return sema.beginComptimePtrMutationInner(block, src, decl.ty, &decl.val, ptr_elem_ty, decl_ref_mut); + const ptr = mod.intern_pool.indexToKey(ptr_val.toIntern()).ptr; + switch (ptr.addr) { + .decl => unreachable, // isComptimeMutablePtr has been checked already + .mut_decl => |mut_decl| { + const decl = mod.declPtr(mut_decl.decl); + return sema.beginComptimePtrMutationInner(block, src, decl.ty, &decl.val, ptr_elem_ty, mut_decl); }, - .comptime_field_ptr => { - const payload = ptr_val.castTag(.comptime_field_ptr).?.data; + .comptime_field => |comptime_field| { const duped = try sema.arena.create(Value); - duped.* = payload.field_val; - return sema.beginComptimePtrMutationInner(block, src, payload.field_ty, duped, ptr_elem_ty, .{ - .decl_index = @intToEnum(Module.Decl.Index, 0), + duped.* = comptime_field.toValue(); + return sema.beginComptimePtrMutationInner(block, src, mod.intern_pool.typeOf(ptr_val.toIntern()).toType(), duped, ptr_elem_ty, .{ + .decl = undefined, .runtime_index = .comptime_field_ptr, }); }, - .elem_ptr => { - const elem_ptr = ptr_val.castTag(.elem_ptr).?.data; - var parent = try sema.beginComptimePtrMutation(block, src, elem_ptr.array_ptr, elem_ptr.elem_ty); - - switch (parent.pointee) { - .direct => |val_ptr| switch 
(parent.ty.zigTypeTag(mod)) { - .Array, .Vector => { - const check_len = parent.ty.arrayLenIncludingSentinel(mod); - if (elem_ptr.index >= check_len) { - // TODO have the parent include the decl so we can say "declared here" - return sema.fail(block, src, "comptime store of index {d} out of bounds of array length {d}", .{ - elem_ptr.index, check_len, - }); - } - const elem_ty = parent.ty.childType(mod); - - // We might have a pointer to multiple elements of the array (e.g. a pointer - // to a sub-array). In this case, we just have to reinterpret the relevant - // bytes of the whole array rather than any single element. - const elem_abi_size_u64 = try sema.typeAbiSize(elem_ptr.elem_ty); - if (elem_abi_size_u64 < try sema.typeAbiSize(ptr_elem_ty)) { - const elem_abi_size = try sema.usizeCast(block, src, elem_abi_size_u64); - return .{ - .decl_ref_mut = parent.decl_ref_mut, - .pointee = .{ .reinterpret = .{ - .val_ptr = val_ptr, - .byte_offset = elem_abi_size * elem_ptr.index, - } }, - .ty = parent.ty, - }; - } - - switch (val_ptr.ip_index) { - .undef => { - // An array has been initialized to undefined at comptime and now we - // are for the first time setting an element. We must change the representation - // of the array from `undef` to `array`. - const arena = parent.beginArena(sema.mod); - defer parent.finishArena(sema.mod); - - const array_len_including_sentinel = - try sema.usizeCast(block, src, parent.ty.arrayLenIncludingSentinel(mod)); - const elems = try arena.alloc(Value, array_len_including_sentinel); - @memset(elems, Value.undef); - - val_ptr.* = try Value.Tag.aggregate.create(arena, elems); + else => unreachable, + } + if (true) unreachable; + switch (ptr_val.toIntern()) { + .none => switch (ptr_val.tag()) { + .decl_ref_mut => { + const decl_ref_mut = ptr_val.castTag(.decl_ref_mut).?.data; + const decl = sema.mod.declPtr(decl_ref_mut.decl_index); + return sema.beginComptimePtrMutationInner(block, src, decl.ty, &decl.val, ptr_elem_ty, decl_ref_mut); + }, + .comptime_field_ptr => { + const payload = ptr_val.castTag(.comptime_field_ptr).?.data; + const duped = try sema.arena.create(Value); + duped.* = payload.field_val; + return sema.beginComptimePtrMutationInner(block, src, payload.field_ty, duped, ptr_elem_ty, .{ + .decl_index = @intToEnum(Module.Decl.Index, 0), + .runtime_index = .comptime_field_ptr, + }); + }, + .elem_ptr => { + const elem_ptr = ptr_val.castTag(.elem_ptr).?.data; + var parent = try sema.beginComptimePtrMutation(block, src, elem_ptr.array_ptr, elem_ptr.elem_ty); + + switch (parent.pointee) { + .direct => |val_ptr| switch (parent.ty.zigTypeTag(mod)) { + .Array, .Vector => { + const check_len = parent.ty.arrayLenIncludingSentinel(mod); + if (elem_ptr.index >= check_len) { + // TODO have the parent include the decl so we can say "declared here" + return sema.fail(block, src, "comptime store of index {d} out of bounds of array length {d}", .{ + elem_ptr.index, check_len, + }); + } + const elem_ty = parent.ty.childType(mod); + + // We might have a pointer to multiple elements of the array (e.g. a pointer + // to a sub-array). In this case, we just have to reinterpret the relevant + // bytes of the whole array rather than any single element. 
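
The `.undef` branch above describes the first representation change in this machinery: an array initialized to `undef` at comptime must be materialized into per-element Values before any single element can be mutated in place. In outline, the legacy `Value` path shown above does:

    // First store into a comptime array that is still `undef`:
    // give every element its own Value so one slot can be rewritten.
    const arena = parent.beginArena(sema.mod);
    defer parent.finishArena(sema.mod);

    const len = try sema.usizeCast(block, src, parent.ty.arrayLenIncludingSentinel(mod));
    const elems = try arena.alloc(Value, len);
    @memset(elems, Value.undef);
    val_ptr.* = try Value.Tag.aggregate.create(arena, elems);
    // ...then recurse with &elems[elem_ptr.index] as the mutation target.
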
+ const elem_abi_size_u64 = try sema.typeAbiSize(elem_ptr.elem_ty); + if (elem_abi_size_u64 < try sema.typeAbiSize(ptr_elem_ty)) { + const elem_abi_size = try sema.usizeCast(block, src, elem_abi_size_u64); + return .{ + .decl_ref_mut = parent.decl_ref_mut, + .pointee = .{ .reinterpret = .{ + .val_ptr = val_ptr, + .byte_offset = elem_abi_size * elem_ptr.index, + } }, + .ty = parent.ty, + }; + } - return beginComptimePtrMutationInner( - sema, - block, - src, - elem_ty, - &elems[elem_ptr.index], - ptr_elem_ty, - parent.decl_ref_mut, - ); - }, - .none => switch (val_ptr.tag()) { - .bytes => { - // An array is memory-optimized to store a slice of bytes, but we are about - // to modify an individual field and the representation has to change. - // If we wanted to avoid this, there would need to be special detection - // elsewhere to identify when writing a value to an array element that is stored - // using the `bytes` tag, and handle it without making a call to this function. + switch (val_ptr.toIntern()) { + .undef => { + // An array has been initialized to undefined at comptime and now we + // are for the first time setting an element. We must change the representation + // of the array from `undef` to `array`. const arena = parent.beginArena(sema.mod); defer parent.finishArena(sema.mod); - const bytes = val_ptr.castTag(.bytes).?.data; - const dest_len = parent.ty.arrayLenIncludingSentinel(mod); - // bytes.len may be one greater than dest_len because of the case when - // assigning `[N:S]T` to `[N]T`. This is allowed; the sentinel is omitted. - assert(bytes.len >= dest_len); - const elems = try arena.alloc(Value, @intCast(usize, dest_len)); - for (elems, 0..) |*elem, i| { - elem.* = try mod.intValue(elem_ty, bytes[i]); - } + const array_len_including_sentinel = + try sema.usizeCast(block, src, parent.ty.arrayLenIncludingSentinel(mod)); + const elems = try arena.alloc(Value, array_len_including_sentinel); + @memset(elems, Value.undef); val_ptr.* = try Value.Tag.aggregate.create(arena, elems); @@ -27751,392 +27843,383 @@ fn beginComptimePtrMutation( parent.decl_ref_mut, ); }, - .str_lit => { - // An array is memory-optimized to store a slice of bytes, but we are about - // to modify an individual field and the representation has to change. - // If we wanted to avoid this, there would need to be special detection - // elsewhere to identify when writing a value to an array element that is stored - // using the `str_lit` tag, and handle it without making a call to this function. - const arena = parent.beginArena(sema.mod); - defer parent.finishArena(sema.mod); - - const str_lit = val_ptr.castTag(.str_lit).?.data; - const dest_len = parent.ty.arrayLenIncludingSentinel(mod); - const bytes = sema.mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; - const elems = try arena.alloc(Value, @intCast(usize, dest_len)); - for (bytes, 0..) |byte, i| { - elems[i] = try mod.intValue(elem_ty, byte); - } - if (parent.ty.sentinel(mod)) |sent_val| { - assert(elems.len == bytes.len + 1); - elems[bytes.len] = sent_val; - } - - val_ptr.* = try Value.Tag.aggregate.create(arena, elems); - - return beginComptimePtrMutationInner( + .none => switch (val_ptr.tag()) { + .bytes => { + // An array is memory-optimized to store a slice of bytes, but we are about + // to modify an individual field and the representation has to change. 
+ // If we wanted to avoid this, there would need to be special detection + // elsewhere to identify when writing a value to an array element that is stored + // using the `bytes` tag, and handle it without making a call to this function. + const arena = parent.beginArena(sema.mod); + defer parent.finishArena(sema.mod); + + const bytes = val_ptr.castTag(.bytes).?.data; + const dest_len = parent.ty.arrayLenIncludingSentinel(mod); + // bytes.len may be one greater than dest_len because of the case when + // assigning `[N:S]T` to `[N]T`. This is allowed; the sentinel is omitted. + assert(bytes.len >= dest_len); + const elems = try arena.alloc(Value, @intCast(usize, dest_len)); + for (elems, 0..) |*elem, i| { + elem.* = try mod.intValue(elem_ty, bytes[i]); + } + + val_ptr.* = try Value.Tag.aggregate.create(arena, elems); + + return beginComptimePtrMutationInner( + sema, + block, + src, + elem_ty, + &elems[elem_ptr.index], + ptr_elem_ty, + parent.decl_ref_mut, + ); + }, + .str_lit => { + // An array is memory-optimized to store a slice of bytes, but we are about + // to modify an individual field and the representation has to change. + // If we wanted to avoid this, there would need to be special detection + // elsewhere to identify when writing a value to an array element that is stored + // using the `str_lit` tag, and handle it without making a call to this function. + const arena = parent.beginArena(sema.mod); + defer parent.finishArena(sema.mod); + + const str_lit = val_ptr.castTag(.str_lit).?.data; + const dest_len = parent.ty.arrayLenIncludingSentinel(mod); + const bytes = sema.mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; + const elems = try arena.alloc(Value, @intCast(usize, dest_len)); + for (bytes, 0..) |byte, i| { + elems[i] = try mod.intValue(elem_ty, byte); + } + if (parent.ty.sentinel(mod)) |sent_val| { + assert(elems.len == bytes.len + 1); + elems[bytes.len] = sent_val; + } + + val_ptr.* = try Value.Tag.aggregate.create(arena, elems); + + return beginComptimePtrMutationInner( + sema, + block, + src, + elem_ty, + &elems[elem_ptr.index], + ptr_elem_ty, + parent.decl_ref_mut, + ); + }, + .repeated => { + // An array is memory-optimized to store only a single element value, and + // that value is understood to be the same for the entire length of the array. + // However, now we want to modify an individual field and so the + // representation has to change. If we wanted to avoid this, there would + // need to be special detection elsewhere to identify when writing a value to an + // array element that is stored using the `repeated` tag, and handle it + // without making a call to this function. 
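
The `bytes` branch above and the `str_lit` branch beside it exist for the same reason: the array is stored in a memory-optimized form, and a single-element write forces it back into one Value per element. The shared expansion step, in sketch form:

    // Expand a memory-optimized byte array into per-element integer Values.
    const dest_len = try sema.usizeCast(block, src, parent.ty.arrayLenIncludingSentinel(mod));
    const elems = try arena.alloc(Value, dest_len);
    for (elems, 0..) |*elem, i| {
        elem.* = try mod.intValue(elem_ty, bytes[i]);
    }
    val_ptr.* = try Value.Tag.aggregate.create(arena, elems);
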
+ const arena = parent.beginArena(sema.mod); + defer parent.finishArena(sema.mod); + + const repeated_val = try val_ptr.castTag(.repeated).?.data.copy(arena); + const array_len_including_sentinel = + try sema.usizeCast(block, src, parent.ty.arrayLenIncludingSentinel(mod)); + const elems = try arena.alloc(Value, array_len_including_sentinel); + if (elems.len > 0) elems[0] = repeated_val; + for (elems[1..]) |*elem| { + elem.* = try repeated_val.copy(arena); + } + + val_ptr.* = try Value.Tag.aggregate.create(arena, elems); + + return beginComptimePtrMutationInner( + sema, + block, + src, + elem_ty, + &elems[elem_ptr.index], + ptr_elem_ty, + parent.decl_ref_mut, + ); + }, + + .aggregate => return beginComptimePtrMutationInner( sema, block, src, elem_ty, - &elems[elem_ptr.index], + &val_ptr.castTag(.aggregate).?.data[elem_ptr.index], ptr_elem_ty, parent.decl_ref_mut, - ); + ), + + .the_only_possible_value => { + const duped = try sema.arena.create(Value); + duped.* = Value.initTag(.the_only_possible_value); + return beginComptimePtrMutationInner( + sema, + block, + src, + elem_ty, + duped, + ptr_elem_ty, + parent.decl_ref_mut, + ); + }, + + else => unreachable, }, - .repeated => { - // An array is memory-optimized to store only a single element value, and - // that value is understood to be the same for the entire length of the array. - // However, now we want to modify an individual field and so the - // representation has to change. If we wanted to avoid this, there would - // need to be special detection elsewhere to identify when writing a value to an - // array element that is stored using the `repeated` tag, and handle it - // without making a call to this function. - const arena = parent.beginArena(sema.mod); - defer parent.finishArena(sema.mod); + else => unreachable, + } + }, + else => { + if (elem_ptr.index != 0) { + // TODO include a "declared here" note for the decl + return sema.fail(block, src, "out of bounds comptime store of index {d}", .{ + elem_ptr.index, + }); + } + return beginComptimePtrMutationInner( + sema, + block, + src, + parent.ty, + val_ptr, + ptr_elem_ty, + parent.decl_ref_mut, + ); + }, + }, + .reinterpret => |reinterpret| { + if (!elem_ptr.elem_ty.hasWellDefinedLayout(mod)) { + // Even though the parent value type has well-defined memory layout, our + // pointer type does not. 
+ return ComptimePtrMutationKit{ + .decl_ref_mut = parent.decl_ref_mut, + .pointee = .bad_ptr_ty, + .ty = elem_ptr.elem_ty, + }; + } - const repeated_val = try val_ptr.castTag(.repeated).?.data.copy(arena); - const array_len_including_sentinel = - try sema.usizeCast(block, src, parent.ty.arrayLenIncludingSentinel(mod)); - const elems = try arena.alloc(Value, array_len_including_sentinel); - if (elems.len > 0) elems[0] = repeated_val; - for (elems[1..]) |*elem| { - elem.* = try repeated_val.copy(arena); - } + const elem_abi_size_u64 = try sema.typeAbiSize(elem_ptr.elem_ty); + const elem_abi_size = try sema.usizeCast(block, src, elem_abi_size_u64); + return ComptimePtrMutationKit{ + .decl_ref_mut = parent.decl_ref_mut, + .pointee = .{ .reinterpret = .{ + .val_ptr = reinterpret.val_ptr, + .byte_offset = reinterpret.byte_offset + elem_abi_size * elem_ptr.index, + } }, + .ty = parent.ty, + }; + }, + .bad_decl_ty, .bad_ptr_ty => return parent, + } + }, + .field_ptr => { + const field_ptr = ptr_val.castTag(.field_ptr).?.data; + const field_index = @intCast(u32, field_ptr.field_index); - val_ptr.* = try Value.Tag.aggregate.create(arena, elems); + var parent = try sema.beginComptimePtrMutation(block, src, field_ptr.container_ptr, field_ptr.container_ty); + switch (parent.pointee) { + .direct => |val_ptr| switch (val_ptr.toIntern()) { + .undef => { + // A struct or union has been initialized to undefined at comptime and now we + // are for the first time setting a field. We must change the representation + // of the struct/union from `undef` to `struct`/`union`. + const arena = parent.beginArena(sema.mod); + defer parent.finishArena(sema.mod); + + switch (parent.ty.zigTypeTag(mod)) { + .Struct => { + const fields = try arena.alloc(Value, parent.ty.structFieldCount(mod)); + @memset(fields, Value.undef); + + val_ptr.* = try Value.Tag.aggregate.create(arena, fields); return beginComptimePtrMutationInner( sema, block, src, - elem_ty, - &elems[elem_ptr.index], + parent.ty.structFieldType(field_index, mod), + &fields[field_index], ptr_elem_ty, parent.decl_ref_mut, ); }, + .Union => { + const payload = try arena.create(Value.Payload.Union); + const tag_ty = parent.ty.unionTagTypeHypothetical(mod); + payload.* = .{ .data = .{ + .tag = try mod.enumValueFieldIndex(tag_ty, field_index), + .val = Value.undef, + } }; - .aggregate => return beginComptimePtrMutationInner( - sema, - block, - src, - elem_ty, - &val_ptr.castTag(.aggregate).?.data[elem_ptr.index], - ptr_elem_ty, - parent.decl_ref_mut, - ), + val_ptr.* = Value.initPayload(&payload.base); - .the_only_possible_value => { - const duped = try sema.arena.create(Value); - duped.* = Value.initTag(.the_only_possible_value); return beginComptimePtrMutationInner( sema, block, src, - elem_ty, - duped, + parent.ty.structFieldType(field_index, mod), + &payload.data.val, ptr_elem_ty, parent.decl_ref_mut, ); }, - + .Pointer => { + assert(parent.ty.isSlice(mod)); + val_ptr.* = try Value.Tag.slice.create(arena, .{ + .ptr = Value.undef, + .len = Value.undef, + }); + + switch (field_index) { + Value.Payload.Slice.ptr_index => return beginComptimePtrMutationInner( + sema, + block, + src, + parent.ty.slicePtrFieldType(mod), + &val_ptr.castTag(.slice).?.data.ptr, + ptr_elem_ty, + parent.decl_ref_mut, + ), + Value.Payload.Slice.len_index => return beginComptimePtrMutationInner( + sema, + block, + src, + Type.usize, + &val_ptr.castTag(.slice).?.data.len, + ptr_elem_ty, + parent.decl_ref_mut, + ), + + else => unreachable, + } + }, else => unreachable, - }, - else => 
unreachable, - } - }, - else => { - if (elem_ptr.index != 0) { - // TODO include a "declared here" note for the decl - return sema.fail(block, src, "out of bounds comptime store of index {d}", .{ - elem_ptr.index, - }); - } - return beginComptimePtrMutationInner( - sema, - block, - src, - parent.ty, - val_ptr, - ptr_elem_ty, - parent.decl_ref_mut, - ); - }, - }, - .reinterpret => |reinterpret| { - if (!elem_ptr.elem_ty.hasWellDefinedLayout(mod)) { - // Even though the parent value type has well-defined memory layout, our - // pointer type does not. - return ComptimePtrMutationKit{ - .decl_ref_mut = parent.decl_ref_mut, - .pointee = .bad_ptr_ty, - .ty = elem_ptr.elem_ty, - }; - } + } + }, + .empty_struct => { + const duped = try sema.arena.create(Value); + duped.* = Value.initTag(.the_only_possible_value); + return beginComptimePtrMutationInner( + sema, + block, + src, + parent.ty.structFieldType(field_index, mod), + duped, + ptr_elem_ty, + parent.decl_ref_mut, + ); + }, + .none => switch (val_ptr.tag()) { + .aggregate => return beginComptimePtrMutationInner( + sema, + block, + src, + parent.ty.structFieldType(field_index, mod), + &val_ptr.castTag(.aggregate).?.data[field_index], + ptr_elem_ty, + parent.decl_ref_mut, + ), + .repeated => { + const arena = parent.beginArena(sema.mod); + defer parent.finishArena(sema.mod); - const elem_abi_size_u64 = try sema.typeAbiSize(elem_ptr.elem_ty); - const elem_abi_size = try sema.usizeCast(block, src, elem_abi_size_u64); - return ComptimePtrMutationKit{ - .decl_ref_mut = parent.decl_ref_mut, - .pointee = .{ .reinterpret = .{ - .val_ptr = reinterpret.val_ptr, - .byte_offset = reinterpret.byte_offset + elem_abi_size * elem_ptr.index, - } }, - .ty = parent.ty, - }; - }, - .bad_decl_ty, .bad_ptr_ty => return parent, - } - }, - .field_ptr => { - const field_ptr = ptr_val.castTag(.field_ptr).?.data; - const field_index = @intCast(u32, field_ptr.field_index); - - var parent = try sema.beginComptimePtrMutation(block, src, field_ptr.container_ptr, field_ptr.container_ty); - switch (parent.pointee) { - .direct => |val_ptr| switch (val_ptr.ip_index) { - .undef => { - // A struct or union has been initialized to undefined at comptime and now we - // are for the first time setting a field. We must change the representation - // of the struct/union from `undef` to `struct`/`union`. - const arena = parent.beginArena(sema.mod); - defer parent.finishArena(sema.mod); - - switch (parent.ty.zigTypeTag(mod)) { - .Struct => { - const fields = try arena.alloc(Value, parent.ty.structFieldCount(mod)); - @memset(fields, Value.undef); - - val_ptr.* = try Value.Tag.aggregate.create(arena, fields); + const elems = try arena.alloc(Value, parent.ty.structFieldCount(mod)); + @memset(elems, val_ptr.castTag(.repeated).?.data); + val_ptr.* = try Value.Tag.aggregate.create(arena, elems); return beginComptimePtrMutationInner( sema, block, src, parent.ty.structFieldType(field_index, mod), - &fields[field_index], + &elems[field_index], ptr_elem_ty, parent.decl_ref_mut, ); }, - .Union => { - const payload = try arena.create(Value.Payload.Union); - const tag_ty = parent.ty.unionTagTypeHypothetical(mod); - payload.* = .{ .data = .{ - .tag = try mod.enumValueFieldIndex(tag_ty, field_index), - .val = Value.undef, - } }; + .@"union" => { + // We need to set the active field of the union. 
+ const union_tag_ty = field_ptr.container_ty.unionTagTypeHypothetical(mod); - val_ptr.* = Value.initPayload(&payload.base); + const payload = &val_ptr.castTag(.@"union").?.data; + payload.tag = try mod.enumValueFieldIndex(union_tag_ty, field_index); return beginComptimePtrMutationInner( sema, block, src, parent.ty.structFieldType(field_index, mod), - &payload.data.val, + &payload.val, ptr_elem_ty, parent.decl_ref_mut, ); }, - .Pointer => { - assert(parent.ty.isSlice(mod)); - val_ptr.* = try Value.Tag.slice.create(arena, .{ - .ptr = Value.undef, - .len = Value.undef, - }); + .slice => switch (field_index) { + Value.Payload.Slice.ptr_index => return beginComptimePtrMutationInner( + sema, + block, + src, + parent.ty.slicePtrFieldType(mod), + &val_ptr.castTag(.slice).?.data.ptr, + ptr_elem_ty, + parent.decl_ref_mut, + ), - switch (field_index) { - Value.Payload.Slice.ptr_index => return beginComptimePtrMutationInner( - sema, - block, - src, - parent.ty.slicePtrFieldType(mod), - &val_ptr.castTag(.slice).?.data.ptr, - ptr_elem_ty, - parent.decl_ref_mut, - ), - Value.Payload.Slice.len_index => return beginComptimePtrMutationInner( - sema, - block, - src, - Type.usize, - &val_ptr.castTag(.slice).?.data.len, - ptr_elem_ty, - parent.decl_ref_mut, - ), + Value.Payload.Slice.len_index => return beginComptimePtrMutationInner( + sema, + block, + src, + Type.usize, + &val_ptr.castTag(.slice).?.data.len, + ptr_elem_ty, + parent.decl_ref_mut, + ), - else => unreachable, - } + else => unreachable, }, - else => unreachable, - } - }, - .empty_struct => { - const duped = try sema.arena.create(Value); - duped.* = Value.initTag(.the_only_possible_value); - return beginComptimePtrMutationInner( - sema, - block, - src, - parent.ty.structFieldType(field_index, mod), - duped, - ptr_elem_ty, - parent.decl_ref_mut, - ); - }, - .none => switch (val_ptr.tag()) { - .aggregate => return beginComptimePtrMutationInner( - sema, - block, - src, - parent.ty.structFieldType(field_index, mod), - &val_ptr.castTag(.aggregate).?.data[field_index], - ptr_elem_ty, - parent.decl_ref_mut, - ), - .repeated => { - const arena = parent.beginArena(sema.mod); - defer parent.finishArena(sema.mod); - - const elems = try arena.alloc(Value, parent.ty.structFieldCount(mod)); - @memset(elems, val_ptr.castTag(.repeated).?.data); - val_ptr.* = try Value.Tag.aggregate.create(arena, elems); - - return beginComptimePtrMutationInner( - sema, - block, - src, - parent.ty.structFieldType(field_index, mod), - &elems[field_index], - ptr_elem_ty, - parent.decl_ref_mut, - ); - }, - .@"union" => { - // We need to set the active field of the union. 
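
As the `.@"union"` branch above shows, a comptime store through a pointer to a union field does two things: it retags the union with the field's hypothetical tag value, and only then hands out the payload slot for mutation. In sketch form:

    // Mutating a union field through a comptime pointer sets the active tag first.
    const union_tag_ty = field_ptr.container_ty.unionTagTypeHypothetical(mod);
    const payload = &val_ptr.castTag(.@"union").?.data;
    payload.tag = try mod.enumValueFieldIndex(union_tag_ty, field_index);
    // The caller then mutates `payload.val` as the field's storage.
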
- const union_tag_ty = field_ptr.container_ty.unionTagTypeHypothetical(mod); - - const payload = &val_ptr.castTag(.@"union").?.data; - payload.tag = try mod.enumValueFieldIndex(union_tag_ty, field_index); - - return beginComptimePtrMutationInner( - sema, - block, - src, - parent.ty.structFieldType(field_index, mod), - &payload.val, - ptr_elem_ty, - parent.decl_ref_mut, - ); - }, - .slice => switch (field_index) { - Value.Payload.Slice.ptr_index => return beginComptimePtrMutationInner( - sema, - block, - src, - parent.ty.slicePtrFieldType(mod), - &val_ptr.castTag(.slice).?.data.ptr, - ptr_elem_ty, - parent.decl_ref_mut, - ), - - Value.Payload.Slice.len_index => return beginComptimePtrMutationInner( - sema, - block, - src, - Type.usize, - &val_ptr.castTag(.slice).?.data.len, - ptr_elem_ty, - parent.decl_ref_mut, - ), else => unreachable, }, - else => unreachable, }, - else => unreachable, - }, - .reinterpret => |reinterpret| { - const field_offset_u64 = field_ptr.container_ty.structFieldOffset(field_index, mod); - const field_offset = try sema.usizeCast(block, src, field_offset_u64); - return ComptimePtrMutationKit{ - .decl_ref_mut = parent.decl_ref_mut, - .pointee = .{ .reinterpret = .{ - .val_ptr = reinterpret.val_ptr, - .byte_offset = reinterpret.byte_offset + field_offset, - } }, - .ty = parent.ty, - }; - }, - .bad_decl_ty, .bad_ptr_ty => return parent, - } - }, - .eu_payload_ptr => { - const eu_ptr = ptr_val.castTag(.eu_payload_ptr).?.data; - var parent = try sema.beginComptimePtrMutation(block, src, eu_ptr.container_ptr, eu_ptr.container_ty); - switch (parent.pointee) { - .direct => |val_ptr| { - const payload_ty = parent.ty.errorUnionPayload(mod); - if (val_ptr.ip_index == .none and val_ptr.tag() == .eu_payload) { - return ComptimePtrMutationKit{ - .decl_ref_mut = parent.decl_ref_mut, - .pointee = .{ .direct = &val_ptr.castTag(.eu_payload).?.data }, - .ty = payload_ty, - }; - } else { - // An error union has been initialized to undefined at comptime and now we - // are for the first time setting the payload. We must change the - // representation of the error union from `undef` to `opt_payload`. - const arena = parent.beginArena(sema.mod); - defer parent.finishArena(sema.mod); - - const payload = try arena.create(Value.Payload.SubValue); - payload.* = .{ - .base = .{ .tag = .eu_payload }, - .data = Value.undef, - }; - - val_ptr.* = Value.initPayload(&payload.base); - + .reinterpret => |reinterpret| { + const field_offset_u64 = field_ptr.container_ty.structFieldOffset(field_index, mod); + const field_offset = try sema.usizeCast(block, src, field_offset_u64); return ComptimePtrMutationKit{ .decl_ref_mut = parent.decl_ref_mut, - .pointee = .{ .direct = &payload.data }, - .ty = payload_ty, + .pointee = .{ .reinterpret = .{ + .val_ptr = reinterpret.val_ptr, + .byte_offset = reinterpret.byte_offset + field_offset, + } }, + .ty = parent.ty, }; - } - }, - .bad_decl_ty, .bad_ptr_ty => return parent, - // Even though the parent value type has well-defined memory layout, our - // pointer type does not. 
- .reinterpret => return ComptimePtrMutationKit{ - .decl_ref_mut = parent.decl_ref_mut, - .pointee = .bad_ptr_ty, - .ty = eu_ptr.container_ty, - }, - } - }, - .opt_payload_ptr => { - const opt_ptr = if (ptr_val.castTag(.opt_payload_ptr)) |some| some.data else { - return sema.beginComptimePtrMutation(block, src, ptr_val, ptr_elem_ty.optionalChild(mod)); - }; - var parent = try sema.beginComptimePtrMutation(block, src, opt_ptr.container_ptr, opt_ptr.container_ty); - switch (parent.pointee) { - .direct => |val_ptr| { - const payload_ty = parent.ty.optionalChild(mod); - switch (val_ptr.ip_index) { - .undef, .null_value => { - // An optional has been initialized to undefined at comptime and now we + }, + .bad_decl_ty, .bad_ptr_ty => return parent, + } + }, + .eu_payload_ptr => { + const eu_ptr = ptr_val.castTag(.eu_payload_ptr).?.data; + var parent = try sema.beginComptimePtrMutation(block, src, eu_ptr.container_ptr, eu_ptr.container_ty); + switch (parent.pointee) { + .direct => |val_ptr| { + const payload_ty = parent.ty.errorUnionPayload(mod); + if (val_ptr.ip_index == .none and val_ptr.tag() == .eu_payload) { + return ComptimePtrMutationKit{ + .decl_ref_mut = parent.decl_ref_mut, + .pointee = .{ .direct = &val_ptr.castTag(.eu_payload).?.data }, + .ty = payload_ty, + }; + } else { + // An error union has been initialized to undefined at comptime and now we // are for the first time setting the payload. We must change the - // representation of the optional from `undef` to `opt_payload`. + // representation of the error union from `undef` to `opt_payload`. const arena = parent.beginArena(sema.mod); defer parent.finishArena(sema.mod); const payload = try arena.create(Value.Payload.SubValue); payload.* = .{ - .base = .{ .tag = .opt_payload }, + .base = .{ .tag = .eu_payload }, .data = Value.undef, }; @@ -28147,39 +28230,84 @@ fn beginComptimePtrMutation( .pointee = .{ .direct = &payload.data }, .ty = payload_ty, }; - }, - .none => switch (val_ptr.tag()) { - .opt_payload => return ComptimePtrMutationKit{ - .decl_ref_mut = parent.decl_ref_mut, - .pointee = .{ .direct = &val_ptr.castTag(.opt_payload).?.data }, - .ty = payload_ty, + } + }, + .bad_decl_ty, .bad_ptr_ty => return parent, + // Even though the parent value type has well-defined memory layout, our + // pointer type does not. + .reinterpret => return ComptimePtrMutationKit{ + .decl_ref_mut = parent.decl_ref_mut, + .pointee = .bad_ptr_ty, + .ty = eu_ptr.container_ty, + }, + } + }, + .opt_payload_ptr => { + const opt_ptr = if (ptr_val.castTag(.opt_payload_ptr)) |some| some.data else { + return sema.beginComptimePtrMutation(block, src, ptr_val, ptr_elem_ty.optionalChild(mod)); + }; + var parent = try sema.beginComptimePtrMutation(block, src, opt_ptr.container_ptr, opt_ptr.container_ty); + switch (parent.pointee) { + .direct => |val_ptr| { + const payload_ty = parent.ty.optionalChild(mod); + switch (val_ptr.toIntern()) { + .undef, .null_value => { + // An optional has been initialized to undefined at comptime and now we + // are for the first time setting the payload. We must change the + // representation of the optional from `undef` to `opt_payload`. 
+ const arena = parent.beginArena(sema.mod); + defer parent.finishArena(sema.mod); + + const payload = try arena.create(Value.Payload.SubValue); + payload.* = .{ + .base = .{ .tag = .opt_payload }, + .data = Value.undef, + }; + + val_ptr.* = Value.initPayload(&payload.base); + + return ComptimePtrMutationKit{ + .decl_ref_mut = parent.decl_ref_mut, + .pointee = .{ .direct = &payload.data }, + .ty = payload_ty, + }; }, + .none => switch (val_ptr.tag()) { + .opt_payload => return ComptimePtrMutationKit{ + .decl_ref_mut = parent.decl_ref_mut, + .pointee = .{ .direct = &val_ptr.castTag(.opt_payload).?.data }, + .ty = payload_ty, + }, + else => return ComptimePtrMutationKit{ + .decl_ref_mut = parent.decl_ref_mut, + .pointee = .{ .direct = val_ptr }, + .ty = payload_ty, + }, + }, else => return ComptimePtrMutationKit{ .decl_ref_mut = parent.decl_ref_mut, .pointee = .{ .direct = val_ptr }, .ty = payload_ty, }, - }, - else => return ComptimePtrMutationKit{ - .decl_ref_mut = parent.decl_ref_mut, - .pointee = .{ .direct = val_ptr }, - .ty = payload_ty, - }, - } - }, - .bad_decl_ty, .bad_ptr_ty => return parent, - // Even though the parent value type has well-defined memory layout, our - // pointer type does not. - .reinterpret => return ComptimePtrMutationKit{ - .decl_ref_mut = parent.decl_ref_mut, - .pointee = .bad_ptr_ty, - .ty = opt_ptr.container_ty, - }, - } + } + }, + .bad_decl_ty, .bad_ptr_ty => return parent, + // Even though the parent value type has well-defined memory layout, our + // pointer type does not. + .reinterpret => return ComptimePtrMutationKit{ + .decl_ref_mut = parent.decl_ref_mut, + .pointee = .bad_ptr_ty, + .ty = opt_ptr.container_ty, + }, + } + }, + .decl_ref => unreachable, // isComptimeMutablePtr has been checked already + else => unreachable, + }, + else => switch (mod.intern_pool.indexToKey(ptr_val.toIntern()).ptr) { + else => unreachable, }, - .decl_ref => unreachable, // isComptimeMutablePtr has been checked already - else => unreachable, } } @@ -28190,13 +28318,13 @@ fn beginComptimePtrMutationInner( decl_ty: Type, decl_val: *Value, ptr_elem_ty: Type, - decl_ref_mut: Value.Payload.DeclRefMut.Data, + mut_decl: InternPool.Key.Ptr.Addr.MutDecl, ) CompileError!ComptimePtrMutationKit { const mod = sema.mod; const target = mod.getTarget(); const coerce_ok = (try sema.coerceInMemoryAllowed(block, ptr_elem_ty, decl_ty, true, target, src, src)) == .ok; - const decl = mod.declPtr(decl_ref_mut.decl_index); + const decl = mod.declPtr(mut_decl.decl); var decl_arena: std.heap.ArenaAllocator = undefined; const allocator = decl.value_arena.?.acquire(sema.gpa, &decl_arena); defer decl.value_arena.?.release(&decl_arena); @@ -28204,7 +28332,7 @@ fn beginComptimePtrMutationInner( if (coerce_ok) { return ComptimePtrMutationKit{ - .decl_ref_mut = decl_ref_mut, + .mut_decl = mut_decl, .pointee = .{ .direct = decl_val }, .ty = decl_ty, }; @@ -28215,7 +28343,7 @@ fn beginComptimePtrMutationInner( const decl_elem_ty = decl_ty.childType(mod); if ((try sema.coerceInMemoryAllowed(block, ptr_elem_ty, decl_elem_ty, true, target, src, src)) == .ok) { return ComptimePtrMutationKit{ - .decl_ref_mut = decl_ref_mut, + .mut_decl = mut_decl, .pointee = .{ .direct = decl_val }, .ty = decl_ty, }; @@ -28224,20 +28352,20 @@ fn beginComptimePtrMutationInner( if (!decl_ty.hasWellDefinedLayout(mod)) { return ComptimePtrMutationKit{ - .decl_ref_mut = decl_ref_mut, - .pointee = .{ .bad_decl_ty = {} }, + .mut_decl = mut_decl, + .pointee = .bad_decl_ty, .ty = decl_ty, }; } if (!ptr_elem_ty.hasWellDefinedLayout(mod)) 
{ return ComptimePtrMutationKit{ - .decl_ref_mut = decl_ref_mut, - .pointee = .{ .bad_ptr_ty = {} }, + .mut_decl = mut_decl, + .pointee = .bad_ptr_ty, .ty = ptr_elem_ty, }; } return ComptimePtrMutationKit{ - .decl_ref_mut = decl_ref_mut, + .mut_decl = mut_decl, .pointee = .{ .reinterpret = .{ .val_ptr = decl_val, .byte_offset = 0, @@ -28282,7 +28410,7 @@ fn beginComptimePtrLoad( const mod = sema.mod; const target = mod.getTarget(); - var deref: ComptimePtrLoadKit = switch (mod.intern_pool.indexToKey(ptr_val.ip_index)) { + var deref: ComptimePtrLoadKit = switch (mod.intern_pool.indexToKey(ptr_val.toIntern())) { .ptr => |ptr| switch (ptr.addr) { .decl, .mut_decl => blk: { const decl_index = switch (ptr.addr) { @@ -28319,7 +28447,7 @@ fn beginComptimePtrLoad( (try sema.coerceInMemoryAllowed(block, container_ty, tv.ty, false, target, src, src)) == .ok or (try sema.coerceInMemoryAllowed(block, tv.ty, container_ty, false, target, src, src)) == .ok; if (coerce_in_mem_ok) { - const payload_val = switch (mod.intern_pool.indexToKey(tv.val.ip_index)) { + const payload_val = switch (mod.intern_pool.indexToKey(tv.val.toIntern())) { .error_union => |error_union| switch (error_union.val) { .err_name => |err_name| return sema.fail(block, src, "attempt to unwrap error: {s}", .{mod.intern_pool.stringToSlice(err_name)}), .payload => |payload| payload, @@ -28462,7 +28590,7 @@ fn beginComptimePtrLoad( }, Value.slice_len_index => TypedValue{ .ty = Type.usize, - .val = mod.intern_pool.indexToKey(tv.val.ip_index).ptr.len.toValue(), + .val = mod.intern_pool.indexToKey(tv.val.toIntern()).ptr.len.toValue(), }, else => unreachable, }; @@ -28565,9 +28693,9 @@ fn coerceArrayPtrToSlice( const ptr_array_ty = sema.typeOf(inst); const array_ty = ptr_array_ty.childType(mod); const slice_val = try mod.intern(.{ .ptr = .{ - .ty = dest_ty.ip_index, - .addr = mod.intern_pool.indexToKey(val.ip_index).ptr.addr, - .len = (try mod.intValue(Type.usize, array_ty.arrayLen(mod))).ip_index, + .ty = dest_ty.toIntern(), + .addr = mod.intern_pool.indexToKey(val.toIntern()).ptr.addr, + .len = (try mod.intValue(Type.usize, array_ty.arrayLen(mod))).toIntern(), } }); return sema.addConstant(dest_ty, slice_val.toValue()); } @@ -28643,7 +28771,7 @@ fn coerceCompatiblePtrs( return sema.addConstant(dest_ty, (try mod.intern_pool.getCoerced( sema.gpa, try val.intern(inst_ty, mod), - dest_ty.ip_index, + dest_ty.toIntern(), )).toValue()); } try sema.requireRuntimeBlock(block, inst_src, null); @@ -28840,7 +28968,7 @@ fn coerceAnonStructToUnion( return sema.failWithOwnedErrorMsg(msg); } - const anon_struct = mod.intern_pool.indexToKey(inst_ty.ip_index).anon_struct_type; + const anon_struct = mod.intern_pool.indexToKey(inst_ty.toIntern()).anon_struct_type; const field_name = mod.intern_pool.stringToSlice(anon_struct.names[0]); const init = try sema.structFieldVal(block, inst_src, inst, field_name, inst_src, inst_ty); return sema.unionInit(block, init, inst_src, union_ty, union_ty_src, field_name, inst_src); @@ -28916,23 +29044,20 @@ fn coerceArrayLike( return block.addBitCast(dest_ty, inst); } - const element_vals = try sema.arena.alloc(Value, dest_len); + const element_vals = try sema.arena.alloc(InternPool.Index, dest_len); const element_refs = try sema.arena.alloc(Air.Inst.Ref, dest_len); var runtime_src: ?LazySrcLoc = null; - for (element_vals, 0..) |*elem, i| { - const index_ref = try sema.addConstant( - Type.usize, - try mod.intValue(Type.usize, i), - ); + for (element_vals, element_refs, 0..) 
|*val, *ref, i| { + const index_ref = try sema.addConstant(Type.usize, try mod.intValue(Type.usize, i)); const src = inst_src; // TODO better source location const elem_src = inst_src; // TODO better source location const elem_ref = try sema.elemValArray(block, src, inst_src, inst, elem_src, index_ref, true); const coerced = try sema.coerce(block, dest_elem_ty, elem_ref, elem_src); - element_refs[i] = coerced; + ref.* = coerced; if (runtime_src == null) { if (try sema.resolveMaybeUndefVal(coerced)) |elem_val| { - elem.* = elem_val; + val.* = try elem_val.intern(dest_elem_ty, mod); } else { runtime_src = elem_src; } @@ -28944,10 +29069,10 @@ fn coerceArrayLike( return block.addAggregateInit(dest_ty, element_refs); } - return sema.addConstant( - dest_ty, - try Value.Tag.aggregate.create(sema.arena, element_vals), - ); + return sema.addConstant(dest_ty, (try mod.intern(.{ .aggregate = .{ + .ty = dest_ty.toIntern(), + .storage = .{ .elems = element_vals }, + } })).toValue()); } /// If the lengths match, coerces element-wise. @@ -28978,25 +29103,26 @@ fn coerceTupleToArray( } const dest_elems = try sema.usizeCast(block, dest_ty_src, dest_ty.arrayLenIncludingSentinel(mod)); - const element_vals = try sema.arena.alloc(Value, dest_elems); + const element_vals = try sema.arena.alloc(InternPool.Index, dest_elems); const element_refs = try sema.arena.alloc(Air.Inst.Ref, dest_elems); const dest_elem_ty = dest_ty.childType(mod); var runtime_src: ?LazySrcLoc = null; - for (element_vals, 0..) |*elem, i_usize| { + for (element_vals, element_refs, 0..) |*val, *ref, i_usize| { const i = @intCast(u32, i_usize); if (i_usize == inst_len) { - elem.* = dest_ty.sentinel(mod).?; - element_refs[i] = try sema.addConstant(dest_elem_ty, elem.*); + const sentinel_val = dest_ty.sentinel(mod).?; + val.* = sentinel_val.toIntern(); + ref.* = try sema.addConstant(dest_elem_ty, sentinel_val); break; } const elem_src = inst_src; // TODO better source location const elem_ref = try sema.tupleField(block, inst_src, inst, elem_src, i); const coerced = try sema.coerce(block, dest_elem_ty, elem_ref, elem_src); - element_refs[i] = coerced; + ref.* = coerced; if (runtime_src == null) { if (try sema.resolveMaybeUndefVal(coerced)) |elem_val| { - elem.* = elem_val; + val.* = try elem_val.intern(dest_elem_ty, mod); } else { runtime_src = elem_src; } @@ -29008,10 +29134,10 @@ fn coerceTupleToArray( return block.addAggregateInit(dest_ty, element_refs); } - return sema.addConstant( - dest_ty, - try Value.Tag.aggregate.create(sema.arena, element_vals), - ); + return sema.addConstant(dest_ty, (try mod.intern(.{ .aggregate = .{ + .ty = dest_ty.toIntern(), + .storage = .{ .elems = element_vals }, + } })).toValue()); } /// If the lengths match, coerces element-wise. 
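The coerceArrayLike and coerceTupleToArray hunks above apply the transformation that recurs throughout this patch: comptime-known elements are collected as `InternPool.Index` rather than `Value`, and the finished aggregate is interned in the pool instead of being allocated behind a `Value.Tag.aggregate` payload in Sema's arena. A minimal sketch of the before/after shape, assuming a `sema: *Sema` in scope; `len`, `elem_ty`, `result_ty`, and `computeElem` are illustrative stand-ins, not names from this patch:

    // Old shape: arena-owned Value slice wrapped in a legacy tag payload.
    const old_elems = try sema.arena.alloc(Value, len);
    for (old_elems, 0..) |*elem, i| elem.* = try computeElem(i);
    return Value.Tag.aggregate.create(sema.arena, old_elems);

    // New shape: each element becomes an intern-pool index, and the
    // aggregate itself is interned, so identical values deduplicate
    // globally and own no arena memory.
    const mod = sema.mod;
    const elems = try sema.arena.alloc(InternPool.Index, len);
    for (elems, 0..) |*elem, i| {
        const elem_val = try computeElem(i);
        elem.* = try elem_val.intern(elem_ty, mod);
    }
    return (try mod.intern(.{ .aggregate = .{
        .ty = result_ty.toIntern(),
        .storage = .{ .elems = elems },
    } })).toValue();

The runtime path is unchanged in both hunks: as soon as one element is not comptime-known, `runtime_src` is set and the function still falls through to `block.addAggregateInit`.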
@@ -29079,7 +29205,7 @@ fn coerceTupleToStruct( @memset(field_refs, .none); const inst_ty = sema.typeOf(inst); - const anon_struct = mod.intern_pool.indexToKey(inst_ty.ip_index).anon_struct_type; + const anon_struct = mod.intern_pool.indexToKey(inst_ty.toIntern()).anon_struct_type; var runtime_src: ?LazySrcLoc = null; for (0..anon_struct.types.len) |field_index_usize| { const field_i = @intCast(u32, field_index_usize); @@ -29105,8 +29231,7 @@ fn coerceTupleToStruct( } if (runtime_src == null) { if (try sema.resolveMaybeUndefVal(coerced)) |field_val| { - assert(field_val.ip_index != .none); - field_vals[field_index] = field_val.ip_index; + field_vals[field_index] = field_val.toIntern(); } else { runtime_src = field_src; } @@ -29123,7 +29248,7 @@ fn coerceTupleToStruct( const field_name = fields.keys()[i]; const field = fields.values()[i]; const field_src = inst_src; // TODO better source location - if (field.default_val.ip_index == .unreachable_value) { + if (field.default_val.toIntern() == .unreachable_value) { const template = "missing struct field: {s}"; const args = .{field_name}; if (root_msg) |msg| { @@ -29134,8 +29259,7 @@ fn coerceTupleToStruct( continue; } if (runtime_src == null) { - assert(field.default_val.ip_index != .none); - field_vals[i] = field.default_val.ip_index; + field_vals[i] = field.default_val.toIntern(); } else { field_ref.* = try sema.addConstant(field.ty, field.default_val); } @@ -29152,9 +29276,8 @@ fn coerceTupleToStruct( return block.addAggregateInit(struct_ty, field_refs); } - assert(struct_ty.ip_index != .none); const struct_val = try mod.intern(.{ .aggregate = .{ - .ty = struct_ty.ip_index, + .ty = struct_ty.toIntern(), .storage = .{ .elems = field_vals }, } }); errdefer mod.intern_pool.remove(struct_val); @@ -29170,13 +29293,13 @@ fn coerceTupleToTuple( inst_src: LazySrcLoc, ) !Air.Inst.Ref { const mod = sema.mod; - const dest_tuple = mod.intern_pool.indexToKey(tuple_ty.ip_index).anon_struct_type; + const dest_tuple = mod.intern_pool.indexToKey(tuple_ty.toIntern()).anon_struct_type; const field_vals = try sema.arena.alloc(InternPool.Index, dest_tuple.types.len); const field_refs = try sema.arena.alloc(Air.Inst.Ref, field_vals.len); @memset(field_refs, .none); const inst_ty = sema.typeOf(inst); - const src_tuple = mod.intern_pool.indexToKey(inst_ty.ip_index).anon_struct_type; + const src_tuple = mod.intern_pool.indexToKey(inst_ty.toIntern()).anon_struct_type; if (src_tuple.types.len > dest_tuple.types.len) return error.NotCoercible; var runtime_src: ?LazySrcLoc = null; @@ -29209,7 +29332,7 @@ fn coerceTupleToTuple( } if (runtime_src == null) { if (try sema.resolveMaybeUndefVal(coerced)) |field_val| { - field_vals[field_index] = field_val.ip_index; + field_vals[field_index] = field_val.toIntern(); } else { runtime_src = field_src; } @@ -29269,7 +29392,7 @@ fn coerceTupleToTuple( return sema.addConstant( tuple_ty, (try mod.intern(.{ .aggregate = .{ - .ty = tuple_ty.ip_index, + .ty = tuple_ty.toIntern(), .storage = .{ .elems = field_vals }, } })).toValue(), ); @@ -29349,7 +29472,7 @@ fn refValue(sema: *Sema, block: *Block, ty: Type, val: Value) !Value { try sema.maybeQueueFuncBodyAnalysis(decl); try mod.declareDeclDependency(sema.owner_decl_index, decl); const result = try mod.intern(.{ .ptr = .{ - .ty = (try mod.singleConstPtrType(ty)).ip_index, + .ty = (try mod.singleConstPtrType(ty)).toIntern(), .addr = .{ .decl = decl }, } }); return result.toValue(); @@ -29360,8 +29483,8 @@ fn optRefValue(sema: *Sema, block: *Block, ty: Type, opt_val: ?Value) !Value { 
const val = opt_val orelse return Value.null; const ptr_val = try sema.refValue(block, ty, val); const result = try mod.intern(.{ .opt = .{ - .ty = (try mod.optionalType((try mod.singleConstPtrType(ty)).ip_index)).ip_index, - .val = ptr_val.ip_index, + .ty = (try mod.optionalType((try mod.singleConstPtrType(ty)).toIntern())).toIntern(), + .val = ptr_val.toIntern(), } }); return result.toValue(); } @@ -29382,7 +29505,7 @@ fn analyzeDeclRefInner(sema: *Sema, decl_index: Decl.Index, analyze_fn_body: boo const decl = mod.declPtr(decl_index); const decl_tv = try decl.typedValue(); const ptr_ty = try mod.ptrType(.{ - .elem_type = decl_tv.ty.ip_index, + .elem_type = decl_tv.ty.toIntern(), .alignment = InternPool.Alignment.fromByteUnits(decl.@"align"), .is_const = if (decl.getVariable(mod)) |variable| variable.is_const else false, .address_space = decl.@"addrspace", @@ -29391,7 +29514,7 @@ fn analyzeDeclRefInner(sema: *Sema, decl_index: Decl.Index, analyze_fn_body: boo try sema.maybeQueueFuncBodyAnalysis(decl_index); } return sema.addConstant(ptr_ty, (try mod.intern(.{ .ptr = .{ - .ty = ptr_ty.ip_index, + .ty = ptr_ty.toIntern(), .addr = .{ .decl = decl_index }, } })).toValue()); } @@ -29415,13 +29538,10 @@ fn analyzeRef( const operand_ty = sema.typeOf(operand); if (try sema.resolveMaybeUndefVal(operand)) |val| { - switch (val.ip_index) { - .none => {}, - else => switch (sema.mod.intern_pool.indexToKey(val.ip_index)) { - .extern_func => |extern_func| return sema.analyzeDeclRef(extern_func.decl), - .func => |func| return sema.analyzeDeclRef(sema.mod.funcPtr(func.index).owner_decl), - else => {}, - }, + switch (sema.mod.intern_pool.indexToKey(val.toIntern())) { + .extern_func => |extern_func| return sema.analyzeDeclRef(extern_func.decl), + .func => |func| return sema.analyzeDeclRef(sema.mod.funcPtr(func.index).owner_decl), + else => {}, } var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); @@ -29617,9 +29737,9 @@ fn analyzeIsNonErrComptimeOnly( // exception if the error union error set is known to be empty, // we allow the comparison but always make it comptime-known. 
const set_ty = operand_ty.errorUnionSet(mod); - switch (set_ty.ip_index) { + switch (set_ty.toIntern()) { .anyerror_type => {}, - else => switch (mod.intern_pool.indexToKey(set_ty.ip_index)) { + else => switch (mod.intern_pool.indexToKey(set_ty.toIntern())) { .error_set_type => |error_set_type| { if (error_set_type.names.len == 0) return Air.Inst.Ref.bool_true; }, @@ -30027,7 +30147,7 @@ fn analyzeSlice( return sema.addConstant(return_ty, (try mod.intern_pool.getCoerced( sema.gpa, try new_ptr_val.intern(new_ptr_ty, mod), - return_ty.ip_index, + return_ty.toIntern(), )).toValue()); } @@ -30546,8 +30666,8 @@ fn wrapOptional( ) !Air.Inst.Ref { if (try sema.resolveMaybeUndefVal(inst)) |val| { return sema.addConstant(dest_ty, (try sema.mod.intern(.{ .opt = .{ - .ty = dest_ty.ip_index, - .val = val.ip_index, + .ty = dest_ty.toIntern(), + .val = val.toIntern(), } })).toValue()); } @@ -30567,8 +30687,8 @@ fn wrapErrorUnionPayload( const coerced = try sema.coerceExtra(block, dest_payload_ty, inst, inst_src, .{ .report_err = false }); if (try sema.resolveMaybeUndefVal(coerced)) |val| { return sema.addConstant(dest_ty, (try mod.intern(.{ .error_union = .{ - .ty = dest_ty.ip_index, - .val = .{ .payload = val.ip_index }, + .ty = dest_ty.toIntern(), + .val = .{ .payload = val.toIntern() }, } })).toValue()); } try sema.requireRuntimeBlock(block, inst_src, null); @@ -30588,17 +30708,17 @@ fn wrapErrorUnionSet( const inst_ty = sema.typeOf(inst); const dest_err_set_ty = dest_ty.errorUnionSet(mod); if (try sema.resolveMaybeUndefVal(inst)) |val| { - switch (dest_err_set_ty.ip_index) { + switch (dest_err_set_ty.toIntern()) { .anyerror_type => {}, - else => switch (ip.indexToKey(dest_err_set_ty.ip_index)) { + else => switch (ip.indexToKey(dest_err_set_ty.toIntern())) { .error_set_type => |error_set_type| ok: { - const expected_name = mod.intern_pool.indexToKey(val.ip_index).err.name; + const expected_name = mod.intern_pool.indexToKey(val.toIntern()).err.name; if (error_set_type.nameIndex(ip, expected_name) != null) break :ok; return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty); }, .inferred_error_set_type => |ies_index| ok: { const ies = mod.inferredErrorSetPtr(ies_index); - const expected_name = mod.intern_pool.indexToKey(val.ip_index).err.name; + const expected_name = mod.intern_pool.indexToKey(val.toIntern()).err.name; // We carefully do this in an order that avoids unnecessarily // resolving the destination error set type. @@ -31252,34 +31372,31 @@ pub fn resolveFnTypes(sema: *Sema, fn_info: InternPool.Key.FuncType) CompileErro /// Make it so that calling hash() and eql() on `val` will not assert due /// to a type not having its layout resolved. 
fn resolveLazyValue(sema: *Sema, val: Value) CompileError!void { - switch (val.ip_index) { - .none => {}, - else => switch (sema.mod.intern_pool.indexToKey(val.ip_index)) { - .int => |int| switch (int.storage) { - .u64, .i64, .big_int => {}, - .lazy_align, .lazy_size => |lazy_ty| try sema.resolveTypeLayout(lazy_ty.toType()), - }, - .ptr => |ptr| { - switch (ptr.addr) { - .decl, .mut_decl => {}, - .int => |int| try sema.resolveLazyValue(int.toValue()), - .eu_payload, .opt_payload => |base| try sema.resolveLazyValue(base.toValue()), - .comptime_field => |comptime_field| try sema.resolveLazyValue(comptime_field.toValue()), - .elem, .field => |base_index| try sema.resolveLazyValue(base_index.base.toValue()), - } - if (ptr.len != .none) try sema.resolveLazyValue(ptr.len.toValue()); - }, - .aggregate => |aggregate| switch (aggregate.storage) { - .bytes => {}, - .elems => |elems| for (elems) |elem| try sema.resolveLazyValue(elem.toValue()), - .repeated_elem => |elem| try sema.resolveLazyValue(elem.toValue()), - }, - .un => |un| { - try sema.resolveLazyValue(un.tag.toValue()); - try sema.resolveLazyValue(un.val.toValue()); - }, - else => {}, + switch (sema.mod.intern_pool.indexToKey(val.toIntern())) { + .int => |int| switch (int.storage) { + .u64, .i64, .big_int => {}, + .lazy_align, .lazy_size => |lazy_ty| try sema.resolveTypeLayout(lazy_ty.toType()), + }, + .ptr => |ptr| { + switch (ptr.addr) { + .decl, .mut_decl => {}, + .int => |int| try sema.resolveLazyValue(int.toValue()), + .eu_payload, .opt_payload => |base| try sema.resolveLazyValue(base.toValue()), + .comptime_field => |comptime_field| try sema.resolveLazyValue(comptime_field.toValue()), + .elem, .field => |base_index| try sema.resolveLazyValue(base_index.base.toValue()), + } + if (ptr.len != .none) try sema.resolveLazyValue(ptr.len.toValue()); + }, + .aggregate => |aggregate| switch (aggregate.storage) { + .bytes => {}, + .elems => |elems| for (elems) |elem| try sema.resolveLazyValue(elem.toValue()), + .repeated_elem => |elem| try sema.resolveLazyValue(elem.toValue()), }, + .un => |un| { + try sema.resolveLazyValue(un.tag.toValue()); + try sema.resolveLazyValue(un.val.toValue()); + }, + else => {}, } } @@ -31617,9 +31734,9 @@ fn resolveUnionLayout(sema: *Sema, ty: Type) CompileError!void { pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { const mod = sema.mod; - return switch (ty.ip_index) { + return switch (ty.toIntern()) { .empty_struct_type => false, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + else => switch (mod.intern_pool.indexToKey(ty.toIntern())) { .int_type => false, .ptr_type => |ptr_type| { const child_ty = ptr_type.elem_type.toType(); @@ -31776,7 +31893,7 @@ pub fn resolveTypeFully(sema: *Sema, ty: Type) CompileError!void { const child_ty = try sema.resolveTypeFields(ty.childType(mod)); return sema.resolveTypeFully(child_ty); }, - .Struct => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .Struct => switch (mod.intern_pool.indexToKey(ty.toIntern())) { .struct_type => return sema.resolveStructFully(ty), .anon_struct_type => |tuple| { for (tuple.types) |field_ty| { @@ -31869,7 +31986,7 @@ fn resolveUnionFully(sema: *Sema, ty: Type) CompileError!void { pub fn resolveTypeFields(sema: *Sema, ty: Type) CompileError!Type { const mod = sema.mod; - switch (ty.ip_index) { + switch (ty.toIntern()) { .var_args_param_type => unreachable, .none => unreachable, @@ -31960,7 +32077,7 @@ pub fn resolveTypeFields(sema: *Sema, ty: Type) CompileError!Type { .call_modifier_type => return 
sema.getBuiltinType("CallModifier"), .prefetch_options_type => return sema.getBuiltinType("PrefetchOptions"), - _ => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + _ => switch (mod.intern_pool.indexToKey(ty.toIntern())) { .struct_type => |struct_type| { const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return ty; try sema.resolveTypeFieldsStruct(ty, struct_obj); @@ -32605,7 +32722,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { } else { // The provided type is the enum tag type. union_obj.tag_ty = provided_ty; - const enum_type = switch (mod.intern_pool.indexToKey(union_obj.tag_ty.ip_index)) { + const enum_type = switch (mod.intern_pool.indexToKey(union_obj.tag_ty.toIntern())) { .enum_type => |x| x, else => return sema.fail(&block_scope, tag_ty_src, "expected enum tag type, found '{}'", .{union_obj.tag_ty.fmt(mod)}), }; @@ -32698,7 +32815,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { break :blk val; }; - enum_field_vals[field_i] = copied_val.ip_index; + enum_field_vals[field_i] = copied_val.toIntern(); const gop = enum_field_vals_map.getOrPutAssumeCapacityContext(copied_val, .{ .ty = int_tag_ty, .mod = mod, @@ -32960,7 +33077,7 @@ fn generateUnionTagTypeSimple( .tag_ty = if (enum_field_names.len == 0) .noreturn_type else - (try mod.smallestUnsignedInt(enum_field_names.len - 1)).ip_index, + (try mod.smallestUnsignedInt(enum_field_names.len - 1)).toIntern(), .names = enum_field_names, .values = &.{}, .tag_mode = .auto, @@ -33053,9 +33170,9 @@ fn getBuiltinType(sema: *Sema, name: []const u8) CompileError!Type { /// TODO assert the return value matches `ty.onePossibleValue` pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { const mod = sema.mod; - return switch (ty.ip_index) { + return switch (ty.toIntern()) { .empty_struct_type => Value.empty_struct, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + else => switch (mod.intern_pool.indexToKey(ty.toIntern())) { .int_type => |int_type| { if (int_type.bits == 0) { return try mod.intValue(ty, 0); @@ -33074,13 +33191,13 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { inline .array_type, .vector_type => |seq_type| { if (seq_type.len == 0) return (try mod.intern(.{ .aggregate = .{ - .ty = ty.ip_index, + .ty = ty.toIntern(), .storage = .{ .elems = &.{} }, } })).toValue(); if (try sema.typeHasOnePossibleValue(seq_type.child.toType())) |opv| { return (try mod.intern(.{ .aggregate = .{ - .ty = ty.ip_index, - .storage = .{ .repeated_elem = opv.ip_index }, + .ty = ty.toIntern(), + .storage = .{ .repeated_elem = opv.toIntern() }, } })).toValue(); } return null; @@ -33169,7 +33286,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { // This TODO is repeated in the redundant implementation of // one-possible-value in type.zig. const empty = try mod.intern(.{ .aggregate = .{ - .ty = ty.ip_index, + .ty = ty.toIntern(), .storage = .{ .elems = &.{} }, } }); return empty.toValue(); @@ -33182,7 +33299,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { // In this case the struct has all comptime-known fields and // therefore has one possible value. 
return (try mod.intern(.{ .aggregate = .{ - .ty = ty.ip_index, + .ty = ty.toIntern(), .storage = .{ .elems = tuple.values }, } })).toValue(); }, @@ -33208,9 +33325,9 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { const val_val = (try sema.typeHasOnePossibleValue(only_field.ty)) orelse return null; const only = try mod.intern(.{ .un = .{ - .ty = resolved_ty.ip_index, - .tag = tag_val.ip_index, - .val = val_val.ip_index, + .ty = resolved_ty.toIntern(), + .tag = tag_val.toIntern(), + .val = val_val.toIntern(), } }); return only.toValue(); }, @@ -33221,8 +33338,8 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { if (try sema.typeHasOnePossibleValue(enum_type.tag_ty.toType())) |int_opv| { const only = try mod.intern(.{ .enum_tag = .{ - .ty = ty.ip_index, - .int = int_opv.ip_index, + .ty = ty.toIntern(), + .int = int_opv.toIntern(), } }); return only.toValue(); } @@ -33234,7 +33351,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { 1 => { if (enum_type.values.len == 0) { const only = try mod.intern(.{ .enum_tag = .{ - .ty = ty.ip_index, + .ty = ty.toIntern(), .int = try mod.intern(.{ .int = .{ .ty = enum_type.tag_ty, .storage = .{ .u64 = 0 }, @@ -33285,21 +33402,13 @@ pub fn getTmpAir(sema: Sema) Air { } pub fn addType(sema: *Sema, ty: Type) !Air.Inst.Ref { - if (ty.ip_index != .none) { - if (@enumToInt(ty.ip_index) < Air.ref_start_index) - return @intToEnum(Air.Inst.Ref, @enumToInt(ty.ip_index)); - try sema.air_instructions.append(sema.gpa, .{ - .tag = .interned, - .data = .{ .interned = ty.ip_index }, - }); - return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1)); - } else { - try sema.air_instructions.append(sema.gpa, .{ - .tag = .const_ty, - .data = .{ .ty = ty }, - }); - return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1)); - } + if (@enumToInt(ty.toIntern()) < Air.ref_start_index) + return @intToEnum(Air.Inst.Ref, @enumToInt(ty.toIntern())); + try sema.air_instructions.append(sema.gpa, .{ + .tag = .interned, + .data = .{ .interned = ty.toIntern() }, + }); + return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1)); } fn addIntUnsigned(sema: *Sema, ty: Type, int: u64) CompileError!Air.Inst.Ref { @@ -33313,12 +33422,12 @@ fn addConstUndef(sema: *Sema, ty: Type) CompileError!Air.Inst.Ref { pub fn addConstant(sema: *Sema, ty: Type, val: Value) SemaError!Air.Inst.Ref { const gpa = sema.gpa; - if (val.ip_index != .none and val.ip_index != .null_value) { - if (@enumToInt(val.ip_index) < Air.ref_start_index) - return @intToEnum(Air.Inst.Ref, @enumToInt(val.ip_index)); + if (val.ip_index != .none) { + if (@enumToInt(val.toIntern()) < Air.ref_start_index) + return @intToEnum(Air.Inst.Ref, @enumToInt(val.toIntern())); try sema.air_instructions.append(gpa, .{ .tag = .interned, - .data = .{ .interned = val.ip_index }, + .data = .{ .interned = val.toIntern() }, }); const result = Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1)); // This assertion can be removed when the `ty` parameter is removed from @@ -33417,7 +33526,7 @@ fn analyzeComptimeAlloc( try sema.mod.declareDeclDependency(sema.owner_decl_index, decl_index); return sema.addConstant(ptr_type, (try sema.mod.intern(.{ .ptr = .{ - .ty = ptr_type.ip_index, + .ty = ptr_type.toIntern(), .addr = .{ .mut_decl = .{ .decl = decl_index, .runtime_index = block.runtime_index, @@ -33589,7 +33698,7 @@ fn usizeCast(sema: *Sema, block: *Block, src: LazySrcLoc, int: u64) CompileError /// This logic must be kept in sync with 
`Type.isPtrLikeOptional`. fn typePtrOrOptionalPtrTy(sema: *Sema, ty: Type) !?Type { const mod = sema.mod; - return switch (mod.intern_pool.indexToKey(ty.ip_index)) { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { .ptr_type => |ptr_type| switch (ptr_type.size) { .Slice => null, .C => ptr_type.elem_type.toType(), @@ -33624,10 +33733,10 @@ fn typePtrOrOptionalPtrTy(sema: *Sema, ty: Type) !?Type { /// elsewhere in value.zig pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { const mod = sema.mod; - return switch (ty.ip_index) { + return switch (ty.toIntern()) { .empty_struct_type => false, - else => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + else => switch (mod.intern_pool.indexToKey(ty.toIntern())) { .int_type => return false, .ptr_type => |ptr_type| { const child_ty = ptr_type.elem_type.toType(); @@ -33873,7 +33982,7 @@ fn anonStructFieldIndex( field_src: LazySrcLoc, ) !u32 { const mod = sema.mod; - const anon_struct = mod.intern_pool.indexToKey(struct_ty.ip_index).anon_struct_type; + const anon_struct = mod.intern_pool.indexToKey(struct_ty.toIntern()).anon_struct_type; for (anon_struct.names, 0..) |name, i| { if (mem.eql(u8, mod.intern_pool.stringToSlice(name), field_name)) { return @intCast(u32, i); @@ -33891,14 +34000,17 @@ fn queueFullTypeResolution(sema: *Sema, ty: Type) !void { fn intAdd(sema: *Sema, lhs: Value, rhs: Value, ty: Type) !Value { const mod = sema.mod; if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try sema.arena.alloc(Value, ty.vectorLen(mod)); + const result_data = try sema.arena.alloc(InternPool.Index, ty.vectorLen(mod)); const scalar_ty = ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { const lhs_elem = try lhs.elemValue(mod, i); const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = try sema.intAddScalar(lhs_elem, rhs_elem, scalar_ty); + scalar.* = try (try sema.intAddScalar(lhs_elem, rhs_elem, scalar_ty)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(sema.arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return sema.intAddScalar(lhs, rhs, ty); } @@ -33945,14 +34057,17 @@ fn numberAddWrapScalar( fn intSub(sema: *Sema, lhs: Value, rhs: Value, ty: Type) !Value { const mod = sema.mod; if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try sema.arena.alloc(Value, ty.vectorLen(mod)); + const result_data = try sema.arena.alloc(InternPool.Index, ty.vectorLen(mod)); const scalar_ty = ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { const lhs_elem = try lhs.elemValue(sema.mod, i); const rhs_elem = try rhs.elemValue(sema.mod, i); - scalar.* = try sema.intSubScalar(lhs_elem, rhs_elem, scalar_ty); + scalar.* = try (try sema.intSubScalar(lhs_elem, rhs_elem, scalar_ty)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(sema.arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return sema.intSubScalar(lhs, rhs, ty); } @@ -34004,18 +34119,26 @@ fn intSubWithOverflow( ) !Value.OverflowArithmeticResult { const mod = sema.mod; if (ty.zigTypeTag(mod) == .Vector) { - const overflowed_data = try sema.arena.alloc(Value, ty.vectorLen(mod)); - const result_data = try sema.arena.alloc(Value, ty.vectorLen(mod)); - for (result_data, 0..) 
|*scalar, i| {
+        const vec_len = ty.vectorLen(mod);
+        const overflowed_data = try sema.arena.alloc(InternPool.Index, vec_len);
+        const result_data = try sema.arena.alloc(InternPool.Index, vec_len);
+        const scalar_ty = ty.scalarType(mod);
+        for (overflowed_data, result_data, 0..) |*of, *scalar, i| {
             const lhs_elem = try lhs.elemValue(sema.mod, i);
             const rhs_elem = try rhs.elemValue(sema.mod, i);
-            const of_math_result = try sema.intSubWithOverflowScalar(lhs_elem, rhs_elem, ty.scalarType(mod));
-            overflowed_data[i] = of_math_result.overflow_bit;
-            scalar.* = of_math_result.wrapped_result;
+            const of_math_result = try sema.intSubWithOverflowScalar(lhs_elem, rhs_elem, scalar_ty);
+            of.* = try of_math_result.overflow_bit.intern(Type.bool, mod);
+            scalar.* = try of_math_result.wrapped_result.intern(scalar_ty, mod);
         }
         return Value.OverflowArithmeticResult{
-            .overflow_bit = try Value.Tag.aggregate.create(sema.arena, overflowed_data),
-            .wrapped_result = try Value.Tag.aggregate.create(sema.arena, result_data),
+            .overflow_bit = (try mod.intern(.{ .aggregate = .{
+                .ty = (try mod.vectorType(.{ .len = vec_len, .child = .u1_type })).toIntern(),
+                .storage = .{ .elems = overflowed_data },
+            } })).toValue(),
+            .wrapped_result = (try mod.intern(.{ .aggregate = .{
+                .ty = ty.toIntern(),
+                .storage = .{ .elems = result_data },
+            } })).toValue(),
         };
     }
     return sema.intSubWithOverflowScalar(lhs, rhs, ty);
@@ -34057,13 +34180,17 @@ fn floatToInt(
 ) CompileError!Value {
     const mod = sema.mod;
     if (float_ty.zigTypeTag(mod) == .Vector) {
-        const elem_ty = float_ty.childType(mod);
-        const result_data = try sema.arena.alloc(Value, float_ty.vectorLen(mod));
+        const elem_ty = float_ty.scalarType(mod);
+        const result_data = try sema.arena.alloc(InternPool.Index, float_ty.vectorLen(mod));
+        const scalar_ty = int_ty.scalarType(mod);
         for (result_data, 0..) |*scalar, i| {
             const elem_val = try val.elemValue(sema.mod, i);
-            scalar.* = try sema.floatToIntScalar(block, src, elem_val, elem_ty, int_ty.scalarType(mod));
+            scalar.* = try (try sema.floatToIntScalar(block, src, elem_val, elem_ty, int_ty.scalarType(mod))).intern(scalar_ty, mod);
         }
-        return Value.Tag.aggregate.create(sema.arena, result_data);
+        return (try mod.intern(.{ .aggregate = .{
+            .ty = int_ty.toIntern(),
+            .storage = .{ .elems = result_data },
+        } })).toValue();
     }
     return sema.floatToIntScalar(block, src, val, float_ty, int_ty);
 }
@@ -34139,16 +34266,16 @@ fn intFitsInType(
     vector_index: ?*usize,
 ) CompileError!bool {
     const mod = sema.mod;
-    if (ty.ip_index == .comptime_int_type) return true;
+    if (ty.toIntern() == .comptime_int_type) return true;
     const info = ty.intInfo(mod);
-    switch (val.ip_index) {
+    switch (val.toIntern()) {
         .undef,
         .zero,
         .zero_usize,
         .zero_u8,
         => return true,
-        else => switch (mod.intern_pool.indexToKey(val.ip_index)) {
+        else => switch (mod.intern_pool.indexToKey(val.toIntern())) {
             .variable, .extern_func, .func, .ptr => {
                 const target = mod.getTarget();
                 const ptr_bits = target.ptrBitWidth();
@@ -34219,12 +34346,12 @@ fn intInRange(sema: *Sema, tag_ty: Type, int_val: Value, end: usize) !bool {
 /// Asserts the type is an enum.
 fn enumHasInt(sema: *Sema, ty: Type, int: Value) CompileError!bool {
     const mod = sema.mod;
-    const enum_type = mod.intern_pool.indexToKey(ty.ip_index).enum_type;
+    const enum_type = mod.intern_pool.indexToKey(ty.toIntern()).enum_type;
     assert(enum_type.tag_mode != .nonexhaustive);
     // The `tagValueIndex` function call below relies on the type being the integer tag type.
     // `getCoerced` assumes the value will fit the new type.
     if (!(try sema.intFitsInType(int, enum_type.tag_ty.toType(), null))) return false;
-    const int_coerced = try mod.intern_pool.getCoerced(sema.gpa, int.ip_index, enum_type.tag_ty);
+    const int_coerced = try mod.intern_pool.getCoerced(sema.gpa, int.toIntern(), enum_type.tag_ty);
     return enum_type.tagValueIndex(&mod.intern_pool, int_coerced) != null;
 }
@@ -34237,18 +34364,26 @@ fn intAddWithOverflow(
 ) !Value.OverflowArithmeticResult {
     const mod = sema.mod;
     if (ty.zigTypeTag(mod) == .Vector) {
-        const overflowed_data = try sema.arena.alloc(Value, ty.vectorLen(mod));
-        const result_data = try sema.arena.alloc(Value, ty.vectorLen(mod));
-        for (result_data, 0..) |*scalar, i| {
+        const vec_len = ty.vectorLen(mod);
+        const overflowed_data = try sema.arena.alloc(InternPool.Index, vec_len);
+        const result_data = try sema.arena.alloc(InternPool.Index, vec_len);
+        const scalar_ty = ty.scalarType(mod);
+        for (overflowed_data, result_data, 0..) |*of, *scalar, i| {
             const lhs_elem = try lhs.elemValue(sema.mod, i);
             const rhs_elem = try rhs.elemValue(sema.mod, i);
-            const of_math_result = try sema.intAddWithOverflowScalar(lhs_elem, rhs_elem, ty.scalarType(mod));
-            overflowed_data[i] = of_math_result.overflow_bit;
-            scalar.* = of_math_result.wrapped_result;
+            const of_math_result = try sema.intAddWithOverflowScalar(lhs_elem, rhs_elem, scalar_ty);
+            of.* = try of_math_result.overflow_bit.intern(Type.bool, mod);
+            scalar.* = try of_math_result.wrapped_result.intern(scalar_ty, mod);
         }
         return Value.OverflowArithmeticResult{
-            .overflow_bit = try Value.Tag.aggregate.create(sema.arena, overflowed_data),
-            .wrapped_result = try Value.Tag.aggregate.create(sema.arena, result_data),
+            .overflow_bit = (try mod.intern(.{ .aggregate = .{
+                .ty = (try mod.vectorType(.{ .len = vec_len, .child = .u1_type })).toIntern(),
+                .storage = .{ .elems = overflowed_data },
+            } })).toValue(),
+            .wrapped_result = (try mod.intern(.{ .aggregate = .{
+                .ty = ty.toIntern(),
+                .storage = .{ .elems = result_data },
+            } })).toValue(),
         };
     }
     return sema.intAddWithOverflowScalar(lhs, rhs, ty);
@@ -34340,14 +34475,17 @@ fn compareVector(
 ) !Value {
     const mod = sema.mod;
     assert(ty.zigTypeTag(mod) == .Vector);
-    const result_data = try sema.arena.alloc(Value, ty.vectorLen(mod));
+    const result_data = try sema.arena.alloc(InternPool.Index, ty.vectorLen(mod));
     for (result_data, 0..) |*scalar, i| {
         const lhs_elem = try lhs.elemValue(sema.mod, i);
         const rhs_elem = try rhs.elemValue(sema.mod, i);
         const res_bool = try sema.compareScalar(lhs_elem, op, rhs_elem, ty.scalarType(mod));
-        scalar.* = Value.makeBool(res_bool);
+        scalar.* = try Value.makeBool(res_bool).intern(Type.bool, mod);
     }
-    return Value.Tag.aggregate.create(sema.arena, result_data);
+    return (try mod.intern(.{ .aggregate = .{
+        .ty = (try mod.vectorType(.{ .len = ty.vectorLen(mod), .child = .bool_type })).toIntern(),
+        .storage = .{ .elems = result_data },
+    } })).toValue();
 }
 
 /// Returns the type of a pointer to an element.
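The bulk of the Sema.zig churn above is the mechanical replacement of direct `ip_index` field reads with `toIntern()`. Going through an accessor lets it assert that the value has actually been migrated into the intern pool, so any remaining legacy tagged value fails loudly at the call site instead of silently indexing the pool with `.none`. A plausible sketch of the accessor pair; the exact shapes here are assumptions, not definitions taken from this patch:

    pub fn toIntern(val: Value) InternPool.Index {
        // Legacy (tag/payload) values carry no pool index; calling
        // toIntern() on one is a migration bug, so fail fast.
        std.debug.assert(val.ip_index != .none);
        return val.ip_index;
    }

    pub fn toValue(index: InternPool.Index) Value {
        // Wraps a pool index back into a Value; the legacy payload
        // representation is unused for interned values.
        return .{ .ip_index = index, .legacy = undefined };
    }

The same reasoning applies to `toIntern()` on `Type`, which is why hunks of the form `indexToKey(ty.ip_index)` becoming `indexToKey(ty.toIntern())` repeat across every backend touched below.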
diff --git a/src/TypedValue.zig b/src/TypedValue.zig index 2222c1060e..d82fb72dea 100644 --- a/src/TypedValue.zig +++ b/src/TypedValue.zig @@ -103,10 +103,26 @@ pub fn print( return writer.writeAll(" }"); }, .bytes => return writer.print("\"{}\"", .{std.zig.fmtEscapes(val.castTag(.bytes).?.data)}), - .str_lit => { - const str_lit = val.castTag(.str_lit).?.data; - const bytes = mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; - return writer.print("\"{}\"", .{std.zig.fmtEscapes(bytes)}); + .repeated => { + if (level == 0) { + return writer.writeAll(".{ ... }"); + } + var i: u32 = 0; + try writer.writeAll(".{ "); + const elem_tv = TypedValue{ + .ty = ty.elemType2(mod), + .val = val.castTag(.repeated).?.data, + }; + const len = ty.arrayLen(mod); + const max_len = std.math.min(len, max_aggregate_items); + while (i < max_len) : (i += 1) { + if (i != 0) try writer.writeAll(", "); + try print(elem_tv, writer, level - 1, mod); + } + if (len > max_aggregate_items) { + try writer.writeAll(", ..."); + } + return writer.writeAll(" }"); }, // TODO these should not appear in this function .inferred_alloc => return writer.writeAll("(inferred allocation value)"), diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index faf158e2a4..16b103c898 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -846,7 +846,6 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .ptr_elem_ptr => try self.airPtrElemPtr(inst), .constant => unreachable, // excluded from function bodies - .const_ty => unreachable, // excluded from function bodies .interned => unreachable, // excluded from function bodies .unreach => self.finishAirBookkeeping(), @@ -6169,7 +6168,6 @@ fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue { } return gop.value_ptr.*; }, - .const_ty => unreachable, else => return self.getResolvedInstValue(inst_index), } } diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index 778662fe86..f0a44b72a8 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -830,7 +830,6 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .ptr_elem_ptr => try self.airPtrElemPtr(inst), .constant => unreachable, // excluded from function bodies - .const_ty => unreachable, // excluded from function bodies .interned => unreachable, // excluded from function bodies .unreach => self.finishAirBookkeeping(), @@ -6117,7 +6116,6 @@ fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue { } return gop.value_ptr.*; }, - .const_ty => unreachable, else => return self.getResolvedInstValue(inst_index), } } diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index a9cd130fa8..7f4715a451 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -660,7 +660,6 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .ptr_elem_ptr => try self.airPtrElemPtr(inst), .constant => unreachable, // excluded from function bodies - .const_ty => unreachable, // excluded from function bodies .interned => unreachable, // excluded from function bodies .unreach => self.finishAirBookkeeping(), @@ -2571,7 +2570,6 @@ fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue { } return gop.value_ptr.*; }, - .const_ty => unreachable, else => return self.getResolvedInstValue(inst_index), } } diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index dc086dc00f..9f44dc0e8a 100644 --- a/src/arch/sparc64/CodeGen.zig +++ 
b/src/arch/sparc64/CodeGen.zig @@ -680,7 +680,6 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .ptr_elem_ptr => try self.airPtrElemPtr(inst), .constant => unreachable, // excluded from function bodies - .const_ty => unreachable, // excluded from function bodies .interned => unreachable, // excluded from function bodies .unreach => self.finishAirBookkeeping(), @@ -4567,7 +4566,6 @@ fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue { } return gop.value_ptr.*; }, - .const_ty => unreachable, else => return self.getResolvedInstValue(inst), } } diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index 66c0399343..85fc8346f8 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -1833,7 +1833,6 @@ fn genInst(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const air_tags = func.air.instructions.items(.tag); return switch (air_tags[inst]) { .constant => unreachable, - .const_ty => unreachable, .interned => unreachable, .add => func.airBinOp(inst, .add), @@ -6903,28 +6902,12 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 { .child = .u8_type, .sentinel = .zero_u8, }); - const string_bytes = &mod.string_literal_bytes; - try string_bytes.ensureUnusedCapacity(mod.gpa, tag_name.len); - const gop = try mod.string_literal_table.getOrPutContextAdapted(mod.gpa, @as([]const u8, tag_name), Module.StringLiteralAdapter{ - .bytes = string_bytes, - }, Module.StringLiteralContext{ - .bytes = string_bytes, - }); - if (!gop.found_existing) { - gop.key_ptr.* = .{ - .index = @intCast(u32, string_bytes.items.len), - .len = @intCast(u32, tag_name.len), - }; - string_bytes.appendSliceAssumeCapacity(tag_name); - gop.value_ptr.* = .none; - } - var name_val_payload: Value.Payload.StrLit = .{ - .base = .{ .tag = .str_lit }, - .data = gop.key_ptr.*, - }; - const name_val = Value.initPayload(&name_val_payload.base); + const name_val = try mod.intern(.{ .aggregate = .{ + .ty = name_ty.toIntern(), + .storage = .{ .bytes = tag_name }, + } }); const tag_sym_index = try func.bin_file.lowerUnnamedConst( - .{ .ty = name_ty, .val = name_val }, + .{ .ty = name_ty, .val = name_val.toValue() }, enum_decl_index, ); diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 4a5532a239..f2ac985844 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -1923,7 +1923,6 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .ptr_elem_ptr => try self.airPtrElemPtr(inst), .constant => unreachable, // excluded from function bodies - .const_ty => unreachable, // excluded from function bodies .interned => unreachable, // excluded from function bodies .unreach => if (self.wantSafety()) try self.airTrap() else self.finishAirBookkeeping(), @@ -2099,7 +2098,7 @@ fn feed(self: *Self, bt: *Liveness.BigTomb, operand: Air.Inst.Ref) void { /// Asserts there is already capacity to insert into top branch inst_table. 
fn processDeath(self: *Self, inst: Air.Inst.Index) void { switch (self.air.instructions.items(.tag)[inst]) { - .constant, .const_ty => unreachable, + .constant => unreachable, else => self.inst_tracking.getPtr(inst).?.die(self, inst), } } @@ -11593,7 +11592,6 @@ fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue { })); break :tracking gop.value_ptr; }, - .const_ty => unreachable, else => self.inst_tracking.getPtr(inst).?, }.short; switch (mcv) { @@ -11608,7 +11606,6 @@ fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue { fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) *InstTracking { const tracking = switch (self.air.instructions.items(.tag)[inst]) { .constant => &self.const_tracking, - .const_ty => unreachable, else => &self.inst_tracking, }.getPtr(inst).?; return switch (tracking.short) { diff --git a/src/codegen.zig b/src/codegen.zig index b9b7dac90f..f343f0441d 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -204,150 +204,6 @@ pub fn generateSymbol( return .ok; } - if (typed_value.val.ip_index == .none) switch (typed_value.ty.zigTypeTag(mod)) { - .Array => switch (typed_value.val.tag()) { - .bytes => { - const bytes = typed_value.val.castTag(.bytes).?.data; - const len = @intCast(usize, typed_value.ty.arrayLenIncludingSentinel(mod)); - // The bytes payload already includes the sentinel, if any - try code.ensureUnusedCapacity(len); - code.appendSliceAssumeCapacity(bytes[0..len]); - return Result.ok; - }, - .str_lit => { - const str_lit = typed_value.val.castTag(.str_lit).?.data; - const bytes = mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; - try code.ensureUnusedCapacity(bytes.len + 1); - code.appendSliceAssumeCapacity(bytes); - if (typed_value.ty.sentinel(mod)) |sent_val| { - const byte = @intCast(u8, sent_val.toUnsignedInt(mod)); - code.appendAssumeCapacity(byte); - } - return Result.ok; - }, - else => return Result{ - .fail = try ErrorMsg.create( - bin_file.allocator, - src_loc, - "TODO implement generateSymbol for array type value: {s}", - .{@tagName(typed_value.val.tag())}, - ), - }, - }, - .Struct => { - if (typed_value.ty.containerLayout(mod) == .Packed) { - const struct_obj = mod.typeToStruct(typed_value.ty).?; - const fields = struct_obj.fields.values(); - const field_vals = typed_value.val.castTag(.aggregate).?.data; - const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse return error.Overflow; - const current_pos = code.items.len; - try code.resize(current_pos + abi_size); - var bits: u16 = 0; - - for (field_vals, 0..) |field_val, index| { - const field_ty = fields[index].ty; - // pointer may point to a decl which must be marked used - // but can also result in a relocation. Therefore we handle those seperately. 
- if (field_ty.zigTypeTag(mod) == .Pointer) { - const field_size = math.cast(usize, field_ty.abiSize(mod)) orelse return error.Overflow; - var tmp_list = try std.ArrayList(u8).initCapacity(code.allocator, field_size); - defer tmp_list.deinit(); - switch (try generateSymbol(bin_file, src_loc, .{ - .ty = field_ty, - .val = field_val, - }, &tmp_list, debug_output, reloc_info)) { - .ok => @memcpy(code.items[current_pos..][0..tmp_list.items.len], tmp_list.items), - .fail => |em| return Result{ .fail = em }, - } - } else { - field_val.writeToPackedMemory(field_ty, mod, code.items[current_pos..], bits) catch unreachable; - } - bits += @intCast(u16, field_ty.bitSize(mod)); - } - - return Result.ok; - } - - const struct_begin = code.items.len; - const field_vals = typed_value.val.castTag(.aggregate).?.data; - for (field_vals, 0..) |field_val, index| { - const field_ty = typed_value.ty.structFieldType(index, mod); - if (!field_ty.hasRuntimeBits(mod)) continue; - - switch (try generateSymbol(bin_file, src_loc, .{ - .ty = field_ty, - .val = field_val, - }, code, debug_output, reloc_info)) { - .ok => {}, - .fail => |em| return Result{ .fail = em }, - } - const unpadded_field_end = code.items.len - struct_begin; - - // Pad struct members if required - const padded_field_end = typed_value.ty.structFieldOffset(index + 1, mod); - const padding = math.cast(usize, padded_field_end - unpadded_field_end) orelse return error.Overflow; - - if (padding > 0) { - try code.writer().writeByteNTimes(0, padding); - } - } - - return Result.ok; - }, - .Vector => switch (typed_value.val.tag()) { - .bytes => { - const bytes = typed_value.val.castTag(.bytes).?.data; - const len = math.cast(usize, typed_value.ty.arrayLen(mod)) orelse return error.Overflow; - const padding = math.cast(usize, typed_value.ty.abiSize(mod) - len) orelse - return error.Overflow; - try code.ensureUnusedCapacity(len + padding); - code.appendSliceAssumeCapacity(bytes[0..len]); - if (padding > 0) try code.writer().writeByteNTimes(0, padding); - return Result.ok; - }, - .str_lit => { - const str_lit = typed_value.val.castTag(.str_lit).?.data; - const bytes = mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; - const padding = math.cast(usize, typed_value.ty.abiSize(mod) - str_lit.len) orelse - return error.Overflow; - try code.ensureUnusedCapacity(str_lit.len + padding); - code.appendSliceAssumeCapacity(bytes); - if (padding > 0) try code.writer().writeByteNTimes(0, padding); - return Result.ok; - }, - else => unreachable, - }, - .Frame, - .AnyFrame, - => return .{ .fail = try ErrorMsg.create( - bin_file.allocator, - src_loc, - "TODO generateSymbol for type {}", - .{typed_value.ty.fmt(mod)}, - ) }, - .Float, - .Union, - .Optional, - .ErrorUnion, - .ErrorSet, - .Int, - .Enum, - .Bool, - .Pointer, - => unreachable, // handled below - .Type, - .Void, - .NoReturn, - .ComptimeFloat, - .ComptimeInt, - .Undefined, - .Null, - .Opaque, - .EnumLiteral, - .Fn, - => unreachable, // comptime-only types - }; - switch (mod.intern_pool.indexToKey(typed_value.val.ip_index)) { .int_type, .ptr_type, diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 1bb8130b1f..76533b4284 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -870,7 +870,7 @@ pub const DeclGen = struct { } // First try specific tag representations for more efficiency. 
- switch (val.ip_index) { + switch (val.toIntern()) { .undef => { const ai = ty.arrayInfo(mod); try writer.writeByte('{'); @@ -893,24 +893,6 @@ pub const DeclGen = struct { try writer.writeByte('}'); return; }, - .none => switch (val.tag()) { - .bytes, .str_lit => |t| { - const bytes = switch (t) { - .bytes => val.castTag(.bytes).?.data, - .str_lit => bytes: { - const str_lit = val.castTag(.str_lit).?.data; - break :bytes mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; - }, - else => unreachable, - }; - const sentinel = if (ty.sentinel(mod)) |sentinel| @intCast(u8, sentinel.toUnsignedInt(mod)) else null; - try writer.print("{s}", .{ - fmtStringLiteral(bytes[0..@intCast(usize, ty.arrayLen(mod))], sentinel), - }); - return; - }, - else => {}, - }, else => {}, } // Fall back to generic implementation. @@ -2909,7 +2891,6 @@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, const result_value = switch (air_tags[inst]) { // zig fmt: off .constant => unreachable, // excluded from function bodies - .const_ty => unreachable, // excluded from function bodies .interned => unreachable, // excluded from function bodies .arg => try airArg(f, inst), diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index f8ddddad1c..e54b951aa6 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -1501,7 +1501,7 @@ pub const Object = struct { } const ip = &mod.intern_pool; - const enum_type = ip.indexToKey(ty.ip_index).enum_type; + const enum_type = ip.indexToKey(ty.toIntern()).enum_type; const enumerators = try gpa.alloc(*llvm.DIEnumerator, enum_type.names.len); defer gpa.free(enumerators); @@ -1697,7 +1697,7 @@ pub const Object = struct { return ptr_di_ty; }, .Opaque => { - if (ty.ip_index == .anyopaque_type) { + if (ty.toIntern() == .anyopaque_type) { const di_ty = dib.createBasicType("anyopaque", 0, DW.ATE.signed); gop.value_ptr.* = AnnotatedDITypePtr.initFull(di_ty); return di_ty; @@ -1981,7 +1981,7 @@ pub const Object = struct { break :blk fwd_decl; }; - switch (mod.intern_pool.indexToKey(ty.ip_index)) { + switch (mod.intern_pool.indexToKey(ty.toIntern())) { .anon_struct_type => |tuple| { var di_fields: std.ArrayListUnmanaged(*llvm.DIType) = .{}; defer di_fields.deinit(gpa); @@ -2466,7 +2466,7 @@ pub const DeclGen = struct { global.setGlobalConstant(.True); break :init_val decl.val; }; - if (init_val.ip_index != .unreachable_value) { + if (init_val.toIntern() != .unreachable_value) { const llvm_init = try dg.lowerValue(.{ .ty = decl.ty, .val = init_val }); if (global.globalGetValueType() == llvm_init.typeOf()) { global.setInitializer(llvm_init); @@ -2802,12 +2802,12 @@ pub const DeclGen = struct { return dg.context.pointerType(llvm_addrspace); }, .Opaque => { - if (t.ip_index == .anyopaque_type) return dg.context.intType(8); + if (t.toIntern() == .anyopaque_type) return dg.context.intType(8); const gop = try dg.object.type_map.getOrPut(gpa, t.toIntern()); if (gop.found_existing) return gop.value_ptr.*; - const opaque_type = mod.intern_pool.indexToKey(t.ip_index).opaque_type; + const opaque_type = mod.intern_pool.indexToKey(t.toIntern()).opaque_type; const name = try mod.opaqueFullyQualifiedName(opaque_type); defer gpa.free(name); @@ -2897,7 +2897,7 @@ pub const DeclGen = struct { const gop = try dg.object.type_map.getOrPut(gpa, t.toIntern()); if (gop.found_existing) return gop.value_ptr.*; - const struct_type = switch (mod.intern_pool.indexToKey(t.ip_index)) { + const struct_type = switch (mod.intern_pool.indexToKey(t.toIntern())) { .anon_struct_type 
=> |tuple| { const llvm_struct_ty = dg.context.structCreateNamed(""); gop.value_ptr.* = llvm_struct_ty; // must be done before any recursive calls @@ -3199,7 +3199,7 @@ pub const DeclGen = struct { const mod = dg.module; const target = mod.getTarget(); var tv = arg_tv; - switch (mod.intern_pool.indexToKey(tv.val.ip_index)) { + switch (mod.intern_pool.indexToKey(tv.val.toIntern())) { .runtime_value => |rt| tv.val = rt.val.toValue(), else => {}, } @@ -3208,284 +3208,7 @@ pub const DeclGen = struct { return llvm_type.getUndef(); } - if (tv.val.ip_index == .none) switch (tv.ty.zigTypeTag(mod)) { - .Array => switch (tv.val.tag()) { - .bytes => { - const bytes = tv.val.castTag(.bytes).?.data; - return dg.context.constString( - bytes.ptr, - @intCast(c_uint, tv.ty.arrayLenIncludingSentinel(mod)), - .True, // Don't null terminate. Bytes has the sentinel, if any. - ); - }, - .str_lit => { - const str_lit = tv.val.castTag(.str_lit).?.data; - const bytes = dg.module.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; - if (tv.ty.sentinel(mod)) |sent_val| { - const byte = @intCast(u8, sent_val.toUnsignedInt(mod)); - if (byte == 0 and bytes.len > 0) { - return dg.context.constString( - bytes.ptr, - @intCast(c_uint, bytes.len), - .False, // Yes, null terminate. - ); - } - var array = std.ArrayList(u8).init(dg.gpa); - defer array.deinit(); - try array.ensureUnusedCapacity(bytes.len + 1); - array.appendSliceAssumeCapacity(bytes); - array.appendAssumeCapacity(byte); - return dg.context.constString( - array.items.ptr, - @intCast(c_uint, array.items.len), - .True, // Don't null terminate. - ); - } else { - return dg.context.constString( - bytes.ptr, - @intCast(c_uint, bytes.len), - .True, // Don't null terminate. `bytes` has the sentinel, if any. - ); - } - }, - else => unreachable, - }, - .Struct => { - const llvm_struct_ty = try dg.lowerType(tv.ty); - const gpa = dg.gpa; - - const struct_type = switch (mod.intern_pool.indexToKey(tv.ty.ip_index)) { - .anon_struct_type => |tuple| { - var llvm_fields: std.ArrayListUnmanaged(*llvm.Value) = .{}; - defer llvm_fields.deinit(gpa); - - try llvm_fields.ensureUnusedCapacity(gpa, tuple.types.len); - - comptime assert(struct_layout_version == 2); - var offset: u64 = 0; - var big_align: u32 = 0; - var need_unnamed = false; - - for (tuple.types, tuple.values, 0..) |field_ty, field_val, i| { - if (field_val != .none) continue; - if (!field_ty.toType().hasRuntimeBitsIgnoreComptime(mod)) continue; - - const field_align = field_ty.toType().abiAlignment(mod); - big_align = @max(big_align, field_align); - const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, field_align); - - const padding_len = offset - prev_offset; - if (padding_len > 0) { - const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); - // TODO make this and all other padding elsewhere in debug - // builds be 0xaa not undef. 
- llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); - } - - const field_llvm_val = try dg.lowerValue(.{ - .ty = field_ty.toType(), - .val = try tv.val.fieldValue(mod, i), - }); - - need_unnamed = need_unnamed or dg.isUnnamedType(field_ty.toType(), field_llvm_val); - - llvm_fields.appendAssumeCapacity(field_llvm_val); - - offset += field_ty.toType().abiSize(mod); - } - { - const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, big_align); - const padding_len = offset - prev_offset; - if (padding_len > 0) { - const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); - llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); - } - } - - if (need_unnamed) { - return dg.context.constStruct( - llvm_fields.items.ptr, - @intCast(c_uint, llvm_fields.items.len), - .False, - ); - } else { - return llvm_struct_ty.constNamedStruct( - llvm_fields.items.ptr, - @intCast(c_uint, llvm_fields.items.len), - ); - } - }, - .struct_type => |struct_type| struct_type, - else => unreachable, - }; - - const struct_obj = mod.structPtrUnwrap(struct_type.index).?; - - if (struct_obj.layout == .Packed) { - assert(struct_obj.haveLayout()); - const big_bits = struct_obj.backing_int_ty.bitSize(mod); - const int_llvm_ty = dg.context.intType(@intCast(c_uint, big_bits)); - const fields = struct_obj.fields.values(); - comptime assert(Type.packed_struct_layout_version == 2); - var running_int: *llvm.Value = int_llvm_ty.constNull(); - var running_bits: u16 = 0; - for (fields, 0..) |field, i| { - if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue; - - const non_int_val = try dg.lowerValue(.{ - .ty = field.ty, - .val = try tv.val.fieldValue(mod, i), - }); - const ty_bit_size = @intCast(u16, field.ty.bitSize(mod)); - const small_int_ty = dg.context.intType(ty_bit_size); - const small_int_val = if (field.ty.isPtrAtRuntime(mod)) - non_int_val.constPtrToInt(small_int_ty) - else - non_int_val.constBitCast(small_int_ty); - const shift_rhs = int_llvm_ty.constInt(running_bits, .False); - // If the field is as large as the entire packed struct, this - // zext would go from, e.g. i16 to i16. This is legal with - // constZExtOrBitCast but not legal with constZExt. - const extended_int_val = small_int_val.constZExtOrBitCast(int_llvm_ty); - const shifted = extended_int_val.constShl(shift_rhs); - running_int = running_int.constOr(shifted); - running_bits += ty_bit_size; - } - return running_int; - } - - const llvm_field_count = llvm_struct_ty.countStructElementTypes(); - var llvm_fields = try std.ArrayListUnmanaged(*llvm.Value).initCapacity(gpa, llvm_field_count); - defer llvm_fields.deinit(gpa); - - comptime assert(struct_layout_version == 2); - var offset: u64 = 0; - var big_align: u32 = 0; - var need_unnamed = false; - - var it = struct_obj.runtimeFieldIterator(mod); - while (it.next()) |field_and_index| { - const field = field_and_index.field; - const field_align = field.alignment(mod, struct_obj.layout); - big_align = @max(big_align, field_align); - const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, field_align); - - const padding_len = offset - prev_offset; - if (padding_len > 0) { - const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); - // TODO make this and all other padding elsewhere in debug - // builds be 0xaa not undef. 
- llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); - } - - const field_llvm_val = try dg.lowerValue(.{ - .ty = field.ty, - .val = try tv.val.fieldValue(mod, field_and_index.index), - }); - - need_unnamed = need_unnamed or dg.isUnnamedType(field.ty, field_llvm_val); - - llvm_fields.appendAssumeCapacity(field_llvm_val); - - offset += field.ty.abiSize(mod); - } - { - const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, big_align); - const padding_len = offset - prev_offset; - if (padding_len > 0) { - const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); - llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); - } - } - - if (need_unnamed) { - return dg.context.constStruct( - llvm_fields.items.ptr, - @intCast(c_uint, llvm_fields.items.len), - .False, - ); - } else { - return llvm_struct_ty.constNamedStruct( - llvm_fields.items.ptr, - @intCast(c_uint, llvm_fields.items.len), - ); - } - }, - .Vector => switch (tv.val.tag()) { - .bytes => { - // Note, sentinel is not stored even if the type has a sentinel. - const bytes = tv.val.castTag(.bytes).?.data; - const vector_len = @intCast(usize, tv.ty.arrayLen(mod)); - assert(vector_len == bytes.len or vector_len + 1 == bytes.len); - - const elem_ty = tv.ty.childType(mod); - const llvm_elems = try dg.gpa.alloc(*llvm.Value, vector_len); - defer dg.gpa.free(llvm_elems); - for (llvm_elems, 0..) |*elem, i| { - elem.* = try dg.lowerValue(.{ - .ty = elem_ty, - .val = try mod.intValue(elem_ty, bytes[i]), - }); - } - return llvm.constVector( - llvm_elems.ptr, - @intCast(c_uint, llvm_elems.len), - ); - }, - .str_lit => { - // Note, sentinel is not stored - const str_lit = tv.val.castTag(.str_lit).?.data; - const bytes = dg.module.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; - const vector_len = @intCast(usize, tv.ty.arrayLen(mod)); - assert(vector_len == bytes.len); - - const elem_ty = tv.ty.childType(mod); - const llvm_elems = try dg.gpa.alloc(*llvm.Value, vector_len); - defer dg.gpa.free(llvm_elems); - for (llvm_elems, 0..) 
|*elem, i| { - elem.* = try dg.lowerValue(.{ - .ty = elem_ty, - .val = try mod.intValue(elem_ty, bytes[i]), - }); - } - return llvm.constVector( - llvm_elems.ptr, - @intCast(c_uint, llvm_elems.len), - ); - }, - else => unreachable, - }, - .Float, - .Union, - .Optional, - .ErrorUnion, - .ErrorSet, - .Int, - .Enum, - .Bool, - .Pointer, - => unreachable, // handled below - .Frame, - .AnyFrame, - => return dg.todo("implement const of type '{}'", .{tv.ty.fmtDebug()}), - .Type, - .Void, - .NoReturn, - .ComptimeFloat, - .ComptimeInt, - .Undefined, - .Null, - .Opaque, - .EnumLiteral, - .Fn, - => unreachable, // comptime-only types - }; - - switch (mod.intern_pool.indexToKey(tv.val.ip_index)) { + switch (mod.intern_pool.indexToKey(tv.val.toIntern())) { .int_type, .ptr_type, .array_type, @@ -3553,7 +3276,7 @@ pub const DeclGen = struct { const llvm_payload_value = try dg.lowerValue(.{ .ty = payload_type, .val = switch (error_union.val) { - .err_name => try mod.intern(.{ .undef = payload_type.ip_index }), + .err_name => try mod.intern(.{ .undef = payload_type.toIntern() }), .payload => |payload| payload, }.toValue(), }); @@ -3700,7 +3423,7 @@ pub const DeclGen = struct { fields_buf[0] = try dg.lowerValue(.{ .ty = payload_ty, .val = switch (opt.val) { - .none => try mod.intern(.{ .undef = payload_ty.ip_index }), + .none => try mod.intern(.{ .undef = payload_ty.toIntern() }), else => |payload| payload, }.toValue(), }); @@ -3711,7 +3434,7 @@ pub const DeclGen = struct { } return dg.context.constStruct(&fields_buf, llvm_field_count, .False); }, - .aggregate => |aggregate| switch (mod.intern_pool.indexToKey(tv.ty.ip_index)) { + .aggregate => |aggregate| switch (mod.intern_pool.indexToKey(tv.ty.toIntern())) { .array_type => switch (aggregate.storage) { .bytes => |bytes| return dg.context.constString( bytes.ptr, @@ -3802,7 +3525,7 @@ pub const DeclGen = struct { const llvm_struct_ty = try dg.lowerType(tv.ty); const gpa = dg.gpa; - const struct_type = switch (mod.intern_pool.indexToKey(tv.ty.ip_index)) { + const struct_type = switch (mod.intern_pool.indexToKey(tv.ty.toIntern())) { .anon_struct_type => |tuple| { var llvm_fields: std.ArrayListUnmanaged(*llvm.Value) = .{}; defer llvm_fields.deinit(gpa); @@ -3967,9 +3690,9 @@ pub const DeclGen = struct { }, .un => { const llvm_union_ty = try dg.lowerType(tv.ty); - const tag_and_val: Value.Payload.Union.Data = switch (tv.val.ip_index) { + const tag_and_val: Value.Payload.Union.Data = switch (tv.val.toIntern()) { .none => tv.val.castTag(.@"union").?.data, - else => switch (mod.intern_pool.indexToKey(tv.val.ip_index)) { + else => switch (mod.intern_pool.indexToKey(tv.val.toIntern())) { .un => |un| .{ .tag = un.tag.toValue(), .val = un.val.toValue() }, else => unreachable, }, @@ -4107,7 +3830,7 @@ pub const DeclGen = struct { fn lowerParentPtr(dg: *DeclGen, ptr_val: Value, byte_aligned: bool) Error!*llvm.Value { const mod = dg.module; const target = mod.getTarget(); - return switch (mod.intern_pool.indexToKey(ptr_val.ip_index)) { + return switch (mod.intern_pool.indexToKey(ptr_val.toIntern())) { .int => |int| dg.lowerIntAsPtr(int), .ptr => |ptr| switch (ptr.addr) { .decl => |decl| dg.lowerParentPtrDecl(ptr_val, decl), @@ -4799,7 +4522,6 @@ pub const FuncGen = struct { .vector_store_elem => try self.airVectorStoreElem(inst), .constant => unreachable, - .const_ty => unreachable, .interned => unreachable, .unreach => self.airUnreach(inst), @@ -6108,7 +5830,7 @@ pub const FuncGen = struct { const struct_llvm_ty = try self.dg.lowerType(struct_ty); const field_ptr = 
self.builder.buildStructGEP(struct_llvm_ty, struct_llvm_val, llvm_field.index, ""); const field_ptr_ty = try mod.ptrType(.{ - .elem_type = llvm_field.ty.ip_index, + .elem_type = llvm_field.ty.toIntern(), .alignment = InternPool.Alignment.fromNonzeroByteUnits(llvm_field.alignment), }); if (isByRef(field_ty, mod)) { @@ -6984,7 +6706,7 @@ pub const FuncGen = struct { const struct_llvm_ty = try self.dg.lowerType(struct_ty); const field_ptr = self.builder.buildStructGEP(struct_llvm_ty, self.err_ret_trace.?, llvm_field.index, ""); const field_ptr_ty = try mod.ptrType(.{ - .elem_type = llvm_field.ty.ip_index, + .elem_type = llvm_field.ty.toIntern(), .alignment = InternPool.Alignment.fromNonzeroByteUnits(llvm_field.alignment), }); return self.load(field_ptr, field_ptr_ty); @@ -8915,7 +8637,7 @@ pub const FuncGen = struct { fn getIsNamedEnumValueFunction(self: *FuncGen, enum_ty: Type) !*llvm.Value { const mod = self.dg.module; - const enum_type = mod.intern_pool.indexToKey(enum_ty.ip_index).enum_type; + const enum_type = mod.intern_pool.indexToKey(enum_ty.toIntern()).enum_type; // TODO: detect when the type changes and re-emit this function. const gop = try self.dg.object.named_enum_map.getOrPut(self.dg.gpa, enum_type.decl); @@ -8988,7 +8710,7 @@ pub const FuncGen = struct { fn getEnumTagNameFunction(self: *FuncGen, enum_ty: Type) !*llvm.Value { const mod = self.dg.module; - const enum_type = mod.intern_pool.indexToKey(enum_ty.ip_index).enum_type; + const enum_type = mod.intern_pool.indexToKey(enum_ty.toIntern()).enum_type; // TODO: detect when the type changes and re-emit this function. const gop = try self.dg.object.decl_map.getOrPut(self.dg.gpa, enum_type.decl); @@ -10529,7 +10251,7 @@ fn llvmField(ty: Type, field_index: usize, mod: *Module) ?LlvmField { var offset: u64 = 0; var big_align: u32 = 0; - const struct_type = switch (mod.intern_pool.indexToKey(ty.ip_index)) { + const struct_type = switch (mod.intern_pool.indexToKey(ty.toIntern())) { .anon_struct_type => |tuple| { var llvm_field_index: c_uint = 0; for (tuple.types, tuple.values, 0..) |field_ty, field_val, i| { @@ -10927,7 +10649,7 @@ const ParamTypeIterator = struct { .riscv32, .riscv64 => { it.zig_index += 1; it.llvm_index += 1; - if (ty.ip_index == .f16_type) { + if (ty.toIntern() == .f16_type) { return .as_u16; } switch (riscv_c_abi.classifyType(ty, mod)) { @@ -11146,7 +10868,7 @@ fn isByRef(ty: Type, mod: *Module) bool { .Struct => { // Packed structs are represented to LLVM as integers. 
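In `isByRef` here, as in `llvmField` and the constant lowering above, struct types are now classified by switching on the interned key rather than by querying `Type` helpers. Tuples arrive as `.anon_struct_type` with their field types and comptime values inline, while declared structs arrive as `.struct_type` holding an index into the module. A hedged sketch of that shared pattern, assuming `ty` and `mod` as in the surrounding functions:

    // Sketch of the classification pattern used at the converted call sites.
    switch (mod.intern_pool.indexToKey(ty.toIntern())) {
        .anon_struct_type => |tuple| {
            // Tuple fields live inline in the key; a .none value marks a runtime field.
            for (tuple.types, tuple.values) |field_ty, field_val| {
                _ = field_ty;
                _ = field_val;
            }
        },
        .struct_type => |struct_type| {
            // Declared structs are reached through the module's struct table.
            const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
            _ = struct_obj;
        },
        else => unreachable,
    }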
if (ty.containerLayout(mod) == .Packed) return false; - const struct_type = switch (mod.intern_pool.indexToKey(ty.ip_index)) { + const struct_type = switch (mod.intern_pool.indexToKey(ty.toIntern())) { .anon_struct_type => |tuple| { var count: usize = 0; for (tuple.types, tuple.values) |field_ty, field_val| { @@ -11261,7 +10983,7 @@ fn backendSupportsF128(target: std.Target) bool { /// LLVM does not support all relevant intrinsics for all targets, so we /// may need to manually generate a libc call fn intrinsicsAllowed(scalar_ty: Type, target: std.Target) bool { - return switch (scalar_ty.ip_index) { + return switch (scalar_ty.toIntern()) { .f16_type => backendSupportsF16(target), .f80_type => (target.c_type_bit_size(.longdouble) == 80) and backendSupportsF80(target), .f128_type => (target.c_type_bit_size(.longdouble) == 128) and backendSupportsF128(target), diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 96c723989a..2b04e03a5a 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -616,7 +616,7 @@ pub const DeclGen = struct { const mod = dg.module; var val = arg_val; - switch (mod.intern_pool.indexToKey(val.ip_index)) { + switch (mod.intern_pool.indexToKey(val.toIntern())) { .runtime_value => |rt| val = rt.val.toValue(), else => {}, } @@ -626,75 +626,7 @@ pub const DeclGen = struct { return try self.addUndef(size); } - if (val.ip_index == .none) switch (ty.zigTypeTag(mod)) { - .Array => switch (val.tag()) { - .str_lit => { - const str_lit = val.castTag(.str_lit).?.data; - const bytes = dg.module.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; - try self.addBytes(bytes); - if (ty.sentinel(mod)) |sentinel| { - try self.addByte(@intCast(u8, sentinel.toUnsignedInt(mod))); - } - }, - .bytes => { - const bytes = val.castTag(.bytes).?.data; - try self.addBytes(bytes); - }, - else => |tag| return dg.todo("indirect array constant with tag {s}", .{@tagName(tag)}), - }, - .Struct => { - if (ty.isSimpleTupleOrAnonStruct(mod)) { - unreachable; // TODO - } else { - const struct_ty = mod.typeToStruct(ty).?; - - if (struct_ty.layout == .Packed) { - return dg.todo("packed struct constants", .{}); - } - - const struct_begin = self.size; - const field_vals = val.castTag(.aggregate).?.data; - for (struct_ty.fields.values(), 0..) |field, i| { - if (field.is_comptime or !field.ty.hasRuntimeBits(mod)) continue; - try self.lower(field.ty, field_vals[i]); - - // Add padding if required. - // TODO: Add to type generation as well? 
- const unpadded_field_end = self.size - struct_begin; - const padded_field_end = ty.structFieldOffset(i + 1, mod); - const padding = padded_field_end - unpadded_field_end; - try self.addUndef(padding); - } - } - }, - .Vector, - .Frame, - .AnyFrame, - => return dg.todo("indirect constant of type {}", .{ty.fmt(mod)}), - .Float, - .Union, - .Optional, - .ErrorUnion, - .ErrorSet, - .Int, - .Enum, - .Bool, - .Pointer, - => unreachable, // handled below - .Type, - .Void, - .NoReturn, - .ComptimeFloat, - .ComptimeInt, - .Undefined, - .Null, - .Opaque, - .EnumLiteral, - .Fn, - => unreachable, // comptime-only types - }; - - switch (mod.intern_pool.indexToKey(val.ip_index)) { + switch (mod.intern_pool.indexToKey(val.toIntern())) { .int_type, .ptr_type, .array_type, @@ -1876,7 +1808,6 @@ pub const DeclGen = struct { .breakpoint => return, .cond_br => return self.airCondBr(inst), .constant => unreachable, - .const_ty => unreachable, .dbg_stmt => return self.airDbgStmt(inst), .loop => return self.airLoop(inst), .ret => return self.airRet(inst), diff --git a/src/print_air.zig b/src/print_air.zig index 9169a88bbc..58e4029543 100644 --- a/src/print_air.zig +++ b/src/print_air.zig @@ -95,7 +95,7 @@ const Writer = struct { for (w.air.instructions.items(.tag), 0..) |tag, i| { const inst = @intCast(Air.Inst.Index, i); switch (tag) { - .constant, .const_ty, .interned => { + .constant, .interned => { try w.writeInst(s, inst); try s.writeByte('\n'); }, @@ -226,7 +226,6 @@ const Writer = struct { .save_err_return_trace_index, => try w.writeNoOp(s, inst), - .const_ty, .alloc, .ret_ptr, .err_return_trace, diff --git a/src/value.zig b/src/value.zig index 47215e588c..ef3a3f6be1 100644 --- a/src/value.zig +++ b/src/value.zig @@ -37,8 +37,9 @@ pub const Value = struct { /// A slice of u8 whose memory is managed externally. bytes, - /// Similar to bytes however it stores an index relative to `Module.string_literal_bytes`. - str_lit, + /// This value is repeated some number of times. The amount of times to repeat + /// is stored externally. + repeated, /// An instance of a struct, array, or vector. /// Each element/field stored as a `Value`. 
/// In the case of sentinel-terminated arrays, the sentinel value *is* stored, @@ -57,9 +58,9 @@ pub const Value = struct { pub fn Type(comptime t: Tag) type { return switch (t) { - .bytes => Payload.Bytes, + .repeated => Payload.SubValue, - .str_lit => Payload.StrLit, + .bytes => Payload.Bytes, .inferred_alloc => Payload.InferredAlloc, .inferred_alloc_comptime => Payload.InferredAllocComptime, @@ -171,7 +172,18 @@ pub const Value = struct { .legacy = .{ .ptr_otherwise = &new_payload.base }, }; }, - .str_lit => return self.copyPayloadShallow(arena, Payload.StrLit), + .repeated => { + const payload = self.cast(Payload.SubValue).?; + const new_payload = try arena.create(Payload.SubValue); + new_payload.* = .{ + .base = payload.base, + .data = try payload.data.copy(arena), + }; + return Value{ + .ip_index = .none, + .legacy = .{ .ptr_otherwise = &new_payload.base }, + }; + }, .aggregate => { const payload = self.castTag(.aggregate).?; const new_payload = try arena.create(Payload.Aggregate); @@ -187,7 +199,6 @@ pub const Value = struct { .legacy = .{ .ptr_otherwise = &new_payload.base }, }; }, - .@"union" => { const tag_and_val = self.castTag(.@"union").?.data; const new_payload = try arena.create(Payload.Union); @@ -203,7 +214,6 @@ pub const Value = struct { .legacy = .{ .ptr_otherwise = &new_payload.base }, }; }, - .inferred_alloc => unreachable, .inferred_alloc_comptime => unreachable, } @@ -237,7 +247,7 @@ pub const Value = struct { ) !void { comptime assert(fmt.len == 0); if (start_val.ip_index != .none) { - try out_stream.print("(interned: {})", .{start_val.ip_index}); + try out_stream.print("(interned: {})", .{start_val.toIntern()}); return; } var val = start_val; @@ -249,11 +259,9 @@ pub const Value = struct { return out_stream.writeAll("(union value)"); }, .bytes => return out_stream.print("\"{}\"", .{std.zig.fmtEscapes(val.castTag(.bytes).?.data)}), - .str_lit => { - const str_lit = val.castTag(.str_lit).?.data; - return out_stream.print("(.str_lit index={d} len={d})", .{ - str_lit.index, str_lit.len, - }); + .repeated => { + try out_stream.writeAll("(repeated) "); + val = val.castTag(.repeated).?.data; }, .inferred_alloc => return out_stream.writeAll("(inferred allocation value)"), .inferred_alloc_comptime => return out_stream.writeAll("(inferred comptime allocation value)"), @@ -274,40 +282,24 @@ pub const Value = struct { /// Asserts that the value is representable as an array of bytes. /// Copies the value into a freshly allocated slice of memory, which is owned by the caller. 
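The `repeated` tag introduced above replaces `str_lit` in the legacy (un-interned) representation: it carries a single sub-value, and the repeat count is implied by the array type rather than stored in the payload. A small construction sketch, assuming `Tag.create` behaves as it does for the other payload tags and that `arena` and `mod` are in scope:

    // Sketch: a legacy value for `[4]u8{ 0xaa, 0xaa, 0xaa, 0xaa }` as one
    // repeated element; the length 4 comes from the array type, not the payload.
    const elem = try mod.intValue(Type.u8, 0xaa);
    const val = try Value.Tag.repeated.create(arena, elem);
    // `unintern` below produces exactly this shape from an interned
    // `.aggregate` whose storage is `.repeated_elem`.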
pub fn toAllocatedBytes(val: Value, ty: Type, allocator: Allocator, mod: *Module) ![]u8 { - switch (val.ip_index) { - .none => switch (val.tag()) { - .bytes => { - const bytes = val.castTag(.bytes).?.data; - const adjusted_len = bytes.len - @boolToInt(ty.sentinel(mod) != null); - const adjusted_bytes = bytes[0..adjusted_len]; - return allocator.dupe(u8, adjusted_bytes); - }, - .str_lit => { - const str_lit = val.castTag(.str_lit).?.data; - const bytes = mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; - return allocator.dupe(u8, bytes); - }, - else => return arrayToAllocatedBytes(val, ty.arrayLen(mod), allocator, mod), + return switch (mod.intern_pool.indexToKey(val.toIntern())) { + .enum_literal => |enum_literal| allocator.dupe(u8, mod.intern_pool.stringToSlice(enum_literal)), + .ptr => |ptr| switch (ptr.len) { + .none => unreachable, + else => arrayToAllocatedBytes(val, ptr.len.toValue().toUnsignedInt(mod), allocator, mod), }, - else => return switch (mod.intern_pool.indexToKey(val.ip_index)) { - .enum_literal => |enum_literal| allocator.dupe(u8, mod.intern_pool.stringToSlice(enum_literal)), - .ptr => |ptr| switch (ptr.len) { - .none => unreachable, - else => arrayToAllocatedBytes(val, ptr.len.toValue().toUnsignedInt(mod), allocator, mod), - }, - .aggregate => |aggregate| switch (aggregate.storage) { - .bytes => |bytes| try allocator.dupe(u8, bytes), - .elems => arrayToAllocatedBytes(val, ty.arrayLen(mod), allocator, mod), - .repeated_elem => |elem| { - const byte = @intCast(u8, elem.toValue().toUnsignedInt(mod)); - const result = try allocator.alloc(u8, @intCast(usize, ty.arrayLen(mod))); - @memset(result, byte); - return result; - }, + .aggregate => |aggregate| switch (aggregate.storage) { + .bytes => |bytes| try allocator.dupe(u8, bytes), + .elems => arrayToAllocatedBytes(val, ty.arrayLen(mod), allocator, mod), + .repeated_elem => |elem| { + const byte = @intCast(u8, elem.toValue().toUnsignedInt(mod)); + const result = try allocator.alloc(u8, @intCast(usize, ty.arrayLen(mod))); + @memset(result, byte); + return result; }, - else => unreachable, }, - } + else => unreachable, + }; } fn arrayToAllocatedBytes(val: Value, len: u64, allocator: Allocator, mod: *Module) ![]u8 { @@ -320,13 +312,13 @@ pub const Value = struct { } pub fn intern(val: Value, ty: Type, mod: *Module) Allocator.Error!InternPool.Index { - if (val.ip_index != .none) return mod.intern_pool.getCoerced(mod.gpa, val.ip_index, ty.ip_index); + if (val.ip_index != .none) return mod.intern_pool.getCoerced(mod.gpa, val.toIntern(), ty.toIntern()); switch (val.tag()) { .aggregate => { const old_elems = val.castTag(.aggregate).?.data; const new_elems = try mod.gpa.alloc(InternPool.Index, old_elems.len); defer mod.gpa.free(new_elems); - const ty_key = mod.intern_pool.indexToKey(ty.ip_index); + const ty_key = mod.intern_pool.indexToKey(ty.toIntern()); for (new_elems, old_elems, 0..) 
|*new_elem, old_elem, field_i| new_elem.* = try old_elem.intern(switch (ty_key) { .struct_type => ty.structFieldType(field_i, mod), @@ -335,14 +327,14 @@ pub const Value = struct { else => unreachable, }, mod); return mod.intern(.{ .aggregate = .{ - .ty = ty.ip_index, + .ty = ty.toIntern(), .storage = .{ .elems = new_elems }, } }); }, .@"union" => { const pl = val.castTag(.@"union").?.data; return mod.intern(.{ .un = .{ - .ty = ty.ip_index, + .ty = ty.toIntern(), .tag = try pl.tag.intern(ty.unionTagTypeHypothetical(mod), mod), .val = try pl.val.intern(ty.unionFieldType(pl.tag, mod), mod), } }); @@ -353,13 +345,15 @@ pub const Value = struct { pub fn unintern(val: Value, arena: Allocator, mod: *Module) Allocator.Error!Value { if (val.ip_index == .none) return val; - switch (mod.intern_pool.indexToKey(val.ip_index)) { + switch (mod.intern_pool.indexToKey(val.toIntern())) { .aggregate => |aggregate| switch (aggregate.storage) { + .bytes => |bytes| return Tag.bytes.create(arena, try arena.dupe(u8, bytes)), .elems => |old_elems| { const new_elems = try arena.alloc(Value, old_elems.len); for (new_elems, old_elems) |*new_elem, old_elem| new_elem.* = old_elem.toValue(); return Tag.aggregate.create(arena, new_elems); }, + .repeated_elem => |elem| return Tag.repeated.create(arena, elem.toValue()), }, else => return val, } @@ -372,40 +366,38 @@ pub const Value = struct { /// Asserts that the value is representable as a type. pub fn toType(self: Value) Type { - return self.ip_index.toType(); + return self.toIntern().toType(); } pub fn enumToInt(val: Value, ty: Type, mod: *Module) Allocator.Error!Value { const ip = &mod.intern_pool; - switch (val.ip_index) { - else => return switch (ip.indexToKey(ip.typeOf(val.ip_index))) { - // Assume it is already an integer and return it directly. - .simple_type, .int_type => val, - .enum_literal => |enum_literal| { - const field_index = ty.enumFieldIndex(ip.stringToSlice(enum_literal), mod).?; - return switch (ip.indexToKey(ty.ip_index)) { - // Assume it is already an integer and return it directly. - .simple_type, .int_type => val, - .enum_type => |enum_type| if (enum_type.values.len != 0) - enum_type.values[field_index].toValue() - else // Field index and integer values are the same. - mod.intValue(enum_type.tag_ty.toType(), field_index), - else => unreachable, - }; - }, - .enum_type => |enum_type| (try ip.getCoerced( - mod.gpa, - val.ip_index, - enum_type.tag_ty, - )).toValue(), - else => unreachable, + return switch (ip.indexToKey(ip.typeOf(val.toIntern()))) { + // Assume it is already an integer and return it directly. + .simple_type, .int_type => val, + .enum_literal => |enum_literal| { + const field_index = ty.enumFieldIndex(ip.stringToSlice(enum_literal), mod).?; + return switch (ip.indexToKey(ty.toIntern())) { + // Assume it is already an integer and return it directly. + .simple_type, .int_type => val, + .enum_type => |enum_type| if (enum_type.values.len != 0) + enum_type.values[field_index].toValue() + else // Field index and integer values are the same. 
+ mod.intValue(enum_type.tag_ty.toType(), field_index), + else => unreachable, + }; }, - } + .enum_type => |enum_type| (try ip.getCoerced( + mod.gpa, + val.toIntern(), + enum_type.tag_ty, + )).toValue(), + else => unreachable, + }; } pub fn tagName(val: Value, mod: *Module) []const u8 { const ip = &mod.intern_pool; - const enum_tag = switch (ip.indexToKey(val.ip_index)) { + const enum_tag = switch (ip.indexToKey(val.toIntern())) { .un => |un| ip.indexToKey(un.tag).enum_tag, .enum_tag => |x| x, .enum_literal => |name| return ip.stringToSlice(name), @@ -413,7 +405,7 @@ pub const Value = struct { }; const enum_type = ip.indexToKey(enum_tag.ty).enum_type; const field_index = field_index: { - const field_index = enum_type.tagValueIndex(ip, val.ip_index).?; + const field_index = enum_type.tagValueIndex(ip, val.toIntern()).?; break :field_index @intCast(u32, field_index); }; const field_name = enum_type.names[field_index]; @@ -432,12 +424,12 @@ pub const Value = struct { mod: *Module, opt_sema: ?*Sema, ) Module.CompileError!BigIntConst { - return switch (val.ip_index) { + return switch (val.toIntern()) { .bool_false => BigIntMutable.init(&space.limbs, 0).toConst(), .bool_true => BigIntMutable.init(&space.limbs, 1).toConst(), .undef => unreachable, .null_value => BigIntMutable.init(&space.limbs, 0).toConst(), - else => switch (mod.intern_pool.indexToKey(val.ip_index)) { + else => switch (mod.intern_pool.indexToKey(val.toIntern())) { .runtime_value => |runtime_value| runtime_value.val.toValue().toBigIntAdvanced(space, mod, opt_sema), .int => |int| switch (int.storage) { .u64, .i64, .big_int => int.storage.toBigInt(space), @@ -475,18 +467,18 @@ pub const Value = struct { } pub fn getFunctionIndex(val: Value, mod: *Module) Module.Fn.OptionalIndex { - return if (val.ip_index != .none) mod.intern_pool.indexToFunc(val.ip_index) else .none; + return if (val.ip_index != .none) mod.intern_pool.indexToFunc(val.toIntern()) else .none; } pub fn getExternFunc(val: Value, mod: *Module) ?InternPool.Key.ExternFunc { - return if (val.ip_index != .none) switch (mod.intern_pool.indexToKey(val.ip_index)) { + return if (val.ip_index != .none) switch (mod.intern_pool.indexToKey(val.toIntern())) { .extern_func => |extern_func| extern_func, else => null, } else null; } pub fn getVariable(val: Value, mod: *Module) ?InternPool.Key.Variable { - return if (val.ip_index != .none) switch (mod.intern_pool.indexToKey(val.ip_index)) { + return if (val.ip_index != .none) switch (mod.intern_pool.indexToKey(val.toIntern())) { .variable => |variable| variable, else => null, } else null; @@ -501,11 +493,11 @@ pub const Value = struct { /// If the value fits in a u64, return it, otherwise null. /// Asserts not undefined. 
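The integer accessors below (`getUnsignedIntAdvanced`, `toSignedInt`, `toBool`) share a two-level dispatch after this change: first a switch on the raw index for singleton values such as `.bool_true`, then `indexToKey` for everything that needs a real lookup. Abstracted, with the cases taken from the diff:

    // Sketch of the two-level dispatch; singleton values have fixed
    // InternPool indices, so no key lookup is needed for them.
    return switch (val.toIntern()) {
        .bool_false => 0,
        .bool_true => 1,
        .undef => unreachable,
        else => switch (mod.intern_pool.indexToKey(val.toIntern())) {
            .int => |int| switch (int.storage) {
                .u64 => |x| x,
                // big_int and lazy storage are handled in the real accessors.
                else => null,
            },
            else => unreachable,
        },
    };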
pub fn getUnsignedIntAdvanced(val: Value, mod: *Module, opt_sema: ?*Sema) !?u64 { - return switch (val.ip_index) { + return switch (val.toIntern()) { .bool_false => 0, .bool_true => 1, .undef => unreachable, - else => switch (mod.intern_pool.indexToKey(val.ip_index)) { + else => switch (mod.intern_pool.indexToKey(val.toIntern())) { .int => |int| switch (int.storage) { .big_int => |big_int| big_int.to(u64) catch null, .u64 => |x| x, @@ -531,11 +523,11 @@ pub const Value = struct { /// Asserts the value is an integer and it fits in a i64 pub fn toSignedInt(val: Value, mod: *Module) i64 { - return switch (val.ip_index) { + return switch (val.toIntern()) { .bool_false => 0, .bool_true => 1, .undef => unreachable, - else => switch (mod.intern_pool.indexToKey(val.ip_index)) { + else => switch (mod.intern_pool.indexToKey(val.toIntern())) { .int => |int| switch (int.storage) { .big_int => |big_int| big_int.to(i64) catch unreachable, .i64 => |x| x, @@ -549,7 +541,7 @@ pub const Value = struct { } pub fn toBool(val: Value, _: *const Module) bool { - return switch (val.ip_index) { + return switch (val.toIntern()) { .bool_true => true, .bool_false => false, else => unreachable, @@ -558,7 +550,7 @@ pub const Value = struct { fn isDeclRef(val: Value, mod: *Module) bool { var check = val; - while (true) switch (mod.intern_pool.indexToKey(check.ip_index)) { + while (true) switch (mod.intern_pool.indexToKey(check.toIntern())) { .ptr => |ptr| switch (ptr.addr) { .decl, .mut_decl, .comptime_field => return true, .eu_payload, .opt_payload => |index| check = index.toValue(), @@ -644,7 +636,7 @@ pub const Value = struct { .ErrorSet => { // TODO revisit this when we have the concept of the error tag type const Int = u16; - const name = switch (mod.intern_pool.indexToKey(val.ip_index)) { + const name = switch (mod.intern_pool.indexToKey(val.toIntern())) { .err => |err| err.name, .error_union => |error_union| error_union.val.err_name, else => unreachable, @@ -718,7 +710,7 @@ pub const Value = struct { if (abi_size == 0) return; if (abi_size <= @sizeOf(u64)) { - const ip_key = mod.intern_pool.indexToKey(int_val.ip_index); + const ip_key = mod.intern_pool.indexToKey(int_val.toIntern()); const int: u64 = switch (ip_key.int.storage) { .u64 => |x| x, .i64 => |x| @bitCast(u64, x), @@ -847,7 +839,7 @@ pub const Value = struct { } }, .Float => return (try mod.intern(.{ .float = .{ - .ty = ty.ip_index, + .ty = ty.toIntern(), .storage = switch (ty.floatBits(target)) { 16 => .{ .f16 = @bitCast(f16, std.mem.readInt(u16, buffer[0..2], endian)) }, 32 => .{ .f32 = @bitCast(f32, std.mem.readInt(u32, buffer[0..4], endian)) }, @@ -860,13 +852,16 @@ pub const Value = struct { .Array => { const elem_ty = ty.childType(mod); const elem_size = elem_ty.abiSize(mod); - const elems = try arena.alloc(Value, @intCast(usize, ty.arrayLen(mod))); + const elems = try arena.alloc(InternPool.Index, @intCast(usize, ty.arrayLen(mod))); var offset: usize = 0; for (elems) |*elem| { - elem.* = try readFromMemory(elem_ty, mod, buffer[offset..], arena); + elem.* = try (try readFromMemory(elem_ty, mod, buffer[offset..], arena)).intern(elem_ty, mod); offset += @intCast(usize, elem_size); } - return Tag.aggregate.create(arena, elems); + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = elems }, + } })).toValue(); }, .Vector => { // We use byte_count instead of abi_size here, so that any padding bytes @@ -878,13 +873,16 @@ pub const Value = struct { .Auto => unreachable, // Sema is supposed to have emitted a compile 
error already .Extern => { const fields = ty.structFields(mod).values(); - const field_vals = try arena.alloc(Value, fields.len); - for (fields, 0..) |field, i| { + const field_vals = try arena.alloc(InternPool.Index, fields.len); + for (field_vals, fields, 0..) |*field_val, field, i| { const off = @intCast(usize, ty.structFieldOffset(i, mod)); - const sz = @intCast(usize, ty.structFieldType(i, mod).abiSize(mod)); - field_vals[i] = try readFromMemory(field.ty, mod, buffer[off..(off + sz)], arena); + const sz = @intCast(usize, field.ty.abiSize(mod)); + field_val.* = try (try readFromMemory(field.ty, mod, buffer[off..(off + sz)], arena)).intern(field.ty, mod); } - return Tag.aggregate.create(arena, field_vals); + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = field_vals }, + } })).toValue(); }, .Packed => { const byte_count = (@intCast(usize, ty.bitSize(mod)) + 7) / 8; @@ -897,7 +895,7 @@ pub const Value = struct { const int = std.mem.readInt(Int, buffer[0..@sizeOf(Int)], endian); const name = mod.error_name_list.items[@intCast(usize, int)]; return (try mod.intern(.{ .err = .{ - .ty = ty.ip_index, + .ty = ty.toIntern(), .name = mod.intern_pool.getString(name).unwrap().?, } })).toValue(); }, @@ -961,7 +959,7 @@ pub const Value = struct { } }, .Float => return (try mod.intern(.{ .float = .{ - .ty = ty.ip_index, + .ty = ty.toIntern(), .storage = switch (ty.floatBits(target)) { 16 => .{ .f16 = @bitCast(f16, std.mem.readPackedInt(u16, buffer, bit_offset, endian)) }, 32 => .{ .f32 = @bitCast(f32, std.mem.readPackedInt(u32, buffer, bit_offset, endian)) }, @@ -973,17 +971,20 @@ pub const Value = struct { } })).toValue(), .Vector => { const elem_ty = ty.childType(mod); - const elems = try arena.alloc(Value, @intCast(usize, ty.arrayLen(mod))); + const elems = try arena.alloc(InternPool.Index, @intCast(usize, ty.arrayLen(mod))); var bits: u16 = 0; const elem_bit_size = @intCast(u16, elem_ty.bitSize(mod)); for (elems, 0..) |_, i| { // On big-endian systems, LLVM reverses the element order of vectors by default const tgt_elem_i = if (endian == .Big) elems.len - i - 1 else i; - elems[tgt_elem_i] = try readFromPackedMemory(elem_ty, mod, buffer, bit_offset + bits, arena); + elems[tgt_elem_i] = try (try readFromPackedMemory(elem_ty, mod, buffer, bit_offset + bits, arena)).intern(elem_ty, mod); bits += elem_bit_size; } - return Tag.aggregate.create(arena, elems); + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = elems }, + } })).toValue(); }, .Struct => switch (ty.containerLayout(mod)) { .Auto => unreachable, // Sema is supposed to have emitted a compile error already @@ -991,13 +992,16 @@ pub const Value = struct { .Packed => { var bits: u16 = 0; const fields = ty.structFields(mod).values(); - const field_vals = try arena.alloc(Value, fields.len); + const field_vals = try arena.alloc(InternPool.Index, fields.len); for (fields, 0..) |field, i| { const field_bits = @intCast(u16, field.ty.bitSize(mod)); - field_vals[i] = try readFromPackedMemory(field.ty, mod, buffer, bit_offset + bits, arena); + field_vals[i] = try (try readFromPackedMemory(field.ty, mod, buffer, bit_offset + bits, arena)).intern(field.ty, mod); bits += field_bits; } - return Tag.aggregate.create(arena, field_vals); + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = field_vals }, + } })).toValue(); }, }, .Pointer => { @@ -1015,7 +1019,7 @@ pub const Value = struct { /// Asserts that the value is a float or an integer. 
pub fn toFloat(val: Value, comptime T: type, mod: *Module) T { - return switch (mod.intern_pool.indexToKey(val.ip_index)) { + return switch (mod.intern_pool.indexToKey(val.toIntern())) { .int => |int| switch (int.storage) { .big_int => |big_int| @floatCast(T, bigIntToFloat(big_int.limbs, big_int.positive)), inline .u64, .i64 => |x| { @@ -1119,7 +1123,7 @@ pub const Value = struct { pub fn floatCast(self: Value, dest_ty: Type, mod: *Module) !Value { const target = mod.getTarget(); return (try mod.intern(.{ .float = .{ - .ty = dest_ty.ip_index, + .ty = dest_ty.toIntern(), .storage = switch (dest_ty.floatBits(target)) { 16 => .{ .f16 = self.toFloat(f16, mod) }, 32 => .{ .f32 = self.toFloat(f32, mod) }, @@ -1133,7 +1137,7 @@ pub const Value = struct { /// Asserts the value is a float pub fn floatHasFraction(self: Value, mod: *const Module) bool { - return switch (mod.intern_pool.indexToKey(self.ip_index)) { + return switch (mod.intern_pool.indexToKey(self.toIntern())) { .float => |float| switch (float.storage) { inline else => |x| @rem(x, 1) != 0, }, @@ -1150,10 +1154,10 @@ pub const Value = struct { mod: *Module, opt_sema: ?*Sema, ) Module.CompileError!std.math.Order { - return switch (lhs.ip_index) { + return switch (lhs.toIntern()) { .bool_false => .eq, .bool_true => .gt, - else => switch (mod.intern_pool.indexToKey(lhs.ip_index)) { + else => switch (mod.intern_pool.indexToKey(lhs.toIntern())) { .ptr => |ptr| switch (ptr.addr) { .decl, .mut_decl, .comptime_field => .gt, .int => |int| int.toValue().orderAgainstZeroAdvanced(mod, opt_sema), @@ -1212,8 +1216,8 @@ pub const Value = struct { const lhs_tag = lhs.tag(); const rhs_tag = rhs.tag(); if (lhs_tag == rhs_tag) { - const lhs_storage = mod.intern_pool.indexToKey(lhs.ip_index).float.storage; - const rhs_storage = mod.intern_pool.indexToKey(rhs.ip_index).float.storage; + const lhs_storage = mod.intern_pool.indexToKey(lhs.toIntern()).float.storage; + const rhs_storage = mod.intern_pool.indexToKey(rhs.toIntern()).float.storage; const lhs128: f128 = switch (lhs_storage) { inline else => |x| x, }; @@ -1336,46 +1340,20 @@ pub const Value = struct { } } - switch (lhs.ip_index) { - .none => switch (lhs.tag()) { - .aggregate => { - for (lhs.castTag(.aggregate).?.data) |elem_val| { - if (!(try elem_val.compareAllWithZeroAdvancedExtra(op, mod, opt_sema))) return false; - } - return true; - }, - .str_lit => { - const str_lit = lhs.castTag(.str_lit).?.data; - const bytes = mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; - for (bytes) |byte| { - if (!std.math.compare(byte, op, 0)) return false; - } - return true; - }, - .bytes => { - const bytes = lhs.castTag(.bytes).?.data; - for (bytes) |byte| { - if (!std.math.compare(byte, op, 0)) return false; - } - return true; - }, - else => {}, + switch (mod.intern_pool.indexToKey(lhs.toIntern())) { + .float => |float| switch (float.storage) { + inline else => |x| if (std.math.isNan(x)) return op == .neq, }, - else => switch (mod.intern_pool.indexToKey(lhs.ip_index)) { - .float => |float| switch (float.storage) { - inline else => |x| if (std.math.isNan(x)) return op == .neq, - }, - .aggregate => |aggregate| return switch (aggregate.storage) { - .bytes => |bytes| for (bytes) |byte| { - if (!std.math.order(byte, 0).compare(op)) break false; - } else true, - .elems => |elems| for (elems) |elem| { - if (!try elem.toValue().compareAllWithZeroAdvancedExtra(op, mod, opt_sema)) break false; - } else true, - .repeated_elem => |elem| elem.toValue().compareAllWithZeroAdvancedExtra(op, mod, opt_sema), - }, - 
else => {}, + .aggregate => |aggregate| return switch (aggregate.storage) { + .bytes => |bytes| for (bytes) |byte| { + if (!std.math.order(byte, 0).compare(op)) break false; + } else true, + .elems => |elems| for (elems) |elem| { + if (!try elem.toValue().compareAllWithZeroAdvancedExtra(op, mod, opt_sema)) break false; + } else true, + .repeated_elem => |elem| elem.toValue().compareAllWithZeroAdvancedExtra(op, mod, opt_sema), }, + else => {}, } return (try orderAgainstZeroAdvanced(lhs, mod, opt_sema)).compare(op); } @@ -1412,7 +1390,7 @@ pub const Value = struct { const b_field_vals = b.castTag(.aggregate).?.data; assert(a_field_vals.len == b_field_vals.len); - switch (mod.intern_pool.indexToKey(ty.ip_index)) { + switch (mod.intern_pool.indexToKey(ty.toIntern())) { .anon_struct_type => |anon_struct| { assert(anon_struct.types.len == a_field_vals.len); for (anon_struct.types, 0..) |field_ty, i| { @@ -1577,7 +1555,7 @@ pub const Value = struct { // The InternPool data structure hashes based on Key to make interned objects // unique. An Index can be treated simply as u32 value for the // purpose of Type/Value hashing and equality. - std.hash.autoHash(hasher, val.ip_index); + std.hash.autoHash(hasher, val.toIntern()); return; } const zig_ty_tag = ty.zigTypeTag(mod); @@ -1663,7 +1641,7 @@ pub const Value = struct { // The InternPool data structure hashes based on Key to make interned objects // unique. An Index can be treated simply as u32 value for the // purpose of Type/Value hashing and equality. - std.hash.autoHash(hasher, val.ip_index); + std.hash.autoHash(hasher, val.toIntern()); return; } @@ -1703,7 +1681,7 @@ pub const Value = struct { }, .Union => { hasher.update(val.tagName(mod)); - switch (mod.intern_pool.indexToKey(val.ip_index)) { + switch (mod.intern_pool.indexToKey(val.toIntern())) { .un => |un| { const active_field_ty = ty.unionFieldType(un.tag.toValue(), mod); un.val.toValue().hashUncoerced(active_field_ty, hasher, mod); @@ -1746,7 +1724,7 @@ pub const Value = struct { }; pub fn isComptimeMutablePtr(val: Value, mod: *Module) bool { - return switch (mod.intern_pool.indexToKey(val.ip_index)) { + return switch (mod.intern_pool.indexToKey(val.toIntern())) { .ptr => |ptr| switch (ptr.addr) { .mut_decl, .comptime_field => true, .eu_payload, .opt_payload => |base_ptr| base_ptr.toValue().isComptimeMutablePtr(mod), @@ -1758,8 +1736,8 @@ pub const Value = struct { } pub fn canMutateComptimeVarState(val: Value, mod: *Module) bool { - return val.isComptimeMutablePtr(mod) or switch (val.ip_index) { - else => switch (mod.intern_pool.indexToKey(val.ip_index)) { + return val.isComptimeMutablePtr(mod) or switch (val.toIntern()) { + else => switch (mod.intern_pool.indexToKey(val.toIntern())) { .error_union => |error_union| switch (error_union.val) { .err_name => false, .payload => |payload| payload.toValue().canMutateComptimeVarState(mod), @@ -1785,7 +1763,7 @@ pub const Value = struct { /// to a decl, or if it points to some part of a decl (like field_ptr or element_ptr), /// this function returns null. 
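With the `bytes` and `str_lit` special cases deleted from `elemValue`, `sliceArray`, and `fieldValue` below, array-like data reaches those functions only through the interned `.aggregate` key, whose storage union has the three cases seen throughout this patch: `.bytes`, `.elems`, and `.repeated_elem`. A hedged construction sketch, where `array_ty` is assumed to be an interned `[3]u8` type:

    // Sketch: interning three bytes as an aggregate with byte storage.
    const str = try mod.intern(.{ .aggregate = .{
        .ty = array_ty.toIntern(),
        .storage = .{ .bytes = "abc" },
    } });
    // .elems would instead hold one InternPool.Index per element, and
    // .repeated_elem holds a single index reused for every element.
    const first = try str.toValue().elemValue(mod, 0); // the u8 value 'a'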
pub fn pointerDecl(val: Value, mod: *Module) ?Module.Decl.Index { - return switch (mod.intern_pool.indexToKey(val.ip_index)) { + return switch (mod.intern_pool.indexToKey(val.toIntern())) { .variable => |variable| variable.decl, .extern_func => |extern_func| extern_func.decl, .func => |func| mod.funcPtr(func.index).owner_decl, @@ -1811,35 +1789,19 @@ pub const Value = struct { pub const slice_len_index = 1; pub fn slicePtr(val: Value, mod: *Module) Value { - return mod.intern_pool.slicePtr(val.ip_index).toValue(); + return mod.intern_pool.slicePtr(val.toIntern()).toValue(); } pub fn sliceLen(val: Value, mod: *Module) u64 { - return mod.intern_pool.sliceLen(val.ip_index).toValue().toUnsignedInt(mod); + return mod.intern_pool.sliceLen(val.toIntern()).toValue().toUnsignedInt(mod); } /// Asserts the value is a single-item pointer to an array, or an array, /// or an unknown-length pointer, and returns the element value at the index. pub fn elemValue(val: Value, mod: *Module, index: usize) Allocator.Error!Value { - switch (val.ip_index) { + switch (val.toIntern()) { .undef => return Value.undef, - .none => switch (val.tag()) { - .bytes => { - const byte = val.castTag(.bytes).?.data[index]; - return mod.intValue(Type.u8, byte); - }, - .str_lit => { - const str_lit = val.castTag(.str_lit).?.data; - const bytes = mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; - const byte = bytes[index]; - return mod.intValue(Type.u8, byte); - }, - - .aggregate => return val.castTag(.aggregate).?.data[index], - - else => unreachable, - }, - else => return switch (mod.intern_pool.indexToKey(val.ip_index)) { + else => return switch (mod.intern_pool.indexToKey(val.toIntern())) { .ptr => |ptr| switch (ptr.addr) { .decl => |decl| mod.declPtr(decl).val.elemValue(mod, index), .mut_decl => |mut_decl| mod.declPtr(mut_decl.decl).val.elemValue(mod, index), @@ -1871,26 +1833,26 @@ pub const Value = struct { } pub fn isLazyAlign(val: Value, mod: *Module) bool { - return switch (mod.intern_pool.indexToKey(val.ip_index)) { + return switch (mod.intern_pool.indexToKey(val.toIntern())) { .int => |int| int.storage == .lazy_align, else => false, }; } pub fn isLazySize(val: Value, mod: *Module) bool { - return switch (mod.intern_pool.indexToKey(val.ip_index)) { + return switch (mod.intern_pool.indexToKey(val.toIntern())) { .int => |int| int.storage == .lazy_size, else => false, }; } pub fn isRuntimeValue(val: Value, mod: *Module) bool { - return mod.intern_pool.indexToKey(val.ip_index) == .runtime_value; + return mod.intern_pool.indexToKey(val.toIntern()) == .runtime_value; } /// Returns true if a Value is backed by a variable pub fn isVariable(val: Value, mod: *Module) bool { - return switch (mod.intern_pool.indexToKey(val.ip_index)) { + return switch (mod.intern_pool.indexToKey(val.toIntern())) { .variable => true, .ptr => |ptr| switch (ptr.addr) { .decl => |decl_index| { @@ -1913,7 +1875,7 @@ pub const Value = struct { } pub fn isPtrToThreadLocal(val: Value, mod: *Module) bool { - return switch (mod.intern_pool.indexToKey(val.ip_index)) { + return switch (mod.intern_pool.indexToKey(val.toIntern())) { .variable => |variable| variable.is_threadlocal, .ptr => |ptr| switch (ptr.addr) { .decl => |decl_index| { @@ -1943,55 +1905,30 @@ pub const Value = struct { start: usize, end: usize, ) error{OutOfMemory}!Value { - return switch (val.ip_index) { - .none => switch (val.tag()) { - .bytes => Tag.bytes.create(arena, val.castTag(.bytes).?.data[start..end]), - .str_lit => { - const str_lit = val.castTag(.str_lit).?.data; - 
return Tag.str_lit.create(arena, .{ - .index = @intCast(u32, str_lit.index + start), - .len = @intCast(u32, end - start), - }); - }, + return switch (mod.intern_pool.indexToKey(val.toIntern())) { + .ptr => |ptr| switch (ptr.addr) { + .decl => |decl| try mod.declPtr(decl).val.sliceArray(mod, arena, start, end), + .mut_decl => |mut_decl| try mod.declPtr(mut_decl.decl).val.sliceArray(mod, arena, start, end), + .comptime_field => |comptime_field| try comptime_field.toValue().sliceArray(mod, arena, start, end), + .elem => |elem| try elem.base.toValue().sliceArray(mod, arena, start + elem.index, end + elem.index), else => unreachable, }, - else => switch (mod.intern_pool.indexToKey(val.ip_index)) { - .ptr => |ptr| switch (ptr.addr) { - .decl => |decl| try mod.declPtr(decl).val.sliceArray(mod, arena, start, end), - .mut_decl => |mut_decl| try mod.declPtr(mut_decl.decl).val.sliceArray(mod, arena, start, end), - .comptime_field => |comptime_field| try comptime_field.toValue().sliceArray(mod, arena, start, end), - .elem => |elem| try elem.base.toValue().sliceArray(mod, arena, start + elem.index, end + elem.index), - else => unreachable, + .aggregate => |aggregate| (try mod.intern(.{ .aggregate = .{ + .ty = mod.intern_pool.typeOf(val.toIntern()), + .storage = switch (aggregate.storage) { + .bytes => |bytes| .{ .bytes = bytes[start..end] }, + .elems => |elems| .{ .elems = elems[start..end] }, + .repeated_elem => |elem| .{ .repeated_elem = elem }, }, - .aggregate => |aggregate| (try mod.intern(.{ .aggregate = .{ - .ty = mod.intern_pool.typeOf(val.ip_index), - .storage = switch (aggregate.storage) { - .bytes => |bytes| .{ .bytes = bytes[start..end] }, - .elems => |elems| .{ .elems = elems[start..end] }, - .repeated_elem => |elem| .{ .repeated_elem = elem }, - }, - } })).toValue(), - else => unreachable, - }, + } })).toValue(), + else => unreachable, }; } pub fn fieldValue(val: Value, mod: *Module, index: usize) !Value { - switch (val.ip_index) { + switch (val.toIntern()) { .undef => return Value.undef, - .none => switch (val.tag()) { - .aggregate => { - const field_values = val.castTag(.aggregate).?.data; - return field_values[index]; - }, - .@"union" => { - const payload = val.castTag(.@"union").?.data; - // TODO assert the tag is correct - return payload.val; - }, - else => unreachable, - }, - else => return switch (mod.intern_pool.indexToKey(val.ip_index)) { + else => return switch (mod.intern_pool.indexToKey(val.toIntern())) { .aggregate => |aggregate| switch (aggregate.storage) { .bytes => |bytes| try mod.intern(.{ .int = .{ .ty = .u8_type, @@ -2000,6 +1937,8 @@ pub const Value = struct { .elems => |elems| elems[index], .repeated_elem => |elem| elem, }.toValue(), + // TODO assert the tag is correct + .un => |un| un.val.toValue(), else => unreachable, }, } @@ -2007,7 +1946,7 @@ pub const Value = struct { pub fn unionTag(val: Value, mod: *Module) Value { if (val.ip_index == .none) return val.castTag(.@"union").?.data.tag; - return switch (mod.intern_pool.indexToKey(val.ip_index)) { + return switch (mod.intern_pool.indexToKey(val.toIntern())) { .undef, .enum_tag => val, .un => |un| un.tag.toValue(), else => unreachable, @@ -2022,12 +1961,12 @@ pub const Value = struct { mod: *Module, ) Allocator.Error!Value { const elem_ty = ty.elemType2(mod); - const ptr_val = switch (mod.intern_pool.indexToKey(val.ip_index)) { + const ptr_val = switch (mod.intern_pool.indexToKey(val.toIntern())) { .ptr => |ptr| ptr: { switch (ptr.addr) { .elem => |elem| if 
(mod.intern_pool.typeOf(elem.base).toType().elemType2(mod).eql(elem_ty, mod)) return (try mod.intern(.{ .ptr = .{ - .ty = ty.ip_index, + .ty = ty.toIntern(), .addr = .{ .elem = .{ .base = elem.base, .index = elem.index + index, @@ -2043,9 +1982,9 @@ pub const Value = struct { else => val, }; return (try mod.intern(.{ .ptr = .{ - .ty = ty.ip_index, + .ty = ty.toIntern(), .addr = .{ .elem = .{ - .base = ptr_val.ip_index, + .base = ptr_val.toIntern(), .index = index, } }, } })).toValue(); @@ -2053,7 +1992,7 @@ pub const Value = struct { pub fn isUndef(val: Value, mod: *Module) bool { if (val.ip_index == .none) return false; - return switch (mod.intern_pool.indexToKey(val.ip_index)) { + return switch (mod.intern_pool.indexToKey(val.toIntern())) { .undef => true, .simple_value => |v| v == .undefined, else => false, @@ -2070,15 +2009,9 @@ pub const Value = struct { /// Returns true if any value contained in `self` is undefined. pub fn anyUndef(val: Value, mod: *Module) !bool { if (val.ip_index == .none) return false; - return switch (val.ip_index) { + return switch (val.toIntern()) { .undef => true, - .none => switch (val.tag()) { - .aggregate => for (val.castTag(.aggregate).?.data) |field| { - if (try field.anyUndef(mod)) break true; - } else false, - else => false, - }, - else => switch (mod.intern_pool.indexToKey(val.ip_index)) { + else => switch (mod.intern_pool.indexToKey(val.toIntern())) { .undef => true, .simple_value => |v| v == .undefined, .ptr => |ptr| switch (ptr.len) { @@ -2098,13 +2031,13 @@ pub const Value = struct { /// Asserts the value is not undefined and not unreachable. /// Integer value 0 is considered null because of C pointers. pub fn isNull(val: Value, mod: *Module) bool { - return switch (val.ip_index) { + return switch (val.toIntern()) { .undef => unreachable, .unreachable_value => unreachable, .null_value => true, - else => return switch (mod.intern_pool.indexToKey(val.ip_index)) { + else => return switch (mod.intern_pool.indexToKey(val.toIntern())) { .int => { var buf: BigIntSpace = undefined; return val.toBigInt(&buf, mod).eqZero(); @@ -2120,7 +2053,7 @@ pub const Value = struct { /// something is an error or not because it works without having to figure out the /// string. pub fn getError(self: Value, mod: *const Module) ?[]const u8 { - return mod.intern_pool.stringToSliceUnwrap(switch (mod.intern_pool.indexToKey(self.ip_index)) { + return mod.intern_pool.stringToSliceUnwrap(switch (mod.intern_pool.indexToKey(self.toIntern())) { .err => |err| err.name.toOptional(), .error_union => |error_union| switch (error_union.val) { .err_name => |err_name| err_name.toOptional(), @@ -2133,12 +2066,12 @@ pub const Value = struct { /// Assumes the type is an error union. Returns true if and only if the value is /// the error union payload, not an error. pub fn errorUnionIsPayload(val: Value, mod: *const Module) bool { - return mod.intern_pool.indexToKey(val.ip_index).error_union.val == .payload; + return mod.intern_pool.indexToKey(val.toIntern()).error_union.val == .payload; } /// Value of the optional, null if optional has no payload. pub fn optionalValue(val: Value, mod: *const Module) ?Value { - return switch (mod.intern_pool.indexToKey(val.ip_index).opt.val) { + return switch (mod.intern_pool.indexToKey(val.toIntern()).opt.val) { .none => null, else => |index| index.toValue(), }; @@ -2146,14 +2079,9 @@ pub const Value = struct { /// Valid for all types. Asserts the value is not undefined. 
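The arithmetic helpers from here on (`intToFloatAdvanced`, `intAddSat`, `intSubSat`, `intMulWithOverflow`, `numberMulWrap`) all migrate to the same vector scheme: allocate `InternPool.Index` slots instead of `Value`s, intern each scalar result, then intern a single `.aggregate` with `.elems` storage. Factored out, the recurring shape is:

    // Sketch of the element-wise pattern repeated below; `scalarOp` stands
    // for any of the binary scalar helpers (intAddSatScalar, intSubSatScalar, ...).
    const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(mod));
    const scalar_ty = ty.scalarType(mod);
    for (result_data, 0..) |*scalar, i| {
        const lhs_elem = try lhs.elemValue(mod, i);
        const rhs_elem = try rhs.elemValue(mod, i);
        scalar.* = try (try scalarOp(lhs_elem, rhs_elem, scalar_ty, arena, mod)).intern(scalar_ty, mod);
    }
    return (try mod.intern(.{ .aggregate = .{
        .ty = ty.toIntern(),
        .storage = .{ .elems = result_data },
    } })).toValue();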
pub fn isFloat(self: Value, mod: *const Module) bool { - return switch (self.ip_index) { + return switch (self.toIntern()) { .undef => unreachable, - .none => switch (self.tag()) { - .inferred_alloc => unreachable, - .inferred_alloc_comptime => unreachable, - else => false, - }, - else => switch (mod.intern_pool.indexToKey(self.ip_index)) { + else => switch (mod.intern_pool.indexToKey(self.toIntern())) { .float => true, else => false, }, @@ -2169,21 +2097,24 @@ pub const Value = struct { pub fn intToFloatAdvanced(val: Value, arena: Allocator, int_ty: Type, float_ty: Type, mod: *Module, opt_sema: ?*Sema) !Value { if (int_ty.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, int_ty.vectorLen(mod)); + const result_data = try arena.alloc(InternPool.Index, int_ty.vectorLen(mod)); const scalar_ty = float_ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { const elem_val = try val.elemValue(mod, i); - scalar.* = try intToFloatScalar(elem_val, scalar_ty, mod, opt_sema); + scalar.* = try (try intToFloatScalar(elem_val, scalar_ty, mod, opt_sema)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = float_ty.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return intToFloatScalar(val, float_ty, mod, opt_sema); } pub fn intToFloatScalar(val: Value, float_ty: Type, mod: *Module, opt_sema: ?*Sema) !Value { - return switch (val.ip_index) { + return switch (val.toIntern()) { .undef => val, - else => return switch (mod.intern_pool.indexToKey(val.ip_index)) { + else => return switch (mod.intern_pool.indexToKey(val.toIntern())) { .int => |int| switch (int.storage) { .big_int => |big_int| { const float = bigIntToFloat(big_int.limbs, big_int.positive); @@ -2217,7 +2148,7 @@ pub const Value = struct { else => unreachable, }; return (try mod.intern(.{ .float = .{ - .ty = dest_ty.ip_index, + .ty = dest_ty.toIntern(), .storage = storage, } })).toValue(); } @@ -2245,14 +2176,17 @@ pub const Value = struct { mod: *Module, ) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, ty.vectorLen(mod)); + const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(mod)); const scalar_ty = ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { const lhs_elem = try lhs.elemValue(mod, i); const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = try intAddSatScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod); + scalar.* = try (try intAddSatScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return intAddSatScalar(lhs, rhs, ty, arena, mod); } @@ -2292,14 +2226,17 @@ pub const Value = struct { mod: *Module, ) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, ty.vectorLen(mod)); + const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(mod)); const scalar_ty = ty.scalarType(mod); for (result_data, 0..) 
|*scalar, i| {
                 const lhs_elem = try lhs.elemValue(mod, i);
                 const rhs_elem = try rhs.elemValue(mod, i);
-                scalar.* = try intSubSatScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod);
+                scalar.* = try (try intSubSatScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod)).intern(scalar_ty, mod);
             }
-            return Value.Tag.aggregate.create(arena, result_data);
+            return (try mod.intern(.{ .aggregate = .{
+                .ty = ty.toIntern(),
+                .storage = .{ .elems = result_data },
+            } })).toValue();
         }
         return intSubSatScalar(lhs, rhs, ty, arena, mod);
     }
@@ -2338,19 +2275,26 @@ pub const Value = struct {
         mod: *Module,
     ) !OverflowArithmeticResult {
         if (ty.zigTypeTag(mod) == .Vector) {
-            const overflowed_data = try arena.alloc(Value, ty.vectorLen(mod));
-            const result_data = try arena.alloc(Value, ty.vectorLen(mod));
+            const vec_len = ty.vectorLen(mod);
+            const overflowed_data = try arena.alloc(InternPool.Index, vec_len);
+            const result_data = try arena.alloc(InternPool.Index, vec_len);
             const scalar_ty = ty.scalarType(mod);
-            for (result_data, 0..) |*scalar, i| {
+            for (overflowed_data, result_data, 0..) |*of, *scalar, i| {
                 const lhs_elem = try lhs.elemValue(mod, i);
                 const rhs_elem = try rhs.elemValue(mod, i);
                 const of_math_result = try intMulWithOverflowScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod);
-                overflowed_data[i] = of_math_result.overflow_bit;
-                scalar.* = of_math_result.wrapped_result;
+                of.* = try of_math_result.overflow_bit.intern(Type.bool, mod);
+                scalar.* = try of_math_result.wrapped_result.intern(scalar_ty, mod);
             }
             return OverflowArithmeticResult{
-                .overflow_bit = try Value.Tag.aggregate.create(arena, overflowed_data),
-                .wrapped_result = try Value.Tag.aggregate.create(arena, result_data),
+                .overflow_bit = (try mod.intern(.{ .aggregate = .{
+                    .ty = (try mod.vectorType(.{ .len = vec_len, .child = .u1_type })).toIntern(),
+                    .storage = .{ .elems = overflowed_data },
+                } })).toValue(),
+                .wrapped_result = (try mod.intern(.{ .aggregate = .{
+                    .ty = ty.toIntern(),
+                    .storage = .{ .elems = result_data },
+                } })).toValue(),
             };
         }
         return intMulWithOverflowScalar(lhs, rhs, ty, arena, mod);
@@ -2400,13 +2344,17 @@ pub const Value = struct {
         mod: *Module,
     ) !Value {
         if (ty.zigTypeTag(mod) == .Vector) {
-            const result_data = try arena.alloc(Value, ty.vectorLen(mod));
+            const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(mod));
+            const scalar_ty = ty.scalarType(mod);
             for (result_data, 0..) |*scalar, i| {
                 const lhs_elem = try lhs.elemValue(mod, i);
                 const rhs_elem = try rhs.elemValue(mod, i);
-                scalar.* = try numberMulWrapScalar(lhs_elem, rhs_elem, ty.scalarType(mod), arena, mod);
+                scalar.* = try (try numberMulWrapScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod)).intern(scalar_ty, mod);
             }
-            return Value.Tag.aggregate.create(arena, result_data);
+            return (try mod.intern(.{ .aggregate = .{
+                .ty = ty.toIntern(),
+                .storage = .{ .elems = result_data },
+            } })).toValue();
         }
         return numberMulWrapScalar(lhs, rhs, ty, arena, mod);
     }
@@ -2442,13 +2390,17 @@ pub const Value = struct {
         mod: *Module,
     ) !Value {
         if (ty.zigTypeTag(mod) == .Vector) {
-            const result_data = try arena.alloc(Value, ty.vectorLen(mod));
+            const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(mod));
+            const scalar_ty = ty.scalarType(mod);
             for (result_data, 0..) 
|*scalar, i| { const lhs_elem = try lhs.elemValue(mod, i); const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = try intMulSatScalar(lhs_elem, rhs_elem, ty.scalarType(mod), arena, mod); + scalar.* = try (try intMulSatScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return intMulSatScalar(lhs, rhs, ty, arena, mod); } @@ -2515,12 +2467,16 @@ pub const Value = struct { /// operands must be (vectors of) integers; handles undefined scalars. pub fn bitwiseNot(val: Value, ty: Type, arena: Allocator, mod: *Module) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, ty.vectorLen(mod)); + const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(mod)); + const scalar_ty = ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { const elem_val = try val.elemValue(mod, i); - scalar.* = try bitwiseNotScalar(elem_val, ty.scalarType(mod), arena, mod); + scalar.* = try (try bitwiseNotScalar(elem_val, scalar_ty, arena, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return bitwiseNotScalar(val, ty, arena, mod); } @@ -2552,13 +2508,17 @@ pub const Value = struct { /// operands must be (vectors of) integers; handles undefined scalars. pub fn bitwiseAnd(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); + const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod)); + const scalar_ty = ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { const lhs_elem = try lhs.elemValue(mod, i); const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = try bitwiseAndScalar(lhs_elem, rhs_elem, ty.scalarType(mod), allocator, mod); + scalar.* = try (try bitwiseAndScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(allocator, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return bitwiseAndScalar(lhs, rhs, ty, allocator, mod); } @@ -2586,13 +2546,17 @@ pub const Value = struct { /// operands must be (vectors of) integers; handles undefined scalars. pub fn bitwiseNand(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, ty.vectorLen(mod)); + const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(mod)); + const scalar_ty = ty.scalarType(mod); for (result_data, 0..) 
|*scalar, i| { const lhs_elem = try lhs.elemValue(mod, i); const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = try bitwiseNandScalar(lhs_elem, rhs_elem, ty.scalarType(mod), arena, mod); + scalar.* = try (try bitwiseNandScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return bitwiseNandScalar(lhs, rhs, ty, arena, mod); } @@ -2609,13 +2573,17 @@ pub const Value = struct { /// operands must be (vectors of) integers; handles undefined scalars. pub fn bitwiseOr(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); + const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod)); + const scalar_ty = ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { const lhs_elem = try lhs.elemValue(mod, i); const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = try bitwiseOrScalar(lhs_elem, rhs_elem, ty.scalarType(mod), allocator, mod); + scalar.* = try (try bitwiseOrScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(allocator, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return bitwiseOrScalar(lhs, rhs, ty, allocator, mod); } @@ -2642,14 +2610,17 @@ pub const Value = struct { /// operands must be (vectors of) integers; handles undefined scalars. pub fn bitwiseXor(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); + const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod)); const scalar_ty = ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { const lhs_elem = try lhs.elemValue(mod, i); const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = try bitwiseXorScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod); + scalar.* = try (try bitwiseXorScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(allocator, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return bitwiseXorScalar(lhs, rhs, ty, allocator, mod); } @@ -2676,14 +2647,17 @@ pub const Value = struct { pub fn intDiv(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); + const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod)); const scalar_ty = ty.scalarType(mod); for (result_data, 0..) 
|*scalar, i| { const lhs_elem = try lhs.elemValue(mod, i); const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = try intDivScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod); + scalar.* = try (try intDivScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(allocator, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return intDivScalar(lhs, rhs, ty, allocator, mod); } @@ -2715,14 +2689,17 @@ pub const Value = struct { pub fn intDivFloor(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); + const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod)); const scalar_ty = ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { const lhs_elem = try lhs.elemValue(mod, i); const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = try intDivFloorScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod); + scalar.* = try (try intDivFloorScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(allocator, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return intDivFloorScalar(lhs, rhs, ty, allocator, mod); } @@ -2754,14 +2731,17 @@ pub const Value = struct { pub fn intMod(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); + const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod)); const scalar_ty = ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { const lhs_elem = try lhs.elemValue(mod, i); const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = try intModScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod); + scalar.* = try (try intModScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(allocator, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return intModScalar(lhs, rhs, ty, allocator, mod); } @@ -2794,7 +2774,7 @@ pub const Value = struct { /// Returns true if the value is a floating point type and is NaN. Returns false otherwise. pub fn isNan(val: Value, mod: *const Module) bool { if (val.ip_index == .none) return false; - return switch (mod.intern_pool.indexToKey(val.ip_index)) { + return switch (mod.intern_pool.indexToKey(val.toIntern())) { .float => |float| switch (float.storage) { inline else => |x| std.math.isNan(x), }, @@ -2805,7 +2785,7 @@ pub const Value = struct { /// Returns true if the value is a floating point type and is infinite. Returns false otherwise. 
pub fn isInf(val: Value, mod: *const Module) bool { if (val.ip_index == .none) return false; - return switch (mod.intern_pool.indexToKey(val.ip_index)) { + return switch (mod.intern_pool.indexToKey(val.toIntern())) { .float => |float| switch (float.storage) { inline else => |x| std.math.isInf(x), }, @@ -2815,7 +2795,7 @@ pub const Value = struct { pub fn isNegativeInf(val: Value, mod: *const Module) bool { if (val.ip_index == .none) return false; - return switch (mod.intern_pool.indexToKey(val.ip_index)) { + return switch (mod.intern_pool.indexToKey(val.toIntern())) { .float => |float| switch (float.storage) { inline else => |x| std.math.isNegativeInf(x), }, @@ -2825,13 +2805,17 @@ pub const Value = struct { pub fn floatRem(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); + const scalar_ty = float_type.scalarType(mod); for (result_data, 0..) |*scalar, i| { const lhs_elem = try lhs.elemValue(mod, i); const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = try floatRemScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), mod); + scalar.* = try (try floatRemScalar(lhs_elem, rhs_elem, scalar_ty, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = float_type.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return floatRemScalar(lhs, rhs, float_type, mod); } @@ -2847,20 +2831,24 @@ pub const Value = struct { else => unreachable, }; return (try mod.intern(.{ .float = .{ - .ty = float_type.ip_index, + .ty = float_type.toIntern(), .storage = storage, } })).toValue(); } pub fn floatMod(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); + const scalar_ty = float_type.scalarType(mod); for (result_data, 0..) |*scalar, i| { const lhs_elem = try lhs.elemValue(mod, i); const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = try floatModScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), mod); + scalar.* = try (try floatModScalar(lhs_elem, rhs_elem, scalar_ty, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = float_type.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return floatModScalar(lhs, rhs, float_type, mod); } @@ -2876,21 +2864,24 @@ pub const Value = struct { else => unreachable, }; return (try mod.intern(.{ .float = .{ - .ty = float_type.ip_index, + .ty = float_type.toIntern(), .storage = storage, } })).toValue(); } pub fn intMul(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); + const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod)); const scalar_ty = ty.scalarType(mod); for (result_data, 0..) 
|*scalar, i| { const lhs_elem = try lhs.elemValue(mod, i); const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = try intMulScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod); + scalar.* = try (try intMulScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(allocator, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return intMulScalar(lhs, rhs, ty, allocator, mod); } @@ -2918,13 +2909,16 @@ pub const Value = struct { pub fn intTrunc(val: Value, ty: Type, allocator: Allocator, signedness: std.builtin.Signedness, bits: u16, mod: *Module) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); + const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod)); const scalar_ty = ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { const elem_val = try val.elemValue(mod, i); - scalar.* = try intTruncScalar(elem_val, scalar_ty, allocator, signedness, bits, mod); + scalar.* = try (try intTruncScalar(elem_val, scalar_ty, allocator, signedness, bits, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(allocator, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return intTruncScalar(val, ty, allocator, signedness, bits, mod); } @@ -2939,14 +2933,17 @@ pub const Value = struct { mod: *Module, ) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); + const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod)); const scalar_ty = ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { const elem_val = try val.elemValue(mod, i); const bits_elem = try bits.elemValue(mod, i); - scalar.* = try intTruncScalar(elem_val, scalar_ty, allocator, signedness, @intCast(u16, bits_elem.toUnsignedInt(mod)), mod); + scalar.* = try (try intTruncScalar(elem_val, scalar_ty, allocator, signedness, @intCast(u16, bits_elem.toUnsignedInt(mod)), mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(allocator, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return intTruncScalar(val, ty, allocator, signedness, @intCast(u16, bits.toUnsignedInt(mod)), mod); } @@ -2976,14 +2973,17 @@ pub const Value = struct { pub fn shl(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); + const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod)); const scalar_ty = ty.scalarType(mod); for (result_data, 0..) 
|*scalar, i| {
                 const lhs_elem = try lhs.elemValue(mod, i);
                 const rhs_elem = try rhs.elemValue(mod, i);
-                scalar.* = try shlScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod);
+                scalar.* = try (try shlScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).intern(scalar_ty, mod);
             }
-            return Value.Tag.aggregate.create(allocator, result_data);
+            return (try mod.intern(.{ .aggregate = .{
+                .ty = ty.toIntern(),
+                .storage = .{ .elems = result_data },
+            } })).toValue();
         }
         return shlScalar(lhs, rhs, ty, allocator, mod);
     }
@@ -3015,18 +3015,26 @@ pub const Value = struct {
         mod: *Module,
     ) !OverflowArithmeticResult {
         if (ty.zigTypeTag(mod) == .Vector) {
-            const overflowed_data = try allocator.alloc(Value, ty.vectorLen(mod));
-            const result_data = try allocator.alloc(Value, ty.vectorLen(mod));
-            for (result_data, 0..) |*scalar, i| {
+            const vec_len = ty.vectorLen(mod);
+            const overflowed_data = try allocator.alloc(InternPool.Index, vec_len);
+            const result_data = try allocator.alloc(InternPool.Index, vec_len);
+            const scalar_ty = ty.scalarType(mod);
+            for (overflowed_data, result_data, 0..) |*of, *scalar, i| {
                 const lhs_elem = try lhs.elemValue(mod, i);
                 const rhs_elem = try rhs.elemValue(mod, i);
-                const of_math_result = try shlWithOverflowScalar(lhs_elem, rhs_elem, ty.scalarType(mod), allocator, mod);
-                overflowed_data[i] = of_math_result.overflow_bit;
-                scalar.* = of_math_result.wrapped_result;
+                const of_math_result = try shlWithOverflowScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod);
+                of.* = try of_math_result.overflow_bit.intern(Type.bool, mod);
+                scalar.* = try of_math_result.wrapped_result.intern(scalar_ty, mod);
             }
             return OverflowArithmeticResult{
-                .overflow_bit = try Value.Tag.aggregate.create(allocator, overflowed_data),
-                .wrapped_result = try Value.Tag.aggregate.create(allocator, result_data),
+                .overflow_bit = (try mod.intern(.{ .aggregate = .{
+                    .ty = (try mod.vectorType(.{ .len = vec_len, .child = .u1_type })).toIntern(),
+                    .storage = .{ .elems = overflowed_data },
+                } })).toValue(),
+                .wrapped_result = (try mod.intern(.{ .aggregate = .{
+                    .ty = ty.toIntern(),
+                    .storage = .{ .elems = result_data },
+                } })).toValue(),
             };
         }
         return shlWithOverflowScalar(lhs, rhs, ty, allocator, mod);
@@ -3071,13 +3079,17 @@ pub const Value = struct {
         mod: *Module,
     ) !Value {
         if (ty.zigTypeTag(mod) == .Vector) {
-            const result_data = try arena.alloc(Value, ty.vectorLen(mod));
+            const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(mod));
+            const scalar_ty = ty.scalarType(mod);
             for (result_data, 0..) |*scalar, i| {
                 const lhs_elem = try lhs.elemValue(mod, i);
                 const rhs_elem = try rhs.elemValue(mod, i);
-                scalar.* = try shlSatScalar(lhs_elem, rhs_elem, ty.scalarType(mod), arena, mod);
+                scalar.* = try (try shlSatScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod)).intern(scalar_ty, mod);
             }
-            return Value.Tag.aggregate.create(arena, result_data);
+            return (try mod.intern(.{ .aggregate = .{
+                .ty = ty.toIntern(),
+                .storage = .{ .elems = result_data },
+            } })).toValue();
         }
         return shlSatScalar(lhs, rhs, ty, arena, mod);
     }
@@ -3117,13 +3129,17 @@ pub const Value = struct {
         mod: *Module,
     ) !Value {
         if (ty.zigTypeTag(mod) == .Vector) {
-            const result_data = try arena.alloc(Value, ty.vectorLen(mod));
+            const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(mod));
+            const scalar_ty = ty.scalarType(mod);
             for (result_data, 0..) 
|*scalar, i| { const lhs_elem = try lhs.elemValue(mod, i); const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = try shlTruncScalar(lhs_elem, rhs_elem, ty.scalarType(mod), arena, mod); + scalar.* = try (try shlTruncScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return shlTruncScalar(lhs, rhs, ty, arena, mod); } @@ -3143,14 +3159,17 @@ pub const Value = struct { pub fn shr(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try allocator.alloc(Value, ty.vectorLen(mod)); + const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod)); const scalar_ty = ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { const lhs_elem = try lhs.elemValue(mod, i); const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = try shrScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod); + scalar.* = try (try shrScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(allocator, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return shrScalar(lhs, rhs, ty, allocator, mod); } @@ -3193,12 +3212,16 @@ pub const Value = struct { mod: *Module, ) !Value { if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); + const scalar_ty = float_type.scalarType(mod); for (result_data, 0..) |*scalar, i| { const elem_val = try val.elemValue(mod, i); - scalar.* = try floatNegScalar(elem_val, float_type.scalarType(mod), mod); + scalar.* = try (try floatNegScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = float_type.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return floatNegScalar(val, float_type, mod); } @@ -3218,7 +3241,7 @@ pub const Value = struct { else => unreachable, }; return (try mod.intern(.{ .float = .{ - .ty = float_type.ip_index, + .ty = float_type.toIntern(), .storage = storage, } })).toValue(); } @@ -3231,13 +3254,17 @@ pub const Value = struct { mod: *Module, ) !Value { if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); + const scalar_ty = float_type.scalarType(mod); for (result_data, 0..) 
|*scalar, i| { const lhs_elem = try lhs.elemValue(mod, i); const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = try floatAddScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), mod); + scalar.* = try (try floatAddScalar(lhs_elem, rhs_elem, scalar_ty, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = float_type.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return floatAddScalar(lhs, rhs, float_type, mod); } @@ -3258,7 +3285,7 @@ pub const Value = struct { else => unreachable, }; return (try mod.intern(.{ .float = .{ - .ty = float_type.ip_index, + .ty = float_type.toIntern(), .storage = storage, } })).toValue(); } @@ -3271,13 +3298,17 @@ pub const Value = struct { mod: *Module, ) !Value { if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); + const scalar_ty = float_type.scalarType(mod); for (result_data, 0..) |*scalar, i| { const lhs_elem = try lhs.elemValue(mod, i); const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = try floatSubScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), mod); + scalar.* = try (try floatSubScalar(lhs_elem, rhs_elem, scalar_ty, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = float_type.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return floatSubScalar(lhs, rhs, float_type, mod); } @@ -3298,7 +3329,7 @@ pub const Value = struct { else => unreachable, }; return (try mod.intern(.{ .float = .{ - .ty = float_type.ip_index, + .ty = float_type.toIntern(), .storage = storage, } })).toValue(); } @@ -3311,13 +3342,17 @@ pub const Value = struct { mod: *Module, ) !Value { if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); + const scalar_ty = float_type.scalarType(mod); for (result_data, 0..) |*scalar, i| { const lhs_elem = try lhs.elemValue(mod, i); const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = try floatDivScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), mod); + scalar.* = try (try floatDivScalar(lhs_elem, rhs_elem, scalar_ty, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = float_type.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return floatDivScalar(lhs, rhs, float_type, mod); } @@ -3338,7 +3373,7 @@ pub const Value = struct { else => unreachable, }; return (try mod.intern(.{ .float = .{ - .ty = float_type.ip_index, + .ty = float_type.toIntern(), .storage = storage, } })).toValue(); } @@ -3351,13 +3386,17 @@ pub const Value = struct { mod: *Module, ) !Value { if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); + const scalar_ty = float_type.scalarType(mod); for (result_data, 0..) 
|*scalar, i| { const lhs_elem = try lhs.elemValue(mod, i); const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = try floatDivFloorScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), mod); + scalar.* = try (try floatDivFloorScalar(lhs_elem, rhs_elem, scalar_ty, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = float_type.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return floatDivFloorScalar(lhs, rhs, float_type, mod); } @@ -3378,7 +3417,7 @@ pub const Value = struct { else => unreachable, }; return (try mod.intern(.{ .float = .{ - .ty = float_type.ip_index, + .ty = float_type.toIntern(), .storage = storage, } })).toValue(); } @@ -3391,13 +3430,17 @@ pub const Value = struct { mod: *Module, ) !Value { if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); + const scalar_ty = float_type.scalarType(mod); for (result_data, 0..) |*scalar, i| { const lhs_elem = try lhs.elemValue(mod, i); const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = try floatDivTruncScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), mod); + scalar.* = try (try floatDivTruncScalar(lhs_elem, rhs_elem, scalar_ty, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = float_type.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return floatDivTruncScalar(lhs, rhs, float_type, mod); } @@ -3418,7 +3461,7 @@ pub const Value = struct { else => unreachable, }; return (try mod.intern(.{ .float = .{ - .ty = float_type.ip_index, + .ty = float_type.toIntern(), .storage = storage, } })).toValue(); } @@ -3431,13 +3474,17 @@ pub const Value = struct { mod: *Module, ) !Value { if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); + const scalar_ty = float_type.scalarType(mod); for (result_data, 0..) |*scalar, i| { const lhs_elem = try lhs.elemValue(mod, i); const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = try floatMulScalar(lhs_elem, rhs_elem, float_type.scalarType(mod), mod); + scalar.* = try (try floatMulScalar(lhs_elem, rhs_elem, scalar_ty, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = float_type.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return floatMulScalar(lhs, rhs, float_type, mod); } @@ -3458,19 +3505,23 @@ pub const Value = struct { else => unreachable, }; return (try mod.intern(.{ .float = .{ - .ty = float_type.ip_index, + .ty = float_type.toIntern(), .storage = storage, } })).toValue(); } pub fn sqrt(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); + const scalar_ty = float_type.scalarType(mod); for (result_data, 0..) 
|*scalar, i| { const elem_val = try val.elemValue(mod, i); - scalar.* = try sqrtScalar(elem_val, float_type.scalarType(mod), mod); + scalar.* = try (try sqrtScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = float_type.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return sqrtScalar(val, float_type, mod); } @@ -3486,19 +3537,23 @@ pub const Value = struct { else => unreachable, }; return (try mod.intern(.{ .float = .{ - .ty = float_type.ip_index, + .ty = float_type.toIntern(), .storage = storage, } })).toValue(); } pub fn sin(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); + const scalar_ty = float_type.scalarType(mod); for (result_data, 0..) |*scalar, i| { const elem_val = try val.elemValue(mod, i); - scalar.* = try sinScalar(elem_val, float_type.scalarType(mod), mod); + scalar.* = try (try sinScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = float_type.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return sinScalar(val, float_type, mod); } @@ -3514,19 +3569,23 @@ pub const Value = struct { else => unreachable, }; return (try mod.intern(.{ .float = .{ - .ty = float_type.ip_index, + .ty = float_type.toIntern(), .storage = storage, } })).toValue(); } pub fn cos(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); + const scalar_ty = float_type.scalarType(mod); for (result_data, 0..) |*scalar, i| { const elem_val = try val.elemValue(mod, i); - scalar.* = try cosScalar(elem_val, float_type.scalarType(mod), mod); + scalar.* = try (try cosScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = float_type.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return cosScalar(val, float_type, mod); } @@ -3542,19 +3601,23 @@ pub const Value = struct { else => unreachable, }; return (try mod.intern(.{ .float = .{ - .ty = float_type.ip_index, + .ty = float_type.toIntern(), .storage = storage, } })).toValue(); } pub fn tan(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); + const scalar_ty = float_type.scalarType(mod); for (result_data, 0..) 
|*scalar, i| { const elem_val = try val.elemValue(mod, i); - scalar.* = try tanScalar(elem_val, float_type.scalarType(mod), mod); + scalar.* = try (try tanScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = float_type.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return tanScalar(val, float_type, mod); } @@ -3570,19 +3633,23 @@ pub const Value = struct { else => unreachable, }; return (try mod.intern(.{ .float = .{ - .ty = float_type.ip_index, + .ty = float_type.toIntern(), .storage = storage, } })).toValue(); } pub fn exp(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); + const scalar_ty = float_type.scalarType(mod); for (result_data, 0..) |*scalar, i| { const elem_val = try val.elemValue(mod, i); - scalar.* = try expScalar(elem_val, float_type.scalarType(mod), mod); + scalar.* = try (try expScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = float_type.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return expScalar(val, float_type, mod); } @@ -3598,19 +3665,23 @@ pub const Value = struct { else => unreachable, }; return (try mod.intern(.{ .float = .{ - .ty = float_type.ip_index, + .ty = float_type.toIntern(), .storage = storage, } })).toValue(); } pub fn exp2(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); + const scalar_ty = float_type.scalarType(mod); for (result_data, 0..) |*scalar, i| { const elem_val = try val.elemValue(mod, i); - scalar.* = try exp2Scalar(elem_val, float_type.scalarType(mod), mod); + scalar.* = try (try exp2Scalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = float_type.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return exp2Scalar(val, float_type, mod); } @@ -3626,19 +3697,23 @@ pub const Value = struct { else => unreachable, }; return (try mod.intern(.{ .float = .{ - .ty = float_type.ip_index, + .ty = float_type.toIntern(), .storage = storage, } })).toValue(); } pub fn log(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); + const scalar_ty = float_type.scalarType(mod); for (result_data, 0..) 
|*scalar, i| { const elem_val = try val.elemValue(mod, i); - scalar.* = try logScalar(elem_val, float_type.scalarType(mod), mod); + scalar.* = try (try logScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = float_type.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return logScalar(val, float_type, mod); } @@ -3654,19 +3729,23 @@ pub const Value = struct { else => unreachable, }; return (try mod.intern(.{ .float = .{ - .ty = float_type.ip_index, + .ty = float_type.toIntern(), .storage = storage, } })).toValue(); } pub fn log2(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); + const scalar_ty = float_type.scalarType(mod); for (result_data, 0..) |*scalar, i| { const elem_val = try val.elemValue(mod, i); - scalar.* = try log2Scalar(elem_val, float_type.scalarType(mod), mod); + scalar.* = try (try log2Scalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = float_type.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return log2Scalar(val, float_type, mod); } @@ -3682,19 +3761,23 @@ pub const Value = struct { else => unreachable, }; return (try mod.intern(.{ .float = .{ - .ty = float_type.ip_index, + .ty = float_type.toIntern(), .storage = storage, } })).toValue(); } pub fn log10(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); + const scalar_ty = float_type.scalarType(mod); for (result_data, 0..) |*scalar, i| { const elem_val = try val.elemValue(mod, i); - scalar.* = try log10Scalar(elem_val, float_type.scalarType(mod), mod); + scalar.* = try (try log10Scalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = float_type.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return log10Scalar(val, float_type, mod); } @@ -3710,19 +3793,23 @@ pub const Value = struct { else => unreachable, }; return (try mod.intern(.{ .float = .{ - .ty = float_type.ip_index, + .ty = float_type.toIntern(), .storage = storage, } })).toValue(); } pub fn fabs(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); + const scalar_ty = float_type.scalarType(mod); for (result_data, 0..) 
|*scalar, i| { const elem_val = try val.elemValue(mod, i); - scalar.* = try fabsScalar(elem_val, float_type.scalarType(mod), mod); + scalar.* = try (try fabsScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = float_type.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return fabsScalar(val, float_type, mod); } @@ -3738,19 +3825,23 @@ pub const Value = struct { else => unreachable, }; return (try mod.intern(.{ .float = .{ - .ty = float_type.ip_index, + .ty = float_type.toIntern(), .storage = storage, } })).toValue(); } pub fn floor(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); + const scalar_ty = float_type.scalarType(mod); for (result_data, 0..) |*scalar, i| { const elem_val = try val.elemValue(mod, i); - scalar.* = try floorScalar(elem_val, float_type.scalarType(mod), mod); + scalar.* = try (try floorScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = float_type.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return floorScalar(val, float_type, mod); } @@ -3766,19 +3857,23 @@ pub const Value = struct { else => unreachable, }; return (try mod.intern(.{ .float = .{ - .ty = float_type.ip_index, + .ty = float_type.toIntern(), .storage = storage, } })).toValue(); } pub fn ceil(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); + const scalar_ty = float_type.scalarType(mod); for (result_data, 0..) |*scalar, i| { const elem_val = try val.elemValue(mod, i); - scalar.* = try ceilScalar(elem_val, float_type.scalarType(mod), mod); + scalar.* = try (try ceilScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = float_type.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return ceilScalar(val, float_type, mod); } @@ -3794,19 +3889,23 @@ pub const Value = struct { else => unreachable, }; return (try mod.intern(.{ .float = .{ - .ty = float_type.ip_index, + .ty = float_type.toIntern(), .storage = storage, } })).toValue(); } pub fn round(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); + const scalar_ty = float_type.scalarType(mod); for (result_data, 0..) 
|*scalar, i| { const elem_val = try val.elemValue(mod, i); - scalar.* = try roundScalar(elem_val, float_type.scalarType(mod), mod); + scalar.* = try (try roundScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = float_type.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return roundScalar(val, float_type, mod); } @@ -3822,19 +3921,23 @@ pub const Value = struct { else => unreachable, }; return (try mod.intern(.{ .float = .{ - .ty = float_type.ip_index, + .ty = float_type.toIntern(), .storage = storage, } })).toValue(); } pub fn trunc(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); + const scalar_ty = float_type.scalarType(mod); for (result_data, 0..) |*scalar, i| { const elem_val = try val.elemValue(mod, i); - scalar.* = try truncScalar(elem_val, float_type.scalarType(mod), mod); + scalar.* = try (try truncScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = float_type.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return truncScalar(val, float_type, mod); } @@ -3850,7 +3953,7 @@ pub const Value = struct { else => unreachable, }; return (try mod.intern(.{ .float = .{ - .ty = float_type.ip_index, + .ty = float_type.toIntern(), .storage = storage, } })).toValue(); } @@ -3864,20 +3967,18 @@ pub const Value = struct { mod: *Module, ) !Value { if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(Value, float_type.vectorLen(mod)); + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); + const scalar_ty = float_type.scalarType(mod); for (result_data, 0..) |*scalar, i| { const mulend1_elem = try mulend1.elemValue(mod, i); const mulend2_elem = try mulend2.elemValue(mod, i); const addend_elem = try addend.elemValue(mod, i); - scalar.* = try mulAddScalar( - float_type.scalarType(mod), - mulend1_elem, - mulend2_elem, - addend_elem, - mod, - ); + scalar.* = try (try mulAddScalar(scalar_ty, mulend1_elem, mulend2_elem, addend_elem, mod)).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = float_type.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return mulAddScalar(float_type, mulend1, mulend2, addend, mod); } @@ -3899,7 +4000,7 @@ pub const Value = struct { else => unreachable, }; return (try mod.intern(.{ .float = .{ - .ty = float_type.ip_index, + .ty = float_type.toIntern(), .storage = storage, } })).toValue(); } @@ -3931,22 +4032,22 @@ pub const Value = struct { } pub fn isGenericPoison(val: Value) bool { - return val.ip_index == .generic_poison; + return val.toIntern() == .generic_poison; } /// This type is not copyable since it may contain pointers to its inner data. pub const Payload = struct { tag: Tag, - pub const Bytes = struct { + pub const SubValue = struct { base: Payload, - /// Includes the sentinel, if any. - data: []const u8, + data: Value, }; - pub const StrLit = struct { + pub const Bytes = struct { base: Payload, - data: Module.StringLiteralContext.Key, + /// Includes the sentinel, if any. 
+        data: []const u8,
     };
 
     pub const Aggregate = struct {
-- 
cgit v1.2.3


From 70cc68e9994f7dca53904075e15b2b6f87342539 Mon Sep 17 00:00:00 2001
From: Jacob Young
Date: Thu, 25 May 2023 19:23:01 -0400
Subject: Air: remove constant tag

Some uses have been moved to their own tags; the rest use `interned`.
Also, finish porting comptime mutation to be more InternPool-aware.
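
An illustrative sketch (not part of the change below): with `constant`
gone, a consumer that wants the comptime-known value of an instruction
no longer goes through the `ty_pl` payload and the `values` array; the
single `interned` tag resolves directly through the InternPool. A
minimal hypothetical helper, assuming only the `Air` accessors that
appear in this diff:

    // Hypothetical sketch mirroring the updated `Air.value` logic:
    // only the `interned` tag carries a comptime-known value now.
    fn comptimeValue(air: Air, inst_index: Air.Inst.Index) ?Value {
        const datas = air.instructions.items(.data);
        return switch (air.instructions.items(.tag)[inst_index]) {
            // Before this change there was also:
            //   .constant => air.values[datas[inst_index].ty_pl.payload],
            .interned => datas[inst_index].interned.toValue(),
            else => null,
        };
    }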
---
 src/Air.zig                   |   18 +-
 src/InternPool.zig            |   18 +-
 src/Liveness.zig              |   22 +-
 src/Liveness/Verify.zig       |   40 +-
 src/Module.zig                |    9 +-
 src/Sema.zig                  | 1245 +++++++++++++++++++----------------------
 src/TypedValue.zig            |   54 ++
 src/arch/aarch64/CodeGen.zig  |   12 +-
 src/arch/arm/CodeGen.zig      |   12 +-
 src/arch/riscv64/CodeGen.zig  |   12 +-
 src/arch/sparc64/CodeGen.zig  |   12 +-
 src/arch/wasm/CodeGen.zig     |    5 +-
 src/arch/x86_64/CodeGen.zig   |   19 +-
 src/codegen/c.zig             |    5 +-
 src/codegen/llvm.zig          |    3 +-
 src/codegen/spirv.zig         |    1 -
 src/print_air.zig             |   12 +-
 src/value.zig                 |  198 ++++---
 tools/lldb_pretty_printers.py |    6 +
 19 files changed, 863 insertions(+), 840 deletions(-)

(limited to 'src/arch')

diff --git a/src/Air.zig b/src/Air.zig
index 4f36cf8bc1..95ed7d33f1 100644
--- a/src/Air.zig
+++ b/src/Air.zig
@@ -186,6 +186,14 @@ pub const Inst = struct {
         /// Allocates stack local memory.
         /// Uses the `ty` field.
         alloc,
+        /// This is a special value that tracks a set of types that have been stored
+        /// to an inferred allocation. It does not support any of the normal value queries.
+        /// Uses the `ty_pl` field, payload is an index of `values` array.
+        inferred_alloc,
+        /// Used to coordinate alloc_inferred, store_to_inferred_ptr, and resolve_inferred_alloc
+        /// instructions for comptime code.
+        /// Uses the `ty_pl` field, payload is an index of `values` array.
+        inferred_alloc_comptime,
         /// If the function will pass the result by-ref, this instruction returns the
         /// result pointer. Otherwise it is equivalent to `alloc`.
         /// Uses the `ty` field.
         ret_ptr,
@@ -397,9 +405,6 @@ pub const Inst = struct {
         /// was executed on the operand.
         /// Uses the `ty_pl` field. Payload is `TryPtr`.
         try_ptr,
-        /// A comptime-known value. Uses the `ty_pl` field, payload is index of
-        /// `values` array.
-        constant,
         /// A comptime-known value via an index into the InternPool.
         /// Uses the `interned` field.
         interned,
@@ -1265,7 +1270,6 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index, ip: InternPool) Type {
 
         .assembly,
         .block,
-        .constant,
         .struct_field_ptr,
         .struct_field_val,
         .slice_elem_ptr,
@@ -1283,6 +1287,8 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index, ip: InternPool) Type {
         .sub_with_overflow,
         .mul_with_overflow,
         .shl_with_overflow,
+        .inferred_alloc,
+        .inferred_alloc_comptime,
         .ptr_add,
         .ptr_sub,
         .try_ptr,
@@ -1495,7 +1501,6 @@ pub fn value(air: Air, inst: Inst.Ref, mod: *Module) !?Value {
     const inst_index = @intCast(Air.Inst.Index, ref_int - ref_start_index);
     const air_datas = air.instructions.items(.data);
     switch (air.instructions.items(.tag)[inst_index]) {
-        .constant => return air.values[air_datas[inst_index].ty_pl.payload],
         .interned => return air_datas[inst_index].interned.toValue(),
         else => return air.typeOfIndex(inst_index, mod.intern_pool).onePossibleValue(mod),
     }
@@ -1603,6 +1608,8 @@ pub fn mustLower(air: Air, inst: Air.Inst.Index, ip: InternPool) bool {
         .mul_with_overflow,
         .shl_with_overflow,
         .alloc,
+        .inferred_alloc,
+        .inferred_alloc_comptime,
         .ret_ptr,
         .bit_and,
         .bit_or,
@@ -1651,7 +1658,6 @@ pub fn mustLower(air: Air, inst: Air.Inst.Index, ip: InternPool) bool {
         .cmp_neq_optimized,
         .cmp_vector,
         .cmp_vector_optimized,
-        .constant,
         .interned,
         .is_null,
         .is_non_null,
diff --git a/src/InternPool.zig b/src/InternPool.zig
index 429b86a8a6..dfde352600 100644
--- a/src/InternPool.zig
+++ b/src/InternPool.zig
@@ -515,10 +515,12 @@ pub const Key = union(enum) {
 
     pub const ErrorUnion = struct {
         ty: Index,
-        val: union(enum) {
+        val: Value,
+
+        pub const Value = union(enum) {
             err_name: NullTerminatedString,
             payload: Index,
-        },
+        };
     };
 
     pub const EnumTag = struct {
@@ -1068,7 +1070,7 @@ pub const Key = union(enum) {
             .false, .true => .bool_type,
             .empty_struct => .empty_struct_type,
             .@"unreachable" => .noreturn_type,
-            .generic_poison => unreachable,
+            .generic_poison => .generic_poison_type,
         },
     };
 }
@@ -2671,6 +2673,10 @@ pub fn indexToKey(ip: InternPool, index: Index) Key {
         .only_possible_value => {
             const ty = @intToEnum(Index, data);
             return switch (ip.indexToKey(ty)) {
+                .array_type, .vector_type => .{ .aggregate = .{
+                    .ty = ty,
+                    .storage = .{ .elems = &.{} },
+                } },
                 // TODO: migrate structs to properly use the InternPool rather
                 // than using the SegmentedList trick, then the struct type will
                 // have a slice of comptime values that can be used here for when
@@ -3184,7 +3190,11 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
             assert(!(try ip.map.getOrPutAdapted(gpa, key, adapter)).found_existing);
             try ip.items.ensureUnusedCapacity(gpa, 1);
             ip.items.appendAssumeCapacity(.{
-                .tag = .ptr_elem,
+                .tag = switch (ptr.addr) {
+                    .elem => .ptr_elem,
+                    .field => .ptr_field,
+                    else => unreachable,
+                },
                 .data = try ip.addExtra(gpa, PtrBaseIndex{
                     .ty = ptr.ty,
                     .base = base_index.base,
diff --git a/src/Liveness.zig b/src/Liveness.zig
index c30708e140..4f3d87d3c2 100644
--- a/src/Liveness.zig
+++ b/src/Liveness.zig
@@ -321,8 +321,9 @@ pub fn categorizeOperand(
 
         .arg,
         .alloc,
+        .inferred_alloc,
+        .inferred_alloc_comptime,
         .ret_ptr,
-        .constant,
         .interned,
         .trap,
         .breakpoint,
@@ -973,9 +974,7 @@ fn analyzeInst(
         .work_group_id,
         => return analyzeOperands(a, pass, data, inst, .{ .none, .none, .none }),
 
-        .constant,
-        .interned,
-        => unreachable,
+        .inferred_alloc, .inferred_alloc_comptime, .interned => unreachable,
 
         .trap,
         .unreach,
@@ -1269,10 +1268,7 @@ fn analyzeOperands(
                 const operand = Air.refToIndexAllowNone(op_ref) orelse continue;
 
                // Don't compute any liveness for constants
-                switch (inst_tags[operand]) {
-                    .constant, .interned => continue,
-                    else => {},
-                }
+                if (inst_tags[operand] == .interned) continue;
 
                 _ = try data.live_set.put(gpa, operand, {});
             }
@@ -1305,10 +1301,7 @@ fn analyzeOperands(
                 const operand = Air.refToIndexAllowNone(op_ref) orelse continue;
 
                 // Don't compute any liveness for constants
-                switch (inst_tags[operand]) {
-                    .constant, .interned => continue,
-                    else => {},
-                }
+                if (inst_tags[operand] == .interned) continue;
 
                 const mask = @as(Bpi, 1) << @intCast(OperandInt, i);
@@ -1839,10 +1832,7 @@ fn AnalyzeBigOperands(comptime pass: LivenessPass) type {
 
             // Don't compute any liveness for constants
             const inst_tags = big.a.air.instructions.items(.tag);
-            switch (inst_tags[operand]) {
-                .constant, .interned => return,
-                else => {},
-            }
+            if (inst_tags[operand] == .interned) return;
 
             // If our result is unused and the instruction doesn't need to be lowered, backends will
             // skip the lowering of this instruction, so we don't want to record uses of operands.
diff --git a/src/Liveness/Verify.zig b/src/Liveness/Verify.zig
index 703d561559..cbb7f9f143 100644
--- a/src/Liveness/Verify.zig
+++ b/src/Liveness/Verify.zig
@@ -41,8 +41,9 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
             // no operands
             .arg,
             .alloc,
+            .inferred_alloc,
+            .inferred_alloc_comptime,
             .ret_ptr,
-            .constant,
             .interned,
             .breakpoint,
             .dbg_stmt,
@@ -554,16 +555,18 @@ fn verifyDeath(self: *Verify, inst: Air.Inst.Index, operand: Air.Inst.Index) Err
 }
 
 fn verifyOperand(self: *Verify, inst: Air.Inst.Index, op_ref: Air.Inst.Ref, dies: bool) Error!void {
-    const operand = Air.refToIndexAllowNone(op_ref) orelse return;
-    switch (self.air.instructions.items(.tag)[operand]) {
-        .constant, .interned => {},
-        else => {
-            if (dies) {
-                if (!self.live.remove(operand)) return invalid("%{}: dead operand %{} reused and killed again", .{ inst, operand });
-            } else {
-                if (!self.live.contains(operand)) return invalid("%{}: dead operand %{} reused", .{ inst, operand });
-            }
-        },
+    const operand = Air.refToIndexAllowNone(op_ref) orelse {
+        assert(!dies);
+        return;
+    };
+    if (self.air.instructions.items(.tag)[operand] == .interned) {
+        assert(!dies);
+        return;
+    }
+    if (dies) {
+        if (!self.live.remove(operand)) return invalid("%{}: dead operand %{} reused and killed again", .{ inst, operand });
+    } else {
+        if (!self.live.contains(operand)) return invalid("%{}: dead operand %{} reused", .{ inst, operand });
     }
 }
 
@@ -576,16 +579,11 @@ fn verifyInst(
         const dies = self.liveness.operandDies(inst, @intCast(Liveness.OperandInt, operand_index));
         try self.verifyOperand(inst, operand, dies);
     }
-    const tag = self.air.instructions.items(.tag);
-    switch (tag[inst]) {
-        .constant, .interned => unreachable,
-        else => {
-            if (self.liveness.isUnused(inst)) {
-                assert(!self.live.contains(inst));
-            } else {
-                try self.live.putNoClobber(self.gpa, inst, {});
-            }
-        },
+    if (self.air.instructions.items(.tag)[inst] == .interned) return;
+    if (self.liveness.isUnused(inst)) {
+        assert(!self.live.contains(inst));
+    } else {
+        try self.live.putNoClobber(self.gpa, inst, {});
     }
 }
 
diff --git a/src/Module.zig b/src/Module.zig
index 47f7643b9f..d3045631c5 100644
--- a/src/Module.zig
+++ b/src/Module.zig
@@ -764,14 +764,7 @@ pub const Decl = struct {
 
     pub fn typedValue(decl: Decl) error{AnalysisFail}!TypedValue {
         if (!decl.has_tv) return error.AnalysisFail;
-        return TypedValue{
-            .ty = decl.ty,
-            .val = decl.val,
-        };
-    }
-
-    pub fn value(decl: *Decl) error{AnalysisFail}!Value {
-        return (try 
decl.typedValue()).val; + return TypedValue{ .ty = decl.ty, .val = decl.val }; } pub fn isFunction(decl: Decl, mod: *const Module) !bool { diff --git a/src/Sema.zig b/src/Sema.zig index a7416af286..55adc2fffb 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -1991,23 +1991,21 @@ fn resolveMaybeUndefValAllowVariablesMaybeRuntime( const i = int - InternPool.static_len; const air_tags = sema.air_instructions.items(.tag); if (try sema.typeHasOnePossibleValue(sema.typeOf(inst))) |opv| { - if (air_tags[i] == .constant) { - const ty_pl = sema.air_instructions.items(.data)[i].ty_pl; - const val = sema.air_values.items[ty_pl.payload]; + if (air_tags[i] == .interned) { + const interned = sema.air_instructions.items(.data)[i].interned; + const val = interned.toValue(); if (val.getVariable(sema.mod) != null) return val; } return opv; } const air_datas = sema.air_instructions.items(.data); switch (air_tags[i]) { - .constant => { - const ty_pl = air_datas[i].ty_pl; - const val = sema.air_values.items[ty_pl.payload]; + .interned => { + const val = air_datas[i].interned.toValue(); if (val.isRuntimeValue(sema.mod)) make_runtime.* = true; if (val.isPtrToThreadLocal(sema.mod)) make_runtime.* = true; return val; }, - .interned => return air_datas[i].interned.toValue(), else => return null, } } @@ -2440,64 +2438,64 @@ fn zirCoerceResultPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE const addr_space = target_util.defaultAddressSpace(target, .local); if (Air.refToIndex(ptr)) |ptr_inst| { - if (sema.air_instructions.items(.tag)[ptr_inst] == .constant) { - const air_datas = sema.air_instructions.items(.data); - const ptr_val = sema.air_values.items[air_datas[ptr_inst].ty_pl.payload]; - switch (ptr_val.tag()) { - .inferred_alloc => { - const inferred_alloc = &ptr_val.castTag(.inferred_alloc).?.data; - // Add the stored instruction to the set we will use to resolve peer types - // for the inferred allocation. - // This instruction will not make it to codegen; it is only to participate - // in the `stored_inst_list` of the `inferred_alloc`. - var trash_block = block.makeSubBlock(); - defer trash_block.instructions.deinit(sema.gpa); - const operand = try trash_block.addBitCast(pointee_ty, .void_value); - - const ptr_ty = try Type.ptr(sema.arena, sema.mod, .{ - .pointee_type = pointee_ty, - .@"align" = inferred_alloc.alignment, - .@"addrspace" = addr_space, - }); - const bitcasted_ptr = try block.addBitCast(ptr_ty, ptr); + switch (sema.air_instructions.items(.tag)[ptr_inst]) { + .inferred_alloc => { + const air_datas = sema.air_instructions.items(.data); + const ptr_val = sema.air_values.items[air_datas[ptr_inst].ty_pl.payload]; + const inferred_alloc = &ptr_val.castTag(.inferred_alloc).?.data; + // Add the stored instruction to the set we will use to resolve peer types + // for the inferred allocation. + // This instruction will not make it to codegen; it is only to participate + // in the `stored_inst_list` of the `inferred_alloc`. 
+ var trash_block = block.makeSubBlock(); + defer trash_block.instructions.deinit(sema.gpa); + const operand = try trash_block.addBitCast(pointee_ty, .void_value); + + const ptr_ty = try Type.ptr(sema.arena, sema.mod, .{ + .pointee_type = pointee_ty, + .@"align" = inferred_alloc.alignment, + .@"addrspace" = addr_space, + }); + const bitcasted_ptr = try block.addBitCast(ptr_ty, ptr); - try inferred_alloc.prongs.append(sema.arena, .{ - .stored_inst = operand, - .placeholder = Air.refToIndex(bitcasted_ptr).?, - }); + try inferred_alloc.prongs.append(sema.arena, .{ + .stored_inst = operand, + .placeholder = Air.refToIndex(bitcasted_ptr).?, + }); - return bitcasted_ptr; - }, - .inferred_alloc_comptime => { - const iac = ptr_val.castTag(.inferred_alloc_comptime).?; - // There will be only one coerce_result_ptr because we are running at comptime. - // The alloc will turn into a Decl. - var anon_decl = try block.startAnonDecl(); - defer anon_decl.deinit(); - iac.data.decl_index = try anon_decl.finish( - pointee_ty, - Value.undef, - iac.data.alignment, - ); - if (iac.data.alignment != 0) { - try sema.resolveTypeLayout(pointee_ty); - } - const ptr_ty = try Type.ptr(sema.arena, sema.mod, .{ - .pointee_type = pointee_ty, - .@"align" = iac.data.alignment, - .@"addrspace" = addr_space, - }); - try sema.maybeQueueFuncBodyAnalysis(iac.data.decl_index); - return sema.addConstant(ptr_ty, (try sema.mod.intern(.{ .ptr = .{ - .ty = ptr_ty.toIntern(), - .addr = .{ .mut_decl = .{ - .decl = iac.data.decl_index, - .runtime_index = block.runtime_index, - } }, - } })).toValue()); - }, - else => {}, - } + return bitcasted_ptr; + }, + .inferred_alloc_comptime => { + const air_datas = sema.air_instructions.items(.data); + const ptr_val = sema.air_values.items[air_datas[ptr_inst].ty_pl.payload]; + const iac = ptr_val.castTag(.inferred_alloc_comptime).?; + // There will be only one coerce_result_ptr because we are running at comptime. + // The alloc will turn into a Decl. 
+ var anon_decl = try block.startAnonDecl(); + defer anon_decl.deinit(); + iac.data.decl_index = try anon_decl.finish( + pointee_ty, + Value.undef, + iac.data.alignment, + ); + if (iac.data.alignment != 0) { + try sema.resolveTypeLayout(pointee_ty); + } + const ptr_ty = try Type.ptr(sema.arena, sema.mod, .{ + .pointee_type = pointee_ty, + .@"align" = iac.data.alignment, + .@"addrspace" = addr_space, + }); + try sema.maybeQueueFuncBodyAnalysis(iac.data.decl_index); + return sema.addConstant(ptr_ty, (try sema.mod.intern(.{ .ptr = .{ + .ty = ptr_ty.toIntern(), + .addr = .{ .mut_decl = .{ + .decl = iac.data.decl_index, + .runtime_index = block.runtime_index, + } }, + } })).toValue()); + }, + else => {}, } } @@ -3458,6 +3456,7 @@ fn zirAllocExtended( block: *Block, extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { + const gpa = sema.gpa; const extra = sema.code.extraData(Zir.Inst.AllocExtended, extended.operand); const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = extra.data.src_node }; const align_src: LazySrcLoc = .{ .node_offset_var_decl_align = extra.data.src_node }; @@ -3487,13 +3486,19 @@ fn zirAllocExtended( if (small.has_type) { return sema.analyzeComptimeAlloc(block, var_ty, alignment); } else { - return sema.addConstant( - inferred_alloc_ty, - try Value.Tag.inferred_alloc_comptime.create(sema.arena, .{ - .decl_index = undefined, - .alignment = alignment, - }), - ); + const ty_inst = try sema.addType(inferred_alloc_ty); + try sema.air_values.append(gpa, try Value.Tag.inferred_alloc_comptime.create(sema.arena, .{ + .decl_index = undefined, + .alignment = alignment, + })); + try sema.air_instructions.append(gpa, .{ + .tag = .inferred_alloc_comptime, + .data = .{ .ty_pl = .{ + .ty = ty_inst, + .payload = @intCast(u32, sema.air_values.items.len - 1), + } }, + }); + return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1)); } } @@ -3511,17 +3516,19 @@ fn zirAllocExtended( return block.addTy(.alloc, ptr_type); } - // `Sema.addConstant` does not add the instruction to the block because it is - // not needed in the case of constant values. However here, we plan to "downgrade" - // to a normal instruction when we hit `resolve_inferred_alloc`. So we append - // to the block even though it is currently a `.constant`. 
- const result = try sema.addConstant( - inferred_alloc_ty, - try Value.Tag.inferred_alloc.create(sema.arena, .{ .alignment = alignment }), - ); - try block.instructions.append(sema.gpa, Air.refToIndex(result).?); - try sema.unresolved_inferred_allocs.putNoClobber(sema.gpa, Air.refToIndex(result).?, {}); - return result; + const ty_inst = try sema.addType(inferred_alloc_ty); + try sema.air_values.append(gpa, try Value.Tag.inferred_alloc.create(sema.arena, .{ + .alignment = alignment, + })); + const result_index = try block.addInstAsIndex(.{ + .tag = .inferred_alloc, + .data = .{ .ty_pl = .{ + .ty = ty_inst, + .payload = @intCast(u32, sema.air_values.items.len - 1), + } }, + }); + try sema.unresolved_inferred_allocs.putNoClobber(gpa, result_index, {}); + return Air.indexToRef(result_index); } fn zirAllocComptime(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -3616,16 +3623,24 @@ fn zirAllocInferredComptime( inst: Zir.Inst.Index, inferred_alloc_ty: Type, ) CompileError!Air.Inst.Ref { + const gpa = sema.gpa; const src_node = sema.code.instructions.items(.data)[inst].node; const src = LazySrcLoc.nodeOffset(src_node); sema.src = src; - return sema.addConstant( - inferred_alloc_ty, - try Value.Tag.inferred_alloc_comptime.create(sema.arena, .{ - .decl_index = undefined, - .alignment = 0, - }), - ); + + const ty_inst = try sema.addType(inferred_alloc_ty); + try sema.air_values.append(gpa, try Value.Tag.inferred_alloc_comptime.create(sema.arena, .{ + .decl_index = undefined, + .alignment = 0, + })); + try sema.air_instructions.append(gpa, .{ + .tag = .inferred_alloc_comptime, + .data = .{ .ty_pl = .{ + .ty = ty_inst, + .payload = @intCast(u32, sema.air_values.items.len - 1), + } }, + }); + return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1)); } fn zirAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -3676,31 +3691,39 @@ fn zirAllocInferred( const tracy = trace(@src()); defer tracy.end(); + const gpa = sema.gpa; const src_node = sema.code.instructions.items(.data)[inst].node; const src = LazySrcLoc.nodeOffset(src_node); sema.src = src; + const ty_inst = try sema.addType(inferred_alloc_ty); if (block.is_comptime) { - return sema.addConstant( - inferred_alloc_ty, - try Value.Tag.inferred_alloc_comptime.create(sema.arena, .{ - .decl_index = undefined, - .alignment = 0, - }), - ); + try sema.air_values.append(gpa, try Value.Tag.inferred_alloc_comptime.create(sema.arena, .{ + .decl_index = undefined, + .alignment = 0, + })); + try sema.air_instructions.append(gpa, .{ + .tag = .inferred_alloc_comptime, + .data = .{ .ty_pl = .{ + .ty = ty_inst, + .payload = @intCast(u32, sema.air_values.items.len - 1), + } }, + }); + return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1)); } - // `Sema.addConstant` does not add the instruction to the block because it is - // not needed in the case of constant values. However here, we plan to "downgrade" - // to a normal instruction when we hit `resolve_inferred_alloc`. So we append - // to the block even though it is currently a `.constant`. 
- const result = try sema.addConstant( - inferred_alloc_ty, - try Value.Tag.inferred_alloc.create(sema.arena, .{ .alignment = 0 }), - ); - try block.instructions.append(sema.gpa, Air.refToIndex(result).?); - try sema.unresolved_inferred_allocs.putNoClobber(sema.gpa, Air.refToIndex(result).?, {}); - return result; + try sema.air_values.append(gpa, try Value.Tag.inferred_alloc.create(sema.arena, .{ + .alignment = 0, + })); + const result_index = try block.addInstAsIndex(.{ + .tag = .inferred_alloc, + .data = .{ .ty_pl = .{ + .ty = ty_inst, + .payload = @intCast(u32, sema.air_values.items.len - 1), + } }, + }); + try sema.unresolved_inferred_allocs.putNoClobber(gpa, result_index, {}); + return Air.indexToRef(result_index); } fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { @@ -3712,7 +3735,6 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = inst_data.src_node }; const ptr = try sema.resolveInst(inst_data.operand); const ptr_inst = Air.refToIndex(ptr).?; - assert(sema.air_instructions.items(.tag)[ptr_inst] == .constant); const value_index = sema.air_instructions.items(.data)[ptr_inst].ty_pl.payload; const ptr_val = sema.air_values.items[value_index]; const var_is_mut = switch (sema.typeOf(ptr).toIntern()) { @@ -3722,7 +3744,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com }; const target = sema.mod.getTarget(); - switch (ptr_val.tag()) { + switch (sema.air_instructions.items(.tag)[ptr_inst]) { .inferred_alloc_comptime => { const iac = ptr_val.castTag(.inferred_alloc_comptime).?; const decl_index = iac.data.decl_index; @@ -3767,7 +3789,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com // Detect if the value is comptime-known. 
In such case, the // last 3 AIR instructions of the block will look like this: // - // %a = constant + // %a = interned // %b = bitcast(%a) // %c = store(%b, %d) // @@ -3814,7 +3836,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com const candidate = block.instructions.items[search_index]; switch (air_tags[candidate]) { .dbg_stmt, .dbg_block_begin, .dbg_block_end => continue, - .constant => break candidate, + .interned => break candidate, else => break :ct, } }; @@ -4981,15 +5003,15 @@ fn zirStoreToBlockPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE const src: LazySrcLoc = sema.src; blk: { const ptr_inst = Air.refToIndex(ptr) orelse break :blk; - if (sema.air_instructions.items(.tag)[ptr_inst] != .constant) break :blk; - const air_datas = sema.air_instructions.items(.data); - const ptr_val = sema.air_values.items[air_datas[ptr_inst].ty_pl.payload]; - switch (ptr_val.tag()) { + const air_data = sema.air_instructions.items(.data)[ptr_inst]; + switch (sema.air_instructions.items(.tag)[ptr_inst]) { .inferred_alloc_comptime => { + const ptr_val = sema.air_values.items[air_data.ty_pl.payload]; const iac = ptr_val.castTag(.inferred_alloc_comptime).?; return sema.storeToInferredAllocComptime(block, src, operand, iac); }, .inferred_alloc => { + const ptr_val = sema.air_values.items[air_data.ty_pl.payload]; const inferred_alloc = ptr_val.castTag(.inferred_alloc).?; return sema.storeToInferredAlloc(block, ptr, operand, inferred_alloc); }, @@ -5009,11 +5031,10 @@ fn zirStoreToInferredPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compi const ptr = try sema.resolveInst(bin_inst.lhs); const operand = try sema.resolveInst(bin_inst.rhs); const ptr_inst = Air.refToIndex(ptr).?; - assert(sema.air_instructions.items(.tag)[ptr_inst] == .constant); const air_datas = sema.air_instructions.items(.data); const ptr_val = sema.air_values.items[air_datas[ptr_inst].ty_pl.payload]; - switch (ptr_val.tag()) { + switch (sema.air_instructions.items(.tag)[ptr_inst]) { .inferred_alloc_comptime => { const iac = ptr_val.castTag(.inferred_alloc_comptime).?; return sema.storeToInferredAllocComptime(block, src, operand, iac); @@ -6988,16 +7009,7 @@ fn analyzeCall( const res2: Air.Inst.Ref = res2: { if (should_memoize and is_comptime_call) { if (mod.memoized_calls.getContext(memoized_call_key, .{ .module = mod })) |result| { - const ty_inst = try sema.addType(fn_ret_ty); - try sema.air_values.append(gpa, result.val); - sema.air_instructions.set(block_inst, .{ - .tag = .constant, - .data = .{ .ty_pl = .{ - .ty = ty_inst, - .payload = @intCast(u32, sema.air_values.items.len - 1), - } }, - }); - break :res2 Air.indexToRef(block_inst); + break :res2 try sema.addConstant(fn_ret_ty, result.val); } } @@ -9407,7 +9419,7 @@ fn zirParam( if (is_comptime) { // If this is a comptime parameter we can add a constant generic_poison // since this is also a generic parameter. - const result = try sema.addConstant(param_ty, Value.generic_poison); + const result = try sema.addConstant(Type.generic_poison, Value.generic_poison); sema.inst_map.putAssumeCapacityNoClobber(inst, result); } else { // Otherwise we need a dummy runtime instruction. 
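The Sema hunks above all apply one mechanical split of the old `.constant` tag: genuinely comptime-known values become `.interned` instructions whose `data` field holds the InternPool index directly, while inferred allocations get dedicated `.inferred_alloc` and `.inferred_alloc_comptime` tags that keep the legacy `ty_pl` payload indexing `sema.air_values`. A minimal sketch of the dispatch this forces on consumers, using only the accessors these hunks already use (the helper name `resolvePtrInstValue` is illustrative, not part of the patch):

fn resolvePtrInstValue(sema: *Sema, ptr_inst: Air.Inst.Index) ?Value {
    switch (sema.air_instructions.items(.tag)[ptr_inst]) {
        // New representation: the value lives in the InternPool itself.
        .interned => return sema.air_instructions.items(.data)[ptr_inst].interned.toValue(),
        // Transitional representation: the ty_pl payload still indexes air_values.
        .inferred_alloc, .inferred_alloc_comptime => {
            const payload = sema.air_instructions.items(.data)[ptr_inst].ty_pl.payload;
            return sema.air_values.items[payload];
        },
        else => return null,
    }
}

This is the same three-way distinction that zirResolveInferredAlloc and zirStoreToInferredPtr perform inline above.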
@@ -15104,7 +15116,7 @@ fn analyzePtrArithmetic( if (air_tag == .ptr_sub) { return sema.fail(block, op_src, "TODO implement Sema comptime pointer subtraction", .{}); } - const new_ptr_val = try ptr_val.elemPtr(ptr_ty, offset_int, sema.mod); + const new_ptr_val = try ptr_val.elemPtr(new_ptr_ty, offset_int, sema.mod); return sema.addConstant(new_ptr_ty, new_ptr_val); } else break :rs offset_src; } else break :rs ptr_src; @@ -25378,8 +25390,8 @@ fn elemPtrOneLayerOnly( const ptr_val = maybe_ptr_val orelse break :rs indexable_src; const index_val = maybe_index_val orelse break :rs elem_index_src; const index = @intCast(usize, index_val.toUnsignedInt(mod)); - const elem_ptr = try ptr_val.elemPtr(indexable_ty, index, mod); const result_ty = try sema.elemPtrType(indexable_ty, index); + const elem_ptr = try ptr_val.elemPtr(result_ty, index, mod); return sema.addConstant(result_ty, elem_ptr); }; const result_ty = try sema.elemPtrType(indexable_ty, null); @@ -25424,8 +25436,9 @@ fn elemVal( const indexable_val = maybe_indexable_val orelse break :rs indexable_src; const index_val = maybe_index_val orelse break :rs elem_index_src; const index = @intCast(usize, index_val.toUnsignedInt(mod)); - const elem_ptr_val = try indexable_val.elemPtr(indexable_ty, index, mod); - if (try sema.pointerDeref(block, indexable_src, elem_ptr_val, indexable_ty)) |elem_val| { + const elem_ptr_ty = try sema.elemPtrType(indexable_ty, index); + const elem_ptr_val = try indexable_val.elemPtr(elem_ptr_ty, index, mod); + if (try sema.pointerDeref(block, indexable_src, elem_ptr_val, elem_ptr_ty)) |elem_val| { return sema.addConstant(indexable_ty.elemType2(mod), elem_val); } break :rs indexable_src; @@ -25684,7 +25697,7 @@ fn elemPtrArray( return sema.addConstUndef(elem_ptr_ty); } if (offset) |index| { - const elem_ptr = try array_ptr_val.elemPtr(array_ptr_ty, index, mod); + const elem_ptr = try array_ptr_val.elemPtr(elem_ptr_ty, index, mod); return sema.addConstant(elem_ptr_ty, elem_ptr); } } @@ -25740,8 +25753,9 @@ fn elemValSlice( const sentinel_label: []const u8 = if (slice_sent) " +1 (sentinel)" else ""; return sema.fail(block, elem_index_src, "index {d} outside slice of length {d}{s}", .{ index, slice_len, sentinel_label }); } - const elem_ptr_val = try slice_val.elemPtr(slice_ty, index, mod); - if (try sema.pointerDeref(block, slice_src, elem_ptr_val, slice_ty)) |elem_val| { + const elem_ptr_ty = try sema.elemPtrType(slice_ty, index); + const elem_ptr_val = try slice_val.elemPtr(elem_ptr_ty, index, mod); + if (try sema.pointerDeref(block, slice_src, elem_ptr_val, elem_ptr_ty)) |elem_val| { return sema.addConstant(elem_ty, elem_val); } runtime_src = slice_src; @@ -25800,7 +25814,7 @@ fn elemPtrSlice( const sentinel_label: []const u8 = if (slice_sent) " +1 (sentinel)" else ""; return sema.fail(block, elem_index_src, "index {d} outside slice of length {d}{s}", .{ index, slice_len, sentinel_label }); } - const elem_ptr_val = try slice_val.elemPtr(slice_ty, index, mod); + const elem_ptr_val = try slice_val.elemPtr(elem_ptr_ty, index, mod); return sema.addConstant(elem_ptr_ty, elem_ptr_val); } } @@ -25916,7 +25930,10 @@ fn coerceExtra( // null to ?T if (inst_ty.zigTypeTag(mod) == .Null) { - return sema.addConstant(dest_ty, Value.null); + return sema.addConstant(dest_ty, (try mod.intern(.{ .opt = .{ + .ty = dest_ty.toIntern(), + .val = .none, + } })).toValue()); } // cast from ?*T and ?[*]T to ?*anyopaque @@ -27665,43 +27682,40 @@ fn storePtrVal( switch (mut_kit.pointee) { .direct => |val_ptr| { if (mut_kit.mut_decl.runtime_index 
== .comptime_field_ptr) { - if (!operand_val.eql(val_ptr.*, operand_ty, sema.mod)) { + if (!operand_val.eql(val_ptr.*, operand_ty, mod)) { // TODO use failWithInvalidComptimeFieldStore return sema.fail(block, src, "value stored in comptime field does not match the default value of the field", .{}); } return; } - const arena = mut_kit.beginArena(sema.mod); - defer mut_kit.finishArena(sema.mod); - - val_ptr.* = try operand_val.copy(arena); + val_ptr.* = (try operand_val.intern(operand_ty, mod)).toValue(); }, .reinterpret => |reinterpret| { const abi_size = try sema.usizeCast(block, src, mut_kit.ty.abiSize(mod)); const buffer = try sema.gpa.alloc(u8, abi_size); defer sema.gpa.free(buffer); - reinterpret.val_ptr.*.writeToMemory(mut_kit.ty, sema.mod, buffer) catch |err| switch (err) { + reinterpret.val_ptr.*.writeToMemory(mut_kit.ty, mod, buffer) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, error.ReinterpretDeclRef => unreachable, error.IllDefinedMemoryLayout => unreachable, // Sema was supposed to emit a compile error already - error.Unimplemented => return sema.fail(block, src, "TODO: implement writeToMemory for type '{}'", .{mut_kit.ty.fmt(sema.mod)}), + error.Unimplemented => return sema.fail(block, src, "TODO: implement writeToMemory for type '{}'", .{mut_kit.ty.fmt(mod)}), }; - operand_val.writeToMemory(operand_ty, sema.mod, buffer[reinterpret.byte_offset..]) catch |err| switch (err) { + operand_val.writeToMemory(operand_ty, mod, buffer[reinterpret.byte_offset..]) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, error.ReinterpretDeclRef => unreachable, error.IllDefinedMemoryLayout => unreachable, // Sema was supposed to emit a compile error already - error.Unimplemented => return sema.fail(block, src, "TODO: implement writeToMemory for type '{}'", .{mut_kit.ty.fmt(sema.mod)}), + error.Unimplemented => return sema.fail(block, src, "TODO: implement writeToMemory for type '{}'", .{mut_kit.ty.fmt(mod)}), }; - const arena = mut_kit.beginArena(sema.mod); - defer mut_kit.finishArena(sema.mod); + const arena = mut_kit.beginArena(mod); + defer mut_kit.finishArena(mod); - reinterpret.val_ptr.* = try Value.readFromMemory(mut_kit.ty, sema.mod, buffer, arena); + reinterpret.val_ptr.* = (try (try Value.readFromMemory(mut_kit.ty, mod, buffer, arena)).intern(mut_kit.ty, mod)).toValue(); }, .bad_decl_ty, .bad_ptr_ty => { // TODO show the decl declaration site in a note and explain whether the decl // or the pointer is the problematic type - return sema.fail(block, src, "comptime mutation of a reinterpreted pointer requires type '{}' to have a well-defined memory layout", .{mut_kit.ty.fmt(sema.mod)}); + return sema.fail(block, src, "comptime mutation of a reinterpreted pointer requires type '{}' to have a well-defined memory layout", .{mut_kit.ty.fmt(mod)}); }, } } @@ -27754,7 +27768,7 @@ fn beginComptimePtrMutation( const mod = sema.mod; const ptr = mod.intern_pool.indexToKey(ptr_val.toIntern()).ptr; switch (ptr.addr) { - .decl => unreachable, // isComptimeMutablePtr has been checked already + .decl, .int => unreachable, // isComptimeMutablePtr has been checked already .mut_decl => |mut_decl| { const decl = mod.declPtr(mut_decl.decl); return sema.beginComptimePtrMutationInner(block, src, decl.ty, &decl.val, ptr_elem_ty, mut_decl); @@ -27767,546 +27781,472 @@ fn beginComptimePtrMutation( .runtime_index = .comptime_field_ptr, }); }, - else => unreachable, - } - if (true) unreachable; - switch (ptr_val.toIntern()) { - .none => switch (ptr_val.tag()) { - 
.decl_ref_mut => { - const decl_ref_mut = ptr_val.castTag(.decl_ref_mut).?.data; - const decl = sema.mod.declPtr(decl_ref_mut.decl_index); - return sema.beginComptimePtrMutationInner(block, src, decl.ty, &decl.val, ptr_elem_ty, decl_ref_mut); - }, - .comptime_field_ptr => { - const payload = ptr_val.castTag(.comptime_field_ptr).?.data; - const duped = try sema.arena.create(Value); - duped.* = payload.field_val; - return sema.beginComptimePtrMutationInner(block, src, payload.field_ty, duped, ptr_elem_ty, .{ - .decl_index = @intToEnum(Module.Decl.Index, 0), - .runtime_index = .comptime_field_ptr, - }); - }, - .elem_ptr => { - const elem_ptr = ptr_val.castTag(.elem_ptr).?.data; - var parent = try sema.beginComptimePtrMutation(block, src, elem_ptr.array_ptr, elem_ptr.elem_ty); - - switch (parent.pointee) { - .direct => |val_ptr| switch (parent.ty.zigTypeTag(mod)) { - .Array, .Vector => { - const check_len = parent.ty.arrayLenIncludingSentinel(mod); - if (elem_ptr.index >= check_len) { - // TODO have the parent include the decl so we can say "declared here" - return sema.fail(block, src, "comptime store of index {d} out of bounds of array length {d}", .{ - elem_ptr.index, check_len, - }); - } - const elem_ty = parent.ty.childType(mod); - - // We might have a pointer to multiple elements of the array (e.g. a pointer - // to a sub-array). In this case, we just have to reinterpret the relevant - // bytes of the whole array rather than any single element. - const elem_abi_size_u64 = try sema.typeAbiSize(elem_ptr.elem_ty); - if (elem_abi_size_u64 < try sema.typeAbiSize(ptr_elem_ty)) { - const elem_abi_size = try sema.usizeCast(block, src, elem_abi_size_u64); - return .{ - .decl_ref_mut = parent.decl_ref_mut, - .pointee = .{ .reinterpret = .{ - .val_ptr = val_ptr, - .byte_offset = elem_abi_size * elem_ptr.index, - } }, - .ty = parent.ty, - }; - } + .eu_payload => |eu_ptr| { + const eu_ty = mod.intern_pool.typeOf(eu_ptr).toType().childType(mod); + var parent = try sema.beginComptimePtrMutation(block, src, eu_ptr.toValue(), eu_ty); + switch (parent.pointee) { + .direct => |val_ptr| { + const payload_ty = parent.ty.errorUnionPayload(mod); + if (val_ptr.ip_index == .none and val_ptr.tag() == .eu_payload) { + return ComptimePtrMutationKit{ + .mut_decl = parent.mut_decl, + .pointee = .{ .direct = &val_ptr.castTag(.eu_payload).?.data }, + .ty = payload_ty, + }; + } else { + // An error union has been initialized to undefined at comptime and now we + // are for the first time setting the payload. We must change the + // representation of the error union from `undef` to `opt_payload`. + const arena = parent.beginArena(sema.mod); + defer parent.finishArena(sema.mod); + + const payload = try arena.create(Value.Payload.SubValue); + payload.* = .{ + .base = .{ .tag = .eu_payload }, + .data = Value.undef, + }; - switch (val_ptr.toIntern()) { - .undef => { - // An array has been initialized to undefined at comptime and now we - // are for the first time setting an element. We must change the representation - // of the array from `undef` to `array`. 
- const arena = parent.beginArena(sema.mod); - defer parent.finishArena(sema.mod); + val_ptr.* = Value.initPayload(&payload.base); - const array_len_including_sentinel = - try sema.usizeCast(block, src, parent.ty.arrayLenIncludingSentinel(mod)); - const elems = try arena.alloc(Value, array_len_including_sentinel); - @memset(elems, Value.undef); + return ComptimePtrMutationKit{ + .mut_decl = parent.mut_decl, + .pointee = .{ .direct = &payload.data }, + .ty = payload_ty, + }; + } + }, + .bad_decl_ty, .bad_ptr_ty => return parent, + // Even though the parent value type has well-defined memory layout, our + // pointer type does not. + .reinterpret => return ComptimePtrMutationKit{ + .mut_decl = parent.mut_decl, + .pointee = .bad_ptr_ty, + .ty = eu_ty, + }, + } + }, + .opt_payload => |opt_ptr| { + const opt_ty = mod.intern_pool.typeOf(opt_ptr).toType().childType(mod); + var parent = try sema.beginComptimePtrMutation(block, src, opt_ptr.toValue(), opt_ty); + switch (parent.pointee) { + .direct => |val_ptr| { + const payload_ty = parent.ty.optionalChild(mod); + switch (val_ptr.ip_index) { + .undef, .null_value => { + // An optional has been initialized to undefined at comptime and now we + // are for the first time setting the payload. We must change the + // representation of the optional from `undef` to `opt_payload`. + const arena = parent.beginArena(sema.mod); + defer parent.finishArena(sema.mod); - val_ptr.* = try Value.Tag.aggregate.create(arena, elems); + const payload = try arena.create(Value.Payload.SubValue); + payload.* = .{ + .base = .{ .tag = .opt_payload }, + .data = Value.undef, + }; - return beginComptimePtrMutationInner( - sema, - block, - src, - elem_ty, - &elems[elem_ptr.index], - ptr_elem_ty, - parent.decl_ref_mut, - ); - }, - .none => switch (val_ptr.tag()) { - .bytes => { - // An array is memory-optimized to store a slice of bytes, but we are about - // to modify an individual field and the representation has to change. - // If we wanted to avoid this, there would need to be special detection - // elsewhere to identify when writing a value to an array element that is stored - // using the `bytes` tag, and handle it without making a call to this function. - const arena = parent.beginArena(sema.mod); - defer parent.finishArena(sema.mod); - - const bytes = val_ptr.castTag(.bytes).?.data; - const dest_len = parent.ty.arrayLenIncludingSentinel(mod); - // bytes.len may be one greater than dest_len because of the case when - // assigning `[N:S]T` to `[N]T`. This is allowed; the sentinel is omitted. - assert(bytes.len >= dest_len); - const elems = try arena.alloc(Value, @intCast(usize, dest_len)); - for (elems, 0..) |*elem, i| { - elem.* = try mod.intValue(elem_ty, bytes[i]); - } - - val_ptr.* = try Value.Tag.aggregate.create(arena, elems); - - return beginComptimePtrMutationInner( - sema, - block, - src, - elem_ty, - &elems[elem_ptr.index], - ptr_elem_ty, - parent.decl_ref_mut, - ); - }, - .str_lit => { - // An array is memory-optimized to store a slice of bytes, but we are about - // to modify an individual field and the representation has to change. - // If we wanted to avoid this, there would need to be special detection - // elsewhere to identify when writing a value to an array element that is stored - // using the `str_lit` tag, and handle it without making a call to this function. 
- const arena = parent.beginArena(sema.mod); - defer parent.finishArena(sema.mod); - - const str_lit = val_ptr.castTag(.str_lit).?.data; - const dest_len = parent.ty.arrayLenIncludingSentinel(mod); - const bytes = sema.mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; - const elems = try arena.alloc(Value, @intCast(usize, dest_len)); - for (bytes, 0..) |byte, i| { - elems[i] = try mod.intValue(elem_ty, byte); - } - if (parent.ty.sentinel(mod)) |sent_val| { - assert(elems.len == bytes.len + 1); - elems[bytes.len] = sent_val; - } - - val_ptr.* = try Value.Tag.aggregate.create(arena, elems); - - return beginComptimePtrMutationInner( - sema, - block, - src, - elem_ty, - &elems[elem_ptr.index], - ptr_elem_ty, - parent.decl_ref_mut, - ); - }, - .repeated => { - // An array is memory-optimized to store only a single element value, and - // that value is understood to be the same for the entire length of the array. - // However, now we want to modify an individual field and so the - // representation has to change. If we wanted to avoid this, there would - // need to be special detection elsewhere to identify when writing a value to an - // array element that is stored using the `repeated` tag, and handle it - // without making a call to this function. - const arena = parent.beginArena(sema.mod); - defer parent.finishArena(sema.mod); - - const repeated_val = try val_ptr.castTag(.repeated).?.data.copy(arena); - const array_len_including_sentinel = - try sema.usizeCast(block, src, parent.ty.arrayLenIncludingSentinel(mod)); - const elems = try arena.alloc(Value, array_len_including_sentinel); - if (elems.len > 0) elems[0] = repeated_val; - for (elems[1..]) |*elem| { - elem.* = try repeated_val.copy(arena); - } - - val_ptr.* = try Value.Tag.aggregate.create(arena, elems); - - return beginComptimePtrMutationInner( - sema, - block, - src, - elem_ty, - &elems[elem_ptr.index], - ptr_elem_ty, - parent.decl_ref_mut, - ); - }, - - .aggregate => return beginComptimePtrMutationInner( - sema, - block, - src, - elem_ty, - &val_ptr.castTag(.aggregate).?.data[elem_ptr.index], - ptr_elem_ty, - parent.decl_ref_mut, - ), + val_ptr.* = Value.initPayload(&payload.base); - .the_only_possible_value => { - const duped = try sema.arena.create(Value); - duped.* = Value.initTag(.the_only_possible_value); - return beginComptimePtrMutationInner( - sema, - block, - src, - elem_ty, - duped, - ptr_elem_ty, - parent.decl_ref_mut, - ); - }, + return ComptimePtrMutationKit{ + .mut_decl = parent.mut_decl, + .pointee = .{ .direct = &payload.data }, + .ty = payload_ty, + }; + }, + .none => switch (val_ptr.tag()) { + .opt_payload => return ComptimePtrMutationKit{ + .mut_decl = parent.mut_decl, + .pointee = .{ .direct = &val_ptr.castTag(.opt_payload).?.data }, + .ty = payload_ty, + }, - else => unreachable, - }, - else => unreachable, - } + else => return ComptimePtrMutationKit{ + .mut_decl = parent.mut_decl, + .pointee = .{ .direct = val_ptr }, + .ty = payload_ty, + }, }, - else => { - if (elem_ptr.index != 0) { - // TODO include a "declared here" note for the decl - return sema.fail(block, src, "out of bounds comptime store of index {d}", .{ - elem_ptr.index, - }); - } - return beginComptimePtrMutationInner( - sema, - block, - src, - parent.ty, - val_ptr, - ptr_elem_ty, - parent.decl_ref_mut, - ); + else => return ComptimePtrMutationKit{ + .mut_decl = parent.mut_decl, + .pointee = .{ .direct = val_ptr }, + .ty = payload_ty, }, - }, - .reinterpret => |reinterpret| { - if (!elem_ptr.elem_ty.hasWellDefinedLayout(mod)) { - 
// Even though the parent value type has well-defined memory layout, our - // pointer type does not. - return ComptimePtrMutationKit{ - .decl_ref_mut = parent.decl_ref_mut, - .pointee = .bad_ptr_ty, - .ty = elem_ptr.elem_ty, + } + }, + .bad_decl_ty, .bad_ptr_ty => return parent, + // Even though the parent value type has well-defined memory layout, our + // pointer type does not. + .reinterpret => return ComptimePtrMutationKit{ + .mut_decl = parent.mut_decl, + .pointee = .bad_ptr_ty, + .ty = opt_ty, + }, + } + }, + .elem => |elem_ptr| { + const base_elem_ty = mod.intern_pool.typeOf(elem_ptr.base).toType().elemType2(mod); + var parent = try sema.beginComptimePtrMutation(block, src, elem_ptr.base.toValue(), base_elem_ty); + + switch (parent.pointee) { + .direct => |val_ptr| switch (parent.ty.zigTypeTag(mod)) { + .Array, .Vector => { + const check_len = parent.ty.arrayLenIncludingSentinel(mod); + if (elem_ptr.index >= check_len) { + // TODO have the parent include the decl so we can say "declared here" + return sema.fail(block, src, "comptime store of index {d} out of bounds of array length {d}", .{ + elem_ptr.index, check_len, + }); + } + const elem_ty = parent.ty.childType(mod); + + // We might have a pointer to multiple elements of the array (e.g. a pointer + // to a sub-array). In this case, we just have to reinterpret the relevant + // bytes of the whole array rather than any single element. + const elem_abi_size_u64 = try sema.typeAbiSize(base_elem_ty); + if (elem_abi_size_u64 < try sema.typeAbiSize(ptr_elem_ty)) { + const elem_abi_size = try sema.usizeCast(block, src, elem_abi_size_u64); + return .{ + .mut_decl = parent.mut_decl, + .pointee = .{ .reinterpret = .{ + .val_ptr = val_ptr, + .byte_offset = elem_abi_size * elem_ptr.index, + } }, + .ty = parent.ty, }; } - const elem_abi_size_u64 = try sema.typeAbiSize(elem_ptr.elem_ty); - const elem_abi_size = try sema.usizeCast(block, src, elem_abi_size_u64); - return ComptimePtrMutationKit{ - .decl_ref_mut = parent.decl_ref_mut, - .pointee = .{ .reinterpret = .{ - .val_ptr = reinterpret.val_ptr, - .byte_offset = reinterpret.byte_offset + elem_abi_size * elem_ptr.index, - } }, - .ty = parent.ty, - }; - }, - .bad_decl_ty, .bad_ptr_ty => return parent, - } - }, - .field_ptr => { - const field_ptr = ptr_val.castTag(.field_ptr).?.data; - const field_index = @intCast(u32, field_ptr.field_index); + switch (val_ptr.ip_index) { + .undef => { + // An array has been initialized to undefined at comptime and now we + // are for the first time setting an element. We must change the representation + // of the array from `undef` to `array`. + const arena = parent.beginArena(sema.mod); + defer parent.finishArena(sema.mod); - var parent = try sema.beginComptimePtrMutation(block, src, field_ptr.container_ptr, field_ptr.container_ty); - switch (parent.pointee) { - .direct => |val_ptr| switch (val_ptr.toIntern()) { - .undef => { - // A struct or union has been initialized to undefined at comptime and now we - // are for the first time setting a field. We must change the representation - // of the struct/union from `undef` to `struct`/`union`. 
- const arena = parent.beginArena(sema.mod); - defer parent.finishArena(sema.mod); + const array_len_including_sentinel = + try sema.usizeCast(block, src, parent.ty.arrayLenIncludingSentinel(mod)); + const elems = try arena.alloc(Value, array_len_including_sentinel); + @memset(elems, Value.undef); - switch (parent.ty.zigTypeTag(mod)) { - .Struct => { - const fields = try arena.alloc(Value, parent.ty.structFieldCount(mod)); - @memset(fields, Value.undef); + val_ptr.* = try Value.Tag.aggregate.create(arena, elems); - val_ptr.* = try Value.Tag.aggregate.create(arena, fields); + return beginComptimePtrMutationInner( + sema, + block, + src, + elem_ty, + &elems[elem_ptr.index], + ptr_elem_ty, + parent.mut_decl, + ); + }, + .none => switch (val_ptr.tag()) { + .bytes => { + // An array is memory-optimized to store a slice of bytes, but we are about + // to modify an individual field and the representation has to change. + // If we wanted to avoid this, there would need to be special detection + // elsewhere to identify when writing a value to an array element that is stored + // using the `bytes` tag, and handle it without making a call to this function. + const arena = parent.beginArena(sema.mod); + defer parent.finishArena(sema.mod); + + const bytes = val_ptr.castTag(.bytes).?.data; + const dest_len = parent.ty.arrayLenIncludingSentinel(mod); + // bytes.len may be one greater than dest_len because of the case when + // assigning `[N:S]T` to `[N]T`. This is allowed; the sentinel is omitted. + assert(bytes.len >= dest_len); + const elems = try arena.alloc(Value, @intCast(usize, dest_len)); + for (elems, 0..) |*elem, i| { + elem.* = try mod.intValue(elem_ty, bytes[i]); + } + + val_ptr.* = try Value.Tag.aggregate.create(arena, elems); return beginComptimePtrMutationInner( sema, block, src, - parent.ty.structFieldType(field_index, mod), - &fields[field_index], + elem_ty, + &elems[elem_ptr.index], ptr_elem_ty, - parent.decl_ref_mut, + parent.mut_decl, ); }, - .Union => { - const payload = try arena.create(Value.Payload.Union); - const tag_ty = parent.ty.unionTagTypeHypothetical(mod); - payload.* = .{ .data = .{ - .tag = try mod.enumValueFieldIndex(tag_ty, field_index), - .val = Value.undef, - } }; + .repeated => { + // An array is memory-optimized to store only a single element value, and + // that value is understood to be the same for the entire length of the array. + // However, now we want to modify an individual field and so the + // representation has to change. If we wanted to avoid this, there would + // need to be special detection elsewhere to identify when writing a value to an + // array element that is stored using the `repeated` tag, and handle it + // without making a call to this function. 
+ const arena = parent.beginArena(sema.mod); + defer parent.finishArena(sema.mod); + + const repeated_val = try val_ptr.castTag(.repeated).?.data.copy(arena); + const array_len_including_sentinel = + try sema.usizeCast(block, src, parent.ty.arrayLenIncludingSentinel(mod)); + const elems = try arena.alloc(Value, array_len_including_sentinel); + if (elems.len > 0) elems[0] = repeated_val; + for (elems[1..]) |*elem| { + elem.* = try repeated_val.copy(arena); + } - val_ptr.* = Value.initPayload(&payload.base); + val_ptr.* = try Value.Tag.aggregate.create(arena, elems); return beginComptimePtrMutationInner( sema, block, src, - parent.ty.structFieldType(field_index, mod), - &payload.data.val, + elem_ty, + &elems[elem_ptr.index], ptr_elem_ty, - parent.decl_ref_mut, + parent.mut_decl, ); }, - .Pointer => { - assert(parent.ty.isSlice(mod)); - val_ptr.* = try Value.Tag.slice.create(arena, .{ - .ptr = Value.undef, - .len = Value.undef, - }); - - switch (field_index) { - Value.Payload.Slice.ptr_index => return beginComptimePtrMutationInner( - sema, - block, - src, - parent.ty.slicePtrFieldType(mod), - &val_ptr.castTag(.slice).?.data.ptr, - ptr_elem_ty, - parent.decl_ref_mut, - ), - Value.Payload.Slice.len_index => return beginComptimePtrMutationInner( - sema, - block, - src, - Type.usize, - &val_ptr.castTag(.slice).?.data.len, - ptr_elem_ty, - parent.decl_ref_mut, - ), - - else => unreachable, - } - }, + + .aggregate => return beginComptimePtrMutationInner( + sema, + block, + src, + elem_ty, + &val_ptr.castTag(.aggregate).?.data[elem_ptr.index], + ptr_elem_ty, + parent.mut_decl, + ), + else => unreachable, - } - }, - .empty_struct => { - const duped = try sema.arena.create(Value); - duped.* = Value.initTag(.the_only_possible_value); - return beginComptimePtrMutationInner( - sema, - block, - src, - parent.ty.structFieldType(field_index, mod), - duped, - ptr_elem_ty, - parent.decl_ref_mut, - ); - }, - .none => switch (val_ptr.tag()) { - .aggregate => return beginComptimePtrMutationInner( - sema, - block, - src, - parent.ty.structFieldType(field_index, mod), - &val_ptr.castTag(.aggregate).?.data[field_index], - ptr_elem_ty, - parent.decl_ref_mut, - ), - .repeated => { - const arena = parent.beginArena(sema.mod); - defer parent.finishArena(sema.mod); + }, + else => unreachable, + } + }, + else => { + if (elem_ptr.index != 0) { + // TODO include a "declared here" note for the decl + return sema.fail(block, src, "out of bounds comptime store of index {d}", .{ + elem_ptr.index, + }); + } + return beginComptimePtrMutationInner( + sema, + block, + src, + parent.ty, + val_ptr, + ptr_elem_ty, + parent.mut_decl, + ); + }, + }, + .reinterpret => |reinterpret| { + if (!base_elem_ty.hasWellDefinedLayout(mod)) { + // Even though the parent value type has well-defined memory layout, our + // pointer type does not. 
+ return ComptimePtrMutationKit{ + .mut_decl = parent.mut_decl, + .pointee = .bad_ptr_ty, + .ty = base_elem_ty, + }; + } - const elems = try arena.alloc(Value, parent.ty.structFieldCount(mod)); - @memset(elems, val_ptr.castTag(.repeated).?.data); - val_ptr.* = try Value.Tag.aggregate.create(arena, elems); + const elem_abi_size_u64 = try sema.typeAbiSize(base_elem_ty); + const elem_abi_size = try sema.usizeCast(block, src, elem_abi_size_u64); + return ComptimePtrMutationKit{ + .mut_decl = parent.mut_decl, + .pointee = .{ .reinterpret = .{ + .val_ptr = reinterpret.val_ptr, + .byte_offset = reinterpret.byte_offset + elem_abi_size * elem_ptr.index, + } }, + .ty = parent.ty, + }; + }, + .bad_decl_ty, .bad_ptr_ty => return parent, + } + }, + .field => |field_ptr| { + const base_child_ty = mod.intern_pool.typeOf(field_ptr.base).toType().childType(mod); + const field_index = @intCast(u32, field_ptr.index); + + var parent = try sema.beginComptimePtrMutation(block, src, field_ptr.base.toValue(), base_child_ty); + switch (parent.pointee) { + .direct => |val_ptr| switch (val_ptr.ip_index) { + .undef => { + // A struct or union has been initialized to undefined at comptime and now we + // are for the first time setting a field. We must change the representation + // of the struct/union from `undef` to `struct`/`union`. + const arena = parent.beginArena(sema.mod); + defer parent.finishArena(sema.mod); + + switch (parent.ty.zigTypeTag(mod)) { + .Struct => { + const fields = try arena.alloc(Value, parent.ty.structFieldCount(mod)); + @memset(fields, Value.undef); + + val_ptr.* = try Value.Tag.aggregate.create(arena, fields); return beginComptimePtrMutationInner( sema, block, src, parent.ty.structFieldType(field_index, mod), - &elems[field_index], + &fields[field_index], ptr_elem_ty, - parent.decl_ref_mut, + parent.mut_decl, ); }, - .@"union" => { - // We need to set the active field of the union. 
- const union_tag_ty = field_ptr.container_ty.unionTagTypeHypothetical(mod); + .Union => { + const payload = try arena.create(Value.Payload.Union); + const tag_ty = parent.ty.unionTagTypeHypothetical(mod); + payload.* = .{ .data = .{ + .tag = try mod.enumValueFieldIndex(tag_ty, field_index), + .val = Value.undef, + } }; - const payload = &val_ptr.castTag(.@"union").?.data; - payload.tag = try mod.enumValueFieldIndex(union_tag_ty, field_index); + val_ptr.* = Value.initPayload(&payload.base); return beginComptimePtrMutationInner( sema, block, src, parent.ty.structFieldType(field_index, mod), - &payload.val, + &payload.data.val, ptr_elem_ty, - parent.decl_ref_mut, + parent.mut_decl, ); }, - .slice => switch (field_index) { - Value.Payload.Slice.ptr_index => return beginComptimePtrMutationInner( - sema, - block, - src, - parent.ty.slicePtrFieldType(mod), - &val_ptr.castTag(.slice).?.data.ptr, - ptr_elem_ty, - parent.decl_ref_mut, - ), + .Pointer => { + assert(parent.ty.isSlice(mod)); + val_ptr.* = try Value.Tag.slice.create(arena, .{ + .ptr = Value.undef, + .len = Value.undef, + }); - Value.Payload.Slice.len_index => return beginComptimePtrMutationInner( - sema, - block, - src, - Type.usize, - &val_ptr.castTag(.slice).?.data.len, - ptr_elem_ty, - parent.decl_ref_mut, - ), + switch (field_index) { + Value.slice_ptr_index => return beginComptimePtrMutationInner( + sema, + block, + src, + parent.ty.slicePtrFieldType(mod), + &val_ptr.castTag(.slice).?.data.ptr, + ptr_elem_ty, + parent.mut_decl, + ), + Value.slice_len_index => return beginComptimePtrMutationInner( + sema, + block, + src, + Type.usize, + &val_ptr.castTag(.slice).?.data.len, + ptr_elem_ty, + parent.mut_decl, + ), - else => unreachable, + else => unreachable, + } }, - else => unreachable, - }, - else => unreachable, + } }, - .reinterpret => |reinterpret| { - const field_offset_u64 = field_ptr.container_ty.structFieldOffset(field_index, mod); - const field_offset = try sema.usizeCast(block, src, field_offset_u64); - return ComptimePtrMutationKit{ - .decl_ref_mut = parent.decl_ref_mut, - .pointee = .{ .reinterpret = .{ - .val_ptr = reinterpret.val_ptr, - .byte_offset = reinterpret.byte_offset + field_offset, - } }, - .ty = parent.ty, - }; + .empty_struct => { + const duped = try sema.arena.create(Value); + duped.* = val_ptr.*; + return beginComptimePtrMutationInner( + sema, + block, + src, + parent.ty.structFieldType(field_index, mod), + duped, + ptr_elem_ty, + parent.mut_decl, + ); }, - .bad_decl_ty, .bad_ptr_ty => return parent, - } - }, - .eu_payload_ptr => { - const eu_ptr = ptr_val.castTag(.eu_payload_ptr).?.data; - var parent = try sema.beginComptimePtrMutation(block, src, eu_ptr.container_ptr, eu_ptr.container_ty); - switch (parent.pointee) { - .direct => |val_ptr| { - const payload_ty = parent.ty.errorUnionPayload(mod); - if (val_ptr.ip_index == .none and val_ptr.tag() == .eu_payload) { - return ComptimePtrMutationKit{ - .decl_ref_mut = parent.decl_ref_mut, - .pointee = .{ .direct = &val_ptr.castTag(.eu_payload).?.data }, - .ty = payload_ty, - }; - } else { - // An error union has been initialized to undefined at comptime and now we - // are for the first time setting the payload. We must change the - // representation of the error union from `undef` to `opt_payload`. 
+ .none => switch (val_ptr.tag()) { + .aggregate => return beginComptimePtrMutationInner( + sema, + block, + src, + parent.ty.structFieldType(field_index, mod), + &val_ptr.castTag(.aggregate).?.data[field_index], + ptr_elem_ty, + parent.mut_decl, + ), + .repeated => { const arena = parent.beginArena(sema.mod); defer parent.finishArena(sema.mod); - const payload = try arena.create(Value.Payload.SubValue); - payload.* = .{ - .base = .{ .tag = .eu_payload }, - .data = Value.undef, - }; + const elems = try arena.alloc(Value, parent.ty.structFieldCount(mod)); + @memset(elems, val_ptr.castTag(.repeated).?.data); + val_ptr.* = try Value.Tag.aggregate.create(arena, elems); - val_ptr.* = Value.initPayload(&payload.base); + return beginComptimePtrMutationInner( + sema, + block, + src, + parent.ty.structFieldType(field_index, mod), + &elems[field_index], + ptr_elem_ty, + parent.mut_decl, + ); + }, + .@"union" => { + // We need to set the active field of the union. + const union_tag_ty = base_child_ty.unionTagTypeHypothetical(mod); - return ComptimePtrMutationKit{ - .decl_ref_mut = parent.decl_ref_mut, - .pointee = .{ .direct = &payload.data }, - .ty = payload_ty, - }; - } - }, - .bad_decl_ty, .bad_ptr_ty => return parent, - // Even though the parent value type has well-defined memory layout, our - // pointer type does not. - .reinterpret => return ComptimePtrMutationKit{ - .decl_ref_mut = parent.decl_ref_mut, - .pointee = .bad_ptr_ty, - .ty = eu_ptr.container_ty, - }, - } - }, - .opt_payload_ptr => { - const opt_ptr = if (ptr_val.castTag(.opt_payload_ptr)) |some| some.data else { - return sema.beginComptimePtrMutation(block, src, ptr_val, ptr_elem_ty.optionalChild(mod)); - }; - var parent = try sema.beginComptimePtrMutation(block, src, opt_ptr.container_ptr, opt_ptr.container_ty); - switch (parent.pointee) { - .direct => |val_ptr| { - const payload_ty = parent.ty.optionalChild(mod); - switch (val_ptr.toIntern()) { - .undef, .null_value => { - // An optional has been initialized to undefined at comptime and now we - // are for the first time setting the payload. We must change the - // representation of the optional from `undef` to `opt_payload`. 
- const arena = parent.beginArena(sema.mod); - defer parent.finishArena(sema.mod); + const payload = &val_ptr.castTag(.@"union").?.data; + payload.tag = try mod.enumValueFieldIndex(union_tag_ty, field_index); - const payload = try arena.create(Value.Payload.SubValue); - payload.* = .{ - .base = .{ .tag = .opt_payload }, - .data = Value.undef, - }; + return beginComptimePtrMutationInner( + sema, + block, + src, + parent.ty.structFieldType(field_index, mod), + &payload.val, + ptr_elem_ty, + parent.mut_decl, + ); + }, + .slice => switch (field_index) { + Value.slice_ptr_index => return beginComptimePtrMutationInner( + sema, + block, + src, + parent.ty.slicePtrFieldType(mod), + &val_ptr.castTag(.slice).?.data.ptr, + ptr_elem_ty, + parent.mut_decl, + ), - val_ptr.* = Value.initPayload(&payload.base); + Value.slice_len_index => return beginComptimePtrMutationInner( + sema, + block, + src, + Type.usize, + &val_ptr.castTag(.slice).?.data.len, + ptr_elem_ty, + parent.mut_decl, + ), - return ComptimePtrMutationKit{ - .decl_ref_mut = parent.decl_ref_mut, - .pointee = .{ .direct = &payload.data }, - .ty = payload_ty, - }; - }, - .none => switch (val_ptr.tag()) { - .opt_payload => return ComptimePtrMutationKit{ - .decl_ref_mut = parent.decl_ref_mut, - .pointee = .{ .direct = &val_ptr.castTag(.opt_payload).?.data }, - .ty = payload_ty, - }, + else => unreachable, + }, - else => return ComptimePtrMutationKit{ - .decl_ref_mut = parent.decl_ref_mut, - .pointee = .{ .direct = val_ptr }, - .ty = payload_ty, - }, - }, - else => return ComptimePtrMutationKit{ - .decl_ref_mut = parent.decl_ref_mut, - .pointee = .{ .direct = val_ptr }, - .ty = payload_ty, - }, - } - }, - .bad_decl_ty, .bad_ptr_ty => return parent, - // Even though the parent value type has well-defined memory layout, our - // pointer type does not. 
- .reinterpret => return ComptimePtrMutationKit{ - .decl_ref_mut = parent.decl_ref_mut, - .pointee = .bad_ptr_ty, - .ty = opt_ptr.container_ty, + else => unreachable, }, - } - }, - .decl_ref => unreachable, // isComptimeMutablePtr has been checked already - else => unreachable, - }, - else => switch (mod.intern_pool.indexToKey(ptr_val.toIntern()).ptr) { - else => unreachable, + else => unreachable, + }, + .reinterpret => |reinterpret| { + const field_offset_u64 = base_child_ty.structFieldOffset(field_index, mod); + const field_offset = try sema.usizeCast(block, src, field_offset_u64); + return ComptimePtrMutationKit{ + .mut_decl = parent.mut_decl, + .pointee = .{ .reinterpret = .{ + .val_ptr = reinterpret.val_ptr, + .byte_offset = reinterpret.byte_offset + field_offset, + } }, + .ty = parent.ty, + }; + }, + .bad_decl_ty, .bad_ptr_ty => return parent, + } }, } } @@ -28418,6 +28358,7 @@ fn beginComptimePtrLoad( .mut_decl => |mut_decl| mut_decl.decl, else => unreachable, }; + const is_mutable = ptr.addr == .mut_decl; const decl = mod.declPtr(decl_index); const decl_tv = try decl.typedValue(); if (decl.getVariable(mod) != null) return error.RuntimeLoad; @@ -28426,7 +28367,7 @@ fn beginComptimePtrLoad( break :blk ComptimePtrLoadKit{ .parent = if (layout_defined) .{ .tv = decl_tv, .byte_offset = 0 } else null, .pointee = decl_tv, - .is_mutable = false, + .is_mutable = is_mutable, .ty_without_well_defined_layout = if (!layout_defined) decl.ty else null, }; }, @@ -29411,7 +29352,7 @@ fn analyzeDeclVal( const decl_ref = try sema.analyzeDeclRefInner(decl_index, false); const result = try sema.analyzeLoad(block, src, decl_ref, src); if (Air.refToIndex(result)) |index| { - if (sema.air_instructions.items(.tag)[index] == .constant and !block.is_typeof) { + if (sema.air_instructions.items(.tag)[index] == .interned and !block.is_typeof) { try sema.decl_val_table.put(sema.gpa, decl_index, result); } } @@ -30049,8 +29990,8 @@ fn analyzeSlice( const end_int = end_val.getUnsignedInt(mod).?; const sentinel_index = try sema.usizeCast(block, end_src, end_int - start_int); - const elem_ptr = try ptr_val.elemPtr(new_ptr_ty, sentinel_index, sema.mod); - const res = try sema.pointerDerefExtra(block, src, elem_ptr, elem_ty, false); + const elem_ptr = try ptr_val.elemPtr(try sema.elemPtrType(new_ptr_ty, sentinel_index), sentinel_index, sema.mod); + const res = try sema.pointerDerefExtra(block, src, elem_ptr, elem_ty); const actual_sentinel = switch (res) { .runtime_load => break :sentinel_check, .val => |v| v, @@ -33421,35 +33362,24 @@ fn addConstUndef(sema: *Sema, ty: Type) CompileError!Air.Inst.Ref { } pub fn addConstant(sema: *Sema, ty: Type, val: Value) SemaError!Air.Inst.Ref { + const mod = sema.mod; const gpa = sema.gpa; - if (val.ip_index != .none) { - if (@enumToInt(val.toIntern()) < Air.ref_start_index) - return @intToEnum(Air.Inst.Ref, @enumToInt(val.toIntern())); - try sema.air_instructions.append(gpa, .{ - .tag = .interned, - .data = .{ .interned = val.toIntern() }, - }); - const result = Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1)); - // This assertion can be removed when the `ty` parameter is removed from - // this function thanks to the InternPool transition being complete. 
- if (std.debug.runtime_safety) { - const val_ty = sema.typeOf(result); - if (!Type.eql(val_ty, ty, sema.mod)) { - std.debug.panic("addConstant type mismatch: '{}' vs '{}'\n", .{ - ty.fmt(sema.mod), val_ty.fmt(sema.mod), - }); - } + + // This assertion can be removed when the `ty` parameter is removed from + // this function thanks to the InternPool transition being complete. + if (std.debug.runtime_safety) { + const val_ty = mod.intern_pool.typeOf(val.toIntern()); + if (ty.toIntern() != val_ty) { + std.debug.panic("addConstant type mismatch: '{}' vs '{}'\n", .{ + ty.fmt(mod), val_ty.toType().fmt(mod), + }); } - return result; } - const ty_inst = try sema.addType(ty); - try sema.air_values.append(gpa, val); + if (@enumToInt(val.toIntern()) < Air.ref_start_index) + return @intToEnum(Air.Inst.Ref, @enumToInt(val.toIntern())); try sema.air_instructions.append(gpa, .{ - .tag = .constant, - .data = .{ .ty_pl = .{ - .ty = ty_inst, - .payload = @intCast(u32, sema.air_values.items.len - 1), - } }, + .tag = .interned, + .data = .{ .interned = val.toIntern() }, }); return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1)); } @@ -33606,7 +33536,7 @@ pub fn analyzeAddressSpace( fn pointerDeref(sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value, ptr_ty: Type) CompileError!?Value { const mod = sema.mod; const load_ty = ptr_ty.childType(mod); - const res = try sema.pointerDerefExtra(block, src, ptr_val, load_ty, true); + const res = try sema.pointerDerefExtra(block, src, ptr_val, load_ty); switch (res) { .runtime_load => return null, .val => |v| return v, @@ -33632,7 +33562,7 @@ const DerefResult = union(enum) { out_of_bounds: Type, }; -fn pointerDerefExtra(sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value, load_ty: Type, want_mutable: bool) CompileError!DerefResult { +fn pointerDerefExtra(sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value, load_ty: Type) CompileError!DerefResult { const mod = sema.mod; const target = mod.getTarget(); const deref = sema.beginComptimePtrLoad(block, src, ptr_val, load_ty) catch |err| switch (err) { @@ -33647,13 +33577,8 @@ fn pointerDerefExtra(sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value if (coerce_in_mem_ok) { // We have a Value that lines up in virtual memory exactly with what we want to load, // and it is in-memory coercible to load_ty. It may be returned without modifications. - if (deref.is_mutable and want_mutable) { - // The decl whose value we are obtaining here may be overwritten with - // a different value upon further semantic analysis, which would - // invalidate this memory. So we must copy here. - return DerefResult{ .val = try tv.val.copy(sema.arena) }; - } - return DerefResult{ .val = tv.val }; + // Move mutable decl values to the InternPool and assert other decls are already in the InternPool. + return .{ .val = (if (deref.is_mutable) try tv.val.intern(tv.ty, mod) else tv.val.toIntern()).toValue() }; } } diff --git a/src/TypedValue.zig b/src/TypedValue.zig index d82fb72dea..020686f86e 100644 --- a/src/TypedValue.zig +++ b/src/TypedValue.zig @@ -124,6 +124,60 @@ pub fn print( } return writer.writeAll(" }"); }, + .slice => { + if (level == 0) { + return writer.writeAll(".{ ... 
}"); + } + const payload = val.castTag(.slice).?.data; + const elem_ty = ty.elemType2(mod); + const len = payload.len.toUnsignedInt(mod); + + if (elem_ty.eql(Type.u8, mod)) str: { + const max_len = @intCast(usize, std.math.min(len, max_string_len)); + var buf: [max_string_len]u8 = undefined; + + var i: u32 = 0; + while (i < max_len) : (i += 1) { + const elem_val = payload.ptr.elemValue(mod, i) catch |err| switch (err) { + error.OutOfMemory => @panic("OOM"), // TODO: eliminate this panic + }; + if (elem_val.isUndef(mod)) break :str; + buf[i] = std.math.cast(u8, elem_val.toUnsignedInt(mod)) orelse break :str; + } + + // TODO would be nice if this had a bit of unicode awareness. + const truncated = if (len > max_string_len) " (truncated)" else ""; + return writer.print("\"{}{s}\"", .{ std.zig.fmtEscapes(buf[0..max_len]), truncated }); + } + + try writer.writeAll(".{ "); + + const max_len = std.math.min(len, max_aggregate_items); + var i: u32 = 0; + while (i < max_len) : (i += 1) { + if (i != 0) try writer.writeAll(", "); + const elem_val = payload.ptr.elemValue(mod, i) catch |err| switch (err) { + error.OutOfMemory => @panic("OOM"), // TODO: eliminate this panic + }; + try print(.{ + .ty = elem_ty, + .val = elem_val, + }, writer, level - 1, mod); + } + if (len > max_aggregate_items) { + try writer.writeAll(", ..."); + } + return writer.writeAll(" }"); + }, + .eu_payload => { + val = val.castTag(.eu_payload).?.data; + ty = ty.errorUnionPayload(mod); + }, + .opt_payload => { + val = val.castTag(.opt_payload).?.data; + ty = ty.optionalChild(mod); + return print(.{ .ty = ty, .val = val }, writer, level, mod); + }, // TODO these should not appear in this function .inferred_alloc => return writer.writeAll("(inferred allocation value)"), .inferred_alloc_comptime => return writer.writeAll("(inferred comptime allocation value)"), diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index 16b103c898..54a34e8f09 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -845,8 +845,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .ptr_elem_val => try self.airPtrElemVal(inst), .ptr_elem_ptr => try self.airPtrElemPtr(inst), - .constant => unreachable, // excluded from function bodies - .interned => unreachable, // excluded from function bodies + .inferred_alloc, .inferred_alloc_comptime, .interned => unreachable, .unreach => self.finishAirBookkeeping(), .optional_payload => try self.airOptionalPayload(inst), @@ -919,8 +918,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { /// Asserts there is already capacity to insert into top branch inst_table. fn processDeath(self: *Self, inst: Air.Inst.Index) void { - const air_tags = self.air.instructions.items(.tag); - if (air_tags[inst] == .constant) return; // Constants are immortal. + assert(self.air.instructions.items(.tag)[inst] != .interned); // When editing this function, note that the logic must synchronize with `reuseOperand`. const prev_value = self.getResolvedInstValue(inst); const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; @@ -6155,15 +6153,15 @@ fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue { }); switch (self.air.instructions.items(.tag)[inst_index]) { - .constant => { + .interned => { // Constants have static lifetimes, so they are always memoized in the outer most table. 
const branch = &self.branch_stack.items[0]; const gop = try branch.inst_table.getOrPut(self.gpa, inst_index); if (!gop.found_existing) { - const ty_pl = self.air.instructions.items(.data)[inst_index].ty_pl; + const interned = self.air.instructions.items(.data)[inst_index].interned; gop.value_ptr.* = try self.genTypedValue(.{ .ty = inst_ty, - .val = self.air.values[ty_pl.payload], + .val = interned.toValue(), }); } return gop.value_ptr.*; diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index f0a44b72a8..8f1a8fdb67 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -829,8 +829,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .ptr_elem_val => try self.airPtrElemVal(inst), .ptr_elem_ptr => try self.airPtrElemPtr(inst), - .constant => unreachable, // excluded from function bodies - .interned => unreachable, // excluded from function bodies + .inferred_alloc, .inferred_alloc_comptime, .interned => unreachable, .unreach => self.finishAirBookkeeping(), .optional_payload => try self.airOptionalPayload(inst), @@ -903,8 +902,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { /// Asserts there is already capacity to insert into top branch inst_table. fn processDeath(self: *Self, inst: Air.Inst.Index) void { - const air_tags = self.air.instructions.items(.tag); - if (air_tags[inst] == .constant) return; // Constants are immortal. + assert(self.air.instructions.items(.tag)[inst] != .interned); // When editing this function, note that the logic must synchronize with `reuseOperand`. const prev_value = self.getResolvedInstValue(inst); const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; @@ -6103,15 +6101,15 @@ fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue { }); switch (self.air.instructions.items(.tag)[inst_index]) { - .constant => { + .interned => { // Constants have static lifetimes, so they are always memoized in the outer most table. const branch = &self.branch_stack.items[0]; const gop = try branch.inst_table.getOrPut(self.gpa, inst_index); if (!gop.found_existing) { - const ty_pl = self.air.instructions.items(.data)[inst_index].ty_pl; + const interned = self.air.instructions.items(.data)[inst_index].interned; gop.value_ptr.* = try self.genTypedValue(.{ .ty = inst_ty, - .val = self.air.values[ty_pl.payload], + .val = interned.toValue(), }); } return gop.value_ptr.*; diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index 7f4715a451..660630503d 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -659,8 +659,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .ptr_elem_val => try self.airPtrElemVal(inst), .ptr_elem_ptr => try self.airPtrElemPtr(inst), - .constant => unreachable, // excluded from function bodies - .interned => unreachable, // excluded from function bodies + .inferred_alloc, .inferred_alloc_comptime, .interned => unreachable, .unreach => self.finishAirBookkeeping(), .optional_payload => try self.airOptionalPayload(inst), @@ -730,8 +729,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { /// Asserts there is already capacity to insert into top branch inst_table. fn processDeath(self: *Self, inst: Air.Inst.Index) void { - const air_tags = self.air.instructions.items(.tag); - if (air_tags[inst] == .constant) return; // Constants are immortal. 
+ assert(self.air.instructions.items(.tag)[inst] != .interned); // When editing this function, note that the logic must synchronize with `reuseOperand`. const prev_value = self.getResolvedInstValue(inst); const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; @@ -2557,15 +2555,15 @@ fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue { }); switch (self.air.instructions.items(.tag)[inst_index]) { - .constant => { + .interned => { // Constants have static lifetimes, so they are always memoized in the outer most table. const branch = &self.branch_stack.items[0]; const gop = try branch.inst_table.getOrPut(self.gpa, inst_index); if (!gop.found_existing) { - const ty_pl = self.air.instructions.items(.data)[inst_index].ty_pl; + const interned = self.air.instructions.items(.data)[inst_index].interned; gop.value_ptr.* = try self.genTypedValue(.{ .ty = inst_ty, - .val = self.air.values[ty_pl.payload], + .val = interned.toValue(), }); } return gop.value_ptr.*; diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index 9f44dc0e8a..c7376a6eb7 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -679,8 +679,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .ptr_elem_val => try self.airPtrElemVal(inst), .ptr_elem_ptr => try self.airPtrElemPtr(inst), - .constant => unreachable, // excluded from function bodies - .interned => unreachable, // excluded from function bodies + .inferred_alloc, .inferred_alloc_comptime, .interned => unreachable, .unreach => self.finishAirBookkeeping(), .optional_payload => try self.airOptionalPayload(inst), @@ -4423,8 +4422,7 @@ fn performReloc(self: *Self, inst: Mir.Inst.Index) !void { /// Asserts there is already capacity to insert into top branch inst_table. fn processDeath(self: *Self, inst: Air.Inst.Index) void { - const air_tags = self.air.instructions.items(.tag); - if (air_tags[inst] == .constant) return; // Constants are immortal. + assert(self.air.instructions.items(.tag)[inst] != .interned); // When editing this function, note that the logic must synchronize with `reuseOperand`. const prev_value = self.getResolvedInstValue(inst); const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; @@ -4553,15 +4551,15 @@ fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue { if (Air.refToIndex(ref)) |inst| { switch (self.air.instructions.items(.tag)[inst]) { - .constant => { + .interned => { // Constants have static lifetimes, so they are always memoized in the outer most table. 
const branch = &self.branch_stack.items[0]; const gop = try branch.inst_table.getOrPut(self.gpa, inst); if (!gop.found_existing) { - const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; + const interned = self.air.instructions.items(.data)[inst].interned; gop.value_ptr.* = try self.genTypedValue(.{ .ty = ty, - .val = self.air.values[ty_pl.payload], + .val = interned.toValue(), }); } return gop.value_ptr.*; diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index 85fc8346f8..b4e627e957 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -883,7 +883,7 @@ fn iterateBigTomb(func: *CodeGen, inst: Air.Inst.Index, operand_count: usize) !B fn processDeath(func: *CodeGen, ref: Air.Inst.Ref) void { const inst = Air.refToIndex(ref) orelse return; - if (func.air.instructions.items(.tag)[inst] == .constant) return; + assert(func.air.instructions.items(.tag)[inst] != .interned); // Branches are currently only allowed to free locals allocated // within their own branch. // TODO: Upon branch consolidation free any locals if needed. @@ -1832,8 +1832,7 @@ fn buildPointerOffset(func: *CodeGen, ptr_value: WValue, offset: u64, action: en fn genInst(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const air_tags = func.air.instructions.items(.tag); return switch (air_tags[inst]) { - .constant => unreachable, - .interned => unreachable, + .inferred_alloc, .inferred_alloc_comptime, .interned => unreachable, .add => func.airBinOp(inst, .add), .add_sat => func.airSatBinOp(inst, .add), diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index f2ac985844..48504dee8f 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -1922,8 +1922,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .ptr_elem_val => try self.airPtrElemVal(inst), .ptr_elem_ptr => try self.airPtrElemPtr(inst), - .constant => unreachable, // excluded from function bodies - .interned => unreachable, // excluded from function bodies + .inferred_alloc, .inferred_alloc_comptime, .interned => unreachable, .unreach => if (self.wantSafety()) try self.airTrap() else self.finishAirBookkeeping(), .optional_payload => try self.airOptionalPayload(inst), @@ -2097,10 +2096,8 @@ fn feed(self: *Self, bt: *Liveness.BigTomb, operand: Air.Inst.Ref) void { /// Asserts there is already capacity to insert into top branch inst_table. fn processDeath(self: *Self, inst: Air.Inst.Index) void { - switch (self.air.instructions.items(.tag)[inst]) { - .constant => unreachable, - else => self.inst_tracking.getPtr(inst).?.die(self, inst), - } + assert(self.air.instructions.items(.tag)[inst] != .interned); + self.inst_tracking.getPtr(inst).?.die(self, inst); } /// Called when there are no operands, and the instruction is always unreferenced. 
@@ -2876,8 +2873,8 @@ fn activeIntBits(self: *Self, dst_air: Air.Inst.Ref) u16 { const dst_info = dst_ty.intInfo(mod); if (Air.refToIndex(dst_air)) |inst| { switch (air_tag[inst]) { - .constant => { - const src_val = self.air.values[air_data[inst].ty_pl.payload]; + .interned => { + const src_val = air_data[inst].interned.toValue(); var space: Value.BigIntSpace = undefined; const src_int = src_val.toBigInt(&space, mod); return @intCast(u16, src_int.bitCountTwosComp()) + @@ -11584,11 +11581,11 @@ fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue { if (Air.refToIndex(ref)) |inst| { const mcv = switch (self.air.instructions.items(.tag)[inst]) { - .constant => tracking: { + .interned => tracking: { const gop = try self.const_tracking.getOrPut(self.gpa, inst); if (!gop.found_existing) gop.value_ptr.* = InstTracking.init(try self.genTypedValue(.{ .ty = ty, - .val = (try self.air.value(ref, mod)).?, + .val = self.air.instructions.items(.data)[inst].interned.toValue(), })); break :tracking gop.value_ptr; }, @@ -11605,7 +11602,7 @@ fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue { fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) *InstTracking { const tracking = switch (self.air.instructions.items(.tag)[inst]) { - .constant => &self.const_tracking, + .interned => &self.const_tracking, else => &self.inst_tracking, }.getPtr(inst).?; return switch (tracking.short) { diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 76533b4284..f97292e510 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -2890,8 +2890,7 @@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, const result_value = switch (air_tags[inst]) { // zig fmt: off - .constant => unreachable, // excluded from function bodies - .interned => unreachable, // excluded from function bodies + .inferred_alloc, .inferred_alloc_comptime, .interned => unreachable, .arg => try airArg(f, inst), @@ -7783,8 +7782,8 @@ fn reap(f: *Function, inst: Air.Inst.Index, operands: []const Air.Inst.Ref) !voi fn die(f: *Function, inst: Air.Inst.Index, ref: Air.Inst.Ref) !void { const ref_inst = Air.refToIndex(ref) orelse return; + assert(f.air.instructions.items(.tag)[ref_inst] != .interned); const c_value = (f.value_map.fetchRemove(ref_inst) orelse return).value; - if (f.air.instructions.items(.tag)[ref_inst] == .constant) return; const local_index = switch (c_value) { .local, .new_local => |l| l, else => return, diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 0c12faf751..46b126ad84 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -4530,8 +4530,7 @@ pub const FuncGen = struct { .vector_store_elem => try self.airVectorStoreElem(inst), - .constant => unreachable, - .interned => unreachable, + .inferred_alloc, .inferred_alloc_comptime, .interned => unreachable, .unreach => self.airUnreach(inst), .dbg_stmt => self.airDbgStmt(inst), diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 2b04e03a5a..1a19bbdf91 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -1807,7 +1807,6 @@ pub const DeclGen = struct { .br => return self.airBr(inst), .breakpoint => return, .cond_br => return self.airCondBr(inst), - .constant => unreachable, .dbg_stmt => return self.airDbgStmt(inst), .loop => return self.airLoop(inst), .ret => return self.airRet(inst), diff --git a/src/print_air.zig b/src/print_air.zig index 58e4029543..204f5ddeb9 100644 --- a/src/print_air.zig +++ b/src/print_air.zig @@ -93,14 +93,10 @@ const Writer = struct { fn writeAllConstants(w: 
*Writer, s: anytype) @TypeOf(s).Error!void { for (w.air.instructions.items(.tag), 0..) |tag, i| { + if (tag != .interned) continue; const inst = @intCast(Air.Inst.Index, i); - switch (tag) { - .constant, .interned => { - try w.writeInst(s, inst); - try s.writeByte('\n'); - }, - else => continue, - } + try w.writeInst(s, inst); + try s.writeByte('\n'); } } @@ -304,7 +300,7 @@ const Writer = struct { .struct_field_ptr => try w.writeStructField(s, inst), .struct_field_val => try w.writeStructField(s, inst), - .constant => try w.writeConstant(s, inst), + .inferred_alloc, .inferred_alloc_comptime => try w.writeConstant(s, inst), .interned => try w.writeInterned(s, inst), .assembly => try w.writeAssembly(s, inst), .dbg_stmt => try w.writeDbgStmt(s, inst), diff --git a/src/value.zig b/src/value.zig index 02f4422dda..9244e33ad5 100644 --- a/src/value.zig +++ b/src/value.zig @@ -35,6 +35,22 @@ pub const Value = struct { // The first section of this enum are tags that require no payload. // After this, the tag requires a payload. + /// When the type is error union: + /// * If the tag is `.@"error"`, the error union is an error. + /// * If the tag is `.eu_payload`, the error union is a payload. + /// * A nested error such as `anyerror!(anyerror!T)` in which the outer error union + /// is non-error, but the inner error union is an error, is represented as + /// a tag of `.eu_payload`, with a sub-tag of `.@"error"`. + eu_payload, + /// When the type is optional: + /// * If the tag is `.null_value`, the optional is null. + /// * If the tag is `.opt_payload`, the optional is a payload. + /// * A nested optional such as `??T` in which the outer optional + /// is non-null, but the inner optional is null, is represented as + /// a tag of `.opt_payload`, with a sub-tag of `.null_value`. + opt_payload, + /// Pointer and length as sub `Value` objects. + slice, /// A slice of u8 whose memory is managed externally. bytes, /// This value is repeated some number of times. 
The amount of times to repeat @@ -58,14 +74,16 @@ pub const Value = struct { pub fn Type(comptime t: Tag) type { return switch (t) { - .repeated => Payload.SubValue, - + .eu_payload, + .opt_payload, + .repeated, + => Payload.SubValue, + .slice => Payload.Slice, .bytes => Payload.Bytes, - - .inferred_alloc => Payload.InferredAlloc, - .inferred_alloc_comptime => Payload.InferredAllocComptime, .aggregate => Payload.Aggregate, .@"union" => Payload.Union, + .inferred_alloc => Payload.InferredAlloc, + .inferred_alloc_comptime => Payload.InferredAllocComptime, }; } @@ -172,7 +190,10 @@ pub const Value = struct { .legacy = .{ .ptr_otherwise = &new_payload.base }, }; }, - .repeated => { + .eu_payload, + .opt_payload, + .repeated, + => { const payload = self.cast(Payload.SubValue).?; const new_payload = try arena.create(Payload.SubValue); new_payload.* = .{ @@ -184,6 +205,21 @@ pub const Value = struct { .legacy = .{ .ptr_otherwise = &new_payload.base }, }; }, + .slice => { + const payload = self.castTag(.slice).?; + const new_payload = try arena.create(Payload.Slice); + new_payload.* = .{ + .base = payload.base, + .data = .{ + .ptr = try payload.data.ptr.copy(arena), + .len = try payload.data.len.copy(arena), + }, + }; + return Value{ + .ip_index = .none, + .legacy = .{ .ptr_otherwise = &new_payload.base }, + }; + }, .aggregate => { const payload = self.castTag(.aggregate).?; const new_payload = try arena.create(Payload.Aggregate); @@ -263,6 +299,15 @@ pub const Value = struct { try out_stream.writeAll("(repeated) "); val = val.castTag(.repeated).?.data; }, + .eu_payload => { + try out_stream.writeAll("(eu_payload) "); + val = val.castTag(.eu_payload).?.data; + }, + .opt_payload => { + try out_stream.writeAll("(opt_payload) "); + val = val.castTag(.opt_payload).?.data; + }, + .slice => return out_stream.writeAll("(slice)"), .inferred_alloc => return out_stream.writeAll("(inferred allocation value)"), .inferred_alloc_comptime => return out_stream.writeAll("(inferred comptime allocation value)"), }; @@ -1653,13 +1698,18 @@ pub const Value = struct { .Null, .Struct, // It sure would be nice to do something clever with structs. => |zig_type_tag| std.hash.autoHash(hasher, zig_type_tag), + .Pointer => { + assert(ty.isSlice(mod)); + const slice = val.castTag(.slice).?.data; + const ptr_ty = ty.slicePtrFieldType(mod); + slice.ptr.hashUncoerced(ptr_ty, hasher, mod); + }, .Type, .Float, .ComptimeFloat, .Bool, .Int, .ComptimeInt, - .Pointer, .Fn, .Optional, .ErrorSet, @@ -1799,9 +1849,15 @@ pub const Value = struct { /// Asserts the value is a single-item pointer to an array, or an array, /// or an unknown-length pointer, and returns the element value at the index. 
pub fn elemValue(val: Value, mod: *Module, index: usize) Allocator.Error!Value { - switch (val.toIntern()) { - .undef => return Value.undef, - else => return switch (mod.intern_pool.indexToKey(val.toIntern())) { + return switch (val.ip_index) { + .undef => Value.undef, + .none => switch (val.tag()) { + .repeated => val.castTag(.repeated).?.data, + .aggregate => val.castTag(.aggregate).?.data[index], + .slice => val.castTag(.slice).?.data.ptr.elemValue(mod, index), + else => unreachable, + }, + else => switch (mod.intern_pool.indexToKey(val.toIntern())) { .ptr => |ptr| switch (ptr.addr) { .decl => |decl| mod.declPtr(decl).val.elemValue(mod, index), .mut_decl => |mut_decl| mod.declPtr(mut_decl.decl).val.elemValue(mod, index), @@ -1829,7 +1885,7 @@ pub const Value = struct { }, else => unreachable, }, - } + }; } pub fn isLazyAlign(val: Value, mod: *Module) bool { @@ -1875,25 +1931,28 @@ pub const Value = struct { } pub fn isPtrToThreadLocal(val: Value, mod: *Module) bool { - return switch (mod.intern_pool.indexToKey(val.toIntern())) { - .variable => |variable| variable.is_threadlocal, - .ptr => |ptr| switch (ptr.addr) { - .decl => |decl_index| { - const decl = mod.declPtr(decl_index); - assert(decl.has_tv); - return decl.val.isPtrToThreadLocal(mod); - }, - .mut_decl => |mut_decl| { - const decl = mod.declPtr(mut_decl.decl); - assert(decl.has_tv); - return decl.val.isPtrToThreadLocal(mod); + return switch (val.ip_index) { + .none => false, + else => switch (mod.intern_pool.indexToKey(val.toIntern())) { + .variable => |variable| variable.is_threadlocal, + .ptr => |ptr| switch (ptr.addr) { + .decl => |decl_index| { + const decl = mod.declPtr(decl_index); + assert(decl.has_tv); + return decl.val.isPtrToThreadLocal(mod); + }, + .mut_decl => |mut_decl| { + const decl = mod.declPtr(mut_decl.decl); + assert(decl.has_tv); + return decl.val.isPtrToThreadLocal(mod); + }, + .int => false, + .eu_payload, .opt_payload => |base_ptr| base_ptr.toValue().isPtrToThreadLocal(mod), + .comptime_field => |comptime_field| comptime_field.toValue().isPtrToThreadLocal(mod), + .elem, .field => |base_index| base_index.base.toValue().isPtrToThreadLocal(mod), }, - .int => false, - .eu_payload, .opt_payload => |base_ptr| base_ptr.toValue().isPtrToThreadLocal(mod), - .comptime_field => |comptime_field| comptime_field.toValue().isPtrToThreadLocal(mod), - .elem, .field => |base_index| base_index.base.toValue().isPtrToThreadLocal(mod), + else => false, }, - else => false, }; } @@ -1926,9 +1985,21 @@ pub const Value = struct { } pub fn fieldValue(val: Value, mod: *Module, index: usize) !Value { - switch (val.toIntern()) { - .undef => return Value.undef, - else => return switch (mod.intern_pool.indexToKey(val.toIntern())) { + return switch (val.ip_index) { + .undef => Value.undef, + .none => switch (val.tag()) { + .aggregate => { + const field_values = val.castTag(.aggregate).?.data; + return field_values[index]; + }, + .@"union" => { + const payload = val.castTag(.@"union").?.data; + // TODO assert the tag is correct + return payload.val; + }, + else => unreachable, + }, + else => switch (mod.intern_pool.indexToKey(val.toIntern())) { .aggregate => |aggregate| switch (aggregate.storage) { .bytes => |bytes| try mod.intern(.{ .int = .{ .ty = .u8_type, @@ -1941,7 +2012,7 @@ pub const Value = struct { .un => |un| un.val.toValue(), else => unreachable, }, - } + }; } pub fn unionTag(val: Value, mod: *Module) Value { @@ -1956,36 +2027,17 @@ pub const Value = struct { /// Returns a pointer to the element value at the index. 
pub fn elemPtr( val: Value, - ty: Type, + elem_ptr_ty: Type, index: usize, mod: *Module, ) Allocator.Error!Value { - const elem_ty = ty.elemType2(mod); - const ptr_ty_key = mod.intern_pool.indexToKey(ty.toIntern()).ptr_type; - assert(ptr_ty_key.host_size == 0); - assert(ptr_ty_key.bit_offset == 0); - assert(ptr_ty_key.vector_index == .none); - const elem_alignment = InternPool.Alignment.fromByteUnits(elem_ty.abiAlignment(mod)); - const alignment = switch (ptr_ty_key.alignment) { - .none => .none, - else => ptr_ty_key.alignment.min( - @intToEnum(InternPool.Alignment, @ctz(index * elem_ty.abiSize(mod))), - ), - }; - const ptr_ty = try mod.ptrType(.{ - .elem_type = elem_ty.toIntern(), - .alignment = if (alignment == elem_alignment) .none else alignment, - .is_const = ptr_ty_key.is_const, - .is_volatile = ptr_ty_key.is_volatile, - .is_allowzero = ptr_ty_key.is_allowzero, - .address_space = ptr_ty_key.address_space, - }); + const elem_ty = elem_ptr_ty.childType(mod); const ptr_val = switch (mod.intern_pool.indexToKey(val.toIntern())) { .ptr => |ptr| ptr: { switch (ptr.addr) { .elem => |elem| if (mod.intern_pool.typeOf(elem.base).toType().elemType2(mod).eql(elem_ty, mod)) return (try mod.intern(.{ .ptr = .{ - .ty = ptr_ty.toIntern(), + .ty = elem_ptr_ty.toIntern(), .addr = .{ .elem = .{ .base = elem.base, .index = elem.index + index, @@ -2001,7 +2053,7 @@ pub const Value = struct { else => val, }; return (try mod.intern(.{ .ptr = .{ - .ty = ptr_ty.toIntern(), + .ty = elem_ptr_ty.toIntern(), .addr = .{ .elem = .{ .base = ptr_val.toIntern(), .index = index, @@ -4058,9 +4110,12 @@ pub const Value = struct { pub const Payload = struct { tag: Tag, - pub const SubValue = struct { + pub const Slice = struct { base: Payload, - data: Value, + data: struct { + ptr: Value, + len: Value, + }, }; pub const Bytes = struct { @@ -4069,6 +4124,11 @@ pub const Value = struct { data: []const u8, }; + pub const SubValue = struct { + base: Payload, + data: Value, + }; + pub const Aggregate = struct { base: Payload, /// Field values. The types are according to the struct or array type. 
@@ -4076,6 +4136,18 @@ pub const Value = struct { data: []Value, }; + pub const Union = struct { + pub const base_tag = Tag.@"union"; + + base: Payload = .{ .tag = base_tag }, + data: Data, + + pub const Data = struct { + tag: Value, + val: Value, + }; + }; + pub const InferredAlloc = struct { pub const base_tag = Tag.inferred_alloc; @@ -4110,18 +4182,6 @@ pub const Value = struct { alignment: u32, }, }; - - pub const Union = struct { - pub const base_tag = Tag.@"union"; - - base: Payload = .{ .tag = base_tag }, - data: Data, - - pub const Data = struct { - tag: Value, - val: Value, - }; - }; }; pub const BigIntSpace = InternPool.Key.Int.Storage.BigIntSpace; diff --git a/tools/lldb_pretty_printers.py b/tools/lldb_pretty_printers.py index 555cda135d..36e497afb9 100644 --- a/tools/lldb_pretty_printers.py +++ b/tools/lldb_pretty_printers.py @@ -682,4 +682,10 @@ def __lldb_init_module(debugger, _=None): add(debugger, category='zig.stage2', regex=True, type='^Air\\.Inst\\.Data\\.Data__struct_[1-9][0-9]*$', inline_children=True, summary=True) add(debugger, category='zig.stage2', type='Module.Decl::Module.Decl.Index', synth=True) add(debugger, category='zig.stage2', type='InternPool.Index', synth=True) + add(debugger, category='zig.stage2', type='InternPool.Key', identifier='zig_TaggedUnion', synth=True) + add(debugger, category='zig.stage2', type='InternPool.Key.Int.Storage', identifier='zig_TaggedUnion', synth=True) + add(debugger, category='zig.stage2', type='InternPool.Key.ErrorUnion.Value', identifier='zig_TaggedUnion', synth=True) + add(debugger, category='zig.stage2', type='InternPool.Key.Float.Storage', identifier='zig_TaggedUnion', synth=True) + add(debugger, category='zig.stage2', type='InternPool.Key.Ptr.Addr', identifier='zig_TaggedUnion', synth=True) + add(debugger, category='zig.stage2', type='InternPool.Key.Aggregate.Storage', identifier='zig_TaggedUnion', synth=True) add(debugger, category='zig.stage2', type='arch.x86_64.CodeGen.MCValue', identifier='zig_TaggedUnion', synth=True, inline_children=True, summary=True) -- cgit v1.2.3 From 9a738c0be54c9bda0e57de9da84f86fc73bd5198 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Fri, 26 May 2023 00:24:29 -0400 Subject: Module: intern the values of decls when they are marked alive I'm not sure if this is the right place for this to happen, and it should become obsolete when comptime mutation is rewritten and the remaining legacy value tags are removed, so keeping this as a separate revertible commit. 
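In practice, the visible change is that `markDeclAlive` becomes fallible, because interning a decl's value can allocate. A minimal before/after sketch of the call-site shape, as the diff below applies it (assuming `mod: *Module` and `decl: *Decl` are in scope):

    // Before this commit: marking a decl alive could not fail.
    mod.markDeclAlive(decl);

    // After this commit: interning the decl's value may allocate,
    // so the call returns Allocator.Error and must be propagated.
    try mod.markDeclAlive(decl);

Accordingly, the callers in Sema and in the wasm, C, LLVM, and SPIR-V backends below are updated to propagate this error.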
--- src/Module.zig | 36 +++++++++++++++++++----------------- src/Sema.zig | 2 +- src/arch/wasm/CodeGen.zig | 4 ++-- src/codegen.zig | 4 ++-- src/codegen/c.zig | 2 +- src/codegen/llvm.zig | 6 +++--- src/codegen/spirv.zig | 2 +- 7 files changed, 29 insertions(+), 27 deletions(-) (limited to 'src/arch') diff --git a/src/Module.zig b/src/Module.zig index d3045631c5..76e2142ae6 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -6603,47 +6603,49 @@ fn reportRetryableFileError( gop.value_ptr.* = err_msg; } -pub fn markReferencedDeclsAlive(mod: *Module, val: Value) void { +pub fn markReferencedDeclsAlive(mod: *Module, val: Value) Allocator.Error!void { switch (mod.intern_pool.indexToKey(val.toIntern())) { - .variable => |variable| mod.markDeclIndexAlive(variable.decl), - .extern_func => |extern_func| mod.markDeclIndexAlive(extern_func.decl), - .func => |func| mod.markDeclIndexAlive(mod.funcPtr(func.index).owner_decl), + .variable => |variable| try mod.markDeclIndexAlive(variable.decl), + .extern_func => |extern_func| try mod.markDeclIndexAlive(extern_func.decl), + .func => |func| try mod.markDeclIndexAlive(mod.funcPtr(func.index).owner_decl), .error_union => |error_union| switch (error_union.val) { .err_name => {}, - .payload => |payload| mod.markReferencedDeclsAlive(payload.toValue()), + .payload => |payload| try mod.markReferencedDeclsAlive(payload.toValue()), }, .ptr => |ptr| { switch (ptr.addr) { - .decl => |decl| mod.markDeclIndexAlive(decl), - .mut_decl => |mut_decl| mod.markDeclIndexAlive(mut_decl.decl), + .decl => |decl| try mod.markDeclIndexAlive(decl), + .mut_decl => |mut_decl| try mod.markDeclIndexAlive(mut_decl.decl), .int, .comptime_field => {}, - .eu_payload, .opt_payload => |parent| mod.markReferencedDeclsAlive(parent.toValue()), - .elem, .field => |base_index| mod.markReferencedDeclsAlive(base_index.base.toValue()), + .eu_payload, .opt_payload => |parent| try mod.markReferencedDeclsAlive(parent.toValue()), + .elem, .field => |base_index| try mod.markReferencedDeclsAlive(base_index.base.toValue()), } - if (ptr.len != .none) mod.markReferencedDeclsAlive(ptr.len.toValue()); + if (ptr.len != .none) try mod.markReferencedDeclsAlive(ptr.len.toValue()); }, - .opt => |opt| if (opt.val != .none) mod.markReferencedDeclsAlive(opt.val.toValue()), + .opt => |opt| if (opt.val != .none) try mod.markReferencedDeclsAlive(opt.val.toValue()), .aggregate => |aggregate| for (aggregate.storage.values()) |elem| - mod.markReferencedDeclsAlive(elem.toValue()), + try mod.markReferencedDeclsAlive(elem.toValue()), .un => |un| { - mod.markReferencedDeclsAlive(un.tag.toValue()); - mod.markReferencedDeclsAlive(un.val.toValue()); + try mod.markReferencedDeclsAlive(un.tag.toValue()); + try mod.markReferencedDeclsAlive(un.val.toValue()); }, else => {}, } } -pub fn markDeclAlive(mod: *Module, decl: *Decl) void { +pub fn markDeclAlive(mod: *Module, decl: *Decl) Allocator.Error!void { if (decl.alive) return; decl.alive = true; + decl.val = (try decl.val.intern(decl.ty, mod)).toValue(); + // This is the first time we are marking this Decl alive. We must // therefore recurse into its value and mark any Decl it references // as also alive, so that any Decl referenced does not get garbage collected. 
- mod.markReferencedDeclsAlive(decl.val); + try mod.markReferencedDeclsAlive(decl.val); } -fn markDeclIndexAlive(mod: *Module, decl_index: Decl.Index) void { +fn markDeclIndexAlive(mod: *Module, decl_index: Decl.Index) Allocator.Error!void { return mod.markDeclAlive(mod.declPtr(decl_index)); } diff --git a/src/Sema.zig b/src/Sema.zig index 5395bb63d1..e9c4958918 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -5807,7 +5807,7 @@ pub fn analyzeExport( } // This decl is alive no matter what, since it's being exported - mod.markDeclAlive(exported_decl); + try mod.markDeclAlive(exported_decl); try sema.maybeQueueFuncBodyAnalysis(exported_decl_index); const gpa = sema.gpa; diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index b4e627e957..d9cb56404a 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -3019,7 +3019,7 @@ fn lowerParentPtr(func: *CodeGen, ptr_val: Value) InnerError!WValue { fn lowerParentPtrDecl(func: *CodeGen, ptr_val: Value, decl_index: Module.Decl.Index, offset: u32) InnerError!WValue { const mod = func.bin_file.base.options.module.?; const decl = mod.declPtr(decl_index); - mod.markDeclAlive(decl); + try mod.markDeclAlive(decl); const ptr_ty = try mod.singleMutPtrType(decl.ty); return func.lowerDeclRefValue(.{ .ty = ptr_ty, .val = ptr_val }, decl_index, offset); } @@ -3035,7 +3035,7 @@ fn lowerDeclRefValue(func: *CodeGen, tv: TypedValue, decl_index: Module.Decl.Ind return WValue{ .imm32 = 0xaaaaaaaa }; } - mod.markDeclAlive(decl); + try mod.markDeclAlive(decl); const atom_index = try func.bin_file.getOrCreateAtomForDecl(decl_index); const atom = func.bin_file.getAtom(atom_index); diff --git a/src/codegen.zig b/src/codegen.zig index f343f0441d..87aea6c245 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -673,7 +673,7 @@ fn lowerDeclRef( return Result.ok; } - mod.markDeclAlive(decl); + try mod.markDeclAlive(decl); const vaddr = try bin_file.getDeclVAddr(decl_index, .{ .parent_atom_index = reloc_info.parent_atom_index, @@ -782,7 +782,7 @@ fn genDeclRef( } } - mod.markDeclAlive(decl); + try mod.markDeclAlive(decl); const is_threadlocal = tv.val.isPtrToThreadLocal(mod) and !bin_file.options.single_threaded; diff --git a/src/codegen/c.zig b/src/codegen/c.zig index f97292e510..2dcc332713 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -1923,7 +1923,7 @@ pub const DeclGen = struct { fn renderDeclName(dg: *DeclGen, writer: anytype, decl_index: Decl.Index, export_index: u32) !void { const mod = dg.module; const decl = mod.declPtr(decl_index); - mod.markDeclAlive(decl); + try mod.markDeclAlive(decl); if (mod.decl_exports.get(decl_index)) |exports| { try writer.writeAll(exports.items[export_index].options.name); diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 46b126ad84..936b1d847a 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -3252,7 +3252,7 @@ pub const DeclGen = struct { else => unreachable, }; const fn_decl = dg.module.declPtr(fn_decl_index); - dg.module.markDeclAlive(fn_decl); + try dg.module.markDeclAlive(fn_decl); return dg.resolveLlvmFunction(fn_decl_index); }, .int => |int| { @@ -3831,7 +3831,7 @@ pub const DeclGen = struct { ) Error!*llvm.Value { const mod = dg.module; const decl = mod.declPtr(decl_index); - mod.markDeclAlive(decl); + try mod.markDeclAlive(decl); const ptr_ty = try mod.singleMutPtrType(decl.ty); return try dg.lowerDeclRefValue(.{ .ty = ptr_ty, .val = ptr_val }, decl_index); } @@ -4006,7 +4006,7 @@ pub const DeclGen = struct { return self.lowerPtrToVoid(tv.ty); } - 
mod.markDeclAlive(decl); + try mod.markDeclAlive(decl); const llvm_decl_val = if (is_fn_body) try self.resolveLlvmFunction(decl_index) diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 1a19bbdf91..43b6741493 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -256,7 +256,7 @@ pub const DeclGen = struct { /// Note: Function does not actually generate the decl. fn resolveDecl(self: *DeclGen, decl_index: Module.Decl.Index) !SpvModule.Decl.Index { const decl = self.module.declPtr(decl_index); - self.module.markDeclAlive(decl); + try self.module.markDeclAlive(decl); const entry = try self.decl_link.getOrPut(decl_index); if (!entry.found_existing) { -- cgit v1.2.3 From 66c43968546e38879a2d4c3f2264e10676deef73 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 25 May 2023 23:04:15 -0700 Subject: AIR: eliminate the `values` array --- src/Air.zig | 16 +++++++++------- src/Module.zig | 1 - src/Sema.zig | 27 +++++++++------------------ src/arch/aarch64/CodeGen.zig | 4 ++-- src/arch/arm/CodeGen.zig | 4 ++-- src/arch/riscv64/CodeGen.zig | 4 ++-- src/arch/sparc64/CodeGen.zig | 4 ++-- src/arch/wasm/CodeGen.zig | 2 +- src/arch/x86_64/CodeGen.zig | 4 ++-- src/codegen/c.zig | 6 +++--- src/codegen/llvm.zig | 10 +++++----- src/codegen/spirv.zig | 2 +- src/print_air.zig | 22 ++++++---------------- 13 files changed, 44 insertions(+), 62 deletions(-) (limited to 'src/arch') diff --git a/src/Air.zig b/src/Air.zig index e6cfc8c116..56f7d4cf01 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -17,7 +17,6 @@ instructions: std.MultiArrayList(Inst).Slice, /// The meaning of this data is determined by `Inst.Tag` value. /// The first few indexes are reserved. See `ExtraIndex` for the values. extra: []const u32, -values: []const Value, pub const ExtraIndex = enum(u32) { /// Payload index of the main `Block` in the `extra` array. @@ -421,10 +420,10 @@ pub const Inst = struct { /// Marks the end of a semantic scope for debug info variables. dbg_block_end, /// Marks the start of an inline call. - /// Uses `ty_pl` with the payload being the index of a Value.Function in air.values. + /// Uses the `ty_fn` field. dbg_inline_begin, /// Marks the end of an inline call. - /// Uses `ty_pl` with the payload being the index of a Value.Function in air.values. + /// Uses the `ty_fn` field. dbg_inline_end, /// Marks the beginning of a local variable. The operand is a pointer pointing /// to the storage for the variable. The local may be a const or a var. @@ -967,6 +966,10 @@ pub const Inst = struct { // Index into a different array. 
payload: u32, }, + ty_fn: struct { + ty: Ref, + func: Module.Fn.Index, + }, br: struct { block_inst: Index, operand: Ref, @@ -1090,8 +1093,7 @@ pub const FieldParentPtr = struct { pub const Shuffle = struct { a: Inst.Ref, b: Inst.Ref, - // index to air_values - mask: u32, + mask: InternPool.Index, mask_len: u32, }; @@ -1469,7 +1471,8 @@ pub fn extraData(air: Air, comptime T: type, index: usize) struct { data: T, end u32 => air.extra[i], Inst.Ref => @intToEnum(Inst.Ref, air.extra[i]), i32 => @bitCast(i32, air.extra[i]), - else => @compileError("bad field type"), + InternPool.Index => @intToEnum(InternPool.Index, air.extra[i]), + else => @compileError("bad field type: " ++ @typeName(field.type)), }; i += 1; } @@ -1482,7 +1485,6 @@ pub fn extraData(air: Air, comptime T: type, index: usize) struct { data: T, end pub fn deinit(air: *Air, gpa: std.mem.Allocator) void { air.instructions.deinit(gpa); gpa.free(air.extra); - gpa.free(air.values); air.* = undefined; } diff --git a/src/Module.zig b/src/Module.zig index 76e2142ae6..3dd89f1269 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -5720,7 +5720,6 @@ pub fn analyzeFnBody(mod: *Module, func_index: Fn.Index, arena: Allocator) SemaE return Air{ .instructions = sema.air_instructions.toOwnedSlice(), .extra = try sema.air_extra.toOwnedSlice(gpa), - .values = try sema.air_values.toOwnedSlice(gpa), }; } diff --git a/src/Sema.zig b/src/Sema.zig index b0d36c4699..0034810846 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -17,7 +17,6 @@ perm_arena: Allocator, code: Zir, air_instructions: std.MultiArrayList(Air.Inst) = .{}, air_extra: std.ArrayListUnmanaged(u32) = .{}, -air_values: std.ArrayListUnmanaged(Value) = .{}, /// Maps ZIR to AIR. inst_map: InstMap = .{}, /// When analyzing an inline function call, owner_decl is the Decl of the caller @@ -772,7 +771,6 @@ pub fn deinit(sema: *Sema) void { const gpa = sema.gpa; sema.air_instructions.deinit(gpa); sema.air_extra.deinit(gpa); - sema.air_values.deinit(gpa); sema.inst_map.deinit(gpa); sema.decl_val_table.deinit(gpa); sema.types_to_resolve.deinit(gpa); @@ -2018,10 +2016,8 @@ fn resolveMaybeUndefValAllowVariablesMaybeRuntime( } const air_datas = sema.air_instructions.items(.data); const val = switch (air_tags[i]) { - .inferred_alloc, .inferred_alloc_comptime => val: { - const ty_pl = sema.air_instructions.items(.data)[i].ty_pl; - break :val sema.air_values.items[ty_pl.payload]; - }, + .inferred_alloc => unreachable, + .inferred_alloc_comptime => unreachable, .interned => air_datas[i].interned.toValue(), else => return null, }; @@ -7930,20 +7926,17 @@ fn emitDbgInline( new_func_ty: Type, tag: Air.Inst.Tag, ) CompileError!void { - if (sema.mod.comp.bin_file.options.strip) return; + const mod = sema.mod; + if (mod.comp.bin_file.options.strip) return; // Recursive inline call; no dbg_inline needed. 
if (old_func == new_func) return; - try sema.air_values.append(sema.gpa, (try sema.mod.intern(.{ .func = .{ - .ty = new_func_ty.toIntern(), - .index = new_func, - } })).toValue()); _ = try block.addInst(.{ .tag = tag, - .data = .{ .ty_pl = .{ + .data = .{ .ty_fn = .{ .ty = try sema.addType(new_func_ty), - .payload = @intCast(u32, sema.air_values.items.len - 1), + .func = new_func, } }, }); } @@ -21724,8 +21717,6 @@ fn analyzeShuffle( } } - const mask_index = @intCast(u32, sema.air_values.items.len); - try sema.air_values.append(sema.gpa, mask); return block.addInst(.{ .tag = .shuffle, .data = .{ .ty_pl = .{ @@ -21733,7 +21724,7 @@ fn analyzeShuffle( .payload = try block.sema.addExtra(Air.Shuffle{ .a = a, .b = b, - .mask = mask_index, + .mask = mask.toIntern(), .mask_len = mask_len, }), } }, @@ -33311,7 +33302,6 @@ pub fn getTmpAir(sema: Sema) Air { return .{ .instructions = sema.air_instructions.slice(), .extra = sema.air_extra.items, - .values = sema.air_values.items, }; } @@ -33371,7 +33361,8 @@ pub fn addExtraAssumeCapacity(sema: *Sema, extra: anytype) u32 { u32 => @field(extra, field.name), Air.Inst.Ref => @enumToInt(@field(extra, field.name)), i32 => @bitCast(u32, @field(extra, field.name)), - else => @compileError("bad field type"), + InternPool.Index => @enumToInt(@field(extra, field.name)), + else => @compileError("bad field type: " ++ @typeName(field.type)), }); } return result; diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index 54a34e8f09..3afb510d43 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -4621,9 +4621,9 @@ fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void { } fn airDbgInline(self: *Self, inst: Air.Inst.Index) !void { - const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; + const ty_fn = self.air.instructions.items(.data)[inst].ty_fn; const mod = self.bin_file.options.module.?; - const function = self.air.values[ty_pl.payload].getFunction(mod).?; + const function = mod.funcPtr(ty_fn.func); // TODO emit debug info for function change _ = function; return self.finishAir(inst, .dead, .{ .none, .none, .none }); diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index 8f1a8fdb67..5f476a2e80 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -4568,9 +4568,9 @@ fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void { } fn airDbgInline(self: *Self, inst: Air.Inst.Index) !void { - const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; + const ty_fn = self.air.instructions.items(.data)[inst].ty_fn; const mod = self.bin_file.options.module.?; - const function = self.air.values[ty_pl.payload].getFunction(mod).?; + const function = mod.funcPtr(ty_fn.func); // TODO emit debug info for function change _ = function; return self.finishAir(inst, .dead, .{ .none, .none, .none }); diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index 660630503d..5417650dd5 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -1875,9 +1875,9 @@ fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void { } fn airDbgInline(self: *Self, inst: Air.Inst.Index) !void { - const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; + const ty_fn = self.air.instructions.items(.data)[inst].ty_fn; const mod = self.bin_file.options.module.?; - const function = self.air.values[ty_pl.payload].getFunction(mod).?; + const function = mod.funcPtr(ty_fn.func); // TODO emit debug info for function change _ = function; return self.finishAir(inst, .dead, .{ .none, 
.none, .none }); diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index c7376a6eb7..354af50b61 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -1660,9 +1660,9 @@ fn airDbgBlock(self: *Self, inst: Air.Inst.Index) !void { } fn airDbgInline(self: *Self, inst: Air.Inst.Index) !void { - const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; + const ty_fn = self.air.instructions.items(.data)[inst].ty_fn; const mod = self.bin_file.options.module.?; - const function = self.air.values[ty_pl.payload].getFunction(mod).?; + const function = mod.funcPtr(ty_fn.func); // TODO emit debug info for function change _ = function; return self.finishAir(inst, .dead, .{ .none, .none, .none }); diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index d9cb56404a..0c77197417 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -4947,7 +4947,7 @@ fn airShuffle(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const a = try func.resolveInst(extra.a); const b = try func.resolveInst(extra.b); - const mask = func.air.values[extra.mask]; + const mask = extra.mask.toValue(); const mask_len = extra.mask_len; const child_ty = inst_ty.childType(mod); diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 48504dee8f..00f5b3f3da 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -8541,9 +8541,9 @@ fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void { } fn airDbgInline(self: *Self, inst: Air.Inst.Index) !void { - const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; + const ty_fn = self.air.instructions.items(.data)[inst].ty_fn; const mod = self.bin_file.options.module.?; - const function = self.air.values[ty_pl.payload].getFunction(mod).?; + const function = mod.funcPtr(ty_fn.func); // TODO emit debug info for function change _ = function; return self.finishAir(inst, .unreach, .{ .none, .none, .none }); diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 2dcc332713..59d00f5849 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -4302,10 +4302,10 @@ fn airDbgStmt(f: *Function, inst: Air.Inst.Index) !CValue { } fn airDbgInline(f: *Function, inst: Air.Inst.Index) !CValue { - const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; + const ty_fn = f.air.instructions.items(.data)[inst].ty_fn; const mod = f.object.dg.module; const writer = f.object.writer(); - const function = f.air.values[ty_pl.payload].getFunction(mod).?; + const function = mod.funcPtr(ty_fn.func); try writer.print("/* dbg func:{s} */\n", .{mod.declPtr(function.owner_decl).name}); return .none; } @@ -6612,7 +6612,7 @@ fn airShuffle(f: *Function, inst: Air.Inst.Index) !CValue { const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; const extra = f.air.extraData(Air.Shuffle, ty_pl.payload).data; - const mask = f.air.values[extra.mask]; + const mask = extra.mask.toValue(); const lhs = try f.resolveInst(extra.a); const rhs = try f.resolveInst(extra.b); diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index bbedc1160c..dd07b5edbd 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -5927,10 +5927,10 @@ pub const FuncGen = struct { fn airDbgInlineBegin(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { const dib = self.dg.object.di_builder orelse return null; - const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; + const ty_fn = self.air.instructions.items(.data)[inst].ty_fn; const mod = self.dg.module; - const func = 
self.air.values[ty_pl.payload].getFunction(mod).?; + const func = mod.funcPtr(ty_fn.func); const decl_index = func.owner_decl; const decl = mod.declPtr(decl_index); const di_file = try self.dg.object.getDIFile(self.gpa, mod.namespacePtr(decl.src_namespace).file_scope); @@ -5986,10 +5986,10 @@ pub const FuncGen = struct { fn airDbgInlineEnd(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value { if (self.dg.object.di_builder == null) return null; - const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; + const ty_fn = self.air.instructions.items(.data)[inst].ty_fn; const mod = self.dg.module; - const func = self.air.values[ty_pl.payload].getFunction(mod).?; + const func = mod.funcPtr(ty_fn.func); const decl = mod.declPtr(func.owner_decl); const di_file = try self.dg.object.getDIFile(self.gpa, mod.namespacePtr(decl.src_namespace).file_scope); self.di_file = di_file; @@ -8875,7 +8875,7 @@ pub const FuncGen = struct { const extra = self.air.extraData(Air.Shuffle, ty_pl.payload).data; const a = try self.resolveInst(extra.a); const b = try self.resolveInst(extra.b); - const mask = self.air.values[extra.mask]; + const mask = extra.mask.toValue(); const mask_len = extra.mask_len; const a_len = self.typeOf(extra.a).vectorLen(mod); diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 43b6741493..80e98dbcd3 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -2074,7 +2074,7 @@ pub const DeclGen = struct { const extra = self.air.extraData(Air.Shuffle, ty_pl.payload).data; const a = try self.resolve(extra.a); const b = try self.resolve(extra.b); - const mask = self.air.values[extra.mask]; + const mask = extra.mask.toValue(); const mask_len = extra.mask_len; const a_len = self.typeOf(extra.a).vectorLen(mod); diff --git a/src/print_air.zig b/src/print_air.zig index 800fbc43c2..be7bc9610d 100644 --- a/src/print_air.zig +++ b/src/print_air.zig @@ -15,12 +15,11 @@ pub fn write(stream: anytype, module: *Module, air: Air, liveness: ?Liveness) vo // the debug safety tag but we want to measure release size. 
(@sizeOf(Air.Inst.Tag) + 8); const extra_bytes = air.extra.len * @sizeOf(u32); - const values_bytes = air.values.len * @sizeOf(Value); const tomb_bytes = if (liveness) |l| l.tomb_bits.len * @sizeOf(usize) else 0; const liveness_extra_bytes = if (liveness) |l| l.extra.len * @sizeOf(u32) else 0; const liveness_special_bytes = if (liveness) |l| l.special.count() * 8 else 0; const total_bytes = @sizeOf(Air) + instruction_bytes + extra_bytes + - values_bytes + @sizeOf(Liveness) + liveness_extra_bytes + + @sizeOf(Liveness) + liveness_extra_bytes + liveness_special_bytes + tomb_bytes; // zig fmt: off @@ -28,7 +27,6 @@ pub fn write(stream: anytype, module: *Module, air: Air, liveness: ?Liveness) vo \\# Total AIR+Liveness bytes: {} \\# AIR Instructions: {d} ({}) \\# AIR Extra Data: {d} ({}) - \\# AIR Values Bytes: {d} ({}) \\# Liveness tomb_bits: {} \\# Liveness Extra Data: {d} ({}) \\# Liveness special table: {d} ({}) @@ -37,7 +35,6 @@ pub fn write(stream: anytype, module: *Module, air: Air, liveness: ?Liveness) vo fmtIntSizeBin(total_bytes), air.instructions.len, fmtIntSizeBin(instruction_bytes), air.extra.len, fmtIntSizeBin(extra_bytes), - air.values.len, fmtIntSizeBin(values_bytes), fmtIntSizeBin(tomb_bytes), if (liveness) |l| l.extra.len else 0, fmtIntSizeBin(liveness_extra_bytes), if (liveness) |l| l.special.count() else 0, fmtIntSizeBin(liveness_special_bytes), @@ -300,7 +297,8 @@ const Writer = struct { .struct_field_ptr => try w.writeStructField(s, inst), .struct_field_val => try w.writeStructField(s, inst), - .inferred_alloc, .inferred_alloc_comptime => try w.writeConstant(s, inst), + .inferred_alloc => @panic("TODO"), + .inferred_alloc_comptime => @panic("TODO"), .interned => try w.writeInterned(s, inst), .assembly => try w.writeAssembly(s, inst), .dbg_stmt => try w.writeDbgStmt(s, inst), @@ -598,14 +596,6 @@ const Writer = struct { try s.print(", {d}", .{extra.field_index}); } - fn writeConstant(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { - const ty_pl = w.air.instructions.items(.data)[inst].ty_pl; - const val = w.air.values[ty_pl.payload]; - const ty = w.air.getRefType(ty_pl.ty); - try w.writeType(s, ty); - try s.print(", {}", .{val.fmtValue(ty, w.module)}); - } - fn writeInterned(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { const mod = w.module; const ip_index = w.air.instructions.items(.data)[inst].interned; @@ -693,9 +683,9 @@ const Writer = struct { } fn writeDbgInline(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { - const ty_pl = w.air.instructions.items(.data)[inst].ty_pl; - const func_index = w.module.intern_pool.indexToFunc(w.air.values[ty_pl.payload].ip_index); - const owner_decl = w.module.declPtr(w.module.funcPtrUnwrap(func_index).?.owner_decl); + const ty_fn = w.air.instructions.items(.data)[inst].ty_fn; + const func_index = ty_fn.func; + const owner_decl = w.module.declPtr(w.module.funcPtr(func_index).owner_decl); try s.print("{s}", .{owner_decl.name}); } -- cgit v1.2.3 From 63dc0447fc4654324ef8efcfa65849f7ef682531 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 26 May 2023 13:21:35 -0700 Subject: wasm: fix error union constant lowering --- src/arch/wasm/CodeGen.zig | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) (limited to 'src/arch') diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index 0c77197417..2715af08f2 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -3183,15 +3183,26 @@ fn lowerConstant(func: *CodeGen, arg_val: 
Value, ty: Type) InnerError!WValue { const kv = try mod.getErrorValue(name); return WValue{ .imm32 = kv.value }; }, - .error_union => { - const error_type = ty.errorUnionSet(mod); + .error_union => |error_union| { + const err_tv: TypedValue = switch (error_union.val) { + .err_name => |err_name| .{ + .ty = ty.errorUnionSet(mod), + .val = (try mod.intern(.{ .err = .{ + .ty = ty.errorUnionSet(mod).toIntern(), + .name = err_name, + } })).toValue(), + }, + .payload => .{ + .ty = Type.err_int, + .val = try mod.intValue(Type.err_int, 0), + }, + }; const payload_type = ty.errorUnionPayload(mod); if (!payload_type.hasRuntimeBitsIgnoreComptime(mod)) { // We use the error type directly as the type. - const is_pl = val.errorUnionIsPayload(mod); - const err_val = if (!is_pl) val else try mod.intValue(error_type, 0); - return func.lowerConstant(err_val, error_type); + return func.lowerConstant(err_tv.val, err_tv.ty); } + return func.fail("Wasm TODO: lowerConstant error union with non-zero-bit payload type", .{}); }, .enum_tag => |enum_tag| { -- cgit v1.2.3 From 9cd0ca9f482ef7f76d3f3ca683913e9aceaa47fe Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Fri, 26 May 2023 16:04:53 -0400 Subject: Module: rename functions to make ownership checks explicit This makes the difference between `decl.getOwnedFunction` and `decl.val.getFunction` more clear when reading the code. --- src/Module.zig | 66 ++++++++++++++++++++++++----------------------- src/Sema.zig | 12 ++++----- src/arch/wasm/CodeGen.zig | 2 +- src/codegen/c.zig | 12 ++++----- src/codegen/llvm.zig | 24 ++++++++--------- src/codegen/spirv.zig | 6 ++--- src/link/Coff.zig | 10 +++---- src/link/Dwarf.zig | 4 +-- src/link/Elf.zig | 8 +++--- src/link/MachO.zig | 14 +++++----- src/link/Plan9.zig | 8 +++--- src/link/SpirV.zig | 2 +- src/link/Wasm.zig | 10 +++---- 13 files changed, 90 insertions(+), 88 deletions(-) (limited to 'src/arch') diff --git a/src/Module.zig b/src/Module.zig index f78f533006..e24f4e501c 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -613,7 +613,7 @@ pub const Decl = struct { pub fn clearValues(decl: *Decl, mod: *Module) void { const gpa = mod.gpa; - if (decl.getFunctionIndex(mod).unwrap()) |func| { + if (decl.getOwnedFunctionIndex(mod).unwrap()) |func| { _ = mod.align_stack_fns.remove(func); if (mod.funcPtr(func).comptime_args != null) { _ = mod.monomorphed_funcs.removeContext(func, .{ .mod = mod }); @@ -772,52 +772,52 @@ pub const Decl = struct { return tv.ty.zigTypeTag(mod) == .Fn; } - /// If the Decl has a value and it is a struct, return it, + /// If the Decl owns its value and it is a struct, return it, /// otherwise null. - pub fn getStruct(decl: Decl, mod: *Module) ?*Struct { - return mod.structPtrUnwrap(decl.getStructIndex(mod)); + pub fn getOwnedStruct(decl: Decl, mod: *Module) ?*Struct { + return mod.structPtrUnwrap(decl.getOwnedStructIndex(mod)); } - pub fn getStructIndex(decl: Decl, mod: *Module) Struct.OptionalIndex { + pub fn getOwnedStructIndex(decl: Decl, mod: *Module) Struct.OptionalIndex { if (!decl.owns_tv) return .none; if (decl.val.ip_index == .none) return .none; return mod.intern_pool.indexToStructType(decl.val.toIntern()); } - /// If the Decl has a value and it is a union, return it, + /// If the Decl owns its value and it is a union, return it, /// otherwise null. 
- pub fn getUnion(decl: Decl, mod: *Module) ?*Union { + pub fn getOwnedUnion(decl: Decl, mod: *Module) ?*Union { if (!decl.owns_tv) return null; if (decl.val.ip_index == .none) return null; return mod.typeToUnion(decl.val.toType()); } - /// If the Decl has a value and it is a function, return it, + /// If the Decl owns its value and it is a function, return it, /// otherwise null. - pub fn getFunction(decl: Decl, mod: *Module) ?*Fn { - return mod.funcPtrUnwrap(decl.getFunctionIndex(mod)); + pub fn getOwnedFunction(decl: Decl, mod: *Module) ?*Fn { + return mod.funcPtrUnwrap(decl.getOwnedFunctionIndex(mod)); } - pub fn getFunctionIndex(decl: Decl, mod: *Module) Fn.OptionalIndex { + pub fn getOwnedFunctionIndex(decl: Decl, mod: *Module) Fn.OptionalIndex { return if (decl.owns_tv) decl.val.getFunctionIndex(mod) else .none; } - /// If the Decl has a value and it is an extern function, returns it, + /// If the Decl owns its value and it is an extern function, returns it, /// otherwise null. - pub fn getExternFunc(decl: Decl, mod: *Module) ?InternPool.Key.ExternFunc { + pub fn getOwnedExternFunc(decl: Decl, mod: *Module) ?InternPool.Key.ExternFunc { return if (decl.owns_tv) decl.val.getExternFunc(mod) else null; } - /// If the Decl has a value and it is a variable, returns it, + /// If the Decl owns its value and it is a variable, returns it, /// otherwise null. - pub fn getVariable(decl: Decl, mod: *Module) ?InternPool.Key.Variable { + pub fn getOwnedVariable(decl: Decl, mod: *Module) ?InternPool.Key.Variable { return if (decl.owns_tv) decl.val.getVariable(mod) else null; } /// Gets the namespace that this Decl creates by being a struct, union, /// enum, or opaque. /// Only returns it if the Decl is the owner. - pub fn getInnerNamespaceIndex(decl: Decl, mod: *Module) Namespace.OptionalIndex { + pub fn getOwnedInnerNamespaceIndex(decl: Decl, mod: *Module) Namespace.OptionalIndex { if (!decl.owns_tv) return .none; return switch (decl.val.ip_index) { .empty_struct_type => .none, @@ -833,8 +833,8 @@ pub const Decl = struct { } /// Same as `getInnerNamespaceIndex` but additionally obtains the pointer. - pub fn getInnerNamespace(decl: Decl, mod: *Module) ?*Namespace { - return if (decl.getInnerNamespaceIndex(mod).unwrap()) |i| mod.namespacePtr(i) else null; + pub fn getOwnedInnerNamespace(decl: Decl, mod: *Module) ?*Namespace { + return mod.namespacePtrUnwrap(decl.getOwnedInnerNamespaceIndex(mod)); } pub fn dump(decl: *Decl) void { @@ -3361,7 +3361,7 @@ pub fn destroyDecl(mod: *Module, decl_index: Decl.Index) void { gpa.free(kv.value); } if (decl.has_tv) { - if (decl.getInnerNamespaceIndex(mod).unwrap()) |i| { + if (decl.getOwnedInnerNamespaceIndex(mod).unwrap()) |i| { mod.namespacePtr(i).destroyDecls(mod); mod.destroyNamespace(i); } @@ -3407,6 +3407,10 @@ pub fn inferredErrorSetPtr(mod: *Module, index: Fn.InferredErrorSet.Index) *Fn.I return mod.intern_pool.inferredErrorSetPtr(index); } +pub fn namespacePtrUnwrap(mod: *Module, index: Namespace.OptionalIndex) ?*Namespace { + return mod.namespacePtr(index.unwrap() orelse return null); +} + /// This one accepts an index from the InternPool and asserts that it is not /// the anonymous empty struct type. 
pub fn structPtrUnwrap(mod: *Module, index: Struct.OptionalIndex) ?*Struct { @@ -3873,28 +3877,28 @@ fn updateZirRefs(mod: *Module, file: *File, old_zir: Zir) !void { if (!decl.owns_tv) continue; - if (decl.getStruct(mod)) |struct_obj| { + if (decl.getOwnedStruct(mod)) |struct_obj| { struct_obj.zir_index = inst_map.get(struct_obj.zir_index) orelse { try file.deleted_decls.append(gpa, decl_index); continue; }; } - if (decl.getUnion(mod)) |union_obj| { + if (decl.getOwnedUnion(mod)) |union_obj| { union_obj.zir_index = inst_map.get(union_obj.zir_index) orelse { try file.deleted_decls.append(gpa, decl_index); continue; }; } - if (decl.getFunction(mod)) |func| { + if (decl.getOwnedFunction(mod)) |func| { func.zir_body_inst = inst_map.get(func.zir_body_inst) orelse { try file.deleted_decls.append(gpa, decl_index); continue; }; } - if (decl.getInnerNamespace(mod)) |namespace| { + if (decl.getOwnedInnerNamespace(mod)) |namespace| { for (namespace.decls.keys()) |sub_decl| { try decl_stack.append(gpa, sub_decl); } @@ -4074,7 +4078,7 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void { try mod.deleteDeclExports(decl_index); // Similarly, `@setAlignStack` invocations will be re-discovered. - if (decl.getFunctionIndex(mod).unwrap()) |func| { + if (decl.getOwnedFunctionIndex(mod).unwrap()) |func| { _ = mod.align_stack_fns.remove(func); } @@ -4577,7 +4581,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { if (mod.declIsRoot(decl_index)) { log.debug("semaDecl root {*} ({s})", .{ decl, decl.name }); const main_struct_inst = Zir.main_struct_inst; - const struct_index = decl.getStructIndex(mod).unwrap().?; + const struct_index = decl.getOwnedStructIndex(mod).unwrap().?; const struct_obj = mod.structPtr(struct_index); // This might not have gotten set in `semaFile` if the first time had // a ZIR failure, so we set it here in case. 
@@ -4659,7 +4663,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { if (decl.has_tv) { prev_type_has_bits = decl.ty.isFnOrHasRuntimeBits(mod); type_changed = !decl.ty.eql(decl_tv.ty, mod); - if (decl.getFunction(mod)) |prev_func| { + if (decl.getOwnedFunction(mod)) |prev_func| { prev_is_inline = prev_func.state == .inline_only; } } @@ -5313,7 +5317,7 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) Allocator.Err decl.has_align = has_align; decl.has_linksection_or_addrspace = has_linksection_or_addrspace; decl.zir_decl_index = @intCast(u32, decl_sub_index); - if (decl.getFunctionIndex(mod) != .none) { + if (decl.getOwnedFunctionIndex(mod) != .none) { switch (comp.bin_file.tag) { .coff, .elf, .macho, .plan9 => { // TODO Look into detecting when this would be unnecessary by storing enough state @@ -5390,7 +5394,7 @@ pub fn clearDecl( if (decl.ty.isFnOrHasRuntimeBits(mod)) { mod.comp.bin_file.freeDecl(decl_index); } - if (decl.getInnerNamespace(mod)) |namespace| { + if (decl.getOwnedInnerNamespace(mod)) |namespace| { try namespace.deleteAllDecls(mod, outdated_decls); } } @@ -5733,10 +5737,8 @@ fn markOutdatedDecl(mod: *Module, decl_index: Decl.Index) !void { if (mod.cimport_errors.fetchSwapRemove(decl_index)) |kv| { for (kv.value) |err| err.deinit(mod.gpa); } - if (decl.has_tv and decl.owns_tv) { - if (decl.getFunctionIndex(mod).unwrap()) |func| { - _ = mod.align_stack_fns.remove(func); - } + if (decl.getOwnedFunctionIndex(mod).unwrap()) |func| { + _ = mod.align_stack_fns.remove(func); } if (mod.emit_h) |emit_h| { if (emit_h.failed_decls.fetchSwapRemove(decl_index)) |kv| { diff --git a/src/Sema.zig b/src/Sema.zig index 51e58f2e7b..8e09d5f378 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -5730,7 +5730,7 @@ fn zirExport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { try mod.ensureDeclAnalyzed(decl_index); const exported_decl = mod.declPtr(decl_index); - if (exported_decl.getFunction(mod)) |function| { + if (exported_decl.val.getFunction(mod)) |function| { return sema.analyzeExport(block, src, options, function.owner_decl); } } @@ -6206,7 +6206,7 @@ fn funcDeclSrc(sema: *Sema, func_inst: Air.Inst.Ref) !?*Decl { .extern_func => |extern_func| extern_func.decl, .func => |func| mod.funcPtr(func.index).owner_decl, .ptr => |ptr| switch (ptr.addr) { - .decl => |decl| decl, + .decl => |decl| mod.declPtr(decl).val.getFunction(mod).?.owner_decl, else => return null, }, else => return null, @@ -6782,7 +6782,7 @@ fn analyzeCall( }), .func => |function| function.index, .ptr => |ptr| switch (ptr.addr) { - .decl => |decl| mod.declPtr(decl).getFunctionIndex(mod).unwrap().?, + .decl => |decl| mod.declPtr(decl).val.getFunctionIndex(mod).unwrap().?, else => { assert(callee_ty.isPtrAtRuntime(mod)); return sema.fail(block, call_src, "{s} call of function pointer", .{ @@ -7403,7 +7403,7 @@ fn instantiateGenericCall( const func_val = try sema.resolveConstValue(block, func_src, func, "generic function being called must be comptime-known"); const module_fn = mod.funcPtr(switch (mod.intern_pool.indexToKey(func_val.toIntern())) { .func => |function| function.index, - .ptr => |ptr| mod.declPtr(ptr.addr.decl).getFunctionIndex(mod).unwrap().?, + .ptr => |ptr| mod.declPtr(ptr.addr.decl).val.getFunctionIndex(mod).unwrap().?, else => unreachable, }); // Check the Module's generic function map with an adapted context, so that we @@ -28336,7 +28336,7 @@ fn beginComptimePtrLoad( const is_mutable = ptr.addr == .mut_decl; const decl = mod.declPtr(decl_index); const decl_tv = 
try decl.typedValue(); - if (decl.getVariable(mod) != null) return error.RuntimeLoad; + if (decl.val.getVariable(mod) != null) return error.RuntimeLoad; const layout_defined = decl.ty.hasWellDefinedLayout(mod); break :blk ComptimePtrLoadKit{ @@ -29423,7 +29423,7 @@ fn analyzeDeclRefInner(sema: *Sema, decl_index: Decl.Index, analyze_fn_body: boo const ptr_ty = try mod.ptrType(.{ .elem_type = decl_tv.ty.toIntern(), .alignment = InternPool.Alignment.fromByteUnits(decl.@"align"), - .is_const = if (decl.getVariable(mod)) |variable| variable.is_const else false, + .is_const = if (decl.val.getVariable(mod)) |variable| variable.is_const else false, .address_space = decl.@"addrspace", }); if (analyze_fn_body) { diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index 2715af08f2..91743e0d64 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -2210,7 +2210,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif try func.bin_file.addOrUpdateImport( mem.sliceTo(ext_decl.name, 0), atom.getSymbolIndex().?, - mod.intern_pool.stringToSliceUnwrap(ext_decl.getExternFunc(mod).?.lib_name), + mod.intern_pool.stringToSliceUnwrap(ext_decl.getOwnedExternFunc(mod).?.lib_name), type_index, ); break :blk extern_func.decl; diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 59d00f5849..ab69514ee1 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -549,12 +549,12 @@ pub const DeclGen = struct { } // Chase function values in order to be able to reference the original function. - if (decl.getFunction(mod)) |func| if (func.owner_decl != decl_index) + if (decl.val.getFunction(mod)) |func| if (func.owner_decl != decl_index) return dg.renderDeclValue(writer, ty, val, func.owner_decl, location); - if (decl.getExternFunc(mod)) |extern_func| if (extern_func.decl != decl_index) + if (decl.val.getExternFunc(mod)) |extern_func| if (extern_func.decl != decl_index) return dg.renderDeclValue(writer, ty, val, extern_func.decl, location); - if (decl.getVariable(mod)) |variable| try dg.renderFwdDecl(decl_index, variable); + if (decl.val.getVariable(mod)) |variable| try dg.renderFwdDecl(decl_index, variable); // We shouldn't cast C function pointers as this is UB (when you call // them). 
The analysis until now should ensure that the C function @@ -1580,7 +1580,7 @@ pub const DeclGen = struct { else => unreachable, } } - if (fn_decl.getFunction(mod)) |func| if (func.is_cold) try w.writeAll("zig_cold "); + if (fn_decl.val.getFunction(mod)) |func| if (func.is_cold) try w.writeAll("zig_cold "); if (fn_info.return_type == .noreturn_type) try w.writeAll("zig_noreturn "); const trailing = try renderTypePrefix( @@ -2740,13 +2740,13 @@ pub fn genDecl(o: *Object) !void { const tv: TypedValue = .{ .ty = decl.ty, .val = decl.val }; if (!tv.ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) return; - if (decl.getExternFunc(mod)) |_| { + if (tv.val.getExternFunc(mod)) |_| { const fwd_decl_writer = o.dg.fwd_decl.writer(); try fwd_decl_writer.writeAll("zig_extern "); try o.dg.renderFunctionSignature(fwd_decl_writer, decl_c_value.decl, .forward, .{ .export_index = 0 }); try fwd_decl_writer.writeAll(";\n"); try genExports(o); - } else if (decl.getVariable(mod)) |variable| { + } else if (tv.val.getVariable(mod)) |variable| { try o.dg.renderFwdDecl(decl_c_value.decl, variable); try genExports(o); diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index b84a8c8c07..1d3749f6a3 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -1165,7 +1165,7 @@ pub const Object = struct { di_file = try dg.object.getDIFile(gpa, mod.namespacePtr(decl.src_namespace).file_scope); const line_number = decl.src_line + 1; - const is_internal_linkage = decl.getExternFunc(mod) == null and + const is_internal_linkage = decl.val.getExternFunc(mod) == null and !mod.decl_exports.contains(decl_index); const noret_bit: c_uint = if (fn_info.return_type == .noreturn_type) llvm.DIFlags.NoReturn @@ -1274,7 +1274,7 @@ pub const Object = struct { var free_decl_name = false; const decl_name = decl_name: { if (mod.getTarget().isWasm() and try decl.isFunction(mod)) { - if (mod.intern_pool.stringToSliceUnwrap(decl.getExternFunc(mod).?.lib_name)) |lib_name| { + if (mod.intern_pool.stringToSliceUnwrap(decl.getOwnedExternFunc(mod).?.lib_name)) |lib_name| { if (!std.mem.eql(u8, lib_name, "c")) { free_decl_name = true; break :decl_name try std.fmt.allocPrintZ(gpa, "{s}|{s}", .{ decl.name, lib_name }); @@ -1306,7 +1306,7 @@ pub const Object = struct { di_global.replaceLinkageName(linkage_name); } } - if (decl.getVariable(mod)) |variable| { + if (decl.val.getVariable(mod)) |variable| { if (variable.is_threadlocal) { llvm_global.setThreadLocalMode(.GeneralDynamicTLSModel); } else { @@ -1348,7 +1348,7 @@ pub const Object = struct { defer gpa.free(section_z); llvm_global.setSection(section_z); } - if (decl.getVariable(mod)) |variable| { + if (decl.val.getVariable(mod)) |variable| { if (variable.is_threadlocal) { llvm_global.setThreadLocalMode(.GeneralDynamicTLSModel); } @@ -1382,7 +1382,7 @@ pub const Object = struct { llvm_global.setLinkage(.Internal); if (mod.wantDllExports()) llvm_global.setDLLStorageClass(.Default); llvm_global.setUnnamedAddr(.True); - if (decl.getVariable(mod)) |variable| { + if (decl.val.getVariable(mod)) |variable| { const single_threaded = mod.comp.bin_file.options.single_threaded; if (variable.is_threadlocal and !single_threaded) { llvm_global.setThreadLocalMode(.GeneralDynamicTLSModel); @@ -2452,7 +2452,7 @@ pub const DeclGen = struct { log.debug("gen: {s} type: {}, value: {}", .{ decl.name, decl.ty.fmtDebug(), decl.val.fmtDebug(), }); - if (decl.getExternFunc(mod)) |extern_func| { + if (decl.val.getExternFunc(mod)) |extern_func| { _ = try dg.resolveLlvmFunction(extern_func.decl); } else { const target = 
mod.getTarget(); @@ -2460,7 +2460,7 @@ pub const DeclGen = struct { global.setAlignment(decl.getAlignment(mod)); if (decl.@"linksection") |section| global.setSection(section); assert(decl.has_tv); - const init_val = if (decl.getVariable(mod)) |variable| init_val: { + const init_val = if (decl.val.getVariable(mod)) |variable| init_val: { break :init_val variable.init.toValue(); } else init_val: { global.setGlobalConstant(.True); @@ -2555,7 +2555,7 @@ pub const DeclGen = struct { } else { if (target.isWasm()) { dg.addFnAttrString(llvm_fn, "wasm-import-name", std.mem.sliceTo(decl.name, 0)); - if (mod.intern_pool.stringToSliceUnwrap(decl.getExternFunc(mod).?.lib_name)) |lib_name| { + if (mod.intern_pool.stringToSliceUnwrap(decl.getOwnedExternFunc(mod).?.lib_name)) |lib_name| { if (!std.mem.eql(u8, lib_name, "c")) { dg.addFnAttrString(llvm_fn, "wasm-import-module", lib_name); } @@ -2716,7 +2716,7 @@ pub const DeclGen = struct { llvm_global.setValueName(decl.name); llvm_global.setUnnamedAddr(.False); llvm_global.setLinkage(.External); - if (decl.getVariable(mod)) |variable| { + if (decl.val.getVariable(mod)) |variable| { const single_threaded = mod.comp.bin_file.options.single_threaded; if (variable.is_threadlocal and !single_threaded) { llvm_global.setThreadLocalMode(.GeneralDynamicTLSModel); @@ -3993,11 +3993,11 @@ pub const DeclGen = struct { // ... &bar; // `bar` is just an alias and we actually want to lower a reference to `foo`. const decl = mod.declPtr(decl_index); - if (decl.getFunction(mod)) |func| { + if (decl.val.getFunction(mod)) |func| { if (func.owner_decl != decl_index) { return self.lowerDeclRefValue(tv, func.owner_decl); } - } else if (decl.getExternFunc(mod)) |func| { + } else if (decl.val.getExternFunc(mod)) |func| { if (func.decl != decl_index) { return self.lowerDeclRefValue(tv, func.decl); } @@ -7939,7 +7939,7 @@ pub const FuncGen = struct { } const src_index = self.air.instructions.items(.data)[inst].arg.src_index; - const func = self.dg.decl.getFunction(mod).?; + const func = self.dg.decl.getOwnedFunction(mod).?; const lbrace_line = mod.declPtr(func.owner_decl).src_line + func.lbrace_line + 1; const lbrace_col = func.lbrace_column + 1; const di_local_var = dib.createParameterVariable( diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 80e98dbcd3..64a0a7ec57 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -261,7 +261,7 @@ pub const DeclGen = struct { const entry = try self.decl_link.getOrPut(decl_index); if (!entry.found_existing) { // TODO: Extern fn? 
- const kind: SpvModule.DeclKind = if (decl.getFunctionIndex(self.module) != .none) + const kind: SpvModule.DeclKind = if (decl.val.getFunctionIndex(self.module) != .none) .func else .global; @@ -1544,7 +1544,7 @@ pub const DeclGen = struct { const decl_id = self.spv.declPtr(spv_decl_index).result_id; log.debug("genDecl: id = {}, index = {}, name = {s}", .{ decl_id.id, @enumToInt(spv_decl_index), decl.name }); - if (decl.getFunction(mod)) |_| { + if (decl.val.getFunction(mod)) |_| { assert(decl.ty.zigTypeTag(mod) == .Fn); const prototype_id = try self.resolveTypeId(decl.ty); try self.func.prologue.emit(self.spv.gpa, .OpFunction, .{ @@ -1597,7 +1597,7 @@ pub const DeclGen = struct { try self.generateTestEntryPoint(fqn, spv_decl_index); } } else { - const init_val = if (decl.getVariable(mod)) |payload| + const init_val = if (decl.val.getVariable(mod)) |payload| payload.init.toValue() else decl.val; diff --git a/src/link/Coff.zig b/src/link/Coff.zig index f4ee2fde97..8b76e8dd69 100644 --- a/src/link/Coff.zig +++ b/src/link/Coff.zig @@ -1156,10 +1156,10 @@ pub fn updateDecl( const decl = mod.declPtr(decl_index); - if (decl.getExternFunc(mod)) |_| { + if (decl.val.getExternFunc(mod)) |_| { return; // TODO Should we do more when front-end analyzed extern decl? } - if (decl.getVariable(mod)) |variable| { + if (decl.val.getVariable(mod)) |variable| { if (variable.is_extern) { return; // TODO Should we do more when front-end analyzed extern decl? } @@ -1172,7 +1172,7 @@ pub fn updateDecl( var code_buffer = std.ArrayList(u8).init(self.base.allocator); defer code_buffer.deinit(); - const decl_val = if (decl.getVariable(mod)) |variable| variable.init.toValue() else decl.val; + const decl_val = if (decl.val.getVariable(mod)) |variable| variable.init.toValue() else decl.val; const res = try codegen.generateSymbol(&self.base, decl.srcLoc(mod), .{ .ty = decl.ty, .val = decl_val, @@ -1313,7 +1313,7 @@ fn getDeclOutputSection(self: *Coff, decl_index: Module.Decl.Index) u16 { // TODO: what if this is a function pointer? .Fn => break :blk self.text_section_index.?, else => { - if (decl.getVariable(mod)) |_| { + if (val.getVariable(mod)) |_| { break :blk self.data_section_index.?; } break :blk self.rdata_section_index.?; @@ -1425,7 +1425,7 @@ pub fn updateDeclExports( // detect the default subsystem. for (exports) |exp| { const exported_decl = mod.declPtr(exp.exported_decl); - if (exported_decl.getFunctionIndex(mod) == .none) continue; + if (exported_decl.getOwnedFunctionIndex(mod) == .none) continue; const winapi_cc = switch (self.base.options.target.cpu.arch) { .x86 => std.builtin.CallingConvention.Stdcall, else => std.builtin.CallingConvention.C, diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig index d6dd6979ea..9d8076f592 100644 --- a/src/link/Dwarf.zig +++ b/src/link/Dwarf.zig @@ -971,7 +971,7 @@ pub fn initDeclState(self: *Dwarf, mod: *Module, decl_index: Module.Decl.Index) // For functions we need to add a prologue to the debug line program. try dbg_line_buffer.ensureTotalCapacity(26); - const func = decl.getFunction(mod).?; + const func = decl.val.getFunction(mod).?; log.debug("decl.src_line={d}, func.lbrace_line={d}, func.rbrace_line={d}", .{ decl.src_line, func.lbrace_line, @@ -1523,7 +1523,7 @@ pub fn updateDeclLineNumber(self: *Dwarf, mod: *Module, decl_index: Module.Decl. 
if (atom.len == 0) return; const decl = mod.declPtr(decl_index); - const func = decl.getFunction(mod).?; + const func = decl.val.getFunction(mod).?; log.debug("decl.src_line={d}, func.lbrace_line={d}, func.rbrace_line={d}", .{ decl.src_line, func.lbrace_line, diff --git a/src/link/Elf.zig b/src/link/Elf.zig index 476b939038..e4fa07620d 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -2465,7 +2465,7 @@ fn getDeclShdrIndex(self: *Elf, decl_index: Module.Decl.Index) u16 { // TODO: what if this is a function pointer? .Fn => break :blk self.text_section_index.?, else => { - if (decl.getVariable(mod)) |_| { + if (val.getVariable(mod)) |_| { break :blk self.data_section_index.?; } break :blk self.rodata_section_index.?; @@ -2647,10 +2647,10 @@ pub fn updateDecl( const decl = mod.declPtr(decl_index); - if (decl.getExternFunc(mod)) |_| { + if (decl.val.getExternFunc(mod)) |_| { return; // TODO Should we do more when front-end analyzed extern decl? } - if (decl.getVariable(mod)) |variable| { + if (decl.val.getVariable(mod)) |variable| { if (variable.is_extern) { return; // TODO Should we do more when front-end analyzed extern decl? } @@ -2667,7 +2667,7 @@ pub fn updateDecl( defer if (decl_state) |*ds| ds.deinit(); // TODO implement .debug_info for global variables - const decl_val = if (decl.getVariable(mod)) |variable| variable.init.toValue() else decl.val; + const decl_val = if (decl.val.getVariable(mod)) |variable| variable.init.toValue() else decl.val; const res = if (decl_state) |*ds| try codegen.generateSymbol(&self.base, decl.srcLoc(mod), .{ .ty = decl.ty, diff --git a/src/link/MachO.zig b/src/link/MachO.zig index ffbdcdb91f..f7f975f920 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -1984,16 +1984,16 @@ pub fn updateDecl(self: *MachO, mod: *Module, decl_index: Module.Decl.Index) !vo const decl = mod.declPtr(decl_index); - if (decl.getExternFunc(mod)) |_| { + if (decl.val.getExternFunc(mod)) |_| { return; // TODO Should we do more when front-end analyzed extern decl? } - if (decl.getVariable(mod)) |variable| { + if (decl.val.getVariable(mod)) |variable| { if (variable.is_extern) { return; // TODO Should we do more when front-end analyzed extern decl? 
} } - const is_threadlocal = if (decl.getVariable(mod)) |variable| + const is_threadlocal = if (decl.val.getVariable(mod)) |variable| variable.is_threadlocal and !self.base.options.single_threaded else false; @@ -2012,7 +2012,7 @@ pub fn updateDecl(self: *MachO, mod: *Module, decl_index: Module.Decl.Index) !vo null; defer if (decl_state) |*ds| ds.deinit(); - const decl_val = if (decl.getVariable(mod)) |variable| variable.init.toValue() else decl.val; + const decl_val = if (decl.val.getVariable(mod)) |variable| variable.init.toValue() else decl.val; const res = if (decl_state) |*ds| try codegen.generateSymbol(&self.base, decl.srcLoc(mod), .{ .ty = decl.ty, @@ -2177,7 +2177,7 @@ fn updateThreadlocalVariable(self: *MachO, module: *Module, decl_index: Module.D const decl = module.declPtr(decl_index); const decl_metadata = self.decls.get(decl_index).?; - const decl_val = decl.getVariable(mod).?.init.toValue(); + const decl_val = decl.val.getVariable(mod).?.init.toValue(); const res = if (decl_state) |*ds| try codegen.generateSymbol(&self.base, decl.srcLoc(mod), .{ .ty = decl.ty, @@ -2278,7 +2278,7 @@ fn getDeclOutputSection(self: *MachO, decl_index: Module.Decl.Index) u8 { } } - if (decl.getVariable(mod)) |variable| { + if (val.getVariable(mod)) |variable| { if (variable.is_threadlocal and !single_threaded) { break :blk self.thread_data_section_index.?; } @@ -2289,7 +2289,7 @@ fn getDeclOutputSection(self: *MachO, decl_index: Module.Decl.Index) u8 { // TODO: what if this is a function pointer? .Fn => break :blk self.text_section_index.?, else => { - if (decl.getVariable(mod)) |_| { + if (val.getVariable(mod)) |_| { break :blk self.data_section_index.?; } break :blk self.data_const_section_index.?; diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig index 2071833b93..0803b6beef 100644 --- a/src/link/Plan9.zig +++ b/src/link/Plan9.zig @@ -392,10 +392,10 @@ pub fn lowerUnnamedConst(self: *Plan9, tv: TypedValue, decl_index: Module.Decl.I pub fn updateDecl(self: *Plan9, mod: *Module, decl_index: Module.Decl.Index) !void { const decl = mod.declPtr(decl_index); - if (decl.getExternFunc(mod)) |_| { + if (decl.val.getExternFunc(mod)) |_| { return; // TODO Should we do more when front-end analyzed extern decl? } - if (decl.getVariable(mod)) |variable| { + if (decl.val.getVariable(mod)) |variable| { if (variable.is_extern) { return; // TODO Should we do more when front-end analyzed extern decl? } @@ -407,7 +407,7 @@ pub fn updateDecl(self: *Plan9, mod: *Module, decl_index: Module.Decl.Index) !vo var code_buffer = std.ArrayList(u8).init(self.base.allocator); defer code_buffer.deinit(); - const decl_val = if (decl.getVariable(mod)) |variable| variable.init.toValue() else decl.val; + const decl_val = if (decl.val.getVariable(mod)) |variable| variable.init.toValue() else decl.val; // TODO we need the symbol index for symbol in the table of locals for the containing atom const res = try codegen.generateSymbol(&self.base, decl.srcLoc(mod), .{ .ty = decl.ty, @@ -771,7 +771,7 @@ pub fn freeDecl(self: *Plan9, decl_index: Module.Decl.Index) void { // in the deleteUnusedDecl function. 
const mod = self.base.options.module.?; const decl = mod.declPtr(decl_index); - const is_fn = decl.getFunctionIndex(mod) != .none; + const is_fn = decl.val.getFunctionIndex(mod) != .none; if (is_fn) { var symidx_and_submap = self.fn_decl_table.get(decl.getFileScope(mod)).?; var submap = symidx_and_submap.functions; diff --git a/src/link/SpirV.zig b/src/link/SpirV.zig index 0a6608303e..89d6be1ec8 100644 --- a/src/link/SpirV.zig +++ b/src/link/SpirV.zig @@ -138,7 +138,7 @@ pub fn updateDeclExports( exports: []const *Module.Export, ) !void { const decl = mod.declPtr(decl_index); - if (decl.getFunctionIndex(mod) != .none and decl.ty.fnCallingConvention(mod) == .Kernel) { + if (decl.val.getFunctionIndex(mod) != .none and decl.ty.fnCallingConvention(mod) == .Kernel) { // TODO: Unify with resolveDecl in spirv.zig. const entry = try self.decl_link.getOrPut(decl_index); if (!entry.found_existing) { diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig index 78d1be978b..96de121ffb 100644 --- a/src/link/Wasm.zig +++ b/src/link/Wasm.zig @@ -1404,9 +1404,9 @@ pub fn updateDecl(wasm: *Wasm, mod: *Module, decl_index: Module.Decl.Index) !voi defer tracy.end(); const decl = mod.declPtr(decl_index); - if (decl.getFunction(mod)) |_| { + if (decl.val.getFunction(mod)) |_| { return; - } else if (decl.getExternFunc(mod)) |_| { + } else if (decl.val.getExternFunc(mod)) |_| { return; } @@ -1415,12 +1415,12 @@ pub fn updateDecl(wasm: *Wasm, mod: *Module, decl_index: Module.Decl.Index) !voi atom.clear(); if (decl.isExtern(mod)) { - const variable = decl.getVariable(mod).?; + const variable = decl.getOwnedVariable(mod).?; const name = mem.sliceTo(decl.name, 0); const lib_name = mod.intern_pool.stringToSliceUnwrap(variable.lib_name); return wasm.addOrUpdateImport(name, atom.sym_index, lib_name, null); } - const val = if (decl.getVariable(mod)) |variable| variable.init.toValue() else decl.val; + const val = if (decl.val.getVariable(mod)) |variable| variable.init.toValue() else decl.val; var code_writer = std.ArrayList(u8).init(wasm.base.allocator); defer code_writer.deinit(); @@ -3373,7 +3373,7 @@ pub fn flushModule(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Nod const atom = wasm.getAtomPtr(atom_index); if (decl.ty.zigTypeTag(mod) == .Fn) { try wasm.parseAtom(atom_index, .function); - } else if (decl.getVariable(mod)) |variable| { + } else if (decl.getOwnedVariable(mod)) |variable| { if (variable.is_const) { try wasm.parseAtom(atom_index, .{ .data = .read_only }); } else if (variable.init.toValue().isUndefDeep(mod)) { -- cgit v1.2.3 From 1dc01f11401b6ec0be1e7685cdc445d1b10d4f19 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Fri, 26 May 2023 18:56:31 -0400 Subject: InternPool: fix build-exe and compiler-rt crashes --- src/InternPool.zig | 84 +++++++++++++++++++++- src/Module.zig | 4 -- src/Sema.zig | 168 ++++++++++++++++++++++++-------------------- src/arch/x86_64/CodeGen.zig | 2 +- src/codegen/c.zig | 2 +- src/type.zig | 18 ++--- src/value.zig | 5 ++ 7 files changed, 191 insertions(+), 92 deletions(-) (limited to 'src/arch') diff --git a/src/InternPool.zig b/src/InternPool.zig index ad47b4c84e..ea3bafaf48 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -2817,6 +2817,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { }, .ptr_type => |ptr_type| { assert(ptr_type.elem_type != .none); + assert(ptr_type.sentinel == .none or ip.typeOf(ptr_type.sentinel) == ptr_type.elem_type); if (ptr_type.size == .Slice) { _ = ip.map.pop(); @@ -4780,7 +4781,88 @@ pub fn 
stringToSliceUnwrap(ip: InternPool, s: OptionalNullTerminatedString) ?[:0
 }
 
 pub fn typeOf(ip: InternPool, index: Index) Index {
-    return ip.indexToKey(index).typeOf();
+    // This optimization of static keys is required so that typeOf can be called
+    // on static keys that haven't been added yet during static key initialization.
+    // An alternative would be to topologically sort the static keys, but this
+    // would mean that the range of type indices would not be dense.
+    return switch (index) {
+        .u1_type,
+        .u8_type,
+        .i8_type,
+        .u16_type,
+        .i16_type,
+        .u29_type,
+        .u32_type,
+        .i32_type,
+        .u64_type,
+        .i64_type,
+        .u80_type,
+        .u128_type,
+        .i128_type,
+        .usize_type,
+        .isize_type,
+        .c_char_type,
+        .c_short_type,
+        .c_ushort_type,
+        .c_int_type,
+        .c_uint_type,
+        .c_long_type,
+        .c_ulong_type,
+        .c_longlong_type,
+        .c_ulonglong_type,
+        .c_longdouble_type,
+        .f16_type,
+        .f32_type,
+        .f64_type,
+        .f80_type,
+        .f128_type,
+        .anyopaque_type,
+        .bool_type,
+        .void_type,
+        .type_type,
+        .anyerror_type,
+        .comptime_int_type,
+        .comptime_float_type,
+        .noreturn_type,
+        .anyframe_type,
+        .null_type,
+        .undefined_type,
+        .enum_literal_type,
+        .atomic_order_type,
+        .atomic_rmw_op_type,
+        .calling_convention_type,
+        .address_space_type,
+        .float_mode_type,
+        .reduce_op_type,
+        .call_modifier_type,
+        .prefetch_options_type,
+        .export_options_type,
+        .extern_options_type,
+        .type_info_type,
+        .manyptr_u8_type,
+        .manyptr_const_u8_type,
+        .manyptr_const_u8_sentinel_0_type,
+        .single_const_pointer_to_comptime_int_type,
+        .slice_const_u8_type,
+        .slice_const_u8_sentinel_0_type,
+        .anyerror_void_error_union_type,
+        .generic_poison_type,
+        .empty_struct_type,
+        => .type_type,
+        .undef => .undefined_type,
+        .zero, .one, .negative_one => .comptime_int_type,
+        .zero_usize, .one_usize => .usize_type,
+        .zero_u8, .one_u8, .four_u8 => .u8_type,
+        .calling_convention_c, .calling_convention_inline => .calling_convention_type,
+        .void_value => .void_type,
+        .unreachable_value => .noreturn_type,
+        .null_value => .null_type,
+        .bool_true, .bool_false => .bool_type,
+        .empty_struct => .empty_struct_type,
+        .generic_poison => .generic_poison_type,
+        .var_args_param_type, .none => unreachable,
+        _ => ip.indexToKey(index).typeOf(),
+    };
 }
 
 /// Assumes that the enum's field indexes equal its value tags.
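The comment in the hunk above justifies the switch: the static keys form a dense prefix of `Index` whose types are known up front, so `typeOf` can answer for them before the corresponding items exist in the pool. The same shape in miniature (invented names; not compiler code):

    // A tiny analogue of the static-key fast path. The first few enum
    // values are "static" and resolved directly; everything else would
    // fall back to a keyed lookup in a real implementation.
    const MiniIndex = enum(u32) {
        type_type,
        u8_type,
        zero_u8,
        _,
    };

    fn miniTypeOf(index: MiniIndex) MiniIndex {
        return switch (index) {
            // Every static type index is itself a value of type `type`.
            .type_type, .u8_type => .type_type,
            .zero_u8 => .u8_type,
            // Dynamic indices: stand-in for `ip.indexToKey(index).typeOf()`.
            _ => unreachable,
        };
    }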
diff --git a/src/Module.zig b/src/Module.zig index e24f4e501c..5b4d5c71af 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -6898,10 +6898,6 @@ pub fn enumValueFieldIndex(mod: *Module, ty: Type, field_index: u32) Allocator.E } pub fn intValue(mod: *Module, ty: Type, x: anytype) Allocator.Error!Value { - if (std.debug.runtime_safety) { - const tag = ty.zigTypeTag(mod); - assert(tag == .Int or tag == .ComptimeInt); - } if (std.math.cast(u64, x)) |casted| return intValue_u64(mod, ty, casted); if (std.math.cast(i64, x)) |casted| return intValue_i64(mod, ty, casted); var limbs_buffer: [4]usize = undefined; diff --git a/src/Sema.zig b/src/Sema.zig index 8e09d5f378..c351298511 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -848,13 +848,12 @@ pub fn analyzeBodyBreak( block: *Block, body: []const Zir.Inst.Index, ) CompileError!?BreakData { - const mod = sema.mod; const break_inst = sema.analyzeBodyInner(block, body) catch |err| switch (err) { error.ComptimeBreak => sema.comptime_break_inst, else => |e| return e, }; if (block.instructions.items.len != 0 and - sema.typeOf(Air.indexToRef(block.instructions.items[block.instructions.items.len - 1])).isNoReturn(mod)) + sema.isNoReturn(Air.indexToRef(block.instructions.items[block.instructions.items.len - 1]))) return null; const break_data = sema.code.instructions.items(.data)[break_inst].@"break"; const extra = sema.code.extraData(Zir.Inst.Break, break_data.payload_index).data; @@ -9671,9 +9670,9 @@ fn intCast( // range to account for negative values. const dest_range_val = if (wanted_info.signedness == .signed) range_val: { const one = try mod.intValue(unsigned_operand_ty, 1); - const range_minus_one = try dest_max_val.shl(one, unsigned_operand_ty, sema.arena, sema.mod); + const range_minus_one = try dest_max_val.shl(one, unsigned_operand_ty, sema.arena, mod); break :range_val try sema.intAdd(range_minus_one, one, unsigned_operand_ty); - } else dest_max_val; + } else try mod.getCoerced(dest_max_val, unsigned_operand_ty); const dest_range = try sema.addConstant(unsigned_operand_ty, dest_range_val); const ok = if (is_vector) ok: { @@ -10791,7 +10790,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError check_range: { if (operand_ty.zigTypeTag(mod) == .Int) { - const min_int = try operand_ty.minInt(mod); + const min_int = try operand_ty.minInt(mod, operand_ty); const max_int = try operand_ty.maxInt(mod, operand_ty); if (try range_set.spans(min_int, max_int, operand_ty)) { if (special_prong == .@"else") { @@ -11647,7 +11646,7 @@ const RangeSetUnhandledIterator = struct { fn init(sema: *Sema, ty: Type, range_set: RangeSet) !RangeSetUnhandledIterator { const mod = sema.mod; - const min = try ty.minInt(mod); + const min = try ty.minInt(mod, ty); const max = try ty.maxInt(mod, ty); return RangeSetUnhandledIterator{ @@ -12452,7 +12451,7 @@ fn zirShr( if (block.wantSafety()) { const bit_count = scalar_ty.intInfo(mod).bits; if (!std.math.isPowerOfTwo(bit_count)) { - const bit_count_val = try mod.intValue(scalar_ty, bit_count); + const bit_count_val = try mod.intValue(rhs_ty.scalarType(mod), bit_count); const ok = if (rhs_ty.zigTypeTag(mod) == .Vector) ok: { const bit_count_inst = try sema.addConstant(rhs_ty, try sema.splat(rhs_ty, bit_count_val)); @@ -13297,7 +13296,7 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins if (!lhs_val.isUndef(mod)) { if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { const scalar_zero = switch (scalar_tag) { - .ComptimeFloat, .Float => try 
mod.floatValue(resolved_type.scalarType(mod), 0), + .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0.0), .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), else => unreachable, }; @@ -13437,7 +13436,7 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } else { if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { const scalar_zero = switch (scalar_tag) { - .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0), + .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0.0), .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), else => unreachable, }; @@ -13520,7 +13519,7 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const remainder = try block.addBinOp(.rem, casted_lhs, casted_rhs); const scalar_zero = switch (scalar_tag) { - .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0), + .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0.0), .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), else => unreachable, }; @@ -13608,7 +13607,7 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai if (!lhs_val.isUndef(mod)) { if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { const scalar_zero = switch (scalar_tag) { - .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0), + .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0.0), .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), else => unreachable, }; @@ -13725,7 +13724,7 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai if (!lhs_val.isUndef(mod)) { if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { const scalar_zero = switch (scalar_tag) { - .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0), + .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0.0), .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), else => unreachable, }; @@ -13805,7 +13804,7 @@ fn addDivIntOverflowSafety( return; } - const min_int = try resolved_type.minInt(mod); + const min_int = try resolved_type.minInt(mod, resolved_type); const neg_one_scalar = try mod.intValue(lhs_scalar_ty, -1); const neg_one = try sema.splat(resolved_type, neg_one_scalar); @@ -13881,7 +13880,7 @@ fn addDivByZeroSafety( const scalar_zero = if (is_int) try mod.intValue(resolved_type.scalarType(mod), 0) else - try mod.floatValue(resolved_type.scalarType(mod), 0); + try mod.floatValue(resolved_type.scalarType(mod), 0.0); const ok = if (resolved_type.zigTypeTag(mod) == .Vector) ok: { const zero_val = try sema.splat(resolved_type, scalar_zero); const zero = try sema.addConstant(resolved_type, zero_val); @@ -13967,7 +13966,7 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. 
} if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { const scalar_zero = switch (scalar_tag) { - .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0), + .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0.0), .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), else => unreachable, }; @@ -14575,7 +14574,8 @@ fn analyzeArithmetic( const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src); const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src); - const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod); + const scalar_type = resolved_type.scalarType(mod); + const scalar_tag = scalar_type.zigTypeTag(mod); const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt; @@ -14797,8 +14797,13 @@ fn analyzeArithmetic( // the result is nan. // If either of the operands are nan, the result is nan. const scalar_zero = switch (scalar_tag) { - .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0), - .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), + .ComptimeFloat, .Float => try mod.floatValue(scalar_type, 0.0), + .ComptimeInt, .Int => try mod.intValue(scalar_type, 0), + else => unreachable, + }; + const scalar_one = switch (scalar_tag) { + .ComptimeFloat, .Float => try mod.floatValue(scalar_type, 1.0), + .ComptimeInt, .Int => try mod.intValue(scalar_type, 1), else => unreachable, }; if (maybe_lhs_val) |lhs_val| { @@ -14823,7 +14828,7 @@ fn analyzeArithmetic( const zero_val = try sema.splat(resolved_type, scalar_zero); return sema.addConstant(resolved_type, zero_val); } - if (try sema.compareAll(lhs_val, .eq, try mod.intValue(resolved_type, 1), resolved_type)) { + if (try sema.compareAll(lhs_val, .eq, try sema.splat(resolved_type, scalar_one), resolved_type)) { return casted_rhs; } } @@ -14854,7 +14859,7 @@ fn analyzeArithmetic( const zero_val = try sema.splat(resolved_type, scalar_zero); return sema.addConstant(resolved_type, zero_val); } - if (try sema.compareAll(rhs_val, .eq, try mod.intValue(resolved_type, 1), resolved_type)) { + if (try sema.compareAll(rhs_val, .eq, try sema.splat(resolved_type, scalar_one), resolved_type)) { return casted_lhs; } if (maybe_lhs_val) |lhs_val| { @@ -14887,8 +14892,8 @@ fn analyzeArithmetic( // If either of the operands are one, result is the other operand. // If either of the operands are undefined, result is undefined. const scalar_zero = switch (scalar_tag) { - .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0), - .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), + .ComptimeFloat, .Float => try mod.floatValue(scalar_type, 0.0), + .ComptimeInt, .Int => try mod.intValue(scalar_type, 0), else => unreachable, }; if (maybe_lhs_val) |lhs_val| { @@ -14931,8 +14936,8 @@ fn analyzeArithmetic( // If either of the operands are one, result is the other operand. // If either of the operands are undefined, result is undefined. 
const scalar_zero = switch (scalar_tag) { - .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0), - .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0), + .ComptimeFloat, .Float => try mod.floatValue(scalar_type, 0.0), + .ComptimeInt, .Int => try mod.intValue(scalar_type, 0), else => unreachable, }; if (maybe_lhs_val) |lhs_val| { @@ -18817,7 +18822,10 @@ fn getErrorReturnTrace(sema: *Sema, block: *Block) CompileError!Air.Inst.Ref { { return block.addTy(.err_return_trace, opt_ptr_stack_trace_ty); } - return sema.addConstant(opt_ptr_stack_trace_ty, Value.null); + return sema.addConstant(opt_ptr_stack_trace_ty, (try mod.intern(.{ .opt = .{ + .ty = opt_ptr_stack_trace_ty.toIntern(), + .val = .none, + } })).toValue()); } fn zirFrame( @@ -20103,8 +20111,8 @@ fn zirFloatToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! if (block.wantSafety()) { const back = try block.addTyOp(.int_to_float, operand_ty, result); const diff = try block.addBinOp(.sub, operand, back); - const ok_pos = try block.addBinOp(if (block.float_mode == .Optimized) .cmp_lt_optimized else .cmp_lt, diff, try sema.addConstant(operand_ty, try mod.intValue(operand_ty, 1))); - const ok_neg = try block.addBinOp(if (block.float_mode == .Optimized) .cmp_gt_optimized else .cmp_gt, diff, try sema.addConstant(operand_ty, try mod.intValue(operand_ty, -1))); + const ok_pos = try block.addBinOp(if (block.float_mode == .Optimized) .cmp_lt_optimized else .cmp_lt, diff, try sema.addConstant(operand_ty, try mod.floatValue(operand_ty, 1.0))); + const ok_neg = try block.addBinOp(if (block.float_mode == .Optimized) .cmp_gt_optimized else .cmp_gt, diff, try sema.addConstant(operand_ty, try mod.floatValue(operand_ty, -1.0))); const ok = try block.addBinOp(.bool_and, ok_pos, ok_neg); try sema.addSafetyCheck(block, ok, .integer_part_out_of_bounds); } @@ -22448,8 +22456,8 @@ fn analyzeMinMax( // Compute the final bounds based on the runtime type and the comptime-known bound type const min_val = switch (air_tag) { - .min => try unrefined_elem_ty.minInt(mod), - .max => try comptime_elem_ty.minInt(mod), // @max(ct, rt) >= ct + .min => try unrefined_elem_ty.minInt(mod, unrefined_elem_ty), + .max => try comptime_elem_ty.minInt(mod, comptime_elem_ty), // @max(ct, rt) >= ct else => unreachable, }; const max_val = switch (air_tag) { @@ -25996,7 +26004,7 @@ fn coerceExtra( if (dest_info.sentinel) |dest_sent| { if (array_ty.sentinel(mod)) |inst_sent| { - if (!dest_sent.eql(inst_sent, dst_elem_type, sema.mod)) { + if (!dest_sent.eql(inst_sent, dst_elem_type, mod)) { in_memory_result = .{ .ptr_sentinel = .{ .actual = inst_sent, .wanted = dest_sent, @@ -26115,7 +26123,7 @@ fn coerceExtra( if (inst_info.size == .Slice) { assert(dest_info.sentinel == null); if (inst_info.sentinel == null or - !inst_info.sentinel.?.eql(try mod.intValue(dest_info.pointee_type, 0), dest_info.pointee_type, sema.mod)) + !inst_info.sentinel.?.eql(try mod.intValue(dest_info.pointee_type, 0), dest_info.pointee_type, mod)) break :p; const slice_ptr = try sema.analyzeSlicePtr(block, inst_src, inst, inst_ty); @@ -26164,7 +26172,7 @@ fn coerceExtra( block, inst_src, "array literal requires address-of operator (&) to coerce to slice type '{}'", - .{dest_ty.fmt(sema.mod)}, + .{dest_ty.fmt(mod)}, ); } @@ -26190,7 +26198,7 @@ fn coerceExtra( // pointer to tuple to slice if (dest_info.mutable) { const err_msg = err_msg: { - const err_msg = try sema.errMsg(block, inst_src, "cannot cast pointer to tuple to '{}'", 
.{dest_ty.fmt(sema.mod)}); + const err_msg = try sema.errMsg(block, inst_src, "cannot cast pointer to tuple to '{}'", .{dest_ty.fmt(mod)}); errdefer err_msg.deinit(sema.gpa); try sema.errNote(block, dest_ty_src, err_msg, "pointers to tuples can only coerce to constant pointers", .{}); break :err_msg err_msg; @@ -26218,7 +26226,7 @@ fn coerceExtra( } if (dest_info.sentinel == null or inst_info.sentinel == null or - !dest_info.sentinel.?.eql(inst_info.sentinel.?, dest_info.pointee_type, sema.mod)) + !dest_info.sentinel.?.eql(inst_info.sentinel.?, dest_info.pointee_type, mod)) break :p; const slice_ptr = try sema.analyzeSlicePtr(block, inst_src, inst, inst_ty); @@ -26244,7 +26252,7 @@ fn coerceExtra( block, inst_src, "fractional component prevents float value '{}' from coercion to type '{}'", - .{ val.fmtValue(inst_ty, sema.mod), dest_ty.fmt(sema.mod) }, + .{ val.fmtValue(inst_ty, mod), dest_ty.fmt(mod) }, ); } const result_val = try sema.floatToInt(block, inst_src, val, inst_ty, dest_ty); @@ -26258,7 +26266,7 @@ fn coerceExtra( // comptime-known integer to other number if (!(try sema.intFitsInType(val, dest_ty, null))) { if (!opts.report_err) return error.NotCoercible; - return sema.fail(block, inst_src, "type '{}' cannot represent integer value '{}'", .{ dest_ty.fmt(sema.mod), val.fmtValue(inst_ty, sema.mod) }); + return sema.fail(block, inst_src, "type '{}' cannot represent integer value '{}'", .{ dest_ty.fmt(mod), val.fmtValue(inst_ty, mod) }); } return try sema.addConstant(dest_ty, try mod.getCoerced(val, dest_ty)); } @@ -26296,12 +26304,12 @@ fn coerceExtra( } if (try sema.resolveMaybeUndefVal(inst)) |val| { const result_val = try val.floatCast(dest_ty, mod); - if (!val.eql(result_val, inst_ty, sema.mod)) { + if (!val.eql(try result_val.floatCast(inst_ty, mod), inst_ty, mod)) { return sema.fail( block, inst_src, "type '{}' cannot represent float value '{}'", - .{ dest_ty.fmt(sema.mod), val.fmtValue(inst_ty, sema.mod) }, + .{ dest_ty.fmt(mod), val.fmtValue(inst_ty, mod) }, ); } return try sema.addConstant(dest_ty, result_val); @@ -26329,7 +26337,7 @@ fn coerceExtra( } break :int; }; - const result_val = try val.intToFloatAdvanced(sema.arena, inst_ty, dest_ty, sema.mod, sema); + const result_val = try val.intToFloatAdvanced(sema.arena, inst_ty, dest_ty, mod, sema); // TODO implement this compile error //const int_again_val = try result_val.floatToInt(sema.arena, inst_ty); //if (!int_again_val.eql(val, inst_ty, mod)) { @@ -26337,7 +26345,7 @@ fn coerceExtra( // block, // inst_src, // "type '{}' cannot represent integer value '{}'", - // .{ dest_ty.fmt(sema.mod), val }, + // .{ dest_ty.fmt(mod), val }, // ); //} return try sema.addConstant(dest_ty, result_val); @@ -26359,7 +26367,7 @@ fn coerceExtra( block, inst_src, "no field named '{s}' in enum '{}'", - .{ bytes, dest_ty.fmt(sema.mod) }, + .{ bytes, dest_ty.fmt(mod) }, ); errdefer msg.destroy(sema.gpa); try sema.addDeclaredHereNote(msg, dest_ty); @@ -26375,7 +26383,7 @@ fn coerceExtra( .Union => blk: { // union to its own tag type const union_tag_ty = inst_ty.unionTagType(mod) orelse break :blk; - if (union_tag_ty.eql(dest_ty, sema.mod)) { + if (union_tag_ty.eql(dest_ty, mod)) { return sema.unionToTag(block, dest_ty, inst, inst_src); } }, @@ -26498,15 +26506,15 @@ fn coerceExtra( errdefer msg.destroy(sema.gpa); const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = 0 }; - const src_decl = sema.mod.declPtr(sema.func.?.owner_decl); - try sema.mod.errNoteNonLazy(ret_ty_src.toSrcLoc(src_decl, mod), msg, "'noreturn' declared here", 
.{}); + const src_decl = mod.declPtr(sema.func.?.owner_decl); + try mod.errNoteNonLazy(ret_ty_src.toSrcLoc(src_decl, mod), msg, "'noreturn' declared here", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); } const msg = msg: { - const msg = try sema.errMsg(block, inst_src, "expected type '{}', found '{}'", .{ dest_ty.fmt(sema.mod), inst_ty.fmt(sema.mod) }); + const msg = try sema.errMsg(block, inst_src, "expected type '{}', found '{}'", .{ dest_ty.fmt(mod), inst_ty.fmt(mod) }); errdefer msg.destroy(sema.gpa); // E!T to T @@ -26528,18 +26536,18 @@ fn coerceExtra( try in_memory_result.report(sema, block, inst_src, msg); // Add notes about function return type - if (opts.is_ret and sema.mod.test_functions.get(sema.func.?.owner_decl) == null) { + if (opts.is_ret and mod.test_functions.get(sema.func.?.owner_decl) == null) { const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = 0 }; - const src_decl = sema.mod.declPtr(sema.func.?.owner_decl); + const src_decl = mod.declPtr(sema.func.?.owner_decl); if (inst_ty.isError(mod) and !dest_ty.isError(mod)) { - try sema.mod.errNoteNonLazy(ret_ty_src.toSrcLoc(src_decl, mod), msg, "function cannot return an error", .{}); + try mod.errNoteNonLazy(ret_ty_src.toSrcLoc(src_decl, mod), msg, "function cannot return an error", .{}); } else { - try sema.mod.errNoteNonLazy(ret_ty_src.toSrcLoc(src_decl, mod), msg, "function return type declared here", .{}); + try mod.errNoteNonLazy(ret_ty_src.toSrcLoc(src_decl, mod), msg, "function return type declared here", .{}); } } if (try opts.param_src.get(sema)) |param_src| { - try sema.mod.errNoteNonLazy(param_src, msg, "parameter type declared here", .{}); + try mod.errNoteNonLazy(param_src, msg, "parameter type declared here", .{}); } // TODO maybe add "cannot store an error in type '{}'" note @@ -26679,7 +26687,7 @@ const InMemoryCoercionResult = union(enum) { }, .error_union_payload => |pair| { try sema.errNote(block, src, msg, "error union payload '{}' cannot cast into error union payload '{}'", .{ - pair.actual.fmt(sema.mod), pair.wanted.fmt(sema.mod), + pair.actual.fmt(mod), pair.wanted.fmt(mod), }); cur = pair.child; }, @@ -26692,18 +26700,18 @@ const InMemoryCoercionResult = union(enum) { .array_sentinel => |sentinel| { if (sentinel.actual.toIntern() != .unreachable_value) { try sema.errNote(block, src, msg, "array sentinel '{}' cannot cast into array sentinel '{}'", .{ - sentinel.actual.fmtValue(sentinel.ty, sema.mod), sentinel.wanted.fmtValue(sentinel.ty, sema.mod), + sentinel.actual.fmtValue(sentinel.ty, mod), sentinel.wanted.fmtValue(sentinel.ty, mod), }); } else { try sema.errNote(block, src, msg, "destination array requires '{}' sentinel", .{ - sentinel.wanted.fmtValue(sentinel.ty, sema.mod), + sentinel.wanted.fmtValue(sentinel.ty, mod), }); } break; }, .array_elem => |pair| { try sema.errNote(block, src, msg, "array element type '{}' cannot cast into array element type '{}'", .{ - pair.actual.fmt(sema.mod), pair.wanted.fmt(sema.mod), + pair.actual.fmt(mod), pair.wanted.fmt(mod), }); cur = pair.child; }, @@ -26715,19 +26723,19 @@ const InMemoryCoercionResult = union(enum) { }, .vector_elem => |pair| { try sema.errNote(block, src, msg, "vector element type '{}' cannot cast into vector element type '{}'", .{ - pair.actual.fmt(sema.mod), pair.wanted.fmt(sema.mod), + pair.actual.fmt(mod), pair.wanted.fmt(mod), }); cur = pair.child; }, .optional_shape => |pair| { try sema.errNote(block, src, msg, "optional type child '{}' cannot cast into optional type child '{}'", .{ - 
pair.actual.optionalChild(mod).fmt(sema.mod), pair.wanted.optionalChild(mod).fmt(sema.mod), + pair.actual.optionalChild(mod).fmt(mod), pair.wanted.optionalChild(mod).fmt(mod), }); break; }, .optional_child => |pair| { try sema.errNote(block, src, msg, "optional type child '{}' cannot cast into optional type child '{}'", .{ - pair.actual.fmt(sema.mod), pair.wanted.fmt(sema.mod), + pair.actual.fmt(mod), pair.wanted.fmt(mod), }); cur = pair.child; }, @@ -26792,7 +26800,7 @@ const InMemoryCoercionResult = union(enum) { }, .fn_param => |param| { try sema.errNote(block, src, msg, "parameter {d} '{}' cannot cast into '{}'", .{ - param.index, param.actual.fmt(sema.mod), param.wanted.fmt(sema.mod), + param.index, param.actual.fmt(mod), param.wanted.fmt(mod), }); cur = param.child; }, @@ -26802,13 +26810,13 @@ const InMemoryCoercionResult = union(enum) { }, .fn_return_type => |pair| { try sema.errNote(block, src, msg, "return type '{}' cannot cast into return type '{}'", .{ - pair.actual.fmt(sema.mod), pair.wanted.fmt(sema.mod), + pair.actual.fmt(mod), pair.wanted.fmt(mod), }); cur = pair.child; }, .ptr_child => |pair| { try sema.errNote(block, src, msg, "pointer type child '{}' cannot cast into pointer type child '{}'", .{ - pair.actual.fmt(sema.mod), pair.wanted.fmt(sema.mod), + pair.actual.fmt(mod), pair.wanted.fmt(mod), }); cur = pair.child; }, @@ -26819,11 +26827,11 @@ const InMemoryCoercionResult = union(enum) { .ptr_sentinel => |sentinel| { if (sentinel.actual.toIntern() != .unreachable_value) { try sema.errNote(block, src, msg, "pointer sentinel '{}' cannot cast into pointer sentinel '{}'", .{ - sentinel.actual.fmtValue(sentinel.ty, sema.mod), sentinel.wanted.fmtValue(sentinel.ty, sema.mod), + sentinel.actual.fmtValue(sentinel.ty, mod), sentinel.wanted.fmtValue(sentinel.ty, mod), }); } else { try sema.errNote(block, src, msg, "destination pointer requires '{}' sentinel", .{ - sentinel.wanted.fmtValue(sentinel.ty, sema.mod), + sentinel.wanted.fmtValue(sentinel.ty, mod), }); } break; @@ -26847,11 +26855,11 @@ const InMemoryCoercionResult = union(enum) { const actual_allow_zero = pair.actual.ptrAllowsZero(mod); if (actual_allow_zero and !wanted_allow_zero) { try sema.errNote(block, src, msg, "'{}' could have null values which are illegal in type '{}'", .{ - pair.actual.fmt(sema.mod), pair.wanted.fmt(sema.mod), + pair.actual.fmt(mod), pair.wanted.fmt(mod), }); } else { try sema.errNote(block, src, msg, "mutable '{}' allows illegal null values stored to type '{}'", .{ - pair.actual.fmt(sema.mod), pair.wanted.fmt(sema.mod), + pair.actual.fmt(mod), pair.wanted.fmt(mod), }); } break; @@ -26877,13 +26885,13 @@ const InMemoryCoercionResult = union(enum) { }, .double_ptr_to_anyopaque => |pair| { try sema.errNote(block, src, msg, "cannot implicitly cast double pointer '{}' to anyopaque pointer '{}'", .{ - pair.actual.fmt(sema.mod), pair.wanted.fmt(sema.mod), + pair.actual.fmt(mod), pair.wanted.fmt(mod), }); break; }, .slice_to_anyopaque => |pair| { try sema.errNote(block, src, msg, "cannot implicitly cast slice '{}' to anyopaque pointer '{}'", .{ - pair.actual.fmt(sema.mod), pair.wanted.fmt(sema.mod), + pair.actual.fmt(mod), pair.wanted.fmt(mod), }); try sema.errNote(block, src, msg, "consider using '.ptr'", .{}); break; @@ -27616,25 +27624,24 @@ fn obtainBitCastedVectorPtr(sema: *Sema, ptr: Air.Inst.Ref) ?Air.Inst.Ref { const mod = sema.mod; const array_ty = sema.typeOf(ptr).childType(mod); if (array_ty.zigTypeTag(mod) != .Array) return null; - var ptr_inst = Air.refToIndex(ptr) orelse return null; + 
var ptr_ref = ptr; + var ptr_inst = Air.refToIndex(ptr_ref) orelse return null; const air_datas = sema.air_instructions.items(.data); const air_tags = sema.air_instructions.items(.tag); - const prev_ptr = while (air_tags[ptr_inst] == .bitcast) { - const prev_ptr = air_datas[ptr_inst].ty_op.operand; - const prev_ptr_ty = sema.typeOf(prev_ptr); - if (prev_ptr_ty.zigTypeTag(mod) != .Pointer) return null; - const prev_ptr_child_ty = prev_ptr_ty.childType(mod); - if (prev_ptr_child_ty.zigTypeTag(mod) == .Vector) break prev_ptr; - ptr_inst = Air.refToIndex(prev_ptr) orelse return null; + const vector_ty = while (air_tags[ptr_inst] == .bitcast) { + ptr_ref = air_datas[ptr_inst].ty_op.operand; + if (!sema.isKnownZigType(ptr_ref, .Pointer)) return null; + const child_ty = sema.typeOf(ptr_ref).childType(mod); + if (child_ty.zigTypeTag(mod) == .Vector) break child_ty; + ptr_inst = Air.refToIndex(ptr_ref) orelse return null; } else return null; // We have a pointer-to-array and a pointer-to-vector. If the elements and // lengths match, return the result. - const vector_ty = sema.typeOf(prev_ptr).childType(mod); if (array_ty.childType(mod).eql(vector_ty.childType(mod), sema.mod) and array_ty.arrayLen(mod) == vector_ty.vectorLen(mod)) { - return prev_ptr; + return ptr_ref; } else { return null; } @@ -34474,3 +34481,12 @@ fn isNoReturn(sema: *Sema, ref: Air.Inst.Ref) bool { }; return sema.typeOf(ref).isNoReturn(sema.mod); } + +/// Avoids crashing the compiler when asking if inferred allocations are known to be a certain type. +fn isKnownZigType(sema: *Sema, ref: Air.Inst.Ref, tag: std.builtin.TypeId) bool { + if (Air.refToIndex(ref)) |inst| switch (sema.air_instructions.items(.tag)[inst]) { + .inferred_alloc, .inferred_alloc_comptime => return false, + else => {}, + }; + return sema.typeOf(ref).zigTypeTag(sema.mod) == tag; +} diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 00f5b3f3da..fca1b25a1d 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -4895,7 +4895,7 @@ fn airFloatSign(self: *Self, inst: Air.Inst.Index) !void { }); const sign_val = switch (tag) { - .neg => try vec_ty.minInt(mod), + .neg => try vec_ty.minInt(mod, vec_ty), .fabs => try vec_ty.maxInt(mod, vec_ty), else => unreachable, }; diff --git a/src/codegen/c.zig b/src/codegen/c.zig index ab69514ee1..0c5e6e6c48 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -6723,7 +6723,7 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue { }, .Max => switch (scalar_ty.zigTypeTag(mod)) { .Bool => try mod.intValue(scalar_ty, 0), - .Int => try scalar_ty.minInt(mod), + .Int => try scalar_ty.minInt(mod, scalar_ty), .Float => try mod.floatValue(scalar_ty, std.math.nan_f128), else => unreachable, }, diff --git a/src/type.zig b/src/type.zig index 5d6f77adf2..cb455d5ebe 100644 --- a/src/type.zig +++ b/src/type.zig @@ -2865,23 +2865,23 @@ pub const Type = struct { } // Works for vectors and vectors of integers. - pub fn minInt(ty: Type, mod: *Module) !Value { - const scalar = try minIntScalar(ty.scalarType(mod), mod); + pub fn minInt(ty: Type, mod: *Module, dest_ty: Type) !Value { + const scalar = try minIntScalar(ty.scalarType(mod), mod, dest_ty); return if (ty.zigTypeTag(mod) == .Vector) (try mod.intern(.{ .aggregate = .{ - .ty = ty.toIntern(), + .ty = dest_ty.toIntern(), .storage = .{ .repeated_elem = scalar.toIntern() }, } })).toValue() else scalar; } /// Asserts that the type is an integer. 
- pub fn minIntScalar(ty: Type, mod: *Module) !Value { + pub fn minIntScalar(ty: Type, mod: *Module, dest_ty: Type) !Value { const info = ty.intInfo(mod); - if (info.signedness == .unsigned) return mod.intValue(ty, 0); - if (info.bits == 0) return mod.intValue(ty, -1); + if (info.signedness == .unsigned) return mod.intValue(dest_ty, 0); + if (info.bits == 0) return mod.intValue(dest_ty, -1); if (std.math.cast(u6, info.bits - 1)) |shift| { const n = @as(i64, std.math.minInt(i64)) >> (63 - shift); - return mod.intValue(Type.comptime_int, n); + return mod.intValue(dest_ty, n); } var res = try std.math.big.int.Managed.init(mod.gpa); @@ -2889,7 +2889,7 @@ pub const Type = struct { try res.setTwosCompIntLimit(.min, info.signedness, info.bits); - return mod.intValue_big(Type.comptime_int, res.toConst()); + return mod.intValue_big(dest_ty, res.toConst()); } // Works for vectors and vectors of integers. @@ -2897,7 +2897,7 @@ pub const Type = struct { pub fn maxInt(ty: Type, mod: *Module, dest_ty: Type) !Value { const scalar = try maxIntScalar(ty.scalarType(mod), mod, dest_ty); return if (ty.zigTypeTag(mod) == .Vector) (try mod.intern(.{ .aggregate = .{ - .ty = ty.toIntern(), + .ty = dest_ty.toIntern(), .storage = .{ .repeated_elem = scalar.toIntern() }, } })).toValue() else scalar; } diff --git a/src/value.zig b/src/value.zig index 473b1c967c..23b90f40df 100644 --- a/src/value.zig +++ b/src/value.zig @@ -3166,6 +3166,11 @@ pub const Value = struct { .len = undefined, }; result_bigint.shiftLeft(lhs_bigint, shift); + if (ty.toIntern() != .comptime_int_type) { + const int_info = ty.intInfo(mod); + result_bigint.truncate(result_bigint.toConst(), int_info.signedness, int_info.bits); + } + return mod.intValue_big(ty, result_bigint.toConst()); } -- cgit v1.2.3 From 3b6ca1d35b950d67fff5964f0063dadf01f30e2d Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Sun, 28 May 2023 02:41:22 -0400 Subject: Module: move memoized data to the intern pool This avoids memory management bugs with the previous implementation. --- src/InternPool.zig | 109 ++++++++++++++++++++++++++++- src/Module.zig | 70 ++++--------------- src/Sema.zig | 173 +++++++++++++++++++++++++--------------------- src/TypedValue.zig | 3 + src/arch/wasm/CodeGen.zig | 3 + src/codegen.zig | 3 + src/codegen/c.zig | 6 +- src/codegen/llvm.zig | 3 + src/codegen/spirv.zig | 3 + src/type.zig | 27 ++++++++ src/value.zig | 4 ++ 11 files changed, 264 insertions(+), 140 deletions(-) (limited to 'src/arch') diff --git a/src/InternPool.zig b/src/InternPool.zig index 88b0578707..7ff49c4259 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -217,6 +217,11 @@ pub const Key = union(enum) { /// An instance of a union. un: Union, + /// A declaration with a memoized value. + memoized_decl: MemoizedDecl, + /// A comptime function call with a memoized result. 
+    memoized_call: Key.MemoizedCall,
+
     pub const IntType = std.builtin.Type.Int;
 
     pub const ErrorUnionType = struct {
@@ -609,6 +614,17 @@ pub const Key = union(enum) {
         };
     };
 
+    pub const MemoizedDecl = struct {
+        val: Index,
+        decl: Module.Decl.Index,
+    };
+
+    pub const MemoizedCall = struct {
+        func: Module.Fn.Index,
+        arg_values: []const Index,
+        result: Index,
+    };
+
     pub fn hash32(key: Key, ip: *const InternPool) u32 {
         return @truncate(u32, key.hash64(ip));
     }
@@ -786,6 +802,13 @@ pub const Key = union(enum) {
                 std.hash.autoHash(hasher, func_type.is_generic);
                 std.hash.autoHash(hasher, func_type.is_noinline);
             },
+
+            .memoized_decl => |memoized_decl| std.hash.autoHash(hasher, memoized_decl.val),
+
+            .memoized_call => |memoized_call| {
+                std.hash.autoHash(hasher, memoized_call.func);
+                for (memoized_call.arg_values) |arg| std.hash.autoHash(hasher, arg);
+            },
         }
     }
 
@@ -1054,6 +1077,17 @@ pub const Key = union(enum) {
                     a_info.is_generic == b_info.is_generic and
                     a_info.is_noinline == b_info.is_noinline;
             },
+
+            .memoized_decl => |a_info| {
+                const b_info = b.memoized_decl;
+                return a_info.val == b_info.val;
+            },
+
+            .memoized_call => |a_info| {
+                const b_info = b.memoized_call;
+                return a_info.func == b_info.func and
+                    std.mem.eql(Index, a_info.arg_values, b_info.arg_values);
+            },
         }
     }
 
@@ -1105,6 +1139,10 @@ pub const Key = union(enum) {
                 .@"unreachable" => .noreturn_type,
                 .generic_poison => .generic_poison_type,
             },
+
+            .memoized_decl,
+            .memoized_call,
+            => unreachable,
         };
     }
 };
@@ -1380,6 +1418,14 @@ pub const Index = enum(u32) {
         bytes: struct { data: *Bytes },
         aggregate: struct { data: *Aggregate },
         repeated: struct { data: *Repeated },
+
+        memoized_decl: struct { data: *Key.MemoizedDecl },
+        memoized_call: struct {
+            const @"data.args_len" = opaque {};
+            data: *MemoizedCall,
+            @"trailing.arg_values.len": *@"data.args_len",
+            trailing: struct { arg_values: []Index },
+        },
     }) void {
         _ = self;
         const map_fields = @typeInfo(@typeInfo(@TypeOf(tag_to_encoding_map)).Pointer.child).Struct.fields;
@@ -1875,6 +1921,13 @@ pub const Tag = enum(u8) {
     /// An instance of an array or vector with every element being the same value.
     /// data is extra index to `Repeated`.
     repeated,
+
+    /// A memoized declaration value.
+    /// data is extra index to `Key.MemoizedDecl`
+    memoized_decl,
+    /// A memoized comptime function call result.
+    /// data is extra index to `MemoizedCall`
+    memoized_call,
 };
 
 /// Trailing:
 /// 0.
arg value: Index for each args_len +pub const MemoizedCall = struct { + func: Module.Fn.Index, + args_len: u32, + result: Index, +}; + pub fn init(ip: *InternPool, gpa: Allocator) !void { assert(ip.items.len == 0); @@ -2758,6 +2819,16 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { }, .enum_literal => .{ .enum_literal = @intToEnum(NullTerminatedString, data) }, .enum_tag => .{ .enum_tag = ip.extraData(Key.EnumTag, data) }, + + .memoized_decl => .{ .memoized_decl = ip.extraData(Key.MemoizedDecl, data) }, + .memoized_call => { + const extra = ip.extraDataTrail(MemoizedCall, data); + return .{ .memoized_call = .{ + .func = extra.data.func, + .arg_values = @ptrCast([]const Index, ip.extra.items[extra.end..][0..extra.data.args_len]), + .result = extra.data.result, + } }; + }, }; } @@ -3724,6 +3795,29 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .data = try ip.addExtra(gpa, un), }); }, + + .memoized_decl => |memoized_decl| { + assert(memoized_decl.val != .none); + ip.items.appendAssumeCapacity(.{ + .tag = .memoized_decl, + .data = try ip.addExtra(gpa, memoized_decl), + }); + }, + + .memoized_call => |memoized_call| { + for (memoized_call.arg_values) |arg| assert(arg != .none); + try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(MemoizedCall).Struct.fields.len + + memoized_call.arg_values.len); + ip.items.appendAssumeCapacity(.{ + .tag = .memoized_call, + .data = ip.addExtraAssumeCapacity(MemoizedCall{ + .func = memoized_call.func, + .args_len = @intCast(u32, memoized_call.arg_values.len), + .result = memoized_call.result, + }), + }); + ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, memoized_call.arg_values)); + }, } return @intToEnum(Index, ip.items.len - 1); } @@ -3788,7 +3882,7 @@ pub fn getIncompleteEnum( ip: *InternPool, gpa: Allocator, enum_type: Key.IncompleteEnumType, -) Allocator.Error!InternPool.IncompleteEnumType { +) Allocator.Error!IncompleteEnumType { switch (enum_type.tag_mode) { .auto => return getIncompleteEnumAuto(ip, gpa, enum_type), .explicit => return getIncompleteEnumExplicit(ip, gpa, enum_type, .type_enum_explicit), @@ -3800,7 +3894,7 @@ pub fn getIncompleteEnumAuto( ip: *InternPool, gpa: Allocator, enum_type: Key.IncompleteEnumType, -) Allocator.Error!InternPool.IncompleteEnumType { +) Allocator.Error!IncompleteEnumType { // Although the integer tag type will not be stored in the `EnumAuto` struct, // `InternPool` logic depends on it being present so that `typeOf` can be infallible. // Ensure it is present here: @@ -3849,7 +3943,7 @@ fn getIncompleteEnumExplicit( gpa: Allocator, enum_type: Key.IncompleteEnumType, tag: Tag, -) Allocator.Error!InternPool.IncompleteEnumType { +) Allocator.Error!IncompleteEnumType { // We must keep the map in sync with `items`. The hash and equality functions // for enum types only look at the decl field, which is present even in // an `IncompleteEnumType`. 
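[Editor's note, not part of the patch: the hunks above define the full round trip for a memoized call. `get` writes the fixed-size `MemoizedCall` header into the pool's `extra` array followed by one trailing `Index` per argument, and `indexToKey` recovers both pieces through `extraDataTrail`. A minimal sketch of the decode path, assuming only the helpers shown in this diff:

    // `extraDataTrail` returns the fixed header plus `end`, the offset in
    // `ip.extra.items` where the trailing data begins.
    const extra = ip.extraDataTrail(MemoizedCall, data);
    const func = extra.data.func; // which function was called
    const result = extra.data.result; // the memoized return value
    // one u32-encoded Index per call argument follows the header:
    const arg_values = @ptrCast([]const Index, ip.extra.items[extra.end..][0..extra.data.args_len]);
]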
@@ -4704,6 +4798,12 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void { .func => @sizeOf(Key.Func) + @sizeOf(Module.Fn) + @sizeOf(Module.Decl), .only_possible_value => 0, .union_value => @sizeOf(Key.Union), + + .memoized_decl => @sizeOf(Key.MemoizedDecl), + .memoized_call => b: { + const info = ip.extraData(MemoizedCall, data); + break :b @sizeOf(MemoizedCall) + (@sizeOf(Index) * info.args_len); + }, }); } const SortContext = struct { @@ -5215,6 +5315,9 @@ pub fn zigTypeTagOrPoison(ip: InternPool, index: Index) error{GenericPoison}!std .bytes, .aggregate, .repeated, + // memoization, not types + .memoized_decl, + .memoized_call, => unreachable, }, .none => unreachable, // special tag diff --git a/src/Module.zig b/src/Module.zig index 314e636bab..fe9c59583a 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -88,18 +88,10 @@ embed_table: std.StringHashMapUnmanaged(*EmbedFile) = .{}, /// Stores all Type and Value objects; periodically garbage collected. intern_pool: InternPool = .{}, -/// This is currently only used for string literals, however the end-game once the lang spec -/// is settled will be to make this behavior consistent across all types. -memoized_decls: std.AutoHashMapUnmanaged(InternPool.Index, Decl.Index) = .{}, - /// The set of all the generic function instantiations. This is used so that when a generic /// function is called twice with the same comptime parameter arguments, both calls dispatch /// to the same function. monomorphed_funcs: MonomorphedFuncsSet = .{}, -/// The set of all comptime function calls that have been cached so that future calls -/// with the same parameters will get the same return value. -memoized_calls: MemoizedCallSet = .{}, -memoized_call_args: MemoizedCall.Args = .{}, /// Contains the values from `@setAlignStack`. A sparse table is used here /// instead of a field of `Fn` because usage of `@setAlignStack` is rare, while /// functions are many. 
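[Editor's note, not part of the patch: with the `memoized_calls` side tables gone from `Module`, a cached comptime call is found by probing the intern pool with a key whose `result` field is ignored — the `hash` and `eql` prongs added to `InternPool.zig` above only consider `func` and `arg_values`. The lookup, as it appears in the `Sema.zig` hunk later in this patch, boils down to:

    if (mod.intern_pool.getIfExists(.{ .memoized_call = .{
        .func = module_fn_index,
        .arg_values = memoized_arg_values,
        .result = .none, // not hashed or compared, so any probe value works
    } })) |index| {
        const memoized = mod.intern_pool.indexToKey(index).memoized_call;
        // reuse memoized.result instead of re-evaluating the call body
    }

Interning also addresses the lifetime bugs mentioned in the commit message: `get` copies the argument slice into the pool's own `extra` storage, so nothing dangles once the caller's arena is freed.]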
@@ -223,42 +215,6 @@ const MonomorphedFuncsContext = struct { } }; -pub const MemoizedCallSet = std.HashMapUnmanaged( - MemoizedCall.Key, - MemoizedCall.Result, - MemoizedCall, - std.hash_map.default_max_load_percentage, -); - -pub const MemoizedCall = struct { - args: *const Args, - - pub const Args = std.ArrayListUnmanaged(InternPool.Index); - - pub const Key = struct { - func: Fn.Index, - args_index: u32, - args_count: u32, - - pub fn args(key: Key, ctx: MemoizedCall) []InternPool.Index { - return ctx.args.items[key.args_index..][0..key.args_count]; - } - }; - - pub const Result = InternPool.Index; - - pub fn eql(ctx: MemoizedCall, a: Key, b: Key) bool { - return a.func == b.func and mem.eql(InternPool.Index, a.args(ctx), b.args(ctx)); - } - - pub fn hash(ctx: MemoizedCall, key: Key) u64 { - var hasher = std.hash.Wyhash.init(0); - std.hash.autoHash(&hasher, key.func); - std.hash.autoHashStrat(&hasher, key.args(ctx), .Deep); - return hasher.final(); - } -}; - pub const SetAlignStack = struct { alignment: u32, /// TODO: This needs to store a non-lazy source location for the case of an inline function @@ -605,7 +561,6 @@ pub const Decl = struct { } mod.destroyFunc(func); } - _ = mod.memoized_decls.remove(decl.val.ip_index); if (decl.value_arena) |value_arena| { value_arena.deinit(gpa); decl.value_arena = null; @@ -3314,8 +3269,6 @@ pub fn deinit(mod: *Module) void { mod.test_functions.deinit(gpa); mod.align_stack_fns.deinit(gpa); mod.monomorphed_funcs.deinit(gpa); - mod.memoized_call_args.deinit(gpa); - mod.memoized_calls.deinit(gpa); mod.decls_free_list.deinit(gpa); mod.allocated_decls.deinit(gpa); @@ -3325,8 +3278,6 @@ pub fn deinit(mod: *Module) void { mod.namespaces_free_list.deinit(gpa); mod.allocated_namespaces.deinit(gpa); - mod.memoized_decls.deinit(gpa); - mod.intern_pool.deinit(gpa); } @@ -5438,6 +5389,17 @@ pub fn abortAnonDecl(mod: *Module, decl_index: Decl.Index) void { mod.destroyDecl(decl_index); } +/// Finalize the creation of an anon decl. +pub fn finalizeAnonDecl(mod: *Module, decl_index: Decl.Index) Allocator.Error!void { + // The Decl starts off with alive=false and the codegen backend will set alive=true + // if the Decl is referenced by an instruction or another constant. Otherwise, + // the Decl will be garbage collected by the `codegen_decl` task instead of sent + // to the linker. + if (mod.declPtr(decl_index).ty.isFnOrHasRuntimeBits(mod)) { + try mod.comp.anon_work_queue.writeItem(.{ .codegen_decl = decl_index }); + } +} + /// Delete all the Export objects that are caused by this Decl. Re-analysis of /// this Decl will cause them to be re-created (or not). fn deleteDeclExports(mod: *Module, decl_index: Decl.Index) Allocator.Error!void { @@ -5875,7 +5837,7 @@ pub fn initNewAnonDecl( namespace: Namespace.Index, typed_value: TypedValue, name: [:0]u8, -) !void { +) Allocator.Error!void { assert(typed_value.ty.toIntern() == mod.intern_pool.typeOf(typed_value.val.toIntern())); errdefer mod.gpa.free(name); @@ -5892,14 +5854,6 @@ pub fn initNewAnonDecl( new_decl.generation = mod.generation; try mod.namespacePtr(namespace).anon_decls.putNoClobber(mod.gpa, new_decl_index, {}); - - // The Decl starts off with alive=false and the codegen backend will set alive=true - // if the Decl is referenced by an instruction or another constant. Otherwise, - // the Decl will be garbage collected by the `codegen_decl` task instead of sent - // to the linker. 
- if (typed_value.ty.isFnOrHasRuntimeBits(mod)) { - try mod.comp.anon_work_queue.writeItem(.{ .codegen_decl = new_decl_index }); - } } pub fn errNoteNonLazy( diff --git a/src/Sema.zig b/src/Sema.zig index c1bcf53ab2..7562794d25 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -734,6 +734,7 @@ pub const Block = struct { errdefer sema.mod.abortAnonDecl(new_decl_index); try new_decl.finalizeNewArena(&wad.new_decl_arena); wad.finished = true; + try sema.mod.finalizeAnonDecl(new_decl_index); return new_decl_index; } }; @@ -2292,7 +2293,7 @@ fn failWithOwnedErrorMsg(sema: *Sema, err_msg: *Module.ErrorMsg) CompileError { defer reference_stack.deinit(); // Avoid infinite loops. - var seen = std.AutoHashMap(Module.Decl.Index, void).init(gpa); + var seen = std.AutoHashMap(Decl.Index, void).init(gpa); defer seen.deinit(); var cur_reference_trace: u32 = 0; @@ -2742,7 +2743,9 @@ fn zirStructDecl( try sema.analyzeStructDecl(new_decl, inst, struct_index); try new_decl.finalizeNewArena(&new_decl_arena); - return sema.analyzeDeclVal(block, src, new_decl_index); + const decl_val = sema.analyzeDeclVal(block, src, new_decl_index); + try mod.finalizeAnonDecl(new_decl_index); + return decl_val; } fn createAnonymousDeclTypeNamed( @@ -2941,6 +2944,7 @@ fn zirEnumDecl( new_namespace.ty = incomplete_enum.index.toType(); const decl_val = try sema.analyzeDeclVal(block, src, new_decl_index); + try mod.finalizeAnonDecl(new_decl_index); done = true; const int_tag_ty = ty: { @@ -3193,7 +3197,9 @@ fn zirUnionDecl( _ = try mod.scanNamespace(new_namespace_index, extra_index, decls_len, new_decl); try new_decl.finalizeNewArena(&new_decl_arena); - return sema.analyzeDeclVal(block, src, new_decl_index); + const decl_val = sema.analyzeDeclVal(block, src, new_decl_index); + try mod.finalizeAnonDecl(new_decl_index); + return decl_val; } fn zirOpaqueDecl( @@ -3257,7 +3263,9 @@ fn zirOpaqueDecl( extra_index = try mod.scanNamespace(new_namespace_index, extra_index, decls_len, new_decl); try new_decl.finalizeNewArena(&new_decl_arena); - return sema.analyzeDeclVal(block, src, new_decl_index); + const decl_val = sema.analyzeDeclVal(block, src, new_decl_index); + try mod.finalizeAnonDecl(new_decl_index); + return decl_val; } fn zirErrorSetDecl( @@ -3298,7 +3306,9 @@ fn zirErrorSetDecl( new_decl.owns_tv = true; errdefer mod.abortAnonDecl(new_decl_index); - return sema.analyzeDeclVal(block, src, new_decl_index); + const decl_val = sema.analyzeDeclVal(block, src, new_decl_index); + try mod.finalizeAnonDecl(new_decl_index); + return decl_val; } fn zirRetPtr(sema: *Sema, block: *Block) CompileError!Air.Inst.Ref { @@ -5133,32 +5143,35 @@ fn zirStr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins return sema.addStrLit(block, bytes); } -fn addStrLit(sema: *Sema, block: *Block, zir_bytes: []const u8) CompileError!Air.Inst.Ref { - // `zir_bytes` references memory inside the ZIR module, which can get deallocated - // after semantic analysis is complete, for example in the case of the initialization - // expression of a variable declaration. 
+fn addStrLit(sema: *Sema, block: *Block, bytes: []const u8) CompileError!Air.Inst.Ref { const mod = sema.mod; - const gpa = sema.gpa; - const ty = try mod.arrayType(.{ - .len = zir_bytes.len, - .child = .u8_type, - .sentinel = .zero_u8, - }); - const val = try mod.intern(.{ .aggregate = .{ - .ty = ty.toIntern(), - .storage = .{ .bytes = zir_bytes }, - } }); - const gop = try mod.memoized_decls.getOrPut(gpa, val); - if (!gop.found_existing) { - var anon_decl = try block.startAnonDecl(); - defer anon_decl.deinit(); + const memoized_decl_index = memoized: { + const ty = try mod.arrayType(.{ + .len = bytes.len, + .child = .u8_type, + .sentinel = .zero_u8, + }); + const val = try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .bytes = bytes }, + } }); - const decl_index = try anon_decl.finish(ty, val.toValue(), 0); + _ = try sema.typeHasRuntimeBits(ty); + const new_decl_index = try mod.createAnonymousDecl(block, .{ .ty = ty, .val = val.toValue() }); + errdefer mod.abortAnonDecl(new_decl_index); - gop.key_ptr.* = val; - gop.value_ptr.* = decl_index; - } - return sema.analyzeDeclRef(gop.value_ptr.*); + const memoized_index = try mod.intern(.{ .memoized_decl = .{ + .val = val, + .decl = new_decl_index, + } }); + const memoized_decl_index = mod.intern_pool.indexToKey(memoized_index).memoized_decl.decl; + if (memoized_decl_index != new_decl_index) + mod.abortAnonDecl(new_decl_index) + else + try mod.finalizeAnonDecl(new_decl_index); + break :memoized memoized_decl_index; + }; + return sema.analyzeDeclRef(memoized_decl_index); } fn zirInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -6868,30 +6881,15 @@ fn analyzeCall( defer child_block.instructions.deinit(gpa); defer merges.deinit(gpa); - // If it's a comptime function call, we need to memoize it as long as no external - // comptime memory is mutated. - var memoized_call_key = Module.MemoizedCall.Key{ - .func = module_fn_index, - .args_index = @intCast(u32, mod.memoized_call_args.items.len), - .args_count = @intCast(u32, func_ty_info.param_types.len), - }; - var delete_memoized_call_key = false; - defer if (delete_memoized_call_key) { - assert(mod.memoized_call_args.items.len >= memoized_call_key.args_index and - mod.memoized_call_args.items.len < memoized_call_key.args_index + memoized_call_key.args_count); - mod.memoized_call_args.shrinkRetainingCapacity(memoized_call_key.args_index); - }; - if (is_comptime_call) { - try mod.memoized_call_args.ensureUnusedCapacity(gpa, memoized_call_key.args_count); - delete_memoized_call_key = true; - } - try sema.emitBackwardBranch(block, call_src); - // Whether this call should be memoized, set to false if the call can mutate - // comptime state. + // Whether this call should be memoized, set to false if the call can mutate comptime state. var should_memoize = true; + // If it's a comptime function call, we need to memoize it as long as no external + // comptime memory is mutated. 
+ const memoized_arg_values = try sema.arena.alloc(InternPool.Index, func_ty_info.param_types.len); + var new_fn_info = mod.typeToFunc(fn_owner_decl.ty).?; new_fn_info.param_types = try sema.arena.alloc(InternPool.Index, new_fn_info.param_types.len); new_fn_info.comptime_bits = 0; @@ -6918,6 +6916,7 @@ fn analyzeCall( uncasted_args, is_comptime_call, &should_memoize, + memoized_arg_values, mod.typeToFunc(func_ty).?.param_types, func, &has_comptime_args, @@ -6935,6 +6934,7 @@ fn analyzeCall( uncasted_args, is_comptime_call, &should_memoize, + memoized_arg_values, mod.typeToFunc(func_ty).?.param_types, func, &has_comptime_args, @@ -6988,28 +6988,18 @@ fn analyzeCall( // bug generating invalid LLVM IR. const res2: Air.Inst.Ref = res2: { if (should_memoize and is_comptime_call) { - const gop = try mod.memoized_calls.getOrPutContext( - gpa, - memoized_call_key, - .{ .args = &mod.memoized_call_args }, - ); - if (gop.found_existing) { - assert(mod.memoized_call_args.items.len == memoized_call_key.args_index + memoized_call_key.args_count); - mod.memoized_call_args.shrinkRetainingCapacity(memoized_call_key.args_index); - delete_memoized_call_key = false; - - // We need to use the original memoized error set instead of fn_ret_ty. - const result = gop.value_ptr.*; - assert(result != .none); // recursive memoization? - - break :res2 try sema.addConstant(mod.intern_pool.typeOf(result).toType(), result.toValue()); + if (mod.intern_pool.getIfExists(.{ .memoized_call = .{ + .func = module_fn_index, + .arg_values = memoized_arg_values, + .result = .none, + } })) |memoized_call_index| { + const memoized_call = mod.intern_pool.indexToKey(memoized_call_index).memoized_call; + break :res2 try sema.addConstant( + mod.intern_pool.typeOf(memoized_call.result).toType(), + memoized_call.result.toValue(), + ); } - gop.value_ptr.* = .none; - } else if (delete_memoized_call_key) { - assert(mod.memoized_call_args.items.len == memoized_call_key.args_index + memoized_call_key.args_count); - mod.memoized_call_args.shrinkRetainingCapacity(memoized_call_key.args_index); } - delete_memoized_call_key = false; const new_func_resolved_ty = try mod.funcType(new_fn_info); if (!is_comptime_call and !block.is_typeof) { @@ -7067,10 +7057,14 @@ fn analyzeCall( if (should_memoize and is_comptime_call) { const result_val = try sema.resolveConstMaybeUndefVal(block, .unneeded, result, ""); - mod.memoized_calls.getPtrContext( - memoized_call_key, - .{ .args = &mod.memoized_call_args }, - ).?.* = try result_val.intern(fn_ret_ty, mod); + + // TODO: check whether any external comptime memory was mutated by the + // comptime function call. If so, then do not memoize the call here. 
+ _ = try mod.intern(.{ .memoized_call = .{ + .func = module_fn_index, + .arg_values = memoized_arg_values, + .result = try result_val.intern(fn_ret_ty, mod), + } }); } break :res2 result; @@ -7216,6 +7210,7 @@ fn analyzeInlineCallArg( uncasted_args: []const Air.Inst.Ref, is_comptime_call: bool, should_memoize: *bool, + memoized_arg_values: []InternPool.Index, raw_param_types: []const InternPool.Index, func_inst: Air.Inst.Ref, has_comptime_args: *bool, @@ -7279,7 +7274,7 @@ fn analyzeInlineCallArg( }, } should_memoize.* = should_memoize.* and !arg_val.canMutateComptimeVarState(mod); - mod.memoized_call_args.appendAssumeCapacity(try arg_val.intern(param_ty.toType(), mod)); + memoized_arg_values[arg_i.*] = try arg_val.intern(param_ty.toType(), mod); } else { sema.inst_map.putAssumeCapacityNoClobber(inst, casted_arg); } @@ -7315,7 +7310,7 @@ fn analyzeInlineCallArg( }, } should_memoize.* = should_memoize.* and !arg_val.canMutateComptimeVarState(mod); - mod.memoized_call_args.appendAssumeCapacity(try arg_val.intern(sema.typeOf(uncasted_arg), mod)); + memoized_arg_values[arg_i.*] = try arg_val.intern(sema.typeOf(uncasted_arg), mod); } else { if (zir_tags[inst] == .param_anytype_comptime) { _ = try sema.resolveConstMaybeUndefVal(arg_block, arg_src, uncasted_arg, "parameter is comptime"); @@ -19363,7 +19358,9 @@ fn zirReify( } } - return sema.analyzeDeclVal(block, src, new_decl_index); + const decl_val = sema.analyzeDeclVal(block, src, new_decl_index); + try mod.finalizeAnonDecl(new_decl_index); + return decl_val; }, .Opaque => { const fields = ip.typeOf(union_val.val).toType().structFields(mod); @@ -19407,7 +19404,9 @@ fn zirReify( new_namespace.ty = opaque_ty.toType(); try new_decl.finalizeNewArena(&new_decl_arena); - return sema.analyzeDeclVal(block, src, new_decl_index); + const decl_val = sema.analyzeDeclVal(block, src, new_decl_index); + try mod.finalizeAnonDecl(new_decl_index); + return decl_val; }, .Union => { const fields = ip.typeOf(union_val.val).toType().structFields(mod); @@ -19604,7 +19603,9 @@ fn zirReify( } try new_decl.finalizeNewArena(&new_decl_arena); - return sema.analyzeDeclVal(block, src, new_decl_index); + const decl_val = sema.analyzeDeclVal(block, src, new_decl_index); + try mod.finalizeAnonDecl(new_decl_index); + return decl_val; }, .Fn => { const fields = ip.typeOf(union_val.val).toType().structFields(mod); @@ -19902,7 +19903,9 @@ fn reifyStruct( } try new_decl.finalizeNewArena(&new_decl_arena); - return sema.analyzeDeclVal(block, src, new_decl_index); + const decl_val = sema.analyzeDeclVal(block, src, new_decl_index); + try mod.finalizeAnonDecl(new_decl_index); + return decl_val; } fn zirAddrSpaceCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref { @@ -31865,6 +31868,9 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .opt, .aggregate, .un, + // memoization, not types + .memoized_decl, + .memoized_call, => unreachable, }, }; @@ -32997,6 +33003,8 @@ fn generateUnionTagTypeNumbered( .ty = Type.type, .val = undefined, }, name); + errdefer mod.abortAnonDecl(new_decl_index); + const new_decl = mod.declPtr(new_decl_index); new_decl.name_fully_qualified = true; new_decl.owns_tv = true; @@ -33016,6 +33024,7 @@ fn generateUnionTagTypeNumbered( new_decl.val = enum_ty.toValue(); + try mod.finalizeAnonDecl(new_decl_index); return enum_ty.toType(); } @@ -33049,6 +33058,7 @@ fn generateUnionTagTypeSimple( mod.declPtr(new_decl_index).name_fully_qualified = true; break :new_decl_index new_decl_index; }; + 
errdefer mod.abortAnonDecl(new_decl_index); const enum_ty = try mod.intern(.{ .enum_type = .{ .decl = new_decl_index, @@ -33066,6 +33076,7 @@ fn generateUnionTagTypeSimple( new_decl.owns_tv = true; new_decl.val = enum_ty.toValue(); + try mod.finalizeAnonDecl(new_decl_index); return enum_ty.toType(); } @@ -33358,6 +33369,9 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .opt, .aggregate, .un, + // memoization, not types + .memoized_decl, + .memoized_call, => unreachable, }, }; @@ -33843,6 +33857,9 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .opt, .aggregate, .un, + // memoization, not types + .memoized_decl, + .memoized_call, => unreachable, }, }; diff --git a/src/TypedValue.zig b/src/TypedValue.zig index 8770917a01..f252e84791 100644 --- a/src/TypedValue.zig +++ b/src/TypedValue.zig @@ -278,6 +278,9 @@ pub fn print( } else try writer.writeAll("..."); return writer.writeAll(" }"); }, + .memoized_decl, + .memoized_call, + => unreachable, }, }; } diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index 91743e0d64..d2d54a69c5 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -3254,6 +3254,9 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue { else => unreachable, }, .un => return func.fail("Wasm TODO: LowerConstant for {}", .{ty.fmt(mod)}), + .memoized_decl, + .memoized_call, + => unreachable, } } diff --git a/src/codegen.zig b/src/codegen.zig index 30ad8ab6e8..d6520ae7e5 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -605,6 +605,9 @@ pub fn generateSymbol( } } }, + .memoized_decl, + .memoized_call, + => unreachable, } return .ok; } diff --git a/src/codegen/c.zig b/src/codegen/c.zig index a8e2077652..e6ce72f48e 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -1090,6 +1090,7 @@ pub const DeclGen = struct { }; switch (mod.intern_pool.indexToKey(val.ip_index)) { + // types, not values .int_type, .ptr_type, .array_type, @@ -1106,7 +1107,10 @@ pub const DeclGen = struct { .func_type, .error_set_type, .inferred_error_set_type, - => unreachable, // types, not values + // memoization, not values + .memoized_decl, + .memoized_call, + => unreachable, .undef, .runtime_value => unreachable, // handled above .simple_value => |simple_value| switch (simple_value) { diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 5ef92c6e46..398a4124cc 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -3793,6 +3793,9 @@ pub const DeclGen = struct { return llvm_union_ty.constNamedStruct(&fields, fields_len); } }, + .memoized_decl, + .memoized_call, + => unreachable, } } diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 94ea8b7f89..79c9682325 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -830,6 +830,9 @@ pub const DeclGen = struct { try self.addUndef(layout.padding); }, + .memoized_decl, + .memoized_call, + => unreachable, } } }; diff --git a/src/type.zig b/src/type.zig index 0e30debf0a..bc2ce6fc7e 100644 --- a/src/type.zig +++ b/src/type.zig @@ -400,6 +400,9 @@ pub const Type = struct { .opt, .aggregate, .un, + // memoization, not types + .memoized_decl, + .memoized_call, => unreachable, } } @@ -613,6 +616,9 @@ pub const Type = struct { .opt, .aggregate, .un, + // memoization, not types + .memoized_decl, + .memoized_call, => unreachable, }, }; @@ -719,6 +725,9 @@ pub const Type = struct { .opt, .aggregate, .un, + // memoization, not types + .memoized_decl, + .memoized_call, => unreachable, }; } @@ -1050,6 +1059,9 @@ pub 
const Type = struct { .opt, .aggregate, .un, + // memoization, not types + .memoized_decl, + .memoized_call, => unreachable, }, } @@ -1464,6 +1476,9 @@ pub const Type = struct { .opt, .aggregate, .un, + // memoization, not types + .memoized_decl, + .memoized_call, => unreachable, }, } @@ -1695,6 +1710,9 @@ pub const Type = struct { .opt, .aggregate, .un, + // memoization, not types + .memoized_decl, + .memoized_call, => unreachable, } } @@ -2250,6 +2268,9 @@ pub const Type = struct { .opt, .aggregate, .un, + // memoization, not types + .memoized_decl, + .memoized_call, => unreachable, }, }; @@ -2586,6 +2607,9 @@ pub const Type = struct { .opt, .aggregate, .un, + // memoization, not types + .memoized_decl, + .memoized_call, => unreachable, }, }; @@ -2728,6 +2752,9 @@ pub const Type = struct { .opt, .aggregate, .un, + // memoization, not types + .memoized_decl, + .memoized_call, => unreachable, }, }; diff --git a/src/value.zig b/src/value.zig index a9fb906c0b..a63b83daae 100644 --- a/src/value.zig +++ b/src/value.zig @@ -476,6 +476,10 @@ pub const Value = struct { .tag = un.tag.toValue(), .val = un.val.toValue(), }), + + .memoized_decl, + .memoized_call, + => unreachable, }; } -- cgit v1.2.3 From 3064d2aa7b9a8ea836cb70884b0640fe902ecc29 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Sun, 28 May 2023 10:33:59 -0400 Subject: behavior: additional llvm fixes --- src/InternPool.zig | 35 +++++++----- src/Module.zig | 6 +- src/Sema.zig | 133 ++++++++++++++++++++++--------------------- src/TypedValue.zig | 12 ++-- src/arch/aarch64/CodeGen.zig | 2 +- src/arch/arm/CodeGen.zig | 2 +- src/arch/sparc64/CodeGen.zig | 2 +- src/arch/wasm/CodeGen.zig | 2 +- src/codegen.zig | 2 +- src/codegen/llvm.zig | 8 +-- src/codegen/spirv.zig | 6 +- src/type.zig | 54 +++++++++--------- src/value.zig | 33 +++++++---- 13 files changed, 160 insertions(+), 137 deletions(-) (limited to 'src/arch') diff --git a/src/InternPool.zig b/src/InternPool.zig index 7ff49c4259..55ab58c391 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -3131,7 +3131,8 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { }, .enum_type => |enum_type| { - assert(enum_type.tag_ty != .none); + assert(enum_type.tag_ty == .noreturn_type or ip.isIntegerType(enum_type.tag_ty)); + for (enum_type.values) |value| assert(ip.typeOf(value) == enum_type.tag_ty); assert(enum_type.names_map == .none); assert(enum_type.values_map == .none); @@ -3622,14 +3623,12 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { if (bytes.len != len) { assert(bytes.len == len_including_sentinel); assert(bytes[len] == ip.indexToKey(sentinel).int.storage.u64); - unreachable; } }, .elems => |elems| { if (elems.len != len) { assert(elems.len == len_including_sentinel); assert(elems[len] == sentinel); - unreachable; } }, .repeated_elem => |elem| { @@ -3832,7 +3831,7 @@ pub const IncompleteEnumType = struct { values_start: u32, pub fn setTagType(self: @This(), ip: *InternPool, tag_ty: Index) void { - assert(tag_ty != .none); + assert(tag_ty == .noreturn_type or ip.isIntegerType(tag_ty)); ip.extra.items[self.tag_ty_index] = @enumToInt(tag_ty); } @@ -3863,6 +3862,7 @@ pub const IncompleteEnumType = struct { gpa: Allocator, value: Index, ) Allocator.Error!?u32 { + assert(ip.typeOf(value) == @intToEnum(Index, ip.extra.items[self.tag_ty_index])); const map = &ip.maps.items[@enumToInt(self.values_map.unwrap().?)]; const field_index = map.count(); const indexes = ip.extra.items[self.values_start..][0..field_index]; @@ -4346,7 
+4346,7 @@ pub fn sliceLen(ip: InternPool, i: Index) Index { /// * ptr <=> ptr /// * opt ptr <=> ptr /// * opt ptr <=> opt ptr -/// * int => ptr +/// * int <=> ptr /// * null_value => opt /// * payload => opt /// * error set <=> error set @@ -4386,18 +4386,18 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Al .ty = new_ty, .index = func.index, } }), - .int => |int| if (ip.isIntegerType(new_ty)) - return getCoercedInts(ip, gpa, int, new_ty) - else if (ip.isEnumType(new_ty)) - return ip.get(gpa, .{ .enum_tag = .{ + .int => |int| switch (ip.indexToKey(new_ty)) { + .enum_type => |enum_type| return ip.get(gpa, .{ .enum_tag = .{ .ty = new_ty, - .int = val, - } }) - else if (ip.isPointerType(new_ty)) - return ip.get(gpa, .{ .ptr = .{ + .int = try ip.getCoerced(gpa, val, enum_type.tag_ty), + } }), + .ptr_type => return ip.get(gpa, .{ .ptr = .{ .ty = new_ty, - .addr = .{ .int = val }, + .addr = .{ .int = try ip.getCoerced(gpa, val, .usize_type) }, } }), + else => if (ip.isIntegerType(new_ty)) + return getCoercedInts(ip, gpa, int, new_ty), + }, .enum_tag => |enum_tag| if (ip.isIntegerType(new_ty)) return getCoercedInts(ip, gpa, ip.indexToKey(enum_tag.int).int, new_ty), .enum_literal => |enum_literal| switch (ip.indexToKey(new_ty)) { @@ -4421,7 +4421,12 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Al .ty = new_ty, .addr = ptr.addr, .len = ptr.len, - } }), + } }) + else if (ip.isIntegerType(new_ty)) + switch (ptr.addr) { + .int => |int| return ip.getCoerced(gpa, int, new_ty), + else => {}, + }, .opt => |opt| if (ip.isPointerType(new_ty)) return switch (opt.val) { .none => try ip.get(gpa, .{ .ptr = .{ diff --git a/src/Module.zig b/src/Module.zig index fe9c59583a..d11a11cf08 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -707,6 +707,10 @@ pub const Decl = struct { return TypedValue{ .ty = decl.ty, .val = decl.val }; } + pub fn internValue(decl: Decl, mod: *Module) Allocator.Error!InternPool.Index { + return decl.val.intern(decl.ty, mod); + } + pub fn isFunction(decl: Decl, mod: *const Module) !bool { const tv = try decl.typedValue(); return tv.ty.zigTypeTag(mod) == .Fn; @@ -7073,7 +7077,7 @@ pub fn atomicPtrAlignment( const int_ty = switch (ty.zigTypeTag(mod)) { .Int => ty, - .Enum => try ty.intTagType(mod), + .Enum => ty.intTagType(mod), .Float => { const bit_count = ty.floatBits(target); if (bit_count > max_atomic_bits) { diff --git a/src/Sema.zig b/src/Sema.zig index 7562794d25..7ad7b1a8a3 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -3291,7 +3291,8 @@ fn zirErrorSetDecl( while (extra_index < extra_index_end) : (extra_index += 2) { // +2 to skip over doc_string const str_index = sema.code.extra[extra_index]; const name = sema.code.nullTerminatedString(str_index); - const name_ip = try mod.intern_pool.getOrPutString(gpa, name); + const kv = try mod.getErrorValue(name); + const name_ip = try mod.intern_pool.getOrPutString(gpa, kv.key); const result = names.getOrPutAssumeCapacity(name_ip); assert(!result.found_existing); // verified in AstGen } @@ -6409,7 +6410,7 @@ fn zirCall( // Generate args to comptime params in comptime block. 
defer block.is_comptime = parent_comptime; - if (arg_index < fn_params_len and func_ty_info.paramIsComptime(@intCast(u5, arg_index))) { + if (arg_index < @min(fn_params_len, 32) and func_ty_info.paramIsComptime(@intCast(u5, arg_index))) { block.is_comptime = true; // TODO set comptime_reason } @@ -7077,14 +7078,13 @@ fn analyzeCall( assert(!func_ty_info.is_generic); const args = try sema.arena.alloc(Air.Inst.Ref, uncasted_args.len); - const fn_info = mod.typeToFunc(func_ty).?; for (uncasted_args, 0..) |uncasted_arg, i| { if (i < fn_params_len) { const opts: CoerceOpts = .{ .param_src = .{ .func_inst = func, .param_i = @intCast(u32, i), } }; - const param_ty = fn_info.param_types[i].toType(); + const param_ty = mod.typeToFunc(func_ty).?.param_types[i].toType(); args[i] = sema.analyzeCallArg( block, .unneeded, @@ -8267,7 +8267,7 @@ fn zirEnumToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A }; const enum_tag_ty = sema.typeOf(enum_tag); - const int_tag_ty = try enum_tag_ty.intTagType(mod); + const int_tag_ty = enum_tag_ty.intTagType(mod); if (try sema.typeHasOnePossibleValue(enum_tag_ty)) |opv| { return sema.addConstant(int_tag_ty, try mod.getCoerced(opv, int_tag_ty)); @@ -8299,12 +8299,9 @@ fn zirIntToEnum(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A if (try sema.resolveMaybeUndefVal(operand)) |int_val| { if (dest_ty.isNonexhaustiveEnum(mod)) { - const int_tag_ty = try dest_ty.intTagType(mod); + const int_tag_ty = dest_ty.intTagType(mod); if (try sema.intFitsInType(int_val, int_tag_ty, null)) { - return sema.addConstant(dest_ty, (try mod.intern(.{ .enum_tag = .{ - .ty = dest_ty.toIntern(), - .int = int_val.toIntern(), - } })).toValue()); + return sema.addConstant(dest_ty, try mod.getCoerced(int_val, dest_ty)); } const msg = msg: { const msg = try sema.errMsg( @@ -8336,7 +8333,7 @@ fn zirIntToEnum(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A }; return sema.failWithOwnedErrorMsg(msg); } - return sema.addConstant(dest_ty, int_val); + return sema.addConstant(dest_ty, try mod.getCoerced(int_val, dest_ty)); } if (try sema.typeHasOnePossibleValue(dest_ty)) |opv| { @@ -9513,7 +9510,7 @@ fn zirPtrToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ty.fmt(sema.mod)}); } if (try sema.resolveMaybeUndefValIntable(ptr)) |ptr_val| { - return sema.addConstant(Type.usize, ptr_val); + return sema.addConstant(Type.usize, try mod.getCoerced(ptr_val, Type.usize)); } try sema.requireRuntimeBlock(block, inst_data.src(), ptr_src); return block.addUnOp(.ptrtoint, ptr); @@ -9651,7 +9648,7 @@ fn intCast( // range shrinkage // requirement: int value fits into target type if (wanted_value_bits < actual_value_bits) { - const dest_max_val_scalar = try dest_scalar_ty.maxIntScalar(mod, operand_ty); + const dest_max_val_scalar = try dest_scalar_ty.maxIntScalar(mod, operand_scalar_ty); const dest_max_val = try sema.splat(operand_ty, dest_max_val_scalar); const dest_max = try sema.addConstant(operand_ty, dest_max_val); const diff = try block.addBinOp(.subwrap, dest_max, operand); @@ -12848,7 +12845,7 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai if (res_sent_val) |sent_val| { const elem_index = try sema.addIntUnsigned(Type.usize, result_len); const elem_ptr = try block.addPtrElemPtr(alloc, elem_index, elem_ptr_ty); - const init = try sema.addConstant(lhs_info.elem_type, sent_val); + const init = try sema.addConstant(lhs_info.elem_type, try 
mod.getCoerced(sent_val, lhs_info.elem_type)); try sema.storePtr2(block, src, elem_ptr, src, init, lhs_src, .store); } @@ -19236,7 +19233,8 @@ fn zirReify( const name_val = try elem_val.fieldValue(mod, elem_fields.getIndex("name").?); const name_str = try name_val.toAllocatedBytes(Type.slice_const_u8, sema.arena, mod); - const name_ip = try mod.intern_pool.getOrPutString(gpa, name_str); + const kv = try mod.getErrorValue(name_str); + const name_ip = try mod.intern_pool.getOrPutString(gpa, kv.key); const gop = names.getOrPutAssumeCapacity(name_ip); if (gop.found_existing) { return sema.fail(block, src, "duplicate error '{s}'", .{name_str}); @@ -19346,7 +19344,7 @@ fn zirReify( return sema.failWithOwnedErrorMsg(msg); } - if (try incomplete_enum.addFieldValue(&mod.intern_pool, gpa, value_val.toIntern())) |other| { + if (try incomplete_enum.addFieldValue(&mod.intern_pool, gpa, (try mod.getCoerced(value_val, int_tag_ty)).toIntern())) |other| { const msg = msg: { const msg = try sema.errMsg(block, src, "enum tag value {} already taken", .{value_val.fmtValue(Type.comptime_int, mod)}); errdefer msg.destroy(gpa); @@ -20263,7 +20261,7 @@ fn zirErrSetCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat } } - return sema.addConstant(dest_ty, val); + return sema.addConstant(dest_ty, try mod.getCoerced(val, dest_ty)); } try sema.requireRuntimeBlock(block, src, operand_src); @@ -20421,7 +20419,7 @@ fn zirConstCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData const dest_ty = try Type.ptr(sema.arena, mod, ptr_info); if (try sema.resolveMaybeUndefVal(operand)) |operand_val| { - return sema.addConstant(dest_ty, operand_val); + return sema.addConstant(dest_ty, try mod.getCoerced(operand_val, dest_ty)); } try sema.requireRuntimeBlock(block, src, null); @@ -20624,7 +20622,7 @@ fn zirBitCount( for (elems, 0..) 
|*elem, i| { const elem_val = try val.elemValue(mod, i); const count = comptimeOp(elem_val, scalar_ty, mod); - elem.* = (try mod.intValue(scalar_ty, count)).toIntern(); + elem.* = (try mod.intValue(result_scalar_ty, count)).toIntern(); } return sema.addConstant(result_ty, (try mod.intern(.{ .aggregate = .{ .ty = result_ty.toIntern(), @@ -22385,7 +22383,9 @@ fn analyzeMinMax( if (std.debug.runtime_safety) { assert(try sema.intFitsInType(val, refined_ty, null)); } - cur_minmax = try sema.addConstant(refined_ty, try mod.getCoerced(val, refined_ty)); + cur_minmax = try sema.addConstant(refined_ty, (try sema.resolveMaybeUndefVal( + try sema.coerceInMemory(block, val, orig_ty, refined_ty, src), + )).?); } break :refined refined_ty; @@ -23684,7 +23684,7 @@ fn validateExternType( return !target_util.fnCallConvAllowsZigTypes(target, ty.fnCallingConvention(mod)); }, .Enum => { - return sema.validateExternType(try ty.intTagType(mod), position); + return sema.validateExternType(ty.intTagType(mod), position); }, .Struct, .Union => switch (ty.containerLayout(mod)) { .Extern => return true, @@ -23762,7 +23762,7 @@ fn explainWhyTypeIsNotExtern( } }, .Enum => { - const tag_ty = try ty.intTagType(mod); + const tag_ty = ty.intTagType(mod); try mod.errNoteNonLazy(src_loc, msg, "enum tag type '{}' is not extern compatible", .{tag_ty.fmt(sema.mod)}); try sema.explainWhyTypeIsNotExtern(msg, src_loc, tag_ty, position); }, @@ -25412,7 +25412,8 @@ fn elemVal( const elem_ptr_ty = try sema.elemPtrType(indexable_ty, index); const elem_ptr_val = try indexable_val.elemPtr(elem_ptr_ty, index, mod); if (try sema.pointerDeref(block, indexable_src, elem_ptr_val, elem_ptr_ty)) |elem_val| { - return sema.addConstant(indexable_ty.elemType2(mod), elem_val); + const result_ty = indexable_ty.elemType2(mod); + return sema.addConstant(result_ty, try mod.getCoerced(elem_val, result_ty)); } break :rs indexable_src; }; @@ -26603,6 +26604,10 @@ fn coerceInMemory( .storage = .{ .elems = dest_elems }, } })).toValue()); }, + .float => |float| return sema.addConstant(dst_ty, (try mod.intern(.{ .float = .{ + .ty = dst_ty.toIntern(), + .storage = float.storage, + } })).toValue()), else => return sema.addConstant(dst_ty, try mod.getCoerced(val, dst_ty)), } } @@ -26983,8 +26988,11 @@ fn coerceInMemoryAllowed( if (dest_ty.eql(src_ty, mod)) return .ok; + const dest_tag = dest_ty.zigTypeTag(mod); + const src_tag = src_ty.zigTypeTag(mod); + // Differently-named integers with the same number of bits. - if (dest_ty.zigTypeTag(mod) == .Int and src_ty.zigTypeTag(mod) == .Int) { + if (dest_tag == .Int and src_tag == .Int) { const dest_info = dest_ty.intInfo(mod); const src_info = src_ty.intInfo(mod); @@ -27009,7 +27017,7 @@ fn coerceInMemoryAllowed( } // Differently-named floats with the same number of bits. 
- if (dest_ty.zigTypeTag(mod) == .Float and src_ty.zigTypeTag(mod) == .Float) { + if (dest_tag == .Float and src_tag == .Float) { const dest_bits = dest_ty.floatBits(target); const src_bits = src_ty.floatBits(target); if (dest_bits == src_bits) { @@ -27031,9 +27039,6 @@ fn coerceInMemoryAllowed( return try sema.coerceInMemoryAllowedPtrs(block, dest_ty, src_ty, dest_ty, src_ty, dest_is_mut, target, dest_src, src_src); } - const dest_tag = dest_ty.zigTypeTag(mod); - const src_tag = src_ty.zigTypeTag(mod); - // Functions if (dest_tag == .Fn and src_tag == .Fn) { return try sema.coerceInMemoryAllowedFns(block, dest_ty, src_ty, target, dest_src, src_src); @@ -27808,7 +27813,7 @@ fn beginComptimePtrMutation( .comptime_field => |comptime_field| { const duped = try sema.arena.create(Value); duped.* = comptime_field.toValue(); - return sema.beginComptimePtrMutationInner(block, src, mod.intern_pool.typeOf(ptr_val.toIntern()).toType(), duped, ptr_elem_ty, .{ + return sema.beginComptimePtrMutationInner(block, src, mod.intern_pool.typeOf(comptime_field).toType(), duped, ptr_elem_ty, .{ .decl = undefined, .runtime_index = .comptime_field_ptr, }); @@ -27864,7 +27869,21 @@ fn beginComptimePtrMutation( .direct => |val_ptr| { const payload_ty = parent.ty.optionalChild(mod); switch (val_ptr.ip_index) { - .undef, .null_value => { + .none => return ComptimePtrMutationKit{ + .mut_decl = parent.mut_decl, + .pointee = .{ .direct = &val_ptr.castTag(.opt_payload).?.data }, + .ty = payload_ty, + }, + else => { + const payload_val = switch (mod.intern_pool.indexToKey(val_ptr.ip_index)) { + .undef => try mod.intern(.{ .undef = payload_ty.toIntern() }), + .opt => |opt| switch (opt.val) { + .none => try mod.intern(.{ .undef = payload_ty.toIntern() }), + else => opt.val, + }, + else => unreachable, + }; + // An optional has been initialized to undefined at comptime and now we // are for the first time setting the payload. We must change the // representation of the optional from `undef` to `opt_payload`. 
@@ -27874,7 +27893,7 @@ fn beginComptimePtrMutation( const payload = try arena.create(Value.Payload.SubValue); payload.* = .{ .base = .{ .tag = .opt_payload }, - .data = (try mod.intern(.{ .undef = payload_ty.toIntern() })).toValue(), + .data = payload_val.toValue(), }; val_ptr.* = Value.initPayload(&payload.base); @@ -27885,24 +27904,6 @@ fn beginComptimePtrMutation( .ty = payload_ty, }; }, - .none => switch (val_ptr.tag()) { - .opt_payload => return ComptimePtrMutationKit{ - .mut_decl = parent.mut_decl, - .pointee = .{ .direct = &val_ptr.castTag(.opt_payload).?.data }, - .ty = payload_ty, - }, - - else => return ComptimePtrMutationKit{ - .mut_decl = parent.mut_decl, - .pointee = .{ .direct = val_ptr }, - .ty = payload_ty, - }, - }, - else => return ComptimePtrMutationKit{ - .mut_decl = parent.mut_decl, - .pointee = .{ .direct = val_ptr }, - .ty = payload_ty, - }, } }, .bad_decl_ty, .bad_ptr_ty => return parent, @@ -33339,16 +33340,20 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { return null; }, - .auto, .explicit => switch (enum_type.names.len) { - 0 => return Value.@"unreachable", - 1 => return try mod.getCoerced((if (enum_type.values.len == 0) - try mod.intern(.{ .int = .{ - .ty = enum_type.tag_ty, - .storage = .{ .u64 = 0 }, - } }) - else - enum_type.values[0]).toValue(), ty), - else => return null, + .auto, .explicit => { + if (enum_type.tag_ty.toType().hasRuntimeBits(mod)) return null; + + switch (enum_type.names.len) { + 0 => return Value.@"unreachable", + 1 => return try mod.getCoerced((if (enum_type.values.len == 0) + try mod.intern(.{ .int = .{ + .ty = enum_type.tag_ty, + .storage = .{ .u64 = 0 }, + } }) + else + enum_type.values[0]).toValue(), ty), + else => return null, + } }, }, @@ -34241,13 +34246,9 @@ fn intFitsInType( if (ty.toIntern() == .comptime_int_type) return true; const info = ty.intInfo(mod); switch (val.toIntern()) { - .undef, - .zero, - .zero_usize, - .zero_u8, - => return true, - + .zero_usize, .zero_u8 => return true, else => switch (mod.intern_pool.indexToKey(val.toIntern())) { + .undef => return true, .variable, .extern_func, .func, .ptr => { const target = mod.getTarget(); const ptr_bits = target.ptrBitWidth(); @@ -34553,7 +34554,7 @@ fn isNoReturn(sema: *Sema, ref: Air.Inst.Ref) bool { return sema.typeOf(ref).isNoReturn(sema.mod); } -/// Avoids crashing the compiler when asking if inferred allocations are known to be a certain type. +/// Avoids crashing the compiler when asking if inferred allocations are known to be a certain zig type. 
fn isKnownZigType(sema: *Sema, ref: Air.Inst.Ref, tag: std.builtin.TypeId) bool { if (Air.refToIndex(ref)) |inst| switch (sema.air_instructions.items(.tag)[inst]) { .inferred_alloc, .inferred_alloc_comptime => return false, diff --git a/src/TypedValue.zig b/src/TypedValue.zig index f252e84791..a46e6ebe1f 100644 --- a/src/TypedValue.zig +++ b/src/TypedValue.zig @@ -302,10 +302,14 @@ fn printAggregate( var i: u32 = 0; while (i < max_len) : (i += 1) { if (i != 0) try writer.writeAll(", "); - switch (mod.intern_pool.indexToKey(ty.toIntern())) { - .struct_type, .anon_struct_type => try writer.print(".{s} = ", .{ty.structFieldName(i, mod)}), - else => {}, - } + if (switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .struct_type => |struct_type| mod.structPtrUnwrap(struct_type.index).?.fields.keys()[i], + .anon_struct_type => |anon_struct_type| if (anon_struct_type.isTuple()) + null + else + mod.intern_pool.stringToSlice(anon_struct_type.names[i]), + else => unreachable, + }) |field_name| try writer.print(".{s} = ", .{field_name}); try print(.{ .ty = ty.structFieldType(i, mod), .val = try val.fieldValue(mod, i), diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index 3afb510d43..5874440e50 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -4527,7 +4527,7 @@ fn cmp( } }, .Float => return self.fail("TODO ARM cmp floats", .{}), - .Enum => try lhs_ty.intTagType(mod), + .Enum => lhs_ty.intTagType(mod), .Int => lhs_ty, .Bool => Type.u1, .Pointer => Type.usize, diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index 5f476a2e80..360f52cb30 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -4475,7 +4475,7 @@ fn cmp( } }, .Float => return self.fail("TODO ARM cmp floats", .{}), - .Enum => try lhs_ty.intTagType(mod), + .Enum => lhs_ty.intTagType(mod), .Int => lhs_ty, .Bool => Type.u1, .Pointer => Type.usize, diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index 354af50b61..3bcdd5ad25 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -1435,7 +1435,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { const int_ty = switch (lhs_ty.zigTypeTag(mod)) { .Vector => unreachable, // Handled by cmp_vector. 
- .Enum => try lhs_ty.intTagType(mod), + .Enum => lhs_ty.intTagType(mod), .Int => lhs_ty, .Bool => Type.u1, .Pointer => Type.usize, diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index d2d54a69c5..af2b37312d 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -6883,7 +6883,7 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 { return loc.index; } - const int_tag_ty = try enum_ty.intTagType(mod); + const int_tag_ty = enum_ty.intTagType(mod); if (int_tag_ty.bitSize(mod) > 64) { return func.fail("TODO: Implement @tagName for enums with tag size larger than 64 bits", .{}); diff --git a/src/codegen.zig b/src/codegen.zig index d6520ae7e5..0034e96e35 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -312,7 +312,7 @@ pub fn generateSymbol( } }, .enum_tag => |enum_tag| { - const int_tag_ty = try typed_value.ty.intTagType(mod); + const int_tag_ty = typed_value.ty.intTagType(mod); switch (try generateSymbol(bin_file, src_loc, .{ .ty = int_tag_ty, .val = try mod.getCoerced(enum_tag.int.toValue(), int_tag_ty), diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 398a4124cc..956924eff8 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -2773,7 +2773,7 @@ pub const DeclGen = struct { return dg.context.intType(info.bits); }, .Enum => { - const int_ty = try t.intTagType(mod); + const int_ty = t.intTagType(mod); const bit_count = int_ty.intInfo(mod).bits; assert(bit_count != 0); return dg.context.intType(bit_count); @@ -4148,9 +4148,7 @@ pub const DeclGen = struct { const mod = dg.module; const int_ty = switch (ty.zigTypeTag(mod)) { .Int => ty, - .Enum => ty.intTagType(mod) catch |err| switch (err) { - error.OutOfMemory => @panic("OOM"), - }, + .Enum => ty.intTagType(mod), .Float => { if (!is_rmw_xchg) return null; return dg.context.intType(@intCast(c_uint, ty.abiSize(mod) * 8)); @@ -5100,7 +5098,7 @@ pub const FuncGen = struct { const mod = self.dg.module; const scalar_ty = operand_ty.scalarType(mod); const int_ty = switch (scalar_ty.zigTypeTag(mod)) { - .Enum => try scalar_ty.intTagType(mod), + .Enum => scalar_ty.intTagType(mod), .Int, .Bool, .Pointer, .ErrorSet => scalar_ty, .Optional => blk: { const payload_ty = operand_ty.optionalChild(mod); diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 79c9682325..0fbcb47f71 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -700,7 +700,7 @@ pub const DeclGen = struct { .enum_tag => { const int_val = try val.enumToInt(ty, mod); - const int_ty = try ty.intTagType(mod); + const int_ty = ty.intTagType(mod); try self.lower(int_ty, int_val); }, @@ -1156,7 +1156,7 @@ pub const DeclGen = struct { return try self.intType(int_info.signedness, int_info.bits); }, .Enum => { - const tag_ty = try ty.intTagType(mod); + const tag_ty = ty.intTagType(mod); return self.resolveType(tag_ty, repr); }, .Float => { @@ -3053,7 +3053,7 @@ pub const DeclGen = struct { break :blk if (backing_bits <= 32) @as(u32, 1) else 2; }, .Enum => blk: { - const int_ty = try cond_ty.intTagType(mod); + const int_ty = cond_ty.intTagType(mod); const int_info = int_ty.intInfo(mod); const backing_bits = self.backingIntBits(int_info.bits) orelse { return self.todo("implement composite int switch", .{}); diff --git a/src/type.zig b/src/type.zig index bc2ce6fc7e..27c7756a68 100644 --- a/src/type.zig +++ b/src/type.zig @@ -1842,17 +1842,15 @@ pub const Type = struct { /// See also `isPtrLikeOptional`. 
pub fn optionalReprIsPayload(ty: Type, mod: *const Module) bool { return switch (mod.intern_pool.indexToKey(ty.toIntern())) { - .opt_type => |child| switch (child.toType().zigTypeTag(mod)) { - .Pointer => { - const info = child.toType().ptrInfo(mod); - return switch (info.size) { - .C => false, - else => !info.@"allowzero", - }; + .opt_type => |child_type| switch (mod.intern_pool.indexToKey(child_type)) { + .ptr_type => |ptr_type| switch (ptr_type.size) { + .C => false, + .Slice, .Many, .One => !ptr_type.is_allowzero, }, - .ErrorSet => true, + .error_set_type => true, else => false, }, + .ptr_type => |ptr_type| ptr_type.size == .C, else => false, }; } @@ -2570,23 +2568,27 @@ pub const Type = struct { return null; }, - .auto, .explicit => switch (enum_type.names.len) { - 0 => return Value.@"unreachable", - 1 => { - if (enum_type.values.len == 0) { - const only = try mod.intern(.{ .enum_tag = .{ - .ty = ty.toIntern(), - .int = try mod.intern(.{ .int = .{ - .ty = enum_type.tag_ty, - .storage = .{ .u64 = 0 }, - } }), - } }); - return only.toValue(); - } else { - return enum_type.values[0].toValue(); - } - }, - else => return null, + .auto, .explicit => { + if (enum_type.tag_ty.toType().hasRuntimeBits(mod)) return null; + + switch (enum_type.names.len) { + 0 => return Value.@"unreachable", + 1 => { + if (enum_type.values.len == 0) { + const only = try mod.intern(.{ .enum_tag = .{ + .ty = ty.toIntern(), + .int = try mod.intern(.{ .int = .{ + .ty = enum_type.tag_ty, + .storage = .{ .u64 = 0 }, + } }), + } }); + return only.toValue(); + } else { + return enum_type.values[0].toValue(); + } + }, + else => return null, + } }, }, @@ -2887,7 +2889,7 @@ pub const Type = struct { } /// Asserts the type is an enum or a union. - pub fn intTagType(ty: Type, mod: *Module) !Type { + pub fn intTagType(ty: Type, mod: *Module) Type { return switch (mod.intern_pool.indexToKey(ty.toIntern())) { .union_type => |union_type| mod.unionPtr(union_type.index).tag_ty.intTagType(mod), .enum_type => |enum_type| enum_type.tag_ty.toType(), diff --git a/src/value.zig b/src/value.zig index a63b83daae..f02c31ca84 100644 --- a/src/value.zig +++ b/src/value.zig @@ -673,7 +673,7 @@ pub const Value = struct { while (true) switch (mod.intern_pool.indexToKey(check.toIntern())) { .ptr => |ptr| switch (ptr.addr) { .decl, .mut_decl, .comptime_field => return true, - .eu_payload, .opt_payload => |index| check = index.toValue(), + .eu_payload, .opt_payload => |base| check = base.toValue(), .elem, .field => |base_index| check = base_index.base.toValue(), else => return false, }, @@ -943,22 +943,27 @@ pub const Value = struct { return Value.true; } }, - .Int, .Enum => { - const int_info = ty.intInfo(mod); + .Int, .Enum => |ty_tag| { + const int_ty = switch (ty_tag) { + .Int => ty, + .Enum => ty.intTagType(mod), + else => unreachable, + }; + const int_info = int_ty.intInfo(mod); const bits = int_info.bits; const byte_count = (bits + 7) / 8; - if (bits == 0 or buffer.len == 0) return mod.intValue(ty, 0); + if (bits == 0 or buffer.len == 0) return mod.getCoerced(try mod.intValue(int_ty, 0), ty); if (bits <= 64) switch (int_info.signedness) { // Fast path for integers <= u64 .signed => { const val = std.mem.readVarInt(i64, buffer[0..byte_count], endian); const result = (val << @intCast(u6, 64 - bits)) >> @intCast(u6, 64 - bits); - return mod.intValue(ty, result); + return mod.getCoerced(try mod.intValue(int_ty, result), ty); }, .unsigned => { const val = std.mem.readVarInt(u64, buffer[0..byte_count], endian); const result = (val << @intCast(u6, 
64 - bits)) >> @intCast(u6, 64 - bits); - return mod.intValue(ty, result); + return mod.getCoerced(try mod.intValue(int_ty, result), ty); }, } else { // Slow path, we have to construct a big-int const Limb = std.math.big.Limb; @@ -967,7 +972,7 @@ pub const Value = struct { var bigint = BigIntMutable.init(limbs_buffer, 0); bigint.readTwosComplement(buffer[0..byte_count], bits, endian, int_info.signedness); - return mod.intValue_big(ty, bigint.toConst()); + return mod.getCoerced(try mod.intValue_big(int_ty, bigint.toConst()), ty); } }, .Float => return (try mod.intern(.{ .float = .{ @@ -1583,7 +1588,7 @@ pub const Value = struct { .Enum => { const a_val = try a.enumToInt(ty, mod); const b_val = try b.enumToInt(ty, mod); - const int_ty = try ty.intTagType(mod); + const int_ty = ty.intTagType(mod); return eqlAdvanced(a_val, int_ty, b_val, int_ty, mod, opt_sema); }, .Array, .Vector => { @@ -1835,7 +1840,8 @@ pub const Value = struct { })).toValue(), .ptr => |ptr| switch (ptr.addr) { .decl => |decl| mod.declPtr(decl).val.elemValue(mod, index), - .mut_decl => |mut_decl| mod.declPtr(mut_decl.decl).val.elemValue(mod, index), + .mut_decl => |mut_decl| (try mod.declPtr(mut_decl.decl).internValue(mod)) + .toValue().elemValue(mod, index), .int, .eu_payload, .opt_payload => unreachable, .comptime_field => |field_val| field_val.toValue().elemValue(mod, index), .elem => |elem| elem.base.toValue().elemValue(mod, index + elem.index), @@ -1946,9 +1952,12 @@ pub const Value = struct { return switch (mod.intern_pool.indexToKey(val.toIntern())) { .ptr => |ptr| switch (ptr.addr) { .decl => |decl| try mod.declPtr(decl).val.sliceArray(mod, arena, start, end), - .mut_decl => |mut_decl| try mod.declPtr(mut_decl.decl).val.sliceArray(mod, arena, start, end), - .comptime_field => |comptime_field| try comptime_field.toValue().sliceArray(mod, arena, start, end), - .elem => |elem| try elem.base.toValue().sliceArray(mod, arena, start + elem.index, end + elem.index), + .mut_decl => |mut_decl| (try mod.declPtr(mut_decl.decl).internValue(mod)).toValue() + .sliceArray(mod, arena, start, end), + .comptime_field => |comptime_field| comptime_field.toValue() + .sliceArray(mod, arena, start, end), + .elem => |elem| elem.base.toValue() + .sliceArray(mod, arena, start + elem.index, end + elem.index), else => unreachable, }, .aggregate => |aggregate| (try mod.intern(.{ .aggregate = .{ -- cgit v1.2.3 From a702af062bb65673ba554dba330b4c5ca8d50f3e Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Mon, 29 May 2023 08:21:47 -0400 Subject: x86_64: fix InternPool regressions --- src/Sema.zig | 10 ++++- src/arch/x86_64/CodeGen.zig | 5 +-- src/codegen.zig | 94 +++++++++++++++++++++++++-------------------- 3 files changed, 61 insertions(+), 48 deletions(-) (limited to 'src/arch') diff --git a/src/Sema.zig b/src/Sema.zig index bd3cfad50d..81e2c6e2ae 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -24311,7 +24311,10 @@ fn fieldVal( .inferred_error_set_type => { return sema.fail(block, src, "TODO handle inferred error sets here", .{}); }, - .simple_type => |t| assert(t == .anyerror), + .simple_type => |t| { + assert(t == .anyerror); + _ = try mod.getErrorValue(field_name); + }, else => unreachable, } @@ -24529,7 +24532,10 @@ fn fieldPtr( .inferred_error_set_type => { return sema.fail(block, src, "TODO handle inferred error sets here", .{}); }, - .simple_type => |t| assert(t == .anyerror), + .simple_type => |t| { + assert(t == .anyerror); + _ = try mod.getErrorValue(field_name); + }, else => unreachable, } diff --git a/src/arch/x86_64/CodeGen.zig 
b/src/arch/x86_64/CodeGen.zig index fca1b25a1d..dbb3d977b8 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -2128,10 +2128,7 @@ fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Live const dies = @truncate(u1, tomb_bits) != 0; tomb_bits >>= 1; if (!dies) continue; - const op_int = @enumToInt(op); - if (op_int < Air.ref_start_index) continue; - const op_index = @intCast(Air.Inst.Index, op_int - Air.ref_start_index); - self.processDeath(op_index); + self.processDeath(Air.refToIndexAllowNone(op) orelse continue); } self.finishAirResult(inst, result); } diff --git a/src/codegen.zig b/src/codegen.zig index 0034e96e35..983d895991 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -387,22 +387,24 @@ pub fn generateSymbol( }, .aggregate => |aggregate| switch (mod.intern_pool.indexToKey(typed_value.ty.toIntern())) { .array_type => |array_type| { - var index: u64 = 0; - while (index < array_type.len) : (index += 1) { - switch (aggregate.storage) { - .bytes => |bytes| try code.appendSlice(bytes), - .elems, .repeated_elem => switch (try generateSymbol(bin_file, src_loc, .{ - .ty = array_type.child.toType(), - .val = switch (aggregate.storage) { - .bytes => unreachable, - .elems => |elems| elems[@intCast(usize, index)], - .repeated_elem => |elem| elem, - }.toValue(), - }, code, debug_output, reloc_info)) { - .ok => {}, - .fail => |em| return .{ .fail = em }, - }, - } + switch (aggregate.storage) { + .bytes => |bytes| try code.appendSlice(bytes), + .elems, .repeated_elem => { + var index: u64 = 0; + while (index < array_type.len) : (index += 1) { + switch (try generateSymbol(bin_file, src_loc, .{ + .ty = array_type.child.toType(), + .val = switch (aggregate.storage) { + .bytes => unreachable, + .elems => |elems| elems[@intCast(usize, index)], + .repeated_elem => |elem| elem, + }.toValue(), + }, code, debug_output, reloc_info)) { + .ok => {}, + .fail => |em| return .{ .fail = em }, + } + } + }, } if (array_type.sentinel != .none) { @@ -416,22 +418,24 @@ pub fn generateSymbol( } }, .vector_type => |vector_type| { - var index: u32 = 0; - while (index < vector_type.len) : (index += 1) { - switch (aggregate.storage) { - .bytes => |bytes| try code.appendSlice(bytes), - .elems, .repeated_elem => switch (try generateSymbol(bin_file, src_loc, .{ - .ty = vector_type.child.toType(), - .val = switch (aggregate.storage) { - .bytes => unreachable, - .elems => |elems| elems[@intCast(usize, index)], - .repeated_elem => |elem| elem, - }.toValue(), - }, code, debug_output, reloc_info)) { - .ok => {}, - .fail => |em| return .{ .fail = em }, - }, - } + switch (aggregate.storage) { + .bytes => |bytes| try code.appendSlice(bytes), + .elems, .repeated_elem => { + var index: u64 = 0; + while (index < vector_type.len) : (index += 1) { + switch (try generateSymbol(bin_file, src_loc, .{ + .ty = vector_type.child.toType(), + .val = switch (aggregate.storage) { + .bytes => unreachable, + .elems => |elems| elems[@intCast(usize, index)], + .repeated_elem => |elem| elem, + }.toValue(), + }, code, debug_output, reloc_info)) { + .ok => {}, + .fail => |em| return .{ .fail = em }, + } + } + }, } const padding = math.cast(usize, typed_value.ty.abiSize(mod) - @@ -669,7 +673,7 @@ fn lowerParentPtr( mod.intern_pool.typeOf(elem.base).toType().elemType2(mod).abiSize(mod))), ), .field => |field| { - const base_type = mod.intern_pool.typeOf(field.base); + const base_type = mod.intern_pool.indexToKey(mod.intern_pool.typeOf(field.base)).ptr_type.elem_type; return lowerParentPtr( bin_file, 
src_loc, @@ -688,7 +692,7 @@ fn lowerParentPtr( .struct_type, .anon_struct_type, .union_type, - => @intCast(u32, base_type.toType().childType(mod).structFieldOffset( + => @intCast(u32, base_type.toType().structFieldOffset( @intCast(u32, field.index), mod, )), @@ -989,17 +993,23 @@ pub fn genTypedValue( return GenResult.mcv(.{ .immediate = error_index }); }, .ErrorUnion => { - const error_type = typed_value.ty.errorUnionSet(mod); + const err_type = typed_value.ty.errorUnionSet(mod); const payload_type = typed_value.ty.errorUnionPayload(mod); - const is_pl = typed_value.val.errorUnionIsPayload(mod); - if (!payload_type.hasRuntimeBitsIgnoreComptime(mod)) { // We use the error type directly as the type. - const err_val = if (!is_pl) typed_value.val else try mod.intValue(error_type, 0); - return genTypedValue(bin_file, src_loc, .{ - .ty = error_type, - .val = err_val, - }, owner_decl_index); + switch (mod.intern_pool.indexToKey(typed_value.val.toIntern()).error_union.val) { + .err_name => |err_name| return genTypedValue(bin_file, src_loc, .{ + .ty = err_type, + .val = (try mod.intern(.{ .err = .{ + .ty = err_type.toIntern(), + .name = err_name, + } })).toValue(), + }, owner_decl_index), + .payload => return genTypedValue(bin_file, src_loc, .{ + .ty = Type.err_int, + .val = try mod.intValue(Type.err_int, 0), + }, owner_decl_index), + } } }, -- cgit v1.2.3 From 90a877f462fce8bee69ad366aac66805a7c00571 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 30 May 2023 13:54:22 -0700 Subject: InternPool: pass by const pointer MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The Zig language allows the compiler to make this optimization automatically. We should definitely make the compiler do that, and revert this commit. However, that will not happen in this branch, and I want to continue to explore achieving performance parity with merge-base. So, this commit changes all InternPool parameters to be passed by const pointer rather than by value. I measured a 1.03x ± 0.03 speedup vs the previous commit compiling the (set of passing) behavior tests. Against merge-base, this commit is 1.17x ± 0.04 slower, which is an improvement from the previous measurement of 1.22x ± 0.02. 
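A minimal sketch of the transformation, assuming a hypothetical `Big`
struct standing in for InternPool (illustrative only, not part of the
diff below). Because Zig parameters are immutable, the language permits
lowering the by-value form to a reference automatically, but until the
compiler does so, the explicit const pointer avoids the copy:

    const std = @import("std");

    const Big = struct {
        items: [4096]u32 = [_]u32{0} ** 4096,

        // Before: `self` arrives by value; today this copies all 16 KiB.
        fn getByValue(self: Big, i: usize) u32 {
            return self.items[i];
        }

        // After: a const pointer guarantees no copy; call syntax is unchanged.
        fn getByPtr(self: *const Big, i: usize) u32 {
            return self.items[i];
        }
    };

    pub fn main() void {
        var big = Big{};
        std.debug.print("{} {}\n", .{ big.getByValue(0), big.getByPtr(0) });
    }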
Related issue: #13510 Related issue: #14129 Related issue: #15688 --- src/Air.zig | 8 ++--- src/InternPool.zig | 84 ++++++++++++++++++++++---------------------- src/Liveness.zig | 8 ++--- src/Liveness/Verify.zig | 4 +-- src/Module.zig | 2 +- src/Sema.zig | 2 +- src/arch/aarch64/CodeGen.zig | 6 ++-- src/arch/arm/CodeGen.zig | 6 ++-- src/arch/riscv64/CodeGen.zig | 6 ++-- src/arch/sparc64/CodeGen.zig | 6 ++-- src/arch/wasm/CodeGen.zig | 6 ++-- src/arch/x86_64/CodeGen.zig | 6 ++-- src/codegen/c.zig | 6 ++-- src/codegen/llvm.zig | 10 +++--- src/codegen/spirv.zig | 6 ++-- src/print_air.zig | 2 +- src/type.zig | 20 +++++------ 17 files changed, 94 insertions(+), 94 deletions(-) (limited to 'src/arch') diff --git a/src/Air.zig b/src/Air.zig index 56f7d4cf01..b179a3c024 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -1182,7 +1182,7 @@ pub fn getMainBody(air: Air) []const Air.Inst.Index { return air.extra[extra.end..][0..extra.data.body_len]; } -pub fn typeOf(air: Air, inst: Air.Inst.Ref, ip: InternPool) Type { +pub fn typeOf(air: Air, inst: Air.Inst.Ref, ip: *const InternPool) Type { const ref_int = @enumToInt(inst); if (ref_int < InternPool.static_keys.len) { return InternPool.static_keys[ref_int].typeOf().toType(); @@ -1190,7 +1190,7 @@ pub fn typeOf(air: Air, inst: Air.Inst.Ref, ip: InternPool) Type { return air.typeOfIndex(ref_int - ref_start_index, ip); } -pub fn typeOfIndex(air: Air, inst: Air.Inst.Index, ip: InternPool) Type { +pub fn typeOfIndex(air: Air, inst: Air.Inst.Index, ip: *const InternPool) Type { const datas = air.instructions.items(.data); switch (air.instructions.items(.tag)[inst]) { .add, @@ -1520,7 +1520,7 @@ pub fn value(air: Air, inst: Inst.Ref, mod: *Module) !?Value { const air_datas = air.instructions.items(.data); switch (air.instructions.items(.tag)[inst_index]) { .interned => return air_datas[inst_index].interned.toValue(), - else => return air.typeOfIndex(inst_index, mod.intern_pool).onePossibleValue(mod), + else => return air.typeOfIndex(inst_index, &mod.intern_pool).onePossibleValue(mod), } } @@ -1537,7 +1537,7 @@ pub fn nullTerminatedString(air: Air, index: usize) [:0]const u8 { /// because it can cause side effects. If an instruction does not need to be /// lowered, and Liveness determines its result is unused, backends should /// avoid lowering it. 
-pub fn mustLower(air: Air, inst: Air.Inst.Index, ip: InternPool) bool { +pub fn mustLower(air: Air, inst: Air.Inst.Index, ip: *const InternPool) bool { const data = air.instructions.items(.data)[inst]; return switch (air.instructions.items(.tag)[inst]) { .arg, diff --git a/src/InternPool.zig b/src/InternPool.zig index 7debd2c2a3..ffd72245d5 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -2992,7 +2992,7 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { }; } -fn indexToKeyFuncType(ip: InternPool, data: u32) Key.FuncType { +fn indexToKeyFuncType(ip: *const InternPool, data: u32) Key.FuncType { const type_function = ip.extraDataTrail(TypeFunction, data); const param_types = @ptrCast( []Index, @@ -3015,7 +3015,7 @@ fn indexToKeyFuncType(ip: InternPool, data: u32) Key.FuncType { }; } -fn indexToKeyEnum(ip: InternPool, data: u32, tag_mode: Key.EnumType.TagMode) Key { +fn indexToKeyEnum(ip: *const InternPool, data: u32, tag_mode: Key.EnumType.TagMode) Key { const enum_explicit = ip.extraDataTrail(EnumExplicit, data); const names = @ptrCast( []const NullTerminatedString, @@ -3038,7 +3038,7 @@ fn indexToKeyEnum(ip: InternPool, data: u32, tag_mode: Key.EnumType.TagMode) Key } }; } -fn indexToKeyBigInt(ip: InternPool, limb_index: u32, positive: bool) Key { +fn indexToKeyBigInt(ip: *const InternPool, limb_index: u32, positive: bool) Key { const int_info = ip.limbData(Int, limb_index); return .{ .int = .{ .ty = int_info.ty, @@ -4351,7 +4351,7 @@ fn addLimbsAssumeCapacity(ip: *InternPool, limbs: []const Limb) void { } } -fn extraDataTrail(ip: InternPool, comptime T: type, index: usize) struct { data: T, end: usize } { +fn extraDataTrail(ip: *const InternPool, comptime T: type, index: usize) struct { data: T, end: usize } { var result: T = undefined; const fields = @typeInfo(T).Struct.fields; inline for (fields, 0..) |field, i| { @@ -4384,12 +4384,12 @@ fn extraDataTrail(ip: InternPool, comptime T: type, index: usize) struct { data: }; } -fn extraData(ip: InternPool, comptime T: type, index: usize) T { +fn extraData(ip: *const InternPool, comptime T: type, index: usize) T { return extraDataTrail(ip, T, index).data; } /// Asserts the struct has 32-bit fields and the number of fields is evenly divisible by 2. -fn limbData(ip: InternPool, comptime T: type, index: usize) T { +fn limbData(ip: *const InternPool, comptime T: type, index: usize) T { switch (@sizeOf(Limb)) { @sizeOf(u32) => return extraData(ip, T, index), @sizeOf(u64) => {}, @@ -4413,7 +4413,7 @@ fn limbData(ip: InternPool, comptime T: type, index: usize) T { } /// This function returns the Limb slice that is trailing data after a payload. -fn limbSlice(ip: InternPool, comptime S: type, limb_index: u32, len: u32) []const Limb { +fn limbSlice(ip: *const InternPool, comptime S: type, limb_index: u32, len: u32) []const Limb { const field_count = @typeInfo(S).Struct.fields.len; switch (@sizeOf(Limb)) { @sizeOf(u32) => { @@ -4433,7 +4433,7 @@ const LimbsAsIndexes = struct { len: u32, }; -fn limbsSliceToIndex(ip: InternPool, limbs: []const Limb) LimbsAsIndexes { +fn limbsSliceToIndex(ip: *const InternPool, limbs: []const Limb) LimbsAsIndexes { const host_slice = switch (@sizeOf(Limb)) { @sizeOf(u32) => ip.extra.items, @sizeOf(u64) => ip.limbs.items, @@ -4447,7 +4447,7 @@ fn limbsSliceToIndex(ip: InternPool, limbs: []const Limb) LimbsAsIndexes { } /// This function converts Limb array indexes to a primitive slice type. 
-fn limbsIndexToSlice(ip: InternPool, limbs: LimbsAsIndexes) []const Limb { +fn limbsIndexToSlice(ip: *const InternPool, limbs: LimbsAsIndexes) []const Limb { return switch (@sizeOf(Limb)) { @sizeOf(u32) => ip.extra.items[limbs.start..][0..limbs.len], @sizeOf(u64) => ip.limbs.items[limbs.start..][0..limbs.len], @@ -4485,7 +4485,7 @@ test "basic usage" { try std.testing.expect(another_array_i32 == array_i32); } -pub fn childType(ip: InternPool, i: Index) Index { +pub fn childType(ip: *const InternPool, i: Index) Index { return switch (ip.indexToKey(i)) { .ptr_type => |ptr_type| ptr_type.elem_type, .vector_type => |vector_type| vector_type.child, @@ -4496,7 +4496,7 @@ pub fn childType(ip: InternPool, i: Index) Index { } /// Given a slice type, returns the type of the ptr field. -pub fn slicePtrType(ip: InternPool, i: Index) Index { +pub fn slicePtrType(ip: *const InternPool, i: Index) Index { switch (i) { .slice_const_u8_type => return .manyptr_const_u8_type, .slice_const_u8_sentinel_0_type => return .manyptr_const_u8_sentinel_0_type, @@ -4510,7 +4510,7 @@ pub fn slicePtrType(ip: InternPool, i: Index) Index { } /// Given a slice value, returns the value of the ptr field. -pub fn slicePtr(ip: InternPool, i: Index) Index { +pub fn slicePtr(ip: *const InternPool, i: Index) Index { const item = ip.items.get(@enumToInt(i)); switch (item.tag) { .ptr_slice => return ip.extraData(PtrSlice, item.data).ptr, @@ -4519,7 +4519,7 @@ pub fn slicePtr(ip: InternPool, i: Index) Index { } /// Given a slice value, returns the value of the len field. -pub fn sliceLen(ip: InternPool, i: Index) Index { +pub fn sliceLen(ip: *const InternPool, i: Index) Index { const item = ip.items.get(@enumToInt(i)); switch (item.tag) { .ptr_slice => return ip.extraData(PtrSlice, item.data).len, @@ -4702,7 +4702,7 @@ pub fn getCoercedInts(ip: *InternPool, gpa: Allocator, int: Key.Int, new_ty: Ind } }); } -pub fn indexToStructType(ip: InternPool, val: Index) Module.Struct.OptionalIndex { +pub fn indexToStructType(ip: *const InternPool, val: Index) Module.Struct.OptionalIndex { assert(val != .none); const tags = ip.items.items(.tag); if (tags[@enumToInt(val)] != .type_struct) return .none; @@ -4710,7 +4710,7 @@ pub fn indexToStructType(ip: InternPool, val: Index) Module.Struct.OptionalIndex return @intToEnum(Module.Struct.Index, datas[@enumToInt(val)]).toOptional(); } -pub fn indexToUnionType(ip: InternPool, val: Index) Module.Union.OptionalIndex { +pub fn indexToUnionType(ip: *const InternPool, val: Index) Module.Union.OptionalIndex { assert(val != .none); const tags = ip.items.items(.tag); switch (tags[@enumToInt(val)]) { @@ -4721,7 +4721,7 @@ pub fn indexToUnionType(ip: InternPool, val: Index) Module.Union.OptionalIndex { return @intToEnum(Module.Union.Index, datas[@enumToInt(val)]).toOptional(); } -pub fn indexToFuncType(ip: InternPool, val: Index) ?Key.FuncType { +pub fn indexToFuncType(ip: *const InternPool, val: Index) ?Key.FuncType { assert(val != .none); const tags = ip.items.items(.tag); const datas = ip.items.items(.data); @@ -4731,7 +4731,7 @@ pub fn indexToFuncType(ip: InternPool, val: Index) ?Key.FuncType { } } -pub fn indexToFunc(ip: InternPool, val: Index) Module.Fn.OptionalIndex { +pub fn indexToFunc(ip: *const InternPool, val: Index) Module.Fn.OptionalIndex { assert(val != .none); const tags = ip.items.items(.tag); if (tags[@enumToInt(val)] != .func) return .none; @@ -4739,7 +4739,7 @@ pub fn indexToFunc(ip: InternPool, val: Index) Module.Fn.OptionalIndex { return ip.extraData(Key.Func, 
datas[@enumToInt(val)]).index.toOptional(); } -pub fn indexToInferredErrorSetType(ip: InternPool, val: Index) Module.Fn.InferredErrorSet.OptionalIndex { +pub fn indexToInferredErrorSetType(ip: *const InternPool, val: Index) Module.Fn.InferredErrorSet.OptionalIndex { assert(val != .none); const tags = ip.items.items(.tag); if (tags[@enumToInt(val)] != .type_inferred_error_set) return .none; @@ -4748,7 +4748,7 @@ pub fn indexToInferredErrorSetType(ip: InternPool, val: Index) Module.Fn.Inferre } /// includes .comptime_int_type -pub fn isIntegerType(ip: InternPool, ty: Index) bool { +pub fn isIntegerType(ip: *const InternPool, ty: Index) bool { return switch (ty) { .usize_type, .isize_type, @@ -4769,7 +4769,7 @@ pub fn isIntegerType(ip: InternPool, ty: Index) bool { } /// does not include .enum_literal_type -pub fn isEnumType(ip: InternPool, ty: Index) bool { +pub fn isEnumType(ip: *const InternPool, ty: Index) bool { return switch (ty) { .atomic_order_type, .atomic_rmw_op_type, @@ -4783,35 +4783,35 @@ pub fn isEnumType(ip: InternPool, ty: Index) bool { }; } -pub fn isFunctionType(ip: InternPool, ty: Index) bool { +pub fn isFunctionType(ip: *const InternPool, ty: Index) bool { return ip.indexToKey(ty) == .func_type; } -pub fn isPointerType(ip: InternPool, ty: Index) bool { +pub fn isPointerType(ip: *const InternPool, ty: Index) bool { return ip.indexToKey(ty) == .ptr_type; } -pub fn isOptionalType(ip: InternPool, ty: Index) bool { +pub fn isOptionalType(ip: *const InternPool, ty: Index) bool { return ip.indexToKey(ty) == .opt_type; } /// includes .inferred_error_set_type -pub fn isErrorSetType(ip: InternPool, ty: Index) bool { +pub fn isErrorSetType(ip: *const InternPool, ty: Index) bool { return ty == .anyerror_type or switch (ip.indexToKey(ty)) { .error_set_type, .inferred_error_set_type => true, else => false, }; } -pub fn isInferredErrorSetType(ip: InternPool, ty: Index) bool { +pub fn isInferredErrorSetType(ip: *const InternPool, ty: Index) bool { return ip.indexToKey(ty) == .inferred_error_set_type; } -pub fn isErrorUnionType(ip: InternPool, ty: Index) bool { +pub fn isErrorUnionType(ip: *const InternPool, ty: Index) bool { return ip.indexToKey(ty) == .error_union_type; } -pub fn isAggregateType(ip: InternPool, ty: Index) bool { +pub fn isAggregateType(ip: *const InternPool, ty: Index) bool { return switch (ip.indexToKey(ty)) { .array_type, .vector_type, .anon_struct_type, .struct_type => true, else => false, @@ -4827,11 +4827,11 @@ pub fn mutateVarInit(ip: *InternPool, index: Index, init_index: Index) void { ip.extra.items[ip.items.items(.data)[@enumToInt(index)] + field_index] = @enumToInt(init_index); } -pub fn dump(ip: InternPool) void { +pub fn dump(ip: *const InternPool) void { dumpFallible(ip, std.heap.page_allocator) catch return; } -fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void { +fn dumpFallible(ip: *const InternPool, arena: Allocator) anyerror!void { const items_size = (1 + 4) * ip.items.len; const extra_size = 4 * ip.extra.items.len; const limbs_size = 8 * ip.limbs.items.len; @@ -5023,11 +5023,11 @@ pub fn structPtr(ip: *InternPool, index: Module.Struct.Index) *Module.Struct { return ip.allocated_structs.at(@enumToInt(index)); } -pub fn structPtrConst(ip: InternPool, index: Module.Struct.Index) *const Module.Struct { +pub fn structPtrConst(ip: *const InternPool, index: Module.Struct.Index) *const Module.Struct { return ip.allocated_structs.at(@enumToInt(index)); } -pub fn structPtrUnwrapConst(ip: InternPool, index: Module.Struct.OptionalIndex) ?*const 
Module.Struct { +pub fn structPtrUnwrapConst(ip: *const InternPool, index: Module.Struct.OptionalIndex) ?*const Module.Struct { return structPtrConst(ip, index.unwrap() orelse return null); } @@ -5035,7 +5035,7 @@ pub fn unionPtr(ip: *InternPool, index: Module.Union.Index) *Module.Union { return ip.allocated_unions.at(@enumToInt(index)); } -pub fn unionPtrConst(ip: InternPool, index: Module.Union.Index) *const Module.Union { +pub fn unionPtrConst(ip: *const InternPool, index: Module.Union.Index) *const Module.Union { return ip.allocated_unions.at(@enumToInt(index)); } @@ -5043,7 +5043,7 @@ pub fn funcPtr(ip: *InternPool, index: Module.Fn.Index) *Module.Fn { return ip.allocated_funcs.at(@enumToInt(index)); } -pub fn funcPtrConst(ip: InternPool, index: Module.Fn.Index) *const Module.Fn { +pub fn funcPtrConst(ip: *const InternPool, index: Module.Fn.Index) *const Module.Fn { return ip.allocated_funcs.at(@enumToInt(index)); } @@ -5051,7 +5051,7 @@ pub fn inferredErrorSetPtr(ip: *InternPool, index: Module.Fn.InferredErrorSet.In return ip.allocated_inferred_error_sets.at(@enumToInt(index)); } -pub fn inferredErrorSetPtrConst(ip: InternPool, index: Module.Fn.InferredErrorSet.Index) *const Module.Fn.InferredErrorSet { +pub fn inferredErrorSetPtrConst(ip: *const InternPool, index: Module.Fn.InferredErrorSet.Index) *const Module.Fn.InferredErrorSet { return ip.allocated_inferred_error_sets.at(@enumToInt(index)); } @@ -5182,7 +5182,7 @@ pub fn getString(ip: *InternPool, s: []const u8) OptionalNullTerminatedString { } } -pub fn stringToSlice(ip: InternPool, s: NullTerminatedString) [:0]const u8 { +pub fn stringToSlice(ip: *const InternPool, s: NullTerminatedString) [:0]const u8 { const string_bytes = ip.string_bytes.items; const start = @enumToInt(s); var end: usize = start; @@ -5190,11 +5190,11 @@ pub fn stringToSlice(ip: InternPool, s: NullTerminatedString) [:0]const u8 { return string_bytes[start..end :0]; } -pub fn stringToSliceUnwrap(ip: InternPool, s: OptionalNullTerminatedString) ?[:0]const u8 { +pub fn stringToSliceUnwrap(ip: *const InternPool, s: OptionalNullTerminatedString) ?[:0]const u8 { return ip.stringToSlice(s.unwrap() orelse return null); } -pub fn typeOf(ip: InternPool, index: Index) Index { +pub fn typeOf(ip: *const InternPool, index: Index) Index { // This optimization of static keys is required so that typeOf can be called // on static keys that haven't been added yet during static key initialization. // An alternative would be to topological sort the static keys, but this would @@ -5382,12 +5382,12 @@ pub fn typeOf(ip: InternPool, index: Index) Index { } /// Assumes that the enum's field indexes equal its value tags. 
-pub fn toEnum(ip: InternPool, comptime E: type, i: Index) E { +pub fn toEnum(ip: *const InternPool, comptime E: type, i: Index) E { const int = ip.indexToKey(i).enum_tag.int; return @intToEnum(E, ip.indexToKey(int).int.storage.u64); } -pub fn aggregateTypeLen(ip: InternPool, ty: Index) u64 { +pub fn aggregateTypeLen(ip: *const InternPool, ty: Index) u64 { return switch (ip.indexToKey(ty)) { .struct_type => |struct_type| ip.structPtrConst(struct_type.index.unwrap() orelse return 0).fields.count(), .anon_struct_type => |anon_struct_type| anon_struct_type.types.len, @@ -5397,7 +5397,7 @@ pub fn aggregateTypeLen(ip: InternPool, ty: Index) u64 { }; } -pub fn aggregateTypeLenIncludingSentinel(ip: InternPool, ty: Index) u64 { +pub fn aggregateTypeLenIncludingSentinel(ip: *const InternPool, ty: Index) u64 { return switch (ip.indexToKey(ty)) { .struct_type => |struct_type| ip.structPtrConst(struct_type.index.unwrap() orelse return 0).fields.count(), .anon_struct_type => |anon_struct_type| anon_struct_type.types.len, @@ -5407,7 +5407,7 @@ pub fn aggregateTypeLenIncludingSentinel(ip: InternPool, ty: Index) u64 { }; } -pub fn isNoReturn(ip: InternPool, ty: Index) bool { +pub fn isNoReturn(ip: *const InternPool, ty: Index) bool { return switch (ty) { .noreturn_type => true, else => switch (ip.indexToKey(ty)) { @@ -5420,7 +5420,7 @@ pub fn isNoReturn(ip: InternPool, ty: Index) bool { /// This is a particularly hot function, so we operate directly on encodings /// rather than the more straightforward implementation of calling `indexToKey`. -pub fn zigTypeTagOrPoison(ip: InternPool, index: Index) error{GenericPoison}!std.builtin.TypeId { +pub fn zigTypeTagOrPoison(ip: *const InternPool, index: Index) error{GenericPoison}!std.builtin.TypeId { return switch (index) { .u1_type, .u8_type, diff --git a/src/Liveness.zig b/src/Liveness.zig index 4f3d87d3c2..b12b638208 100644 --- a/src/Liveness.zig +++ b/src/Liveness.zig @@ -225,7 +225,7 @@ pub fn categorizeOperand( air: Air, inst: Air.Inst.Index, operand: Air.Inst.Index, - ip: InternPool, + ip: *const InternPool, ) OperandCategory { const air_tags = air.instructions.items(.tag); const air_datas = air.instructions.items(.data); @@ -1139,7 +1139,7 @@ fn analyzeInst( .aggregate_init => { const ty_pl = inst_datas[inst].ty_pl; const aggregate_ty = a.air.getRefType(ty_pl.ty); - const len = @intCast(usize, aggregate_ty.arrayLenIp(ip.*)); + const len = @intCast(usize, aggregate_ty.arrayLenIp(ip)); const elements = @ptrCast([]const Air.Inst.Ref, a.air.extra[ty_pl.payload..][0..len]); if (elements.len <= bpi - 1) { @@ -1291,7 +1291,7 @@ fn analyzeOperands( // If our result is unused and the instruction doesn't need to be lowered, backends will // skip the lowering of this instruction, so we don't want to record uses of operands. // That way, we can mark as many instructions as possible unused. - if (!immediate_death or a.air.mustLower(inst, ip.*)) { + if (!immediate_death or a.air.mustLower(inst, ip)) { // Note that it's important we iterate over the operands backwards, so that if a dying // operand is used multiple times we mark its last use as its death. var i = operands.len; @@ -1837,7 +1837,7 @@ fn AnalyzeBigOperands(comptime pass: LivenessPass) type { // If our result is unused and the instruction doesn't need to be lowered, backends will // skip the lowering of this instruction, so we don't want to record uses of operands. // That way, we can mark as many instructions as possible unused. 
- if (big.will_die_immediately and !big.a.air.mustLower(big.inst, ip.*)) return; + if (big.will_die_immediately and !big.a.air.mustLower(big.inst, ip)) return; const extra_byte = (big.operands_remaining - (bpi - 1)) / 31; const extra_bit = @intCast(u5, big.operands_remaining - (bpi - 1) - extra_byte * 31); diff --git a/src/Liveness/Verify.zig b/src/Liveness/Verify.zig index e8b024eb6f..a5fc592894 100644 --- a/src/Liveness/Verify.zig +++ b/src/Liveness/Verify.zig @@ -32,7 +32,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void { const tag = self.air.instructions.items(.tag); const data = self.air.instructions.items(.data); for (body) |inst| { - if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip.*)) { + if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip)) { // This instruction will not be lowered and should be ignored. continue; } @@ -325,7 +325,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void { .aggregate_init => { const ty_pl = data[inst].ty_pl; const aggregate_ty = self.air.getRefType(ty_pl.ty); - const len = @intCast(usize, aggregate_ty.arrayLenIp(ip.*)); + const len = @intCast(usize, aggregate_ty.arrayLenIp(ip)); const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]); var bt = self.liveness.iterateBigTomb(inst); diff --git a/src/Module.zig b/src/Module.zig index ffc6a95fe1..b1a74932d3 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -6726,7 +6726,7 @@ pub fn manyConstPtrType(mod: *Module, child_type: Type) Allocator.Error!Type { } pub fn adjustPtrTypeChild(mod: *Module, ptr_ty: Type, new_child: Type) Allocator.Error!Type { - const info = Type.ptrInfoIp(mod.intern_pool, ptr_ty.toIntern()); + const info = Type.ptrInfoIp(&mod.intern_pool, ptr_ty.toIntern()); return mod.ptrType(.{ .elem_type = new_child.toIntern(), diff --git a/src/Sema.zig b/src/Sema.zig index 8836e89528..b4e07d749e 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -33624,7 +33624,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { /// Returns the type of the AIR instruction. 
fn typeOf(sema: *Sema, inst: Air.Inst.Ref) Type { - return sema.getTmpAir().typeOf(inst, sema.mod.intern_pool); + return sema.getTmpAir().typeOf(inst, &sema.mod.intern_pool); } pub fn getTmpAir(sema: Sema) Air { diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index 5874440e50..d01a93dd0d 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -660,7 +660,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { for (body) |inst| { // TODO: remove now-redundant isUnused calls from AIR handler functions - if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip.*)) + if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip)) continue; const old_air_bookkeeping = self.air_bookkeeping; @@ -6412,10 +6412,10 @@ fn registerAlias(self: *Self, reg: Register, ty: Type) Register { fn typeOf(self: *Self, inst: Air.Inst.Ref) Type { const mod = self.bin_file.options.module.?; - return self.air.typeOf(inst, mod.intern_pool); + return self.air.typeOf(inst, &mod.intern_pool); } fn typeOfIndex(self: *Self, inst: Air.Inst.Index) Type { const mod = self.bin_file.options.module.?; - return self.air.typeOfIndex(inst, mod.intern_pool); + return self.air.typeOfIndex(inst, &mod.intern_pool); } diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index 360f52cb30..69a156999b 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -644,7 +644,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { for (body) |inst| { // TODO: remove now-redundant isUnused calls from AIR handler functions - if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip.*)) + if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip)) continue; const old_air_bookkeeping = self.air_bookkeeping; @@ -6317,10 +6317,10 @@ fn parseRegName(name: []const u8) ?Register { fn typeOf(self: *Self, inst: Air.Inst.Ref) Type { const mod = self.bin_file.options.module.?; - return self.air.typeOf(inst, mod.intern_pool); + return self.air.typeOf(inst, &mod.intern_pool); } fn typeOfIndex(self: *Self, inst: Air.Inst.Index) Type { const mod = self.bin_file.options.module.?; - return self.air.typeOfIndex(inst, mod.intern_pool); + return self.air.typeOfIndex(inst, &mod.intern_pool); } diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index 5417650dd5..809c388532 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -478,7 +478,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { for (body) |inst| { // TODO: remove now-redundant isUnused calls from AIR handler functions - if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip.*)) + if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip)) continue; const old_air_bookkeeping = self.air_bookkeeping; @@ -2737,10 +2737,10 @@ fn parseRegName(name: []const u8) ?Register { fn typeOf(self: *Self, inst: Air.Inst.Ref) Type { const mod = self.bin_file.options.module.?; - return self.air.typeOf(inst, mod.intern_pool); + return self.air.typeOf(inst, &mod.intern_pool); } fn typeOfIndex(self: *Self, inst: Air.Inst.Index) Type { const mod = self.bin_file.options.module.?; - return self.air.typeOfIndex(inst, mod.intern_pool); + return self.air.typeOfIndex(inst, &mod.intern_pool); } diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index 3bcdd5ad25..fde5424ddc 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -498,7 +498,7 @@ fn 
genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { for (body) |inst| { // TODO: remove now-redundant isUnused calls from AIR handler functions - if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip.*)) + if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip)) continue; const old_air_bookkeeping = self.air_bookkeeping; @@ -4883,10 +4883,10 @@ fn wantSafety(self: *Self) bool { fn typeOf(self: *Self, inst: Air.Inst.Ref) Type { const mod = self.bin_file.options.module.?; - return self.air.typeOf(inst, mod.intern_pool); + return self.air.typeOf(inst, &mod.intern_pool); } fn typeOfIndex(self: *Self, inst: Air.Inst.Index) Type { const mod = self.bin_file.options.module.?; - return self.air.typeOfIndex(inst, mod.intern_pool); + return self.air.typeOfIndex(inst, &mod.intern_pool); } diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index af2b37312d..e397cf29f8 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -2076,7 +2076,7 @@ fn genBody(func: *CodeGen, body: []const Air.Inst.Index) InnerError!void { const ip = &mod.intern_pool; for (body) |inst| { - if (func.liveness.isUnused(inst) and !func.air.mustLower(inst, ip.*)) { + if (func.liveness.isUnused(inst) and !func.air.mustLower(inst, ip)) { continue; } const old_bookkeeping_value = func.air_bookkeeping; @@ -7436,10 +7436,10 @@ fn airFrameAddress(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { fn typeOf(func: *CodeGen, inst: Air.Inst.Ref) Type { const mod = func.bin_file.base.options.module.?; - return func.air.typeOf(inst, mod.intern_pool); + return func.air.typeOf(inst, &mod.intern_pool); } fn typeOfIndex(func: *CodeGen, inst: Air.Inst.Index) Type { const mod = func.bin_file.base.options.module.?; - return func.air.typeOfIndex(inst, mod.intern_pool); + return func.air.typeOfIndex(inst, &mod.intern_pool); } diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index dbb3d977b8..b9cc3f7052 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -1738,7 +1738,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { try self.mir_to_air_map.put(self.gpa, mir_inst, inst); } - if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip.*)) continue; + if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip)) continue; wip_mir_log.debug("{}", .{self.fmtAir(inst)}); verbose_tracking_log.debug("{}", .{self.fmtTracking()}); @@ -11992,10 +11992,10 @@ fn hasAllFeatures(self: *Self, features: anytype) bool { fn typeOf(self: *Self, inst: Air.Inst.Ref) Type { const mod = self.bin_file.options.module.?; - return self.air.typeOf(inst, mod.intern_pool); + return self.air.typeOf(inst, &mod.intern_pool); } fn typeOfIndex(self: *Self, inst: Air.Inst.Index) Type { const mod = self.bin_file.options.module.?; - return self.air.typeOfIndex(inst, mod.intern_pool); + return self.air.typeOfIndex(inst, &mod.intern_pool); } diff --git a/src/codegen/c.zig b/src/codegen/c.zig index d705d6143e..0db223c6b6 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -489,12 +489,12 @@ pub const Function = struct { fn typeOf(f: *Function, inst: Air.Inst.Ref) Type { const mod = f.object.dg.module; - return f.air.typeOf(inst, mod.intern_pool); + return f.air.typeOf(inst, &mod.intern_pool); } fn typeOfIndex(f: *Function, inst: Air.Inst.Index) Type { const mod = f.object.dg.module; - return f.air.typeOfIndex(inst, mod.intern_pool); + return f.air.typeOfIndex(inst, &mod.intern_pool); } }; @@ -2808,7 +2808,7 @@ fn 
genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, const air_tags = f.air.instructions.items(.tag); for (body) |inst| { - if (f.liveness.isUnused(inst) and !f.air.mustLower(inst, ip.*)) + if (f.liveness.isUnused(inst) and !f.air.mustLower(inst, ip)) continue; const result_value = switch (air_tags[inst]) { diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 606c57b187..8cf6a51ba1 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -1574,7 +1574,7 @@ pub const Object = struct { }, .Pointer => { // Normalize everything that the debug info does not represent. - const ptr_info = Type.ptrInfoIp(mod.intern_pool, ty.toIntern()); + const ptr_info = Type.ptrInfoIp(&mod.intern_pool, ty.toIntern()); if (ptr_info.sentinel != .none or ptr_info.address_space != .generic or @@ -4330,7 +4330,7 @@ pub const FuncGen = struct { const ip = &mod.intern_pool; const air_tags = self.air.instructions.items(.tag); for (body, 0..) |inst, i| { - if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip.*)) + if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip)) continue; const opt_value: ?*llvm.Value = switch (air_tags[inst]) { @@ -8055,7 +8055,7 @@ pub const FuncGen = struct { const mod = fg.dg.module; const ip = &mod.intern_pool; for (body_tail[1..]) |body_inst| { - switch (fg.liveness.categorizeOperand(fg.air, body_inst, body_tail[0], ip.*)) { + switch (fg.liveness.categorizeOperand(fg.air, body_inst, body_tail[0], ip)) { .none => continue, .write, .noret, .complex => return false, .tomb => return true, @@ -9920,12 +9920,12 @@ pub const FuncGen = struct { fn typeOf(fg: *FuncGen, inst: Air.Inst.Ref) Type { const mod = fg.dg.module; - return fg.air.typeOf(inst, mod.intern_pool); + return fg.air.typeOf(inst, &mod.intern_pool); } fn typeOfIndex(fg: *FuncGen, inst: Air.Inst.Index) Type { const mod = fg.dg.module; - return fg.air.typeOfIndex(inst, mod.intern_pool); + return fg.air.typeOfIndex(inst, &mod.intern_pool); } }; diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 0fbcb47f71..ddd7f36435 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -1688,7 +1688,7 @@ pub const DeclGen = struct { const mod = self.module; const ip = &mod.intern_pool; // TODO: remove now-redundant isUnused calls from AIR handler functions - if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip.*)) + if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip)) return; const air_tags = self.air.instructions.items(.tag); @@ -3339,11 +3339,11 @@ pub const DeclGen = struct { fn typeOf(self: *DeclGen, inst: Air.Inst.Ref) Type { const mod = self.module; - return self.air.typeOf(inst, mod.intern_pool); + return self.air.typeOf(inst, &mod.intern_pool); } fn typeOfIndex(self: *DeclGen, inst: Air.Inst.Index) Type { const mod = self.module; - return self.air.typeOfIndex(inst, mod.intern_pool); + return self.air.typeOfIndex(inst, &mod.intern_pool); } }; diff --git a/src/print_air.zig b/src/print_air.zig index be7bc9610d..8da80e1360 100644 --- a/src/print_air.zig +++ b/src/print_air.zig @@ -978,6 +978,6 @@ const Writer = struct { fn typeOfIndex(w: *Writer, inst: Air.Inst.Index) Type { const mod = w.module; - return w.air.typeOfIndex(inst, mod.intern_pool); + return w.air.typeOfIndex(inst, &mod.intern_pool); } }; diff --git a/src/type.zig b/src/type.zig index f285caff95..fc7821b50b 100644 --- a/src/type.zig +++ b/src/type.zig @@ -102,7 +102,7 @@ pub const Type = struct { }; } - pub fn ptrInfoIp(ip: InternPool, ty: InternPool.Index) 
InternPool.Key.PtrType { + pub fn ptrInfoIp(ip: *const InternPool, ty: InternPool.Index) InternPool.Key.PtrType { return switch (ip.indexToKey(ty)) { .ptr_type => |p| p, .opt_type => |child| switch (ip.indexToKey(child)) { @@ -114,7 +114,7 @@ pub const Type = struct { } pub fn ptrInfo(ty: Type, mod: *const Module) Payload.Pointer.Data { - return Payload.Pointer.Data.fromKey(ptrInfoIp(mod.intern_pool, ty.toIntern())); + return Payload.Pointer.Data.fromKey(ptrInfoIp(&mod.intern_pool, ty.toIntern())); } pub fn eql(a: Type, b: Type, mod: *const Module) bool { @@ -1832,10 +1832,10 @@ pub const Type = struct { } pub fn isVolatilePtr(ty: Type, mod: *const Module) bool { - return isVolatilePtrIp(ty, mod.intern_pool); + return isVolatilePtrIp(ty, &mod.intern_pool); } - pub fn isVolatilePtrIp(ty: Type, ip: InternPool) bool { + pub fn isVolatilePtrIp(ty: Type, ip: *const InternPool) bool { return switch (ip.indexToKey(ty.toIntern())) { .ptr_type => |ptr_type| ptr_type.is_volatile, else => false, @@ -1920,10 +1920,10 @@ pub const Type = struct { /// For *T, returns T. /// For [*]T, returns T. pub fn childType(ty: Type, mod: *const Module) Type { - return childTypeIp(ty, mod.intern_pool); + return childTypeIp(ty, &mod.intern_pool); } - pub fn childTypeIp(ty: Type, ip: InternPool) Type { + pub fn childTypeIp(ty: Type, ip: *const InternPool) Type { return ip.childType(ty.toIntern()).toType(); } @@ -2164,10 +2164,10 @@ pub const Type = struct { /// Asserts the type is an array or vector or struct. pub fn arrayLen(ty: Type, mod: *const Module) u64 { - return arrayLenIp(ty, mod.intern_pool); + return arrayLenIp(ty, &mod.intern_pool); } - pub fn arrayLenIp(ty: Type, ip: InternPool) u64 { + pub fn arrayLenIp(ty: Type, ip: *const InternPool) u64 { return switch (ip.indexToKey(ty.toIntern())) { .vector_type => |vector_type| vector_type.len, .array_type => |array_type| array_type.len, @@ -2385,10 +2385,10 @@ pub const Type = struct { /// Asserts the type is a function or a function pointer. pub fn fnReturnType(ty: Type, mod: *Module) Type { - return fnReturnTypeIp(ty, mod.intern_pool); + return fnReturnTypeIp(ty, &mod.intern_pool); } - pub fn fnReturnTypeIp(ty: Type, ip: InternPool) Type { + pub fn fnReturnTypeIp(ty: Type, ip: *const InternPool) Type { return switch (ip.indexToKey(ty.toIntern())) { .ptr_type => |ptr_type| ip.indexToKey(ptr_type.elem_type).func_type.return_type, .func_type => |func_type| func_type.return_type, -- cgit v1.2.3 From a0d4ef0acf50db06fdde8ff229d20d15afc7d402 Mon Sep 17 00:00:00 2001 From: mlugg Date: Wed, 31 May 2023 04:42:18 +0100 Subject: InternPool: add representation for value of empty enums and unions This is a bit odd, because this value doesn't actually exist: see #15909. This gets all the empty enum/union behavior tests passing. Also adds an assertion to `Sema.analyzeBodyInner` which would have helped figure out the issue here much more quickly. 
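For context, a sketch of the language feature involved (illustrative
only, not from this diff): an empty enum is a valid type with zero
fields and therefore zero values, which is exactly the case the new
`empty_enum_value` representation has to describe:

    const std = @import("std");

    const Empty = enum {};

    comptime {
        // The type exists, but no value of it can ever be constructed;
        // #15909 tracks whether such a "value" should be representable.
        std.debug.assert(@typeInfo(Empty).Enum.fields.len == 0);
    }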
--- src/InternPool.zig | 24 +++++++++++++++++++++++- src/Sema.zig | 29 +++++++++++++++++++---------- src/TypedValue.zig | 1 + src/arch/wasm/CodeGen.zig | 1 + src/codegen.zig | 1 + src/codegen/c.zig | 1 + src/codegen/llvm.zig | 1 + src/codegen/spirv.zig | 1 + src/type.zig | 19 +++++++++++++++++-- src/value.zig | 1 + 10 files changed, 66 insertions(+), 13 deletions(-) (limited to 'src/arch') diff --git a/src/InternPool.zig b/src/InternPool.zig index 0b98dfcae5..9047041db8 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -206,6 +206,10 @@ pub const Key = union(enum) { enum_literal: NullTerminatedString, /// A specific enum tag, indicated by the integer tag value. enum_tag: Key.EnumTag, + /// An empty enum or union. TODO: this value's existence is strange, because such a type in + /// reality has no values. See #15909. + /// Payload is the type for which we are an empty value. + empty_enum_value: Index, float: Key.Float, ptr: Ptr, opt: Opt, @@ -670,6 +674,7 @@ pub const Key = union(enum) { .error_union, .enum_literal, .enum_tag, + .empty_enum_value, .inferred_error_set_type, => |info| { var hasher = std.hash.Wyhash.init(seed); @@ -957,6 +962,10 @@ pub const Key = union(enum) { const b_info = b.enum_tag; return std.meta.eql(a_info, b_info); }, + .empty_enum_value => |a_info| { + const b_info = b.empty_enum_value; + return a_info == b_info; + }, .variable => |a_info| { const b_info = b.variable; @@ -1192,6 +1201,7 @@ pub const Key = union(enum) { .enum_literal => .enum_literal_type, .undef => |x| x, + .empty_enum_value => |x| x, .simple_value => |s| switch (s) { .undefined => .undefined_type, @@ -1980,6 +1990,7 @@ pub const Tag = enum(u8) { /// The set of values that are encoded this way is: /// * An array or vector which has length 0. /// * A struct which has all fields comptime-known. + /// * An empty enum or union. TODO: this value's existence is strange, because such a type in reality has no values. See #15909 /// data is Index of the type, which is known to be zero bits at runtime. only_possible_value, /// data is extra index to Key.Union. 
@@ -2952,6 +2963,13 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { } }; }, + .type_enum_auto, + .type_enum_explicit, + .type_union_tagged, + .type_union_untagged, + .type_union_safety, + => .{ .empty_enum_value = ty }, + else => unreachable, }; }, @@ -3755,6 +3773,11 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { }); }, + .empty_enum_value => |enum_or_union_ty| ip.items.appendAssumeCapacity(.{ + .tag = .only_possible_value, + .data = @enumToInt(enum_or_union_ty), + }), + .float => |float| { switch (float.ty) { .f16_type => ip.items.appendAssumeCapacity(.{ @@ -5416,7 +5439,6 @@ pub fn isNoReturn(ip: *const InternPool, ty: Index) bool { .noreturn_type => true, else => switch (ip.indexToKey(ty)) { .error_set_type => |error_set_type| error_set_type.names.len == 0, - .enum_type => |enum_type| enum_type.names.len == 0, else => false, }, }; diff --git a/src/Sema.zig b/src/Sema.zig index 23a54da5ca..c2535eb4e9 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -1725,8 +1725,12 @@ fn analyzeBodyInner( break :blk Air.Inst.Ref.void_value; }, }; - if (sema.isNoReturn(air_inst)) + if (sema.isNoReturn(air_inst)) { + // We're going to assume that the body itself is noreturn, so let's ensure that now + assert(block.instructions.items.len > 0); + assert(sema.isNoReturn(Air.indexToRef(block.instructions.items[block.instructions.items.len - 1]))); break always_noreturn; + } map.putAssumeCapacity(inst, air_inst); i += 1; }; @@ -32150,6 +32154,7 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .error_union, .enum_literal, .enum_tag, + .empty_enum_value, .float, .ptr, .opt, @@ -33015,10 +33020,6 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { enum_field_names = try sema.arena.alloc(InternPool.NullTerminatedString, fields_len); } - if (fields_len == 0) { - return; - } - const bits_per_field = 4; const fields_per_u32 = 32 / bits_per_field; const bit_bags_count = std.math.divCeil(usize, fields_len, fields_per_u32) catch unreachable; @@ -33301,7 +33302,7 @@ fn generateUnionTagTypeNumbered( .decl = new_decl_index, .namespace = .none, .tag_ty = if (enum_field_vals.len == 0) - .noreturn_type + (try mod.intType(.unsigned, 0)).toIntern() else mod.intern_pool.typeOf(enum_field_vals[0]), .names = enum_field_names, @@ -33351,7 +33352,7 @@ fn generateUnionTagTypeSimple( .decl = new_decl_index, .namespace = .none, .tag_ty = if (enum_field_names.len == 0) - .noreturn_type + (try mod.intType(.unsigned, 0)).toIntern() else (try mod.smallestUnsignedInt(enum_field_names.len - 1)).toIntern(), .names = enum_field_names, @@ -33590,7 +33591,10 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { const tag_val = (try sema.typeHasOnePossibleValue(union_obj.tag_ty)) orelse return null; const fields = union_obj.fields.values(); - if (fields.len == 0) return Value.@"unreachable"; + if (fields.len == 0) { + const only = try mod.intern(.{ .empty_enum_value = ty.toIntern() }); + return only.toValue(); + } const only_field = fields[0]; if (only_field.ty.eql(resolved_ty, sema.mod)) { const msg = try Module.ErrorMsg.create( @@ -33630,7 +33634,10 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { if (enum_type.tag_ty.toType().hasRuntimeBits(mod)) return null; switch (enum_type.names.len) { - 0 => return Value.@"unreachable", + 0 => { + const only = try mod.intern(.{ .empty_enum_value = ty.toIntern() }); + return only.toValue(); + }, 1 => return try mod.getCoerced((if 
(enum_type.values.len == 0) try mod.intern(.{ .int = .{ .ty = enum_type.tag_ty, @@ -33655,6 +33662,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .error_union, .enum_literal, .enum_tag, + .empty_enum_value, .float, .ptr, .opt, @@ -34143,6 +34151,7 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .error_union, .enum_literal, .enum_tag, + .empty_enum_value, .float, .ptr, .opt, @@ -34848,7 +34857,7 @@ fn errorSetMerge(sema: *Sema, lhs: Type, rhs: Type) !Type { /// Avoids crashing the compiler when asking if inferred allocations are noreturn. fn isNoReturn(sema: *Sema, ref: Air.Inst.Ref) bool { - if (ref == .noreturn_type) return true; + if (ref == .unreachable_value) return true; if (Air.refToIndex(ref)) |inst| switch (sema.air_instructions.items(.tag)[inst]) { .inferred_alloc, .inferred_alloc_comptime => return false, else => {}, diff --git a/src/TypedValue.zig b/src/TypedValue.zig index 0128a3cbfb..81d25ed98a 100644 --- a/src/TypedValue.zig +++ b/src/TypedValue.zig @@ -248,6 +248,7 @@ pub fn print( try writer.writeAll(")"); return; }, + .empty_enum_value => return writer.writeAll("(empty enum value)"), .float => |float| switch (float.storage) { inline else => |x| return writer.print("{}", .{x}), }, diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index e397cf29f8..e92bd8f676 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -3156,6 +3156,7 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue { .extern_func, .func, .enum_literal, + .empty_enum_value, => unreachable, // non-runtime values .int => { const int_info = ty.intInfo(mod); diff --git a/src/codegen.zig b/src/codegen.zig index 7fd432dceb..1470b94f1b 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -242,6 +242,7 @@ pub fn generateSymbol( .extern_func, .func, .enum_literal, + .empty_enum_value, => unreachable, // non-runtime values .int => { const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse return error.Overflow; diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 56f6c669df..eea6e14896 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -946,6 +946,7 @@ pub const DeclGen = struct { .extern_func, .func, .enum_literal, + .empty_enum_value, => unreachable, // non-runtime values .int => |int| switch (int.storage) { .u64, .i64, .big_int => try writer.print("{}", .{try dg.fmtIntLiteral(ty, val, location)}), diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 8b78c4067a..91dcbe11a5 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -3246,6 +3246,7 @@ pub const DeclGen = struct { }, .variable, .enum_literal, + .empty_enum_value, => unreachable, // non-runtime values .extern_func, .func => { const fn_decl_index = switch (val_key) { diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index ddd7f36435..c7bea80eb6 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -660,6 +660,7 @@ pub const DeclGen = struct { .extern_func, .func, .enum_literal, + .empty_enum_value, => unreachable, // non-runtime values .int => try self.addInt(ty, val), .err => |err| { diff --git a/src/type.zig b/src/type.zig index 049ca1ebd8..0c4dfb7e7e 100644 --- a/src/type.zig +++ b/src/type.zig @@ -439,6 +439,7 @@ pub const Type = struct { .error_union, .enum_literal, .enum_tag, + .empty_enum_value, .float, .ptr, .opt, @@ -655,6 +656,7 @@ pub const Type = struct { .error_union, .enum_literal, .enum_tag, + .empty_enum_value, .float, .ptr, .opt, @@ -764,6 +766,7 @@ pub const 
Type = struct { .error_union, .enum_literal, .enum_tag, + .empty_enum_value, .float, .ptr, .opt, @@ -1098,6 +1101,7 @@ pub const Type = struct { .error_union, .enum_literal, .enum_tag, + .empty_enum_value, .float, .ptr, .opt, @@ -1515,6 +1519,7 @@ pub const Type = struct { .error_union, .enum_literal, .enum_tag, + .empty_enum_value, .float, .ptr, .opt, @@ -1749,6 +1754,7 @@ pub const Type = struct { .error_union, .enum_literal, .enum_tag, + .empty_enum_value, .float, .ptr, .opt, @@ -2302,6 +2308,7 @@ pub const Type = struct { .error_union, .enum_literal, .enum_tag, + .empty_enum_value, .float, .ptr, .opt, @@ -2584,7 +2591,10 @@ pub const Type = struct { .union_type => |union_type| { const union_obj = mod.unionPtr(union_type.index); const tag_val = (try union_obj.tag_ty.onePossibleValue(mod)) orelse return null; - if (union_obj.fields.count() == 0) return Value.@"unreachable"; + if (union_obj.fields.count() == 0) { + const only = try mod.intern(.{ .empty_enum_value = ty.toIntern() }); + return only.toValue(); + } const only_field = union_obj.fields.values()[0]; const val_val = (try only_field.ty.onePossibleValue(mod)) orelse return null; const only = try mod.intern(.{ .un = .{ @@ -2613,7 +2623,10 @@ pub const Type = struct { if (enum_type.tag_ty.toType().hasRuntimeBits(mod)) return null; switch (enum_type.names.len) { - 0 => return Value.@"unreachable", + 0 => { + const only = try mod.intern(.{ .empty_enum_value = ty.toIntern() }); + return only.toValue(); + }, 1 => { if (enum_type.values.len == 0) { const only = try mod.intern(.{ .enum_tag = .{ @@ -2645,6 +2658,7 @@ pub const Type = struct { .error_union, .enum_literal, .enum_tag, + .empty_enum_value, .float, .ptr, .opt, @@ -2790,6 +2804,7 @@ pub const Type = struct { .error_union, .enum_literal, .enum_tag, + .empty_enum_value, .float, .ptr, .opt, diff --git a/src/value.zig b/src/value.zig index fe6a15154c..89ba1fba67 100644 --- a/src/value.zig +++ b/src/value.zig @@ -441,6 +441,7 @@ pub const Value = struct { .err, .enum_literal, .enum_tag, + .empty_enum_value, .float, => val, -- cgit v1.2.3 From bb526426e75ed456a7db6afa32447e5a76ac7ca1 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 31 May 2023 15:00:48 -0700 Subject: InternPool: remove memoized_decl This is neither a type nor a value. Simplifies `addStrLit` as well as the many places that switch on `InternPool.Key`. This is a partial revert of bec29b9e498e08202679aa29a45dab2a06a69a1e. --- src/InternPool.zig | 36 +---------------------------------- src/Module.zig | 5 +++++ src/Sema.zig | 48 +++++++++++++++++++---------------------------- src/TypedValue.zig | 4 +--- src/arch/wasm/CodeGen.zig | 4 +--- src/codegen.zig | 4 +--- src/codegen/c.zig | 1 - src/codegen/llvm.zig | 4 +--- src/codegen/spirv.zig | 4 +--- src/type.zig | 9 --------- src/value.zig | 4 +--- 11 files changed, 31 insertions(+), 92 deletions(-) (limited to 'src/arch') diff --git a/src/InternPool.zig b/src/InternPool.zig index 4fc7e3f4e7..59ff9e405b 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -221,8 +221,6 @@ pub const Key = union(enum) { /// An instance of a union. un: Union, - /// A declaration with a memoized value. - memoized_decl: MemoizedDecl, /// A comptime function call with a memoized result. 
memoized_call: Key.MemoizedCall, @@ -639,11 +637,6 @@ pub const Key = union(enum) { }; }; - pub const MemoizedDecl = struct { - val: Index, - decl: Module.Decl.Index, - }; - pub const MemoizedCall = struct { func: Module.Fn.Index, arg_values: []const Index, @@ -853,8 +846,6 @@ pub const Key = union(enum) { return hasher.final(); }, - .memoized_decl => |x| WyhashKing.hash(seed, asBytes(&x.val)), - .memoized_call => |memoized_call| { var hasher = std.hash.Wyhash.init(seed); std.hash.autoHash(&hasher, memoized_call.func); @@ -1134,11 +1125,6 @@ pub const Key = union(enum) { a_info.is_noinline == b_info.is_noinline; }, - .memoized_decl => |a_info| { - const b_info = b.memoized_decl; - return a_info.val == b_info.val; - }, - .memoized_call => |a_info| { const b_info = b.memoized_call; return a_info.func == b_info.func and @@ -1197,9 +1183,7 @@ pub const Key = union(enum) { .generic_poison => .generic_poison_type, }, - .memoized_decl, - .memoized_call, - => unreachable, + .memoized_call => unreachable, }; } }; @@ -1481,7 +1465,6 @@ pub const Index = enum(u32) { }, repeated: struct { data: *Repeated }, - memoized_decl: struct { data: *Key.MemoizedDecl }, memoized_call: struct { const @"data.args_len" = opaque {}; data: *MemoizedCall, @@ -1989,9 +1972,6 @@ pub const Tag = enum(u8) { /// data is extra index to `Repeated`. repeated, - /// A memoized declaration value. - /// data is extra index to `Key.MemoizedDecl` - memoized_decl, /// A memoized comptime function call result. /// data is extra index to `MemoizedCall` memoized_call, @@ -2004,7 +1984,6 @@ pub const Tag = enum(u8) { const ExternFunc = Key.ExternFunc; const Func = Key.Func; const Union = Key.Union; - const MemoizedDecl = Key.MemoizedDecl; const TypePointer = Key.PtrType; fn Payload(comptime tag: Tag) type { @@ -2082,7 +2061,6 @@ pub const Tag = enum(u8) { .bytes => Bytes, .aggregate => Aggregate, .repeated => Repeated, - .memoized_decl => MemoizedDecl, .memoized_call => MemoizedCall, }; } @@ -3000,7 +2978,6 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { .enum_literal => .{ .enum_literal = @intToEnum(NullTerminatedString, data) }, .enum_tag => .{ .enum_tag = ip.extraData(Tag.EnumTag, data) }, - .memoized_decl => .{ .memoized_decl = ip.extraData(Key.MemoizedDecl, data) }, .memoized_call => { const extra = ip.extraDataTrail(MemoizedCall, data); return .{ .memoized_call = .{ @@ -3995,14 +3972,6 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { }); }, - .memoized_decl => |memoized_decl| { - assert(memoized_decl.val != .none); - ip.items.appendAssumeCapacity(.{ - .tag = .memoized_decl, - .data = try ip.addExtra(gpa, memoized_decl), - }); - }, - .memoized_call => |memoized_call| { for (memoized_call.arg_values) |arg| assert(arg != .none); try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(MemoizedCall).Struct.fields.len + @@ -5005,7 +4974,6 @@ fn dumpFallible(ip: *const InternPool, arena: Allocator) anyerror!void { .only_possible_value => 0, .union_value => @sizeOf(Key.Union), - .memoized_decl => @sizeOf(Key.MemoizedDecl), .memoized_call => b: { const info = ip.extraData(MemoizedCall, data); break :b @sizeOf(MemoizedCall) + (@sizeOf(Index) * info.args_len); @@ -5383,7 +5351,6 @@ pub fn typeOf(ip: *const InternPool, index: Index) Index { .float_comptime_float => .comptime_float_type, - .memoized_decl => unreachable, .memoized_call => unreachable, }, @@ -5624,7 +5591,6 @@ pub fn zigTypeTagOrPoison(ip: *const InternPool, index: Index) error{GenericPois .aggregate, .repeated, // memoization, not types 
- .memoized_decl, .memoized_call, => unreachable, }, diff --git a/src/Module.zig b/src/Module.zig index 862025d8f9..cf0d222a2e 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -88,6 +88,9 @@ embed_table: std.StringHashMapUnmanaged(*EmbedFile) = .{}, /// Stores all Type and Value objects; periodically garbage collected. intern_pool: InternPool = .{}, +/// This is currently only used for string literals. +memoized_decls: std.AutoHashMapUnmanaged(InternPool.Index, Decl.Index) = .{}, + /// The set of all the generic function instantiations. This is used so that when a generic /// function is called twice with the same comptime parameter arguments, both calls dispatch /// to the same function. @@ -561,6 +564,7 @@ pub const Decl = struct { } mod.destroyFunc(func); } + _ = mod.memoized_decls.remove(decl.val.ip_index); if (decl.value_arena) |value_arena| { value_arena.deinit(gpa); decl.value_arena = null; @@ -3285,6 +3289,7 @@ pub fn deinit(mod: *Module) void { mod.namespaces_free_list.deinit(gpa); mod.allocated_namespaces.deinit(gpa); + mod.memoized_decls.deinit(gpa); mod.intern_pool.deinit(gpa); } diff --git a/src/Sema.zig b/src/Sema.zig index 2dba3cac27..be531af60f 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -5183,33 +5183,26 @@ fn zirStr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins fn addStrLit(sema: *Sema, block: *Block, bytes: []const u8) CompileError!Air.Inst.Ref { const mod = sema.mod; - const memoized_decl_index = memoized: { - const ty = try mod.arrayType(.{ - .len = bytes.len, - .child = .u8_type, - .sentinel = .zero_u8, + const gpa = sema.gpa; + const ty = try mod.arrayType(.{ + .len = bytes.len, + .child = .u8_type, + .sentinel = .zero_u8, + }); + const val = try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .bytes = bytes }, + } }); + const gop = try mod.memoized_decls.getOrPut(gpa, val); + if (!gop.found_existing) { + const new_decl_index = try mod.createAnonymousDecl(block, .{ + .ty = ty, + .val = val.toValue(), }); - const val = try mod.intern(.{ .aggregate = .{ - .ty = ty.toIntern(), - .storage = .{ .bytes = bytes }, - } }); - - _ = try sema.typeHasRuntimeBits(ty); - const new_decl_index = try mod.createAnonymousDecl(block, .{ .ty = ty, .val = val.toValue() }); - errdefer mod.abortAnonDecl(new_decl_index); - - const memoized_index = try mod.intern(.{ .memoized_decl = .{ - .val = val, - .decl = new_decl_index, - } }); - const memoized_decl_index = mod.intern_pool.indexToKey(memoized_index).memoized_decl.decl; - if (memoized_decl_index != new_decl_index) - mod.abortAnonDecl(new_decl_index) - else - try mod.finalizeAnonDecl(new_decl_index); - break :memoized memoized_decl_index; - }; - return sema.analyzeDeclRef(memoized_decl_index); + gop.value_ptr.* = new_decl_index; + try mod.finalizeAnonDecl(new_decl_index); + } + return sema.analyzeDeclRef(gop.value_ptr.*); } fn zirInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -32156,7 +32149,6 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .aggregate, .un, // memoization, not types - .memoized_decl, .memoized_call, => unreachable, }, @@ -33666,7 +33658,6 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .aggregate, .un, // memoization, not types - .memoized_decl, .memoized_call, => unreachable, }, @@ -34155,7 +34146,6 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .aggregate, .un, // memoization, not types - .memoized_decl, .memoized_call, => unreachable, }, 
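The `memoized_decls` map introduced above reduces string-literal
memoization to an ordinary get-or-put keyed on the interned value.
Roughly, with stand-in index types (a sketch, not the compiler's real
API):

    const std = @import("std");

    // Stand-ins for InternPool.Index and Decl.Index.
    const ValueIndex = enum(u32) { _ };
    const DeclIndex = enum(u32) { _ };

    const Memo = struct {
        map: std.AutoHashMapUnmanaged(ValueIndex, DeclIndex) = .{},

        // Returns the decl already associated with `val`, or records
        // `new_decl` as its canonical decl on first sight.
        fn intern(m: *Memo, gpa: std.mem.Allocator, val: ValueIndex, new_decl: DeclIndex) !DeclIndex {
            const gop = try m.map.getOrPut(gpa, val);
            if (!gop.found_existing) gop.value_ptr.* = new_decl;
            return gop.value_ptr.*;
        }
    };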
diff --git a/src/TypedValue.zig b/src/TypedValue.zig index 81d25ed98a..1ff3ce9415 100644 --- a/src/TypedValue.zig +++ b/src/TypedValue.zig @@ -279,9 +279,7 @@ pub fn print( } else try writer.writeAll("..."); return writer.writeAll(" }"); }, - .memoized_decl, - .memoized_call, - => unreachable, + .memoized_call => unreachable, }, }; } diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index e92bd8f676..9403223f30 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -3255,9 +3255,7 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue { else => unreachable, }, .un => return func.fail("Wasm TODO: LowerConstant for {}", .{ty.fmt(mod)}), - .memoized_decl, - .memoized_call, - => unreachable, + .memoized_call => unreachable, } } diff --git a/src/codegen.zig b/src/codegen.zig index 1470b94f1b..a4c88d1258 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -610,9 +610,7 @@ pub fn generateSymbol( } } }, - .memoized_decl, - .memoized_call, - => unreachable, + .memoized_call => unreachable, } return .ok; } diff --git a/src/codegen/c.zig b/src/codegen/c.zig index eea6e14896..4b325122ca 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -925,7 +925,6 @@ pub const DeclGen = struct { .error_set_type, .inferred_error_set_type, // memoization, not values - .memoized_decl, .memoized_call, => unreachable, diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 91dcbe11a5..5da91e5573 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -3796,9 +3796,7 @@ pub const DeclGen = struct { return llvm_union_ty.constNamedStruct(&fields, fields_len); } }, - .memoized_decl, - .memoized_call, - => unreachable, + .memoized_call => unreachable, } } diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index c7bea80eb6..85caec9490 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -831,9 +831,7 @@ pub const DeclGen = struct { try self.addUndef(layout.padding); }, - .memoized_decl, - .memoized_call, - => unreachable, + .memoized_call => unreachable, } } }; diff --git a/src/type.zig b/src/type.zig index bbc2a2ce60..61c9377b1d 100644 --- a/src/type.zig +++ b/src/type.zig @@ -446,7 +446,6 @@ pub const Type = struct { .aggregate, .un, // memoization, not types - .memoized_decl, .memoized_call, => unreachable, } @@ -663,7 +662,6 @@ pub const Type = struct { .aggregate, .un, // memoization, not types - .memoized_decl, .memoized_call, => unreachable, }, @@ -773,7 +771,6 @@ pub const Type = struct { .aggregate, .un, // memoization, not types - .memoized_decl, .memoized_call, => unreachable, }; @@ -1108,7 +1105,6 @@ pub const Type = struct { .aggregate, .un, // memoization, not types - .memoized_decl, .memoized_call, => unreachable, }, @@ -1526,7 +1522,6 @@ pub const Type = struct { .aggregate, .un, // memoization, not types - .memoized_decl, .memoized_call, => unreachable, }, @@ -1761,7 +1756,6 @@ pub const Type = struct { .aggregate, .un, // memoization, not types - .memoized_decl, .memoized_call, => unreachable, } @@ -2315,7 +2309,6 @@ pub const Type = struct { .aggregate, .un, // memoization, not types - .memoized_decl, .memoized_call, => unreachable, }, @@ -2666,7 +2659,6 @@ pub const Type = struct { .aggregate, .un, // memoization, not types - .memoized_decl, .memoized_call, => unreachable, }, @@ -2812,7 +2804,6 @@ pub const Type = struct { .aggregate, .un, // memoization, not types - .memoized_decl, .memoized_call, => unreachable, }, diff --git a/src/value.zig b/src/value.zig index db37d8e9e7..aba0af176e 100644 --- 
a/src/value.zig
+++ b/src/value.zig
@@ -478,9 +478,7 @@ pub const Value = struct {
                 .val = un.val.toValue(),
             }),

-            .memoized_decl,
-            .memoized_call,
-            => unreachable,
+            .memoized_call => unreachable,
         };
     }

--
cgit v1.2.3


From e2174428e81fc5d84c54ae4833002d943adab38a Mon Sep 17 00:00:00 2001
From: Jacob Young
Date: Thu, 1 Jun 2023 19:49:57 -0400
Subject: wasm: implement missing case

---
 src/arch/wasm/CodeGen.zig | 1 +
 1 file changed, 1 insertion(+)

(limited to 'src/arch')

diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig
index 9403223f30..7b1258155c 100644
--- a/src/arch/wasm/CodeGen.zig
+++ b/src/arch/wasm/CodeGen.zig
@@ -3320,6 +3320,7 @@ fn valueAsI32(func: *const CodeGen, val: Value, ty: Type) i32 {
         .enum_tag => |enum_tag| intIndexAsI32(&mod.intern_pool, enum_tag.int, mod),
         .int => |int| intStorageAsI32(int.storage, mod),
         .ptr => |ptr| intIndexAsI32(&mod.intern_pool, ptr.addr.int, mod),
+        .err => |err| @bitCast(i32, mod.global_error_set.get(mod.intern_pool.stringToSlice(err.name)).?),
         else => unreachable,
     },
 }
--
cgit v1.2.3


From 69b7b910929e84248671377e1743477757e66837 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Thu, 1 Jun 2023 23:46:04 -0700
Subject: compiler: eliminate Decl.value_arena and Sema.perm_arena

The main motivation for this commit is eliminating Decl.value_arena.
Everything else is dominoes.

Decl.name used to be stored in the GPA; now it is stored in InternPool.
It ended up being simpler to migrate other strings to be interned as
well, such as struct field names, union field names, and a few others.
This ended up requiring a big diff, sorry about that. But the changes
are pretty nice; we finally start to take advantage of InternPool's
existence.

global_error_set and error_name_list are simplified: they are now a
single ArrayHashMap(NullTerminatedString, void) in which the index is
the error tag value.

Module.tmp_hack_arena is re-introduced (it was removed in
eeff407941560ce8eb5b737b2436dfa93cfd3a0c) in order to deal with
comptime_args, optimized_order, and struct and union fields. After
structs and unions get moved into InternPool properly, tmp_hack_arena
can be deleted again.
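As a compact illustration of the simplified error set described above (a sketch under assumed names, not the compiler's actual code): a single array hash map keyed by interned name, where the entry index is the error tag value. Name here is a hypothetical stand-in for InternPool.NullTerminatedString; the real implementation is in the Module.zig hunks below.

const std = @import("std");

const Name = enum(u32) { empty = 0, _ };
const ErrorInt = u32;
const GlobalErrorSet = std.AutoArrayHashMapUnmanaged(Name, void);

fn getErrorValue(gpa: std.mem.Allocator, set: *GlobalErrorSet, name: Name) !ErrorInt {
    // getOrPut returns the existing entry index on a hit and appends on a
    // miss; either way the index is the stable tag value. Index 0 is
    // reserved at init for the empty name, i.e. "(no error)".
    const gop = try set.getOrPut(gpa, name);
    return @intCast(ErrorInt, gop.index);
}

fn errorName(set: GlobalErrorSet, tag: ErrorInt) Name {
    // The reverse lookup (@intToError at comptime) is a plain array index.
    return set.keys()[tag];
}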
---
 src/Compilation.zig          |   21 +-
 src/InternPool.zig           |   33 ++
 src/Module.zig               |  332 ++++--------
 src/Sema.zig                 | 1204 +++++++++++++++++++++++-------------------
 src/TypedValue.zig           |   27 +-
 src/arch/aarch64/CodeGen.zig |    2 +-
 src/arch/sparc64/CodeGen.zig |    2 -
 src/arch/wasm/CodeGen.zig    |   26 +-
 src/arch/x86_64/CodeGen.zig  |    2 +-
 src/codegen.zig              |   20 +-
 src/codegen/c.zig            |   72 ++-
 src/codegen/c/type.zig       |   28 +-
 src/codegen/llvm.zig         |  122 ++---
 src/codegen/spirv.zig        |   13 +-
 src/link.zig                 |   11 -
 src/link/C.zig               |    9 +-
 src/link/Coff.zig            |   50 +-
 src/link/Dwarf.zig           |   25 +-
 src/link/Elf.zig             |   33 +-
 src/link/MachO.zig           |   29 +-
 src/link/Plan9.zig           |   30 +-
 src/link/SpirV.zig           |    5 +-
 src/link/Wasm.zig            |   45 +-
 src/print_air.zig            |    3 +-
 src/type.zig                 |   32 +-
 src/value.zig                |  116 ++--
 26 files changed, 1160 insertions(+), 1132 deletions(-)

(limited to 'src/arch')

diff --git a/src/Compilation.zig b/src/Compilation.zig
index 0ee916c446..64f947c3c3 100644
--- a/src/Compilation.zig
+++ b/src/Compilation.zig
@@ -1317,7 +1317,7 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
         .global_zir_cache = global_zir_cache,
         .local_zir_cache = local_zir_cache,
         .emit_h = emit_h,
-        .error_name_list = .{},
+        .tmp_hack_arena = std.heap.ArenaAllocator.init(gpa),
     };
     try module.init();

@@ -2627,7 +2627,7 @@ pub fn getAllErrorsAlloc(self: *Compilation) !ErrorBundle {
         var it = module.failed_files.iterator();
         while (it.next()) |entry| {
             if (entry.value_ptr.*) |msg| {
-                try addModuleErrorMsg(&bundle, msg.*);
+                try addModuleErrorMsg(module, &bundle, msg.*);
             } else {
                 // Must be ZIR errors. Note that this may include AST errors.
                 // addZirErrorMessages asserts that the tree is loaded.
@@ -2640,7 +2640,7 @@
         var it = module.failed_embed_files.iterator();
         while (it.next()) |entry| {
             const msg = entry.value_ptr.*;
-            try addModuleErrorMsg(&bundle, msg.*);
+            try addModuleErrorMsg(module, &bundle, msg.*);
         }
     }
     {
@@ -2650,7 +2650,7 @@
             // Skip errors for Decls within files that had a parse failure.
             // We'll try again once parsing succeeds.
if (module.declFileScope(decl_index).okToReportErrors()) { - try addModuleErrorMsg(&bundle, entry.value_ptr.*.*); + try addModuleErrorMsg(module, &bundle, entry.value_ptr.*.*); } } } for (module.failed_exports.values()) |value| { - try addModuleErrorMsg(&bundle, value.*); + try addModuleErrorMsg(module, &bundle, value.*); } } @@ -2728,7 +2728,7 @@ pub fn getAllErrorsAlloc(self: *Compilation) !ErrorBundle { }; } - try addModuleErrorMsg(&bundle, err_msg); + try addModuleErrorMsg(module, &bundle, err_msg); } } @@ -2784,8 +2784,9 @@ pub const ErrorNoteHashContext = struct { } }; -pub fn addModuleErrorMsg(eb: *ErrorBundle.Wip, module_err_msg: Module.ErrorMsg) !void { +pub fn addModuleErrorMsg(mod: *Module, eb: *ErrorBundle.Wip, module_err_msg: Module.ErrorMsg) !void { const gpa = eb.gpa; + const ip = &mod.intern_pool; const err_source = module_err_msg.src_loc.file_scope.getSource(gpa) catch |err| { const file_path = try module_err_msg.src_loc.file_scope.fullPath(gpa); defer gpa.free(file_path); @@ -2811,7 +2812,7 @@ pub fn addModuleErrorMsg(eb: *ErrorBundle.Wip, module_err_msg: Module.ErrorMsg) .src_loc = .none, }); break; - } else if (module_reference.decl == null) { + } else if (module_reference.decl == .none) { try ref_traces.append(gpa, .{ .decl_name = 0, .src_loc = .none, @@ -2824,7 +2825,7 @@ pub fn addModuleErrorMsg(eb: *ErrorBundle.Wip, module_err_msg: Module.ErrorMsg) const rt_file_path = try module_reference.src_loc.file_scope.fullPath(gpa); defer gpa.free(rt_file_path); try ref_traces.append(gpa, .{ - .decl_name = try eb.addString(std.mem.sliceTo(module_reference.decl.?, 0)), + .decl_name = try eb.addString(ip.stringToSliceUnwrap(module_reference.decl).?), .src_loc = try eb.addSourceLocation(.{ .src_path = try eb.addString(rt_file_path), .span_start = span.start, diff --git a/src/InternPool.zig b/src/InternPool.zig index d593ad1e17..ecdd30d110 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -124,6 +124,8 @@ pub const String = enum(u32) { /// An index into `string_bytes`. pub const NullTerminatedString = enum(u32) { + /// This is distinct from `none` - it is a valid index that represents empty string. + empty = 0, _, pub fn toString(self: NullTerminatedString) String { @@ -157,6 +159,8 @@ pub const NullTerminatedString = enum(u32) { /// An index into `string_bytes` which might be `none`. pub const OptionalNullTerminatedString = enum(u32) { + /// This is distinct from `none` - it is a valid index that represents empty string. + empty = 0, none = std.math.maxInt(u32), _, @@ -2447,6 +2451,9 @@ pub const MemoizedCall = struct { pub fn init(ip: *InternPool, gpa: Allocator) !void { assert(ip.items.len == 0); + // Reserve string index 0 for an empty string. + assert((try ip.getOrPutString(gpa, "")) == .empty); + // So that we can use `catch unreachable` below. 
try ip.items.ensureUnusedCapacity(gpa, static_keys.len); try ip.map.ensureUnusedCapacity(gpa, static_keys.len); @@ -5222,6 +5229,28 @@ pub fn getOrPutString( return ip.getOrPutTrailingString(gpa, s.len + 1); } +pub fn getOrPutStringFmt( + ip: *InternPool, + gpa: Allocator, + comptime format: []const u8, + args: anytype, +) Allocator.Error!NullTerminatedString { + const start = ip.string_bytes.items.len; + try ip.string_bytes.writer(gpa).print(format, args); + try ip.string_bytes.append(gpa, 0); + return ip.getOrPutTrailingString(gpa, ip.string_bytes.items.len - start); +} + +pub fn getOrPutStringOpt( + ip: *InternPool, + gpa: Allocator, + optional_string: ?[]const u8, +) Allocator.Error!OptionalNullTerminatedString { + const s = optional_string orelse return .none; + const interned = try getOrPutString(ip, gpa, s); + return interned.toOptional(); +} + /// Uses the last len bytes of ip.string_bytes as the key. pub fn getOrPutTrailingString( ip: *InternPool, @@ -5273,6 +5302,10 @@ pub fn stringToSliceUnwrap(ip: *const InternPool, s: OptionalNullTerminatedStrin return ip.stringToSlice(s.unwrap() orelse return null); } +pub fn stringEqlSlice(ip: *const InternPool, a: NullTerminatedString, b: []const u8) bool { + return std.mem.eql(u8, stringToSlice(ip, a), b); +} + pub fn typeOf(ip: *const InternPool, index: Index) Index { // This optimization of static keys is required so that typeOf can be called // on static keys that haven't been added yet during static key initialization. diff --git a/src/Module.zig b/src/Module.zig index d575f89b41..1e75ab037d 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -88,6 +88,14 @@ embed_table: std.StringHashMapUnmanaged(*EmbedFile) = .{}, /// Stores all Type and Value objects; periodically garbage collected. intern_pool: InternPool = .{}, +/// To be eliminated in a future commit by moving more data into InternPool. +/// Current uses that must be eliminated: +/// * Struct comptime_args +/// * Struct optimized_order +/// * Union fields +/// This memory lives until the Module is destroyed. +tmp_hack_arena: std.heap.ArenaAllocator, + /// This is currently only used for string literals. memoized_decls: std.AutoHashMapUnmanaged(InternPool.Index, Decl.Index) = .{}, @@ -125,13 +133,8 @@ cimport_errors: std.AutoArrayHashMapUnmanaged(Decl.Index, []CImportError) = .{}, /// contains Decls that need to be deleted if they end up having no references to them. deletion_set: std.AutoArrayHashMapUnmanaged(Decl.Index, void) = .{}, -/// Error tags and their values, tag names are duped with mod.gpa. -/// Corresponds with `error_name_list`. -global_error_set: std.StringHashMapUnmanaged(ErrorInt) = .{}, - -/// ErrorInt -> []const u8 for fast lookups for @intToError at comptime -/// Corresponds with `global_error_set`. -error_name_list: ArrayListUnmanaged([]const u8), +/// Key is the error name, index is the error tag value. Index 0 has a length-0 string. 
+global_error_set: GlobalErrorSet = .{}, /// Incrementing integer used to compare against the corresponding Decl /// field to determine whether a Decl's status applies to an ongoing update, or a @@ -182,6 +185,8 @@ reference_table: std.AutoHashMapUnmanaged(Decl.Index, struct { src: LazySrcLoc, }) = .{}, +pub const GlobalErrorSet = std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, void); + pub const CImportError = struct { offset: u32, line: u32, @@ -248,7 +253,11 @@ pub const GlobalEmitH = struct { pub const ErrorInt = u32; pub const Export = struct { - options: std.builtin.ExportOptions, + name: InternPool.NullTerminatedString, + linkage: std.builtin.GlobalLinkage, + section: InternPool.OptionalNullTerminatedString, + visibility: std.builtin.SymbolVisibility, + src: LazySrcLoc, /// The Decl that performs the export. Note that this is *not* the Decl being exported. owner_decl: Decl.Index, @@ -392,8 +401,7 @@ const ValueArena = struct { }; pub const Decl = struct { - /// Allocated with Module's allocator; outlives the ZIR code. - name: [*:0]const u8, + name: InternPool.NullTerminatedString, /// The most recent Type of the Decl after a successful semantic analysis. /// Populated when `has_tv`. ty: Type, @@ -401,15 +409,11 @@ pub const Decl = struct { /// Populated when `has_tv`. val: Value, /// Populated when `has_tv`. - /// Points to memory inside value_arena. - @"linksection": ?[*:0]const u8, + @"linksection": InternPool.OptionalNullTerminatedString, /// Populated when `has_tv`. @"align": u32, /// Populated when `has_tv`. @"addrspace": std.builtin.AddressSpace, - /// The memory for ty, val, align, linksection, and captures. - /// If this is `null` then there is no memory management needed. - value_arena: ?*ValueArena = null, /// The direct parent namespace of the Decl. /// Reference to externally owned memory. /// In the case of the Decl corresponding to a file, this is @@ -564,13 +568,7 @@ pub const Decl = struct { function_body, }; - pub fn clearName(decl: *Decl, gpa: Allocator) void { - gpa.free(mem.sliceTo(decl.name, 0)); - decl.name = undefined; - } - pub fn clearValues(decl: *Decl, mod: *Module) void { - const gpa = mod.gpa; if (decl.getOwnedFunctionIndex(mod).unwrap()) |func| { _ = mod.align_stack_fns.remove(func); if (mod.funcPtr(func).comptime_args != null) { @@ -579,19 +577,6 @@ pub const Decl = struct { mod.destroyFunc(func); } _ = mod.memoized_decls.remove(decl.val.ip_index); - if (decl.value_arena) |value_arena| { - value_arena.deinit(gpa); - decl.value_arena = null; - decl.has_tv = false; - decl.owns_tv = false; - } - } - - pub fn finalizeNewArena(decl: *Decl, arena: *std.heap.ArenaAllocator) !void { - assert(decl.value_arena == null); - const value_arena = try arena.allocator().create(ValueArena); - value_arena.* = .{ .state = arena.state }; - decl.value_arena = value_arena; } /// This name is relative to the containing namespace of the decl. 
@@ -692,7 +677,7 @@ pub const Decl = struct { } pub fn renderFullyQualifiedName(decl: Decl, mod: *Module, writer: anytype) !void { - const unqualified_name = mem.sliceTo(decl.name, 0); + const unqualified_name = mod.intern_pool.stringToSlice(decl.name); if (decl.name_fully_qualified) { return writer.writeAll(unqualified_name); } @@ -700,24 +685,27 @@ pub const Decl = struct { } pub fn renderFullyQualifiedDebugName(decl: Decl, mod: *Module, writer: anytype) !void { - const unqualified_name = mem.sliceTo(decl.name, 0); + const unqualified_name = mod.intern_pool.stringToSlice(decl.name); return mod.namespacePtr(decl.src_namespace).renderFullyQualifiedDebugName(mod, unqualified_name, writer); } - pub fn getFullyQualifiedName(decl: Decl, mod: *Module) ![:0]u8 { - var buffer = std.ArrayList(u8).init(mod.gpa); - defer buffer.deinit(); - try decl.renderFullyQualifiedName(mod, buffer.writer()); + pub fn getFullyQualifiedName(decl: Decl, mod: *Module) !InternPool.NullTerminatedString { + const gpa = mod.gpa; + const ip = &mod.intern_pool; + const start = ip.string_bytes.items.len; + try decl.renderFullyQualifiedName(mod, ip.string_bytes.writer(gpa)); // Sanitize the name for nvptx which is more restrictive. + // TODO This should be handled by the backend, not the frontend. Have a + // look at how the C backend does it for inspiration. if (mod.comp.bin_file.options.target.cpu.arch.isNvptx()) { - for (buffer.items) |*byte| switch (byte.*) { + for (ip.string_bytes.items[start..]) |*byte| switch (byte.*) { '{', '}', '*', '[', ']', '(', ')', ',', ' ', '\'' => byte.* = '_', else => {}, }; } - return buffer.toOwnedSliceSentinel(0); + return ip.getOrPutTrailingString(gpa, ip.string_bytes.items.len - start); } pub fn typedValue(decl: Decl) error{AnalysisFail}!TypedValue { @@ -804,11 +792,11 @@ pub const Decl = struct { pub fn dump(decl: *Decl) void { const loc = std.zig.findLineColumn(decl.scope.source.bytes, decl.src); - std.debug.print("{s}:{d}:{d} name={s} status={s}", .{ + std.debug.print("{s}:{d}:{d} name={d} status={s}", .{ decl.scope.sub_file_path, loc.line + 1, loc.column + 1, - mem.sliceTo(decl.name, 0), + @enumToInt(decl.name), @tagName(decl.analysis), }); if (decl.has_tv) { @@ -922,15 +910,15 @@ pub const Struct = struct { } }; - pub const Fields = std.StringArrayHashMapUnmanaged(Field); + pub const Fields = std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, Field); /// The `Type` and `Value` memory is owned by the arena of the Struct's owner_decl. pub const Field = struct { /// Uses `noreturn` to indicate `anytype`. /// undefined until `status` is >= `have_field_types`. ty: Type, - /// Uses `unreachable_value` to indicate no default. - default_val: Value, + /// Uses `none` to indicate no default. + default_val: InternPool.Index, /// Zero means to use the ABI alignment of the type. abi_align: u32, /// undefined until `status` is `have_layout`. @@ -982,7 +970,7 @@ pub const Struct = struct { /// runtime version of the struct. 
pub const omitted_field = std.math.maxInt(u32); - pub fn getFullyQualifiedName(s: *Struct, mod: *Module) ![:0]u8 { + pub fn getFullyQualifiedName(s: *Struct, mod: *Module) !InternPool.NullTerminatedString { return mod.declPtr(s.owner_decl).getFullyQualifiedName(mod); } @@ -1141,9 +1129,9 @@ pub const Union = struct { } }; - pub const Fields = std.StringArrayHashMapUnmanaged(Field); + pub const Fields = std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, Field); - pub fn getFullyQualifiedName(s: *Union, mod: *Module) ![:0]u8 { + pub fn getFullyQualifiedName(s: *Union, mod: *Module) !InternPool.NullTerminatedString { return mod.declPtr(s.owner_decl).getFullyQualifiedName(mod); } @@ -1569,15 +1557,15 @@ pub const Fn = struct { pub const DeclAdapter = struct { mod: *Module, - pub fn hash(self: @This(), s: []const u8) u32 { + pub fn hash(self: @This(), s: InternPool.NullTerminatedString) u32 { _ = self; - return @truncate(u32, std.hash.Wyhash.hash(0, s)); + return std.hash.uint32(@enumToInt(s)); } - pub fn eql(self: @This(), a: []const u8, b_decl_index: Decl.Index, b_index: usize) bool { + pub fn eql(self: @This(), a: InternPool.NullTerminatedString, b_decl_index: Decl.Index, b_index: usize) bool { _ = b_index; const b_decl = self.mod.declPtr(b_decl_index); - return mem.eql(u8, a, mem.sliceTo(b_decl.name, 0)); + return a == b_decl.name; } }; @@ -1628,16 +1616,14 @@ pub const Namespace = struct { pub fn hash(ctx: @This(), decl_index: Decl.Index) u32 { const decl = ctx.module.declPtr(decl_index); - return @truncate(u32, std.hash.Wyhash.hash(0, mem.sliceTo(decl.name, 0))); + return std.hash.uint32(@enumToInt(decl.name)); } pub fn eql(ctx: @This(), a_decl_index: Decl.Index, b_decl_index: Decl.Index, b_index: usize) bool { _ = b_index; const a_decl = ctx.module.declPtr(a_decl_index); const b_decl = ctx.module.declPtr(b_decl_index); - const a_name = mem.sliceTo(a_decl.name, 0); - const b_name = mem.sliceTo(b_decl.name, 0); - return mem.eql(u8, a_name, b_name); + return a_decl.name == b_decl.name; } }; @@ -1649,8 +1635,6 @@ pub const Namespace = struct { pub fn destroyDecls(ns: *Namespace, mod: *Module) void { const gpa = mod.gpa; - log.debug("destroyDecls {*}", .{ns}); - var decls = ns.decls; ns.decls = .{}; @@ -1676,8 +1660,6 @@ pub const Namespace = struct { ) !void { const gpa = mod.gpa; - log.debug("deleteAllDecls {*}", .{ns}); - var decls = ns.decls; ns.decls = .{}; @@ -1712,7 +1694,8 @@ pub const Namespace = struct { if (ns.parent.unwrap()) |parent| { const decl_index = ns.getDeclIndex(mod); const decl = mod.declPtr(decl_index); - try mod.namespacePtr(parent).renderFullyQualifiedName(mod, mem.sliceTo(decl.name, 0), writer); + const decl_name = mod.intern_pool.stringToSlice(decl.name); + try mod.namespacePtr(parent).renderFullyQualifiedName(mod, decl_name, writer); } else { try ns.file_scope.renderFullyQualifiedName(writer); } @@ -1733,7 +1716,8 @@ pub const Namespace = struct { if (ns.parent.unwrap()) |parent| { const decl_index = ns.getDeclIndex(mod); const decl = mod.declPtr(decl_index); - try mod.namespacePtr(parent).renderFullyQualifiedDebugName(mod, mem.sliceTo(decl.name, 0), writer); + const decl_name = mod.intern_pool.stringToSlice(decl.name); + try mod.namespacePtr(parent).renderFullyQualifiedDebugName(mod, decl_name, writer); } else { try ns.file_scope.renderFullyQualifiedDebugName(writer); separator_char = ':'; @@ -1927,11 +1911,11 @@ pub const File = struct { }; } - pub fn fullyQualifiedNameZ(file: File, gpa: Allocator) ![:0]u8 { - var buf = std.ArrayList(u8).init(gpa); - 
defer buf.deinit(); - try file.renderFullyQualifiedName(buf.writer()); - return buf.toOwnedSliceSentinel(0); + pub fn fullyQualifiedName(file: File, mod: *Module) !InternPool.NullTerminatedString { + const ip = &mod.intern_pool; + const start = ip.string_bytes.items.len; + try file.renderFullyQualifiedName(ip.string_bytes.writer(mod.gpa)); + return ip.getOrPutTrailingString(mod.gpa, ip.string_bytes.items.len - start); } /// Returns the full path to this file relative to its package. @@ -2055,7 +2039,7 @@ pub const ErrorMsg = struct { reference_trace: []Trace = &.{}, pub const Trace = struct { - decl: ?[*:0]const u8, + decl: InternPool.OptionalNullTerminatedString, src_loc: SrcLoc, hidden: u32 = 0, }; @@ -3180,8 +3164,8 @@ pub const CompileError = error{ pub fn init(mod: *Module) !void { const gpa = mod.gpa; - try mod.error_name_list.append(gpa, "(no error)"); try mod.intern_pool.init(gpa); + try mod.global_error_set.put(gpa, .empty, {}); } pub fn deinit(mod: *Module) void { @@ -3282,15 +3266,8 @@ pub fn deinit(mod: *Module) void { } mod.export_owners.deinit(gpa); - { - var it = mod.global_error_set.keyIterator(); - while (it.next()) |key| { - gpa.free(key.*); - } - mod.global_error_set.deinit(gpa); - } + mod.global_error_set.deinit(gpa); - mod.error_name_list.deinit(gpa); mod.test_functions.deinit(gpa); mod.align_stack_fns.deinit(gpa); mod.monomorphed_funcs.deinit(gpa); @@ -3305,13 +3282,13 @@ pub fn deinit(mod: *Module) void { mod.memoized_decls.deinit(gpa); mod.intern_pool.deinit(gpa); + mod.tmp_hack_arena.deinit(); } pub fn destroyDecl(mod: *Module, decl_index: Decl.Index) void { const gpa = mod.gpa; { const decl = mod.declPtr(decl_index); - log.debug("destroy {*} ({s})", .{ decl, decl.name }); _ = mod.test_functions.swapRemove(decl_index); if (decl.deletion_flag) { assert(mod.deletion_set.swapRemove(decl_index)); @@ -3329,7 +3306,6 @@ pub fn destroyDecl(mod: *Module, decl_index: Decl.Index) void { decl.clearValues(mod); decl.dependants.deinit(gpa); decl.dependencies.deinit(gpa); - decl.clearName(gpa); decl.* = undefined; } mod.decls_free_list.append(gpa, decl_index) catch { @@ -3391,11 +3367,7 @@ pub fn declIsRoot(mod: *Module, decl_index: Decl.Index) bool { } fn freeExportList(gpa: Allocator, export_list: *ArrayListUnmanaged(*Export)) void { - for (export_list.items) |exp| { - gpa.free(exp.options.name); - if (exp.options.section) |s| gpa.free(s); - gpa.destroy(exp); - } + for (export_list.items) |exp| gpa.destroy(exp); export_list.deinit(gpa); } @@ -3814,9 +3786,6 @@ fn updateZirRefs(mod: *Module, file: *File, old_zir: Zir) !void { if (decl.zir_decl_index != 0) { const old_zir_decl_index = decl.zir_decl_index; const new_zir_decl_index = extra_map.get(old_zir_decl_index) orelse { - log.debug("updateZirRefs {s}: delete {*} ({s})", .{ - file.sub_file_path, decl, decl.name, - }); try file.deleted_decls.append(gpa, decl_index); continue; }; @@ -3824,14 +3793,7 @@ fn updateZirRefs(mod: *Module, file: *File, old_zir: Zir) !void { decl.zir_decl_index = new_zir_decl_index; const new_hash = decl.contentsHashZir(new_zir); if (!std.zig.srcHashEql(old_hash, new_hash)) { - log.debug("updateZirRefs {s}: outdated {*} ({s}) {d} => {d}", .{ - file.sub_file_path, decl, decl.name, old_zir_decl_index, new_zir_decl_index, - }); try file.outdated_decls.append(gpa, decl_index); - } else { - log.debug("updateZirRefs {s}: unchanged {*} ({s}) {d} => {d}", .{ - file.sub_file_path, decl, decl.name, old_zir_decl_index, new_zir_decl_index, - }); } } @@ -4031,8 +3993,6 @@ pub fn ensureDeclAnalyzed(mod: *Module, 
decl_index: Decl.Index) SemaError!void { .complete => return, .outdated => blk: { - log.debug("re-analyzing {*} ({s})", .{ decl, decl.name }); - // The exports this Decl performs will be re-discovered, so we remove them here // prior to re-analysis. try mod.deleteDeclExports(decl_index); @@ -4047,9 +4007,6 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void { const dep = mod.declPtr(dep_index); dep.removeDependant(decl_index); if (dep.dependants.count() == 0 and !dep.deletion_flag) { - log.debug("insert {*} ({s}) dependant {*} ({s}) into deletion set", .{ - decl, decl.name, dep, dep.name, - }); try mod.markDeclForDeletion(dep_index); } } @@ -4061,7 +4018,7 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void { .unreferenced => false, }; - var decl_prog_node = mod.sema_prog_node.start(mem.sliceTo(decl.name, 0), 0); + var decl_prog_node = mod.sema_prog_node.start(mod.intern_pool.stringToSlice(decl.name), 0); decl_prog_node.activate(); defer decl_prog_node.end(); @@ -4190,14 +4147,11 @@ pub fn ensureFuncBodyAnalyzed(mod: *Module, func_index: Fn.Index) SemaError!void if (no_bin_file and !dump_air and !dump_llvm_ir) return; - log.debug("analyze liveness of {s}", .{decl.name}); var liveness = try Liveness.analyze(gpa, air, &mod.intern_pool); defer liveness.deinit(gpa); if (dump_air) { - const fqn = try decl.getFullyQualifiedName(mod); - defer mod.gpa.free(fqn); - + const fqn = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod)); std.debug.print("# Begin Function AIR: {s}:\n", .{fqn}); @import("print_air.zig").dump(mod, air, liveness); std.debug.print("# End Function AIR: {s}\n\n", .{fqn}); @@ -4354,9 +4308,6 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void { if (file.root_decl != .none) return; const gpa = mod.gpa; - var new_decl_arena = std.heap.ArenaAllocator.init(gpa); - errdefer new_decl_arena.deinit(); - const new_decl_arena_allocator = new_decl_arena.allocator(); // Because these three things each reference each other, `undefined` // placeholders are used before being set after the struct type gains an @@ -4394,7 +4345,7 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void { new_namespace.ty = struct_ty.toType(); file.root_decl = new_decl_index.toOptional(); - new_decl.name = try file.fullyQualifiedNameZ(gpa); + new_decl.name = try file.fullyQualifiedName(mod); new_decl.src_line = 0; new_decl.is_pub = true; new_decl.is_exported = false; @@ -4403,7 +4354,7 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void { new_decl.ty = Type.type; new_decl.val = struct_ty.toValue(); new_decl.@"align" = 0; - new_decl.@"linksection" = null; + new_decl.@"linksection" = .none; new_decl.has_tv = true; new_decl.owns_tv = true; new_decl.alive = true; // This Decl corresponds to a File and is therefore always alive. @@ -4431,7 +4382,6 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void { .mod = mod, .gpa = gpa, .arena = sema_arena_allocator, - .perm_arena = new_decl_arena_allocator, .code = file.zir, .owner_decl = new_decl, .owner_decl_index = new_decl_index, @@ -4484,8 +4434,6 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void { } else { new_decl.analysis = .file_failure; } - - try new_decl.finalizeNewArena(&new_decl_arena); } /// Returns `true` if the Decl type changed. 
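Several hunks in this commit (Decl.getFullyQualifiedName, File.fullyQualifiedName, InternPool.getOrPutStringFmt) share one pattern: format directly into the intern pool's string_bytes buffer, then intern the trailing bytes, instead of building the string in a temporary ArrayList first. Below is a reduced sketch of that pattern under illustrative names, with an ordinary byte buffer standing in for InternPool.string_bytes and a raw offset standing in for the deduplicated NullTerminatedString index.

const std = @import("std");

/// Appends the formatted string plus a NUL terminator to `buf` and returns
/// the start offset. An offset is returned rather than a slice because a
/// later append may reallocate the buffer; for the same reason the real
/// InternPool returns a stable NullTerminatedString index, after
/// deduplicating the trailing bytes via getOrPutTrailingString.
fn appendStringFmt(
    gpa: std.mem.Allocator,
    buf: *std.ArrayListUnmanaged(u8),
    comptime fmt: []const u8,
    args: anytype,
) !u32 {
    const start = @intCast(u32, buf.items.len);
    try buf.writer(gpa).print(fmt, args);
    try buf.append(gpa, 0);
    return start;
}

/// Recovers the NUL-terminated string starting at `start`.
fn stringAt(bytes: []const u8, start: u32) []const u8 {
    return std.mem.sliceTo(bytes[start..], 0);
}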
@@ -4507,28 +4455,8 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { decl.analysis = .in_progress; - // We need the memory for the Type to go into the arena for the Decl - var decl_arena = std.heap.ArenaAllocator.init(gpa); - const decl_arena_allocator = decl_arena.allocator(); - const decl_value_arena = blk: { - errdefer decl_arena.deinit(); - const s = try decl_arena_allocator.create(ValueArena); - s.* = .{ .state = undefined }; - break :blk s; - }; - defer { - if (decl.value_arena) |value_arena| { - assert(value_arena.state_acquired == null); - decl_value_arena.prev = value_arena; - } - - decl_value_arena.state = decl_arena.state; - decl.value_arena = decl_value_arena; - } - var analysis_arena = std.heap.ArenaAllocator.init(gpa); defer analysis_arena.deinit(); - const analysis_arena_allocator = analysis_arena.allocator(); var comptime_mutable_decls = std.ArrayList(Decl.Index).init(gpa); defer comptime_mutable_decls.deinit(); @@ -4536,8 +4464,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { var sema: Sema = .{ .mod = mod, .gpa = gpa, - .arena = analysis_arena_allocator, - .perm_arena = decl_arena_allocator, + .arena = analysis_arena.allocator(), .code = zir, .owner_decl = decl, .owner_decl_index = decl_index, @@ -4551,7 +4478,6 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { defer sema.deinit(); if (mod.declIsRoot(decl_index)) { - log.debug("semaDecl root {*} ({s})", .{ decl, decl.name }); const main_struct_inst = Zir.main_struct_inst; const struct_index = decl.getOwnedStructIndex(mod).unwrap().?; const struct_obj = mod.structPtr(struct_index); @@ -4563,7 +4489,6 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { decl.generation = mod.generation; return false; } - log.debug("semaDecl {*} ({s})", .{ decl, decl.name }); var wip_captures = try WipCaptureScope.init(gpa, decl.src_scope); defer wip_captures.deinit(); @@ -4619,7 +4544,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { decl.ty = InternPool.Index.type_type.toType(); decl.val = ty.toValue(); decl.@"align" = 0; - decl.@"linksection" = null; + decl.@"linksection" = .none; decl.has_tv = true; decl.owns_tv = false; decl.analysis = .complete; @@ -4646,7 +4571,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { decl.clearValues(mod); decl.ty = decl_tv.ty; - decl.val = try decl_tv.val.copy(decl_arena_allocator); + decl.val = (try decl_tv.val.intern(decl_tv.ty, mod)).toValue(); // linksection, align, and addrspace were already set by Sema decl.has_tv = true; decl.owns_tv = owns_tv; @@ -4660,7 +4585,9 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { return sema.fail(&block_scope, export_src, "export of inline function", .{}); } // The scope needs to have the decl in it. 
- const options: std.builtin.ExportOptions = .{ .name = mem.sliceTo(decl.name, 0) }; + const options: std.builtin.ExportOptions = .{ + .name = mod.intern_pool.stringToSlice(decl.name), + }; try sema.analyzeExport(&block_scope, export_src, options, decl_index); } return type_changed or is_inline != prev_is_inline; @@ -4693,14 +4620,13 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { .func => {}, else => { - log.debug("send global const to linker: {*} ({s})", .{ decl, decl.name }); queue_linker_work = true; }, }, } decl.ty = decl_tv.ty; - decl.val = try decl_tv.val.copy(decl_arena_allocator); + decl.val = (try decl_tv.val.intern(decl_tv.ty, mod)).toValue(); decl.@"align" = blk: { const align_ref = decl.zirAlignRef(mod); if (align_ref == .none) break :blk 0; @@ -4708,14 +4634,15 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { }; decl.@"linksection" = blk: { const linksection_ref = decl.zirLinksectionRef(mod); - if (linksection_ref == .none) break :blk null; + if (linksection_ref == .none) break :blk .none; const bytes = try sema.resolveConstString(&block_scope, section_src, linksection_ref, "linksection must be comptime-known"); if (mem.indexOfScalar(u8, bytes, 0) != null) { return sema.fail(&block_scope, section_src, "linksection cannot contain null bytes", .{}); } else if (bytes.len == 0) { return sema.fail(&block_scope, section_src, "linksection cannot be empty", .{}); } - break :blk (try decl_arena_allocator.dupeZ(u8, bytes)).ptr; + const section = try mod.intern_pool.getOrPutString(gpa, bytes); + break :blk section.toOptional(); }; decl.@"addrspace" = blk: { const addrspace_ctx: Sema.AddressSpaceContext = switch (mod.intern_pool.indexToKey(decl_tv.val.toIntern())) { @@ -4743,7 +4670,6 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { (queue_linker_work and try sema.typeHasRuntimeBits(decl.ty)); if (has_runtime_bits) { - log.debug("queue linker work for {*} ({s})", .{ decl, decl.name }); // Needed for codegen_decl which will call updateDecl and then the // codegen backend wants full access to the Decl Type. @@ -4759,7 +4685,9 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { if (decl.is_exported) { const export_src: LazySrcLoc = .{ .token_offset = @boolToInt(decl.is_pub) }; // The scope needs to have the decl in it. - const options: std.builtin.ExportOptions = .{ .name = mem.sliceTo(decl.name, 0) }; + const options: std.builtin.ExportOptions = .{ + .name = mod.intern_pool.stringToSlice(decl.name), + }; try sema.analyzeExport(&block_scope, export_src, options, decl_index); } @@ -4785,10 +4713,6 @@ pub fn declareDeclDependencyType(mod: *Module, depender_index: Decl.Index, depen } } - log.debug("{*} ({s}) depends on {*} ({s})", .{ - depender, depender.name, dependee, dependee.name, - }); - if (dependee.deletion_flag) { dependee.deletion_flag = false; assert(mod.deletion_set.swapRemove(dependee_index)); @@ -5138,6 +5062,7 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) Allocator.Err const namespace = mod.namespacePtr(namespace_index); const gpa = mod.gpa; const zir = namespace.file_scope.zir; + const ip = &mod.intern_pool; // zig fmt: off const is_pub = (flags & 0b0001) != 0; @@ -5157,31 +5082,31 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) Allocator.Err // Every Decl needs a name. 
var is_named_test = false; var kind: Decl.Kind = .named; - const decl_name: [:0]const u8 = switch (decl_name_index) { + const decl_name: InternPool.NullTerminatedString = switch (decl_name_index) { 0 => name: { if (export_bit) { const i = iter.usingnamespace_index; iter.usingnamespace_index += 1; kind = .@"usingnamespace"; - break :name try std.fmt.allocPrintZ(gpa, "usingnamespace_{d}", .{i}); + break :name try ip.getOrPutStringFmt(gpa, "usingnamespace_{d}", .{i}); } else { const i = iter.comptime_index; iter.comptime_index += 1; kind = .@"comptime"; - break :name try std.fmt.allocPrintZ(gpa, "comptime_{d}", .{i}); + break :name try ip.getOrPutStringFmt(gpa, "comptime_{d}", .{i}); } }, 1 => name: { const i = iter.unnamed_test_index; iter.unnamed_test_index += 1; kind = .@"test"; - break :name try std.fmt.allocPrintZ(gpa, "test_{d}", .{i}); + break :name try ip.getOrPutStringFmt(gpa, "test_{d}", .{i}); }, 2 => name: { is_named_test = true; const test_name = zir.nullTerminatedString(decl_doccomment_index); kind = .@"test"; - break :name try std.fmt.allocPrintZ(gpa, "decltest.{s}", .{test_name}); + break :name try ip.getOrPutStringFmt(gpa, "decltest.{s}", .{test_name}); }, else => name: { const raw_name = zir.nullTerminatedString(decl_name_index); @@ -5189,14 +5114,12 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) Allocator.Err is_named_test = true; const test_name = zir.nullTerminatedString(decl_name_index + 1); kind = .@"test"; - break :name try std.fmt.allocPrintZ(gpa, "test.{s}", .{test_name}); + break :name try ip.getOrPutStringFmt(gpa, "test.{s}", .{test_name}); } else { - break :name try gpa.dupeZ(u8, raw_name); + break :name try ip.getOrPutString(gpa, raw_name); } }, }; - var must_free_decl_name = true; - defer if (must_free_decl_name) gpa.free(decl_name); const is_exported = export_bit and decl_name_index != 0; if (kind == .@"usingnamespace") try namespace.usingnamespace_set.ensureUnusedCapacity(gpa, 1); @@ -5204,7 +5127,7 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) Allocator.Err // We create a Decl for it regardless of analysis status. 
const gop = try namespace.decls.getOrPutContextAdapted( gpa, - @as([]const u8, mem.sliceTo(decl_name, 0)), + decl_name, DeclAdapter{ .mod = mod }, Namespace.DeclContext{ .module = mod }, ); @@ -5214,11 +5137,9 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) Allocator.Err const new_decl = mod.declPtr(new_decl_index); new_decl.kind = kind; new_decl.name = decl_name; - must_free_decl_name = false; if (kind == .@"usingnamespace") { namespace.usingnamespace_set.putAssumeCapacity(new_decl_index, is_pub); } - log.debug("scan new {*} ({s}) into {*}", .{ new_decl, decl_name, namespace }); new_decl.src_line = line; gop.key_ptr.* = new_decl_index; // Exported decls, comptime decls, usingnamespace decls, and @@ -5239,7 +5160,7 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) Allocator.Err if (!comp.bin_file.options.is_test) break :blk false; if (decl_pkg != mod.main_pkg) break :blk false; if (comp.test_filter) |test_filter| { - if (mem.indexOf(u8, decl_name, test_filter) == null) { + if (mem.indexOf(u8, ip.stringToSlice(decl_name), test_filter) == null) { break :blk false; } } @@ -5270,7 +5191,7 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) Allocator.Err gpa, src_loc, "duplicate test name: {s}", - .{decl_name}, + .{ip.stringToSlice(decl_name)}, ); errdefer msg.destroy(gpa); try mod.failed_decls.putNoClobber(gpa, decl_index, msg); @@ -5281,7 +5202,6 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) Allocator.Err }; try mod.errNoteNonLazy(other_src_loc, msg, "other test here", .{}); } - log.debug("scan existing {*} ({s}) of {*}", .{ decl, decl.name, namespace }); // Update the AST node of the decl; even if its contents are unchanged, it may // have been re-ordered. decl.src_node = decl_node; @@ -5315,7 +5235,6 @@ pub fn clearDecl( defer tracy.end(); const decl = mod.declPtr(decl_index); - log.debug("clearing {*} ({s})", .{ decl, decl.name }); const gpa = mod.gpa; try mod.deletion_set.ensureUnusedCapacity(gpa, decl.dependencies.count()); @@ -5330,9 +5249,6 @@ pub fn clearDecl( const dep = mod.declPtr(dep_index); dep.removeDependant(decl_index); if (dep.dependants.count() == 0 and !dep.deletion_flag) { - log.debug("insert {*} ({s}) dependant {*} ({s}) into deletion set", .{ - decl, decl.name, dep, dep.name, - }); // We don't recursively perform a deletion here, because during the update, // another reference to it may turn up. dep.deletion_flag = true; @@ -5387,7 +5303,6 @@ pub fn clearDecl( /// This function is exclusively called for anonymous decls. pub fn deleteUnusedDecl(mod: *Module, decl_index: Decl.Index) void { const decl = mod.declPtr(decl_index); - log.debug("deleteUnusedDecl {d} ({s})", .{ decl_index, decl.name }); assert(!mod.declIsRoot(decl_index)); assert(mod.namespacePtr(decl.src_namespace).anon_decls.swapRemove(decl_index)); @@ -5415,7 +5330,6 @@ fn markDeclForDeletion(mod: *Module, decl_index: Decl.Index) !void { /// If other decls depend on this decl, they must be aborted first. 
pub fn abortAnonDecl(mod: *Module, decl_index: Decl.Index) void { const decl = mod.declPtr(decl_index); - log.debug("abortAnonDecl {*} ({s})", .{ decl, decl.name }); assert(!mod.declIsRoot(decl_index)); assert(mod.namespacePtr(decl.src_namespace).anon_decls.swapRemove(decl_index)); @@ -5468,21 +5382,20 @@ fn deleteDeclExports(mod: *Module, decl_index: Decl.Index) Allocator.Error!void } } if (mod.comp.bin_file.cast(link.File.Elf)) |elf| { - elf.deleteDeclExport(decl_index, exp.options.name); + elf.deleteDeclExport(decl_index, exp.name); } if (mod.comp.bin_file.cast(link.File.MachO)) |macho| { - try macho.deleteDeclExport(decl_index, exp.options.name); + try macho.deleteDeclExport(decl_index, exp.name); } if (mod.comp.bin_file.cast(link.File.Wasm)) |wasm| { wasm.deleteDeclExport(decl_index); } if (mod.comp.bin_file.cast(link.File.Coff)) |coff| { - coff.deleteDeclExport(decl_index, exp.options.name); + coff.deleteDeclExport(decl_index, exp.name); } if (mod.failed_exports.fetchSwapRemove(exp)) |failed_kv| { failed_kv.value.destroy(mod.gpa); } - mod.gpa.free(exp.options.name); mod.gpa.destroy(exp); } export_owners.deinit(mod.gpa); @@ -5497,11 +5410,6 @@ pub fn analyzeFnBody(mod: *Module, func_index: Fn.Index, arena: Allocator) SemaE const decl_index = func.owner_decl; const decl = mod.declPtr(decl_index); - // Use the Decl's arena for captured values. - var decl_arena: std.heap.ArenaAllocator = undefined; - const decl_arena_allocator = decl.value_arena.?.acquire(gpa, &decl_arena); - defer decl.value_arena.?.release(&decl_arena); - var comptime_mutable_decls = std.ArrayList(Decl.Index).init(gpa); defer comptime_mutable_decls.deinit(); @@ -5512,7 +5420,6 @@ pub fn analyzeFnBody(mod: *Module, func_index: Fn.Index, arena: Allocator) SemaE .mod = mod, .gpa = gpa, .arena = arena, - .perm_arena = decl_arena_allocator, .code = decl.getFileScope(mod).zir, .owner_decl = decl, .owner_decl_index = decl_index, @@ -5616,7 +5523,6 @@ pub fn analyzeFnBody(mod: *Module, func_index: Fn.Index, arena: Allocator) SemaE } func.state = .in_progress; - log.debug("set {s} to in_progress", .{decl.name}); const last_arg_index = inner_block.instructions.items.len; @@ -5677,7 +5583,6 @@ pub fn analyzeFnBody(mod: *Module, func_index: Fn.Index, arena: Allocator) SemaE sema.air_extra.items[@enumToInt(Air.ExtraIndex.main_block)] = main_block_index; func.state = .success; - log.debug("set {s} to success", .{decl.name}); // Finally we must resolve the return type and parameter types so that backends // have full access to type information. @@ -5724,7 +5629,6 @@ pub fn analyzeFnBody(mod: *Module, func_index: Fn.Index, arena: Allocator) SemaE fn markOutdatedDecl(mod: *Module, decl_index: Decl.Index) !void { const decl = mod.declPtr(decl_index); - log.debug("mark outdated {*} ({s})", .{ decl, decl.name }); try mod.comp.work_queue.writeItem(.{ .analyze_decl = decl_index }); if (mod.failed_decls.fetchSwapRemove(decl_index)) |kv| { kv.value.destroy(mod.gpa); @@ -5821,7 +5725,7 @@ pub fn allocateNewDecl( .ty = undefined, .val = undefined, .@"align" = undefined, - .@"linksection" = undefined, + .@"linksection" = .none, .@"addrspace" = .generic, .analysis = .unreferenced, .deletion_flag = false, @@ -5839,25 +5743,20 @@ pub fn allocateNewDecl( return decl_and_index.decl_index; } -/// Get error value for error tag `name`. 
-pub fn getErrorValue(mod: *Module, name: []const u8) !std.StringHashMapUnmanaged(ErrorInt).KV { +pub fn getErrorValue( + mod: *Module, + name: InternPool.NullTerminatedString, +) Allocator.Error!ErrorInt { const gop = try mod.global_error_set.getOrPut(mod.gpa, name); - if (gop.found_existing) { - return std.StringHashMapUnmanaged(ErrorInt).KV{ - .key = gop.key_ptr.*, - .value = gop.value_ptr.*, - }; - } + return @intCast(ErrorInt, gop.index); +} - errdefer assert(mod.global_error_set.remove(name)); - try mod.error_name_list.ensureUnusedCapacity(mod.gpa, 1); - gop.key_ptr.* = try mod.gpa.dupe(u8, name); - gop.value_ptr.* = @intCast(ErrorInt, mod.error_name_list.items.len); - mod.error_name_list.appendAssumeCapacity(gop.key_ptr.*); - return std.StringHashMapUnmanaged(ErrorInt).KV{ - .key = gop.key_ptr.*, - .value = gop.value_ptr.*, - }; +pub fn getErrorValueFromSlice( + mod: *Module, + name: []const u8, +) Allocator.Error!ErrorInt { + const interned_name = try mod.intern_pool.getOrPutString(mod.gpa, name); + return getErrorValue(mod, interned_name); } pub fn createAnonymousDecl(mod: *Module, block: *Sema.Block, typed_value: TypedValue) !Decl.Index { @@ -5874,24 +5773,23 @@ pub fn createAnonymousDeclFromDecl( ) !Decl.Index { const new_decl_index = try mod.allocateNewDecl(namespace, src_decl.src_node, src_scope); errdefer mod.destroyDecl(new_decl_index); - const name = try std.fmt.allocPrintZ(mod.gpa, "{s}__anon_{d}", .{ - src_decl.name, @enumToInt(new_decl_index), + const ip = &mod.intern_pool; + const name = try ip.getOrPutStringFmt(mod.gpa, "{s}__anon_{d}", .{ + ip.stringToSlice(src_decl.name), @enumToInt(new_decl_index), }); try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, namespace, tv, name); return new_decl_index; } -/// Takes ownership of `name` even if it returns an error. pub fn initNewAnonDecl( mod: *Module, new_decl_index: Decl.Index, src_line: u32, namespace: Namespace.Index, typed_value: TypedValue, - name: [:0]u8, + name: InternPool.NullTerminatedString, ) Allocator.Error!void { assert(typed_value.ty.toIntern() == mod.intern_pool.typeOf(typed_value.val.toIntern())); - errdefer mod.gpa.free(name); const new_decl = mod.declPtr(new_decl_index); @@ -5900,7 +5798,7 @@ pub fn initNewAnonDecl( new_decl.ty = typed_value.ty; new_decl.val = typed_value.val; new_decl.@"align" = 0; - new_decl.@"linksection" = null; + new_decl.@"linksection" = .none; new_decl.has_tv = true; new_decl.analysis = .complete; new_decl.generation = mod.generation; @@ -6330,12 +6228,11 @@ pub fn processOutdatedAndDeletedDecls(mod: *Module) !void { // deletion set at this time. for (file.deleted_decls.items) |decl_index| { const decl = mod.declPtr(decl_index); - log.debug("deleted from source: {*} ({s})", .{ decl, decl.name }); // Remove from the namespace it resides in, preserving declaration order. assert(decl.zir_decl_index != 0); _ = mod.namespacePtr(decl.src_namespace).decls.orderedRemoveAdapted( - @as([]const u8, mem.sliceTo(decl.name, 0)), + decl.name, DeclAdapter{ .mod = mod }, ); @@ -6357,7 +6254,7 @@ pub fn processOutdatedAndDeletedDecls(mod: *Module) !void { pub fn processExports(mod: *Module) !void { const gpa = mod.gpa; // Map symbol names to `Export` for name collision detection. 
- var symbol_exports: std.StringArrayHashMapUnmanaged(*Export) = .{}; + var symbol_exports: std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, *Export) = .{}; defer symbol_exports.deinit(gpa); var it = mod.decl_exports.iterator(); @@ -6365,13 +6262,13 @@ pub fn processExports(mod: *Module) !void { const exported_decl = entry.key_ptr.*; const exports = entry.value_ptr.items; for (exports) |new_export| { - const gop = try symbol_exports.getOrPut(gpa, new_export.options.name); + const gop = try symbol_exports.getOrPut(gpa, new_export.name); if (gop.found_existing) { new_export.status = .failed_retryable; try mod.failed_exports.ensureUnusedCapacity(gpa, 1); const src_loc = new_export.getSrcLoc(mod); const msg = try ErrorMsg.create(gpa, src_loc, "exported symbol collision: {s}", .{ - new_export.options.name, + mod.intern_pool.stringToSlice(new_export.name), }); errdefer msg.destroy(gpa); const other_export = gop.value_ptr.*; @@ -6408,8 +6305,9 @@ pub fn populateTestFunctions( const builtin_file = (mod.importPkg(builtin_pkg) catch unreachable).file; const root_decl = mod.declPtr(builtin_file.root_decl.unwrap().?); const builtin_namespace = mod.namespacePtr(root_decl.src_namespace); + const test_functions_str = try mod.intern_pool.getOrPutString(gpa, "test_functions"); const decl_index = builtin_namespace.decls.getKeyAdapted( - @as([]const u8, "test_functions"), + test_functions_str, DeclAdapter{ .mod = mod }, ).?; { @@ -6443,7 +6341,7 @@ pub fn populateTestFunctions( for (test_fn_vals, mod.test_functions.keys()) |*test_fn_val, test_decl_index| { const test_decl = mod.declPtr(test_decl_index); - const test_decl_name = mem.span(test_decl.name); + const test_decl_name = mod.intern_pool.stringToSlice(test_decl.name); const test_name_decl_index = n: { const test_name_decl_ty = try mod.arrayType(.{ .len = test_decl_name.len, @@ -7156,7 +7054,7 @@ pub fn opaqueSrcLoc(mod: *Module, opaque_type: InternPool.Key.OpaqueType) SrcLoc return mod.declPtr(opaque_type.decl).srcLoc(mod); } -pub fn opaqueFullyQualifiedName(mod: *Module, opaque_type: InternPool.Key.OpaqueType) ![:0]u8 { +pub fn opaqueFullyQualifiedName(mod: *Module, opaque_type: InternPool.Key.OpaqueType) !InternPool.NullTerminatedString { return mod.declPtr(opaque_type.decl).getFullyQualifiedName(mod); } diff --git a/src/Sema.zig b/src/Sema.zig index 620a8a6a28..da8878ed4d 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -11,9 +11,6 @@ gpa: Allocator, /// Points to the temporary arena allocator of the Sema. /// This arena will be cleared when the sema is destroyed. arena: Allocator, -/// Points to the arena allocator for the owner_decl. -/// This arena will persist until the decl is invalidated. 
-perm_arena: Allocator, code: Zir, air_instructions: std.MultiArrayList(Air.Inst) = .{}, air_extra: std.ArrayListUnmanaged(u32) = .{}, @@ -740,7 +737,6 @@ pub const Block = struct { // TODO: migrate Decl alignment to use `InternPool.Alignment` new_decl.@"align" = @intCast(u32, alignment); errdefer sema.mod.abortAnonDecl(new_decl_index); - try new_decl.finalizeNewArena(&wad.new_decl_arena); wad.finished = true; try sema.mod.finalizeAnonDecl(new_decl_index); return new_decl_index; @@ -1825,6 +1821,20 @@ pub fn resolveConstString( return val.toAllocatedBytes(wanted_type, sema.arena, sema.mod); } +pub fn resolveConstStringIntern( + sema: *Sema, + block: *Block, + src: LazySrcLoc, + zir_ref: Zir.Inst.Ref, + reason: []const u8, +) !InternPool.NullTerminatedString { + const air_inst = try sema.resolveInst(zir_ref); + const wanted_type = Type.slice_const_u8; + const coerced_inst = try sema.coerce(block, wanted_type, air_inst, src); + const val = try sema.resolveConstValue(block, src, coerced_inst, reason); + return val.toIpString(wanted_type, sema.mod); +} + pub fn resolveType(sema: *Sema, block: *Block, src: LazySrcLoc, zir_ref: Zir.Inst.Ref) !Type { const air_inst = try sema.resolveInst(zir_ref); assert(air_inst != .var_args_param_type); @@ -1847,11 +1857,13 @@ fn analyzeAsType( pub fn setupErrorReturnTrace(sema: *Sema, block: *Block, last_arg_index: usize) !void { const mod = sema.mod; + const gpa = sema.gpa; + const ip = &mod.intern_pool; if (!mod.backendSupportsFeature(.error_return_trace)) return; assert(!block.is_comptime); var err_trace_block = block.makeSubBlock(); - defer err_trace_block.instructions.deinit(sema.gpa); + defer err_trace_block.instructions.deinit(gpa); const src: LazySrcLoc = .unneeded; @@ -1866,17 +1878,19 @@ pub fn setupErrorReturnTrace(sema: *Sema, block: *Block, last_arg_index: usize) const st_ptr = try err_trace_block.addTy(.alloc, try mod.singleMutPtrType(stack_trace_ty)); // st.instruction_addresses = &addrs; - const addr_field_ptr = try sema.fieldPtr(&err_trace_block, src, st_ptr, "instruction_addresses", src, true); + const instruction_addresses_field_name = try ip.getOrPutString(gpa, "instruction_addresses"); + const addr_field_ptr = try sema.fieldPtr(&err_trace_block, src, st_ptr, instruction_addresses_field_name, src, true); try sema.storePtr2(&err_trace_block, src, addr_field_ptr, src, addrs_ptr, src, .store); // st.index = 0; - const index_field_ptr = try sema.fieldPtr(&err_trace_block, src, st_ptr, "index", src, true); + const index_field_name = try ip.getOrPutString(gpa, "index"); + const index_field_ptr = try sema.fieldPtr(&err_trace_block, src, st_ptr, index_field_name, src, true); try sema.storePtr2(&err_trace_block, src, index_field_ptr, src, .zero_usize, src, .store); // @errorReturnTrace() = &st; _ = try err_trace_block.addUnOp(.set_err_return_trace, st_ptr); - try block.instructions.insertSlice(sema.gpa, last_arg_index, err_trace_block.instructions.items); + try block.instructions.insertSlice(gpa, last_arg_index, err_trace_block.instructions.items); } /// May return Value Tags: `variable`, `undef`. 
@@ -2179,7 +2193,13 @@ fn failWithUseOfAsync(sema: *Sema, block: *Block, src: LazySrcLoc) CompileError return sema.failWithOwnedErrorMsg(msg); } -fn failWithInvalidFieldAccess(sema: *Sema, block: *Block, src: LazySrcLoc, object_ty: Type, field_name: []const u8) CompileError { +fn failWithInvalidFieldAccess( + sema: *Sema, + block: *Block, + src: LazySrcLoc, + object_ty: Type, + field_name: InternPool.NullTerminatedString, +) CompileError { const mod = sema.mod; const inner_ty = if (object_ty.isSinglePointer(mod)) object_ty.childType(mod) else object_ty; @@ -2207,15 +2227,16 @@ fn failWithInvalidFieldAccess(sema: *Sema, block: *Block, src: LazySrcLoc, objec return sema.fail(block, src, "type '{}' does not support field access", .{object_ty.fmt(sema.mod)}); } -fn typeSupportsFieldAccess(mod: *const Module, ty: Type, field_name: []const u8) bool { +fn typeSupportsFieldAccess(mod: *const Module, ty: Type, field_name: InternPool.NullTerminatedString) bool { + const ip = &mod.intern_pool; switch (ty.zigTypeTag(mod)) { - .Array => return mem.eql(u8, field_name, "len"), + .Array => return ip.stringEqlSlice(field_name, "len"), .Pointer => { const ptr_info = ty.ptrInfo(mod); if (ptr_info.size == .Slice) { - return mem.eql(u8, field_name, "ptr") or mem.eql(u8, field_name, "len"); + return ip.stringEqlSlice(field_name, "ptr") or ip.stringEqlSlice(field_name, "len"); } else if (ptr_info.pointee_type.zigTypeTag(mod) == .Array) { - return mem.eql(u8, field_name, "len"); + return ip.stringEqlSlice(field_name, "len"); } else return false; }, .Type, .Struct, .Union => return true, @@ -2308,19 +2329,19 @@ pub fn fail( fn failWithOwnedErrorMsg(sema: *Sema, err_msg: *Module.ErrorMsg) CompileError { @setCold(true); const gpa = sema.gpa; + const mod = sema.mod; - if (crash_report.is_enabled and sema.mod.comp.debug_compile_errors) { + if (crash_report.is_enabled and mod.comp.debug_compile_errors) { if (err_msg.src_loc.lazy == .unneeded) return error.NeededSourceLocation; var wip_errors: std.zig.ErrorBundle.Wip = undefined; wip_errors.init(gpa) catch unreachable; - Compilation.addModuleErrorMsg(&wip_errors, err_msg.*) catch unreachable; + Compilation.addModuleErrorMsg(mod, &wip_errors, err_msg.*) catch unreachable; std.debug.print("compile error during Sema:\n", .{}); var error_bundle = wip_errors.toOwnedBundle("") catch unreachable; error_bundle.renderToStdErr(.{ .ttyconf = .no_color }); crash_report.compilerPanic("unexpected compile error occurred", null, null); } - const mod = sema.mod; ref: { errdefer err_msg.destroy(gpa); if (err_msg.src_loc.lazy == .unneeded) { @@ -2330,9 +2351,9 @@ fn failWithOwnedErrorMsg(sema: *Sema, err_msg: *Module.ErrorMsg) CompileError { try mod.failed_files.ensureUnusedCapacity(gpa, 1); const max_references = blk: { - if (sema.mod.comp.reference_trace) |num| break :blk num; + if (mod.comp.reference_trace) |num| break :blk num; // Do not add multiple traces without explicit request. 
- if (sema.mod.failed_decls.count() != 0) break :ref; + if (mod.failed_decls.count() != 0) break :ref; break :blk default_reference_trace_len; }; @@ -2350,13 +2371,16 @@ fn failWithOwnedErrorMsg(sema: *Sema, err_msg: *Module.ErrorMsg) CompileError { if (gop.found_existing) break; if (cur_reference_trace < max_references) { const decl = sema.mod.declPtr(ref.referencer); - try reference_stack.append(.{ .decl = decl.name, .src_loc = ref.src.toSrcLoc(decl, mod) }); + try reference_stack.append(.{ + .decl = decl.name.toOptional(), + .src_loc = ref.src.toSrcLoc(decl, mod), + }); } referenced_by = ref.referencer; } if (sema.mod.comp.reference_trace == null and cur_reference_trace > 0) { try reference_stack.append(.{ - .decl = null, + .decl = .none, .src_loc = undefined, .hidden = 0, }); @@ -2795,7 +2819,6 @@ fn zirStructDecl( new_namespace.ty = struct_ty.toType(); try sema.analyzeStructDecl(new_decl, inst, struct_index); - try new_decl.finalizeNewArena(&new_decl_arena); const decl_val = sema.analyzeDeclVal(block, src, new_decl_index); try mod.finalizeAnonDecl(new_decl_index); return decl_val; @@ -2812,6 +2835,7 @@ fn createAnonymousDeclTypeNamed( ) !Decl.Index { const mod = sema.mod; const gpa = sema.gpa; + const ip = &mod.intern_pool; const namespace = block.namespace; const src_scope = block.wip_capture_scope; const src_decl = mod.declPtr(block.src_decl); @@ -2827,16 +2851,19 @@ fn createAnonymousDeclTypeNamed( // semantically analyzed. // This name is also used as the key in the parent namespace so it cannot be // renamed. - const name = try std.fmt.allocPrintZ(gpa, "{s}__{s}_{d}", .{ - src_decl.name, anon_prefix, @enumToInt(new_decl_index), - }); - errdefer gpa.free(name); + + // This ensureUnusedCapacity protects against the src_decl slice from being + // reallocated during the call to `getOrPutStringFmt`. 
+ try ip.string_bytes.ensureUnusedCapacity(gpa, ip.stringToSlice(src_decl.name).len + + anon_prefix.len + 20); + const name = ip.getOrPutStringFmt(gpa, "{s}__{s}_{d}", .{ + ip.stringToSlice(src_decl.name), anon_prefix, @enumToInt(new_decl_index), + }) catch unreachable; try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, namespace, typed_value, name); return new_decl_index; }, .parent => { - const name = try gpa.dupeZ(u8, mem.sliceTo(sema.mod.declPtr(block.src_decl).name, 0)); - errdefer gpa.free(name); + const name = mod.declPtr(block.src_decl).name; try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, namespace, typed_value, name); return new_decl_index; }, @@ -2846,7 +2873,7 @@ fn createAnonymousDeclTypeNamed( var buf = std.ArrayList(u8).init(gpa); defer buf.deinit(); - try buf.appendSlice(mem.sliceTo(sema.mod.declPtr(block.src_decl).name, 0)); + try buf.appendSlice(ip.stringToSlice(mod.declPtr(block.src_decl).name)); try buf.appendSlice("("); var arg_i: usize = 0; @@ -2871,8 +2898,7 @@ fn createAnonymousDeclTypeNamed( }; try buf.appendSlice(")"); - const name = try buf.toOwnedSliceSentinel(0); - errdefer gpa.free(name); + const name = try ip.getOrPutString(gpa, buf.items); try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, namespace, typed_value, name); return new_decl_index; }, @@ -2885,10 +2911,17 @@ fn createAnonymousDeclTypeNamed( .dbg_var_ptr, .dbg_var_val => { if (zir_data[i].str_op.operand != ref) continue; - const name = try std.fmt.allocPrintZ(gpa, "{s}.{s}", .{ - src_decl.name, zir_data[i].str_op.getStr(sema.code), - }); - errdefer gpa.free(name); + // This ensureUnusedCapacity protects against the src_decl + // slice from being reallocated during the call to + // `getOrPutStringFmt`. + const zir_str = zir_data[i].str_op.getStr(sema.code); + try ip.string_bytes.ensureUnusedCapacity( + gpa, + ip.stringToSlice(src_decl.name).len + zir_str.len + 10, + ); + const name = ip.getOrPutStringFmt(gpa, "{s}.{s}", .{ + ip.stringToSlice(src_decl.name), zir_str, + }) catch unreachable; try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, namespace, typed_value, name); return new_decl_index; @@ -3249,7 +3282,6 @@ fn zirUnionDecl( _ = try mod.scanNamespace(new_namespace_index, extra_index, decls_len, new_decl); - try new_decl.finalizeNewArena(&new_decl_arena); const decl_val = sema.analyzeDeclVal(block, src, new_decl_index); try mod.finalizeAnonDecl(new_decl_index); return decl_val; @@ -3315,7 +3347,6 @@ fn zirOpaqueDecl( extra_index = try mod.scanNamespace(new_namespace_index, extra_index, decls_len, new_decl); - try new_decl.finalizeNewArena(&new_decl_arena); const decl_val = sema.analyzeDeclVal(block, src, new_decl_index); try mod.finalizeAnonDecl(new_decl_index); return decl_val; @@ -3344,8 +3375,8 @@ fn zirErrorSetDecl( while (extra_index < extra_index_end) : (extra_index += 2) { // +2 to skip over doc_string const str_index = sema.code.extra[extra_index]; const name = sema.code.nullTerminatedString(str_index); - const kv = try mod.getErrorValue(name); - const name_ip = try mod.intern_pool.getOrPutString(gpa, kv.key); + const name_ip = try mod.intern_pool.getOrPutString(gpa, name); + _ = try mod.getErrorValue(name_ip); const result = names.getOrPutAssumeCapacity(name_ip); assert(!result.found_existing); // verified in AstGen } @@ -3512,7 +3543,8 @@ fn indexablePtrLen( const is_pointer_to = object_ty.isSinglePointer(mod); const indexable_ty = if (is_pointer_to) object_ty.childType(mod) else object_ty; try checkIndexable(sema, block, src, indexable_ty); - return 
sema.fieldVal(block, src, object, "len", src); + const field_name = try mod.intern_pool.getOrPutString(sema.gpa, "len"); + return sema.fieldVal(block, src, object, field_name, src); } fn indexablePtrLenOrNone( @@ -3525,7 +3557,8 @@ fn indexablePtrLenOrNone( const operand_ty = sema.typeOf(operand); try checkMemOperand(sema, block, src, operand_ty); if (operand_ty.ptrSize(mod) == .Many) return .none; - return sema.fieldVal(block, src, operand, "len", src); + const field_name = try mod.intern_pool.getOrPutString(sema.gpa, "len"); + return sema.fieldVal(block, src, operand, field_name, src); } fn zirAllocExtended( @@ -4079,6 +4112,7 @@ fn zirFieldBasePtr( fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const mod = sema.mod; const gpa = sema.gpa; + const ip = &mod.intern_pool; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.MultiOp, inst_data.payload_index); const args = sema.code.refSlice(extra.end, extra.data.operands_len); @@ -4122,7 +4156,7 @@ fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. } if (!object_ty.indexableHasLen(mod)) continue; - break :l try sema.fieldVal(block, arg_src, object, "len", arg_src); + break :l try sema.fieldVal(block, arg_src, object, try ip.getOrPutString(gpa, "len"), arg_src); }; const arg_len = try sema.coerce(block, Type.usize, arg_len_uncoerced, arg_src); if (len == .none) { @@ -4308,6 +4342,7 @@ fn validateUnionInit( union_ptr: Air.Inst.Ref, ) CompileError!void { const mod = sema.mod; + const gpa = sema.gpa; if (instrs.len != 1) { const msg = msg: { @@ -4317,7 +4352,7 @@ fn validateUnionInit( "cannot initialize multiple union fields at once; unions can only have one active field", .{}, ); - errdefer msg.destroy(sema.gpa); + errdefer msg.destroy(gpa); for (instrs[1..]) |inst| { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; @@ -4341,7 +4376,7 @@ fn validateUnionInit( const field_ptr_data = sema.code.instructions.items(.data)[field_ptr].pl_node; const field_src: LazySrcLoc = .{ .node_offset_initializer = field_ptr_data.src_node }; const field_ptr_extra = sema.code.extraData(Zir.Inst.Field, field_ptr_data.payload_index).data; - const field_name = sema.code.nullTerminatedString(field_ptr_extra.field_name_start); + const field_name = try mod.intern_pool.getOrPutString(gpa, sema.code.nullTerminatedString(field_ptr_extra.field_name_start)); // Validate the field access but ignore the index since we want the tag enum field index. _ = try sema.unionFieldIndex(block, union_ty, field_name, field_src); const air_tags = sema.air_instructions.items(.tag); @@ -4444,6 +4479,7 @@ fn validateStructInit( ) CompileError!void { const mod = sema.mod; const gpa = sema.gpa; + const ip = &mod.intern_pool; // Maps field index to field_ptr index of where it was already initialized. 
const found_fields = try gpa.alloc(Zir.Inst.Index, struct_ty.structFieldCount(mod)); @@ -4457,7 +4493,10 @@ fn validateStructInit( const field_src: LazySrcLoc = .{ .node_offset_initializer = field_ptr_data.src_node }; const field_ptr_extra = sema.code.extraData(Zir.Inst.Field, field_ptr_data.payload_index).data; struct_ptr_zir_ref = field_ptr_extra.lhs; - const field_name = sema.code.nullTerminatedString(field_ptr_extra.field_name_start); + const field_name = try ip.getOrPutString( + gpa, + sema.code.nullTerminatedString(field_ptr_extra.field_name_start), + ); const field_index = if (struct_ty.isTuple(mod)) try sema.tupleFieldIndex(block, struct_ty, field_name, field_src) else @@ -4504,7 +4543,7 @@ fn validateStructInit( } const field_name = struct_ty.structFieldName(i, mod); const template = "missing struct field: {s}"; - const args = .{field_name}; + const args = .{ip.stringToSlice(field_name)}; if (root_msg) |msg| { try sema.errNote(block, init_src, msg, template, args); } else { @@ -4525,8 +4564,7 @@ fn validateStructInit( if (root_msg) |msg| { if (mod.typeToStruct(struct_ty)) |struct_obj| { - const fqn = try struct_obj.getFullyQualifiedName(mod); - defer gpa.free(fqn); + const fqn = ip.stringToSlice(try struct_obj.getFullyQualifiedName(mod)); try mod.errNoteNonLazy( struct_obj.srcLoc(mod), msg, @@ -4649,7 +4687,7 @@ fn validateStructInit( } const field_name = struct_ty.structFieldName(i, mod); const template = "missing struct field: {s}"; - const args = .{field_name}; + const args = .{ip.stringToSlice(field_name)}; if (root_msg) |msg| { try sema.errNote(block, init_src, msg, template, args); } else { @@ -4662,10 +4700,9 @@ fn validateStructInit( if (root_msg) |msg| { if (mod.typeToStruct(struct_ty)) |struct_obj| { - const fqn = try struct_obj.getFullyQualifiedName(sema.mod); - defer gpa.free(fqn); + const fqn = ip.stringToSlice(try struct_obj.getFullyQualifiedName(mod)); try sema.mod.errNoteNonLazy( - struct_obj.srcLoc(sema.mod), + struct_obj.srcLoc(mod), msg, "struct '{s}' declared here", .{fqn}, @@ -4949,7 +4986,7 @@ fn failWithBadMemberAccess( block: *Block, agg_ty: Type, field_src: LazySrcLoc, - field_name: []const u8, + field_name_nts: InternPool.NullTerminatedString, ) CompileError { const mod = sema.mod; const kw_name = switch (agg_ty.zigTypeTag(mod)) { @@ -4959,6 +4996,7 @@ fn failWithBadMemberAccess( .Enum => "enum", else => unreachable, }; + const field_name = mod.intern_pool.stringToSlice(field_name_nts); if (agg_ty.getOwnerDeclOrNull(mod)) |some| if (sema.mod.declIsRoot(some)) { return sema.fail(block, field_src, "root struct of file '{}' has no member named '{s}'", .{ agg_ty.fmt(sema.mod), field_name, @@ -4980,22 +5018,23 @@ fn failWithBadStructFieldAccess( block: *Block, struct_obj: *Module.Struct, field_src: LazySrcLoc, - field_name: []const u8, + field_name: InternPool.NullTerminatedString, ) CompileError { + const mod = sema.mod; const gpa = sema.gpa; + const ip = &mod.intern_pool; - const fqn = try struct_obj.getFullyQualifiedName(sema.mod); - defer gpa.free(fqn); + const fqn = ip.stringToSlice(try struct_obj.getFullyQualifiedName(mod)); const msg = msg: { const msg = try sema.errMsg( block, field_src, "no field named '{s}' in struct '{s}'", - .{ field_name, fqn }, + .{ ip.stringToSlice(field_name), fqn }, ); errdefer msg.destroy(gpa); - try sema.mod.errNoteNonLazy(struct_obj.srcLoc(sema.mod), msg, "struct declared here", .{}); + try mod.errNoteNonLazy(struct_obj.srcLoc(mod), msg, "struct declared here", .{}); break :msg msg; }; return 
sema.failWithOwnedErrorMsg(msg); @@ -5006,22 +5045,23 @@ fn failWithBadUnionFieldAccess( block: *Block, union_obj: *Module.Union, field_src: LazySrcLoc, - field_name: []const u8, + field_name: InternPool.NullTerminatedString, ) CompileError { + const mod = sema.mod; const gpa = sema.gpa; + const ip = &mod.intern_pool; - const fqn = try union_obj.getFullyQualifiedName(sema.mod); - defer gpa.free(fqn); + const fqn = ip.stringToSlice(try union_obj.getFullyQualifiedName(mod)); const msg = msg: { const msg = try sema.errMsg( block, field_src, "no field named '{s}' in union '{s}'", - .{ field_name, fqn }, + .{ ip.stringToSlice(field_name), fqn }, ); errdefer msg.destroy(gpa); - try sema.mod.errNoteNonLazy(union_obj.srcLoc(sema.mod), msg, "union declared here", .{}); + try mod.errNoteNonLazy(union_obj.srcLoc(mod), msg, "union declared here", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); @@ -5772,7 +5812,7 @@ fn zirExport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const options_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; - const decl_name = sema.code.nullTerminatedString(extra.decl_name); + const decl_name = try mod.intern_pool.getOrPutString(mod.gpa, sema.code.nullTerminatedString(extra.decl_name)); const decl_index = if (extra.namespace != .none) index_blk: { const container_ty = try sema.resolveType(block, operand_src, extra.namespace); const container_namespace = container_ty.getNamespaceIndex(mod).unwrap().?; @@ -5875,19 +5915,14 @@ pub fn analyzeExport( const new_export = try gpa.create(Export); errdefer gpa.destroy(new_export); - const symbol_name = try gpa.dupe(u8, borrowed_options.name); - errdefer gpa.free(symbol_name); - - const section: ?[]const u8 = if (borrowed_options.section) |s| try gpa.dupe(u8, s) else null; - errdefer if (section) |s| gpa.free(s); + const symbol_name = try mod.intern_pool.getOrPutString(gpa, borrowed_options.name); + const section = try mod.intern_pool.getOrPutStringOpt(gpa, borrowed_options.section); new_export.* = .{ - .options = .{ - .name = symbol_name, - .linkage = borrowed_options.linkage, - .section = section, - .visibility = borrowed_options.visibility, - }, + .name = symbol_name, + .linkage = borrowed_options.linkage, + .section = section, + .visibility = borrowed_options.visibility, .src = src, .owner_decl = sema.owner_decl_index, .src_decl = block.src_decl, @@ -6121,23 +6156,25 @@ fn addDbgVar( } fn zirDeclRef(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].str_tok; const src = inst_data.src(); - const decl_name = inst_data.get(sema.code); + const decl_name = try mod.intern_pool.getOrPutString(sema.gpa, inst_data.get(sema.code)); const decl_index = try sema.lookupIdentifier(block, src, decl_name); try sema.addReferencedBy(block, src, decl_index); return sema.analyzeDeclRef(decl_index); } fn zirDeclVal(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].str_tok; const src = inst_data.src(); - const decl_name = inst_data.get(sema.code); + const decl_name = try mod.intern_pool.getOrPutString(sema.gpa, inst_data.get(sema.code)); const decl = try sema.lookupIdentifier(block, src, decl_name); return sema.analyzeDeclVal(block, src, decl); } -fn 
lookupIdentifier(sema: *Sema, block: *Block, src: LazySrcLoc, name: []const u8) !Decl.Index { +fn lookupIdentifier(sema: *Sema, block: *Block, src: LazySrcLoc, name: InternPool.NullTerminatedString) !Decl.Index { const mod = sema.mod; var namespace = block.namespace; while (true) { @@ -6156,7 +6193,7 @@ fn lookupInNamespace( block: *Block, src: LazySrcLoc, namespace_index: Namespace.Index, - ident_name: []const u8, + ident_name: InternPool.NullTerminatedString, observe_usingnamespace: bool, ) CompileError!?Decl.Index { const mod = sema.mod; @@ -6249,9 +6286,6 @@ fn lookupInNamespace( return decl_index; } - log.debug("{*} ({s}) depends on non-existence of '{s}' in {*} ({s})", .{ - sema.owner_decl, sema.owner_decl.name, ident_name, namespace_decl, namespace_decl.name, - }); // TODO This dependency is too strong. Really, it should only be a dependency // on the non-existence of `ident_name` in the namespace. We can lessen the number of // outdated declarations by making this dependency more sophisticated. @@ -6276,10 +6310,12 @@ fn funcDeclSrc(sema: *Sema, func_inst: Air.Inst.Ref) !?*Decl { } pub fn analyzeSaveErrRetIndex(sema: *Sema, block: *Block) SemaError!Air.Inst.Ref { + const mod = sema.mod; + const gpa = sema.gpa; const src = sema.src; - if (!sema.mod.backendSupportsFeature(.error_return_trace)) return .none; - if (!sema.mod.comp.bin_file.options.error_return_tracing) return .none; + if (!mod.backendSupportsFeature(.error_return_trace)) return .none; + if (!mod.comp.bin_file.options.error_return_tracing) return .none; if (block.is_comptime) return .none; @@ -6292,7 +6328,8 @@ pub fn analyzeSaveErrRetIndex(sema: *Sema, block: *Block) SemaError!Air.Inst.Ref error.NeededSourceLocation, error.GenericPoison, error.ComptimeReturn, error.ComptimeBreak => unreachable, else => |e| return e, }; - const field_index = sema.structFieldIndex(block, stack_trace_ty, "index", src) catch |err| switch (err) { + const field_name = try mod.intern_pool.getOrPutString(gpa, "index"); + const field_index = sema.structFieldIndex(block, stack_trace_ty, field_name, src) catch |err| switch (err) { error.NeededSourceLocation, error.GenericPoison, error.ComptimeReturn, error.ComptimeBreak => unreachable, else => |e| return e, }; @@ -6316,6 +6353,7 @@ fn popErrorReturnTrace( saved_error_trace_index: Air.Inst.Ref, ) CompileError!void { const mod = sema.mod; + const gpa = sema.gpa; var is_non_error: ?bool = null; var is_non_error_inst: Air.Inst.Ref = undefined; if (operand != .none) { @@ -6332,13 +6370,14 @@ fn popErrorReturnTrace( const stack_trace_ty = try sema.resolveTypeFields(unresolved_stack_trace_ty); const ptr_stack_trace_ty = try mod.singleMutPtrType(stack_trace_ty); const err_return_trace = try block.addTy(.err_return_trace, ptr_stack_trace_ty); - const field_ptr = try sema.structFieldPtr(block, src, err_return_trace, "index", src, stack_trace_ty, true); + const field_name = try mod.intern_pool.getOrPutString(gpa, "index"); + const field_ptr = try sema.structFieldPtr(block, src, err_return_trace, field_name, src, stack_trace_ty, true); try sema.storePtr2(block, src, field_ptr, src, saved_error_trace_index, src, .store); } else if (is_non_error == null) { // The result might be an error. If it is, we leave the error trace alone. If it isn't, we need // to pop any error trace that may have been propagated from our arguments. 
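
Aside on the `popErrorReturnTrace` hunk below: it reserves all `air_extra` and `air_instructions` capacity up front, so the later `addExtraAssumeCapacity` and `AssumeCapacity` appends cannot fail halfway through building the block/cond_br pair. A minimal sketch of that reserve-then-assume pattern; `appendInstWithExtra` and its parameters are hypothetical stand-ins, not the real Sema API.

const std = @import("std");

/// Hypothetical: append an instruction together with its extra payload.
/// All fallible allocation happens in the two ensureUnusedCapacity calls,
/// so on OOM nothing is appended and no half-written state is left behind.
fn appendInstWithExtra(
    gpa: std.mem.Allocator,
    instructions: *std.ArrayListUnmanaged(u32),
    extra: *std.ArrayListUnmanaged(u32),
    inst: u32,
    payload: []const u32,
) !u32 {
    try instructions.ensureUnusedCapacity(gpa, 1);
    try extra.ensureUnusedCapacity(gpa, payload.len);
    // Past this point nothing can fail.
    const payload_index = @intCast(u32, extra.items.len);
    extra.appendSliceAssumeCapacity(payload);
    instructions.appendAssumeCapacity(inst);
    return payload_index;
}
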
- try sema.air_extra.ensureUnusedCapacity(sema.gpa, @typeInfo(Air.Block).Struct.fields.len); + try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len); const cond_block_inst = try block.addInstAsIndex(.{ .tag = .block, .data = .{ @@ -6350,28 +6389,29 @@ fn popErrorReturnTrace( }); var then_block = block.makeSubBlock(); - defer then_block.instructions.deinit(sema.gpa); + defer then_block.instructions.deinit(gpa); // If non-error, then pop the error return trace by restoring the index. const unresolved_stack_trace_ty = try sema.getBuiltinType("StackTrace"); const stack_trace_ty = try sema.resolveTypeFields(unresolved_stack_trace_ty); const ptr_stack_trace_ty = try mod.singleMutPtrType(stack_trace_ty); const err_return_trace = try then_block.addTy(.err_return_trace, ptr_stack_trace_ty); - const field_ptr = try sema.structFieldPtr(&then_block, src, err_return_trace, "index", src, stack_trace_ty, true); + const field_name = try mod.intern_pool.getOrPutString(gpa, "index"); + const field_ptr = try sema.structFieldPtr(&then_block, src, err_return_trace, field_name, src, stack_trace_ty, true); try sema.storePtr2(&then_block, src, field_ptr, src, saved_error_trace_index, src, .store); _ = try then_block.addBr(cond_block_inst, Air.Inst.Ref.void_value); // Otherwise, do nothing var else_block = block.makeSubBlock(); - defer else_block.instructions.deinit(sema.gpa); + defer else_block.instructions.deinit(gpa); _ = try else_block.addBr(cond_block_inst, Air.Inst.Ref.void_value); - try sema.air_extra.ensureUnusedCapacity(sema.gpa, @typeInfo(Air.CondBr).Struct.fields.len + + try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.CondBr).Struct.fields.len + then_block.instructions.items.len + else_block.instructions.items.len + @typeInfo(Air.Block).Struct.fields.len + 1); // +1 for the sole .cond_br instruction in the .block const cond_br_inst = @intCast(Air.Inst.Index, sema.air_instructions.len); - try sema.air_instructions.append(sema.gpa, .{ .tag = .cond_br, .data = .{ .pl_op = .{ + try sema.air_instructions.append(gpa, .{ .tag = .cond_br, .data = .{ .pl_op = .{ .operand = is_non_error_inst, .payload = sema.addExtraAssumeCapacity(Air.CondBr{ .then_body_len = @intCast(u32, then_block.instructions.items.len), @@ -6414,7 +6454,7 @@ fn zirCall( .direct => .{ .direct = try sema.resolveInst(extra.data.callee) }, .field => blk: { const object_ptr = try sema.resolveInst(extra.data.obj_ptr); - const field_name = sema.code.nullTerminatedString(extra.data.field_name_start); + const field_name = try mod.intern_pool.getOrPutString(sema.gpa, sema.code.nullTerminatedString(extra.data.field_name_start)); const field_name_src: LazySrcLoc = .{ .node_offset_field_name = inst_data.src_node }; break :blk try sema.fieldCallBind(block, callee_src, object_ptr, field_name, field_name_src); }, @@ -6509,7 +6549,8 @@ fn zirCall( if (input_is_error or (pop_error_return_trace and modifier != .always_tail and return_ty.isError(mod))) { const unresolved_stack_trace_ty = try sema.getBuiltinType("StackTrace"); const stack_trace_ty = try sema.resolveTypeFields(unresolved_stack_trace_ty); - const field_index = try sema.structFieldIndex(block, stack_trace_ty, "index", call_src); + const field_name = try mod.intern_pool.getOrPutString(sema.gpa, "index"); + const field_index = try sema.structFieldIndex(block, stack_trace_ty, field_name, call_src); // Insert a save instruction before the arg resolution + call instructions we just generated const save_inst = try block.insertInst(block_index, .{ @@ -7436,9 
+7477,10 @@ fn instantiateGenericCall( ) CompileError!Air.Inst.Ref { const mod = sema.mod; const gpa = sema.gpa; + const ip = &mod.intern_pool; const func_val = try sema.resolveConstValue(block, func_src, func, "generic function being called must be comptime-known"); - const module_fn = mod.funcPtr(switch (mod.intern_pool.indexToKey(func_val.toIntern())) { + const module_fn = mod.funcPtr(switch (ip.indexToKey(func_val.toIntern())) { .func => |function| function.index, .ptr => |ptr| mod.declPtr(ptr.addr.decl).val.getFunctionIndex(mod).unwrap().?, else => unreachable, @@ -7567,9 +7609,12 @@ fn instantiateGenericCall( const new_decl_index = try mod.allocateNewDecl(namespace_index, fn_owner_decl.src_node, src_decl.src_scope); const new_decl = mod.declPtr(new_decl_index); // TODO better names for generic function instantiations - const decl_name = try std.fmt.allocPrintZ(gpa, "{s}__anon_{d}", .{ - fn_owner_decl.name, @enumToInt(new_decl_index), - }); + // The ensureUnusedCapacity here protects against fn_owner_decl.name slice being + // reallocated during getOrPutStringFmt. + try ip.string_bytes.ensureUnusedCapacity(gpa, ip.stringToSlice(fn_owner_decl.name).len + 20); + const decl_name = ip.getOrPutStringFmt(gpa, "{s}__anon_{d}", .{ + ip.stringToSlice(fn_owner_decl.name), @enumToInt(new_decl_index), + }) catch unreachable; new_decl.name = decl_name; new_decl.src_line = fn_owner_decl.src_line; new_decl.is_pub = fn_owner_decl.is_pub; @@ -7590,12 +7635,8 @@ fn instantiateGenericCall( assert(new_decl.dependencies.keys().len == 0); try mod.declareDeclDependencyType(new_decl_index, module_fn.owner_decl, .function_body); - var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa); - const new_decl_arena_allocator = new_decl_arena.allocator(); - const new_func = sema.resolveGenericInstantiationType( block, - new_decl_arena_allocator, fn_zir, new_decl, new_decl_index, @@ -7608,7 +7649,6 @@ fn instantiateGenericCall( bound_arg_src, ) catch |err| switch (err) { error.GenericPoison, error.ComptimeReturn => { - new_decl_arena.deinit(); // Resolving the new function type below will possibly declare more decl dependencies // and so we remove them all here in case of error. for (new_decl.dependencies.keys()) |dep_index| { @@ -7623,10 +7663,6 @@ fn instantiateGenericCall( }, else => { assert(mod.monomorphed_funcs.removeContext(new_module_func_index, .{ .mod = mod })); - { - errdefer new_decl_arena.deinit(); - try new_decl.finalizeNewArena(&new_decl_arena); - } // TODO look up the compile error that happened here and attach a note to it // pointing here, at the generic instantiation callsite. 
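
Aside on the `ensureUnusedCapacity` before `getOrPutStringFmt` above: one of the format arguments, `ip.stringToSlice(fn_owner_decl.name)`, points into the very `string_bytes` buffer being appended to, so a mid-format reallocation would invalidate it; reserving the worst case first is what makes the trailing `catch unreachable` sound. A standalone reproduction of the hazard and the fix; the function, its parameters, and the size arithmetic are assumptions for illustration.

const std = @import("std");

/// Hypothetical reproduction: format a slice of `bytes` back into `bytes`.
/// Assumes the first `name_len` bytes already hold the old name.
fn appendDerivedName(
    gpa: std.mem.Allocator,
    bytes: *std.ArrayListUnmanaged(u8),
    name_len: usize,
    id: u32,
) !void {
    const old_name = bytes.items[0..name_len]; // aliases `bytes` itself!
    // Reserve the worst case up front ("__anon_" plus at most 10 decimal
    // digits for a u32), so the writer below never reallocates while
    // `old_name` is still being read.
    try bytes.ensureUnusedCapacity(gpa, name_len + "__anon_".len + 10);
    bytes.writer(gpa).print("{s}__anon_{d}", .{ old_name, id }) catch unreachable;
}
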
if (sema.owner_func) |owner_func| { @@ -7637,9 +7673,7 @@ fn instantiateGenericCall( return err; }, }; - errdefer new_decl_arena.deinit(); - try new_decl.finalizeNewArena(&new_decl_arena); break :callee new_func; } else gop.key_ptr.*; const callee = mod.funcPtr(callee_index); @@ -7729,7 +7763,6 @@ fn instantiateGenericCall( fn resolveGenericInstantiationType( sema: *Sema, block: *Block, - new_decl_arena_allocator: Allocator, fn_zir: Zir, new_decl: *Decl, new_decl_index: Decl.Index, @@ -7755,7 +7788,6 @@ fn resolveGenericInstantiationType( .mod = mod, .gpa = gpa, .arena = sema.arena, - .perm_arena = new_decl_arena_allocator, .code = fn_zir, .owner_decl = new_decl, .owner_decl_index = new_decl_index, @@ -7764,7 +7796,8 @@ fn resolveGenericInstantiationType( .fn_ret_ty = Type.void, .owner_func = null, .owner_func_index = .none, - .comptime_args = try new_decl_arena_allocator.alloc(TypedValue, uncasted_args.len), + // TODO: fully migrate functions into InternPool + .comptime_args = try mod.tmp_hack_arena.allocator().alloc(TypedValue, uncasted_args.len), .comptime_args_fn_inst = module_fn.zir_body_inst, .preallocated_new_func = new_module_func.toOptional(), .is_generic_instantiation = true, @@ -7931,10 +7964,6 @@ fn resolveGenericInstantiationType( new_decl.owns_tv = true; new_decl.analysis = .complete; - log.debug("generic function '{s}' instantiated with type {}", .{ - new_decl.name, new_decl.ty.fmtDebug(), - }); - // Queue up a `codegen_func` work item for the new Fn. The `comptime_args` field // will be populated, ensuring it will have `analyzeBody` called with the ZIR // parameters mapped appropriately. @@ -8134,13 +8163,13 @@ fn zirErrorValue(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! _ = block; const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].str_tok; - const name = inst_data.get(sema.code); + const name = try mod.intern_pool.getOrPutString(sema.gpa, inst_data.get(sema.code)); + _ = try mod.getErrorValue(name); // Create an error set type with only this error value, and return the value. 
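
Aside on the error-value hunks in this region: `getErrorValue` now takes the interned name directly, and, as the `zirErrorToInt`/`zirIntToError` hunks below suggest, the error integer is simply the name's index in `mod.global_error_set` (note that `int == 0` is rejected and `keys()[int]` is indexed directly, which implies an entry at index 0 is reserved as "no error"). A hypothetical miniature of that encoding, assuming names are already interned to `u32` and that slot 0 holds a sentinel:

const std = @import("std");

/// Hypothetical miniature: interned error names map to error integers by
/// their insertion index; index 0 is reserved as "no error".
const GlobalErrorSet = struct {
    map: std.AutoArrayHashMapUnmanaged(u32, void) = .{},

    fn init(gpa: std.mem.Allocator) !GlobalErrorSet {
        var set = GlobalErrorSet{};
        try set.map.put(gpa, 0, {}); // assumed sentinel occupying index 0
        return set;
    }

    /// Sketch of `mod.getErrorValue`: intern the name, return its integer.
    fn getErrorValue(set: *GlobalErrorSet, gpa: std.mem.Allocator, name: u32) !u32 {
        const gop = try set.map.getOrPut(gpa, name);
        return @intCast(u32, gop.index);
    }
};
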
- const kv = try sema.mod.getErrorValue(name); - const error_set_type = try mod.singleErrorSetType(kv.key); + const error_set_type = try mod.singleErrorSetTypeNts(name); return sema.addConstant(error_set_type, (try mod.intern(.{ .err = .{ .ty = error_set_type.toIntern(), - .name = try mod.intern_pool.getOrPutString(sema.gpa, kv.key), + .name = name, } })).toValue()); } @@ -8162,7 +8191,7 @@ fn zirErrorToInt(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat const err_name = mod.intern_pool.indexToKey(val.toIntern()).err.name; return sema.addConstant(Type.err_int, try mod.intValue( Type.err_int, - (try mod.getErrorValue(mod.intern_pool.stringToSlice(err_name))).value, + try mod.getErrorValue(err_name), )); } @@ -8173,8 +8202,8 @@ fn zirErrorToInt(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat switch (names.len) { 0 => return sema.addConstant(Type.err_int, try mod.intValue(Type.err_int, 0)), 1 => { - const name = mod.intern_pool.stringToSlice(names[0]); - return sema.addIntUnsigned(Type.err_int, mod.global_error_set.get(name).?); + const int = @intCast(Module.ErrorInt, mod.global_error_set.getIndex(names[0]).?); + return sema.addIntUnsigned(Type.err_int, int); }, else => {}, } @@ -8197,11 +8226,11 @@ fn zirIntToError(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat if (try sema.resolveDefinedValue(block, operand_src, operand)) |value| { const int = try sema.usizeCast(block, operand_src, value.toUnsignedInt(mod)); - if (int > sema.mod.global_error_set.count() or int == 0) + if (int > mod.global_error_set.count() or int == 0) return sema.fail(block, operand_src, "integer value '{d}' represents no error", .{int}); return sema.addConstant(Type.anyerror, (try mod.intern(.{ .err = .{ .ty = .anyerror_type, - .name = mod.intern_pool.getString(sema.mod.error_name_list.items[int]).unwrap().?, + .name = mod.global_error_set.keys()[int], } })).toValue()); } try sema.requireRuntimeBlock(block, src, operand_src); @@ -8917,7 +8946,7 @@ fn handleExternLibName( const FuncLinkSection = union(enum) { generic, default, - explicit: []const u8, + explicit: InternPool.NullTerminatedString, }; fn funcCommon( @@ -9186,9 +9215,9 @@ fn funcCommon( }; sema.owner_decl.@"linksection" = switch (section) { - .generic => undefined, - .default => null, - .explicit => |section_name| try sema.perm_arena.dupeZ(u8, section_name), + .generic => .none, + .default => .none, + .explicit => |section_name| section_name.toOptional(), }; sema.owner_decl.@"align" = alignment orelse 0; sema.owner_decl.@"addrspace" = address_space orelse .generic; @@ -9572,11 +9601,12 @@ fn zirFieldVal(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const field_name_src: LazySrcLoc = .{ .node_offset_field_name = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Field, inst_data.payload_index).data; - const field_name = sema.code.nullTerminatedString(extra.field_name_start); + const field_name = try mod.intern_pool.getOrPutString(sema.gpa, sema.code.nullTerminatedString(extra.field_name_start)); const object = try sema.resolveInst(extra.lhs); return sema.fieldVal(block, src, object, field_name, field_name_src); } @@ -9585,11 +9615,12 @@ fn zirFieldPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index, initializing: b const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = 
sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const field_name_src: LazySrcLoc = .{ .node_offset_field_name = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Field, inst_data.payload_index).data; - const field_name = sema.code.nullTerminatedString(extra.field_name_start); + const field_name = try mod.intern_pool.getOrPutString(sema.gpa, sema.code.nullTerminatedString(extra.field_name_start)); const object_ptr = try sema.resolveInst(extra.lhs); return sema.fieldPtr(block, src, object_ptr, field_name, field_name_src, initializing); } @@ -9603,7 +9634,7 @@ fn zirFieldValNamed(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErr const field_name_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.FieldNamed, inst_data.payload_index).data; const object = try sema.resolveInst(extra.lhs); - const field_name = try sema.resolveConstString(block, field_name_src, extra.field_name, "field name must be comptime-known"); + const field_name = try sema.resolveConstStringIntern(block, field_name_src, extra.field_name, "field name must be comptime-known"); return sema.fieldVal(block, src, object, field_name, field_name_src); } @@ -9616,7 +9647,7 @@ fn zirFieldPtrNamed(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErr const field_name_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.FieldNamed, inst_data.payload_index).data; const object_ptr = try sema.resolveInst(extra.lhs); - const field_name = try sema.resolveConstString(block, field_name_src, extra.field_name, "field name must be comptime-known"); + const field_name = try sema.resolveConstStringIntern(block, field_name_src, extra.field_name, "field name must be comptime-known"); return sema.fieldPtr(block, src, object_ptr, field_name, field_name_src, false); } @@ -10434,6 +10465,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const mod = sema.mod; const gpa = sema.gpa; + const ip = &mod.intern_pool; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const src_node_offset = inst_data.src_node; @@ -10605,7 +10637,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError i, msg, "unhandled enumeration value: '{s}'", - .{field_name}, + .{ip.stringToSlice(field_name)}, ); } try mod.errNoteNonLazy( @@ -10689,7 +10721,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError errdefer if (maybe_msg) |msg| msg.destroy(sema.gpa); for (operand_ty.errorSetNames(mod)) |error_name_ip| { - const error_name = mod.intern_pool.stringToSlice(error_name_ip); + const error_name = ip.stringToSlice(error_name_ip); if (!seen_errors.contains(error_name) and special_prong != .@"else") { const msg = maybe_msg orelse blk: { maybe_msg = try sema.errMsg( @@ -10758,7 +10790,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError var names: Module.Fn.InferredErrorSet.NameMap = .{}; try names.ensureUnusedCapacity(sema.arena, error_names.len); for (error_names) |error_name_ip| { - const error_name = mod.intern_pool.stringToSlice(error_name_ip); + const error_name = ip.stringToSlice(error_name_ip); if (seen_errors.contains(error_name)) continue; names.putAssumeCapacityNoClobber(error_name_ip, {}); @@ -12062,7 +12094,7 @@ fn zirHasField(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const ty_src: 
LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const name_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; const unresolved_ty = try sema.resolveType(block, ty_src, extra.lhs); - const field_name = try sema.resolveConstString(block, name_src, extra.rhs, "field name must be comptime-known"); + const field_name = try sema.resolveConstStringIntern(block, name_src, extra.rhs, "field name must be comptime-known"); const ty = try sema.resolveTypeFields(unresolved_ty); const ip = &mod.intern_pool; @@ -12070,19 +12102,17 @@ fn zirHasField(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai switch (ip.indexToKey(ty.toIntern())) { .ptr_type => |ptr_type| switch (ptr_type.flags.size) { .Slice => { - if (mem.eql(u8, field_name, "ptr")) break :hf true; - if (mem.eql(u8, field_name, "len")) break :hf true; + if (ip.stringEqlSlice(field_name, "ptr")) break :hf true; + if (ip.stringEqlSlice(field_name, "len")) break :hf true; break :hf false; }, else => {}, }, .anon_struct_type => |anon_struct| { if (anon_struct.names.len != 0) { - // If the string is not interned, then the field certainly is not present. - const name_interned = ip.getString(field_name).unwrap() orelse break :hf false; - break :hf mem.indexOfScalar(InternPool.NullTerminatedString, anon_struct.names, name_interned) != null; + break :hf mem.indexOfScalar(InternPool.NullTerminatedString, anon_struct.names, field_name) != null; } else { - const field_index = std.fmt.parseUnsigned(u32, field_name, 10) catch break :hf false; + const field_index = std.fmt.parseUnsigned(u32, ip.stringToSlice(field_name), 10) catch break :hf false; break :hf field_index < ty.structFieldCount(mod); } }, @@ -12097,11 +12127,9 @@ fn zirHasField(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai break :hf union_obj.fields.contains(field_name); }, .enum_type => |enum_type| { - // If the string is not interned, then the field certainly is not present. - const name_interned = ip.getString(field_name).unwrap() orelse break :hf false; - break :hf enum_type.nameIndex(ip, name_interned) != null; + break :hf enum_type.nameIndex(ip, field_name) != null; }, - .array_type => break :hf mem.eql(u8, field_name, "len"), + .array_type => break :hf ip.stringEqlSlice(field_name, "len"), else => {}, } return sema.fail(block, ty_src, "type '{}' does not support '@hasField'", .{ @@ -12123,7 +12151,7 @@ fn zirHasDecl(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const lhs_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const rhs_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; const container_type = try sema.resolveType(block, lhs_src, extra.lhs); - const decl_name = try sema.resolveConstString(block, rhs_src, extra.rhs, "decl name must be comptime-known"); + const decl_name = try sema.resolveConstStringIntern(block, rhs_src, extra.rhs, "decl name must be comptime-known"); try sema.checkNamespaceType(block, lhs_src, container_type); @@ -12218,14 +12246,12 @@ fn zirEmbedFile(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A fn zirRetErrValueCode(sema: *Sema, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].str_tok; - const err_name = inst_data.get(sema.code); - - // Return the error code from the function. 
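
Aside on the `zirHasField` hunk above: tuple fields need no name table at all, because a tuple's fields are named by their decimal index. With names interned, the anon-struct arm becomes a plain `indexOfScalar` over integer keys, and the tuple arm reduces to `parseUnsigned` plus a bounds test. The tuple logic, restated as a standalone sketch:

const std = @import("std");

/// Mirrors the tuple arm of zirHasField above: a tuple "has" a field iff
/// the name parses as a decimal index below the field count.
fn tupleHasField(field_name: []const u8, field_count: u32) bool {
    const index = std.fmt.parseUnsigned(u32, field_name, 10) catch return false;
    return index < field_count;
}

test "tupleHasField" {
    try std.testing.expect(tupleHasField("0", 2));
    try std.testing.expect(!tupleHasField("2", 2));
    try std.testing.expect(!tupleHasField("len", 2));
}
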
- const kv = try mod.getErrorValue(err_name); - const error_set_type = try mod.singleErrorSetType(kv.key); + const name = try mod.intern_pool.getOrPutString(sema.gpa, inst_data.get(sema.code)); + _ = try mod.getErrorValue(name); + const error_set_type = try mod.singleErrorSetTypeNts(name); return sema.addConstant(error_set_type, (try mod.intern(.{ .err = .{ .ty = error_set_type.toIntern(), - .name = mod.intern_pool.getString(kv.key).unwrap().?, + .name = name, } })).toValue()); } @@ -15730,12 +15756,7 @@ fn zirThis( return sema.analyzeDeclVal(block, src, this_decl_index); } -fn zirClosureCapture( - sema: *Sema, - block: *Block, - inst: Zir.Inst.Index, -) CompileError!void { - // TODO: Compile error when closed over values are modified +fn zirClosureCapture(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { const inst_data = sema.code.instructions.items(.data)[inst].un_tok; // Closures are not necessarily constant values. For example, the // code might do something like this: @@ -15754,13 +15775,8 @@ fn zirClosureCapture( try block.wip_capture_scope.captures.putNoClobber(sema.gpa, inst, capture); } -fn zirClosureGet( - sema: *Sema, - block: *Block, - inst: Zir.Inst.Index, -) CompileError!Air.Inst.Ref { +fn zirClosureGet(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const mod = sema.mod; - // TODO CLOSURE: Test this with inline functions const inst_data = sema.code.instructions.items(.data)[inst].inst_node; var scope: *CaptureScope = mod.declPtr(block.src_decl).src_scope.?; // Note: The target closure must be in this scope list. @@ -15896,7 +15912,7 @@ fn zirBuiltinSrc( const func_name_val = blk: { var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); - const name = mem.span(fn_owner_decl.name); + const name = mod.intern_pool.stringToSlice(fn_owner_decl.name); const new_decl_ty = try mod.arrayType(.{ .len = name.len, .child = .u8_type, @@ -15965,6 +15981,7 @@ fn zirBuiltinSrc( fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const mod = sema.mod; const gpa = sema.gpa; + const ip = &mod.intern_pool; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const ty = try sema.resolveType(block, src, inst_data.operand); @@ -15995,7 +16012,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, type_info_ty.getNamespaceIndex(mod).unwrap().?, - "Fn", + try ip.getOrPutString(gpa, "Fn"), )).?; try mod.declareDeclDependency(sema.owner_decl_index, fn_info_decl_index); try sema.ensureDeclAnalyzed(fn_info_decl_index); @@ -16006,7 +16023,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, fn_info_ty.getNamespaceIndex(mod).unwrap().?, - "Param", + try ip.getOrPutString(gpa, "Param"), )).?; try mod.declareDeclDependency(sema.owner_decl_index, param_info_decl_index); try sema.ensureDeclAnalyzed(param_info_decl_index); @@ -16018,8 +16035,8 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const info = mod.typeToFunc(ty).?; const param_ty = info.param_types[i]; const is_generic = param_ty == .generic_poison_type; - const param_ty_val = try mod.intern_pool.get(gpa, .{ .opt = .{ - .ty = try mod.intern_pool.get(gpa, .{ .opt_type = .type_type }), + const param_ty_val = try ip.get(gpa, .{ .opt = .{ + .ty = try ip.get(gpa, .{ .opt_type = .type_type }), .val = if (is_generic) .none else param_ty, } }); @@ -16070,7 +16087,7 @@ fn zirTypeInfo(sema: 
*Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const info = mod.typeToFunc(ty).?; const ret_ty_opt = try mod.intern(.{ .opt = .{ - .ty = try mod.intern_pool.get(gpa, .{ .opt_type = .type_type }), + .ty = try ip.get(gpa, .{ .opt_type = .type_type }), .val = if (info.return_type == .generic_poison_type) .none else info.return_type, } }); @@ -16104,7 +16121,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, type_info_ty.getNamespaceIndex(mod).unwrap().?, - "Int", + try ip.getOrPutString(gpa, "Int"), )).?; try mod.declareDeclDependency(sema.owner_decl_index, int_info_decl_index); try sema.ensureDeclAnalyzed(int_info_decl_index); @@ -16133,7 +16150,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, type_info_ty.getNamespaceIndex(mod).unwrap().?, - "Float", + try ip.getOrPutString(gpa, "Float"), )).?; try mod.declareDeclDependency(sema.owner_decl_index, float_info_decl_index); try sema.ensureDeclAnalyzed(float_info_decl_index); @@ -16166,7 +16183,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, (try sema.getBuiltinType("Type")).getNamespaceIndex(mod).unwrap().?, - "Pointer", + try ip.getOrPutString(gpa, "Pointer"), )).?; try mod.declareDeclDependency(sema.owner_decl_index, decl_index); try sema.ensureDeclAnalyzed(decl_index); @@ -16178,7 +16195,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, pointer_ty.getNamespaceIndex(mod).unwrap().?, - "Size", + try ip.getOrPutString(gpa, "Size"), )).?; try mod.declareDeclDependency(sema.owner_decl_index, decl_index); try sema.ensureDeclAnalyzed(decl_index); @@ -16219,7 +16236,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, type_info_ty.getNamespaceIndex(mod).unwrap().?, - "Array", + try ip.getOrPutString(gpa, "Array"), )).?; try mod.declareDeclDependency(sema.owner_decl_index, array_field_ty_decl_index); try sema.ensureDeclAnalyzed(array_field_ty_decl_index); @@ -16251,7 +16268,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, type_info_ty.getNamespaceIndex(mod).unwrap().?, - "Vector", + try ip.getOrPutString(gpa, "Vector"), )).?; try mod.declareDeclDependency(sema.owner_decl_index, vector_field_ty_decl_index); try sema.ensureDeclAnalyzed(vector_field_ty_decl_index); @@ -16281,7 +16298,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, type_info_ty.getNamespaceIndex(mod).unwrap().?, - "Optional", + try ip.getOrPutString(gpa, "Optional"), )).?; try mod.declareDeclDependency(sema.owner_decl_index, optional_field_ty_decl_index); try sema.ensureDeclAnalyzed(optional_field_ty_decl_index); @@ -16312,7 +16329,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, type_info_ty.getNamespaceIndex(mod).unwrap().?, - "Error", + try ip.getOrPutString(gpa, "Error"), )).?; try mod.declareDeclDependency(sema.owner_decl_index, set_field_ty_decl_index); try sema.ensureDeclAnalyzed(set_field_ty_decl_index); @@ -16332,7 +16349,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const names = ty.errorSetNames(mod); const vals = try sema.arena.alloc(InternPool.Index, names.len); for (vals, names) |*field_val, name_ip| { - const name = mod.intern_pool.stringToSlice(name_ip); + const name = ip.stringToSlice(name_ip); const name_val = v: { var anon_decl = try 
block.startAnonDecl(); defer anon_decl.deinit(); @@ -16415,7 +16432,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, type_info_ty.getNamespaceIndex(mod).unwrap().?, - "ErrorUnion", + try ip.getOrPutString(gpa, "ErrorUnion"), )).?; try mod.declareDeclDependency(sema.owner_decl_index, error_union_field_ty_decl_index); try sema.ensureDeclAnalyzed(error_union_field_ty_decl_index); @@ -16440,7 +16457,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai }, .Enum => { // TODO: look into memoizing this result. - const enum_type = mod.intern_pool.indexToKey(ty.toIntern()).enum_type; + const enum_type = ip.indexToKey(ty.toIntern()).enum_type; const is_exhaustive = Value.makeBool(enum_type.tag_mode != .nonexhaustive); @@ -16452,7 +16469,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, type_info_ty.getNamespaceIndex(mod).unwrap().?, - "EnumField", + try ip.getOrPutString(gpa, "EnumField"), )).?; try mod.declareDeclDependency(sema.owner_decl_index, enum_field_ty_decl_index); try sema.ensureDeclAnalyzed(enum_field_ty_decl_index); @@ -16462,8 +16479,8 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const enum_field_vals = try sema.arena.alloc(InternPool.Index, enum_type.names.len); for (enum_field_vals, 0..) |*field_val, i| { - const name_ip = mod.intern_pool.indexToKey(ty.toIntern()).enum_type.names[i]; - const name = mod.intern_pool.stringToSlice(name_ip); + const name_ip = ip.indexToKey(ty.toIntern()).enum_type.names[i]; + const name = ip.stringToSlice(name_ip); const name_val = v: { var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); @@ -16532,7 +16549,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, type_info_ty.getNamespaceIndex(mod).unwrap().?, - "Enum", + try ip.getOrPutString(gpa, "Enum"), )).?; try mod.declareDeclDependency(sema.owner_decl_index, type_enum_ty_decl_index); try sema.ensureDeclAnalyzed(type_enum_ty_decl_index); @@ -16570,7 +16587,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, type_info_ty.getNamespaceIndex(mod).unwrap().?, - "Union", + try ip.getOrPutString(gpa, "Union"), )).?; try mod.declareDeclDependency(sema.owner_decl_index, type_union_ty_decl_index); try sema.ensureDeclAnalyzed(type_union_ty_decl_index); @@ -16583,7 +16600,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, type_info_ty.getNamespaceIndex(mod).unwrap().?, - "UnionField", + try ip.getOrPutString(gpa, "UnionField"), )).?; try mod.declareDeclDependency(sema.owner_decl_index, union_field_ty_decl_index); try sema.ensureDeclAnalyzed(union_field_ty_decl_index); @@ -16601,7 +16618,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai for (union_field_vals, 0..) 
|*field_val, i| { const field = union_fields.values()[i]; - const name = union_fields.keys()[i]; + const name = ip.stringToSlice(union_fields.keys()[i]); const name_val = v: { var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); @@ -16682,7 +16699,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, (try sema.getBuiltinType("Type")).getNamespaceIndex(mod).unwrap().?, - "ContainerLayout", + try ip.getOrPutString(gpa, "ContainerLayout"), )).?; try mod.declareDeclDependency(sema.owner_decl_index, decl_index); try sema.ensureDeclAnalyzed(decl_index); @@ -16721,7 +16738,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, type_info_ty.getNamespaceIndex(mod).unwrap().?, - "Struct", + try ip.getOrPutString(gpa, "Struct"), )).?; try mod.declareDeclDependency(sema.owner_decl_index, type_struct_ty_decl_index); try sema.ensureDeclAnalyzed(type_struct_ty_decl_index); @@ -16734,7 +16751,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, type_info_ty.getNamespaceIndex(mod).unwrap().?, - "StructField", + try ip.getOrPutString(gpa, "StructField"), )).?; try mod.declareDeclDependency(sema.owner_decl_index, struct_field_ty_decl_index); try sema.ensureDeclAnalyzed(struct_field_ty_decl_index); @@ -16749,11 +16766,11 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai var struct_field_vals: []InternPool.Index = &.{}; defer gpa.free(struct_field_vals); fv: { - const struct_type = switch (mod.intern_pool.indexToKey(struct_ty.toIntern())) { + const struct_type = switch (ip.indexToKey(struct_ty.toIntern())) { .anon_struct_type => |tuple| { struct_field_vals = try gpa.alloc(InternPool.Index, tuple.types.len); for (struct_field_vals, 0..) 
|*struct_field_val, i| { - const anon_struct_type = mod.intern_pool.indexToKey(struct_ty.toIntern()).anon_struct_type; + const anon_struct_type = ip.indexToKey(struct_ty.toIntern()).anon_struct_type; const field_ty = anon_struct_type.types[i]; const field_val = anon_struct_type.values[i]; const name_val = v: { @@ -16761,7 +16778,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai defer anon_decl.deinit(); const bytes = if (tuple.names.len != 0) // https://github.com/ziglang/zig/issues/15709 - @as([]const u8, mod.intern_pool.stringToSlice(tuple.names[i])) + @as([]const u8, ip.stringToSlice(tuple.names[i])) else try std.fmt.allocPrint(sema.arena, "{d}", .{i}); const new_decl_ty = try mod.arrayType(.{ @@ -16815,7 +16832,8 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai struct_field_vals, struct_obj.fields.keys(), struct_obj.fields.values(), - ) |*field_val, name, field| { + ) |*field_val, name_nts, field| { + const name = ip.stringToSlice(name_nts); const name_val = v: { var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); @@ -16838,10 +16856,10 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } }); }; - const opt_default_val = if (field.default_val.toIntern() == .unreachable_value) + const opt_default_val = if (field.default_val == .none) null else - field.default_val; + field.default_val.toValue(); const default_val_ptr = try sema.optRefValue(block, field.ty, opt_default_val); const alignment = field.alignment(mod, layout); @@ -16908,7 +16926,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, (try sema.getBuiltinType("Type")).getNamespaceIndex(mod).unwrap().?, - "ContainerLayout", + try ip.getOrPutString(gpa, "ContainerLayout"), )).?; try mod.declareDeclDependency(sema.owner_decl_index, decl_index); try sema.ensureDeclAnalyzed(decl_index); @@ -16945,7 +16963,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, type_info_ty.getNamespaceIndex(mod).unwrap().?, - "Opaque", + try ip.getOrPutString(gpa, "Opaque"), )).?; try mod.declareDeclDependency(sema.owner_decl_index, type_opaque_ty_decl_index); try sema.ensureDeclAnalyzed(type_opaque_ty_decl_index); @@ -16982,6 +17000,8 @@ fn typeInfoDecls( opt_namespace: Module.Namespace.OptionalIndex, ) CompileError!InternPool.Index { const mod = sema.mod; + const gpa = sema.gpa; + var decls_anon_decl = try block.startAnonDecl(); defer decls_anon_decl.deinit(); @@ -16990,7 +17010,7 @@ fn typeInfoDecls( block, src, type_info_ty.getNamespaceIndex(mod).unwrap().?, - "Declaration", + try mod.intern_pool.getOrPutString(gpa, "Declaration"), )).?; try mod.declareDeclDependency(sema.owner_decl_index, declaration_ty_decl_index); try sema.ensureDeclAnalyzed(declaration_ty_decl_index); @@ -16999,10 +17019,10 @@ fn typeInfoDecls( }; try sema.queueFullTypeResolution(declaration_ty); - var decl_vals = std.ArrayList(InternPool.Index).init(sema.gpa); + var decl_vals = std.ArrayList(InternPool.Index).init(gpa); defer decl_vals.deinit(); - var seen_namespaces = std.AutoHashMap(*Namespace, void).init(sema.gpa); + var seen_namespaces = std.AutoHashMap(*Namespace, void).init(gpa); defer seen_namespaces.deinit(); if (opt_namespace.unwrap()) |namespace_index| { @@ -17061,7 +17081,7 @@ fn typeInfoNamespaceDecls( const name_val = v: { var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); - const name = mem.span(decl.name); + const name = 
mod.intern_pool.stringToSlice(decl.name); const new_decl_ty = try mod.arrayType(.{ .len = name.len, .child = .u8_type, @@ -17696,15 +17716,14 @@ fn zirRetErrValue( ) CompileError!Zir.Inst.Index { const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].str_tok; - const err_name = inst_data.get(sema.code); + const err_name = try mod.intern_pool.getOrPutString(sema.gpa, inst_data.get(sema.code)); + _ = try mod.getErrorValue(err_name); const src = inst_data.src(); - // Return the error code from the function. - const kv = try mod.getErrorValue(err_name); - const error_set_type = try mod.singleErrorSetType(err_name); + const error_set_type = try mod.singleErrorSetTypeNts(err_name); const result_inst = try sema.addConstant(error_set_type, (try mod.intern(.{ .err = .{ .ty = error_set_type.toIntern(), - .name = try mod.intern_pool.getOrPutString(sema.gpa, kv.key), + .name = err_name, } })).toValue()); return sema.analyzeRet(block, result_inst, src); } @@ -18177,7 +18196,7 @@ fn zirUnionInit(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A const init_src: LazySrcLoc = .{ .node_offset_builtin_call_arg2 = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.UnionInit, inst_data.payload_index).data; const union_ty = try sema.resolveType(block, ty_src, extra.union_type); - const field_name = try sema.resolveConstString(block, field_src, extra.field_name, "name of field being initialized must be comptime-known"); + const field_name = try sema.resolveConstStringIntern(block, field_src, extra.field_name, "name of field being initialized must be comptime-known"); const init = try sema.resolveInst(extra.init); return sema.unionInit(block, init, init_src, union_ty, ty_src, field_name, field_src); } @@ -18189,7 +18208,7 @@ fn unionInit( init_src: LazySrcLoc, union_ty: Type, union_ty_src: LazySrcLoc, - field_name: []const u8, + field_name: InternPool.NullTerminatedString, field_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { const mod = sema.mod; @@ -18257,7 +18276,7 @@ fn zirStructInit( const field_type_data = zir_datas[item.data.field_type].pl_node; const field_src: LazySrcLoc = .{ .node_offset_initializer = field_type_data.src_node }; const field_type_extra = sema.code.extraData(Zir.Inst.FieldType, field_type_data.payload_index).data; - const field_name = sema.code.nullTerminatedString(field_type_extra.name_start); + const field_name = try mod.intern_pool.getOrPutString(gpa, sema.code.nullTerminatedString(field_type_extra.name_start)); const field_index = if (resolved_ty.isTuple(mod)) try sema.tupleFieldIndex(block, resolved_ty, field_name, field_src) else @@ -18298,7 +18317,7 @@ fn zirStructInit( const field_type_data = zir_datas[item.data.field_type].pl_node; const field_src: LazySrcLoc = .{ .node_offset_initializer = field_type_data.src_node }; const field_type_extra = sema.code.extraData(Zir.Inst.FieldType, field_type_data.payload_index).data; - const field_name = sema.code.nullTerminatedString(field_type_extra.name_start); + const field_name = try mod.intern_pool.getOrPutString(gpa, sema.code.nullTerminatedString(field_type_extra.name_start)); const field_index = try sema.unionFieldIndex(block, resolved_ty, field_name, field_src); const tag_ty = resolved_ty.unionTagTypeHypothetical(mod); const enum_field_index = @intCast(u32, tag_ty.enumFieldIndex(field_name, mod).?); @@ -18347,12 +18366,12 @@ fn finishStructInit( is_ref: bool, ) CompileError!Air.Inst.Ref { const mod = sema.mod; - const gpa = sema.gpa; + const ip = &mod.intern_pool; var root_msg: 
?*Module.ErrorMsg = null; errdefer if (root_msg) |msg| msg.destroy(sema.gpa); - switch (mod.intern_pool.indexToKey(struct_ty.toIntern())) { + switch (ip.indexToKey(struct_ty.toIntern())) { .anon_struct_type => |anon_struct| { for (anon_struct.types, anon_struct.values, 0..) |field_ty, default_val, i| { if (field_inits[i] != .none) continue; @@ -18366,9 +18385,9 @@ fn finishStructInit( root_msg = try sema.errMsg(block, init_src, template, .{i}); } } else { - const field_name = mod.intern_pool.stringToSlice(anon_struct.names[i]); + const field_name = anon_struct.names[i]; const template = "missing struct field: {s}"; - const args = .{field_name}; + const args = .{ip.stringToSlice(field_name)}; if (root_msg) |msg| { try sema.errNote(block, init_src, msg, template, args); } else { @@ -18385,17 +18404,17 @@ fn finishStructInit( for (struct_obj.fields.values(), 0..) |field, i| { if (field_inits[i] != .none) continue; - if (field.default_val.toIntern() == .unreachable_value) { + if (field.default_val == .none) { const field_name = struct_obj.fields.keys()[i]; const template = "missing struct field: {s}"; - const args = .{field_name}; + const args = .{ip.stringToSlice(field_name)}; if (root_msg) |msg| { try sema.errNote(block, init_src, msg, template, args); } else { root_msg = try sema.errMsg(block, init_src, template, args); } } else { - field_inits[i] = try sema.addConstant(field.ty, field.default_val); + field_inits[i] = try sema.addConstant(field.ty, field.default_val.toValue()); } } }, @@ -18404,10 +18423,9 @@ fn finishStructInit( if (root_msg) |msg| { if (mod.typeToStruct(struct_ty)) |struct_obj| { - const fqn = try struct_obj.getFullyQualifiedName(sema.mod); - defer gpa.free(fqn); - try sema.mod.errNoteNonLazy( - struct_obj.srcLoc(sema.mod), + const fqn = ip.stringToSlice(try struct_obj.getFullyQualifiedName(mod)); + try mod.errNoteNonLazy( + struct_obj.srcLoc(mod), msg, "struct '{s}' declared here", .{fqn}, @@ -18826,11 +18844,13 @@ fn zirFieldTypeRef(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro const ty_src = inst_data.src(); const field_src = inst_data.src(); const aggregate_ty = try sema.resolveType(block, ty_src, extra.container_type); - const field_name = try sema.resolveConstString(block, field_src, extra.field_name, "field name must be comptime-known"); + const field_name = try sema.resolveConstStringIntern(block, field_src, extra.field_name, "field name must be comptime-known"); return sema.fieldType(block, aggregate_ty, field_name, field_src, ty_src); } fn zirFieldType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; + const ip = &mod.intern_pool; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.FieldType, inst_data.payload_index).data; const ty_src = inst_data.src(); @@ -18843,7 +18863,8 @@ fn zirFieldType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A error.GenericPoison => return Air.Inst.Ref.generic_poison_type, else => |e| return e, }; - const field_name = sema.code.nullTerminatedString(extra.name_start); + const zir_field_name = sema.code.nullTerminatedString(extra.name_start); + const field_name = try ip.getOrPutString(sema.gpa, zir_field_name); return sema.fieldType(block, aggregate_ty, field_name, field_name_src, ty_src); } @@ -18851,7 +18872,7 @@ fn fieldType( sema: *Sema, block: *Block, aggregate_ty: Type, - field_name: []const u8, + field_name: InternPool.NullTerminatedString, field_src: LazySrcLoc, ty_src: LazySrcLoc, 
) CompileError!Air.Inst.Ref { @@ -19050,13 +19071,14 @@ fn zirTagName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const operand = try sema.resolveInst(inst_data.operand); const operand_ty = sema.typeOf(operand); const mod = sema.mod; + const ip = &mod.intern_pool; try sema.resolveTypeLayout(operand_ty); const enum_ty = switch (operand_ty.zigTypeTag(mod)) { .EnumLiteral => { const val = try sema.resolveConstValue(block, .unneeded, operand, ""); - const tag_name = mod.intern_pool.indexToKey(val.toIntern()).enum_literal; - const bytes = mod.intern_pool.stringToSlice(tag_name); + const tag_name = ip.indexToKey(val.toIntern()).enum_literal; + const bytes = ip.stringToSlice(tag_name); return sema.addStrLit(block, bytes); }, .Enum => operand_ty, @@ -19089,7 +19111,7 @@ fn zirTagName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const enum_decl = mod.declPtr(enum_decl_index); const msg = msg: { const msg = try sema.errMsg(block, src, "no field with value '{}' in enum '{s}'", .{ - val.fmtValue(enum_ty, sema.mod), enum_decl.name, + val.fmtValue(enum_ty, sema.mod), ip.stringToSlice(enum_decl.name), }); errdefer msg.destroy(sema.gpa); try mod.errNoteNonLazy(enum_decl.srcLoc(mod), msg, "declared here", .{}); @@ -19098,7 +19120,7 @@ fn zirTagName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air return sema.failWithOwnedErrorMsg(msg); }; const field_name = enum_ty.enumFieldName(field_index, mod); - return sema.addStrLit(block, field_name); + return sema.addStrLit(block, ip.stringToSlice(field_name)); } try sema.requireRuntimeBlock(block, src, operand_src); if (block.wantSafety() and sema.mod.backendSupportsFeature(.is_named_enum_value)) { @@ -19119,6 +19141,7 @@ fn zirReify( ) CompileError!Air.Inst.Ref { const mod = sema.mod; const gpa = sema.gpa; + const ip = &mod.intern_pool; const name_strategy = @intToEnum(Zir.Inst.NameStrategy, extended.small); const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const src = LazySrcLoc.nodeOffset(extra.node); @@ -19127,11 +19150,10 @@ fn zirReify( const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node }; const type_info = try sema.coerce(block, type_info_ty, uncasted_operand, operand_src); const val = try sema.resolveConstValue(block, operand_src, type_info, "operand to @Type must be comptime-known"); - const union_val = mod.intern_pool.indexToKey(val.toIntern()).un; + const union_val = ip.indexToKey(val.toIntern()).un; const target = mod.getTarget(); if (try union_val.val.toValue().anyUndef(mod)) return sema.failWithUseOfUndef(block, src); const tag_index = type_info_ty.unionTagFieldIndex(union_val.tag.toValue(), mod).?; - const ip = &mod.intern_pool; switch (@intToEnum(std.builtin.TypeId, tag_index)) { .Type => return Air.Inst.Ref.type_type, .Void => return Air.Inst.Ref.void_type, @@ -19145,8 +19167,14 @@ fn zirReify( .EnumLiteral => return Air.Inst.Ref.enum_literal_type, .Int => { const fields = ip.typeOf(union_val.val).toType().structFields(mod); - const signedness_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("signedness").?); - const bits_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("bits").?); + const signedness_val = try union_val.val.toValue().fieldValue( + mod, + fields.getIndex(try ip.getOrPutString(gpa, "signedness")).?, + ); + const bits_val = try union_val.val.toValue().fieldValue( + mod, + fields.getIndex(try ip.getOrPutString(gpa, "bits")).?, + ); const signedness = mod.toEnum(std.builtin.Signedness, 
signedness_val); const bits = @intCast(u16, bits_val.toUnsignedInt(mod)); @@ -19155,8 +19183,12 @@ fn zirReify( }, .Vector => { const fields = ip.typeOf(union_val.val).toType().structFields(mod); - const len_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("len").?); - const child_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("child").?); + const len_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "len"), + ).?); + const child_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "child"), + ).?); const len = @intCast(u32, len_val.toUnsignedInt(mod)); const child_ty = child_val.toType(); @@ -19171,7 +19203,9 @@ fn zirReify( }, .Float => { const fields = ip.typeOf(union_val.val).toType().structFields(mod); - const bits_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("bits").?); + const bits_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "bits"), + ).?); const bits = @intCast(u16, bits_val.toUnsignedInt(mod)); const ty = switch (bits) { @@ -19186,14 +19220,30 @@ fn zirReify( }, .Pointer => { const fields = ip.typeOf(union_val.val).toType().structFields(mod); - const size_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("size").?); - const is_const_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("is_const").?); - const is_volatile_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("is_volatile").?); - const alignment_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("alignment").?); - const address_space_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("address_space").?); - const child_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("child").?); - const is_allowzero_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("is_allowzero").?); - const sentinel_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("sentinel").?); + const size_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "size"), + ).?); + const is_const_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "is_const"), + ).?); + const is_volatile_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "is_volatile"), + ).?); + const alignment_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "alignment"), + ).?); + const address_space_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "address_space"), + ).?); + const child_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "child"), + ).?); + const is_allowzero_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "is_allowzero"), + ).?); + const sentinel_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "sentinel"), + ).?); if (!try sema.intFitsInType(alignment_val, Type.u32, null)) { return sema.fail(block, src, "alignment must fit in 'u32'", .{}); @@ -19279,9 +19329,15 @@ fn zirReify( }, .Array => { const fields = ip.typeOf(union_val.val).toType().structFields(mod); - const len_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("len").?); - const child_val = try 
union_val.val.toValue().fieldValue(mod, fields.getIndex("child").?); - const sentinel_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("sentinel").?); + const len_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "len"), + ).?); + const child_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "child"), + ).?); + const sentinel_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "sentinel"), + ).?); const len = len_val.toUnsignedInt(mod); const child_ty = child_val.toType(); @@ -19298,7 +19354,9 @@ fn zirReify( }, .Optional => { const fields = ip.typeOf(union_val.val).toType().structFields(mod); - const child_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("child").?); + const child_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "child"), + ).?); const child_ty = child_val.toType(); @@ -19307,8 +19365,12 @@ fn zirReify( }, .ErrorUnion => { const fields = ip.typeOf(union_val.val).toType().structFields(mod); - const error_set_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("error_set").?); - const payload_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("payload").?); + const error_set_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "error_set"), + ).?); + const payload_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "payload"), + ).?); const error_set_ty = error_set_val.toType(); const payload_ty = payload_val.toType(); @@ -19330,14 +19392,17 @@ fn zirReify( for (0..len) |i| { const elem_val = try payload_val.elemValue(mod, i); const elem_fields = ip.typeOf(elem_val.toIntern()).toType().structFields(mod); - const name_val = try elem_val.fieldValue(mod, elem_fields.getIndex("name").?); + const name_val = try elem_val.fieldValue(mod, elem_fields.getIndex( + try ip.getOrPutString(gpa, "name"), + ).?); - const name_str = try name_val.toAllocatedBytes(Type.slice_const_u8, sema.arena, mod); - const kv = try mod.getErrorValue(name_str); - const name_ip = try mod.intern_pool.getOrPutString(gpa, kv.key); - const gop = names.getOrPutAssumeCapacity(name_ip); + const name = try name_val.toIpString(Type.slice_const_u8, mod); + _ = try mod.getErrorValue(name); + const gop = names.getOrPutAssumeCapacity(name); if (gop.found_existing) { - return sema.fail(block, src, "duplicate error '{s}'", .{name_str}); + return sema.fail(block, src, "duplicate error '{s}'", .{ + ip.stringToSlice(name), + }); } } @@ -19346,11 +19411,21 @@ fn zirReify( }, .Struct => { const fields = ip.typeOf(union_val.val).toType().structFields(mod); - const layout_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("layout").?); - const backing_integer_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("backing_integer").?); - const fields_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("fields").?); - const decls_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("decls").?); - const is_tuple_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("is_tuple").?); + const layout_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "layout"), + ).?); + const backing_integer_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, 
"backing_integer"), + ).?); + const fields_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "fields"), + ).?); + const decls_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "decls"), + ).?); + const is_tuple_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "is_tuple"), + ).?); const layout = mod.toEnum(std.builtin.Type.ContainerLayout, layout_val); @@ -19367,10 +19442,18 @@ fn zirReify( }, .Enum => { const fields = ip.typeOf(union_val.val).toType().structFields(mod); - const tag_type_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("tag_type").?); - const fields_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("fields").?); - const decls_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("decls").?); - const is_exhaustive_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("is_exhaustive").?); + const tag_type_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "tag_type"), + ).?); + const fields_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "fields"), + ).?); + const decls_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "decls"), + ).?); + const is_exhaustive_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "is_exhaustive"), + ).?); // Decls if (decls_val.sliceLen(mod) > 0) { @@ -19396,7 +19479,7 @@ fn zirReify( // Define our empty enum decl const fields_len = @intCast(u32, try sema.usizeCast(block, src, fields_val.sliceLen(mod))); - const incomplete_enum = try mod.intern_pool.getIncompleteEnum(gpa, .{ + const incomplete_enum = try ip.getIncompleteEnum(gpa, .{ .decl = new_decl_index, .namespace = .none, .fields_len = fields_len, @@ -19407,35 +19490,36 @@ fn zirReify( .explicit, .tag_ty = int_tag_ty.toIntern(), }); - errdefer mod.intern_pool.remove(incomplete_enum.index); + errdefer ip.remove(incomplete_enum.index); new_decl.val = incomplete_enum.index.toValue(); for (0..fields_len) |field_i| { const elem_val = try fields_val.elemValue(mod, field_i); const elem_fields = ip.typeOf(elem_val.toIntern()).toType().structFields(mod); - const name_val = try elem_val.fieldValue(mod, elem_fields.getIndex("name").?); - const value_val = try elem_val.fieldValue(mod, elem_fields.getIndex("value").?); + const name_val = try elem_val.fieldValue(mod, elem_fields.getIndex( + try ip.getOrPutString(gpa, "name"), + ).?); + const value_val = try elem_val.fieldValue(mod, elem_fields.getIndex( + try ip.getOrPutString(gpa, "value"), + ).?); - const field_name = try name_val.toAllocatedBytes( - Type.slice_const_u8, - sema.arena, - mod, - ); - const field_name_ip = try mod.intern_pool.getOrPutString(gpa, field_name); + const field_name = try name_val.toIpString(Type.slice_const_u8, mod); if (!try sema.intFitsInType(value_val, int_tag_ty, null)) { // TODO: better source location return sema.fail(block, src, "field '{s}' with enumeration value '{}' is too large for backing int type '{}'", .{ - field_name, + ip.stringToSlice(field_name), value_val.fmtValue(Type.comptime_int, mod), int_tag_ty.fmt(mod), }); } - if (try incomplete_enum.addFieldName(&mod.intern_pool, gpa, field_name_ip)) |other_index| { + if (try incomplete_enum.addFieldName(ip, gpa, field_name)) |other_index| { const msg = msg: { - const msg = try sema.errMsg(block, 
src, "duplicate enum field '{s}'", .{field_name}); + const msg = try sema.errMsg(block, src, "duplicate enum field '{s}'", .{ + ip.stringToSlice(field_name), + }); errdefer msg.destroy(gpa); _ = other_index; // TODO: this note is incorrect try sema.errNote(block, src, msg, "other field here", .{}); @@ -19444,7 +19528,7 @@ fn zirReify( return sema.failWithOwnedErrorMsg(msg); } - if (try incomplete_enum.addFieldValue(&mod.intern_pool, gpa, (try mod.getCoerced(value_val, int_tag_ty)).toIntern())) |other| { + if (try incomplete_enum.addFieldValue(ip, gpa, (try mod.getCoerced(value_val, int_tag_ty)).toIntern())) |other| { const msg = msg: { const msg = try sema.errMsg(block, src, "enum tag value {} already taken", .{value_val.fmtValue(Type.comptime_int, mod)}); errdefer msg.destroy(gpa); @@ -19462,7 +19546,9 @@ fn zirReify( }, .Opaque => { const fields = ip.typeOf(union_val.val).toType().structFields(mod); - const decls_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("decls").?); + const decls_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "decls"), + ).?); // Decls if (decls_val.sliceLen(mod) > 0) { @@ -19496,22 +19582,29 @@ fn zirReify( .decl = new_decl_index, .namespace = new_namespace_index, } }); - errdefer mod.intern_pool.remove(opaque_ty); + errdefer ip.remove(opaque_ty); new_decl.val = opaque_ty.toValue(); new_namespace.ty = opaque_ty.toType(); - try new_decl.finalizeNewArena(&new_decl_arena); const decl_val = sema.analyzeDeclVal(block, src, new_decl_index); try mod.finalizeAnonDecl(new_decl_index); return decl_val; }, .Union => { const fields = ip.typeOf(union_val.val).toType().structFields(mod); - const layout_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("layout").?); - const tag_type_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("tag_type").?); - const fields_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("fields").?); - const decls_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("decls").?); + const layout_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "layout"), + ).?); + const tag_type_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "tag_type"), + ).?); + const fields_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "fields"), + ).?); + const decls_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "decls"), + ).?); // Decls if (decls_val.sliceLen(mod) > 0) { @@ -19555,7 +19648,7 @@ fn zirReify( const union_obj = mod.unionPtr(union_index); errdefer mod.destroyUnion(union_index); - const union_ty = try mod.intern_pool.get(gpa, .{ .union_type = .{ + const union_ty = try ip.get(gpa, .{ .union_type = .{ .index = union_index, .runtime_tag = if (!tag_type_val.isNull(mod)) .tagged @@ -19566,7 +19659,7 @@ fn zirReify( .ReleaseFast, .ReleaseSmall => .none, }, } }); - errdefer mod.intern_pool.remove(union_ty); + errdefer ip.remove(union_ty); new_decl.val = union_ty.toValue(); new_namespace.ty = union_ty.toType(); @@ -19579,7 +19672,7 @@ fn zirReify( if (tag_type_val.optionalValue(mod)) |payload_val| { union_obj.tag_ty = payload_val.toType(); - const enum_type = switch (mod.intern_pool.indexToKey(union_obj.tag_ty.toIntern())) { + const enum_type = switch (ip.indexToKey(union_obj.tag_ty.toIntern())) { .enum_type => |x| x, else => return sema.fail(block, src, 
"Type.Union.tag_type must be an enum type", .{}), }; @@ -19597,26 +19690,26 @@ fn zirReify( for (0..fields_len) |i| { const elem_val = try fields_val.elemValue(mod, i); const elem_fields = ip.typeOf(elem_val.toIntern()).toType().structFields(mod); - const name_val = try elem_val.fieldValue(mod, elem_fields.getIndex("name").?); - const type_val = try elem_val.fieldValue(mod, elem_fields.getIndex("type").?); - const alignment_val = try elem_val.fieldValue(mod, elem_fields.getIndex("alignment").?); - - const field_name = try name_val.toAllocatedBytes( - Type.slice_const_u8, - new_decl_arena_allocator, - mod, - ); - - const field_name_ip = try mod.intern_pool.getOrPutString(gpa, field_name); + const name_val = try elem_val.fieldValue(mod, elem_fields.getIndex( + try ip.getOrPutString(gpa, "name"), + ).?); + const type_val = try elem_val.fieldValue(mod, elem_fields.getIndex( + try ip.getOrPutString(gpa, "type"), + ).?); + const alignment_val = try elem_val.fieldValue(mod, elem_fields.getIndex( + try ip.getOrPutString(gpa, "alignment"), + ).?); + + const field_name = try name_val.toIpString(Type.slice_const_u8, mod); if (enum_field_names.len != 0) { - enum_field_names[i] = field_name_ip; + enum_field_names[i] = field_name; } if (explicit_enum_info) |tag_info| { - const enum_index = tag_info.nameIndex(&mod.intern_pool, field_name_ip) orelse { + const enum_index = tag_info.nameIndex(ip, field_name) orelse { const msg = msg: { - const msg = try sema.errMsg(block, src, "no field named '{s}' in enum '{}'", .{ field_name, union_obj.tag_ty.fmt(mod) }); + const msg = try sema.errMsg(block, src, "no field named '{s}' in enum '{}'", .{ ip.stringToSlice(field_name), union_obj.tag_ty.fmt(mod) }); errdefer msg.destroy(gpa); try sema.addDeclaredHereNote(msg, union_obj.tag_ty); break :msg msg; @@ -19632,7 +19725,7 @@ fn zirReify( const gop = union_obj.fields.getOrPutAssumeCapacity(field_name); if (gop.found_existing) { // TODO: better source location - return sema.fail(block, src, "duplicate union field {s}", .{field_name}); + return sema.fail(block, src, "duplicate union field {s}", .{ip.stringToSlice(field_name)}); } const field_ty = type_val.toType(); @@ -19688,7 +19781,7 @@ fn zirReify( for (tag_info.names, 0..) 
|field_name, field_index| { if (explicit_tags_seen[field_index]) continue; try sema.addFieldErrNote(enum_ty, field_index, msg, "field '{s}' missing, declared here", .{ - mod.intern_pool.stringToSlice(field_name), + ip.stringToSlice(field_name), }); } try sema.addDeclaredHereNote(msg, union_obj.tag_ty); @@ -19700,19 +19793,30 @@ fn zirReify( union_obj.tag_ty = try sema.generateUnionTagTypeSimple(block, enum_field_names, null); } - try new_decl.finalizeNewArena(&new_decl_arena); const decl_val = sema.analyzeDeclVal(block, src, new_decl_index); try mod.finalizeAnonDecl(new_decl_index); return decl_val; }, .Fn => { const fields = ip.typeOf(union_val.val).toType().structFields(mod); - const calling_convention_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("calling_convention").?); - const alignment_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("alignment").?); - const is_generic_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("is_generic").?); - const is_var_args_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("is_var_args").?); - const return_type_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("return_type").?); - const params_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex("params").?); + const calling_convention_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "calling_convention"), + ).?); + const alignment_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "alignment"), + ).?); + const is_generic_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "is_generic"), + ).?); + const is_var_args_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "is_var_args"), + ).?); + const return_type_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "return_type"), + ).?); + const params_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "params"), + ).?); const is_generic = is_generic_val.toBool(); if (is_generic) { @@ -19746,9 +19850,15 @@ fn zirReify( for (param_types, 0..) 
|*param_type, i| { const elem_val = try params_val.elemValue(mod, i); const elem_fields = ip.typeOf(elem_val.toIntern()).toType().structFields(mod); - const param_is_generic_val = try elem_val.fieldValue(mod, elem_fields.getIndex("is_generic").?); - const param_is_noalias_val = try elem_val.fieldValue(mod, elem_fields.getIndex("is_noalias").?); - const opt_param_type_val = try elem_val.fieldValue(mod, elem_fields.getIndex("type").?); + const param_is_generic_val = try elem_val.fieldValue(mod, elem_fields.getIndex( + try ip.getOrPutString(gpa, "is_generic"), + ).?); + const param_is_noalias_val = try elem_val.fieldValue(mod, elem_fields.getIndex( + try ip.getOrPutString(gpa, "is_noalias"), + ).?); + const opt_param_type_val = try elem_val.fieldValue(mod, elem_fields.getIndex( + try ip.getOrPutString(gpa, "type"), + ).?); if (param_is_generic_val.toBool()) { return sema.fail(block, src, "Type.Fn.Param.is_generic must be false for @Type", .{}); @@ -19801,6 +19911,7 @@ fn reifyStruct( ) CompileError!Air.Inst.Ref { const mod = sema.mod; const gpa = sema.gpa; + const ip = &mod.intern_pool; var new_decl_arena = std.heap.ArenaAllocator.init(gpa); errdefer new_decl_arena.deinit(); @@ -19839,11 +19950,11 @@ fn reifyStruct( const struct_obj = mod.structPtr(struct_index); errdefer mod.destroyStruct(struct_index); - const struct_ty = try mod.intern_pool.get(gpa, .{ .struct_type = .{ + const struct_ty = try ip.get(gpa, .{ .struct_type = .{ .index = struct_index.toOptional(), .namespace = new_namespace_index.toOptional(), } }); - errdefer mod.intern_pool.remove(struct_ty); + errdefer ip.remove(struct_ty); new_decl.val = struct_ty.toValue(); new_namespace.ty = struct_ty.toType(); @@ -19854,12 +19965,22 @@ fn reifyStruct( var i: usize = 0; while (i < fields_len) : (i += 1) { const elem_val = try fields_val.elemValue(mod, i); - const elem_fields = mod.intern_pool.typeOf(elem_val.toIntern()).toType().structFields(mod); - const name_val = try elem_val.fieldValue(mod, elem_fields.getIndex("name").?); - const type_val = try elem_val.fieldValue(mod, elem_fields.getIndex("type").?); - const default_value_val = try elem_val.fieldValue(mod, elem_fields.getIndex("default_value").?); - const is_comptime_val = try elem_val.fieldValue(mod, elem_fields.getIndex("is_comptime").?); - const alignment_val = try elem_val.fieldValue(mod, elem_fields.getIndex("alignment").?); + const elem_fields = ip.typeOf(elem_val.toIntern()).toType().structFields(mod); + const name_val = try elem_val.fieldValue(mod, elem_fields.getIndex( + try ip.getOrPutString(gpa, "name"), + ).?); + const type_val = try elem_val.fieldValue(mod, elem_fields.getIndex( + try ip.getOrPutString(gpa, "type"), + ).?); + const default_value_val = try elem_val.fieldValue(mod, elem_fields.getIndex( + try ip.getOrPutString(gpa, "default_value"), + ).?); + const is_comptime_val = try elem_val.fieldValue(mod, elem_fields.getIndex( + try ip.getOrPutString(gpa, "is_comptime"), + ).?); + const alignment_val = try elem_val.fieldValue(mod, elem_fields.getIndex( + try ip.getOrPutString(gpa, "alignment"), + ).?); if (!try sema.intFitsInType(alignment_val, Type.u32, null)) { return sema.fail(block, src, "alignment must fit in 'u32'", .{}); @@ -19874,19 +19995,15 @@ fn reifyStruct( return sema.fail(block, src, "extern struct fields cannot be marked comptime", .{}); } - const field_name = try name_val.toAllocatedBytes( - Type.slice_const_u8, - new_decl_arena_allocator, - mod, - ); + const field_name = try name_val.toIpString(Type.slice_const_u8, mod); if (is_tuple) { - const 
field_index = std.fmt.parseUnsigned(u32, field_name, 10) catch { + const field_index = std.fmt.parseUnsigned(u32, ip.stringToSlice(field_name), 10) catch { return sema.fail( block, src, "tuple cannot have non-numeric field '{s}'", - .{field_name}, + .{ip.stringToSlice(field_name)}, ); }; @@ -19902,16 +20019,16 @@ fn reifyStruct( const gop = struct_obj.fields.getOrPutAssumeCapacity(field_name); if (gop.found_existing) { // TODO: better source location - return sema.fail(block, src, "duplicate struct field {s}", .{field_name}); + return sema.fail(block, src, "duplicate struct field {s}", .{ip.stringToSlice(field_name)}); } const field_ty = type_val.toType(); const default_val = if (default_value_val.optionalValue(mod)) |opt_val| - try sema.pointerDeref(block, src, opt_val, try mod.singleConstPtrType(field_ty)) orelse - return sema.failWithNeededComptime(block, src, "struct field default value must be comptime-known") + (try sema.pointerDeref(block, src, opt_val, try mod.singleConstPtrType(field_ty)) orelse + return sema.failWithNeededComptime(block, src, "struct field default value must be comptime-known")).toIntern() else - Value.@"unreachable"; - if (is_comptime_val.toBool() and default_val.toIntern() == .unreachable_value) { + .none; + if (is_comptime_val.toBool() and default_val == .none) { return sema.fail(block, src, "comptime field without default initialization value", .{}); } @@ -20000,7 +20117,6 @@ fn reifyStruct( struct_obj.status = .have_layout; } - try new_decl.finalizeNewArena(&new_decl_arena); const decl_val = sema.analyzeDeclVal(block, src, new_decl_index); try mod.finalizeAnonDecl(new_decl_index); return decl_val; @@ -20871,7 +20987,7 @@ fn bitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!u6 const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const ty = try sema.resolveType(block, lhs_src, extra.lhs); - const field_name = try sema.resolveConstString(block, rhs_src, extra.rhs, "name of field must be comptime-known"); + const field_name = try sema.resolveConstStringIntern(block, rhs_src, extra.rhs, "name of field must be comptime-known"); const mod = sema.mod; try sema.resolveTypeLayout(ty); @@ -20889,7 +21005,7 @@ fn bitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!u6 } const field_index = if (ty.isTuple(mod)) blk: { - if (mem.eql(u8, field_name, "len")) { + if (mod.intern_pool.stringEqlSlice(field_name, "len")) { return sema.fail(block, src, "no offset available for 'len' field of tuple", .{}); } break :blk try sema.tupleFieldIndex(block, ty, field_name, rhs_src); @@ -21351,6 +21467,8 @@ fn resolveExportOptions( zir_ref: Zir.Inst.Ref, ) CompileError!std.builtin.ExportOptions { const mod = sema.mod; + const gpa = sema.gpa; + const ip = &mod.intern_pool; const export_options_ty = try sema.getBuiltinType("ExportOptions"); const air_ref = try sema.resolveInst(zir_ref); const options = try sema.coerce(block, export_options_ty, air_ref, src); @@ -21360,16 +21478,16 @@ fn resolveExportOptions( const section_src = sema.maybeOptionsSrc(block, src, "section"); const visibility_src = sema.maybeOptionsSrc(block, src, "visibility"); - const name_operand = try sema.fieldVal(block, src, options, "name", name_src); + const name_operand = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "name"), name_src); const name_val = try sema.resolveConstValue(block, name_src, name_operand, "name of exported value must be comptime-known"); const name_ty = Type.slice_const_u8; const name = try 
name_val.toAllocatedBytes(name_ty, sema.arena, mod); - const linkage_operand = try sema.fieldVal(block, src, options, "linkage", linkage_src); + const linkage_operand = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "linkage"), linkage_src); const linkage_val = try sema.resolveConstValue(block, linkage_src, linkage_operand, "linkage of exported value must be comptime-known"); const linkage = mod.toEnum(std.builtin.GlobalLinkage, linkage_val); - const section_operand = try sema.fieldVal(block, src, options, "section", section_src); + const section_operand = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "section"), section_src); const section_opt_val = try sema.resolveConstValue(block, section_src, section_operand, "linksection of exported value must be comptime-known"); const section_ty = Type.slice_const_u8; const section = if (section_opt_val.optionalValue(mod)) |section_val| @@ -21377,7 +21495,7 @@ fn resolveExportOptions( else null; - const visibility_operand = try sema.fieldVal(block, src, options, "visibility", visibility_src); + const visibility_operand = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "visibility"), visibility_src); const visibility_val = try sema.resolveConstValue(block, visibility_src, visibility_operand, "visibility of exported value must be comptime-known"); const visibility = mod.toEnum(std.builtin.SymbolVisibility, visibility_val); @@ -22217,10 +22335,11 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr const ptr_src: LazySrcLoc = .{ .node_offset_builtin_call_arg2 = inst_data.src_node }; const parent_ty = try sema.resolveType(block, ty_src, extra.parent_type); - const field_name = try sema.resolveConstString(block, name_src, extra.field_name, "field name must be comptime-known"); + const field_name = try sema.resolveConstStringIntern(block, name_src, extra.field_name, "field name must be comptime-known"); const field_ptr = try sema.resolveInst(extra.field_ptr); const field_ptr_ty = sema.typeOf(field_ptr); const mod = sema.mod; + const ip = &mod.intern_pool; if (parent_ty.zigTypeTag(mod) != .Struct and parent_ty.zigTypeTag(mod) != .Union) { return sema.fail(block, ty_src, "expected struct or union type, found '{}'", .{parent_ty.fmt(sema.mod)}); @@ -22230,7 +22349,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr const field_index = switch (parent_ty.zigTypeTag(mod)) { .Struct => blk: { if (parent_ty.isTuple(mod)) { - if (mem.eql(u8, field_name, "len")) { + if (ip.stringEqlSlice(field_name, "len")) { return sema.fail(block, src, "cannot get @fieldParentPtr of 'len' field of tuple", .{}); } break :blk try sema.tupleFieldIndex(block, parent_ty, field_name, name_src); @@ -22276,7 +22395,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr const result_ptr = try Type.ptr(sema.arena, sema.mod, ptr_ty_data); if (try sema.resolveDefinedValue(block, src, casted_field_ptr)) |field_ptr_val| { - const field = switch (mod.intern_pool.indexToKey(field_ptr_val.toIntern())) { + const field = switch (ip.indexToKey(field_ptr_val.toIntern())) { .ptr => |ptr| switch (ptr.addr) { .field => |field| field, else => null, @@ -22291,7 +22410,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr src, "field '{s}' has index '{d}' but pointer value is index '{d}' of struct '{}'", .{ - field_name, + ip.stringToSlice(field_name), field_index, field.index, parent_ty.fmt(sema.mod), @@ -22807,6 +22926,8 @@ 
fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void fn zirMemset(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { const mod = sema.mod; + const gpa = sema.gpa; + const ip = &mod.intern_pool; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const src = inst_data.src(); @@ -22824,7 +22945,7 @@ fn zirMemset(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void const dest_elem_ty = dest_ptr_ty.elemType2(mod); const runtime_src = if (try sema.resolveDefinedValue(block, dest_src, dest_ptr)) |ptr_val| rs: { - const len_air_ref = try sema.fieldVal(block, src, dest_ptr, "len", dest_src); + const len_air_ref = try sema.fieldVal(block, src, dest_ptr, try ip.getOrPutString(gpa, "len"), dest_src); const len_val = (try sema.resolveDefinedValue(block, dest_src, len_air_ref)) orelse break :rs dest_src; const len_u64 = (try len_val.getUnsignedIntAdvanced(mod, sema)).?; @@ -23068,11 +23189,11 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A if (val.isGenericPoison()) { break :blk FuncLinkSection{ .generic = {} }; } - break :blk FuncLinkSection{ .explicit = try val.toAllocatedBytes(ty, sema.arena, sema.mod) }; + break :blk FuncLinkSection{ .explicit = try val.toIpString(ty, mod) }; } else if (extra.data.bits.has_section_ref) blk: { const section_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]); extra_index += 1; - const section_name = sema.resolveConstString(block, section_src, section_ref, "linksection must be comptime-known") catch |err| switch (err) { + const section_name = sema.resolveConstStringIntern(block, section_src, section_ref, "linksection must be comptime-known") catch |err| switch (err) { error.GenericPoison => { break :blk FuncLinkSection{ .generic = {} }; }, @@ -23272,6 +23393,8 @@ fn resolvePrefetchOptions( zir_ref: Zir.Inst.Ref, ) CompileError!std.builtin.PrefetchOptions { const mod = sema.mod; + const gpa = sema.gpa; + const ip = &mod.intern_pool; const options_ty = try sema.getBuiltinType("PrefetchOptions"); const options = try sema.coerce(block, options_ty, try sema.resolveInst(zir_ref), src); @@ -23279,13 +23402,13 @@ fn resolvePrefetchOptions( const locality_src = sema.maybeOptionsSrc(block, src, "locality"); const cache_src = sema.maybeOptionsSrc(block, src, "cache"); - const rw = try sema.fieldVal(block, src, options, "rw", rw_src); + const rw = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "rw"), rw_src); const rw_val = try sema.resolveConstValue(block, rw_src, rw, "prefetch read/write must be comptime-known"); - const locality = try sema.fieldVal(block, src, options, "locality", locality_src); + const locality = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "locality"), locality_src); const locality_val = try sema.resolveConstValue(block, locality_src, locality, "prefetch locality must be comptime-known"); - const cache = try sema.fieldVal(block, src, options, "cache", cache_src); + const cache = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "cache"), cache_src); const cache_val = try sema.resolveConstValue(block, cache_src, cache, "prefetch cache must be comptime-known"); return std.builtin.PrefetchOptions{ @@ -23336,6 +23459,8 @@ fn resolveExternOptions( zir_ref: Zir.Inst.Ref, ) CompileError!std.builtin.ExternOptions { const mod = sema.mod; + const gpa = sema.gpa; + const ip = &mod.intern_pool; const options_inst = 
try sema.resolveInst(zir_ref); const extern_options_ty = try sema.getBuiltinType("ExternOptions"); const options = try sema.coerce(block, extern_options_ty, options_inst, src); @@ -23345,18 +23470,18 @@ fn resolveExternOptions( const linkage_src = sema.maybeOptionsSrc(block, src, "linkage"); const thread_local_src = sema.maybeOptionsSrc(block, src, "thread_local"); - const name_ref = try sema.fieldVal(block, src, options, "name", name_src); + const name_ref = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "name"), name_src); const name_val = try sema.resolveConstValue(block, name_src, name_ref, "name of the extern symbol must be comptime-known"); const name = try name_val.toAllocatedBytes(Type.slice_const_u8, sema.arena, mod); - const library_name_inst = try sema.fieldVal(block, src, options, "library_name", library_src); + const library_name_inst = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "library_name"), library_src); const library_name_val = try sema.resolveConstValue(block, library_src, library_name_inst, "library in which extern symbol is must be comptime-known"); - const linkage_ref = try sema.fieldVal(block, src, options, "linkage", linkage_src); + const linkage_ref = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "linkage"), linkage_src); const linkage_val = try sema.resolveConstValue(block, linkage_src, linkage_ref, "linkage of the extern symbol must be comptime-known"); const linkage = mod.toEnum(std.builtin.GlobalLinkage, linkage_val); - const is_thread_local = try sema.fieldVal(block, src, options, "is_thread_local", thread_local_src); + const is_thread_local = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "is_thread_local"), thread_local_src); const is_thread_local_val = try sema.resolveConstValue(block, thread_local_src, is_thread_local, "threadlocality of the extern symbol must be comptime-known"); const library_name = if (library_name_val.optionalValue(mod)) |payload| blk: { @@ -23425,7 +23550,7 @@ fn zirBuiltinExtern( const new_decl_index = try mod.allocateNewDecl(sema.owner_decl.src_namespace, sema.owner_decl.src_node, null); errdefer mod.destroyDecl(new_decl_index); const new_decl = mod.declPtr(new_decl_index); - new_decl.name = try sema.gpa.dupeZ(u8, options.name); + new_decl.name = try mod.intern_pool.getOrPutString(sema.gpa, options.name); { const new_var = try mod.intern(.{ .variable = .{ @@ -23444,7 +23569,7 @@ fn zirBuiltinExtern( new_decl.ty = ty; new_decl.val = new_var.toValue(); new_decl.@"align" = 0; - new_decl.@"linksection" = null; + new_decl.@"linksection" = .none; new_decl.has_tv = true; new_decl.analysis = .complete; new_decl.generation = mod.generation; @@ -24265,12 +24390,13 @@ fn safetyPanic( panic_id: PanicId, ) CompileError!void { const mod = sema.mod; + const gpa = sema.gpa; const panic_messages_ty = try sema.getBuiltinType("panic_messages"); const msg_decl_index = (try sema.namespaceLookup( block, sema.src, panic_messages_ty.getNamespaceIndex(mod).unwrap().?, - @tagName(panic_id), + try mod.intern_pool.getOrPutString(gpa, @tagName(panic_id)), )).?; const msg_inst = try sema.analyzeDeclVal(block, sema.src, msg_decl_index); @@ -24302,14 +24428,13 @@ fn fieldVal( block: *Block, src: LazySrcLoc, object: Air.Inst.Ref, - field_name: []const u8, + field_name: InternPool.NullTerminatedString, field_name_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { // When editing this function, note that there is corresponding logic to be edited // in `fieldPtr`. 
This function takes a value and returns a value. const mod = sema.mod; - const gpa = sema.gpa; const ip = &mod.intern_pool; const object_src = src; // TODO better source location const object_ty = sema.typeOf(object); @@ -24326,12 +24451,12 @@ fn fieldVal( switch (inner_ty.zigTypeTag(mod)) { .Array => { - if (mem.eql(u8, field_name, "len")) { + if (ip.stringEqlSlice(field_name, "len")) { return sema.addConstant( Type.usize, try mod.intValue(Type.usize, inner_ty.arrayLen(mod)), ); - } else if (mem.eql(u8, field_name, "ptr") and is_pointer_to) { + } else if (ip.stringEqlSlice(field_name, "ptr") and is_pointer_to) { const ptr_info = object_ty.ptrInfo(mod); const result_ty = try Type.ptr(sema.arena, mod, .{ .pointee_type = ptr_info.pointee_type.childType(mod), @@ -24352,20 +24477,20 @@ fn fieldVal( block, field_name_src, "no member named '{s}' in '{}'", - .{ field_name, object_ty.fmt(mod) }, + .{ ip.stringToSlice(field_name), object_ty.fmt(mod) }, ); } }, .Pointer => { const ptr_info = inner_ty.ptrInfo(mod); if (ptr_info.size == .Slice) { - if (mem.eql(u8, field_name, "ptr")) { + if (ip.stringEqlSlice(field_name, "ptr")) { const slice = if (is_pointer_to) try sema.analyzeLoad(block, src, object, object_src) else object; return sema.analyzeSlicePtr(block, object_src, slice, inner_ty); - } else if (mem.eql(u8, field_name, "len")) { + } else if (ip.stringEqlSlice(field_name, "len")) { const slice = if (is_pointer_to) try sema.analyzeLoad(block, src, object, object_src) else @@ -24376,7 +24501,7 @@ fn fieldVal( block, field_name_src, "no member named '{s}' in '{}'", - .{ field_name, object_ty.fmt(mod) }, + .{ ip.stringToSlice(field_name), object_ty.fmt(mod) }, ); } } @@ -24392,13 +24517,12 @@ fn fieldVal( switch (try child_type.zigTypeTagOrPoison(mod)) { .ErrorSet => { - const name = try ip.getOrPutString(gpa, field_name); switch (ip.indexToKey(child_type.toIntern())) { .error_set_type => |error_set_type| blk: { - if (error_set_type.nameIndex(ip, name) != null) break :blk; + if (error_set_type.nameIndex(ip, field_name) != null) break :blk; const msg = msg: { const msg = try sema.errMsg(block, src, "no error named '{s}' in '{}'", .{ - field_name, child_type.fmt(mod), + ip.stringToSlice(field_name), child_type.fmt(mod), }); errdefer msg.destroy(sema.gpa); try sema.addDeclaredHereNote(msg, child_type); @@ -24419,10 +24543,10 @@ fn fieldVal( const error_set_type = if (!child_type.isAnyError(mod)) child_type else - try mod.singleErrorSetTypeNts(name); + try mod.singleErrorSetTypeNts(field_name); return sema.addConstant(error_set_type, (try mod.intern(.{ .err = .{ .ty = error_set_type.toIntern(), - .name = name, + .name = field_name, } })).toValue()); }, .Union => { @@ -24499,7 +24623,7 @@ fn fieldPtr( block: *Block, src: LazySrcLoc, object_ptr: Air.Inst.Ref, - field_name: []const u8, + field_name: InternPool.NullTerminatedString, field_name_src: LazySrcLoc, initializing: bool, ) CompileError!Air.Inst.Ref { @@ -24507,7 +24631,6 @@ fn fieldPtr( // in `fieldVal`. This function takes a pointer and returns a pointer. 
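The hunks through `fieldVal`, `fieldPtr`, and `fieldCallBind` all make the same move: field names travel as `InternPool.NullTerminatedString` handles, `mem.eql(u8, field_name, "len")` becomes `ip.stringEqlSlice(field_name, "len")`, and bytes are only materialized via `ip.stringToSlice` at the edges (error messages, `std.fmt.parseUnsigned`). The payoff is that two interned names compare as a single integer. A minimal self-contained sketch of that contract — `MiniPool` and its linear scan are invented here, not the compiler's real `InternPool`:

    const std = @import("std");

    /// Invented stand-in for InternPool.NullTerminatedString: a stable
    /// offset into one shared, NUL-delimited byte buffer.
    const NullTerminatedString = enum(u32) { _ };

    const MiniPool = struct {
        bytes: std.ArrayListUnmanaged(u8) = .{},

        /// Return the existing handle for `s`, or append it. The linear
        /// scan keeps the sketch tiny; the real pool uses a hash map.
        fn getOrPutString(p: *MiniPool, gpa: std.mem.Allocator, s: []const u8) !NullTerminatedString {
            var start: u32 = 0;
            while (start < p.bytes.items.len) {
                const existing = std.mem.sliceTo(p.bytes.items[start..], 0);
                if (std.mem.eql(u8, existing, s)) return @intToEnum(NullTerminatedString, start);
                start += @intCast(u32, existing.len) + 1;
            }
            const id = @intToEnum(NullTerminatedString, @intCast(u32, p.bytes.items.len));
            try p.bytes.appendSlice(gpa, s);
            try p.bytes.append(gpa, 0);
            return id;
        }

        fn stringToSlice(p: *const MiniPool, s: NullTerminatedString) []const u8 {
            return std.mem.sliceTo(p.bytes.items[@enumToInt(s)..], 0);
        }

        fn stringEqlSlice(p: *const MiniPool, s: NullTerminatedString, bytes: []const u8) bool {
            return std.mem.eql(u8, p.stringToSlice(s), bytes);
        }
    };

    test "interned names compare as a single integer" {
        const gpa = std.testing.allocator;
        var pool = MiniPool{};
        defer pool.bytes.deinit(gpa);
        const a = try pool.getOrPutString(gpa, "len");
        const b = try pool.getOrPutString(gpa, "len");
        const c = try pool.getOrPutString(gpa, "ptr");
        try std.testing.expect(a == b); // no byte comparison needed
        try std.testing.expect(a != c);
        try std.testing.expect(pool.stringEqlSlice(c, "ptr"));
    }

The real pool hashes instead of scanning and interns far more than names, but the `a == b` fast path is exactly what the rewritten `fieldCallBind` loop below relies on when it compares `field_name == struct_ty.structFieldName(i, mod)`.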
const mod = sema.mod; - const gpa = sema.gpa; const ip = &mod.intern_pool; const object_ptr_src = src; // TODO better source location const object_ptr_ty = sema.typeOf(object_ptr); @@ -24528,7 +24651,7 @@ fn fieldPtr( switch (inner_ty.zigTypeTag(mod)) { .Array => { - if (mem.eql(u8, field_name, "len")) { + if (ip.stringEqlSlice(field_name, "len")) { var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); return sema.analyzeDeclRef(try anon_decl.finish( @@ -24541,7 +24664,7 @@ fn fieldPtr( block, field_name_src, "no member named '{s}' in '{}'", - .{ field_name, object_ty.fmt(mod) }, + .{ ip.stringToSlice(field_name), object_ty.fmt(mod) }, ); } }, @@ -24553,7 +24676,7 @@ fn fieldPtr( const attr_ptr_ty = if (is_pointer_to) object_ty else object_ptr_ty; - if (mem.eql(u8, field_name, "ptr")) { + if (ip.stringEqlSlice(field_name, "ptr")) { const slice_ptr_ty = inner_ty.slicePtrFieldType(mod); const result_ty = try Type.ptr(sema.arena, mod, .{ @@ -24575,7 +24698,7 @@ fn fieldPtr( try sema.requireRuntimeBlock(block, src, null); return block.addTyOp(.ptr_slice_ptr_ptr, result_ty, inner_ptr); - } else if (mem.eql(u8, field_name, "len")) { + } else if (ip.stringEqlSlice(field_name, "len")) { const result_ty = try Type.ptr(sema.arena, mod, .{ .pointee_type = Type.usize, .mutable = attr_ptr_ty.ptrIsMutable(mod), @@ -24600,7 +24723,7 @@ fn fieldPtr( block, field_name_src, "no member named '{s}' in '{}'", - .{ field_name, object_ty.fmt(mod) }, + .{ ip.stringToSlice(field_name), object_ty.fmt(mod) }, ); } }, @@ -24617,14 +24740,13 @@ fn fieldPtr( switch (child_type.zigTypeTag(mod)) { .ErrorSet => { - const name = try ip.getOrPutString(gpa, field_name); switch (ip.indexToKey(child_type.toIntern())) { .error_set_type => |error_set_type| blk: { - if (error_set_type.nameIndex(ip, name) != null) { + if (error_set_type.nameIndex(ip, field_name) != null) { break :blk; } return sema.fail(block, src, "no error named '{s}' in '{}'", .{ - field_name, child_type.fmt(mod), + ip.stringToSlice(field_name), child_type.fmt(mod), }); }, .inferred_error_set_type => { @@ -24642,12 +24764,12 @@ fn fieldPtr( const error_set_type = if (!child_type.isAnyError(mod)) child_type else - try mod.singleErrorSetTypeNts(name); + try mod.singleErrorSetTypeNts(field_name); return sema.analyzeDeclRef(try anon_decl.finish( error_set_type, (try mod.intern(.{ .err = .{ .ty = error_set_type.toIntern(), - .name = name, + .name = field_name, } })).toValue(), 0, // default alignment )); @@ -24736,13 +24858,14 @@ fn fieldCallBind( block: *Block, src: LazySrcLoc, raw_ptr: Air.Inst.Ref, - field_name: []const u8, + field_name: InternPool.NullTerminatedString, field_name_src: LazySrcLoc, ) CompileError!ResolvedFieldCallee { // When editing this function, note that there is corresponding logic to be edited // in `fieldVal`. This function takes a pointer and returns a pointer. 
const mod = sema.mod; + const ip = &mod.intern_pool; const raw_ptr_src = src; // TODO better source location const raw_ptr_ty = sema.typeOf(raw_ptr); const inner_ty = if (raw_ptr_ty.zigTypeTag(mod) == .Pointer and (raw_ptr_ty.ptrSize(mod) == .One or raw_ptr_ty.ptrSize(mod) == .C)) @@ -24771,18 +24894,18 @@ fn fieldCallBind( return sema.finishFieldCallBind(block, src, ptr_ty, field.ty, field_index, object_ptr); } else if (struct_ty.isTuple(mod)) { - if (mem.eql(u8, field_name, "len")) { + if (ip.stringEqlSlice(field_name, "len")) { return .{ .direct = try sema.addIntUnsigned(Type.usize, struct_ty.structFieldCount(mod)) }; } - if (std.fmt.parseUnsigned(u32, field_name, 10)) |field_index| { + if (std.fmt.parseUnsigned(u32, ip.stringToSlice(field_name), 10)) |field_index| { if (field_index >= struct_ty.structFieldCount(mod)) break :find_field; return sema.finishFieldCallBind(block, src, ptr_ty, struct_ty.structFieldType(field_index, mod), field_index, object_ptr); } else |_| {} } else { const max = struct_ty.structFieldCount(mod); - var i: u32 = 0; - while (i < max) : (i += 1) { - if (mem.eql(u8, struct_ty.structFieldName(i, mod), field_name)) { + for (0..max) |i_usize| { + const i = @intCast(u32, i_usize); + if (field_name == struct_ty.structFieldName(i, mod)) { return sema.finishFieldCallBind(block, src, ptr_ty, struct_ty.structFieldType(i, mod), i, object_ptr); } } @@ -24876,12 +24999,12 @@ fn fieldCallBind( }; const msg = msg: { - const msg = try sema.errMsg(block, src, "no field or member function named '{s}' in '{}'", .{ field_name, concrete_ty.fmt(mod) }); + const msg = try sema.errMsg(block, src, "no field or member function named '{s}' in '{}'", .{ ip.stringToSlice(field_name), concrete_ty.fmt(mod) }); errdefer msg.destroy(sema.gpa); try sema.addDeclaredHereNote(msg, concrete_ty); if (found_decl) |decl_idx| { const decl = mod.declPtr(decl_idx); - try mod.errNoteNonLazy(decl.srcLoc(mod), msg, "'{s}' is not a member function", .{field_name}); + try mod.errNoteNonLazy(decl.srcLoc(mod), msg, "'{s}' is not a member function", .{ip.stringToSlice(field_name)}); } break :msg msg; }; @@ -24933,7 +25056,7 @@ fn namespaceLookup( block: *Block, src: LazySrcLoc, namespace: Namespace.Index, - decl_name: []const u8, + decl_name: InternPool.NullTerminatedString, ) CompileError!?Decl.Index { const mod = sema.mod; const gpa = sema.gpa; @@ -24942,7 +25065,7 @@ fn namespaceLookup( if (!decl.is_pub and decl.getFileScope(mod) != block.getFileScope(mod)) { const msg = msg: { const msg = try sema.errMsg(block, src, "'{s}' is not marked 'pub'", .{ - decl_name, + mod.intern_pool.stringToSlice(decl_name), }); errdefer msg.destroy(gpa); try mod.errNoteNonLazy(decl.srcLoc(mod), msg, "declared here", .{}); @@ -24960,7 +25083,7 @@ fn namespaceLookupRef( block: *Block, src: LazySrcLoc, namespace: Namespace.Index, - decl_name: []const u8, + decl_name: InternPool.NullTerminatedString, ) CompileError!?Air.Inst.Ref { const decl = (try sema.namespaceLookup(block, src, namespace, decl_name)) orelse return null; try sema.addReferencedBy(block, src, decl); @@ -24972,7 +25095,7 @@ fn namespaceLookupVal( block: *Block, src: LazySrcLoc, namespace: Namespace.Index, - decl_name: []const u8, + decl_name: InternPool.NullTerminatedString, ) CompileError!?Air.Inst.Ref { const decl = (try sema.namespaceLookup(block, src, namespace, decl_name)) orelse return null; return try sema.analyzeDeclVal(block, src, decl); @@ -24983,7 +25106,7 @@ fn structFieldPtr( block: *Block, src: LazySrcLoc, struct_ptr: Air.Inst.Ref, - field_name: []const 
u8, + field_name: InternPool.NullTerminatedString, field_name_src: LazySrcLoc, unresolved_struct_ty: Type, initializing: bool, @@ -24995,7 +25118,7 @@ fn structFieldPtr( try sema.resolveStructLayout(struct_ty); if (struct_ty.isTuple(mod)) { - if (mem.eql(u8, field_name, "len")) { + if (mod.intern_pool.stringEqlSlice(field_name, "len")) { const len_inst = try sema.addIntUnsigned(Type.usize, struct_ty.structFieldCount(mod)); return sema.analyzeRef(block, src, len_inst); } @@ -25101,7 +25224,7 @@ fn structFieldPtrByIndex( if (field.is_comptime) { const val = try mod.intern(.{ .ptr = .{ .ty = ptr_field_ty.toIntern(), - .addr = .{ .comptime_field = try field.default_val.intern(field.ty, mod) }, + .addr = .{ .comptime_field = field.default_val }, } }); return sema.addConstant(ptr_field_ty, val.toValue()); } @@ -25126,7 +25249,7 @@ fn structFieldVal( block: *Block, src: LazySrcLoc, struct_byval: Air.Inst.Ref, - field_name: []const u8, + field_name: InternPool.NullTerminatedString, field_name_src: LazySrcLoc, unresolved_struct_ty: Type, ) CompileError!Air.Inst.Ref { @@ -25145,7 +25268,7 @@ fn structFieldVal( const field = struct_obj.fields.values()[field_index]; if (field.is_comptime) { - return sema.addConstant(field.ty, field.default_val); + return sema.addConstant(field.ty, field.default_val.toValue()); } if (try sema.resolveMaybeUndefVal(struct_byval)) |struct_val| { @@ -25176,12 +25299,12 @@ fn tupleFieldVal( block: *Block, src: LazySrcLoc, tuple_byval: Air.Inst.Ref, - field_name: []const u8, + field_name: InternPool.NullTerminatedString, field_name_src: LazySrcLoc, tuple_ty: Type, ) CompileError!Air.Inst.Ref { const mod = sema.mod; - if (mem.eql(u8, field_name, "len")) { + if (mod.intern_pool.stringEqlSlice(field_name, "len")) { return sema.addIntUnsigned(Type.usize, tuple_ty.structFieldCount(mod)); } const field_index = try sema.tupleFieldIndex(block, tuple_ty, field_name, field_name_src); @@ -25193,11 +25316,12 @@ fn tupleFieldIndex( sema: *Sema, block: *Block, tuple_ty: Type, - field_name: []const u8, + field_name_ip: InternPool.NullTerminatedString, field_name_src: LazySrcLoc, ) CompileError!u32 { const mod = sema.mod; - assert(!mem.eql(u8, field_name, "len")); + const field_name = mod.intern_pool.stringToSlice(field_name_ip); + assert(!std.mem.eql(u8, field_name, "len")); if (std.fmt.parseUnsigned(u32, field_name, 10)) |field_index| { if (field_index < tuple_ty.structFieldCount(mod)) return field_index; return sema.fail(block, field_name_src, "index '{s}' out of bounds of tuple '{}'", .{ @@ -25253,13 +25377,14 @@ fn unionFieldPtr( block: *Block, src: LazySrcLoc, union_ptr: Air.Inst.Ref, - field_name: []const u8, + field_name: InternPool.NullTerminatedString, field_name_src: LazySrcLoc, unresolved_union_ty: Type, initializing: bool, ) CompileError!Air.Inst.Ref { const arena = sema.arena; const mod = sema.mod; + const ip = &mod.intern_pool; assert(unresolved_union_ty.zigTypeTag(mod) == .Union); @@ -25281,7 +25406,9 @@ fn unionFieldPtr( const msg = try sema.errMsg(block, src, "cannot initialize 'noreturn' field of union", .{}); errdefer msg.destroy(sema.gpa); - try sema.addFieldErrNote(union_ty, field_index, msg, "field '{s}' declared here", .{field_name}); + try sema.addFieldErrNote(union_ty, field_index, msg, "field '{s}' declared here", .{ + ip.stringToSlice(field_name), + }); try sema.addDeclaredHereNote(msg, union_ty); break :msg msg; }; @@ -25296,14 +25423,17 @@ fn unionFieldPtr( if (union_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, src); } - const un = 
mod.intern_pool.indexToKey(union_val.toIntern()).un; + const un = ip.indexToKey(union_val.toIntern()).un; const field_tag = try mod.enumValueFieldIndex(union_obj.tag_ty, enum_field_index); const tag_matches = un.tag == field_tag.toIntern(); if (!tag_matches) { const msg = msg: { const active_index = union_obj.tag_ty.enumTagFieldIndex(un.tag.toValue(), mod).?; const active_field_name = union_obj.tag_ty.enumFieldName(active_index, mod); - const msg = try sema.errMsg(block, src, "access of union field '{s}' while field '{s}' is active", .{ field_name, active_field_name }); + const msg = try sema.errMsg(block, src, "access of union field '{s}' while field '{s}' is active", .{ + ip.stringToSlice(field_name), + ip.stringToSlice(active_field_name), + }); errdefer msg.destroy(sema.gpa); try sema.addDeclaredHereNote(msg, union_ty); break :msg msg; @@ -25345,11 +25475,12 @@ fn unionFieldVal( block: *Block, src: LazySrcLoc, union_byval: Air.Inst.Ref, - field_name: []const u8, + field_name: InternPool.NullTerminatedString, field_name_src: LazySrcLoc, unresolved_union_ty: Type, ) CompileError!Air.Inst.Ref { const mod = sema.mod; + const ip = &mod.intern_pool; assert(unresolved_union_ty.zigTypeTag(mod) == .Union); const union_ty = try sema.resolveTypeFields(unresolved_union_ty); @@ -25361,7 +25492,7 @@ fn unionFieldVal( if (try sema.resolveMaybeUndefVal(union_byval)) |union_val| { if (union_val.isUndef(mod)) return sema.addConstUndef(field.ty); - const un = mod.intern_pool.indexToKey(union_val.toIntern()).un; + const un = ip.indexToKey(union_val.toIntern()).un; const field_tag = try mod.enumValueFieldIndex(union_obj.tag_ty, enum_field_index); const tag_matches = un.tag == field_tag.toIntern(); switch (union_obj.layout) { @@ -25372,7 +25503,9 @@ fn unionFieldVal( const msg = msg: { const active_index = union_obj.tag_ty.enumTagFieldIndex(un.tag.toValue(), mod).?; const active_field_name = union_obj.tag_ty.enumFieldName(active_index, mod); - const msg = try sema.errMsg(block, src, "access of union field '{s}' while field '{s}' is active", .{ field_name, active_field_name }); + const msg = try sema.errMsg(block, src, "access of union field '{s}' while field '{s}' is active", .{ + ip.stringToSlice(field_name), ip.stringToSlice(active_field_name), + }); errdefer msg.destroy(sema.gpa); try sema.addDeclaredHereNote(msg, union_ty); break :msg msg; @@ -26470,14 +26603,13 @@ fn coerceExtra( // enum literal to enum const val = try sema.resolveConstValue(block, .unneeded, inst, ""); const string = mod.intern_pool.indexToKey(val.toIntern()).enum_literal; - const bytes = mod.intern_pool.stringToSlice(string); - const field_index = dest_ty.enumFieldIndex(bytes, mod) orelse { + const field_index = dest_ty.enumFieldIndex(string, mod) orelse { const msg = msg: { const msg = try sema.errMsg( block, inst_src, "no field named '{s}' in enum '{}'", - .{ bytes, dest_ty.fmt(mod) }, + .{ mod.intern_pool.stringToSlice(string), dest_ty.fmt(mod) }, ); errdefer msg.destroy(sema.gpa); try sema.addDeclaredHereNote(msg, dest_ty); @@ -27876,10 +28008,7 @@ fn storePtrVal( error.Unimplemented => return sema.fail(block, src, "TODO: implement writeToMemory for type '{}'", .{mut_kit.ty.fmt(mod)}), }; - const arena = mut_kit.beginArena(mod); - defer mut_kit.finishArena(mod); - - reinterpret.val_ptr.* = (try (try Value.readFromMemory(mut_kit.ty, mod, buffer, arena)).intern(mut_kit.ty, mod)).toValue(); + reinterpret.val_ptr.* = (try (try Value.readFromMemory(mut_kit.ty, mod, buffer, sema.arena)).intern(mut_kit.ty, mod)).toValue(); }, 
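The `storePtrVal` hunk above, and the `ComptimePtrMutationKit` and `beginComptimePtrMutation` hunks that follow, all retire the per-Decl value arena: with values interned, scratch allocations move to `sema.arena`, which lives for the whole analysis pass and is freed in one shot, so the `beginArena`/`finishArena` pairing (and the chance of leaking an acquired arena on an error path) disappears. A sketch of the pattern under invented names — `analyzeOneBody` is not a real Sema function:

    const std = @import("std");

    // Invented stand-in for a Sema entry point; it shows only the
    // allocator discipline the diff moves to.
    fn analyzeOneBody(gpa: std.mem.Allocator) !usize {
        var arena_state = std.heap.ArenaAllocator.init(gpa);
        // One deinit frees every scratch value below: no acquire/release
        // pairing, and nothing leaks on an early error return.
        defer arena_state.deinit();
        const arena = arena_state.allocator();

        var total: usize = 0;
        var i: usize = 0;
        while (i < 8) : (i += 1) {
            // Intermediate values produced while mutating comptime memory
            // can be allocated freely; their lifetime is the whole pass.
            const scratch = try arena.alloc(u8, 32);
            @memset(scratch, 0);
            total += scratch.len;
        }
        return total;
    }

    test "analysis-scoped arena frees scratch in one shot" {
        try std.testing.expectEqual(@as(usize, 256), try analyzeOneBody(std.testing.allocator));
    }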
.bad_decl_ty, .bad_ptr_ty => { // TODO show the decl declaration site in a note and explain whether the decl @@ -27913,18 +28042,6 @@ const ComptimePtrMutationKit = struct { bad_ptr_ty, }, ty: Type, - decl_arena: std.heap.ArenaAllocator = undefined, - - fn beginArena(self: *ComptimePtrMutationKit, mod: *Module) Allocator { - const decl = mod.declPtr(self.mut_decl.decl); - return decl.value_arena.?.acquire(mod.gpa, &self.decl_arena); - } - - fn finishArena(self: *ComptimePtrMutationKit, mod: *Module) void { - const decl = mod.declPtr(self.mut_decl.decl); - decl.value_arena.?.release(&self.decl_arena); - self.decl_arena = undefined; - } }; fn beginComptimePtrMutation( @@ -27966,10 +28083,8 @@ fn beginComptimePtrMutation( // An error union has been initialized to undefined at comptime and now we // are for the first time setting the payload. We must change the // representation of the error union from `undef` to `opt_payload`. - const arena = parent.beginArena(sema.mod); - defer parent.finishArena(sema.mod); - const payload = try arena.create(Value.Payload.SubValue); + const payload = try sema.arena.create(Value.Payload.SubValue); payload.* = .{ .base = .{ .tag = .eu_payload }, .data = (try mod.intern(.{ .undef = payload_ty.toIntern() })).toValue(), @@ -28019,10 +28134,8 @@ fn beginComptimePtrMutation( // An optional has been initialized to undefined at comptime and now we // are for the first time setting the payload. We must change the // representation of the optional from `undef` to `opt_payload`. - const arena = parent.beginArena(sema.mod); - defer parent.finishArena(sema.mod); - const payload = try arena.create(Value.Payload.SubValue); + const payload = try sema.arena.create(Value.Payload.SubValue); payload.* = .{ .base = .{ .tag = .opt_payload }, .data = payload_val.toValue(), @@ -28088,8 +28201,7 @@ fn beginComptimePtrMutation( // If we wanted to avoid this, there would need to be special detection // elsewhere to identify when writing a value to an array element that is stored // using the `bytes` tag, and handle it without making a call to this function. - const arena = parent.beginArena(sema.mod); - defer parent.finishArena(sema.mod); + const arena = sema.arena; const bytes = val_ptr.castTag(.bytes).?.data; const dest_len = parent.ty.arrayLenIncludingSentinel(mod); @@ -28121,8 +28233,7 @@ fn beginComptimePtrMutation( // need to be special detection elsewhere to identify when writing a value to an // array element that is stored using the `repeated` tag, and handle it // without making a call to this function. - const arena = parent.beginArena(sema.mod); - defer parent.finishArena(sema.mod); + const arena = sema.arena; const repeated_val = try val_ptr.castTag(.repeated).?.data.copy(arena); const array_len_including_sentinel = @@ -28163,8 +28274,7 @@ fn beginComptimePtrMutation( // An array has been initialized to undefined at comptime and now we // are for the first time setting an element. We must change the representation // of the array from `undef` to `array`. 
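The comment above describes the promote-on-first-write idea these hunks keep (while swapping the decl arena for `sema.arena`): a comptime array value stays in a compact representation — `undef`, `bytes`, or a single `repeated` element — until the first elementwise store forces an explicit element list. A toy model of just that shape; `ArrayVal` and its fields are invented for the sketch, not the compiler's `Value` representation:

    const std = @import("std");

    const ArrayVal = union(enum) {
        undef,
        repeated: u64,
        elems: []u64,

        /// Expand compact storage to an explicit element list on the
        /// first elementwise write, then land the store.
        fn setElem(v: *ArrayVal, arena: std.mem.Allocator, len: usize, i: usize, val: u64) !void {
            switch (v.*) {
                .elems => {},
                .repeated => |fill| {
                    const elems = try arena.alloc(u64, len);
                    @memset(elems, fill);
                    v.* = .{ .elems = elems };
                },
                .undef => {
                    const elems = try arena.alloc(u64, len);
                    // 0xaa stands in for undefined so the sketch stays
                    // testable; the compiler writes real undef values.
                    @memset(elems, 0xaa);
                    v.* = .{ .elems = elems };
                },
            }
            v.elems[i] = val;
        }
    };

    test "first write expands repeated storage" {
        var arena_state = std.heap.ArenaAllocator.init(std.testing.allocator);
        defer arena_state.deinit();
        var v: ArrayVal = .{ .repeated = 7 };
        try v.setElem(arena_state.allocator(), 4, 2, 9);
        try std.testing.expectEqualSlices(u64, &[_]u64{ 7, 7, 9, 7 }, v.elems);
    }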
- const arena = parent.beginArena(sema.mod); - defer parent.finishArena(sema.mod); + const arena = sema.arena; const array_len_including_sentinel = try sema.usizeCast(block, src, parent.ty.arrayLenIncludingSentinel(mod)); @@ -28261,8 +28371,7 @@ fn beginComptimePtrMutation( parent.mut_decl, ), .repeated => { - const arena = parent.beginArena(sema.mod); - defer parent.finishArena(sema.mod); + const arena = sema.arena; const elems = try arena.alloc(Value, parent.ty.structFieldCount(mod)); @memset(elems, val_ptr.castTag(.repeated).?.data); @@ -28325,8 +28434,7 @@ fn beginComptimePtrMutation( // A struct or union has been initialized to undefined at comptime and now we // are for the first time setting a field. We must change the representation // of the struct/union from `undef` to `struct`/`union`. - const arena = parent.beginArena(sema.mod); - defer parent.finishArena(sema.mod); + const arena = sema.arena; switch (parent.ty.zigTypeTag(mod)) { .Struct => { @@ -28436,11 +28544,7 @@ fn beginComptimePtrMutationInner( const target = mod.getTarget(); const coerce_ok = (try sema.coerceInMemoryAllowed(block, ptr_elem_ty, decl_ty, true, target, src, src)) == .ok; - const decl = mod.declPtr(mut_decl.decl); - var decl_arena: std.heap.ArenaAllocator = undefined; - const allocator = decl.value_arena.?.acquire(sema.gpa, &decl_arena); - defer decl.value_arena.?.release(&decl_arena); - decl_val.* = try decl_val.unintern(allocator, mod); + decl_val.* = try decl_val.unintern(sema.arena, mod); if (coerce_ok) { return ComptimePtrMutationKit{ @@ -28928,6 +29032,7 @@ fn coerceEnumToUnion( inst_src: LazySrcLoc, ) !Air.Inst.Ref { const mod = sema.mod; + const ip = &mod.intern_pool; const inst_ty = sema.typeOf(inst); const tag_ty = union_ty.unionTagType(mod) orelse { @@ -28966,7 +29071,9 @@ fn coerceEnumToUnion( errdefer msg.destroy(sema.gpa); const field_name = union_obj.fields.keys()[field_index]; - try sema.addFieldErrNote(union_ty, field_index, msg, "field '{s}' declared here", .{field_name}); + try sema.addFieldErrNote(union_ty, field_index, msg, "field '{s}' declared here", .{ + ip.stringToSlice(field_name), + }); try sema.addDeclaredHereNote(msg, union_ty); break :msg msg; }; @@ -28976,11 +29083,14 @@ fn coerceEnumToUnion( const msg = msg: { const field_name = union_obj.fields.keys()[field_index]; const msg = try sema.errMsg(block, inst_src, "coercion from enum '{}' to union '{}' must initialize '{}' field '{s}'", .{ - inst_ty.fmt(sema.mod), union_ty.fmt(sema.mod), field_ty.fmt(sema.mod), field_name, + inst_ty.fmt(sema.mod), union_ty.fmt(sema.mod), + field_ty.fmt(sema.mod), ip.stringToSlice(field_name), }); errdefer msg.destroy(sema.gpa); - try sema.addFieldErrNote(union_ty, field_index, msg, "field '{s}' declared here", .{field_name}); + try sema.addFieldErrNote(union_ty, field_index, msg, "field '{s}' declared here", .{ + ip.stringToSlice(field_name), + }); try sema.addDeclaredHereNote(msg, union_ty); break :msg msg; }; @@ -29049,7 +29159,10 @@ fn coerceEnumToUnion( const field_name = field.key_ptr.*; const field_ty = field.value_ptr.ty; if (!(try sema.typeHasRuntimeBits(field_ty))) continue; - try sema.addFieldErrNote(union_ty, field_index, msg, "field '{s}' has type '{}'", .{ field_name, field_ty.fmt(sema.mod) }); + try sema.addFieldErrNote(union_ty, field_index, msg, "field '{s}' has type '{}'", .{ + ip.stringToSlice(field_name), + field_ty.fmt(sema.mod), + }); } try sema.addDeclaredHereNote(msg, union_ty); break :msg msg; @@ -29068,11 +29181,11 @@ fn coerceAnonStructToUnion( const mod = sema.mod; const 
inst_ty = sema.typeOf(inst); const field_info: union(enum) { - name: []const u8, + name: InternPool.NullTerminatedString, count: usize, } = switch (mod.intern_pool.indexToKey(inst_ty.toIntern())) { .anon_struct_type => |anon_struct_type| if (anon_struct_type.names.len == 1) - .{ .name = mod.intern_pool.stringToSlice(anon_struct_type.names[0]) } + .{ .name = anon_struct_type.names[0] } else .{ .count = anon_struct_type.names.len }, .struct_type => |struct_type| name: { @@ -29335,6 +29448,7 @@ fn coerceTupleToStruct( inst_src: LazySrcLoc, ) !Air.Inst.Ref { const mod = sema.mod; + const ip = &mod.intern_pool; const struct_ty = try sema.resolveTypeFields(dest_ty); if (struct_ty.isTupleOrAnonStruct(mod)) { @@ -29348,7 +29462,7 @@ fn coerceTupleToStruct( const inst_ty = sema.typeOf(inst); var runtime_src: ?LazySrcLoc = null; - const field_count = switch (mod.intern_pool.indexToKey(inst_ty.toIntern())) { + const field_count = switch (ip.indexToKey(inst_ty.toIntern())) { .anon_struct_type => |anon_struct_type| anon_struct_type.types.len, .struct_type => |struct_type| if (mod.structPtrUnwrap(struct_type.index)) |struct_obj| struct_obj.fields.count() @@ -29360,11 +29474,11 @@ fn coerceTupleToStruct( const field_i = @intCast(u32, field_index_usize); const field_src = inst_src; // TODO better source location // https://github.com/ziglang/zig/issues/15709 - const field_name: []const u8 = switch (mod.intern_pool.indexToKey(inst_ty.toIntern())) { + const field_name: InternPool.NullTerminatedString = switch (ip.indexToKey(inst_ty.toIntern())) { .anon_struct_type => |anon_struct_type| if (anon_struct_type.names.len > 0) - mod.intern_pool.stringToSlice(anon_struct_type.names[field_i]) + anon_struct_type.names[field_i] else - try std.fmt.allocPrint(sema.arena, "{d}", .{field_i}), + try ip.getOrPutStringFmt(sema.gpa, "{d}", .{field_i}), .struct_type => |struct_type| mod.structPtrUnwrap(struct_type.index).?.fields.keys()[field_i], else => unreachable, }; @@ -29378,7 +29492,7 @@ fn coerceTupleToStruct( return sema.failWithNeededComptime(block, field_src, "value stored in comptime field must be comptime-known"); }; - if (!init_val.eql(field.default_val, field.ty, sema.mod)) { + if (!init_val.eql(field.default_val.toValue(), field.ty, sema.mod)) { return sema.failWithInvalidComptimeFieldStore(block, field_src, inst_ty, field_i); } } @@ -29401,9 +29515,9 @@ fn coerceTupleToStruct( const field_name = fields.keys()[i]; const field = fields.values()[i]; const field_src = inst_src; // TODO better source location - if (field.default_val.toIntern() == .unreachable_value) { + if (field.default_val == .none) { const template = "missing struct field: {s}"; - const args = .{field_name}; + const args = .{ip.stringToSlice(field_name)}; if (root_msg) |msg| { try sema.errNote(block, field_src, msg, template, args); } else { @@ -29412,9 +29526,9 @@ fn coerceTupleToStruct( continue; } if (runtime_src == null) { - field_vals[i] = field.default_val.toIntern(); + field_vals[i] = field.default_val; } else { - field_ref.* = try sema.addConstant(field.ty, field.default_val); + field_ref.* = try sema.addConstant(field.ty, field.default_val.toValue()); } } @@ -29433,7 +29547,7 @@ fn coerceTupleToStruct( .ty = struct_ty.toIntern(), .storage = .{ .elems = field_vals }, } }); - errdefer mod.intern_pool.remove(struct_val); + errdefer ip.remove(struct_val); return sema.addConstant(struct_ty, struct_val.toValue()); } @@ -29446,7 +29560,8 @@ fn coerceTupleToTuple( inst_src: LazySrcLoc, ) !Air.Inst.Ref { const mod = sema.mod; - const 
dest_field_count = switch (mod.intern_pool.indexToKey(tuple_ty.toIntern())) { + const ip = &mod.intern_pool; + const dest_field_count = switch (ip.indexToKey(tuple_ty.toIntern())) { .anon_struct_type => |anon_struct_type| anon_struct_type.types.len, .struct_type => |struct_type| if (mod.structPtrUnwrap(struct_type.index)) |struct_obj| struct_obj.fields.count() @@ -29459,7 +29574,7 @@ fn coerceTupleToTuple( @memset(field_refs, .none); const inst_ty = sema.typeOf(inst); - const src_field_count = switch (mod.intern_pool.indexToKey(inst_ty.toIntern())) { + const src_field_count = switch (ip.indexToKey(inst_ty.toIntern())) { .anon_struct_type => |anon_struct_type| anon_struct_type.types.len, .struct_type => |struct_type| if (mod.structPtrUnwrap(struct_type.index)) |struct_obj| struct_obj.fields.count() @@ -29474,30 +29589,26 @@ fn coerceTupleToTuple( const field_i = @intCast(u32, field_index_usize); const field_src = inst_src; // TODO better source location // https://github.com/ziglang/zig/issues/15709 - const field_name: []const u8 = switch (mod.intern_pool.indexToKey(inst_ty.toIntern())) { + const field_name: InternPool.NullTerminatedString = switch (ip.indexToKey(inst_ty.toIntern())) { .anon_struct_type => |anon_struct_type| if (anon_struct_type.names.len > 0) - mod.intern_pool.stringToSlice(anon_struct_type.names[field_i]) + anon_struct_type.names[field_i] else - try std.fmt.allocPrint(sema.arena, "{d}", .{field_i}), + try ip.getOrPutStringFmt(sema.gpa, "{d}", .{field_i}), .struct_type => |struct_type| mod.structPtrUnwrap(struct_type.index).?.fields.keys()[field_i], else => unreachable, }; - if (mem.eql(u8, field_name, "len")) { + if (ip.stringEqlSlice(field_name, "len")) return sema.fail(block, field_src, "cannot assign to 'len' field of tuple", .{}); - } - const field_ty = switch (mod.intern_pool.indexToKey(tuple_ty.toIntern())) { + const field_ty = switch (ip.indexToKey(tuple_ty.toIntern())) { .anon_struct_type => |anon_struct_type| anon_struct_type.types[field_index_usize].toType(), .struct_type => |struct_type| mod.structPtrUnwrap(struct_type.index).?.fields.values()[field_index_usize].ty, else => unreachable, }; - const default_val = switch (mod.intern_pool.indexToKey(tuple_ty.toIntern())) { + const default_val = switch (ip.indexToKey(tuple_ty.toIntern())) { .anon_struct_type => |anon_struct_type| anon_struct_type.values[field_index_usize], - .struct_type => |struct_type| switch (mod.structPtrUnwrap(struct_type.index).?.fields.values()[field_index_usize].default_val.toIntern()) { - .unreachable_value => .none, - else => |default_val| default_val, - }, + .struct_type => |struct_type| mod.structPtrUnwrap(struct_type.index).?.fields.values()[field_index_usize].default_val, else => unreachable, }; @@ -29531,12 +29642,9 @@ fn coerceTupleToTuple( for (field_refs, 0..) 
|*field_ref, i| { if (field_ref.* != .none) continue; - const default_val = switch (mod.intern_pool.indexToKey(tuple_ty.toIntern())) { + const default_val = switch (ip.indexToKey(tuple_ty.toIntern())) { .anon_struct_type => |anon_struct_type| anon_struct_type.values[i], - .struct_type => |struct_type| switch (mod.structPtrUnwrap(struct_type.index).?.fields.values()[i].default_val.toIntern()) { - .unreachable_value => .none, - else => |default_val| default_val, - }, + .struct_type => |struct_type| mod.structPtrUnwrap(struct_type.index).?.fields.values()[i].default_val, else => unreachable, }; @@ -29552,7 +29660,7 @@ fn coerceTupleToTuple( continue; } const template = "missing struct field: {s}"; - const args = .{tuple_ty.structFieldName(i, mod)}; + const args = .{ip.stringToSlice(tuple_ty.structFieldName(i, mod))}; if (root_msg) |msg| { try sema.errNote(block, field_src, msg, template, args); } else { @@ -29563,7 +29671,7 @@ fn coerceTupleToTuple( if (runtime_src == null) { field_vals[i] = default_val; } else { - const field_ty = switch (mod.intern_pool.indexToKey(tuple_ty.toIntern())) { + const field_ty = switch (ip.indexToKey(tuple_ty.toIntern())) { .anon_struct_type => |anon_struct_type| anon_struct_type.types[i].toType(), .struct_type => |struct_type| mod.structPtrUnwrap(struct_type.index).?.fields.values()[i].ty, else => unreachable, @@ -31803,15 +31911,7 @@ fn resolveStructLayout(sema: *Sema, ty: Type) CompileError!void { } if (struct_obj.layout == .Auto and mod.backendSupportsFeature(.field_reordering)) { - const optimized_order = if (struct_obj.owner_decl == sema.owner_decl_index) - try sema.perm_arena.alloc(u32, struct_obj.fields.count()) - else blk: { - const decl = mod.declPtr(struct_obj.owner_decl); - var decl_arena: std.heap.ArenaAllocator = undefined; - const decl_arena_allocator = decl.value_arena.?.acquire(sema.gpa, &decl_arena); - defer decl.value_arena.?.release(&decl_arena); - break :blk try decl_arena_allocator.alloc(u32, struct_obj.fields.count()); - }; + const optimized_order = try mod.tmp_hack_arena.allocator().alloc(u32, struct_obj.fields.count()); for (struct_obj.fields.values(), 0..) 
|field, i| { optimized_order[i] = if (try sema.typeHasRuntimeBits(field.ty)) @@ -31852,9 +31952,6 @@ fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!voi const decl_index = struct_obj.owner_decl; const decl = mod.declPtr(decl_index); - var decl_arena: std.heap.ArenaAllocator = undefined; - const decl_arena_allocator = decl.value_arena.?.acquire(gpa, &decl_arena); - defer decl.value_arena.?.release(&decl_arena); const zir = mod.namespacePtr(struct_obj.namespace).file_scope.zir; const extended = zir.instructions.items(.data)[struct_obj.zir_index].extended; @@ -31880,7 +31977,6 @@ fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!voi .mod = mod, .gpa = gpa, .arena = analysis_arena.allocator(), - .perm_arena = decl_arena_allocator, .code = zir, .owner_decl = decl, .owner_decl_index = decl_index, @@ -31936,7 +32032,6 @@ fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!voi .mod = mod, .gpa = gpa, .arena = undefined, - .perm_arena = decl_arena_allocator, .code = zir, .owner_decl = decl, .owner_decl_index = decl_index, @@ -32581,6 +32676,7 @@ fn resolveInferredErrorSetTy( fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void { const gpa = mod.gpa; + const ip = &mod.intern_pool; const decl_index = struct_obj.owner_decl; const zir = mod.namespacePtr(struct_obj.namespace).file_scope.zir; const extended = zir.instructions.items(.data)[struct_obj.zir_index].extended; @@ -32628,9 +32724,6 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void } const decl = mod.declPtr(decl_index); - var decl_arena: std.heap.ArenaAllocator = undefined; - const decl_arena_allocator = decl.value_arena.?.acquire(gpa, &decl_arena); - defer decl.value_arena.?.release(&decl_arena); var analysis_arena = std.heap.ArenaAllocator.init(gpa); defer analysis_arena.deinit(); @@ -32642,7 +32735,6 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void .mod = mod, .gpa = gpa, .arena = analysis_arena.allocator(), - .perm_arena = decl_arena_allocator, .code = zir, .owner_decl = decl, .owner_decl_index = decl_index, @@ -32674,7 +32766,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void } struct_obj.fields = .{}; - try struct_obj.fields.ensureTotalCapacity(decl_arena_allocator, fields_len); + try struct_obj.fields.ensureTotalCapacity(mod.tmp_hack_arena.allocator(), fields_len); const Field = struct { type_body_len: u32 = 0, @@ -32725,16 +32817,15 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void extra_index += 1; // This string needs to outlive the ZIR code. 
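            // Interning it in the InternPool gives it exactly that lifetime:
            // a NullTerminatedString is an index into the pool's string_bytes,
            // which lives for the whole compilation, and two interned names
            // can be compared with `==` instead of a byte-wise `mem.eql`.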
- const field_name = if (field_name_zir) |some| - try decl_arena_allocator.dupe(u8, some) - else - try std.fmt.allocPrint(decl_arena_allocator, "{d}", .{field_i}); + const field_name = try ip.getOrPutString(gpa, if (field_name_zir) |s| s else try std.fmt.allocPrint(sema.arena, "{d}", .{ + field_i, + })); const gop = struct_obj.fields.getOrPutAssumeCapacity(field_name); if (gop.found_existing) { const msg = msg: { const field_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{ .index = field_i }).lazy; - const msg = try sema.errMsg(&block_scope, field_src, "duplicate struct field: '{s}'", .{field_name}); + const msg = try sema.errMsg(&block_scope, field_src, "duplicate struct field: '{s}'", .{ip.stringToSlice(field_name)}); errdefer msg.destroy(gpa); const prev_field_index = struct_obj.fields.getIndex(field_name).?; @@ -32748,7 +32839,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void gop.value_ptr.* = .{ .ty = Type.noreturn, .abi_align = 0, - .default_val = Value.@"unreachable", + .default_val = .none, .is_comptime = is_comptime, .offset = undefined, }; @@ -32917,7 +33008,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void }).lazy; return sema.failWithNeededComptime(&block_scope, init_src, "struct field default value must be comptime-known"); }; - field.default_val = try default_val.copy(decl_arena_allocator); + field.default_val = try default_val.intern(field.ty, mod); } } } @@ -32935,6 +33026,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { defer tracy.end(); const gpa = mod.gpa; + const ip = &mod.intern_pool; const decl_index = union_obj.owner_decl; const zir = mod.namespacePtr(union_obj.namespace).file_scope.zir; const extended = zir.instructions.items(.data)[union_obj.zir_index].extended; @@ -32978,9 +33070,6 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { extra_index += body.len; const decl = mod.declPtr(decl_index); - var decl_arena: std.heap.ArenaAllocator = undefined; - const decl_arena_allocator = decl.value_arena.?.acquire(gpa, &decl_arena); - defer decl.value_arena.?.release(&decl_arena); var analysis_arena = std.heap.ArenaAllocator.init(gpa); defer analysis_arena.deinit(); @@ -32992,7 +33081,6 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { .mod = mod, .gpa = gpa, .arena = analysis_arena.allocator(), - .perm_arena = decl_arena_allocator, .code = zir, .owner_decl = decl, .owner_decl_index = decl_index, @@ -33033,7 +33121,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { try ct_decl.intern(mod); } - try union_obj.fields.ensureTotalCapacity(decl_arena_allocator, fields_len); + try union_obj.fields.ensureTotalCapacity(mod.tmp_hack_arena.allocator(), fields_len); var int_tag_ty: Type = undefined; var enum_field_names: []InternPool.NullTerminatedString = &.{}; @@ -33070,7 +33158,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { } else { // The provided type is the enum tag type. 
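                // The union field names declared below are validated against
                // this enum via `tag_info.nameIndex(ip, field_name)`, which
                // matches interned name indices directly instead of comparing
                // name slices.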
union_obj.tag_ty = provided_ty; - const enum_type = switch (mod.intern_pool.indexToKey(union_obj.tag_ty.toIntern())) { + const enum_type = switch (ip.indexToKey(union_obj.tag_ty.toIntern())) { .enum_type => |x| x, else => return sema.fail(&block_scope, tag_ty_src, "expected enum tag type, found '{}'", .{union_obj.tag_ty.fmt(mod)}), }; @@ -33174,10 +33262,9 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { } // This string needs to outlive the ZIR code. - const field_name = try decl_arena_allocator.dupe(u8, field_name_zir); - const field_name_ip = try mod.intern_pool.getOrPutString(gpa, field_name); + const field_name = try ip.getOrPutString(gpa, field_name_zir); if (enum_field_names.len != 0) { - enum_field_names[field_i] = field_name_ip; + enum_field_names[field_i] = field_name; } const field_ty: Type = if (!has_type) @@ -33205,7 +33292,9 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { if (gop.found_existing) { const msg = msg: { const field_src = mod.fieldSrcLoc(union_obj.owner_decl, .{ .index = field_i }).lazy; - const msg = try sema.errMsg(&block_scope, field_src, "duplicate union field: '{s}'", .{field_name}); + const msg = try sema.errMsg(&block_scope, field_src, "duplicate union field: '{s}'", .{ + ip.stringToSlice(field_name), + }); errdefer msg.destroy(gpa); const prev_field_index = union_obj.fields.getIndex(field_name).?; @@ -33218,14 +33307,14 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { } if (explicit_enum_info) |tag_info| { - const enum_index = tag_info.nameIndex(&mod.intern_pool, field_name_ip) orelse { + const enum_index = tag_info.nameIndex(ip, field_name) orelse { const msg = msg: { const ty_src = mod.fieldSrcLoc(union_obj.owner_decl, .{ .index = field_i, .range = .type, }).lazy; const msg = try sema.errMsg(&block_scope, ty_src, "no field named '{s}' in enum '{}'", .{ - field_name, union_obj.tag_ty.fmt(mod), + ip.stringToSlice(field_name), union_obj.tag_ty.fmt(mod), }); errdefer msg.destroy(sema.gpa); try sema.addDeclaredHereNote(msg, union_obj.tag_ty); @@ -33317,7 +33406,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { for (tag_info.names, 0..) 
|field_name, field_index| { if (explicit_tags_seen[field_index]) continue; try sema.addFieldErrNote(enum_ty, field_index, msg, "field '{s}' missing, declared here", .{ - mod.intern_pool.stringToSlice(field_name), + ip.stringToSlice(field_name), }); } try sema.addDeclaredHereNote(msg, union_obj.tag_ty); @@ -33345,14 +33434,22 @@ fn generateUnionTagTypeNumbered( union_obj: *Module.Union, ) !Type { const mod = sema.mod; + const gpa = sema.gpa; + const ip = &mod.intern_pool; const src_decl = mod.declPtr(block.src_decl); const new_decl_index = try mod.allocateNewDecl(block.namespace, src_decl.src_node, block.wip_capture_scope); errdefer mod.destroyDecl(new_decl_index); const name = name: { - const fqn = try union_obj.getFullyQualifiedName(mod); - defer sema.gpa.free(fqn); - break :name try std.fmt.allocPrintZ(sema.gpa, "@typeInfo({s}).Union.tag_type.?", .{fqn}); + const prefix = "@typeInfo("; + const fqn = ip.stringToSlice(try union_obj.getFullyQualifiedName(mod)); + const suffix = ").Union.tag_type.?"; + const start = ip.string_bytes.items.len; + try ip.string_bytes.ensureUnusedCapacity(gpa, prefix.len + suffix.len + fqn.len); + ip.string_bytes.appendSliceAssumeCapacity(prefix); + ip.string_bytes.appendSliceAssumeCapacity(fqn); + ip.string_bytes.appendSliceAssumeCapacity(suffix); + break :name try ip.getOrPutTrailingString(gpa, ip.string_bytes.items.len - start); }; try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, block.namespace, .{ .ty = Type.type, @@ -33390,6 +33487,8 @@ fn generateUnionTagTypeSimple( maybe_union_obj: ?*Module.Union, ) !Type { const mod = sema.mod; + const gpa = sema.gpa; + const ip = &mod.intern_pool; const new_decl_index = new_decl_index: { const union_obj = maybe_union_obj orelse { @@ -33402,9 +33501,15 @@ fn generateUnionTagTypeSimple( const new_decl_index = try mod.allocateNewDecl(block.namespace, src_decl.src_node, block.wip_capture_scope); errdefer mod.destroyDecl(new_decl_index); const name = name: { - const fqn = try union_obj.getFullyQualifiedName(mod); - defer sema.gpa.free(fqn); - break :name try std.fmt.allocPrintZ(sema.gpa, "@typeInfo({s}).Union.tag_type.?", .{fqn}); + const prefix = "@typeInfo("; + const fqn = ip.stringToSlice(try union_obj.getFullyQualifiedName(mod)); + const suffix = ").Union.tag_type.?"; + const start = ip.string_bytes.items.len; + try ip.string_bytes.ensureUnusedCapacity(gpa, prefix.len + suffix.len + fqn.len); + ip.string_bytes.appendSliceAssumeCapacity(prefix); + ip.string_bytes.appendSliceAssumeCapacity(fqn); + ip.string_bytes.appendSliceAssumeCapacity(suffix); + break :name try ip.getOrPutTrailingString(gpa, ip.string_bytes.items.len - start); }; try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, block.namespace, .{ .ty = Type.type, @@ -33436,7 +33541,9 @@ fn generateUnionTagTypeSimple( } fn getBuiltin(sema: *Sema, name: []const u8) CompileError!Air.Inst.Ref { - var wip_captures = try WipCaptureScope.init(sema.gpa, sema.owner_decl.src_scope); + const gpa = sema.gpa; + + var wip_captures = try WipCaptureScope.init(gpa, sema.owner_decl.src_scope); defer wip_captures.deinit(); var block: Block = .{ @@ -33450,19 +33557,20 @@ fn getBuiltin(sema: *Sema, name: []const u8) CompileError!Air.Inst.Ref { .is_comptime = true, }; defer { - block.instructions.deinit(sema.gpa); - block.params.deinit(sema.gpa); + block.instructions.deinit(gpa); + block.params.deinit(gpa); } const src = LazySrcLoc.nodeOffset(0); const mod = sema.mod; + const ip = &mod.intern_pool; const std_pkg = mod.main_pkg.table.get("std").?; const std_file = 
(mod.importPkg(std_pkg) catch unreachable).file; const opt_builtin_inst = (try sema.namespaceLookupRef( &block, src, mod.declPtr(std_file.root_decl.unwrap().?).src_namespace, - "builtin", + try ip.getOrPutString(gpa, "builtin"), )) orelse @panic("lib/std.zig is corrupt and missing 'builtin'"); const builtin_inst = try sema.analyzeLoad(&block, src, opt_builtin_inst, src); const builtin_ty = sema.analyzeAsType(&block, src, builtin_inst) catch |err| switch (err) { @@ -33473,7 +33581,7 @@ fn getBuiltin(sema: *Sema, name: []const u8) CompileError!Air.Inst.Ref { &block, src, builtin_ty.getNamespaceIndex(mod).unwrap().?, - name, + try ip.getOrPutString(gpa, name), )) orelse std.debug.panic("lib/std/builtin.zig is corrupt and missing '{s}'", .{name}); return sema.analyzeDeclVal(&block, src, opt_ty_decl); } @@ -33608,7 +33716,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { const field_vals = try sema.arena.alloc(InternPool.Index, s.fields.count()); for (field_vals, s.fields.values(), 0..) |*field_val, field, i| { if (field.is_comptime) { - field_val.* = try field.default_val.intern(field.ty, mod); + field_val.* = field.default_val; continue; } if (field.ty.eql(resolved_ty, sema.mod)) { @@ -34287,7 +34395,7 @@ fn unionFieldIndex( sema: *Sema, block: *Block, unresolved_union_ty: Type, - field_name: []const u8, + field_name: InternPool.NullTerminatedString, field_src: LazySrcLoc, ) !u32 { const mod = sema.mod; @@ -34302,7 +34410,7 @@ fn structFieldIndex( sema: *Sema, block: *Block, unresolved_struct_ty: Type, - field_name: []const u8, + field_name: InternPool.NullTerminatedString, field_src: LazySrcLoc, ) !u32 { const mod = sema.mod; @@ -34321,19 +34429,17 @@ fn anonStructFieldIndex( sema: *Sema, block: *Block, struct_ty: Type, - field_name: []const u8, + field_name: InternPool.NullTerminatedString, field_src: LazySrcLoc, ) !u32 { const mod = sema.mod; switch (mod.intern_pool.indexToKey(struct_ty.toIntern())) { .anon_struct_type => |anon_struct_type| for (anon_struct_type.names, 0..) |name, i| { - if (mem.eql(u8, mod.intern_pool.stringToSlice(name), field_name)) { - return @intCast(u32, i); - } + if (name == field_name) return @intCast(u32, i); }, .struct_type => |struct_type| if (mod.structPtrUnwrap(struct_type.index)) |struct_obj| { for (struct_obj.fields.keys(), 0..) 
|name, i| { - if (mem.eql(u8, name, field_name)) { + if (name == field_name) { return @intCast(u32, i); } } @@ -34341,7 +34447,7 @@ fn anonStructFieldIndex( else => unreachable, } return sema.fail(block, field_src, "no field named '{s}' in anonymous struct '{}'", .{ - field_name, struct_ty.fmt(sema.mod), + mod.intern_pool.stringToSlice(field_name), struct_ty.fmt(sema.mod), }); } diff --git a/src/TypedValue.zig b/src/TypedValue.zig index 1ff3ce9415..0d771aa184 100644 --- a/src/TypedValue.zig +++ b/src/TypedValue.zig @@ -201,10 +201,10 @@ pub fn print( }, .variable => return writer.writeAll("(variable)"), .extern_func => |extern_func| return writer.print("(extern function '{s}')", .{ - mod.declPtr(extern_func.decl).name, + mod.intern_pool.stringToSlice(mod.declPtr(extern_func.decl).name), }), - .func => |func| return writer.print("(function '{s}')", .{ - mod.declPtr(mod.funcPtr(func.index).owner_decl).name, + .func => |func| return writer.print("(function '{d}')", .{ + mod.intern_pool.stringToSlice(mod.declPtr(mod.funcPtr(func.index).owner_decl).name), }), .int => |int| switch (int.storage) { inline .u64, .i64, .big_int => |x| return writer.print("{}", .{x}), @@ -296,19 +296,20 @@ fn printAggregate( } if (ty.zigTypeTag(mod) == .Struct) { try writer.writeAll(".{"); - const max_len = std.math.min(ty.structFieldCount(mod), max_aggregate_items); + const max_len = @min(ty.structFieldCount(mod), max_aggregate_items); - var i: u32 = 0; - while (i < max_len) : (i += 1) { + for (0..max_len) |i| { if (i != 0) try writer.writeAll(", "); - if (switch (mod.intern_pool.indexToKey(ty.toIntern())) { - .struct_type => |struct_type| mod.structPtrUnwrap(struct_type.index).?.fields.keys()[i], - .anon_struct_type => |anon_struct_type| if (anon_struct_type.isTuple()) - null - else - mod.intern_pool.stringToSlice(anon_struct_type.names[i]), + + const field_name = switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .struct_type => |x| mod.structPtrUnwrap(x.index).?.fields.keys()[i].toOptional(), + .anon_struct_type => |x| if (x.isTuple()) .none else x.names[i].toOptional(), else => unreachable, - }) |field_name| try writer.print(".{s} = ", .{field_name}); + }; + + if (field_name.unwrap()) |name_ip| try writer.print(".{s} = ", .{ + mod.intern_pool.stringToSlice(name_ip), + }); try print(.{ .ty = ty.structFieldType(i, mod), .val = try val.fieldValue(mod, i), diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index d01a93dd0d..bf945e6983 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -4350,7 +4350,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier .data = .{ .reg = .x30 }, }); } else if (func_value.getExternFunc(mod)) |extern_func| { - const decl_name = mem.sliceTo(mod.declPtr(extern_func.decl).name, 0); + const decl_name = mod.intern_pool.stringToSlice(mod.declPtr(extern_func.decl).name); const lib_name = mod.intern_pool.stringToSliceUnwrap(extern_func.lib_name); if (self.bin_file.cast(link.File.MachO)) |macho_file| { const sym_index = try macho_file.getGlobalSymbol(decl_name, lib_name); diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index fde5424ddc..b660126604 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -276,8 +276,6 @@ pub fn generate( assert(fn_owner_decl.has_tv); const fn_type = fn_owner_decl.ty; - log.debug("fn {s}", .{fn_owner_decl.name}); - var branch_stack = std.ArrayList(Branch).init(bin_file.allocator); defer { assert(branch_stack.items.len == 1); diff 
--git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index 7b1258155c..9b7ba19c13 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -2208,7 +2208,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif const atom = func.bin_file.getAtomPtr(atom_index); const type_index = try func.bin_file.storeDeclType(extern_func.decl, func_type); try func.bin_file.addOrUpdateImport( - mem.sliceTo(ext_decl.name, 0), + mod.intern_pool.stringToSlice(ext_decl.name), atom.getSymbolIndex().?, mod.intern_pool.stringToSliceUnwrap(ext_decl.getOwnedExternFunc(mod).?.lib_name), type_index, @@ -3180,9 +3180,8 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue { } }, .err => |err| { - const name = mod.intern_pool.stringToSlice(err.name); - const kv = try mod.getErrorValue(name); - return WValue{ .imm32 = kv.value }; + const int = try mod.getErrorValue(err.name); + return WValue{ .imm32 = int }; }, .error_union => |error_union| { const err_tv: TypedValue = switch (error_union.val) { @@ -3320,18 +3319,15 @@ fn valueAsI32(func: *const CodeGen, val: Value, ty: Type) i32 { .enum_tag => |enum_tag| intIndexAsI32(&mod.intern_pool, enum_tag.int, mod), .int => |int| intStorageAsI32(int.storage, mod), .ptr => |ptr| intIndexAsI32(&mod.intern_pool, ptr.addr.int, mod), - .err => |err| @bitCast(i32, mod.global_error_set.get(mod.intern_pool.stringToSlice(err.name)).?), + .err => |err| @bitCast(i32, @intCast(Module.ErrorInt, mod.global_error_set.getIndex(err.name).?)), else => unreachable, }, } - switch (ty.zigTypeTag(mod)) { - .ErrorSet => { - const kv = func.bin_file.base.options.module.?.getErrorValue(val.getError(mod).?) catch unreachable; // passed invalid `Value` to function - return @bitCast(i32, kv.value); - }, + return switch (ty.zigTypeTag(mod)) { + .ErrorSet => @bitCast(i32, val.getErrorInt(mod)), else => unreachable, // Programmer called this function for an illegal type - } + }; } fn intIndexAsI32(ip: *const InternPool, int: InternPool.Index, mod: *Module) i32 { @@ -6874,8 +6870,7 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 { defer arena_allocator.deinit(); const arena = arena_allocator.allocator(); - const fqn = try mod.declPtr(enum_decl_index).getFullyQualifiedName(mod); - defer mod.gpa.free(fqn); + const fqn = mod.intern_pool.stringToSlice(try mod.declPtr(enum_decl_index).getFullyQualifiedName(mod)); const func_name = try std.fmt.allocPrintZ(arena, "__zig_tag_name_{s}", .{fqn}); // check if we already generated code for this. 
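
The error-value hunks in this backend all follow one pattern: an error's
integer value is now simply the position of its interned name in
`mod.global_error_set`, so the old round-trip through `stringToSlice` and a
name-keyed lookup disappears. A minimal sketch of the new lookup, assuming
`global_error_set` is an ordered map keyed by `InternPool.NullTerminatedString`
(as the new code implies) and using a hypothetical helper name:

    // Not part of the patch; illustrates the pattern used in these hunks,
    // with `Module` and `InternPool` in scope as in the surrounding file.
    fn errorInt(mod: *Module, name: InternPool.NullTerminatedString) Module.ErrorInt {
        // The index of the interned name in the ordered map *is* the error value.
        return @intCast(Module.ErrorInt, mod.global_error_set.getIndex(name).?);
    }

In the patch itself this expression is inlined at each use site (as in
`valueAsI32` above and `airErrorSetHasValue` below) rather than factored out.
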
@@ -7037,9 +7032,8 @@ fn airErrorSetHasValue(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { var lowest: ?u32 = null; var highest: ?u32 = null; - for (names) |name_ip| { - const name = mod.intern_pool.stringToSlice(name_ip); - const err_int = mod.global_error_set.get(name).?; + for (names) |name| { + const err_int = @intCast(Module.ErrorInt, mod.global_error_set.getIndex(name).?); if (lowest) |*l| { if (err_int < l.*) { l.* = err_int; diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index b9cc3f7052..2675d5350a 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -8132,7 +8132,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier })); } else unreachable; } else if (func_value.getExternFunc(mod)) |extern_func| { - const decl_name = mem.sliceTo(mod.declPtr(extern_func.decl).name, 0); + const decl_name = mod.intern_pool.stringToSlice(mod.declPtr(extern_func.decl).name); const lib_name = mod.intern_pool.stringToSliceUnwrap(extern_func.lib_name); if (self.bin_file.cast(link.File.Coff)) |coff_file| { const atom_index = try self.owner.getSymbolIndex(self); diff --git a/src/codegen.zig b/src/codegen.zig index b0febb5ea7..77359d78da 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -142,11 +142,12 @@ pub fn generateLazySymbol( if (lazy_sym.ty.isAnyError(mod)) { alignment.* = 4; - const err_names = mod.error_name_list.items; + const err_names = mod.global_error_set.keys(); mem.writeInt(u32, try code.addManyAsArray(4), @intCast(u32, err_names.len), endian); var offset = code.items.len; try code.resize((1 + err_names.len + 1) * 4); - for (err_names) |err_name| { + for (err_names) |err_name_nts| { + const err_name = mod.intern_pool.stringToSlice(err_name_nts); mem.writeInt(u32, code.items[offset..][0..4], @intCast(u32, code.items.len), endian); offset += 4; try code.ensureUnusedCapacity(err_name.len + 1); @@ -251,15 +252,13 @@ pub fn generateSymbol( val.writeTwosComplement(try code.addManyAsSlice(abi_size), endian); }, .err => |err| { - const name = mod.intern_pool.stringToSlice(err.name); - const kv = try mod.getErrorValue(name); - try code.writer().writeInt(u16, @intCast(u16, kv.value), endian); + const int = try mod.getErrorValue(err.name); + try code.writer().writeInt(u16, @intCast(u16, int), endian); }, .error_union => |error_union| { const payload_ty = typed_value.ty.errorUnionPayload(mod); - const err_val = switch (error_union.val) { - .err_name => |err_name| @intCast(u16, (try mod.getErrorValue(mod.intern_pool.stringToSlice(err_name))).value), + .err_name => |err_name| @intCast(u16, try mod.getErrorValue(err_name)), .payload => @as(u16, 0), }; @@ -974,11 +973,8 @@ pub fn genTypedValue( }, owner_decl_index); }, .ErrorSet => { - const err_name = mod.intern_pool.stringToSlice( - mod.intern_pool.indexToKey(typed_value.val.toIntern()).err.name, - ); - const global_error_set = mod.global_error_set; - const error_index = global_error_set.get(err_name).?; + const err_name = mod.intern_pool.indexToKey(typed_value.val.toIntern()).err.name; + const error_index = mod.global_error_set.getIndex(err_name).?; return GenResult.mcv(.{ .immediate = error_index }); }, .ErrorUnion => { diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 4b325122ca..7b091d6823 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -452,6 +452,7 @@ pub const Function = struct { var promoted = f.object.dg.ctypes.promote(gpa); defer f.object.dg.ctypes.demote(promoted); const arena = promoted.arena.allocator(); + const mod = 
f.object.dg.module; gop.value_ptr.* = .{ .fn_name = switch (key) { @@ -460,7 +461,7 @@ pub const Function = struct { .never_inline, => |owner_decl| try std.fmt.allocPrint(arena, "zig_{s}_{}__{d}", .{ @tagName(key), - fmtIdent(mem.span(f.object.dg.module.declPtr(owner_decl).name)), + fmtIdent(mod.intern_pool.stringToSlice(mod.declPtr(owner_decl).name)), @enumToInt(owner_decl), }), }, @@ -1465,7 +1466,7 @@ pub const DeclGen = struct { try writer.writeAll(" .payload = {"); } if (field_ty.hasRuntimeBits(mod)) { - try writer.print(" .{ } = ", .{fmtIdent(field_name)}); + try writer.print(" .{ } = ", .{fmtIdent(mod.intern_pool.stringToSlice(field_name))}); try dg.renderValue(writer, field_ty, un.val.toValue(), initializer_type); try writer.writeByte(' '); } else for (ty.unionFields(mod).values()) |field| { @@ -1849,9 +1850,9 @@ pub const DeclGen = struct { try mod.markDeclAlive(decl); if (mod.decl_exports.get(decl_index)) |exports| { - try writer.writeAll(exports.items[export_index].options.name); + try writer.writeAll(mod.intern_pool.stringToSlice(exports.items[export_index].name)); } else if (decl.isExtern(mod)) { - try writer.writeAll(mem.span(decl.name)); + try writer.writeAll(mod.intern_pool.stringToSlice(decl.name)); } else { // MSVC has a limit of 4095 character token length limit, and fmtIdent can (worst case), // expand to 3x the length of its input, but let's cut it off at a much shorter limit. @@ -1987,7 +1988,7 @@ fn renderTypeName( try w.print("{s} {s}{}__{d}", .{ @tagName(tag)["fwd_".len..], attributes, - fmtIdent(mem.span(mod.declPtr(owner_decl).name)), + fmtIdent(mod.intern_pool.stringToSlice(mod.declPtr(owner_decl).name)), @enumToInt(owner_decl), }); }, @@ -2406,11 +2407,12 @@ pub fn genErrDecls(o: *Object) !void { try writer.writeAll("enum {\n"); o.indent_writer.pushIndent(); var max_name_len: usize = 0; - for (mod.error_name_list.items[1..], 1..) |name, value| { - max_name_len = std.math.max(name.len, max_name_len); + for (mod.global_error_set.keys()[1..], 1..) |name_nts, value| { + const name = mod.intern_pool.stringToSlice(name_nts); + max_name_len = @max(name.len, max_name_len); const err_val = try mod.intern(.{ .err = .{ .ty = .anyerror_type, - .name = mod.intern_pool.getString(name).unwrap().?, + .name = name_nts, } }); try o.dg.renderValue(writer, Type.anyerror, err_val.toValue(), .Other); try writer.print(" = {d}u,\n", .{value}); @@ -2424,7 +2426,8 @@ pub fn genErrDecls(o: *Object) !void { defer o.dg.gpa.free(name_buf); @memcpy(name_buf[0..name_prefix.len], name_prefix); - for (mod.error_name_list.items) |name| { + for (mod.global_error_set.keys()) |name_nts| { + const name = mod.intern_pool.stringToSlice(name_nts); @memcpy(name_buf[name_prefix.len..][0..name.len], name); const identifier = name_buf[0 .. name_prefix.len + name.len]; @@ -2446,14 +2449,15 @@ pub fn genErrDecls(o: *Object) !void { } const name_array_ty = try mod.arrayType(.{ - .len = mod.error_name_list.items.len, + .len = mod.global_error_set.count(), .child = .slice_const_u8_sentinel_0_type, }); try writer.writeAll("static "); try o.dg.renderTypeAndName(writer, name_array_ty, .{ .identifier = array_identifier }, Const, 0, .complete); try writer.writeAll(" = {"); - for (mod.error_name_list.items, 0..) |name, value| { + for (mod.global_error_set.keys(), 0..) 
|name_nts, value| { + const name = mod.intern_pool.stringToSlice(name_nts); if (value != 0) try writer.writeByte(','); const len_val = try mod.intValue(Type.usize, name.len); @@ -2469,14 +2473,16 @@ fn genExports(o: *Object) !void { const tracy = trace(@src()); defer tracy.end(); + const mod = o.dg.module; + const ip = &mod.intern_pool; const fwd_decl_writer = o.dg.fwd_decl.writer(); - if (o.dg.module.decl_exports.get(o.dg.decl_index.unwrap().?)) |exports| { + if (mod.decl_exports.get(o.dg.decl_index.unwrap().?)) |exports| { for (exports.items[1..], 1..) |@"export", i| { try fwd_decl_writer.writeAll("zig_export("); try o.dg.renderFunctionSignature(fwd_decl_writer, o.dg.decl_index.unwrap().?, .forward, .{ .export_index = @intCast(u32, i) }); try fwd_decl_writer.print(", {s}, {s});\n", .{ - fmtStringLiteral(exports.items[0].options.name, null), - fmtStringLiteral(@"export".options.name, null), + fmtStringLiteral(ip.stringToSlice(exports.items[0].name), null), + fmtStringLiteral(ip.stringToSlice(@"export".name), null), }); } } @@ -2680,9 +2686,10 @@ pub fn genDecl(o: *Object) !void { if (!is_global) try w.writeAll("static "); if (variable.is_threadlocal) try w.writeAll("zig_threadlocal "); if (variable.is_weak_linkage) try w.writeAll("zig_weak_linkage "); - if (decl.@"linksection") |section| try w.print("zig_linksection(\"{s}\", ", .{section}); + if (mod.intern_pool.stringToSliceUnwrap(decl.@"linksection")) |s| + try w.print("zig_linksection(\"{s}\", ", .{s}); try o.dg.renderTypeAndName(w, tv.ty, decl_c_value, .{}, decl.@"align", .complete); - if (decl.@"linksection" != null) try w.writeAll(", read, write)"); + if (decl.@"linksection" != .none) try w.writeAll(", read, write)"); try w.writeAll(" = "); try o.dg.renderValue(w, tv.ty, variable.init.toValue(), .StaticInitializer); try w.writeByte(';'); @@ -2697,9 +2704,10 @@ pub fn genDecl(o: *Object) !void { const w = o.writer(); if (!is_global) try w.writeAll("static "); - if (decl.@"linksection") |section| try w.print("zig_linksection(\"{s}\", ", .{section}); + if (mod.intern_pool.stringToSliceUnwrap(decl.@"linksection")) |s| + try w.print("zig_linksection(\"{s}\", ", .{s}); try o.dg.renderTypeAndName(w, tv.ty, decl_c_value, Const, decl.@"align", .complete); - if (decl.@"linksection" != null) try w.writeAll(", read)"); + if (decl.@"linksection" != .none) try w.writeAll(", read)"); try w.writeAll(" = "); try o.dg.renderValue(w, tv.ty, tv.val, .StaticInitializer); try w.writeAll(";\n"); @@ -4229,7 +4237,9 @@ fn airDbgInline(f: *Function, inst: Air.Inst.Index) !CValue { const mod = f.object.dg.module; const writer = f.object.writer(); const function = mod.funcPtr(ty_fn.func); - try writer.print("/* dbg func:{s} */\n", .{mod.declPtr(function.owner_decl).name}); + try writer.print("/* dbg func:{s} */\n", .{ + mod.intern_pool.stringToSlice(mod.declPtr(function.owner_decl).name), + }); return .none; } @@ -5176,6 +5186,7 @@ fn fieldLocation( byte_offset: u32, end: void, } { + const ip = &mod.intern_pool; return switch (container_ty.zigTypeTag(mod)) { .Struct => switch (container_ty.containerLayout(mod)) { .Auto, .Extern => for (field_index..container_ty.structFieldCount(mod)) |next_field_index| { @@ -5186,7 +5197,7 @@ fn fieldLocation( break .{ .field = if (container_ty.isSimpleTuple(mod)) .{ .field = next_field_index } else - .{ .identifier = container_ty.structFieldName(next_field_index, mod) } }; + .{ .identifier = ip.stringToSlice(container_ty.structFieldName(next_field_index, mod)) } }; } else if (container_ty.hasRuntimeBitsIgnoreComptime(mod)) 
.end else .begin, .Packed => if (field_ptr_ty.ptrInfo(mod).host_size == 0) .{ .byte_offset = container_ty.packedStructFieldByteOffset(field_index, mod) } @@ -5204,9 +5215,9 @@ fn fieldLocation( .begin; const field_name = container_ty.unionFields(mod).keys()[field_index]; return .{ .field = if (container_ty.unionTagTypeSafety(mod)) |_| - .{ .payload_identifier = field_name } + .{ .payload_identifier = ip.stringToSlice(field_name) } else - .{ .identifier = field_name } }; + .{ .identifier = ip.stringToSlice(field_name) } }; }, .Packed => .begin, }, @@ -5347,6 +5358,7 @@ fn fieldPtr( fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue { const mod = f.object.dg.module; + const ip = &mod.intern_pool; const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; const extra = f.air.extraData(Air.StructField, ty_pl.payload).data; @@ -5369,7 +5381,7 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue { .Auto, .Extern => if (struct_ty.isSimpleTuple(mod)) .{ .field = extra.field_index } else - .{ .identifier = struct_ty.structFieldName(extra.field_index, mod) }, + .{ .identifier = ip.stringToSlice(struct_ty.structFieldName(extra.field_index, mod)) }, .Packed => { const struct_obj = mod.typeToStruct(struct_ty).?; const int_info = struct_ty.intInfo(mod); @@ -5431,7 +5443,7 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue { .anon_struct_type => |anon_struct_type| if (anon_struct_type.names.len == 0) .{ .field = extra.field_index } else - .{ .identifier = struct_ty.structFieldName(extra.field_index, mod) }, + .{ .identifier = ip.stringToSlice(struct_ty.structFieldName(extra.field_index, mod)) }, .union_type => |union_type| field_name: { const union_obj = mod.unionPtr(union_type.index); @@ -5462,9 +5474,9 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue { } else { const name = union_obj.fields.keys()[extra.field_index]; break :field_name if (union_type.hasTag()) .{ - .payload_identifier = name, + .payload_identifier = ip.stringToSlice(name), } else .{ - .identifier = name, + .identifier = ip.stringToSlice(name), }; } }, @@ -6723,6 +6735,7 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue { fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { const mod = f.object.dg.module; + const ip = &mod.intern_pool; const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; const inst_ty = f.typeOfIndex(inst); const len = @intCast(usize, inst_ty.arrayLen(mod)); @@ -6773,7 +6786,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { try f.writeCValueMember(writer, local, if (inst_ty.isSimpleTuple(mod)) .{ .field = field_i } else - .{ .identifier = inst_ty.structFieldName(field_i, mod) }); + .{ .identifier = ip.stringToSlice(inst_ty.structFieldName(field_i, mod)) }); try a.assign(f, writer); try f.writeCValue(writer, element, .Other); try a.end(f, writer); @@ -6851,6 +6864,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { fn airUnionInit(f: *Function, inst: Air.Inst.Index) !CValue { const mod = f.object.dg.module; + const ip = &mod.intern_pool; const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; const extra = f.air.extraData(Air.UnionInit, ty_pl.payload).data; @@ -6886,8 +6900,8 @@ fn airUnionInit(f: *Function, inst: Air.Inst.Index) !CValue { try writer.print("{}", .{try f.fmtIntLiteral(tag_ty, int_val)}); try a.end(f, writer); } - break :field .{ .payload_identifier = field_name }; - } else .{ .identifier = field_name }; + break :field .{ .payload_identifier = ip.stringToSlice(field_name) 
}; + } else .{ .identifier = ip.stringToSlice(field_name) }; const a = try Assignment.start(f, writer, payload_ty); try f.writeCValueMember(writer, local, field); diff --git a/src/codegen/c/type.zig b/src/codegen/c/type.zig index dc1749d42e..81ca1dd80d 100644 --- a/src/codegen/c/type.zig +++ b/src/codegen/c/type.zig @@ -1953,11 +1953,11 @@ pub const CType = extern union { .name = try if (ty.isSimpleTuple(mod)) std.fmt.allocPrintZ(arena, "f{}", .{field_i}) else - arena.dupeZ(u8, switch (zig_ty_tag) { + arena.dupeZ(u8, mod.intern_pool.stringToSlice(switch (zig_ty_tag) { .Struct => ty.structFieldName(field_i, mod), .Union => ty.unionFields(mod).keys()[field_i], else => unreachable, - }), + })), .type = store.set.typeToIndex(field_ty, mod, switch (kind) { .forward, .forward_parameter => .forward, .complete, .parameter, .payload => .complete, @@ -2102,12 +2102,13 @@ pub const CType = extern union { }) or !mem.eql( u8, if (ty.isSimpleTuple(mod)) - std.fmt.bufPrint(&name_buf, "f{}", .{field_i}) catch unreachable - else switch (zig_ty_tag) { - .Struct => ty.structFieldName(field_i, mod), - .Union => ty.unionFields(mod).keys()[field_i], - else => unreachable, - }, + std.fmt.bufPrintZ(&name_buf, "f{}", .{field_i}) catch unreachable + else + mod.intern_pool.stringToSlice(switch (zig_ty_tag) { + .Struct => ty.structFieldName(field_i, mod), + .Union => ty.unionFields(mod).keys()[field_i], + else => unreachable, + }), mem.span(c_field.name), ) or AlignAs.fieldAlign(ty, field_i, mod).@"align" != c_field.alignas.@"align") return false; @@ -2225,11 +2226,12 @@ pub const CType = extern union { }); hasher.update(if (ty.isSimpleTuple(mod)) std.fmt.bufPrint(&name_buf, "f{}", .{field_i}) catch unreachable - else switch (zig_ty_tag) { - .Struct => ty.structFieldName(field_i, mod), - .Union => ty.unionFields(mod).keys()[field_i], - else => unreachable, - }); + else + mod.intern_pool.stringToSlice(switch (zig_ty_tag) { + .Struct => ty.structFieldName(field_i, mod), + .Union => ty.unionFields(mod).keys()[field_i], + else => unreachable, + })); autoHash(hasher, AlignAs.fieldAlign(ty, field_i, mod).@"align"); } }, diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index be6ca714a6..bd50528095 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -585,13 +585,13 @@ pub const Object = struct { const slice_ty = Type.slice_const_u8_sentinel_0; const slice_alignment = slice_ty.abiAlignment(mod); - const error_name_list = mod.error_name_list.items; + const error_name_list = mod.global_error_set.keys(); const llvm_errors = try mod.gpa.alloc(*llvm.Value, error_name_list.len); defer mod.gpa.free(llvm_errors); llvm_errors[0] = llvm_slice_ty.getUndef(); - for (llvm_errors[1..], 0..) |*llvm_error, i| { - const name = error_name_list[1..][i]; + for (llvm_errors[1..], error_name_list[1..]) |*llvm_error, name_nts| { + const name = mod.intern_pool.stringToSlice(name_nts); const str_init = self.context.constString(name.ptr, @intCast(c_uint, name.len), .False); const str_global = self.llvm_module.addGlobal(str_init.typeOf(), ""); str_global.setInitializer(str_init); @@ -671,7 +671,7 @@ pub const Object = struct { const llvm_global = entry.value_ptr.*; // Same logic as below but for externs instead of exports. 
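            // The name used for the lookup now comes out of the InternPool;
            // `stringToSlice` yields a sentinel-terminated slice into the pool,
            // so no owned copy (and no corresponding `free`) is needed anymore.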
const decl = mod.declPtr(decl_index); - const other_global = object.getLlvmGlobal(decl.name) orelse continue; + const other_global = object.getLlvmGlobal(mod.intern_pool.stringToSlice(decl.name)) orelse continue; if (other_global == llvm_global) continue; llvm_global.replaceAllUsesWith(other_global); @@ -689,8 +689,7 @@ pub const Object = struct { // case, we need to replace all uses of it with this exported global. // TODO update std.builtin.ExportOptions to have the name be a // null-terminated slice. - const exp_name_z = try mod.gpa.dupeZ(u8, exp.options.name); - defer mod.gpa.free(exp_name_z); + const exp_name_z = mod.intern_pool.stringToSlice(exp.name); const other_global = object.getLlvmGlobal(exp_name_z.ptr) orelse continue; if (other_global == llvm_global) continue; @@ -923,9 +922,8 @@ pub const Object = struct { dg.addFnAttrString(llvm_func, "no-stack-arg-probe", ""); } - if (decl.@"linksection") |section| { + if (mod.intern_pool.stringToSliceUnwrap(decl.@"linksection")) |section| llvm_func.setSection(section); - } // Remove all the basic blocks of a function in order to start over, generating // LLVM IR from an empty function body. @@ -1173,7 +1171,7 @@ pub const Object = struct { 0; const subprogram = dib.createFunction( di_file.?.toScope(), - decl.name, + mod.intern_pool.stringToSlice(decl.name), llvm_func.getValueName(), di_file.?, line_number, @@ -1273,22 +1271,26 @@ pub const Object = struct { if (decl.isExtern(mod)) { var free_decl_name = false; const decl_name = decl_name: { + const decl_name = mod.intern_pool.stringToSlice(decl.name); + if (mod.getTarget().isWasm() and try decl.isFunction(mod)) { if (mod.intern_pool.stringToSliceUnwrap(decl.getOwnedExternFunc(mod).?.lib_name)) |lib_name| { if (!std.mem.eql(u8, lib_name, "c")) { free_decl_name = true; - break :decl_name try std.fmt.allocPrintZ(gpa, "{s}|{s}", .{ decl.name, lib_name }); + break :decl_name try std.fmt.allocPrintZ(gpa, "{s}|{s}", .{ + decl_name, lib_name, + }); } } } - break :decl_name std.mem.span(decl.name); + + break :decl_name decl_name; }; defer if (free_decl_name) gpa.free(decl_name); llvm_global.setValueName(decl_name); if (self.getLlvmGlobal(decl_name)) |other_global| { if (other_global != llvm_global) { - log.debug("updateDeclExports isExtern()=true setValueName({s}) conflict", .{decl.name}); try self.extern_collisions.put(gpa, decl_index, {}); } } @@ -1298,11 +1300,11 @@ pub const Object = struct { if (self.di_map.get(decl)) |di_node| { if (try decl.isFunction(mod)) { const di_func = @ptrCast(*llvm.DISubprogram, di_node); - const linkage_name = llvm.MDString.get(self.context, decl.name, std.mem.len(decl.name)); + const linkage_name = llvm.MDString.get(self.context, decl_name.ptr, decl_name.len); di_func.replaceLinkageName(linkage_name); } else { const di_global = @ptrCast(*llvm.DIGlobalVariable, di_node); - const linkage_name = llvm.MDString.get(self.context, decl.name, std.mem.len(decl.name)); + const linkage_name = llvm.MDString.get(self.context, decl_name.ptr, decl_name.len); di_global.replaceLinkageName(linkage_name); } } @@ -1317,7 +1319,7 @@ pub const Object = struct { } } } else if (exports.len != 0) { - const exp_name = exports[0].options.name; + const exp_name = mod.intern_pool.stringToSlice(exports[0].name); llvm_global.setValueName2(exp_name.ptr, exp_name.len); llvm_global.setUnnamedAddr(.False); if (mod.wantDllExports()) llvm_global.setDLLStorageClass(.DLLExport); @@ -1332,21 +1334,19 @@ pub const Object = struct { di_global.replaceLinkageName(linkage_name); } } - switch 
(exports[0].options.linkage) { + switch (exports[0].linkage) { .Internal => unreachable, .Strong => llvm_global.setLinkage(.External), .Weak => llvm_global.setLinkage(.WeakODR), .LinkOnce => llvm_global.setLinkage(.LinkOnceODR), } - switch (exports[0].options.visibility) { + switch (exports[0].visibility) { .default => llvm_global.setVisibility(.Default), .hidden => llvm_global.setVisibility(.Hidden), .protected => llvm_global.setVisibility(.Protected), } - if (exports[0].options.section) |section| { - const section_z = try gpa.dupeZ(u8, section); - defer gpa.free(section_z); - llvm_global.setSection(section_z); + if (mod.intern_pool.stringToSliceUnwrap(exports[0].section)) |section| { + llvm_global.setSection(section); } if (decl.val.getVariable(mod)) |variable| { if (variable.is_threadlocal) { @@ -1356,13 +1356,12 @@ pub const Object = struct { // If a Decl is exported more than one time (which is rare), // we add aliases for all but the first export. - // TODO LLVM C API does not support deleting aliases. We need to - // patch it to support this or figure out how to wrap the C++ API ourselves. + // TODO LLVM C API does not support deleting aliases. + // The planned solution to this is https://github.com/ziglang/zig/issues/13265 // Until then we iterate over existing aliases and make them point // to the correct decl, or otherwise add a new alias. Old aliases are leaked. for (exports[1..]) |exp| { - const exp_name_z = try gpa.dupeZ(u8, exp.options.name); - defer gpa.free(exp_name_z); + const exp_name_z = mod.intern_pool.stringToSlice(exp.name); if (self.llvm_module.getNamedGlobalAlias(exp_name_z.ptr, exp_name_z.len)) |alias| { alias.setAliasee(llvm_global); @@ -1376,8 +1375,7 @@ pub const Object = struct { } } } else { - const fqn = try decl.getFullyQualifiedName(mod); - defer gpa.free(fqn); + const fqn = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod)); llvm_global.setValueName2(fqn.ptr, fqn.len); llvm_global.setLinkage(.Internal); if (mod.wantDllExports()) llvm_global.setDLLStorageClass(.Default); @@ -2092,8 +2090,7 @@ pub const Object = struct { const field_offset = std.mem.alignForwardGeneric(u64, offset, field_align); offset = field_offset + field_size; - const field_name = try gpa.dupeZ(u8, fields.keys()[field_and_index.index]); - defer gpa.free(field_name); + const field_name = mod.intern_pool.stringToSlice(fields.keys()[field_and_index.index]); try di_fields.append(gpa, dib.createMemberType( fwd_decl.toScope(), @@ -2200,12 +2197,9 @@ pub const Object = struct { const field_size = field.ty.abiSize(mod); const field_align = field.normalAlignment(mod); - const field_name_copy = try gpa.dupeZ(u8, field_name); - defer gpa.free(field_name_copy); - di_fields.appendAssumeCapacity(dib.createMemberType( fwd_decl.toScope(), - field_name_copy, + mod.intern_pool.stringToSlice(field_name), null, // file 0, // line field_size * 8, // size in bits @@ -2327,7 +2321,7 @@ pub const Object = struct { if (fn_info.return_type.toType().isError(mod) and o.module.comp.bin_file.options.error_return_tracing) { - const ptr_ty = try mod.singleMutPtrType(o.getStackTraceType()); + const ptr_ty = try mod.singleMutPtrType(try o.getStackTraceType()); try param_di_types.append(try o.lowerDebugType(ptr_ty, .full)); } @@ -2384,7 +2378,7 @@ pub const Object = struct { const fields: [0]*llvm.DIType = .{}; return o.di_builder.?.createStructType( try o.namespaceToDebugScope(decl.src_namespace), - decl.name, // TODO use fully qualified name + mod.intern_pool.stringToSlice(decl.name), // TODO use fully 
qualified name try o.getDIFile(o.gpa, mod.namespacePtr(decl.src_namespace).file_scope), decl.src_line + 1, 0, // size in bits @@ -2399,18 +2393,18 @@ pub const Object = struct { ); } - fn getStackTraceType(o: *Object) Type { + fn getStackTraceType(o: *Object) Allocator.Error!Type { const mod = o.module; const std_pkg = mod.main_pkg.table.get("std").?; const std_file = (mod.importPkg(std_pkg) catch unreachable).file; - const builtin_str: []const u8 = "builtin"; + const builtin_str = try mod.intern_pool.getOrPutString(mod.gpa, "builtin"); const std_namespace = mod.namespacePtr(mod.declPtr(std_file.root_decl.unwrap().?).src_namespace); const builtin_decl = std_namespace.decls .getKeyAdapted(builtin_str, Module.DeclAdapter{ .mod = mod }).?; - const stack_trace_str: []const u8 = "StackTrace"; + const stack_trace_str = try mod.intern_pool.getOrPutString(mod.gpa, "StackTrace"); // buffer is only used for int_type, `builtin` is a struct. const builtin_ty = mod.declPtr(builtin_decl).val.toType(); const builtin_namespace = builtin_ty.getNamespace(mod).?; @@ -2452,16 +2446,13 @@ pub const DeclGen = struct { const decl_index = dg.decl_index; assert(decl.has_tv); - log.debug("gen: {s} type: {}, value: {}", .{ - decl.name, decl.ty.fmtDebug(), decl.val.fmtDebug(), - }); if (decl.val.getExternFunc(mod)) |extern_func| { _ = try dg.resolveLlvmFunction(extern_func.decl); } else { const target = mod.getTarget(); var global = try dg.resolveGlobalDecl(decl_index); global.setAlignment(decl.getAlignment(mod)); - if (decl.@"linksection") |section| global.setSection(section); + if (mod.intern_pool.stringToSliceUnwrap(decl.@"linksection")) |s| global.setSection(s); assert(decl.has_tv); const init_val = if (decl.val.getVariable(mod)) |variable| init_val: { break :init_val variable.init; @@ -2495,7 +2486,8 @@ pub const DeclGen = struct { new_global.setLinkage(global.getLinkage()); new_global.setUnnamedAddr(global.getUnnamedAddress()); new_global.setAlignment(global.getAlignment()); - if (decl.@"linksection") |section| new_global.setSection(section); + if (mod.intern_pool.stringToSliceUnwrap(decl.@"linksection")) |s| + new_global.setSection(s); new_global.setInitializer(llvm_init); // TODO: How should this work then the address space of a global changed? 
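            // (An LLVM global's value type is fixed at creation, hence the
            // fresh global above and the replace-all-uses swap below.)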
global.replaceAllUsesWith(new_global); @@ -2513,7 +2505,7 @@ pub const DeclGen = struct { const is_internal_linkage = !dg.module.decl_exports.contains(decl_index); const di_global = dib.createGlobalVariableExpression( di_file.toScope(), - decl.name, + mod.intern_pool.stringToSlice(decl.name), global.getValueName(), di_file, line_number, @@ -2544,8 +2536,7 @@ pub const DeclGen = struct { const fn_type = try dg.lowerType(zig_fn_type); - const fqn = try decl.getFullyQualifiedName(mod); - defer dg.gpa.free(fqn); + const fqn = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod)); const llvm_addrspace = toLlvmAddressSpace(decl.@"addrspace", target); const llvm_fn = dg.llvmModule().addFunctionInAddressSpace(fqn, fn_type, llvm_addrspace); @@ -2557,7 +2548,7 @@ pub const DeclGen = struct { llvm_fn.setUnnamedAddr(.True); } else { if (target.isWasm()) { - dg.addFnAttrString(llvm_fn, "wasm-import-name", std.mem.sliceTo(decl.name, 0)); + dg.addFnAttrString(llvm_fn, "wasm-import-name", mod.intern_pool.stringToSlice(decl.name)); if (mod.intern_pool.stringToSliceUnwrap(decl.getOwnedExternFunc(mod).?.lib_name)) |lib_name| { if (!std.mem.eql(u8, lib_name, "c")) { dg.addFnAttrString(llvm_fn, "wasm-import-module", lib_name); @@ -2699,8 +2690,7 @@ pub const DeclGen = struct { const mod = dg.module; const decl = mod.declPtr(decl_index); - const fqn = try decl.getFullyQualifiedName(mod); - defer dg.gpa.free(fqn); + const fqn = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod)); const target = mod.getTarget(); @@ -2716,7 +2706,7 @@ pub const DeclGen = struct { // This is needed for declarations created by `@extern`. if (decl.isExtern(mod)) { - llvm_global.setValueName(decl.name); + llvm_global.setValueName(mod.intern_pool.stringToSlice(decl.name)); llvm_global.setUnnamedAddr(.False); llvm_global.setLinkage(.External); if (decl.val.getVariable(mod)) |variable| { @@ -2811,8 +2801,7 @@ pub const DeclGen = struct { if (gop.found_existing) return gop.value_ptr.*; const opaque_type = mod.intern_pool.indexToKey(t.toIntern()).opaque_type; - const name = try mod.opaqueFullyQualifiedName(opaque_type); - defer gpa.free(name); + const name = mod.intern_pool.stringToSlice(try mod.opaqueFullyQualifiedName(opaque_type)); const llvm_struct_ty = dg.context.structCreateNamed(name); gop.value_ptr.* = llvm_struct_ty; // must be done before any recursive calls @@ -2963,8 +2952,7 @@ pub const DeclGen = struct { return int_llvm_ty; } - const name = try struct_obj.getFullyQualifiedName(mod); - defer gpa.free(name); + const name = mod.intern_pool.stringToSlice(try struct_obj.getFullyQualifiedName(mod)); const llvm_struct_ty = dg.context.structCreateNamed(name); gop.value_ptr.* = llvm_struct_ty; // must be done before any recursive calls @@ -3040,8 +3028,7 @@ pub const DeclGen = struct { return enum_tag_llvm_ty; } - const name = try union_obj.getFullyQualifiedName(mod); - defer gpa.free(name); + const name = mod.intern_pool.stringToSlice(try union_obj.getFullyQualifiedName(mod)); const llvm_union_ty = dg.context.structCreateNamed(name); gop.value_ptr.* = llvm_union_ty; // must be done before any recursive calls @@ -3119,7 +3106,7 @@ pub const DeclGen = struct { if (fn_info.return_type.toType().isError(mod) and mod.comp.bin_file.options.error_return_tracing) { - const ptr_ty = try mod.singleMutPtrType(dg.object.getStackTraceType()); + const ptr_ty = try mod.singleMutPtrType(try dg.object.getStackTraceType()); try llvm_params.append(try dg.lowerType(ptr_ty)); } @@ -3266,9 +3253,8 @@ pub const DeclGen = struct { 
}, .err => |err| { const llvm_ty = try dg.lowerType(Type.anyerror); - const name = mod.intern_pool.stringToSlice(err.name); - const kv = try mod.getErrorValue(name); - return llvm_ty.constInt(kv.value, .False); + const int = try mod.getErrorValue(err.name); + return llvm_ty.constInt(int, .False); }, .error_union => |error_union| { const err_tv: TypedValue = switch (error_union.val) { @@ -5960,8 +5946,7 @@ pub const FuncGen = struct { .base_line = self.base_line, }); - const fqn = try decl.getFullyQualifiedName(mod); - defer self.gpa.free(fqn); + const fqn = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod)); const is_internal_linkage = !mod.decl_exports.contains(decl_index); const fn_ty = try mod.funcType(.{ @@ -5981,7 +5966,7 @@ pub const FuncGen = struct { }); const subprogram = dib.createFunction( di_file.toScope(), - decl.name, + mod.intern_pool.stringToSlice(decl.name), fqn, di_file, line_number, @@ -8629,9 +8614,8 @@ pub const FuncGen = struct { const end_block = self.context.appendBasicBlock(self.llvm_func, "End"); const switch_instr = self.builder.buildSwitch(operand, invalid_block, @intCast(c_uint, names.len)); - for (names) |name_ip| { - const name = mod.intern_pool.stringToSlice(name_ip); - const err_int = mod.global_error_set.get(name).?; + for (names) |name| { + const err_int = @intCast(Module.ErrorInt, mod.global_error_set.getIndex(name).?); const this_tag_int_value = try self.dg.lowerValue(.{ .ty = Type.err_int, .val = try mod.intValue(Type.err_int, err_int), @@ -8681,8 +8665,7 @@ pub const FuncGen = struct { defer arena_allocator.deinit(); const arena = arena_allocator.allocator(); - const fqn = try mod.declPtr(enum_type.decl).getFullyQualifiedName(mod); - defer self.gpa.free(fqn); + const fqn = mod.intern_pool.stringToSlice(try mod.declPtr(enum_type.decl).getFullyQualifiedName(mod)); const llvm_fn_name = try std.fmt.allocPrintZ(arena, "__zig_is_named_enum_value_{s}", .{fqn}); const param_types = [_]*llvm.Type{try self.dg.lowerType(enum_type.tag_ty.toType())}; @@ -8754,8 +8737,7 @@ pub const FuncGen = struct { defer arena_allocator.deinit(); const arena = arena_allocator.allocator(); - const fqn = try mod.declPtr(enum_type.decl).getFullyQualifiedName(mod); - defer self.gpa.free(fqn); + const fqn = mod.intern_pool.stringToSlice(try mod.declPtr(enum_type.decl).getFullyQualifiedName(mod)); const llvm_fn_name = try std.fmt.allocPrintZ(arena, "__zig_tag_name_{s}", .{fqn}); const slice_ty = Type.slice_const_u8_sentinel_0; diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 85caec9490..4fd91aded4 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -593,7 +593,6 @@ pub const DeclGen = struct { .extern_func => unreachable, // TODO else => { const result_id = dg.spv.allocId(); - log.debug("addDeclRef: id = {}, index = {}, name = {s}", .{ result_id.id, @enumToInt(spv_decl_index), decl.name }); try self.decl_deps.put(spv_decl_index, {}); @@ -664,9 +663,8 @@ pub const DeclGen = struct { => unreachable, // non-runtime values .int => try self.addInt(ty, val), .err => |err| { - const name = mod.intern_pool.stringToSlice(err.name); - const kv = try mod.getErrorValue(name); - try self.addConstInt(u16, @intCast(u16, kv.value)); + const int = try mod.getErrorValue(err.name); + try self.addConstInt(u16, @intCast(u16, int)); }, .error_union => |error_union| { const payload_ty = ty.errorUnionPayload(mod); @@ -1288,8 +1286,7 @@ pub const DeclGen = struct { member_index += 1; } - const name = try struct_obj.getFullyQualifiedName(self.module); - defer 
self.module.gpa.free(name); + const name = mod.intern_pool.stringToSlice(try struct_obj.getFullyQualifiedName(self.module)); return try self.spv.resolve(.{ .struct_type = .{ .name = try self.spv.resolveString(name), @@ -1500,7 +1497,6 @@ pub const DeclGen = struct { const spv_decl_index = try self.resolveDecl(self.decl_index); const decl_id = self.spv.declPtr(spv_decl_index).result_id; - log.debug("genDecl: id = {}, index = {}, name = {s}", .{ decl_id.id, @enumToInt(spv_decl_index), decl.name }); if (decl.val.getFunction(mod)) |_| { assert(decl.ty.zigTypeTag(mod) == .Fn); @@ -1542,8 +1538,7 @@ pub const DeclGen = struct { try self.func.body.emit(self.spv.gpa, .OpFunctionEnd, {}); try self.spv.addFunction(spv_decl_index, self.func); - const fqn = try decl.getFullyQualifiedName(self.module); - defer self.module.gpa.free(fqn); + const fqn = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(self.module)); try self.spv.sections.debug_names.emit(self.gpa, .OpName, .{ .target = decl_id, diff --git a/src/link.zig b/src/link.zig index a44a7387e9..e43153f0b1 100644 --- a/src/link.zig +++ b/src/link.zig @@ -502,8 +502,6 @@ pub const File = struct { /// of the final binary. pub fn lowerUnnamedConst(base: *File, tv: TypedValue, decl_index: Module.Decl.Index) UpdateDeclError!u32 { if (build_options.only_c) @compileError("unreachable"); - const decl = base.options.module.?.declPtr(decl_index); - log.debug("lowerUnnamedConst {*} ({s})", .{ decl, decl.name }); switch (base.tag) { // zig fmt: off .coff => return @fieldParentPtr(Coff, "base", base).lowerUnnamedConst(tv, decl_index), @@ -543,7 +541,6 @@ pub const File = struct { /// May be called before or after updateDeclExports for any given Decl. pub fn updateDecl(base: *File, module: *Module, decl_index: Module.Decl.Index) UpdateDeclError!void { const decl = module.declPtr(decl_index); - log.debug("updateDecl {*} ({s}), type={}", .{ decl, decl.name, decl.ty.fmt(module) }); assert(decl.has_tv); if (build_options.only_c) { assert(base.tag == .c); @@ -566,10 +563,6 @@ pub const File = struct { /// May be called before or after updateDeclExports for any given Decl. 
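 // The debug logs removed throughout this file formatted `decl.name` with
 // "{s}" directly, which only worked while names were null-terminated byte
 // pointers (note the `mem.span(decl.name)` and `mem.sliceTo(decl.name, 0)`
 // call sites elsewhere in this series). With names interned, an equivalent
 // log would first have to resolve the handle; an illustrative sketch, not
 // part of this patch:
 //
 //     log.debug("updateDecl {s}", .{module.intern_pool.stringToSlice(decl.name)});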
pub fn updateFunc(base: *File, module: *Module, func_index: Module.Fn.Index, air: Air, liveness: Liveness) UpdateDeclError!void { const func = module.funcPtr(func_index); - const owner_decl = module.declPtr(func.owner_decl); - log.debug("updateFunc {*} ({s}), type={}", .{ - owner_decl, owner_decl.name, owner_decl.ty.fmt(module), - }); if (build_options.only_c) { assert(base.tag == .c); return @fieldParentPtr(C, "base", base).updateFunc(module, func, air, liveness); @@ -590,9 +583,6 @@ pub const File = struct { pub fn updateDeclLineNumber(base: *File, module: *Module, decl_index: Module.Decl.Index) UpdateDeclError!void { const decl = module.declPtr(decl_index); - log.debug("updateDeclLineNumber {*} ({s}), line={}", .{ - decl, decl.name, decl.src_line + 1, - }); assert(decl.has_tv); if (build_options.only_c) { assert(base.tag == .c); @@ -868,7 +858,6 @@ pub const File = struct { exports: []const *Module.Export, ) UpdateDeclExportsError!void { const decl = module.declPtr(decl_index); - log.debug("updateDeclExports {*} ({s})", .{ decl, decl.name }); assert(decl.has_tv); if (build_options.only_c) { assert(base.tag == .c); diff --git a/src/link/C.zig b/src/link/C.zig index c871d8a02a..8bfaf1553c 100644 --- a/src/link/C.zig +++ b/src/link/C.zig @@ -6,6 +6,7 @@ const fs = std.fs; const C = @This(); const Module = @import("../Module.zig"); +const InternPool = @import("../InternPool.zig"); const Compilation = @import("../Compilation.zig"); const codegen = @import("../codegen/c.zig"); const link = @import("../link.zig"); @@ -289,11 +290,11 @@ pub fn flushModule(self: *C, _: *Compilation, prog_node: *std.Progress.Node) !vo } { - var export_names = std.StringHashMapUnmanaged(void){}; + var export_names: std.AutoHashMapUnmanaged(InternPool.NullTerminatedString, void) = .{}; defer export_names.deinit(gpa); try export_names.ensureTotalCapacity(gpa, @intCast(u32, module.decl_exports.entries.len)); for (module.decl_exports.values()) |exports| for (exports.items) |@"export"| - try export_names.put(gpa, @"export".options.name, {}); + try export_names.put(gpa, @"export".name, {}); while (f.remaining_decls.popOrNull()) |kv| { const decl_index = kv.key; @@ -553,7 +554,7 @@ fn flushDecl( self: *C, f: *Flush, decl_index: Module.Decl.Index, - export_names: std.StringHashMapUnmanaged(void), + export_names: std.AutoHashMapUnmanaged(InternPool.NullTerminatedString, void), ) FlushDeclError!void { const gpa = self.base.allocator; const mod = self.base.options.module.?; @@ -571,7 +572,7 @@ fn flushDecl( try self.flushLazyFns(f, decl_block.lazy_fns); try f.all_buffers.ensureUnusedCapacity(gpa, 1); - if (!(decl.isExtern(mod) and export_names.contains(mem.span(decl.name)))) + if (!(decl.isExtern(mod) and export_names.contains(decl.name))) f.appendBufAssumeCapacity(decl_block.fwd_decl.items); } diff --git a/src/link/Coff.zig b/src/link/Coff.zig index 8b76e8dd69..fec6a86b91 100644 --- a/src/link/Coff.zig +++ b/src/link/Coff.zig @@ -1097,8 +1097,7 @@ pub fn lowerUnnamedConst(self: *Coff, tv: TypedValue, decl_index: Module.Decl.In const atom_index = try self.createAtom(); const sym_name = blk: { - const decl_name = try decl.getFullyQualifiedName(mod); - defer gpa.free(decl_name); + const decl_name = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod)); const index = unnamed_consts.items.len; break :blk try std.fmt.allocPrint(gpa, "__unnamed_{s}_{d}", .{ decl_name, index }); @@ -1324,12 +1323,10 @@ fn getDeclOutputSection(self: *Coff, decl_index: Module.Decl.Index) u16 { } fn updateDeclCode(self: *Coff, decl_index: 
Module.Decl.Index, code: []u8, complex_type: coff.ComplexType) !void { - const gpa = self.base.allocator; const mod = self.base.options.module.?; const decl = mod.declPtr(decl_index); - const decl_name = try decl.getFullyQualifiedName(mod); - defer gpa.free(decl_name); + const decl_name = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod)); log.debug("updateDeclCode {s}{*}", .{ decl_name, decl }); const required_alignment = decl.getAlignment(mod); @@ -1420,6 +1417,8 @@ pub fn updateDeclExports( @panic("Attempted to compile for object format that was disabled by build configuration"); } + const ip = &mod.intern_pool; + if (build_options.have_llvm) { // Even in the case of LLVM, we need to notice certain exported symbols in order to // detect the default subsystem. @@ -1431,20 +1430,20 @@ pub fn updateDeclExports( else => std.builtin.CallingConvention.C, }; const decl_cc = exported_decl.ty.fnCallingConvention(mod); - if (decl_cc == .C and mem.eql(u8, exp.options.name, "main") and + if (decl_cc == .C and ip.stringEqlSlice(exp.name, "main") and self.base.options.link_libc) { mod.stage1_flags.have_c_main = true; } else if (decl_cc == winapi_cc and self.base.options.target.os.tag == .windows) { - if (mem.eql(u8, exp.options.name, "WinMain")) { + if (ip.stringEqlSlice(exp.name, "WinMain")) { mod.stage1_flags.have_winmain = true; - } else if (mem.eql(u8, exp.options.name, "wWinMain")) { + } else if (ip.stringEqlSlice(exp.name, "wWinMain")) { mod.stage1_flags.have_wwinmain = true; - } else if (mem.eql(u8, exp.options.name, "WinMainCRTStartup")) { + } else if (ip.stringEqlSlice(exp.name, "WinMainCRTStartup")) { mod.stage1_flags.have_winmain_crt_startup = true; - } else if (mem.eql(u8, exp.options.name, "wWinMainCRTStartup")) { + } else if (ip.stringEqlSlice(exp.name, "wWinMainCRTStartup")) { mod.stage1_flags.have_wwinmain_crt_startup = true; - } else if (mem.eql(u8, exp.options.name, "DllMainCRTStartup")) { + } else if (ip.stringEqlSlice(exp.name, "DllMainCRTStartup")) { mod.stage1_flags.have_dllmain_crt_startup = true; } } @@ -1453,9 +1452,6 @@ pub fn updateDeclExports( if (self.llvm_object) |llvm_object| return llvm_object.updateDeclExports(mod, decl_index, exports); } - const tracy = trace(@src()); - defer tracy.end(); - const gpa = self.base.allocator; const decl = mod.declPtr(decl_index); @@ -1465,12 +1461,13 @@ pub fn updateDeclExports( const decl_metadata = self.decls.getPtr(decl_index).?; for (exports) |exp| { - log.debug("adding new export '{s}'", .{exp.options.name}); + const exp_name = mod.intern_pool.stringToSlice(exp.name); + log.debug("adding new export '{s}'", .{exp_name}); - if (exp.options.section) |section_name| { + if (mod.intern_pool.stringToSliceUnwrap(exp.section)) |section_name| { if (!mem.eql(u8, section_name, ".text")) { try mod.failed_exports.putNoClobber( - mod.gpa, + gpa, exp, try Module.ErrorMsg.create( gpa, @@ -1483,9 +1480,9 @@ pub fn updateDeclExports( } } - if (exp.options.linkage == .LinkOnce) { + if (exp.linkage == .LinkOnce) { try mod.failed_exports.putNoClobber( - mod.gpa, + gpa, exp, try Module.ErrorMsg.create( gpa, @@ -1497,19 +1494,19 @@ pub fn updateDeclExports( continue; } - const sym_index = decl_metadata.getExport(self, exp.options.name) orelse blk: { + const sym_index = decl_metadata.getExport(self, exp_name) orelse blk: { const sym_index = try self.allocateSymbol(); try decl_metadata.exports.append(gpa, sym_index); break :blk sym_index; }; const sym_loc = SymbolWithLoc{ .sym_index = sym_index, .file = null }; const sym = 
self.getSymbolPtr(sym_loc); - try self.setSymbolName(sym, exp.options.name); + try self.setSymbolName(sym, exp_name); sym.value = decl_sym.value; sym.section_number = @intToEnum(coff.SectionNumber, self.text_section_index.? + 1); sym.type = .{ .complex_type = .FUNCTION, .base_type = .NULL }; - switch (exp.options.linkage) { + switch (exp.linkage) { .Strong => { sym.storage_class = .EXTERNAL; }, @@ -1522,9 +1519,15 @@ pub fn updateDeclExports( } } -pub fn deleteDeclExport(self: *Coff, decl_index: Module.Decl.Index, name: []const u8) void { +pub fn deleteDeclExport( + self: *Coff, + decl_index: Module.Decl.Index, + name_ip: InternPool.NullTerminatedString, +) void { if (self.llvm_object) |_| return; const metadata = self.decls.getPtr(decl_index) orelse return; + const mod = self.base.options.module.?; + const name = mod.intern_pool.stringToSlice(name_ip); const sym_index = metadata.getExportPtr(self, name) orelse return; const gpa = self.base.allocator; @@ -2540,6 +2543,7 @@ const ImportTable = @import("Coff/ImportTable.zig"); const Liveness = @import("../Liveness.zig"); const LlvmObject = @import("../codegen/llvm.zig").Object; const Module = @import("../Module.zig"); +const InternPool = @import("../InternPool.zig"); const Object = @import("Coff/Object.zig"); const Relocation = @import("Coff/Relocation.zig"); const TableSection = @import("table_section.zig").TableSection; diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig index 9d8076f592..b9b7772260 100644 --- a/src/link/Dwarf.zig +++ b/src/link/Dwarf.zig @@ -358,8 +358,9 @@ pub const DeclState = struct { struct_obj.fields.keys(), struct_obj.fields.values(), 0.., - ) |field_name, field, field_index| { + ) |field_name_ip, field, field_index| { if (!field.ty.hasRuntimeBits(mod)) continue; + const field_name = mod.intern_pool.stringToSlice(field_name_ip); // DW.AT.member try dbg_info_buffer.ensureUnusedCapacity(field_name.len + 2); dbg_info_buffer.appendAssumeCapacity(@enumToInt(AbbrevKind.struct_member)); @@ -469,7 +470,8 @@ pub const DeclState = struct { // DW.AT.member try dbg_info_buffer.append(@enumToInt(AbbrevKind.struct_member)); // DW.AT.name, DW.FORM.string - try dbg_info_buffer.writer().print("{s}\x00", .{field_name}); + try dbg_info_buffer.appendSlice(mod.intern_pool.stringToSlice(field_name)); + try dbg_info_buffer.append(0); // DW.AT.type, DW.FORM.ref4 const index = dbg_info_buffer.items.len; try dbg_info_buffer.resize(index + 4); @@ -949,8 +951,7 @@ pub fn initDeclState(self: *Dwarf, mod: *Module, decl_index: Module.Decl.Index) defer tracy.end(); const decl = mod.declPtr(decl_index); - const decl_name = try decl.getFullyQualifiedName(mod); - defer self.allocator.free(decl_name); + const decl_name = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod)); log.debug("initDeclState {s}{*}", .{ decl_name, decl }); @@ -1273,7 +1274,6 @@ pub fn commitDeclState( } } - log.debug("updateDeclDebugInfoAllocation for '{s}'", .{decl.name}); try self.updateDeclDebugInfoAllocation(di_atom_index, @intCast(u32, dbg_info_buffer.items.len)); while (decl_state.abbrev_relocs.popOrNull()) |reloc| { @@ -1345,7 +1345,6 @@ pub fn commitDeclState( } } - log.debug("writeDeclDebugInfo for '{s}", .{decl.name}); try self.writeDeclDebugInfo(di_atom_index, dbg_info_buffer.items); } @@ -2523,15 +2522,7 @@ pub fn flushModule(self: *Dwarf, module: *Module) !void { // TODO: don't create a zig type for this, just make the dwarf info // without touching the zig type system. 
- const names = try arena.alloc(InternPool.NullTerminatedString, module.global_error_set.count()); - { - var it = module.global_error_set.keyIterator(); - var i: usize = 0; - while (it.next()) |key| : (i += 1) { - names[i] = module.intern_pool.getString(key.*).unwrap().?; - } - } - + const names = try arena.dupe(InternPool.NullTerminatedString, module.global_error_set.keys()); std.mem.sort(InternPool.NullTerminatedString, names, {}, InternPool.NullTerminatedString.indexLessThan); const error_ty = try module.intern(.{ .error_set_type = .{ .names = names } }); @@ -2682,8 +2673,8 @@ fn addDbgInfoErrorSet( const error_names = ty.errorSetNames(mod); for (error_names) |error_name_ip| { + const int = try mod.getErrorValue(error_name_ip); const error_name = mod.intern_pool.stringToSlice(error_name_ip); - const kv = mod.getErrorValue(error_name) catch unreachable; // DW.AT.enumerator try dbg_info_buffer.ensureUnusedCapacity(error_name.len + 2 + @sizeOf(u64)); dbg_info_buffer.appendAssumeCapacity(@enumToInt(AbbrevKind.enum_variant)); @@ -2691,7 +2682,7 @@ fn addDbgInfoErrorSet( dbg_info_buffer.appendSliceAssumeCapacity(error_name); dbg_info_buffer.appendAssumeCapacity(0); // DW.AT.const_value, DW.FORM.data8 - mem.writeInt(u64, dbg_info_buffer.addManyAsArrayAssumeCapacity(8), kv.value, target_endian); + mem.writeInt(u64, dbg_info_buffer.addManyAsArrayAssumeCapacity(8), int, target_endian); } // DW.AT.enumeration_type delimit children diff --git a/src/link/Elf.zig b/src/link/Elf.zig index e4fa07620d..5ac90d4cae 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -28,6 +28,7 @@ const File = link.File; const Liveness = @import("../Liveness.zig"); const LlvmObject = @import("../codegen/llvm.zig").Object; const Module = @import("../Module.zig"); +const InternPool = @import("../InternPool.zig"); const Package = @import("../Package.zig"); const StringTable = @import("strtab.zig").StringTable; const TableSection = @import("table_section.zig").TableSection; @@ -2480,8 +2481,7 @@ fn updateDeclCode(self: *Elf, decl_index: Module.Decl.Index, code: []const u8, s const mod = self.base.options.module.?; const decl = mod.declPtr(decl_index); - const decl_name = try decl.getFullyQualifiedName(mod); - defer self.base.allocator.free(decl_name); + const decl_name = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod)); log.debug("updateDeclCode {s}{*}", .{ decl_name, decl }); const required_alignment = decl.getAlignment(mod); @@ -2802,8 +2802,7 @@ pub fn lowerUnnamedConst(self: *Elf, typed_value: TypedValue, decl_index: Module const decl = mod.declPtr(decl_index); const name_str_index = blk: { - const decl_name = try decl.getFullyQualifiedName(mod); - defer gpa.free(decl_name); + const decl_name = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod)); const index = unnamed_consts.items.len; const name = try std.fmt.allocPrint(gpa, "__unnamed_{s}_{d}", .{ decl_name, index }); defer gpa.free(name); @@ -2880,7 +2879,8 @@ pub fn updateDeclExports( try self.global_symbols.ensureUnusedCapacity(gpa, exports.len); for (exports) |exp| { - if (exp.options.section) |section_name| { + const exp_name = mod.intern_pool.stringToSlice(exp.name); + if (mod.intern_pool.stringToSliceUnwrap(exp.section)) |section_name| { if (!mem.eql(u8, section_name, ".text")) { try mod.failed_exports.ensureUnusedCapacity(mod.gpa, 1); mod.failed_exports.putAssumeCapacityNoClobber( @@ -2890,11 +2890,11 @@ pub fn updateDeclExports( continue; } } - const stb_bits: u8 = switch (exp.options.linkage) { + const stb_bits: u8 = switch 
(exp.linkage) { .Internal => elf.STB_LOCAL, .Strong => blk: { const entry_name = self.base.options.entry orelse "_start"; - if (mem.eql(u8, exp.options.name, entry_name)) { + if (mem.eql(u8, exp_name, entry_name)) { self.entry_addr = decl_sym.st_value; } break :blk elf.STB_GLOBAL; @@ -2910,10 +2910,10 @@ pub fn updateDeclExports( }, }; const stt_bits: u8 = @truncate(u4, decl_sym.st_info); - if (decl_metadata.getExport(self, exp.options.name)) |i| { + if (decl_metadata.getExport(self, exp_name)) |i| { const sym = &self.global_symbols.items[i]; sym.* = .{ - .st_name = try self.shstrtab.insert(gpa, exp.options.name), + .st_name = try self.shstrtab.insert(gpa, exp_name), .st_info = (stb_bits << 4) | stt_bits, .st_other = 0, .st_shndx = shdr_index, @@ -2927,7 +2927,7 @@ pub fn updateDeclExports( }; try decl_metadata.exports.append(gpa, @intCast(u32, i)); self.global_symbols.items[i] = .{ - .st_name = try self.shstrtab.insert(gpa, exp.options.name), + .st_name = try self.shstrtab.insert(gpa, exp_name), .st_info = (stb_bits << 4) | stt_bits, .st_other = 0, .st_shndx = shdr_index, @@ -2944,8 +2944,7 @@ pub fn updateDeclLineNumber(self: *Elf, mod: *Module, decl_index: Module.Decl.In defer tracy.end(); const decl = mod.declPtr(decl_index); - const decl_name = try decl.getFullyQualifiedName(mod); - defer self.base.allocator.free(decl_name); + const decl_name = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod)); log.debug("updateDeclLineNumber {s}{*}", .{ decl_name, decl }); @@ -2955,11 +2954,15 @@ pub fn updateDeclLineNumber(self: *Elf, mod: *Module, decl_index: Module.Decl.In } } -pub fn deleteDeclExport(self: *Elf, decl_index: Module.Decl.Index, name: []const u8) void { +pub fn deleteDeclExport( + self: *Elf, + decl_index: Module.Decl.Index, + name: InternPool.NullTerminatedString, +) void { if (self.llvm_object) |_| return; const metadata = self.decls.getPtr(decl_index) orelse return; - const sym_index = metadata.getExportPtr(self, name) orelse return; - log.debug("deleting export '{s}'", .{name}); + const mod = self.base.options.module.?; + const sym_index = metadata.getExportPtr(self, mod.intern_pool.stringToSlice(name)) orelse return; self.global_symbol_free_list.append(self.base.allocator, sym_index.*) catch {}; self.global_symbols.items[sym_index.*].st_info = 0; sym_index.* = 0; diff --git a/src/link/MachO.zig b/src/link/MachO.zig index f7f975f920..70993e8dc6 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -40,6 +40,7 @@ const Liveness = @import("../Liveness.zig"); const LlvmObject = @import("../codegen/llvm.zig").Object; const Md5 = std.crypto.hash.Md5; const Module = @import("../Module.zig"); +const InternPool = @import("../InternPool.zig"); const Relocation = @import("MachO/Relocation.zig"); const StringTable = @import("strtab.zig").StringTable; const TableSection = @import("table_section.zig").TableSection; @@ -1921,8 +1922,7 @@ pub fn lowerUnnamedConst(self: *MachO, typed_value: TypedValue, decl_index: Modu const unnamed_consts = gop.value_ptr; const decl = mod.declPtr(decl_index); - const decl_name = try decl.getFullyQualifiedName(mod); - defer gpa.free(decl_name); + const decl_name = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod)); const name_str_index = blk: { const index = unnamed_consts.items.len; @@ -2206,8 +2206,7 @@ fn updateThreadlocalVariable(self: *MachO, module: *Module, decl_index: Module.D const required_alignment = decl.getAlignment(mod); - const decl_name = try decl.getFullyQualifiedName(module); - defer gpa.free(decl_name); 
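 // The same shape repeats across the linker backends in this series:
 // getFullyQualifiedName now hands back an InternPool.NullTerminatedString,
 // and stringToSlice only borrows bytes owned by the pool, so the old
 // allocate-then-free pair collapses into a single lookup. A sketch of the
 // two idioms, mirroring the lines below:
 //
 //     // before: the caller owns the bytes
 //     const decl_name = try decl.getFullyQualifiedName(mod);
 //     defer gpa.free(decl_name);
 //
 //     // after: the pool owns the bytes; nothing to free
 //     const decl_name = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod));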
+ const decl_name = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(module)); const init_sym_name = try std.fmt.allocPrint(gpa, "{s}$tlv$init", .{decl_name}); defer gpa.free(init_sym_name); @@ -2306,8 +2305,7 @@ fn updateDeclCode(self: *MachO, decl_index: Module.Decl.Index, code: []u8) !u64 const required_alignment = decl.getAlignment(mod); - const decl_name = try decl.getFullyQualifiedName(mod); - defer gpa.free(decl_name); + const decl_name = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod)); const decl_metadata = self.decls.get(decl_index).?; const atom_index = decl_metadata.atom; @@ -2403,12 +2401,14 @@ pub fn updateDeclExports( const decl_metadata = self.decls.getPtr(decl_index).?; for (exports) |exp| { - const exp_name = try std.fmt.allocPrint(gpa, "_{s}", .{exp.options.name}); + const exp_name = try std.fmt.allocPrint(gpa, "_{s}", .{ + mod.intern_pool.stringToSlice(exp.name), + }); defer gpa.free(exp_name); log.debug("adding new export '{s}'", .{exp_name}); - if (exp.options.section) |section_name| { + if (mod.intern_pool.stringToSliceUnwrap(exp.section)) |section_name| { if (!mem.eql(u8, section_name, "__text")) { try mod.failed_exports.putNoClobber( mod.gpa, @@ -2424,7 +2424,7 @@ pub fn updateDeclExports( } } - if (exp.options.linkage == .LinkOnce) { + if (exp.linkage == .LinkOnce) { try mod.failed_exports.putNoClobber( mod.gpa, exp, @@ -2453,7 +2453,7 @@ pub fn updateDeclExports( .n_value = decl_sym.n_value, }; - switch (exp.options.linkage) { + switch (exp.linkage) { .Internal => { // Symbol should be hidden, or in MachO lingo, private extern. // We should also mark the symbol as Weak: n_desc == N_WEAK_DEF. @@ -2488,12 +2488,17 @@ pub fn updateDeclExports( } } -pub fn deleteDeclExport(self: *MachO, decl_index: Module.Decl.Index, name: []const u8) Allocator.Error!void { +pub fn deleteDeclExport( + self: *MachO, + decl_index: Module.Decl.Index, + name: InternPool.NullTerminatedString, +) Allocator.Error!void { if (self.llvm_object) |_| return; const metadata = self.decls.getPtr(decl_index) orelse return; const gpa = self.base.allocator; - const exp_name = try std.fmt.allocPrint(gpa, "_{s}", .{name}); + const mod = self.base.options.module.?; + const exp_name = try std.fmt.allocPrint(gpa, "_{s}", .{mod.intern_pool.stringToSlice(name)}); defer gpa.free(exp_name); const sym_index = metadata.getExportPtr(self, exp_name) orelse return; diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig index 0803b6beef..5cf2add528 100644 --- a/src/link/Plan9.zig +++ b/src/link/Plan9.zig @@ -287,7 +287,6 @@ pub fn updateFunc(self: *Plan9, mod: *Module, func_index: Module.Fn.Index, air: self.freeUnnamedConsts(decl_index); _ = try self.seeDecl(decl_index); - log.debug("codegen decl {*} ({s})", .{ decl, decl.name }); var code_buffer = std.ArrayList(u8).init(self.base.allocator); defer code_buffer.deinit(); @@ -345,8 +344,7 @@ pub fn lowerUnnamedConst(self: *Plan9, tv: TypedValue, decl_index: Module.Decl.I } const unnamed_consts = gop.value_ptr; - const decl_name = try decl.getFullyQualifiedName(mod); - defer self.base.allocator.free(decl_name); + const decl_name = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod)); const index = unnamed_consts.items.len; // name is freed when the unnamed const is freed @@ -403,8 +401,6 @@ pub fn updateDecl(self: *Plan9, mod: *Module, decl_index: Module.Decl.Index) !vo _ = try self.seeDecl(decl_index); - log.debug("codegen decl {*} ({s}) ({d})", .{ decl, decl.name, decl_index }); - var code_buffer = 
std.ArrayList(u8).init(self.base.allocator); defer code_buffer.deinit(); const decl_val = if (decl.val.getVariable(mod)) |variable| variable.init.toValue() else decl.val; @@ -435,7 +431,6 @@ fn updateFinish(self: *Plan9, decl_index: Module.Decl.Index) !void { const mod = self.base.options.module.?; const decl = mod.declPtr(decl_index); const is_fn = (decl.ty.zigTypeTag(mod) == .Fn); - log.debug("update the symbol table and got for decl {*} ({s})", .{ decl, decl.name }); const sym_t: aout.Sym.Type = if (is_fn) .t else .d; const decl_block = self.getDeclBlockPtr(self.decls.get(decl_index).?.index); @@ -446,7 +441,7 @@ fn updateFinish(self: *Plan9, decl_index: Module.Decl.Index) !void { const sym: aout.Sym = .{ .value = undefined, // the value of stuff gets filled in in flushModule .type = decl_block.type, - .name = mem.span(decl.name), + .name = mod.intern_pool.stringToSlice(decl.name), }; if (decl_block.sym_index) |s| { @@ -567,10 +562,8 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No var it = fentry.value_ptr.functions.iterator(); while (it.next()) |entry| { const decl_index = entry.key_ptr.*; - const decl = mod.declPtr(decl_index); const decl_block = self.getDeclBlockPtr(self.decls.get(decl_index).?.index); const out = entry.value_ptr.*; - log.debug("write text decl {*} ({s}), lines {d} to {d}", .{ decl, decl.name, out.start_line + 1, out.end_line }); { // connect the previous decl to the next const delta_line = @intCast(i32, out.start_line) - @intCast(i32, linecount); @@ -616,10 +609,8 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No var it = self.data_decl_table.iterator(); while (it.next()) |entry| { const decl_index = entry.key_ptr.*; - const decl = mod.declPtr(decl_index); const decl_block = self.getDeclBlockPtr(self.decls.get(decl_index).?.index); const code = entry.value_ptr.*; - log.debug("write data decl {*} ({s})", .{ decl, decl.name }); foff += code.len; iovecs[iovecs_i] = .{ .iov_base = code.ptr, .iov_len = code.len }; @@ -695,15 +686,12 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No const source_decl = mod.declPtr(source_decl_index); for (kv.value_ptr.items) |reloc| { const target_decl_index = reloc.target; - const target_decl = mod.declPtr(target_decl_index); const target_decl_block = self.getDeclBlock(self.decls.get(target_decl_index).?.index); const target_decl_offset = target_decl_block.offset.?; const offset = reloc.offset; const addend = reloc.addend; - log.debug("relocating the address of '{s}' + {d} into '{s}' + {d}", .{ target_decl.name, addend, source_decl.name, offset }); - const code = blk: { const is_fn = source_decl.ty.zigTypeTag(mod) == .Fn; if (is_fn) { @@ -737,8 +725,9 @@ fn addDeclExports( const decl_block = self.getDeclBlock(metadata.index); for (exports) |exp| { + const exp_name = mod.intern_pool.stringToSlice(exp.name); // plan9 does not support custom sections - if (exp.options.section) |section_name| { + if (mod.intern_pool.stringToSliceUnwrap(exp.section)) |section_name| { if (!mem.eql(u8, section_name, ".text") or !mem.eql(u8, section_name, ".data")) { try mod.failed_exports.put(mod.gpa, exp, try Module.ErrorMsg.create( self.base.allocator, @@ -752,10 +741,10 @@ fn addDeclExports( const sym = .{ .value = decl_block.offset.?, .type = decl_block.type.toGlobal(), - .name = exp.options.name, + .name = exp_name, }; - if (metadata.getExport(self, exp.options.name)) |i| { + if (metadata.getExport(self, exp_name)) |i| { self.syms.items[i] = sym; } else 
{ try self.syms.append(self.base.allocator, sym); @@ -956,7 +945,10 @@ pub fn writeSym(self: *Plan9, w: anytype, sym: aout.Sym) !void { try w.writeAll(sym.name); try w.writeByte(0); } + pub fn writeSyms(self: *Plan9, buf: *std.ArrayList(u8)) !void { + const mod = self.base.options.module.?; + const ip = &mod.intern_pool; const writer = buf.writer(); // write the f symbols { @@ -980,7 +972,7 @@ pub fn writeSyms(self: *Plan9, buf: *std.ArrayList(u8)) !void { const sym = self.syms.items[decl_block.sym_index.?]; try self.writeSym(writer, sym); if (self.base.options.module.?.decl_exports.get(decl_index)) |exports| { - for (exports.items) |e| if (decl_metadata.getExport(self, e.options.name)) |exp_i| { + for (exports.items) |e| if (decl_metadata.getExport(self, ip.stringToSlice(e.name))) |exp_i| { try self.writeSym(writer, self.syms.items[exp_i]); }; } @@ -1006,7 +998,7 @@ pub fn writeSyms(self: *Plan9, buf: *std.ArrayList(u8)) !void { const sym = self.syms.items[decl_block.sym_index.?]; try self.writeSym(writer, sym); if (self.base.options.module.?.decl_exports.get(decl_index)) |exports| { - for (exports.items) |e| if (decl_metadata.getExport(self, e.options.name)) |exp_i| { + for (exports.items) |e| if (decl_metadata.getExport(self, ip.stringToSlice(e.name))) |exp_i| { const s = self.syms.items[exp_i]; if (mem.eql(u8, s.name, "_start")) self.entry_val = s.value; diff --git a/src/link/SpirV.zig b/src/link/SpirV.zig index 89d6be1ec8..fbde464c54 100644 --- a/src/link/SpirV.zig +++ b/src/link/SpirV.zig @@ -147,7 +147,7 @@ pub fn updateDeclExports( const spv_decl_index = entry.value_ptr.*; for (exports) |exp| { - try self.spv.declareEntryPoint(spv_decl_index, exp.options.name); + try self.spv.declareEntryPoint(spv_decl_index, mod.intern_pool.stringToSlice(exp.name)); } } @@ -190,7 +190,8 @@ pub fn flushModule(self: *SpirV, comp: *Compilation, prog_node: *std.Progress.No var error_info = std.ArrayList(u8).init(self.spv.arena); try error_info.appendSlice("zig_errors"); const module = self.base.options.module.?; - for (module.error_name_list.items) |name| { + for (module.global_error_set.keys()) |name_nts| { + const name = module.intern_pool.stringToSlice(name_nts); // Errors can contain pretty much any character - to encode them in a string we must escape // them somehow. Easiest here is to use some established scheme; one which also preserves the // name when it contains no strange characters is nice for debugging. URI encoding fits the bill. 
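A rough sketch of the URI-style escaping described in the comment above; the
helper name and the exact set of characters passed through verbatim are
assumptions for illustration, not the actual implementation:

    const std = @import("std");

    /// Percent-encode an error name so it can be embedded in the
    /// "zig_errors" module string. Plain identifier characters pass
    /// through unchanged, keeping ordinary names readable for debugging;
    /// every other byte becomes a %XX escape.
    fn escapeErrorName(writer: anytype, name: []const u8) !void {
        for (name) |c| {
            if (std.ascii.isAlphanumeric(c) or c == '_') {
                try writer.writeByte(c);
            } else {
                try writer.print("%{X:0>2}", .{c});
            }
        }
    }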
diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig index 96de121ffb..d57543542a 100644 --- a/src/link/Wasm.zig +++ b/src/link/Wasm.zig @@ -1416,7 +1416,7 @@ pub fn updateDecl(wasm: *Wasm, mod: *Module, decl_index: Module.Decl.Index) !voi if (decl.isExtern(mod)) { const variable = decl.getOwnedVariable(mod).?; - const name = mem.sliceTo(decl.name, 0); + const name = mod.intern_pool.stringToSlice(decl.name); const lib_name = mod.intern_pool.stringToSliceUnwrap(variable.lib_name); return wasm.addOrUpdateImport(name, atom.sym_index, lib_name, null); } @@ -1453,8 +1453,7 @@ pub fn updateDeclLineNumber(wasm: *Wasm, mod: *Module, decl_index: Module.Decl.I defer tracy.end(); const decl = mod.declPtr(decl_index); - const decl_name = try decl.getFullyQualifiedName(mod); - defer wasm.base.allocator.free(decl_name); + const decl_name = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod)); log.debug("updateDeclLineNumber {s}{*}", .{ decl_name, decl }); try dw.updateDeclLineNumber(mod, decl_index); @@ -1467,8 +1466,7 @@ fn finishUpdateDecl(wasm: *Wasm, decl_index: Module.Decl.Index, code: []const u8 const atom_index = wasm.decls.get(decl_index).?; const atom = wasm.getAtomPtr(atom_index); const symbol = &wasm.symbols.items[atom.sym_index]; - const full_name = try decl.getFullyQualifiedName(mod); - defer wasm.base.allocator.free(full_name); + const full_name = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod)); symbol.name = try wasm.string_table.put(wasm.base.allocator, full_name); try atom.code.appendSlice(wasm.base.allocator, code); try wasm.resolved_symbols.put(wasm.base.allocator, atom.symbolLoc(), {}); @@ -1535,9 +1533,10 @@ pub fn lowerUnnamedConst(wasm: *Wasm, tv: TypedValue, decl_index: Module.Decl.In const parent_atom = wasm.getAtomPtr(parent_atom_index); const local_index = parent_atom.locals.items.len; try parent_atom.locals.append(wasm.base.allocator, atom_index); - const fqdn = try decl.getFullyQualifiedName(mod); - defer wasm.base.allocator.free(fqdn); - const name = try std.fmt.allocPrintZ(wasm.base.allocator, "__unnamed_{s}_{d}", .{ fqdn, local_index }); + const fqn = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod)); + const name = try std.fmt.allocPrintZ(wasm.base.allocator, "__unnamed_{s}_{d}", .{ + fqn, local_index, + }); defer wasm.base.allocator.free(name); var value_bytes = std.ArrayList(u8).init(wasm.base.allocator); defer value_bytes.deinit(); @@ -1690,11 +1689,12 @@ pub fn updateDeclExports( const decl = mod.declPtr(decl_index); const atom_index = try wasm.getOrCreateAtomForDecl(decl_index); const atom = wasm.getAtom(atom_index); + const gpa = mod.gpa; for (exports) |exp| { - if (exp.options.section) |section| { - try mod.failed_exports.putNoClobber(mod.gpa, exp, try Module.ErrorMsg.create( - mod.gpa, + if (mod.intern_pool.stringToSliceUnwrap(exp.section)) |section| { + try mod.failed_exports.putNoClobber(gpa, exp, try Module.ErrorMsg.create( + gpa, decl.srcLoc(mod), "Unimplemented: ExportOptions.section '{s}'", .{section}, @@ -1702,24 +1702,24 @@ pub fn updateDeclExports( continue; } - const export_name = try wasm.string_table.put(wasm.base.allocator, exp.options.name); + const export_name = try wasm.string_table.put(wasm.base.allocator, mod.intern_pool.stringToSlice(exp.name)); if (wasm.globals.getPtr(export_name)) |existing_loc| { if (existing_loc.index == atom.sym_index) continue; const existing_sym: Symbol = existing_loc.getSymbol(wasm).*; - const exp_is_weak = exp.options.linkage == .Internal or exp.options.linkage == .Weak; 
+ const exp_is_weak = exp.linkage == .Internal or exp.linkage == .Weak; // When both the to-be-exported symbol and the already existing symbol // are strong symbols, we have a linker error. // In the other case we replace one with the other. if (!exp_is_weak and !existing_sym.isWeak()) { - try mod.failed_exports.put(mod.gpa, exp, try Module.ErrorMsg.create( - mod.gpa, + try mod.failed_exports.put(gpa, exp, try Module.ErrorMsg.create( + gpa, decl.srcLoc(mod), \\LinkError: symbol '{s}' defined multiple times \\ first definition in '{s}' \\ next definition in '{s}' , - .{ exp.options.name, wasm.name, wasm.name }, + .{ mod.intern_pool.stringToSlice(exp.name), wasm.name, wasm.name }, )); continue; } else if (exp_is_weak) { @@ -1736,7 +1736,7 @@ pub fn updateDeclExports( const exported_atom = wasm.getAtom(exported_atom_index); const sym_loc = exported_atom.symbolLoc(); const symbol = sym_loc.getSymbol(wasm); - switch (exp.options.linkage) { + switch (exp.linkage) { .Internal => { symbol.setFlag(.WASM_SYM_VISIBILITY_HIDDEN); }, @@ -1745,8 +1745,8 @@ pub fn updateDeclExports( }, .Strong => {}, // symbols are strong by default .LinkOnce => { - try mod.failed_exports.putNoClobber(mod.gpa, exp, try Module.ErrorMsg.create( - mod.gpa, + try mod.failed_exports.putNoClobber(gpa, exp, try Module.ErrorMsg.create( + gpa, decl.srcLoc(mod), "Unimplemented: LinkOnce", .{}, @@ -1755,7 +1755,7 @@ pub fn updateDeclExports( }, } // Ensure the symbol will be exported using the given name - if (!mem.eql(u8, exp.options.name, sym_loc.getName(wasm))) { + if (!mod.intern_pool.stringEqlSlice(exp.name, sym_loc.getName(wasm))) { try wasm.export_names.put(wasm.base.allocator, sym_loc, export_name); } @@ -1769,7 +1769,7 @@ pub fn updateDeclExports( // if the symbol was previously undefined, remove it as an import _ = wasm.imports.remove(sym_loc); - _ = wasm.undefs.swapRemove(exp.options.name); + _ = wasm.undefs.swapRemove(mod.intern_pool.stringToSlice(exp.name)); } } @@ -2987,7 +2987,8 @@ fn populateErrorNameTable(wasm: *Wasm) !void { // Addend for each relocation to the table var addend: u32 = 0; const mod = wasm.base.options.module.?; - for (mod.error_name_list.items) |error_name| { + for (mod.global_error_set.keys()) |error_name_nts| { + const error_name = mod.intern_pool.stringToSlice(error_name_nts); const len = @intCast(u32, error_name.len + 1); // names are 0-terminated const slice_ty = Type.slice_const_u8_sentinel_0; diff --git a/src/print_air.zig b/src/print_air.zig index 8da80e1360..eb10477292 100644 --- a/src/print_air.zig +++ b/src/print_air.zig @@ -685,8 +685,9 @@ const Writer = struct { fn writeDbgInline(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { const ty_fn = w.air.instructions.items(.data)[inst].ty_fn; const func_index = ty_fn.func; + const ip = &w.module.intern_pool; const owner_decl = w.module.declPtr(w.module.funcPtr(func_index).owner_decl); - try s.print("{s}", .{owner_decl.name}); + try s.print("{s}", .{ip.stringToSlice(owner_decl.name)}); } fn writeDbgVar(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { diff --git a/src/type.zig b/src/type.zig index 61c9377b1d..43aaf3c786 100644 --- a/src/type.zig +++ b/src/type.zig @@ -2546,7 +2546,7 @@ pub const Type = struct { defer mod.gpa.free(field_vals); for (field_vals, s.fields.values()) |*field_val, field| { if (field.is_comptime) { - field_val.* = try field.default_val.intern(field.ty, mod); + field_val.* = field.default_val; continue; } if (try field.ty.onePossibleValue(mod)) |field_opv| { @@ -2977,18 +2977,14 
@@ pub const Type = struct { return mod.intern_pool.indexToKey(ty.toIntern()).enum_type.names.len; } - pub fn enumFieldName(ty: Type, field_index: usize, mod: *Module) [:0]const u8 { - const ip = &mod.intern_pool; - const field_name = ip.indexToKey(ty.toIntern()).enum_type.names[field_index]; - return ip.stringToSlice(field_name); + pub fn enumFieldName(ty: Type, field_index: usize, mod: *Module) InternPool.NullTerminatedString { + return mod.intern_pool.indexToKey(ty.toIntern()).enum_type.names[field_index]; } - pub fn enumFieldIndex(ty: Type, field_name: []const u8, mod: *Module) ?u32 { + pub fn enumFieldIndex(ty: Type, field_name: InternPool.NullTerminatedString, mod: *Module) ?u32 { const ip = &mod.intern_pool; const enum_type = ip.indexToKey(ty.toIntern()).enum_type; - // If the string is not interned, then the field certainly is not present. - const field_name_interned = ip.getString(field_name).unwrap() orelse return null; - return enum_type.nameIndex(ip, field_name_interned); + return enum_type.nameIndex(ip, field_name); } /// Asserts `ty` is an enum. `enum_tag` can either be `enum_field_index` or @@ -3017,19 +3013,16 @@ pub const Type = struct { } } - pub fn structFieldName(ty: Type, field_index: usize, mod: *Module) []const u8 { - switch (mod.intern_pool.indexToKey(ty.toIntern())) { + pub fn structFieldName(ty: Type, field_index: usize, mod: *Module) InternPool.NullTerminatedString { + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { .struct_type => |struct_type| { const struct_obj = mod.structPtrUnwrap(struct_type.index).?; assert(struct_obj.haveFieldTypes()); return struct_obj.fields.keys()[field_index]; }, - .anon_struct_type => |anon_struct| { - const name = anon_struct.names[field_index]; - return mod.intern_pool.stringToSlice(name); - }, + .anon_struct_type => |anon_struct| anon_struct.names[field_index], else => unreachable, - } + }; } pub fn structFieldCount(ty: Type, mod: *Module) usize { @@ -3082,7 +3075,10 @@ pub const Type = struct { switch (mod.intern_pool.indexToKey(ty.toIntern())) { .struct_type => |struct_type| { const struct_obj = mod.structPtrUnwrap(struct_type.index).?; - return struct_obj.fields.values()[index].default_val; + const val = struct_obj.fields.values()[index].default_val; + // TODO: avoid using `unreachable` to indicate this. + if (val == .none) return Value.@"unreachable"; + return val.toValue(); }, .anon_struct_type => |anon_struct| { const val = anon_struct.values[index]; @@ -3100,7 +3096,7 @@ pub const Type = struct { const struct_obj = mod.structPtrUnwrap(struct_type.index).?; const field = struct_obj.fields.values()[index]; if (field.is_comptime) { - return field.default_val; + return field.default_val.toValue(); } else { return field.ty.onePossibleValue(mod); } diff --git a/src/value.zig b/src/value.zig index 6a19678d71..2c38852bf5 100644 --- a/src/value.zig +++ b/src/value.zig @@ -24,9 +24,6 @@ pub const Value = struct { /// This union takes advantage of the fact that the first page of memory /// is unmapped, giving us 4096 possible enum tags that have no payload. legacy: extern union { - /// If the tag value is less than Tag.no_payload_count, then no pointer - /// dereference is needed. - tag_if_small_enough: Tag, ptr_otherwise: *Payload, }, @@ -64,8 +61,6 @@ pub const Value = struct { /// An instance of a union. 
@"union", - pub const no_payload_count = 0; - pub fn Type(comptime t: Tag) type { return switch (t) { .eu_payload, @@ -96,16 +91,7 @@ pub const Value = struct { } }; - pub fn initTag(small_tag: Tag) Value { - assert(@enumToInt(small_tag) < Tag.no_payload_count); - return Value{ - .ip_index = .none, - .legacy = .{ .tag_if_small_enough = small_tag }, - }; - } - pub fn initPayload(payload: *Payload) Value { - assert(@enumToInt(payload.tag) >= Tag.no_payload_count); return Value{ .ip_index = .none, .legacy = .{ .ptr_otherwise = payload }, @@ -114,11 +100,7 @@ pub const Value = struct { pub fn tag(self: Value) Tag { assert(self.ip_index == .none); - if (@enumToInt(self.legacy.tag_if_small_enough) < Tag.no_payload_count) { - return self.legacy.tag_if_small_enough; - } else { - return self.legacy.ptr_otherwise.tag; - } + return self.legacy.ptr_otherwise.tag; } /// Prefer `castTag` to this. @@ -129,12 +111,7 @@ pub const Value = struct { if (@hasField(T, "base_tag")) { return self.castTag(T.base_tag); } - if (@enumToInt(self.legacy.tag_if_small_enough) < Tag.no_payload_count) { - return null; - } inline for (@typeInfo(Tag).Enum.fields) |field| { - if (field.value < Tag.no_payload_count) - continue; const t = @intToEnum(Tag, field.value); if (self.legacy.ptr_otherwise.tag == t) { if (T == t.Type()) { @@ -149,9 +126,6 @@ pub const Value = struct { pub fn castTag(self: Value, comptime t: Tag) ?*t.Type() { if (self.ip_index != .none) return null; - if (@enumToInt(self.legacy.tag_if_small_enough) < Tag.no_payload_count) - return null; - if (self.legacy.ptr_otherwise.tag == t) return @fieldParentPtr(t.Type(), "base", self.legacy.ptr_otherwise); @@ -164,12 +138,7 @@ pub const Value = struct { if (self.ip_index != .none) { return Value{ .ip_index = self.ip_index, .legacy = undefined }; } - if (@enumToInt(self.legacy.tag_if_small_enough) < Tag.no_payload_count) { - return Value{ - .ip_index = .none, - .legacy = .{ .tag_if_small_enough = self.legacy.tag_if_small_enough }, - }; - } else switch (self.legacy.ptr_otherwise.tag) { + switch (self.legacy.ptr_otherwise.tag) { .bytes => { const bytes = self.castTag(.bytes).?.data; const new_payload = try arena.create(Payload.Bytes); @@ -312,6 +281,30 @@ pub const Value = struct { } }; } + /// Asserts that the value is representable as an array of bytes. + /// Returns the value as a null-terminated string stored in the InternPool. + pub fn toIpString(val: Value, ty: Type, mod: *Module) !InternPool.NullTerminatedString { + const ip = &mod.intern_pool; + return switch (mod.intern_pool.indexToKey(val.toIntern())) { + .enum_literal => |enum_literal| enum_literal, + .ptr => |ptr| switch (ptr.len) { + .none => unreachable, + else => try arrayToIpString(val, ptr.len.toValue().toUnsignedInt(mod), mod), + }, + .aggregate => |aggregate| switch (aggregate.storage) { + .bytes => |bytes| try ip.getOrPutString(mod.gpa, bytes), + .elems => try arrayToIpString(val, ty.arrayLen(mod), mod), + .repeated_elem => |elem| { + const byte = @intCast(u8, elem.toValue().toUnsignedInt(mod)); + const len = @intCast(usize, ty.arrayLen(mod)); + try ip.string_bytes.appendNTimes(mod.gpa, byte, len); + return ip.getOrPutTrailingString(mod.gpa, len); + }, + }, + else => unreachable, + }; + } + /// Asserts that the value is representable as an array of bytes. /// Copies the value into a freshly allocated slice of memory, which is owned by the caller. 
pub fn toAllocatedBytes(val: Value, ty: Type, allocator: Allocator, mod: *Module) ![]u8 { @@ -319,11 +312,11 @@ pub const Value = struct { .enum_literal => |enum_literal| allocator.dupe(u8, mod.intern_pool.stringToSlice(enum_literal)), .ptr => |ptr| switch (ptr.len) { .none => unreachable, - else => arrayToAllocatedBytes(val, ptr.len.toValue().toUnsignedInt(mod), allocator, mod), + else => try arrayToAllocatedBytes(val, ptr.len.toValue().toUnsignedInt(mod), allocator, mod), }, .aggregate => |aggregate| switch (aggregate.storage) { .bytes => |bytes| try allocator.dupe(u8, bytes), - .elems => arrayToAllocatedBytes(val, ty.arrayLen(mod), allocator, mod), + .elems => try arrayToAllocatedBytes(val, ty.arrayLen(mod), allocator, mod), .repeated_elem => |elem| { const byte = @intCast(u8, elem.toValue().toUnsignedInt(mod)); const result = try allocator.alloc(u8, @intCast(usize, ty.arrayLen(mod))); @@ -344,6 +337,23 @@ pub const Value = struct { return result; } + fn arrayToIpString(val: Value, len_u64: u64, mod: *Module) !InternPool.NullTerminatedString { + const gpa = mod.gpa; + const ip = &mod.intern_pool; + const len = @intCast(usize, len_u64); + try ip.string_bytes.ensureUnusedCapacity(gpa, len); + for (0..len) |i| { + // I don't think elemValue has the possibility to affect ip.string_bytes. Let's + // assert just to be sure. + const prev = ip.string_bytes.items.len; + const elem_val = try val.elemValue(mod, i); + assert(ip.string_bytes.items.len == prev); + const byte = @intCast(u8, elem_val.toUnsignedInt(mod)); + ip.string_bytes.appendAssumeCapacity(byte); + } + return ip.getOrPutTrailingString(gpa, len); + } + pub fn intern(val: Value, ty: Type, mod: *Module) Allocator.Error!InternPool.Index { if (val.ip_index != .none) return (try mod.getCoerced(val, ty)).toIntern(); switch (val.tag()) { @@ -498,7 +508,7 @@ pub const Value = struct { // Assume it is already an integer and return it directly. .simple_type, .int_type => val, .enum_literal => |enum_literal| { - const field_index = ty.enumFieldIndex(ip.stringToSlice(enum_literal), mod).?; + const field_index = ty.enumFieldIndex(enum_literal, mod).?; return switch (ip.indexToKey(ty.toIntern())) { // Assume it is already an integer and return it directly. .simple_type, .int_type => val, @@ -776,7 +786,7 @@ pub const Value = struct { .error_union => |error_union| error_union.val.err_name, else => unreachable, }; - const int = mod.global_error_set.get(mod.intern_pool.stringToSlice(name)).?; + const int = @intCast(Module.ErrorInt, mod.global_error_set.getIndex(name).?); std.mem.writeInt(Int, buffer[0..@sizeOf(Int)], @intCast(Int, int), endian); }, .Union => switch (ty.containerLayout(mod)) { @@ -1028,10 +1038,10 @@ pub const Value = struct { // TODO revisit this when we have the concept of the error tag type const Int = u16; const int = std.mem.readInt(Int, buffer[0..@sizeOf(Int)], endian); - const name = mod.error_name_list.items[@intCast(usize, int)]; + const name = mod.global_error_set.keys()[@intCast(usize, int)]; return (try mod.intern(.{ .err = .{ .ty = ty.toIntern(), - .name = mod.intern_pool.getString(name).unwrap().?, + .name = name, } })).toValue(); }, .Pointer => { @@ -2155,15 +2165,29 @@ pub const Value = struct { /// unreachable. For error unions, prefer `errorUnionIsPayload` to find out whether /// something is an error or not because it works without having to figure out the /// string. 
- pub fn getError(self: Value, mod: *const Module) ?[]const u8 { - return mod.intern_pool.stringToSliceUnwrap(switch (mod.intern_pool.indexToKey(self.toIntern())) { - .err => |err| err.name.toOptional(), + pub fn getError(val: Value, mod: *const Module) ?[]const u8 { + return switch (getErrorName(val, mod)) { + .empty => null, + else => |s| mod.intern_pool.stringToSlice(s), + }; + } + + pub fn getErrorName(val: Value, mod: *const Module) InternPool.NullTerminatedString { + return switch (mod.intern_pool.indexToKey(val.toIntern())) { + .err => |err| err.name, .error_union => |error_union| switch (error_union.val) { - .err_name => |err_name| err_name.toOptional(), - .payload => .none, + .err_name => |err_name| err_name, + .payload => .empty, }, else => unreachable, - }); + }; + } + + pub fn getErrorInt(val: Value, mod: *const Module) Module.ErrorInt { + return switch (getErrorName(val, mod)) { + .empty => 0, + else => |s| @intCast(Module.ErrorInt, mod.global_error_set.getIndex(s).?), + }; } /// Assumes the type is an error union. Returns true if and only if the value is @@ -4225,7 +4249,7 @@ pub const Value = struct { var fields: [tags.len]std.builtin.Type.StructField = undefined; for (&fields, tags) |*field, t| field.* = .{ .name = t.name, - .type = *if (t.value < Tag.no_payload_count) void else @field(Tag, t.name).Type(), + .type = *@field(Tag, t.name).Type(), .default_value = null, .is_comptime = false, .alignment = 0, -- cgit v1.2.3 From e23b0a01e6357252eb2c08a83eff9169ce49042c Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Fri, 2 Jun 2023 18:49:40 -0400 Subject: InternPool: fix yet more key lifetime issues --- src/Module.zig | 15 ++++++----- src/Sema.zig | 61 ++++++++++++++++++++++++++------------------- src/arch/x86_64/CodeGen.zig | 8 ++---- src/codegen.zig | 2 +- src/type.zig | 5 +++- src/value.zig | 5 ++-- 6 files changed, 52 insertions(+), 44 deletions(-) (limited to 'src/arch') diff --git a/src/Module.zig b/src/Module.zig index 9d58029cb5..c1d6b8157a 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -5448,7 +5448,6 @@ pub fn analyzeFnBody(mod: *Module, func_index: Fn.Index, arena: Allocator) SemaE defer comptime_mutable_decls.deinit(); const fn_ty = decl.ty; - const fn_ty_info = mod.typeToFunc(fn_ty).?; var sema: Sema = .{ .mod = mod, @@ -5459,7 +5458,7 @@ pub fn analyzeFnBody(mod: *Module, func_index: Fn.Index, arena: Allocator) SemaE .owner_decl_index = decl_index, .func = func, .func_index = func_index.toOptional(), - .fn_ret_ty = fn_ty_info.return_type.toType(), + .fn_ret_ty = mod.typeToFunc(fn_ty).?.return_type.toType(), .owner_func = func, .owner_func_index = func_index.toOptional(), .branch_quota = @max(func.branch_quota, Sema.default_branch_quota), @@ -5499,7 +5498,7 @@ pub fn analyzeFnBody(mod: *Module, func_index: Fn.Index, arena: Allocator) SemaE // This could be a generic function instantiation, however, in which case we need to // map the comptime parameters to constant values and only emit arg AIR instructions // for the runtime ones. 
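 // (Why the cached `fn_ty_info` below goes away: an InternPool.Key returned
 // by typeToFunc borrows pool-owned memory, so any intern() call in between
 // may invalidate it; the same invalidation the removed "Protects the decl
 // name slice" comment later in this patch guards against. A minimal sketch
 // of the hazard, assuming some interning happens between the two uses:
 //
 //     const info = mod.typeToFunc(fn_ty).?;   // borrows from the pool
 //     _ = try mod.intern(.{ ... });           // may grow/move pool storage
 //     _ = info.param_types;                   // potentially dangling
 //
 // hence the repeated `mod.typeToFunc(fn_ty).?` lookups in this hunk.)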
- const runtime_params_len = @intCast(u32, fn_ty_info.param_types.len); + const runtime_params_len = @intCast(u32, mod.typeToFunc(fn_ty).?.param_types.len); try inner_block.instructions.ensureTotalCapacityPrecise(gpa, runtime_params_len); try sema.air_instructions.ensureUnusedCapacity(gpa, fn_info.total_params_len * 2); // * 2 for the `addType` try sema.inst_map.ensureSpaceForInstructions(gpa, fn_info.param_body); @@ -5525,7 +5524,7 @@ pub fn analyzeFnBody(mod: *Module, func_index: Fn.Index, arena: Allocator) SemaE sema.inst_map.putAssumeCapacityNoClobber(inst, arg); total_param_index += 1; continue; - } else fn_ty_info.param_types[runtime_param_index].toType(); + } else mod.typeToFunc(fn_ty).?.param_types[runtime_param_index].toType(); const opt_opv = sema.typeHasOnePossibleValue(param_ty) catch |err| switch (err) { error.NeededSourceLocation => unreachable, @@ -5623,7 +5622,7 @@ pub fn analyzeFnBody(mod: *Module, func_index: Fn.Index, arena: Allocator) SemaE // Crucially, this happens *after* we set the function state to success above, // so that dependencies on the function body will now be satisfied rather than // result in circular dependency errors. - sema.resolveFnTypes(mod.typeToFunc(fn_ty).?) catch |err| switch (err) { + sema.resolveFnTypes(fn_ty) catch |err| switch (err) { error.NeededSourceLocation => unreachable, error.GenericPoison => unreachable, error.ComptimeReturn => unreachable, @@ -6378,9 +6377,9 @@ pub fn populateTestFunctions( for (test_fn_vals, mod.test_functions.keys()) |*test_fn_val, test_decl_index| { const test_decl = mod.declPtr(test_decl_index); - // Protects test_decl_name from being invalidated during call to intern() below. - try ip.string_bytes.ensureUnusedCapacity(gpa, ip.stringToSlice(test_decl.name).len + 10); - const test_decl_name = ip.stringToSlice(test_decl.name); + // TODO: write something like getCoercedInts to avoid needing to dupe + const test_decl_name = try gpa.dupe(u8, ip.stringToSlice(test_decl.name)); + defer gpa.free(test_decl_name); const test_name_decl_index = n: { const test_name_decl_ty = try mod.arrayType(.{ .len = test_decl_name.len, diff --git a/src/Sema.zig b/src/Sema.zig index ca4e761cdc..4d9fc201a1 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -5227,6 +5227,8 @@ fn zirStr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins fn addStrLit(sema: *Sema, block: *Block, bytes: []const u8) CompileError!Air.Inst.Ref { const mod = sema.mod; const gpa = sema.gpa; + // TODO: write something like getCoercedInts to avoid needing to dupe + const duped_bytes = try sema.arena.dupe(u8, bytes); const ty = try mod.arrayType(.{ .len = bytes.len, .child = .u8_type, @@ -5234,7 +5236,7 @@ fn addStrLit(sema: *Sema, block: *Block, bytes: []const u8) CompileError!Air.Ins }); const val = try mod.intern(.{ .aggregate = .{ .ty = ty.toIntern(), - .storage = .{ .bytes = bytes }, + .storage = .{ .bytes = duped_bytes }, } }); const gop = try mod.memoized_decls.getOrPut(gpa, val); if (!gop.found_existing) { @@ -11478,7 +11480,8 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError operand_ty.fmt(mod), }); } - for (operand_ty.errorSetNames(mod)) |error_name_ip| { + for (0..operand_ty.errorSetNames(mod).len) |i| { + const error_name_ip = operand_ty.errorSetNames(mod)[i]; const error_name = mod.intern_pool.stringToSlice(error_name_ip); if (seen_errors.contains(error_name)) continue; cases_len += 1; @@ -15851,7 +15854,8 @@ fn zirBuiltinSrc( const func_name_val = blk: { var anon_decl = try block.startAnonDecl(); defer 
anon_decl.deinit(); - const name = mod.intern_pool.stringToSlice(fn_owner_decl.name); + // TODO: write something like getCoercedInts to avoid needing to dupe + const name = try sema.arena.dupe(u8, mod.intern_pool.stringToSlice(fn_owner_decl.name)); const new_decl_ty = try mod.arrayType(.{ .len = name.len, .child = .u8_type, @@ -16287,7 +16291,8 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const error_field_vals = if (ty.isAnyError(mod)) null else blk: { const vals = try sema.arena.alloc(InternPool.Index, ty.errorSetNames(mod).len); for (vals, 0..) |*field_val, i| { - const name = ip.stringToSlice(ty.errorSetNames(mod)[i]); + // TODO: write something like getCoercedInts to avoid needing to dupe + const name = try sema.arena.dupe(u8, ip.stringToSlice(ty.errorSetNames(mod)[i])); const name_val = v: { var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); @@ -16417,8 +16422,8 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const enum_field_vals = try sema.arena.alloc(InternPool.Index, enum_type.names.len); for (enum_field_vals, 0..) |*field_val, i| { - const name_ip = ip.indexToKey(ty.toIntern()).enum_type.names[i]; - const name = ip.stringToSlice(name_ip); + // TODO: write something like getCoercedInts to avoid needing to dupe + const name = try sema.arena.dupe(u8, ip.stringToSlice(ip.indexToKey(ty.toIntern()).enum_type.names[i])); const name_val = v: { var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); @@ -16556,7 +16561,8 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai for (union_field_vals, 0..) |*field_val, i| { const field = union_fields.values()[i]; - const name = ip.stringToSlice(union_fields.keys()[i]); + // TODO: write something like getCoercedInts to avoid needing to dupe + const name = try sema.arena.dupe(u8, ip.stringToSlice(union_fields.keys()[i])); const name_val = v: { var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); @@ -16714,9 +16720,10 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const name_val = v: { var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); + // TODO: write something like getCoercedInts to avoid needing to dupe const bytes = if (tuple.names.len != 0) // https://github.com/ziglang/zig/issues/15709 - @as([]const u8, ip.stringToSlice(tuple.names[i])) + try sema.arena.dupe(u8, ip.stringToSlice(ip.indexToKey(struct_ty.toIntern()).anon_struct_type.names[i])) else try std.fmt.allocPrint(sema.arena, "{d}", .{i}); const new_decl_ty = try mod.arrayType(.{ @@ -16771,7 +16778,8 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai struct_obj.fields.keys(), struct_obj.fields.values(), ) |*field_val, name_nts, field| { - const name = ip.stringToSlice(name_nts); + // TODO: write something like getCoercedInts to avoid needing to dupe + const name = try sema.arena.dupe(u8, ip.stringToSlice(name_nts)); const name_val = v: { var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); @@ -17020,9 +17028,8 @@ fn typeInfoNamespaceDecls( const name_val = v: { var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); - // Protects the decl name slice from being invalidated at the call to intern(). 
- try ip.string_bytes.ensureUnusedCapacity(sema.gpa, ip.stringToSlice(decl.name).len + 1); - const name = ip.stringToSlice(decl.name); + // TODO: write something like getCoercedInts to avoid needing to dupe + const name = try sema.arena.dupe(u8, ip.stringToSlice(decl.name)); const new_decl_ty = try mod.arrayType(.{ .len = name.len, .child = .u8_type, @@ -19060,6 +19067,7 @@ fn zirTagName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air }; return sema.failWithOwnedErrorMsg(msg); }; + // TODO: write something like getCoercedInts to avoid needing to dupe const field_name = enum_ty.enumFieldName(field_index, mod); return sema.addStrLit(block, ip.stringToSlice(field_name)); } @@ -19601,7 +19609,6 @@ fn zirReify( // Tag type const fields_len = try sema.usizeCast(block, src, fields_val.sliceLen(mod)); var explicit_tags_seen: []bool = &.{}; - var explicit_enum_info: ?InternPool.Key.EnumType = null; var enum_field_names: []InternPool.NullTerminatedString = &.{}; if (tag_type_val.optionalValue(mod)) |payload_val| { union_obj.tag_ty = payload_val.toType(); @@ -19611,7 +19618,6 @@ fn zirReify( else => return sema.fail(block, src, "Type.Union.tag_type must be an enum type", .{}), }; - explicit_enum_info = enum_type; explicit_tags_seen = try sema.arena.alloc(bool, enum_type.names.len); @memset(explicit_tags_seen, false); } else { @@ -19640,7 +19646,8 @@ fn zirReify( enum_field_names[i] = field_name; } - if (explicit_enum_info) |tag_info| { + if (explicit_tags_seen.len > 0) { + const tag_info = ip.indexToKey(union_obj.tag_ty.toIntern()).enum_type; const enum_index = tag_info.nameIndex(ip, field_name) orelse { const msg = msg: { const msg = try sema.errMsg(block, src, "no field named '{s}' in enum '{}'", .{ ip.stringToSlice(field_name), union_obj.tag_ty.fmt(mod) }); @@ -19705,7 +19712,8 @@ fn zirReify( } } - if (explicit_enum_info) |tag_info| { + if (explicit_tags_seen.len > 0) { + const tag_info = ip.indexToKey(union_obj.tag_ty.toIntern()).enum_type; if (tag_info.names.len > fields_len) { const msg = msg: { const msg = try sema.errMsg(block, src, "enum field(s) missing in union", .{}); @@ -31625,17 +31633,17 @@ fn resolvePeerTypes( return chosen_ty; } -pub fn resolveFnTypes(sema: *Sema, fn_info: InternPool.Key.FuncType) CompileError!void { +pub fn resolveFnTypes(sema: *Sema, fn_ty: Type) CompileError!void { const mod = sema.mod; - try sema.resolveTypeFully(fn_info.return_type.toType()); + try sema.resolveTypeFully(mod.typeToFunc(fn_ty).?.return_type.toType()); - if (mod.comp.bin_file.options.error_return_tracing and fn_info.return_type.toType().isError(mod)) { + if (mod.comp.bin_file.options.error_return_tracing and mod.typeToFunc(fn_ty).?.return_type.toType().isError(mod)) { // Ensure the type exists so that backends can assume that. 
_ = try sema.getBuiltinType("StackTrace"); } - for (fn_info.param_types) |param_ty| { - try sema.resolveTypeFully(param_ty.toType()); + for (0..mod.typeToFunc(fn_ty).?.param_types.len) |i| { + try sema.resolveTypeFully(mod.typeToFunc(fn_ty).?.param_types[i].toType()); } } @@ -33077,7 +33085,6 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { var enum_field_names: []InternPool.NullTerminatedString = &.{}; var enum_field_vals: std.AutoArrayHashMapUnmanaged(InternPool.Index, void) = .{}; var explicit_tags_seen: []bool = &.{}; - var explicit_enum_info: ?InternPool.Key.EnumType = null; if (tag_type_ref != .none) { const tag_ty_src: LazySrcLoc = .{ .node_offset_container_tag = src.node_offset.x }; const provided_ty = try sema.resolveType(&block_scope, tag_ty_src, tag_type_ref); @@ -33114,7 +33121,6 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { }; // The fields of the union must match the enum exactly. // A flag per field is used to check for missing and extraneous fields. - explicit_enum_info = enum_type; explicit_tags_seen = try sema.arena.alloc(bool, enum_type.names.len); @memset(explicit_tags_seen, false); } @@ -33256,7 +33262,8 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { return sema.failWithOwnedErrorMsg(msg); } - if (explicit_enum_info) |tag_info| { + if (explicit_tags_seen.len > 0) { + const tag_info = ip.indexToKey(union_obj.tag_ty.toIntern()).enum_type; const enum_index = tag_info.nameIndex(ip, field_name) orelse { const msg = msg: { const ty_src = mod.fieldSrcLoc(union_obj.owner_decl, .{ @@ -33346,7 +33353,8 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { } } - if (explicit_enum_info) |tag_info| { + if (explicit_tags_seen.len > 0) { + const tag_info = ip.indexToKey(union_obj.tag_ty.toIntern()).enum_type; if (tag_info.names.len > fields_len) { const msg = msg: { const msg = try sema.errMsg(&block_scope, src, "enum field(s) missing in union", .{}); @@ -33706,9 +33714,10 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { } // In this case the struct has all comptime-known fields and // therefore has one possible value. + // TODO: write something like getCoercedInts to avoid needing to dupe return (try mod.intern(.{ .aggregate = .{ .ty = ty.toIntern(), - .storage = .{ .elems = tuple.values }, + .storage = .{ .elems = try sema.arena.dupe(InternPool.Index, tuple.values) }, } })).toValue(); }, diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 2675d5350a..a1b57516ee 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -2026,13 +2026,9 @@ fn genLazy(self: *Self, lazy_sym: link.File.LazySymbol) InnerError!void { try self.genLazySymbolRef(.lea, data_reg, .{ .kind = .const_data, .ty = enum_ty }); var data_off: i32 = 0; - for ( - exitlude_jump_relocs, - enum_ty.enumFields(mod), - 0.., - ) |*exitlude_jump_reloc, tag_name_ip, index_usize| { + for (exitlude_jump_relocs, 0..) 
|*exitlude_jump_reloc, index_usize| { const index = @intCast(u32, index_usize); - const tag_name = mod.intern_pool.stringToSlice(tag_name_ip); + const tag_name = mod.intern_pool.stringToSlice(enum_ty.enumFields(mod)[index_usize]); const tag_val = try mod.enumValueFieldIndex(enum_ty, index); const tag_mcv = try self.genTypedValue(.{ .ty = enum_ty, .val = tag_val }); try self.genBinOpMir(.{ ._, .cmp }, enum_ty, enum_mcv, tag_mcv); diff --git a/src/codegen.zig b/src/codegen.zig index 77359d78da..b39c3c5ec0 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -517,7 +517,7 @@ pub fn generateSymbol( const field_ty = field.ty; if (!field_ty.hasRuntimeBits(mod)) continue; - const field_val = switch (aggregate.storage) { + const field_val = switch (mod.intern_pool.indexToKey(typed_value.val.toIntern()).aggregate.storage) { .bytes => |bytes| try mod.intern_pool.get(mod.gpa, .{ .int = .{ .ty = field_ty.toIntern(), .storage = .{ .u64 = bytes[index] }, diff --git a/src/type.zig b/src/type.zig index 43aaf3c786..d9ae710b2d 100644 --- a/src/type.zig +++ b/src/type.zig @@ -2576,9 +2576,12 @@ pub const Type = struct { } // In this case the struct has all comptime-known fields and // therefore has one possible value. + // TODO: write something like getCoercedInts to avoid needing to dupe + const duped_values = try mod.gpa.dupe(InternPool.Index, tuple.values); + defer mod.gpa.free(duped_values); return (try mod.intern(.{ .aggregate = .{ .ty = ty.toIntern(), - .storage = .{ .elems = tuple.values }, + .storage = .{ .elems = duped_values }, } })).toValue(); }, diff --git a/src/value.zig b/src/value.zig index 3958615214..8ab98bc994 100644 --- a/src/value.zig +++ b/src/value.zig @@ -1904,6 +1904,7 @@ pub const Value = struct { start: usize, end: usize, ) error{OutOfMemory}!Value { + // TODO: write something like getCoercedInts to avoid needing to dupe return switch (val.ip_index) { .none => switch (val.tag()) { .slice => val.castTag(.slice).?.data.ptr.sliceArray(mod, arena, start, end), @@ -1937,8 +1938,8 @@ pub const Value = struct { else => unreachable, }.toIntern(), .storage = switch (aggregate.storage) { - .bytes => |bytes| .{ .bytes = bytes[start..end] }, - .elems => |elems| .{ .elems = elems[start..end] }, + .bytes => .{ .bytes = try arena.dupe(u8, mod.intern_pool.indexToKey(val.toIntern()).aggregate.storage.bytes[start..end]) }, + .elems => .{ .elems = try arena.dupe(InternPool.Index, mod.intern_pool.indexToKey(val.toIntern()).aggregate.storage.elems[start..end]) }, .repeated_elem => |elem| .{ .repeated_elem = elem }, }, } })).toValue(), -- cgit v1.2.3 From 44d8cf9331218653c283a930bbc74e6871fe1701 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Sun, 4 Jun 2023 10:26:01 -0400 Subject: wasm: address behavior test regressions --- src/arch/wasm/CodeGen.zig | 28 ++++++++++++++++++++++------ test/behavior/bugs/1381.zig | 1 + 2 files changed, 23 insertions(+), 6 deletions(-) (limited to 'src/arch') diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index 9b7ba19c13..4c1d5b4081 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -2960,10 +2960,21 @@ fn lowerParentPtr(func: *CodeGen, ptr_val: Value) InnerError!WValue { const offset = index * elem_type.abiSize(mod); const array_ptr = try func.lowerParentPtr(elem.base.toValue()); - return WValue{ .memory_offset = .{ - .pointer = array_ptr.memory, - .offset = @intCast(u32, offset), - } }; + return switch (array_ptr) { + .memory => |ptr_| WValue{ + .memory_offset = .{ + .pointer = ptr_, + .offset = @intCast(u32, offset), + }, + 
}, + .memory_offset => |mem_off| WValue{ + .memory_offset = .{ + .pointer = mem_off.pointer, + .offset = @intCast(u32, offset) + mem_off.offset, + }, + }, + else => unreachable, + }; }, .field => |field| { const parent_ty = mod.intern_pool.typeOf(field.base).toType().childType(mod); @@ -3253,7 +3264,12 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue { }, else => unreachable, }, - .un => return func.fail("Wasm TODO: LowerConstant for {}", .{ty.fmt(mod)}), + .un => |union_obj| { + // in this case we have a packed union which will not be passed by reference. + const field_index = ty.unionTagFieldIndex(union_obj.tag.toValue(), func.bin_file.base.options.module.?).?; + const field_ty = ty.unionFields(mod).values()[field_index].ty; + return func.lowerConstant(union_obj.val.toValue(), field_ty); + }, .memoized_call => unreachable, } } @@ -7173,7 +7189,7 @@ fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { break :val try WValue.toLocal(.stack, func, result_ty); }; - return func.finishAir(inst, result_ptr, &.{ extra.ptr, extra.new_value, extra.expected_value }); + return func.finishAir(inst, result_ptr, &.{ extra.ptr, extra.expected_value, extra.new_value }); } fn airAtomicLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { diff --git a/test/behavior/bugs/1381.zig b/test/behavior/bugs/1381.zig index 90941de341..f35c963df3 100644 --- a/test/behavior/bugs/1381.zig +++ b/test/behavior/bugs/1381.zig @@ -17,6 +17,7 @@ test "union that needs padding bytes inside an array" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; var as = [_]A{ A{ .B = B{ .D = 1 } }, -- cgit v1.2.3 From 63604024f47767b7b0c0deba5c9647cd6c040931 Mon Sep 17 00:00:00 2001 From: mlugg Date: Sun, 11 Jun 2023 13:15:37 +0100 Subject: stage2: fix InternPool compile errors on 32-bit targets --- src/InternPool.zig | 12 ++++++------ src/Sema.zig | 19 +++++++++++-------- src/TypedValue.zig | 4 ++-- src/arch/wasm/CodeGen.zig | 4 ++-- src/codegen/c.zig | 2 +- src/value.zig | 11 ++++++----- 6 files changed, 28 insertions(+), 24 deletions(-) (limited to 'src/arch') diff --git a/src/InternPool.zig b/src/InternPool.zig index a46f765ad5..9ca5a48a55 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -3824,13 +3824,13 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { assert(child == .u8_type); if (bytes.len != len) { assert(bytes.len == len_including_sentinel); - assert(bytes[len] == ip.indexToKey(sentinel).int.storage.u64); + assert(bytes[@intCast(usize, len)] == ip.indexToKey(sentinel).int.storage.u64); } }, .elems => |elems| { if (elems.len != len) { assert(elems.len == len_including_sentinel); - assert(elems[len] == sentinel); + assert(elems[@intCast(usize, len)] == sentinel); } }, .repeated_elem => |elem| { @@ -3936,7 +3936,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { if (child == .u8_type) bytes: { const string_bytes_index = ip.string_bytes.items.len; - try ip.string_bytes.ensureUnusedCapacity(gpa, len_including_sentinel + 1); + try ip.string_bytes.ensureUnusedCapacity(gpa, @intCast(usize, len_including_sentinel + 1)); try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Bytes).Struct.fields.len); switch (aggregate.storage) { .bytes => |bytes| 
ip.string_bytes.appendSliceAssumeCapacity(bytes), @@ -3953,7 +3953,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .repeated_elem => |elem| switch (ip.indexToKey(elem)) { .undef => break :bytes, .int => |int| @memset( - ip.string_bytes.addManyAsSliceAssumeCapacity(len), + ip.string_bytes.addManyAsSliceAssumeCapacity(@intCast(usize, len)), @intCast(u8, int.storage.u64), ), else => unreachable, @@ -3967,7 +3967,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { const string = if (has_internal_null) @intToEnum(String, string_bytes_index) else - (try ip.getOrPutTrailingString(gpa, len_including_sentinel)).toString(); + (try ip.getOrPutTrailingString(gpa, @intCast(usize, len_including_sentinel))).toString(); ip.items.appendAssumeCapacity(.{ .tag = .bytes, .data = ip.addExtraAssumeCapacity(Bytes{ @@ -3980,7 +3980,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { try ip.extra.ensureUnusedCapacity( gpa, - @typeInfo(Tag.Aggregate).Struct.fields.len + len_including_sentinel, + @typeInfo(Tag.Aggregate).Struct.fields.len + @intCast(usize, len_including_sentinel), ); ip.items.appendAssumeCapacity(.{ .tag = .aggregate, diff --git a/src/Sema.zig b/src/Sema.zig index 21c0402c04..8d733dfb3c 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -28186,11 +28186,12 @@ fn beginComptimePtrMutation( const elem_abi_size_u64 = try sema.typeAbiSize(base_elem_ty); if (elem_abi_size_u64 < try sema.typeAbiSize(ptr_elem_ty)) { const elem_abi_size = try sema.usizeCast(block, src, elem_abi_size_u64); + const elem_idx = try sema.usizeCast(block, src, elem_ptr.index); return .{ .mut_decl = parent.mut_decl, .pointee = .{ .reinterpret = .{ .val_ptr = val_ptr, - .byte_offset = elem_abi_size * elem_ptr.index, + .byte_offset = elem_abi_size * elem_idx, } }, .ty = parent.ty, }; @@ -28223,7 +28224,7 @@ fn beginComptimePtrMutation( block, src, elem_ty, - &elems[elem_ptr.index], + &elems[@intCast(usize, elem_ptr.index)], ptr_elem_ty, parent.mut_decl, ); @@ -28254,7 +28255,7 @@ fn beginComptimePtrMutation( block, src, elem_ty, - &elems[elem_ptr.index], + &elems[@intCast(usize, elem_ptr.index)], ptr_elem_ty, parent.mut_decl, ); @@ -28265,7 +28266,7 @@ fn beginComptimePtrMutation( block, src, elem_ty, - &val_ptr.castTag(.aggregate).?.data[elem_ptr.index], + &val_ptr.castTag(.aggregate).?.data[@intCast(usize, elem_ptr.index)], ptr_elem_ty, parent.mut_decl, ), @@ -28291,7 +28292,7 @@ fn beginComptimePtrMutation( block, src, elem_ty, - &elems[elem_ptr.index], + &elems[@intCast(usize, elem_ptr.index)], ptr_elem_ty, parent.mut_decl, ); @@ -28331,11 +28332,12 @@ fn beginComptimePtrMutation( const elem_abi_size_u64 = try sema.typeAbiSize(base_elem_ty); const elem_abi_size = try sema.usizeCast(block, src, elem_abi_size_u64); + const elem_idx = try sema.usizeCast(block, src, elem_ptr.index); return ComptimePtrMutationKit{ .mut_decl = parent.mut_decl, .pointee = .{ .reinterpret = .{ .val_ptr = reinterpret.val_ptr, - .byte_offset = reinterpret.byte_offset + elem_abi_size * elem_ptr.index, + .byte_offset = reinterpret.byte_offset + elem_abi_size * elem_idx, } }, .ty = parent.ty, }; @@ -28750,9 +28752,10 @@ fn beginComptimePtrLoad( // the pointee array directly from our parent array. 
if (load_ty.isArrayOrVector(mod) and load_ty.childType(mod).eql(elem_ty, mod)) { const N = try sema.usizeCast(block, src, load_ty.arrayLenIncludingSentinel(mod)); + const elem_idx = try sema.usizeCast(block, src, elem_ptr.index); deref.pointee = if (elem_ptr.index + N <= check_len) TypedValue{ .ty = try Type.array(sema.arena, N, null, elem_ty, mod), - .val = try array_tv.val.sliceArray(mod, sema.arena, elem_ptr.index, elem_ptr.index + N), + .val = try array_tv.val.sliceArray(mod, sema.arena, elem_idx, elem_idx + N), } else null; break :blk deref; } @@ -28773,7 +28776,7 @@ fn beginComptimePtrLoad( } deref.pointee = TypedValue{ .ty = elem_ty, - .val = try array_tv.val.elemValue(mod, elem_ptr.index), + .val = try array_tv.val.elemValue(mod, @intCast(usize, elem_ptr.index)), }; break :blk deref; }, diff --git a/src/TypedValue.zig b/src/TypedValue.zig index 26bf25bbac..7faff3af01 100644 --- a/src/TypedValue.zig +++ b/src/TypedValue.zig @@ -356,12 +356,12 @@ pub fn print( if (container_ty.isTuple(mod)) { try writer.print("[{d}]", .{field.index}); } - const field_name_ip = container_ty.structFieldName(field.index, mod); + const field_name_ip = container_ty.structFieldName(@intCast(usize, field.index), mod); const field_name = mod.intern_pool.stringToSlice(field_name_ip); try writer.print(".{}", .{std.zig.fmtId(field_name)}); }, .Union => { - const field_name_ip = container_ty.unionFields(mod).keys()[field.index]; + const field_name_ip = container_ty.unionFields(mod).keys()[@intCast(usize, field.index)]; const field_name = mod.intern_pool.stringToSlice(field_name_ip); try writer.print(".{}", .{std.zig.fmtId(field_name)}); }, diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index 4c1d5b4081..877db4b623 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -2982,8 +2982,8 @@ fn lowerParentPtr(func: *CodeGen, ptr_val: Value) InnerError!WValue { const offset = switch (parent_ty.zigTypeTag(mod)) { .Struct => switch (parent_ty.containerLayout(mod)) { - .Packed => parent_ty.packedStructFieldByteOffset(field.index, mod), - else => parent_ty.structFieldOffset(field.index, mod), + .Packed => parent_ty.packedStructFieldByteOffset(@intCast(usize, field.index), mod), + else => parent_ty.structFieldOffset(@intCast(usize, field.index), mod), }, .Union => switch (parent_ty.containerLayout(mod)) { .Packed => 0, diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 7b091d6823..8d2ba2bbb8 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -642,7 +642,7 @@ pub const DeclGen = struct { // Ensure complete type definition is visible before accessing fields. 
_ = try dg.typeToIndex(base_ty, .complete); const field_ty = switch (mod.intern_pool.indexToKey(base_ty.toIntern())) { - .anon_struct_type, .struct_type, .union_type => base_ty.structFieldType(field.index, mod), + .anon_struct_type, .struct_type, .union_type => base_ty.structFieldType(@intCast(usize, field.index), mod), .ptr_type => |ptr_type| switch (ptr_type.flags.size) { .One, .Many, .C => unreachable, .Slice => switch (field.index) { diff --git a/src/value.zig b/src/value.zig index 6f603c248e..dbf25324e5 100644 --- a/src/value.zig +++ b/src/value.zig @@ -395,7 +395,8 @@ pub const Value = struct { } }); }, .aggregate => { - const old_elems = val.castTag(.aggregate).?.data[0..ty.arrayLen(mod)]; + const len = @intCast(usize, ty.arrayLen(mod)); + const old_elems = val.castTag(.aggregate).?.data[0..len]; const new_elems = try mod.gpa.alloc(InternPool.Index, old_elems.len); defer mod.gpa.free(new_elems); const ty_key = mod.intern_pool.indexToKey(ty.toIntern()); @@ -642,7 +643,7 @@ pub const Value = struct { const base_addr = (try field.base.toValue().getUnsignedIntAdvanced(mod, opt_sema)) orelse return null; const struct_ty = mod.intern_pool.typeOf(field.base).toType().childType(mod); if (opt_sema) |sema| try sema.resolveTypeLayout(struct_ty); - return base_addr + struct_ty.structFieldOffset(field.index, mod); + return base_addr + struct_ty.structFieldOffset(@intCast(usize, field.index), mod); }, else => null, }, @@ -1798,10 +1799,10 @@ pub const Value = struct { .int, .eu_payload => unreachable, .opt_payload => |base| base.toValue().elemValue(mod, index), .comptime_field => |field_val| field_val.toValue().elemValue(mod, index), - .elem => |elem| elem.base.toValue().elemValue(mod, index + elem.index), + .elem => |elem| elem.base.toValue().elemValue(mod, index + @intCast(usize, elem.index)), .field => |field| if (field.base.toValue().pointerDecl(mod)) |decl_index| { const base_decl = mod.declPtr(decl_index); - const field_val = try base_decl.val.fieldValue(mod, field.index); + const field_val = try base_decl.val.fieldValue(mod, @intCast(usize, field.index)); return field_val.elemValue(mod, index); } else unreachable, }, @@ -1921,7 +1922,7 @@ pub const Value = struct { .comptime_field => |comptime_field| comptime_field.toValue() .sliceArray(mod, arena, start, end), .elem => |elem| elem.base.toValue() - .sliceArray(mod, arena, start + elem.index, end + elem.index), + .sliceArray(mod, arena, start + @intCast(usize, elem.index), end + @intCast(usize, elem.index)), else => unreachable, }, .aggregate => |aggregate| (try mod.intern(.{ .aggregate = .{ -- cgit v1.2.3 From 9e8c7b104e6bfa2821b79ee05c5583776749f136 Mon Sep 17 00:00:00 2001 From: Jacob G-W Date: Sat, 10 Jun 2023 19:27:54 -0400 Subject: Plan9: Add support for lazy symbols This includes a renaming from DeclBlock to Atom. 
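For reference, a backend consumes the new Atom-based API roughly as follows
(a minimal sketch distilled from the x86_64 and aarch64 call sites in this
diff; the `lazySymbolGotAddress` wrapper itself is hypothetical and only
illustrates the intended call sequence):

    // `p9` is a *link.File.Plan9; `lazy_sym` is a link.File.LazySymbol.
    fn lazySymbolGotAddress(p9: *link.File.Plan9, lazy_sym: link.File.LazySymbol) !u64 {
        // Creates the atom, its symbol table entry, and its GOT slot on
        // first use; for anyerror, codegen is deferred to flushModule.
        const atom_index = try p9.getOrCreateAtomForLazySymbol(lazy_sym);
        // Asserts the GOT slot exists and computes
        // bases.data + got_index * ptr_bytes.
        return p9.getAtom(atom_index).getOffsetTableAddress(p9);
    }

This replaces the base-plus-index arithmetic that each backend previously
computed inline against DeclBlock.got_index.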
--- src/arch/aarch64/CodeGen.zig | 11 +- src/arch/x86_64/CodeGen.zig | 33 +++- src/codegen.zig | 7 +- src/link/Plan9.zig | 348 ++++++++++++++++++++++++++++++++++++------- 4 files changed, 326 insertions(+), 73 deletions(-) (limited to 'src/arch') diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index bf945e6983..dd752555b7 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -4335,14 +4335,9 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier }, }); } else if (self.bin_file.cast(link.File.Plan9)) |p9| { - const decl_block_index = try p9.seeDecl(func.owner_decl); - const decl_block = p9.getDeclBlock(decl_block_index); - const ptr_bits = self.target.ptrBitWidth(); - const ptr_bytes: u64 = @divExact(ptr_bits, 8); - const got_addr = p9.bases.data; - const got_index = decl_block.got_index.?; - const fn_got_addr = got_addr + got_index * ptr_bytes; - try self.genSetReg(Type.usize, .x30, .{ .memory = fn_got_addr }); + const atom_index = try p9.seeDecl(func.owner_decl); + const atom = p9.getAtom(atom_index); + try self.genSetReg(Type.usize, .x30, .{ .memory = atom.getOffsetTableAddress(p9) }); } else unreachable; _ = try self.addInst(.{ diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index a1b57516ee..e798d46f27 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -8115,16 +8115,11 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier try self.genSetReg(.rax, Type.usize, .{ .lea_got = sym_index }); try self.asmRegister(.{ ._, .call }, .rax); } else if (self.bin_file.cast(link.File.Plan9)) |p9| { - const decl_block_index = try p9.seeDecl(owner_decl); - const decl_block = p9.getDeclBlock(decl_block_index); - const ptr_bits = self.target.ptrBitWidth(); - const ptr_bytes: u64 = @divExact(ptr_bits, 8); - const got_addr = p9.bases.data; - const got_index = decl_block.got_index.?; - const fn_got_addr = got_addr + got_index * ptr_bytes; + const atom_index = try p9.seeDecl(owner_decl); + const atom = p9.getAtom(atom_index); try self.asmMemory(.{ ._, .call }, Memory.sib(.qword, .{ .base = .{ .reg = .ds }, - .disp = @intCast(i32, fn_got_addr), + .disp = @intCast(i32, atom.getOffsetTableAddress(p9)), })); } else unreachable; } else if (func_value.getExternFunc(mod)) |extern_func| { @@ -10092,6 +10087,28 @@ fn genLazySymbolRef( ), else => unreachable, } + } else if (self.bin_file.cast(link.File.Plan9)) |p9_file| { + const atom_index = p9_file.getOrCreateAtomForLazySymbol(lazy_sym) catch |err| + return self.fail("{s} creating lazy symbol", .{@errorName(err)}); + var atom = p9_file.getAtom(atom_index); + _ = atom.getOrCreateOffsetTableEntry(p9_file); + const got_addr = atom.getOffsetTableAddress(p9_file); + const got_mem = + Memory.sib(.qword, .{ .base = .{ .reg = .ds }, .disp = @intCast(i32, got_addr) }); + switch (tag) { + .lea, .mov => try self.asmRegisterMemory(.{ ._, .mov }, reg.to64(), got_mem), + .call => try self.asmMemory(.{ ._, .call }, got_mem), + else => unreachable, + } + switch (tag) { + .lea, .call => {}, + .mov => try self.asmRegisterMemory( + .{ ._, tag }, + reg.to64(), + Memory.sib(.qword, .{ .base = .{ .reg = reg.to64() } }), + ), + else => unreachable, + } } else if (self.bin_file.cast(link.File.Coff)) |coff_file| { const atom_index = coff_file.getOrCreateAtomForLazySymbol(lazy_sym) catch |err| return self.fail("{s} creating lazy symbol", .{@errorName(err)}); diff --git a/src/codegen.zig b/src/codegen.zig index b39c3c5ec0..7625fbe031 
100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -852,10 +852,9 @@ fn genDeclRef( const sym_index = coff_file.getAtom(atom_index).getSymbolIndex().?; return GenResult.mcv(.{ .load_got = sym_index }); } else if (bin_file.cast(link.File.Plan9)) |p9| { - const decl_block_index = try p9.seeDecl(decl_index); - const decl_block = p9.getDeclBlock(decl_block_index); - const got_addr = p9.bases.data + decl_block.got_index.? * ptr_bytes; - return GenResult.mcv(.{ .memory = got_addr }); + const atom_index = try p9.seeDecl(decl_index); + const atom = p9.getAtom(atom_index); + return GenResult.mcv(.{ .memory = atom.getOffsetTableAddress(p9) }); } else { return GenResult.fail(bin_file.allocator, src_loc, "TODO genDeclRef for target {}", .{target}); } diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig index c08754b57a..dccfa1d6c0 100644 --- a/src/link/Plan9.zig +++ b/src/link/Plan9.zig @@ -79,6 +79,8 @@ data_decl_table: std.AutoArrayHashMapUnmanaged(Module.Decl.Index, []u8) = .{}, /// with `Decl` `main`, and lives as long as that `Decl`. unnamed_const_atoms: UnnamedConstTable = .{}, +lazy_syms: LazySymbolTable = .{}, + relocs: std.AutoHashMapUnmanaged(Module.Decl.Index, std.ArrayListUnmanaged(Reloc)) = .{}, hdr: aout.ExecHdr = undefined, @@ -94,7 +96,7 @@ got_index_free_list: std.ArrayListUnmanaged(usize) = .{}, syms_index_free_list: std.ArrayListUnmanaged(usize) = .{}, -decl_blocks: std.ArrayListUnmanaged(DeclBlock) = .{}, +atoms: std.ArrayListUnmanaged(Atom) = .{}, decls: std.AutoHashMapUnmanaged(Module.Decl.Index, DeclMetadata) = .{}, const Reloc = struct { @@ -109,11 +111,28 @@ const Bases = struct { data: u64, }; -const UnnamedConstTable = std.AutoHashMapUnmanaged(Module.Decl.Index, std.ArrayListUnmanaged(struct { info: DeclBlock, code: []const u8 })); +const UnnamedConstTable = std.AutoHashMapUnmanaged(Module.Decl.Index, std.ArrayListUnmanaged(struct { info: Atom, code: []const u8 })); + +const LazySymbolTable = std.AutoArrayHashMapUnmanaged(Module.Decl.OptionalIndex, LazySymbolMetadata); + +const LazySymbolMetadata = struct { + const State = enum { unused, pending_flush, flushed }; + text_atom: Atom.Index = undefined, + rodata_atom: Atom.Index = undefined, + text_state: State = .unused, + rodata_state: State = .unused, + + fn numberOfAtoms(self: LazySymbolMetadata) u32 { + var n: u32 = 0; + if (self.text_state != .unused) n += 1; + if (self.rodata_state != .unused) n += 1; + return n; + } +}; pub const PtrWidth = enum { p32, p64 }; -pub const DeclBlock = struct { +pub const Atom = struct { type: aout.Sym.Type, /// offset in the text or data sects offset: ?u64, @@ -121,12 +140,36 @@ pub const DeclBlock = struct { sym_index: ?usize, /// offset into got got_index: ?usize, + /// We can optionally store code with the atom + /// It is still owned by whatever created it + /// This can be useful so that we don't need + /// to setup so much infrastructure just to store code + /// for stuff like LazySymbols. 
+ code: ?[]const u8 = null, pub const Index = u32; + + pub fn getOrCreateOffsetTableEntry(self: *Atom, plan9: *Plan9) usize { + if (self.got_index == null) self.got_index = plan9.allocateGotIndex(); + return self.got_index.?; + } + + pub fn getOrCreateSymbolTableEntry(self: *Atom, plan9: *Plan9) !usize { + if (self.sym_index == null) self.sym_index = try plan9.allocateSymbolIndex(); + return self.sym_index.?; + } + + // asserts that self.got_index != null + pub fn getOffsetTableAddress(self: Atom, plan9: *Plan9) u64 { + const ptr_bytes = @divExact(plan9.base.options.target.ptrBitWidth(), 8); + const got_addr = plan9.bases.data; + const got_index = self.got_index.?; + return got_addr + got_index * ptr_bytes; + } }; const DeclMetadata = struct { - index: DeclBlock.Index, + index: Atom.Index, exports: std.ArrayListUnmanaged(usize) = .{}, fn getExport(m: DeclMetadata, p9: *const Plan9, name: []const u8) ?usize { @@ -352,7 +395,7 @@ pub fn lowerUnnamedConst(self: *Plan9, tv: TypedValue, decl_index: Module.Decl.I const sym_index = try self.allocateSymbolIndex(); - const info: DeclBlock = .{ + const info: Atom = .{ .type = .d, .offset = null, .sym_index = sym_index, @@ -433,22 +476,22 @@ fn updateFinish(self: *Plan9, decl_index: Module.Decl.Index) !void { const is_fn = (decl.ty.zigTypeTag(mod) == .Fn); const sym_t: aout.Sym.Type = if (is_fn) .t else .d; - const decl_block = self.getDeclBlockPtr(self.decls.get(decl_index).?.index); + const atom = self.getAtomPtr(self.decls.get(decl_index).?.index); // write the internal linker metadata - decl_block.type = sym_t; + atom.type = sym_t; // write the symbol // we already have the got index const sym: aout.Sym = .{ .value = undefined, // the value of stuff gets filled in in flushModule - .type = decl_block.type, + .type = atom.type, .name = try self.base.allocator.dupe(u8, mod.intern_pool.stringToSlice(decl.name)), }; - if (decl_block.sym_index) |s| { + if (atom.sym_index) |s| { self.syms.items[s] = sym; } else { const s = try self.allocateSymbolIndex(); - decl_block.sym_index = s; + atom.sym_index = s; self.syms.items[s] = sym; } } @@ -461,6 +504,7 @@ fn allocateSymbolIndex(self: *Plan9) !usize { return self.syms.items.len - 1; } } + fn allocateGotIndex(self: *Plan9) usize { if (self.got_index_free_list.popOrNull()) |i| { return i; @@ -495,7 +539,7 @@ pub fn changeLine(l: *std.ArrayList(u8), delta_line: i32) !void { } } -// counts decls and unnamed consts +// counts decls, unnamed consts, and lazy syms fn atomCount(self: *Plan9) usize { var fn_decl_count: usize = 0; var itf_files = self.fn_decl_table.iterator(); @@ -510,7 +554,12 @@ fn atomCount(self: *Plan9) usize { while (it_unc.next()) |unnamed_consts| { unnamed_const_count += unnamed_consts.value_ptr.items.len; } - return data_decl_count + fn_decl_count + unnamed_const_count; + var lazy_atom_count: usize = 0; + var it_lazy = self.lazy_syms.iterator(); + while (it_lazy.next()) |kv| { + lazy_atom_count += kv.value_ptr.numberOfAtoms(); + } + return data_decl_count + fn_decl_count + unnamed_const_count + lazy_atom_count; } pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.Node) link.File.FlushError!void { @@ -532,7 +581,32 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No const mod = self.base.options.module orelse return error.LinkingWithoutZigSourceUnimplemented; - assert(self.got_len == self.atomCount() + self.got_index_free_list.items.len); + // finish up the lazy syms + if (self.lazy_syms.getPtr(.none)) |metadata| { + // Most lazy 
symbols can be updated on first use, but + // anyerror needs to wait for everything to be flushed. + if (metadata.text_state != .unused) self.updateLazySymbolAtom( + File.LazySymbol.initDecl(.code, null, mod), + metadata.text_atom, + ) catch |err| return switch (err) { + error.CodegenFail => error.FlushFailure, + else => |e| e, + }; + if (metadata.rodata_state != .unused) self.updateLazySymbolAtom( + File.LazySymbol.initDecl(.const_data, null, mod), + metadata.rodata_atom, + ) catch |err| return switch (err) { + error.CodegenFail => error.FlushFailure, + else => |e| e, + }; + } + for (self.lazy_syms.values()) |*metadata| { + if (metadata.text_state != .unused) metadata.text_state = .flushed; + if (metadata.rodata_state != .unused) metadata.rodata_state = .flushed; + } + // make sure the got table is good + const atom_count = self.atomCount(); + assert(self.got_len == atom_count + self.got_index_free_list.items.len); const got_size = self.got_len * if (!self.sixtyfour_bit) @as(u32, 4) else 8; var got_table = try self.base.allocator.alloc(u8, got_size); defer self.base.allocator.free(got_table); @@ -562,7 +636,7 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No var it = fentry.value_ptr.functions.iterator(); while (it.next()) |entry| { const decl_index = entry.key_ptr.*; - const decl_block = self.getDeclBlockPtr(self.decls.get(decl_index).?.index); + const atom = self.getAtomPtr(self.decls.get(decl_index).?.index); const out = entry.value_ptr.*; { // connect the previous decl to the next @@ -580,14 +654,13 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No iovecs_i += 1; const off = self.getAddr(text_i, .t); text_i += out.code.len; - decl_block.offset = off; + atom.offset = off; if (!self.sixtyfour_bit) { - mem.writeIntNative(u32, got_table[decl_block.got_index.? * 4 ..][0..4], @intCast(u32, off)); - mem.writeInt(u32, got_table[decl_block.got_index.? * 4 ..][0..4], @intCast(u32, off), self.base.options.target.cpu.arch.endian()); + mem.writeInt(u32, got_table[atom.got_index.? * 4 ..][0..4], @intCast(u32, off), self.base.options.target.cpu.arch.endian()); } else { - mem.writeInt(u64, got_table[decl_block.got_index.? * 8 ..][0..8], off, self.base.options.target.cpu.arch.endian()); + mem.writeInt(u64, got_table[atom.got_index.? * 8 ..][0..8], off, self.base.options.target.cpu.arch.endian()); } - self.syms.items[decl_block.sym_index.?].value = off; + self.syms.items[atom.sym_index.?].value = off; if (mod.decl_exports.get(decl_index)) |exports| { try self.addDeclExports(mod, decl_index, exports.items); } @@ -597,9 +670,30 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No // just a nop to make it even, the plan9 linker does this try linecountinfo.append(129); } - // etext symbol - self.syms.items[2].value = self.getAddr(text_i, .t); } + // the text lazy symbols + { + var it = self.lazy_syms.iterator(); + while (it.next()) |kv| { + const meta = kv.value_ptr; + const text_atom = if (meta.text_state != .unused) self.getAtomPtr(meta.text_atom) else continue; + const code = text_atom.code.?; + foff += code.len; + iovecs[iovecs_i] = .{ .iov_base = code.ptr, .iov_len = code.len }; + iovecs_i += 1; + const off = self.getAddr(text_i, .t); + text_i += code.len; + text_atom.offset = off; + if (!self.sixtyfour_bit) { + mem.writeInt(u32, got_table[text_atom.got_index.? * 4 ..][0..4], @intCast(u32, off), self.base.options.target.cpu.arch.endian()); + } else { + mem.writeInt(u64, got_table[text_atom.got_index.? 
* 8 ..][0..8], off, self.base.options.target.cpu.arch.endian()); + } + self.syms.items[text_atom.sym_index.?].value = off; + } + } + // etext symbol + self.syms.items[2].value = self.getAddr(text_i, .t); // global offset table is in data iovecs[iovecs_i] = .{ .iov_base = got_table.ptr, .iov_len = got_table.len }; iovecs_i += 1; @@ -609,7 +703,7 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No var it = self.data_decl_table.iterator(); while (it.next()) |entry| { const decl_index = entry.key_ptr.*; - const decl_block = self.getDeclBlockPtr(self.decls.get(decl_index).?.index); + const atom = self.getAtomPtr(self.decls.get(decl_index).?.index); const code = entry.value_ptr.*; foff += code.len; @@ -617,13 +711,13 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No iovecs_i += 1; const off = self.getAddr(data_i, .d); data_i += code.len; - decl_block.offset = off; + atom.offset = off; if (!self.sixtyfour_bit) { - mem.writeInt(u32, got_table[decl_block.got_index.? * 4 ..][0..4], @intCast(u32, off), self.base.options.target.cpu.arch.endian()); + mem.writeInt(u32, got_table[atom.got_index.? * 4 ..][0..4], @intCast(u32, off), self.base.options.target.cpu.arch.endian()); } else { - mem.writeInt(u64, got_table[decl_block.got_index.? * 8 ..][0..8], off, self.base.options.target.cpu.arch.endian()); + mem.writeInt(u64, got_table[atom.got_index.? * 8 ..][0..8], off, self.base.options.target.cpu.arch.endian()); } - self.syms.items[decl_block.sym_index.?].value = off; + self.syms.items[atom.sym_index.?].value = off; if (mod.decl_exports.get(decl_index)) |exports| { try self.addDeclExports(mod, decl_index, exports.items); } @@ -648,11 +742,30 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No self.syms.items[unnamed_const.info.sym_index.?].value = off; } } + // the lazy data symbols + var it_lazy = self.lazy_syms.iterator(); + while (it_lazy.next()) |kv| { + const meta = kv.value_ptr; + const data_atom = if (meta.rodata_state != .unused) self.getAtomPtr(meta.rodata_atom) else continue; + const code = data_atom.code.?; + foff += code.len; + iovecs[iovecs_i] = .{ .iov_base = code.ptr, .iov_len = code.len }; + iovecs_i += 1; + const off = self.getAddr(data_i, .d); + data_i += code.len; + data_atom.offset = off; + if (!self.sixtyfour_bit) { + mem.writeInt(u32, got_table[data_atom.got_index.? * 4 ..][0..4], @intCast(u32, off), self.base.options.target.cpu.arch.endian()); + } else { + mem.writeInt(u64, got_table[data_atom.got_index.? 
* 8 ..][0..8], off, self.base.options.target.cpu.arch.endian()); + } + self.syms.items[data_atom.sym_index.?].value = off; + } // edata symbol self.syms.items[0].value = self.getAddr(data_i, .b); + // end + self.syms.items[1].value = self.getAddr(data_i, .b); } - // edata - self.syms.items[1].value = self.getAddr(0x0, .b); var sym_buf = std.ArrayList(u8).init(self.base.allocator); try self.writeSyms(&sym_buf); const syms = try sym_buf.toOwnedSlice(); @@ -686,8 +799,10 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No const source_decl = mod.declPtr(source_decl_index); for (kv.value_ptr.items) |reloc| { const target_decl_index = reloc.target; - const target_decl_block = self.getDeclBlock(self.decls.get(target_decl_index).?.index); - const target_decl_offset = target_decl_block.offset.?; + const target_decl = mod.declPtr(target_decl_index); + _ = target_decl; + const target_atom = self.getAtom(self.decls.get(target_decl_index).?.index); + const target_decl_offset = target_atom.offset.?; const offset = reloc.offset; const addend = reloc.addend; @@ -722,7 +837,7 @@ fn addDeclExports( exports: []const *Module.Export, ) !void { const metadata = self.decls.getPtr(decl_index).?; - const decl_block = self.getDeclBlock(metadata.index); + const atom = self.getAtom(metadata.index); for (exports) |exp| { const exp_name = mod.intern_pool.stringToSlice(exp.opts.name); @@ -739,8 +854,8 @@ fn addDeclExports( } } const sym = .{ - .value = decl_block.offset.?, - .type = decl_block.type.toGlobal(), + .value = atom.offset.?, + .type = atom.type.toGlobal(), .name = try self.base.allocator.dupe(u8, exp_name), }; @@ -780,12 +895,12 @@ pub fn freeDecl(self: *Plan9, decl_index: Module.Decl.Index) void { } if (self.decls.fetchRemove(decl_index)) |const_kv| { var kv = const_kv; - const decl_block = self.getDeclBlock(kv.value.index); - if (decl_block.got_index) |i| { + const atom = self.getAtom(kv.value.index); + if (atom.got_index) |i| { // TODO: if this catch {} is triggered, an assertion in flushModule will be triggered, because got_index_free_list will have the wrong length self.got_index_free_list.append(self.base.allocator, i) catch {}; } - if (decl_block.sym_index) |i| { + if (atom.sym_index) |i| { self.syms_index_free_list.append(self.base.allocator, i) catch {}; self.syms.items[i] = aout.Sym.undefined_symbol; } @@ -809,11 +924,11 @@ fn freeUnnamedConsts(self: *Plan9, decl_index: Module.Decl.Index) void { unnamed_consts.clearAndFree(self.base.allocator); } -fn createDeclBlock(self: *Plan9) !DeclBlock.Index { +fn createAtom(self: *Plan9) !Atom.Index { const gpa = self.base.allocator; - const index = @intCast(DeclBlock.Index, self.decl_blocks.items.len); - const decl_block = try self.decl_blocks.addOne(gpa); - decl_block.* = .{ + const index = @intCast(Atom.Index, self.atoms.items.len); + const atom = try self.atoms.addOne(gpa); + atom.* = .{ .type = .t, .offset = null, .sym_index = null, @@ -822,11 +937,11 @@ fn createDeclBlock(self: *Plan9) !DeclBlock.Index { return index; } -pub fn seeDecl(self: *Plan9, decl_index: Module.Decl.Index) !DeclBlock.Index { +pub fn seeDecl(self: *Plan9, decl_index: Module.Decl.Index) !Atom.Index { const gop = try self.decls.getOrPut(self.base.allocator, decl_index); if (!gop.found_existing) { - const index = try self.createDeclBlock(); - self.getDeclBlockPtr(index).got_index = self.allocateGotIndex(); + const index = try self.createAtom(); + self.getAtomPtr(index).got_index = self.allocateGotIndex(); gop.value_ptr.* = .{ .index = index, .exports = 
.{}, @@ -846,6 +961,89 @@ pub fn updateDeclExports( _ = module; _ = exports; } + +pub fn getOrCreateAtomForLazySymbol(self: *Plan9, sym: File.LazySymbol) !Atom.Index { + const gop = try self.lazy_syms.getOrPut(self.base.allocator, sym.getDecl(self.base.options.module.?)); + errdefer _ = if (!gop.found_existing) self.lazy_syms.pop(); + + if (!gop.found_existing) gop.value_ptr.* = .{}; + + const metadata: struct { atom: *Atom.Index, state: *LazySymbolMetadata.State } = switch (sym.kind) { + .code => .{ .atom = &gop.value_ptr.text_atom, .state = &gop.value_ptr.text_state }, + .const_data => .{ .atom = &gop.value_ptr.rodata_atom, .state = &gop.value_ptr.rodata_state }, + }; + switch (metadata.state.*) { + .unused => metadata.atom.* = try self.createAtom(), + .pending_flush => return metadata.atom.*, + .flushed => {}, + } + metadata.state.* = .pending_flush; + const atom = metadata.atom.*; + _ = try self.getAtomPtr(atom).getOrCreateSymbolTableEntry(self); + _ = self.getAtomPtr(atom).getOrCreateOffsetTableEntry(self); + // anyerror needs to be deferred until flushModule + if (sym.getDecl(self.base.options.module.?) != .none) { + try self.updateLazySymbolAtom(sym, atom); + } + return atom; +} + +fn updateLazySymbolAtom(self: *Plan9, sym: File.LazySymbol, atom_index: Atom.Index) !void { + const gpa = self.base.allocator; + const mod = self.base.options.module.?; + + const atom = self.getAtomPtr(atom_index); + const local_sym_index = atom.sym_index.?; + + var required_alignment: u32 = undefined; + var code_buffer = std.ArrayList(u8).init(gpa); + defer code_buffer.deinit(); + + // create the symbol for the name + const name = try std.fmt.allocPrint(gpa, "__lazy_{s}_{}", .{ + @tagName(sym.kind), + sym.ty.fmt(mod), + }); + + const symbol: aout.Sym = .{ + .value = undefined, + .type = if (sym.kind == .code) .t else .d, + .name = name, + }; + self.syms.items[atom.sym_index.?] 
= symbol; + + // generate the code + const src = if (sym.ty.getOwnerDeclOrNull(mod)) |owner_decl| + mod.declPtr(owner_decl).srcLoc(mod) + else + Module.SrcLoc{ + .file_scope = undefined, + .parent_decl_node = undefined, + .lazy = .unneeded, + }; + const res = try codegen.generateLazySymbol( + &self.base, + src, + sym, + &required_alignment, + &code_buffer, + .none, + .{ .parent_atom_index = @intCast(u32, local_sym_index) }, + ); + const code = switch (res) { + .ok => code_buffer.items, + .fail => |em| { + log.err("{s}", .{em.msg}); + return error.CodegenFail; + }, + }; + // duped_code is freed when the atom is freed + var duped_code = try self.base.allocator.dupe(u8, code); + errdefer self.base.allocator.free(duped_code); + + atom.code = duped_code; +} + pub fn deinit(self: *Plan9) void { const gpa = self.base.allocator; { @@ -861,6 +1059,14 @@ pub fn deinit(self: *Plan9) void { self.freeUnnamedConsts(kv.key_ptr.*); } self.unnamed_const_atoms.deinit(gpa); + var it_lzc = self.lazy_syms.iterator(); + while (it_lzc.next()) |kv| { + if (kv.value_ptr.text_state != .unused) + gpa.free(self.syms.items[self.getAtom(kv.value_ptr.text_atom).sym_index.?].name); + if (kv.value_ptr.rodata_state != .unused) + gpa.free(self.syms.items[self.getAtom(kv.value_ptr.rodata_atom).sym_index.?].name); + } + self.lazy_syms.deinit(gpa); var itf_files = self.fn_decl_table.iterator(); while (itf_files.next()) |ent| { // get the submap @@ -883,7 +1089,12 @@ pub fn deinit(self: *Plan9) void { self.syms_index_free_list.deinit(gpa); self.file_segments.deinit(gpa); self.path_arena.deinit(); - self.decl_blocks.deinit(gpa); + for (self.atoms.items) |atom| { + if (atom.code) |c| { + gpa.free(c); + } + } + self.atoms.deinit(gpa); { var it = self.decls.iterator(); @@ -911,7 +1122,7 @@ pub fn openPath(allocator: Allocator, sub_path: []const u8, options: link.Option self.bases = defaultBaseAddrs(options.target.cpu.arch); - // first 3 symbols in our table are edata, end, etext + // first 4 symbols in our table are edata, end, etext, and got try self.syms.appendSlice(self.base.allocator, &.{ .{ .value = 0xcafebabe, @@ -928,6 +1139,12 @@ pub fn openPath(allocator: Allocator, sub_path: []const u8, options: link.Option .type = .T, .name = "etext", }, + // we include the global offset table to make it easier for debugging + .{ + .value = self.getAddr(0, .d), // the global offset table starts at 0 + .type = .d, + .name = "__GOT", + }, }); return self; @@ -950,6 +1167,11 @@ pub fn writeSyms(self: *Plan9, buf: *std.ArrayList(u8)) !void { const mod = self.base.options.module.?; const ip = &mod.intern_pool; const writer = buf.writer(); + // write the first four symbols (edata, etext, end, __GOT) + try self.writeSym(writer, self.syms.items[0]); + try self.writeSym(writer, self.syms.items[1]); + try self.writeSym(writer, self.syms.items[2]); + try self.writeSym(writer, self.syms.items[3]); // write the f symbols { var it = self.file_segments.iterator(); @@ -968,8 +1190,8 @@ pub fn writeSyms(self: *Plan9, buf: *std.ArrayList(u8)) !void { while (it.next()) |entry| { const decl_index = entry.key_ptr.*; const decl_metadata = self.decls.get(decl_index).?; - const decl_block = self.getDeclBlock(decl_metadata.index); - const sym = self.syms.items[decl_block.sym_index.?]; + const atom = self.getAtom(decl_metadata.index); + const sym = self.syms.items[atom.sym_index.?]; try self.writeSym(writer, sym); if (self.base.options.module.?.decl_exports.get(decl_index)) |exports| { for (exports.items) |e| if (decl_metadata.getExport(self, 
ip.stringToSlice(e.opts.name))) |exp_i| { @@ -978,6 +1200,16 @@ pub fn writeSyms(self: *Plan9, buf: *std.ArrayList(u8)) !void { } } } + // the data lazy symbols + { + var it = self.lazy_syms.iterator(); + while (it.next()) |kv| { + const meta = kv.value_ptr; + const data_atom = if (meta.rodata_state != .unused) self.getAtomPtr(meta.rodata_atom) else continue; + const sym = self.syms.items[data_atom.sym_index.?]; + try self.writeSym(writer, sym); + } + } // text symbols are the hardest: // the file of a text symbol is the .z symbol before it // so we have to write everything in the right order @@ -994,8 +1226,8 @@ pub fn writeSyms(self: *Plan9, buf: *std.ArrayList(u8)) !void { while (submap_it.next()) |entry| { const decl_index = entry.key_ptr.*; const decl_metadata = self.decls.get(decl_index).?; - const decl_block = self.getDeclBlock(decl_metadata.index); - const sym = self.syms.items[decl_block.sym_index.?]; + const atom = self.getAtom(decl_metadata.index); + const sym = self.syms.items[atom.sym_index.?]; try self.writeSym(writer, sym); if (self.base.options.module.?.decl_exports.get(decl_index)) |exports| { for (exports.items) |e| if (decl_metadata.getExport(self, ip.stringToSlice(e.opts.name))) |exp_i| { @@ -1007,6 +1239,16 @@ pub fn writeSyms(self: *Plan9, buf: *std.ArrayList(u8)) !void { } } } + // the text lazy symbols + { + var it = self.lazy_syms.iterator(); + while (it.next()) |kv| { + const meta = kv.value_ptr; + const text_atom = if (meta.text_state != .unused) self.getAtomPtr(meta.text_atom) else continue; + const sym = self.syms.items[text_atom.sym_index.?]; + try self.writeSym(writer, sym); + } + } } } @@ -1056,10 +1298,10 @@ pub fn getDeclVAddr( return 0; } -pub fn getDeclBlock(self: *const Plan9, index: DeclBlock.Index) DeclBlock { - return self.decl_blocks.items[index]; +pub fn getAtom(self: *const Plan9, index: Atom.Index) Atom { + return self.atoms.items[index]; } -fn getDeclBlockPtr(self: *Plan9, index: DeclBlock.Index) *DeclBlock { - return &self.decl_blocks.items[index]; +fn getAtomPtr(self: *Plan9, index: Atom.Index) *Atom { + return &self.atoms.items[index]; } -- cgit v1.2.3 From 5343a2f566a5c235055f4aebb4ab9c10773e57f0 Mon Sep 17 00:00:00 2001 From: Jacob G-W Date: Wed, 14 Jun 2023 15:43:46 -0400 Subject: plan9: revamp the relocation system to allow decl refs --- src/arch/x86_64/CodeGen.zig | 5 ++ src/arch/x86_64/Emit.zig | 8 ++ src/codegen.zig | 9 +- src/link/Plan9.zig | 195 ++++++++++++++++++++++++++++---------------- 4 files changed, 139 insertions(+), 78 deletions(-) (limited to 'src/arch') diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index e798d46f27..f1669256c8 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -130,6 +130,8 @@ const Owner = union(enum) { } else if (ctx.bin_file.cast(link.File.Coff)) |coff_file| { const atom = try coff_file.getOrCreateAtomForDecl(decl_index); return coff_file.getAtom(atom).getSymbolIndex().?; + } else if (ctx.bin_file.cast(link.File.Plan9)) |p9_file| { + return p9_file.seeDecl(decl_index); } else unreachable; }, .lazy_sym => |lazy_sym| { @@ -141,6 +143,9 @@ const Owner = union(enum) { const atom = coff_file.getOrCreateAtomForLazySymbol(lazy_sym) catch |err| return ctx.fail("{s} creating lazy symbol", .{@errorName(err)}); return coff_file.getAtom(atom).getSymbolIndex().?; + } else if (ctx.bin_file.cast(link.File.Plan9)) |p9_file| { + return p9_file.getOrCreateAtomForLazySymbol(lazy_sym) catch |err| + return ctx.fail("{s} creating lazy symbol", .{@errorName(err)}); } else 
unreachable;
        },
    }
diff --git a/src/arch/x86_64/Emit.zig b/src/arch/x86_64/Emit.zig
index 506092ff17..78ff918715 100644
--- a/src/arch/x86_64/Emit.zig
+++ b/src/arch/x86_64/Emit.zig
@@ -118,6 +118,14 @@ pub fn emitMir(emit: *Emit) Error!void {
                     .pcrel = true,
                     .length = 2,
                 });
+            } else if (emit.bin_file.cast(link.File.Plan9)) |p9_file| {
+                const atom_index = symbol.atom_index;
+                try p9_file.addReloc(atom_index, .{ // TODO we may need to add a .type field to the relocs if they are .linker_got instead of just .linker_direct
+                    .target = symbol.sym_index, // we set sym_index to just be the atom index
+                    .offset = @intCast(u32, end_offset - 4),
+                    .addend = 0,
+                    .pcrel = true,
+                });
             } else return emit.fail("TODO implement linker reloc for {s}", .{
                 @tagName(emit.bin_file.tag),
             }),
diff --git a/src/codegen.zig b/src/codegen.zig
index 7625fbe031..d446200a3b 100644
--- a/src/codegen.zig
+++ b/src/codegen.zig
@@ -879,12 +879,9 @@ fn genUnnamedConst(
         return GenResult.mcv(.{ .load_direct = local_sym_index });
     } else if (bin_file.cast(link.File.Coff)) |_| {
         return GenResult.mcv(.{ .load_direct = local_sym_index });
-    } else if (bin_file.cast(link.File.Plan9)) |p9| {
-        const ptr_bits = target.ptrBitWidth();
-        const ptr_bytes: u64 = @divExact(ptr_bits, 8);
-        const got_index = local_sym_index; // the plan9 backend returns the got_index
-        const got_addr = p9.bases.data + got_index * ptr_bytes;
-        return GenResult.mcv(.{ .memory = got_addr });
+    } else if (bin_file.cast(link.File.Plan9)) |_| {
+        const atom_index = local_sym_index; // plan9 returns the atom_index
+        return GenResult.mcv(.{ .load_direct = atom_index });
     } else {
         return GenResult.fail(bin_file.allocator, src_loc, "TODO genUnnamedConst for target {}", .{target});
     }
diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig
index dccfa1d6c0..6433fb2762 100644
--- a/src/link/Plan9.zig
+++ b/src/link/Plan9.zig
@@ -81,7 +81,7 @@ unnamed_const_atoms: UnnamedConstTable = .{},
 
 lazy_syms: LazySymbolTable = .{},
 
-relocs: std.AutoHashMapUnmanaged(Module.Decl.Index, std.ArrayListUnmanaged(Reloc)) = .{},
+relocs: std.AutoHashMapUnmanaged(Atom.Index, std.ArrayListUnmanaged(Reloc)) = .{},
 
 hdr: aout.ExecHdr = undefined,
 // relocs: std.
@@ -100,9 +100,10 @@ atoms: std.ArrayListUnmanaged(Atom) = .{},
 decls: std.AutoHashMapUnmanaged(Module.Decl.Index, DeclMetadata) = .{},
 
 const Reloc = struct {
-    target: Module.Decl.Index,
+    target: Atom.Index,
     offset: u64,
     addend: u32,
+    pcrel: bool = false,
 };
 
 const Bases = struct {
@@ -111,7 +112,7 @@ const Bases = struct {
     data: u64,
 };
 
-const UnnamedConstTable = std.AutoHashMapUnmanaged(Module.Decl.Index, std.ArrayListUnmanaged(struct { info: Atom, code: []const u8 }));
+const UnnamedConstTable = std.AutoHashMapUnmanaged(Module.Decl.Index, std.ArrayListUnmanaged(Atom.Index));
 
 const LazySymbolTable = std.AutoArrayHashMapUnmanaged(Module.Decl.OptionalIndex, LazySymbolMetadata);
 
@@ -140,12 +141,36 @@ pub const Atom = struct {
     sym_index: ?usize,
     /// offset into got
     got_index: ?usize,
-    /// We can optionally store code with the atom
-    /// It is still owned by whatever created it
-    /// This can be useful so that we don't need
-    /// to setup so much infrastructure just to store code
-    /// for stuff like LazySymbols.
-    code: ?[]const u8 = null,
+    /// We include the code here to be used in relocs
+    /// In the case of unnamed_const_atoms and lazy_syms, this atom owns the code.
+    /// But, in the case of function and data decls, they own the code and this field
+    /// is just a pointer for convenience.
+ code: CodePtr, + + const CodePtr = struct { + code_ptr: ?[*]u8, + other: union { + code_len: usize, + decl_index: Module.Decl.Index, + }, + fn getCode(self: CodePtr, plan9: *const Plan9) []u8 { + const mod = plan9.base.options.module.?; + return if (self.code_ptr) |p| p[0..self.other.code_len] else blk: { + const decl_index = self.other.decl_index; + const decl = mod.declPtr(decl_index); + if (decl.ty.zigTypeTag(mod) == .Fn) { + const table = plan9.fn_decl_table.get(decl.getFileScope(mod)).?.functions; + const output = table.get(decl_index).?; + break :blk output.code; + } else { + break :blk plan9.data_decl_table.get(decl_index).?; + } + }; + } + fn getOwnedCode(self: CodePtr) ?[]u8 { + return if (self.code_ptr) |p| p[0..self.other.code_len] else null; + } + }; pub const Index = u32; @@ -329,7 +354,7 @@ pub fn updateFunc(self: *Plan9, mod: *Module, func_index: Module.Fn.Index, air: const decl = mod.declPtr(decl_index); self.freeUnnamedConsts(decl_index); - _ = try self.seeDecl(decl_index); + const atom_idx = try self.seeDecl(decl_index); var code_buffer = std.ArrayList(u8).init(self.base.allocator); defer code_buffer.deinit(); @@ -363,6 +388,10 @@ pub fn updateFunc(self: *Plan9, mod: *Module, func_index: Module.Fn.Index, air: return; }, }; + self.getAtomPtr(atom_idx).code = .{ + .code_ptr = null, + .other = .{ .decl_index = decl_index }, + }; const out: FnDeclOutput = .{ .code = code, .lineinfo = try dbg_line_buffer.toOwnedSlice(), @@ -394,12 +423,13 @@ pub fn lowerUnnamedConst(self: *Plan9, tv: TypedValue, decl_index: Module.Decl.I const name = try std.fmt.allocPrint(self.base.allocator, "__unnamed_{s}_{d}", .{ decl_name, index }); const sym_index = try self.allocateSymbolIndex(); - - const info: Atom = .{ + const new_atom_idx = try self.createAtom(); + var info: Atom = .{ .type = .d, .offset = null, .sym_index = sym_index, .got_index = self.allocateGotIndex(), + .code = undefined, // filled in later }; const sym: aout.Sym = .{ .value = undefined, @@ -411,7 +441,7 @@ pub fn lowerUnnamedConst(self: *Plan9, tv: TypedValue, decl_index: Module.Decl.I const res = try codegen.generateSymbol(&self.base, decl.srcLoc(mod), tv, &code_buffer, .{ .none = {}, }, .{ - .parent_atom_index = @enumToInt(decl_index), + .parent_atom_index = new_atom_idx, }); const code = switch (res) { .ok => code_buffer.items, @@ -425,9 +455,12 @@ pub fn lowerUnnamedConst(self: *Plan9, tv: TypedValue, decl_index: Module.Decl.I // duped_code is freed when the unnamed const is freed var duped_code = try self.base.allocator.dupe(u8, code); errdefer self.base.allocator.free(duped_code); - try unnamed_consts.append(self.base.allocator, .{ .info = info, .code = duped_code }); - // we return the got_index to codegen so that it can reference to the place of the data in the got - return @intCast(u32, info.got_index.?); + const new_atom = self.getAtomPtr(new_atom_idx); + new_atom.* = info; + new_atom.code = .{ .code_ptr = duped_code.ptr, .other = .{ .code_len = duped_code.len } }; + try unnamed_consts.append(self.base.allocator, new_atom_idx); + // we return the new_atom_idx to codegen + return new_atom_idx; } pub fn updateDecl(self: *Plan9, mod: *Module, decl_index: Module.Decl.Index) !void { @@ -442,7 +475,7 @@ pub fn updateDecl(self: *Plan9, mod: *Module, decl_index: Module.Decl.Index) !vo } } - _ = try self.seeDecl(decl_index); + const atom_idx = try self.seeDecl(decl_index); var code_buffer = std.ArrayList(u8).init(self.base.allocator); defer code_buffer.deinit(); @@ -452,7 +485,7 @@ pub fn updateDecl(self: *Plan9, mod: 
*Module, decl_index: Module.Decl.Index) !vo .ty = decl.ty, .val = decl_val, }, &code_buffer, .{ .none = {} }, .{ - .parent_atom_index = @enumToInt(decl_index), + .parent_atom_index = @intCast(Atom.Index, atom_idx), }); const code = switch (res) { .ok => code_buffer.items, @@ -464,6 +497,7 @@ pub fn updateDecl(self: *Plan9, mod: *Module, decl_index: Module.Decl.Index) !vo }; try self.data_decl_table.ensureUnusedCapacity(self.base.allocator, 1); const duped_code = try self.base.allocator.dupe(u8, code); + self.getAtomPtr(self.decls.get(decl_index).?.index).code = .{ .code_ptr = null, .other = .{ .decl_index = decl_index } }; if (self.data_decl_table.fetchPutAssumeCapacity(decl_index, duped_code)) |old_entry| { self.base.allocator.free(old_entry.value); } @@ -636,6 +670,7 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No var it = fentry.value_ptr.functions.iterator(); while (it.next()) |entry| { const decl_index = entry.key_ptr.*; + const decl = mod.declPtr(decl_index); const atom = self.getAtomPtr(self.decls.get(decl_index).?.index); const out = entry.value_ptr.*; { @@ -655,6 +690,7 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No const off = self.getAddr(text_i, .t); text_i += out.code.len; atom.offset = off; + log.debug("write text decl {*} ({}), lines {d} to {d}.;__GOT+0x{x} vaddr: 0x{x}", .{ decl, decl.name.fmt(&mod.intern_pool), out.start_line + 1, out.end_line, atom.got_index.? * 8, off }); if (!self.sixtyfour_bit) { mem.writeInt(u32, got_table[atom.got_index.? * 4 ..][0..4], @intCast(u32, off), self.base.options.target.cpu.arch.endian()); } else { @@ -677,7 +713,7 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No while (it.next()) |kv| { const meta = kv.value_ptr; const text_atom = if (meta.text_state != .unused) self.getAtomPtr(meta.text_atom) else continue; - const code = text_atom.code.?; + const code = text_atom.code.getOwnedCode().?; foff += code.len; iovecs[iovecs_i] = .{ .iov_base = code.ptr, .iov_len = code.len }; iovecs_i += 1; @@ -725,21 +761,22 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No // write the unnamed constants after the other data decls var it_unc = self.unnamed_const_atoms.iterator(); while (it_unc.next()) |unnamed_consts| { - for (unnamed_consts.value_ptr.items) |*unnamed_const| { - const code = unnamed_const.code; - log.debug("write unnamed const: ({s})", .{self.syms.items[unnamed_const.info.sym_index.?].name}); + for (unnamed_consts.value_ptr.items) |atom_idx| { + const atom = self.getAtomPtr(atom_idx); + const code = atom.code.getOwnedCode().?; // unnamed consts must own their code + log.debug("write unnamed const: ({s})", .{self.syms.items[atom.sym_index.?].name}); foff += code.len; iovecs[iovecs_i] = .{ .iov_base = code.ptr, .iov_len = code.len }; iovecs_i += 1; const off = self.getAddr(data_i, .d); data_i += code.len; - unnamed_const.info.offset = off; + atom.offset = off; if (!self.sixtyfour_bit) { - mem.writeInt(u32, got_table[unnamed_const.info.got_index.? * 4 ..][0..4], @intCast(u32, off), self.base.options.target.cpu.arch.endian()); + mem.writeInt(u32, got_table[atom.got_index.? * 4 ..][0..4], @intCast(u32, off), self.base.options.target.cpu.arch.endian()); } else { - mem.writeInt(u64, got_table[unnamed_const.info.got_index.? * 8 ..][0..8], off, self.base.options.target.cpu.arch.endian()); + mem.writeInt(u64, got_table[atom.got_index.? 
* 8 ..][0..8], off, self.base.options.target.cpu.arch.endian()); } - self.syms.items[unnamed_const.info.sym_index.?].value = off; + self.syms.items[atom.sym_index.?].value = off; } } // the lazy data symbols @@ -747,7 +784,7 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No while (it_lazy.next()) |kv| { const meta = kv.value_ptr; const data_atom = if (meta.rodata_state != .unused) self.getAtomPtr(meta.rodata_atom) else continue; - const code = data_atom.code.?; + const code = data_atom.code.getOwnedCode().?; // lazy symbols must own their code foff += code.len; iovecs[iovecs_i] = .{ .iov_base = code.ptr, .iov_len = code.len }; iovecs_i += 1; @@ -795,35 +832,31 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No { var it = self.relocs.iterator(); while (it.next()) |kv| { - const source_decl_index = kv.key_ptr.*; - const source_decl = mod.declPtr(source_decl_index); + const source_atom_index = kv.key_ptr.*; + const source_atom = self.getAtom(source_atom_index); + const source_atom_symbol = self.syms.items[source_atom.sym_index.?]; for (kv.value_ptr.items) |reloc| { - const target_decl_index = reloc.target; - const target_decl = mod.declPtr(target_decl_index); - _ = target_decl; - const target_atom = self.getAtom(self.decls.get(target_decl_index).?.index); - const target_decl_offset = target_atom.offset.?; + const target_atom_index = reloc.target; + const target_atom = self.getAtomPtr(target_atom_index); + const target_symbol = self.syms.items[target_atom.sym_index.?]; + const target_offset = target_atom.offset.?; const offset = reloc.offset; const addend = reloc.addend; - const code = blk: { - const is_fn = source_decl.ty.zigTypeTag(mod) == .Fn; - if (is_fn) { - const table = self.fn_decl_table.get(source_decl.getFileScope(mod)).?.functions; - const output = table.get(source_decl_index).?; - break :blk output.code; - } else { - const code = self.data_decl_table.get(source_decl_index).?; - break :blk code; - } - }; + const code = source_atom.code.getCode(self); - if (!self.sixtyfour_bit) { - mem.writeInt(u32, code[@intCast(usize, offset)..][0..4], @intCast(u32, target_decl_offset + addend), self.base.options.target.cpu.arch.endian()); + if (reloc.pcrel) { + const disp = @intCast(i32, target_offset) - @intCast(i32, source_atom.offset.?) 
- 4 - @intCast(i32, offset); + mem.writeInt(i32, code[@intCast(usize, offset)..][0..4], @intCast(i32, disp), self.base.options.target.cpu.arch.endian()); } else { - mem.writeInt(u64, code[@intCast(usize, offset)..][0..8], target_decl_offset + addend, self.base.options.target.cpu.arch.endian()); + if (!self.sixtyfour_bit) { + mem.writeInt(u32, code[@intCast(usize, offset)..][0..4], @intCast(u32, target_offset + addend), self.base.options.target.cpu.arch.endian()); + } else { + mem.writeInt(u64, code[@intCast(usize, offset)..][0..8], target_offset + addend, self.base.options.target.cpu.arch.endian()); + } } + log.debug("relocating the address of '{s}' + {d} into '{s}' + {d} ({s}[{d}] = 0x{x} + 0x{x})", .{ target_symbol.name, addend, source_atom_symbol.name, offset, source_atom_symbol.name, offset, target_offset, addend }); } } } @@ -908,18 +941,19 @@ pub fn freeDecl(self: *Plan9, decl_index: Module.Decl.Index) void { } self.freeUnnamedConsts(decl_index); { - const relocs = self.relocs.getPtr(decl_index) orelse return; + const atom_index = self.decls.get(decl_index).?.index; + const relocs = self.relocs.getPtr(atom_index) orelse return; relocs.clearAndFree(self.base.allocator); - assert(self.relocs.remove(decl_index)); + assert(self.relocs.remove(atom_index)); } } fn freeUnnamedConsts(self: *Plan9, decl_index: Module.Decl.Index) void { const unnamed_consts = self.unnamed_const_atoms.getPtr(decl_index) orelse return; - for (unnamed_consts.items) |c| { - self.base.allocator.free(self.syms.items[c.info.sym_index.?].name); - self.base.allocator.free(c.code); - self.syms.items[c.info.sym_index.?] = aout.Sym.undefined_symbol; - self.syms_index_free_list.append(self.base.allocator, c.info.sym_index.?) catch {}; + for (unnamed_consts.items) |atom_idx| { + const atom = self.getAtom(atom_idx); + self.base.allocator.free(self.syms.items[atom.sym_index.?].name); + self.syms.items[atom.sym_index.?] = aout.Sym.undefined_symbol; + self.syms_index_free_list.append(self.base.allocator, atom.sym_index.?) catch {}; } unnamed_consts.clearAndFree(self.base.allocator); } @@ -933,6 +967,7 @@ fn createAtom(self: *Plan9) !Atom.Index { .offset = null, .sym_index = null, .got_index = null, + .code = undefined, }; return index; } @@ -992,9 +1027,6 @@ fn updateLazySymbolAtom(self: *Plan9, sym: File.LazySymbol, atom_index: Atom.Ind const gpa = self.base.allocator; const mod = self.base.options.module.?; - const atom = self.getAtomPtr(atom_index); - const local_sym_index = atom.sym_index.?; - var required_alignment: u32 = undefined; var code_buffer = std.ArrayList(u8).init(gpa); defer code_buffer.deinit(); @@ -1010,7 +1042,7 @@ fn updateLazySymbolAtom(self: *Plan9, sym: File.LazySymbol, atom_index: Atom.Ind .type = if (sym.kind == .code) .t else .d, .name = name, }; - self.syms.items[atom.sym_index.?] = symbol; + self.syms.items[self.getAtomPtr(atom_index).sym_index.?]
= symbol; // generate the code const src = if (sym.ty.getOwnerDeclOrNull(mod)) |owner_decl| @@ -1028,7 +1060,7 @@ fn updateLazySymbolAtom(self: *Plan9, sym: File.LazySymbol, atom_index: Atom.Ind &required_alignment, &code_buffer, .none, - .{ .parent_atom_index = @intCast(u32, local_sym_index) }, + .{ .parent_atom_index = @intCast(Atom.Index, atom_index) }, ); const code = switch (res) { .ok => code_buffer.items, @@ -1040,8 +1072,10 @@ fn updateLazySymbolAtom(self: *Plan9, sym: File.LazySymbol, atom_index: Atom.Ind // duped_code is freed when the atom is freed var duped_code = try self.base.allocator.dupe(u8, code); errdefer self.base.allocator.free(duped_code); - - atom.code = duped_code; + self.getAtomPtr(atom_index).code = .{ + .code_ptr = duped_code.ptr, + .other = .{ .code_len = duped_code.len }, + }; } pub fn deinit(self: *Plan9) void { @@ -1089,8 +1123,8 @@ pub fn deinit(self: *Plan9) void { self.syms_index_free_list.deinit(gpa); self.file_segments.deinit(gpa); self.path_arena.deinit(); - for (self.atoms.items) |atom| { - if (atom.code) |c| { + for (self.atoms.items) |a| { + if (a.code.getOwnedCode()) |c| { gpa.free(c); } } @@ -1151,7 +1185,7 @@ pub fn openPath(allocator: Allocator, sub_path: []const u8, options: link.Option } pub fn writeSym(self: *Plan9, w: anytype, sym: aout.Sym) !void { - log.debug("write sym{{name: {s}, value: {x}}}", .{ sym.name, sym.value }); + // log.debug("write sym{{name: {s}, value: {x}}}", .{ sym.name, sym.value }); if (sym.type == .bad) return; // we don't want to write free'd symbols if (!self.sixtyfour_bit) { try w.writeIntBig(u32, @intCast(u32, sym.value)); @@ -1210,6 +1244,17 @@ pub fn writeSyms(self: *Plan9, buf: *std.ArrayList(u8)) !void { try self.writeSym(writer, sym); } } + // unnamed consts + { + var it = self.unnamed_const_atoms.iterator(); + while (it.next()) |kv| { + const consts = kv.value_ptr; + for (consts.items) |atom_index| { + const sym = self.syms.items[self.getAtom(atom_index).sym_index.?]; + try self.writeSym(writer, sym); + } + } + } // text symbols are the hardest: // the file of a text symbol is the .z symbol before it // so we have to write everything in the right order @@ -1266,6 +1311,7 @@ pub fn getDeclVAddr( ) !u64 { const mod = self.base.options.module.?; const decl = mod.declPtr(decl_index); + // we might already know the vaddr if (decl.ty.zigTypeTag(mod) == .Fn) { var start = self.bases.text; var it_file = self.fn_decl_table.iterator(); @@ -1285,17 +1331,22 @@ pub fn getDeclVAddr( start += kv.value_ptr.len; } } + const atom_index = try self.seeDecl(decl_index); // the parent_atom_index in this case is just the decl_index of the parent - const gop = try self.relocs.getOrPut(self.base.allocator, @intToEnum(Module.Decl.Index, reloc_info.parent_atom_index)); - if (!gop.found_existing) { - gop.value_ptr.* = .{}; - } - try gop.value_ptr.append(self.base.allocator, .{ - .target = decl_index, + try self.addReloc(reloc_info.parent_atom_index, .{ + .target = atom_index, .offset = reloc_info.offset, .addend = reloc_info.addend, }); - return 0; + return 0xcafebabe; +} + +pub fn addReloc(self: *Plan9, parent_index: Atom.Index, reloc: Reloc) !void { + const gop = try self.relocs.getOrPut(self.base.allocator, parent_index); + if (!gop.found_existing) { + gop.value_ptr.* = .{}; + } + try gop.value_ptr.append(self.base.allocator, reloc); } pub fn getAtom(self: *const Plan9, index: Atom.Index) Atom { -- cgit v1.2.3 From 098b0b50ab3980a257ad0840034f21ef5349ac8b Mon Sep 17 00:00:00 2001 From: Luuk de Gram Date: Wed, 14 Jun 2023 20:01:52 
+0200 Subject: wasm: fix lowerParentPtr offsets These were incorrectly merged during the InternPool migration. This commit forward-fixes that and reinstates the correct logic. --- src/arch/wasm/CodeGen.zig | 58 ++++++++++++++++---------------------------------------------------------------------- 1 file changed, 12 insertions(+), 46 deletions(-) (limited to 'src/arch') diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index 877db4b623..9a19ca439c 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -2938,49 +2938,31 @@ fn wrapOperand(func: *CodeGen, operand: WValue, ty: Type) InnerError!WValue { return WValue{ .stack = {} }; } -fn lowerParentPtr(func: *CodeGen, ptr_val: Value) InnerError!WValue { +fn lowerParentPtr(func: *CodeGen, ptr_val: Value, offset: u32) InnerError!WValue { const mod = func.bin_file.base.options.module.?; const ptr = mod.intern_pool.indexToKey(ptr_val.ip_index).ptr; switch (ptr.addr) { .decl => |decl_index| { - return func.lowerParentPtrDecl(ptr_val, decl_index, 0); + return func.lowerParentPtrDecl(ptr_val, decl_index, offset); }, .mut_decl => |mut_decl| { const decl_index = mut_decl.decl; - return func.lowerParentPtrDecl(ptr_val, decl_index, 0); - }, - .int, .eu_payload => |tag| return func.fail("TODO: Implement lowerParentPtr for {}", .{tag}), - .opt_payload => |base_ptr| { - return func.lowerParentPtr(base_ptr.toValue()); + return func.lowerParentPtrDecl(ptr_val, decl_index, offset); }, + .eu_payload => |tag| return func.fail("TODO: Implement lowerParentPtr for {}", .{tag}), + .int => |base| return func.lowerConstant(base.toValue(), Type.usize), + .opt_payload => |base_ptr| return func.lowerParentPtr(base_ptr.toValue(), offset), .comptime_field => unreachable, .elem => |elem| { const index = elem.index; const elem_type = mod.intern_pool.typeOf(elem.base).toType().elemType2(mod); - const offset = index * elem_type.abiSize(mod); - const array_ptr = try func.lowerParentPtr(elem.base.toValue()); - - return switch (array_ptr) { - .memory => |ptr_| WValue{ - .memory_offset = .{ - .pointer = ptr_, - .offset = @intCast(u32, offset), - }, - }, - .memory_offset => |mem_off| WValue{ - .memory_offset = .{ - .pointer = mem_off.pointer, - .offset = @intCast(u32, offset) + mem_off.offset, - }, - }, - else => unreachable, - }; + const elem_offset = index * elem_type.abiSize(mod); + return func.lowerParentPtr(elem.base.toValue(), @intCast(u32, elem_offset + offset)); }, .field => |field| { const parent_ty = mod.intern_pool.typeOf(field.base).toType().childType(mod); - const parent_ptr = try func.lowerParentPtr(field.base.toValue()); - const offset = switch (parent_ty.zigTypeTag(mod)) { + const field_offset = switch (parent_ty.zigTypeTag(mod)) { .Struct => switch (parent_ty.containerLayout(mod)) { .Packed => parent_ty.packedStructFieldByteOffset(@intCast(usize, field.index), mod), else => parent_ty.structFieldOffset(@intCast(usize, field.index), mod), @@ -2993,8 +2975,7 @@ fn lowerParentPtr(func: *CodeGen, ptr_val: Value) InnerError!WValue { if (layout.payload_align > layout.tag_align) break :blk 0; // tag is stored first so calculate offset from where payload starts - const offset = @intCast(u32, std.mem.alignForwardGeneric(u64, layout.tag_size, layout.tag_align)); - break :blk offset; + break :blk @intCast(u32, std.mem.alignForwardGeneric(u64, layout.tag_size, layout.tag_align)); }, }, .Pointer => switch (parent_ty.ptrSize(mod)) { @@ -3007,22 +2988,7 @@ fn lowerParentPtr(func: *CodeGen, ptr_val: Value) InnerError!WValue { }, else => unreachable, }; - - return switch (parent_ptr) { - .memory =>
|ptr_| WValue{ - .memory_offset = .{ - .pointer = ptr_, - .offset = @intCast(u32, offset), - }, - }, - .memory_offset => |mem_off| WValue{ - .memory_offset = .{ - .pointer = mem_off.pointer, - .offset = @intCast(u32, offset) + mem_off.offset, - }, - }, - else => unreachable, - }; + return func.lowerParentPtr(field.base.toValue(), @intCast(u32, offset + field_offset)); }, } } @@ -3230,7 +3196,7 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue { .decl => |decl| return func.lowerDeclRefValue(.{ .ty = ty, .val = val }, decl, 0), .mut_decl => |mut_decl| return func.lowerDeclRefValue(.{ .ty = ty, .val = val }, mut_decl.decl, 0), .int => |int| return func.lowerConstant(int.toValue(), mod.intern_pool.typeOf(int).toType()), - .opt_payload, .elem, .field => return func.lowerParentPtr(val), + .opt_payload, .elem, .field => return func.lowerParentPtr(val, 0), else => return func.fail("Wasm TODO: lowerConstant for other const addr tag {}", .{ptr.addr}), }, .opt => if (ty.optionalReprIsPayload(mod)) { -- cgit v1.2.3 From e3db210cf1007f87930c97c072c54b2fb8ae0b8c Mon Sep 17 00:00:00 2001 From: Luuk de Gram Date: Thu, 15 Jun 2023 19:30:00 +0200 Subject: wasm: support calling aliased function pointers When lowering a decl value, we verify whether its owner decl index equals the decl index of the decl being lowered. When this is not the case, we are lowering an alias. We now lower the owner decl instead and call its symbol to ensure its type is generated correctly. --- src/arch/wasm/CodeGen.zig | 11 +++++++++++ test/behavior.zig | 6 +----- 2 files changed, 12 insertions(+), 5 deletions(-) (limited to 'src/arch') diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index 9a19ca439c..aa44dc2bc8 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -3008,6 +3008,17 @@ fn lowerDeclRefValue(func: *CodeGen, tv: TypedValue, decl_index: Module.Decl.Ind } const decl = mod.declPtr(decl_index); + // check if decl is an alias to a function, in which case we + // want to lower the actual decl, rather than the alias itself.
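+    // For example (hypothetical user code): given `fn foo() void {}` and
+    // `const bar = foo;`, a reference to `bar` carries a function value whose
+    // owner_decl is `foo`'s decl, so we lower `foo` and call its symbol
+    // instead.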
+ if (decl.val.getFunction(mod)) |func_val| { + if (func_val.owner_decl != decl_index) { + return func.lowerDeclRefValue(tv, func_val.owner_decl, offset); + } + } else if (decl.val.getExternFunc(mod)) |func_val| { + if (func_val.decl != decl_index) { + return func.lowerDeclRefValue(tv, func_val.decl, offset); + } + } if (decl.ty.zigTypeTag(mod) != .Fn and !decl.ty.hasRuntimeBitsIgnoreComptime(mod)) { return WValue{ .imm32 = 0xaaaaaaaa }; } diff --git a/test/behavior.zig b/test/behavior.zig index bdc3f30ede..017b5b4824 100644 --- a/test/behavior.zig +++ b/test/behavior.zig @@ -158,6 +158,7 @@ test { _ = @import("behavior/enum.zig"); _ = @import("behavior/error.zig"); _ = @import("behavior/eval.zig"); + _ = @import("behavior/export_self_referential_type_info.zig"); _ = @import("behavior/field_parent_ptr.zig"); _ = @import("behavior/floatop.zig"); _ = @import("behavior/fn.zig"); @@ -241,7 +242,6 @@ test { if (builtin.zig_backend != .stage2_arm and builtin.zig_backend != .stage2_x86_64 and builtin.zig_backend != .stage2_aarch64 and - builtin.zig_backend != .stage2_wasm and builtin.zig_backend != .stage2_c and builtin.zig_backend != .stage2_spirv64) { @@ -250,8 +250,4 @@ test { _ = @import("behavior/bugs/14198.zig"); _ = @import("behavior/export.zig"); } - - if (builtin.zig_backend != .stage2_wasm) { - _ = @import("behavior/export_self_referential_type_info.zig"); - } } -- cgit v1.2.3 From 259315606827620daaabf82b479e59ee710097cd Mon Sep 17 00:00:00 2001 From: r00ster91 Date: Fri, 2 Jun 2023 22:02:45 -0400 Subject: migration: std.math.{min, min3, max, max3} -> `@min` & `@max` --- doc/docgen.zig | 2 +- lib/compiler_rt/divc3.zig | 3 +- lib/compiler_rt/emutls.zig | 4 +- lib/std/Build/Cache/DepTokenizer.zig | 2 +- lib/std/Thread.zig | 6 +- lib/std/Uri.zig | 4 +- lib/std/array_hash_map.zig | 6 +- lib/std/ascii.zig | 2 +- lib/std/compress/lzma/decode.zig | 2 +- lib/std/crypto/blake3.zig | 8 +-- lib/std/crypto/ff.zig | 2 +- lib/std/crypto/ghash_polyval.zig | 2 +- lib/std/crypto/keccak_p.zig | 4 +- lib/std/crypto/poly1305.zig | 2 +- lib/std/crypto/salsa20.zig | 2 +- lib/std/crypto/scrypt.zig | 4 +- lib/std/crypto/sha3.zig | 2 +- lib/std/crypto/siphash.zig | 2 +- lib/std/debug.zig | 4 +- lib/std/dynamic_library.zig | 3 +- lib/std/event/loop.zig | 2 +- lib/std/fifo.zig | 2 +- lib/std/fmt.zig | 18 +++--- lib/std/hash/wyhash.zig | 2 +- lib/std/hash_map.zig | 6 +- lib/std/heap/arena_allocator.zig | 2 +- lib/std/heap/memory_pool.zig | 4 +- lib/std/http/protocol.zig | 2 +- lib/std/io/fixed_buffer_stream.zig | 4 +- lib/std/io/limited_reader.zig | 2 +- lib/std/io/reader.zig | 2 +- lib/std/io/writer.zig | 2 +- lib/std/math.zig | 103 +++----------------------------- lib/std/math/big/int.zig | 96 ++++++++++++++--------------- lib/std/math/ldexp.zig | 2 +- lib/std/mem.zig | 12 ++-- lib/std/net.zig | 8 +-- lib/std/os/linux.zig | 4 +- lib/std/os/linux/io_uring.zig | 4 +- lib/std/os/windows.zig | 4 +- lib/std/pdb.zig | 2 +- lib/std/rand.zig | 2 +- lib/std/sort/block.zig | 10 ++-- lib/std/zig/render.zig | 4 +- lib/std/zig/system/NativeTargetInfo.zig | 6 +- src/Sema.zig | 10 ++-- src/TypedValue.zig | 10 ++-- src/arch/x86_64/CodeGen.zig | 4 +- src/link/Elf.zig | 2 +- src/link/MachO/CodeSignature.zig | 6 +- src/link/MachO/Object.zig | 2 +- src/link/Wasm.zig | 2 +- src/link/Wasm/Object.zig | 2 +- src/main.zig | 2 +- src/translate_c.zig | 2 +- src/translate_c/ast.zig | 14 ++--- src/type.zig | 2 +- src/value.zig | 8 +-- 58 files changed, 173 insertions(+), 264 deletions(-) (limited to 'src/arch') diff --git a/doc/docgen.zig 
b/doc/docgen.zig index bdbde6f5d2..4a9e33fbdd 100644 --- a/doc/docgen.zig +++ b/doc/docgen.zig @@ -276,7 +276,7 @@ fn parseError(tokenizer: *Tokenizer, token: Token, comptime fmt: []const u8, arg } } { - const caret_count = std.math.min(token.end, loc.line_end) - token.start; + const caret_count = @min(token.end, loc.line_end) - token.start; var i: usize = 0; while (i < caret_count) : (i += 1) { print("~", .{}); diff --git a/lib/compiler_rt/divc3.zig b/lib/compiler_rt/divc3.zig index 4e4dba2856..c4241c1483 100644 --- a/lib/compiler_rt/divc3.zig +++ b/lib/compiler_rt/divc3.zig @@ -3,7 +3,6 @@ const isNan = std.math.isNan; const isInf = std.math.isInf; const scalbn = std.math.scalbn; const ilogb = std.math.ilogb; -const max = std.math.max; const fabs = std.math.fabs; const maxInt = std.math.maxInt; const minInt = std.math.minInt; @@ -17,7 +16,7 @@ pub inline fn divc3(comptime T: type, a: T, b: T, c_in: T, d_in: T) Complex(T) { var d = d_in; // logbw used to prevent under/over-flow - const logbw = ilogb(max(fabs(c), fabs(d))); + const logbw = ilogb(@max(fabs(c), fabs(d))); const logbw_finite = logbw != maxInt(i32) and logbw != minInt(i32); const ilogbw = if (logbw_finite) b: { c = scalbn(c, -logbw); diff --git a/lib/compiler_rt/emutls.zig b/lib/compiler_rt/emutls.zig index 05a2de97a8..47c71efadd 100644 --- a/lib/compiler_rt/emutls.zig +++ b/lib/compiler_rt/emutls.zig @@ -49,7 +49,7 @@ const simple_allocator = struct { /// Allocate a memory chunk. pub fn advancedAlloc(alignment: u29, size: usize) [*]u8 { - const minimal_alignment = std.math.max(@alignOf(usize), alignment); + const minimal_alignment = @max(@alignOf(usize), alignment); var aligned_ptr: ?*anyopaque = undefined; if (std.c.posix_memalign(&aligned_ptr, minimal_alignment, size) != 0) { @@ -170,7 +170,7 @@ const current_thread_storage = struct { // make it to contains at least 16 objects (to avoid too much // reallocation at startup). - const size = std.math.max(16, index); + const size = @max(16, index); // create a new array and store it. var array: *ObjectArray = ObjectArray.init(size); diff --git a/lib/std/Build/Cache/DepTokenizer.zig b/lib/std/Build/Cache/DepTokenizer.zig index 1a4e2ddb74..0e5224edc0 100644 --- a/lib/std/Build/Cache/DepTokenizer.zig +++ b/lib/std/Build/Cache/DepTokenizer.zig @@ -983,7 +983,7 @@ fn hexDump(out: anytype, bytes: []const u8) !void { try printDecValue(out, offset, 8); try out.writeAll(":"); try out.writeAll(" "); - var end1 = std.math.min(offset + n, offset + 8); + var end1 = @min(offset + n, offset + 8); for (bytes[offset..end1]) |b| { try out.writeAll(" "); try printHexValue(out, b, 2); diff --git a/lib/std/Thread.zig b/lib/std/Thread.zig index ed6a9383e3..76650a9072 100644 --- a/lib/std/Thread.zig +++ b/lib/std/Thread.zig @@ -541,7 +541,7 @@ const WindowsThreadImpl = struct { // Going lower makes it default to that specified in the executable (~1mb). // Its also fine if the limit here is incorrect as stack size is only a hint. var stack_size = std.math.cast(u32, config.stack_size) orelse std.math.maxInt(u32); - stack_size = std.math.max(64 * 1024, stack_size); + stack_size = @max(64 * 1024, stack_size); instance.thread.thread_handle = windows.kernel32.CreateThread( null, @@ -690,7 +690,7 @@ const PosixThreadImpl = struct { defer assert(c.pthread_attr_destroy(&attr) == .SUCCESS); // Use the same set of parameters used by the libc-less impl. 
- const stack_size = std.math.max(config.stack_size, c.PTHREAD_STACK_MIN); + const stack_size = @max(config.stack_size, c.PTHREAD_STACK_MIN); assert(c.pthread_attr_setstacksize(&attr, stack_size) == .SUCCESS); assert(c.pthread_attr_setguardsize(&attr, std.mem.page_size) == .SUCCESS); @@ -930,7 +930,7 @@ const LinuxThreadImpl = struct { var bytes: usize = page_size; guard_offset = bytes; - bytes += std.math.max(page_size, config.stack_size); + bytes += @max(page_size, config.stack_size); bytes = std.mem.alignForward(bytes, page_size); stack_offset = bytes; diff --git a/lib/std/Uri.zig b/lib/std/Uri.zig index 7a9755bd28..198ab461ae 100644 --- a/lib/std/Uri.zig +++ b/lib/std/Uri.zig @@ -177,13 +177,13 @@ pub fn parseWithoutScheme(text: []const u8) ParseError!Uri { if (std.mem.lastIndexOf(u8, authority, ":")) |index| { if (index >= end_of_host) { // if not part of the V6 address field - end_of_host = std.math.min(end_of_host, index); + end_of_host = @min(end_of_host, index); uri.port = std.fmt.parseInt(u16, authority[index + 1 ..], 10) catch return error.InvalidPort; } } } else if (std.mem.lastIndexOf(u8, authority, ":")) |index| { if (index >= start_of_host) { // if not part of the userinfo field - end_of_host = std.math.min(end_of_host, index); + end_of_host = @min(end_of_host, index); uri.port = std.fmt.parseInt(u16, authority[index + 1 ..], 10) catch return error.InvalidPort; } } diff --git a/lib/std/array_hash_map.zig b/lib/std/array_hash_map.zig index 55b9aac6e4..b46b5c12f0 100644 --- a/lib/std/array_hash_map.zig +++ b/lib/std/array_hash_map.zig @@ -815,9 +815,9 @@ pub fn ArrayHashMapUnmanaged( /// no longer guaranteed that no allocations will be performed. pub fn capacity(self: Self) usize { const entry_cap = self.entries.capacity; - const header = self.index_header orelse return math.min(linear_scan_max, entry_cap); + const header = self.index_header orelse return @min(linear_scan_max, entry_cap); const indexes_cap = header.capacity(); - return math.min(entry_cap, indexes_cap); + return @min(entry_cap, indexes_cap); } /// Clobbers any existing data. To detect if a put would clobber @@ -1821,7 +1821,7 @@ fn Index(comptime I: type) type { /// length * the size of an Index(u32). The index is 8 bytes (3 bits repr) /// and max_usize + 1 is not representable, so we need to subtract out 4 bits. const max_representable_index_len = @bitSizeOf(usize) - 4; -const max_bit_index = math.min(32, max_representable_index_len); +const max_bit_index = @min(32, max_representable_index_len); const min_bit_index = 5; const max_capacity = (1 << max_bit_index) - 1; const index_capacities = blk: { diff --git a/lib/std/ascii.zig b/lib/std/ascii.zig index 941f398f20..e47ef4db65 100644 --- a/lib/std/ascii.zig +++ b/lib/std/ascii.zig @@ -422,7 +422,7 @@ test "indexOfIgnoreCase" { /// Returns the lexicographical order of two slices. O(n). 
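/// For example (illustrative): `orderIgnoreCase("ABC", "abd")` is `.lt`,
/// since both sides are compared as if lowercased.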
pub fn orderIgnoreCase(lhs: []const u8, rhs: []const u8) std.math.Order { - const n = std.math.min(lhs.len, rhs.len); + const n = @min(lhs.len, rhs.len); var i: usize = 0; while (i < n) : (i += 1) { switch (std.math.order(toLower(lhs[i]), toLower(rhs[i]))) { diff --git a/lib/std/compress/lzma/decode.zig b/lib/std/compress/lzma/decode.zig index dc220d8e87..f539abf8b1 100644 --- a/lib/std/compress/lzma/decode.zig +++ b/lib/std/compress/lzma/decode.zig @@ -59,7 +59,7 @@ pub const Params = struct { const pb = @intCast(u3, props); const dict_size_provided = try reader.readIntLittle(u32); - const dict_size = math.max(0x1000, dict_size_provided); + const dict_size = @max(0x1000, dict_size_provided); const unpacked_size = switch (options.unpacked_size) { .read_from_header => blk: { diff --git a/lib/std/crypto/blake3.zig b/lib/std/crypto/blake3.zig index fb580fda13..7ad1511e79 100644 --- a/lib/std/crypto/blake3.zig +++ b/lib/std/crypto/blake3.zig @@ -20,7 +20,7 @@ const ChunkIterator = struct { } fn next(self: *ChunkIterator) ?[]u8 { - const next_chunk = self.slice[0..math.min(self.chunk_len, self.slice.len)]; + const next_chunk = self.slice[0..@min(self.chunk_len, self.slice.len)]; self.slice = self.slice[next_chunk.len..]; return if (next_chunk.len > 0) next_chunk else null; } @@ -283,7 +283,7 @@ const ChunkState = struct { fn fillBlockBuf(self: *ChunkState, input: []const u8) []const u8 { const want = BLOCK_LEN - self.block_len; - const take = math.min(want, input.len); + const take = @min(want, input.len); @memcpy(self.block[self.block_len..][0..take], input[0..take]); self.block_len += @truncate(u8, take); return input[take..]; @@ -450,7 +450,7 @@ pub const Blake3 = struct { // Compress input bytes into the current chunk state. const want = CHUNK_LEN - self.chunk_state.len(); - const take = math.min(want, input.len); + const take = @min(want, input.len); self.chunk_state.update(input[0..take]); input = input[take..]; } @@ -663,7 +663,7 @@ fn testBlake3(hasher: *Blake3, input_len: usize, expected_hex: [262]u8) !void { // Write repeating input pattern to hasher var input_counter = input_len; while (input_counter > 0) { - const update_len = math.min(input_counter, input_pattern.len); + const update_len = @min(input_counter, input_pattern.len); hasher.update(input_pattern[0..update_len]); input_counter -= update_len; } diff --git a/lib/std/crypto/ff.zig b/lib/std/crypto/ff.zig index 84753ddefb..37e3d1c1b3 100644 --- a/lib/std/crypto/ff.zig +++ b/lib/std/crypto/ff.zig @@ -570,7 +570,7 @@ pub fn Modulus(comptime max_bits: comptime_int) type { var out = self.zero; var i = x.limbs_count() - 1; if (self.limbs_count() >= 2) { - const start = math.min(i, self.limbs_count() - 2); + const start = @min(i, self.limbs_count() - 2); var j = start; while (true) : (j -= 1) { out.v.limbs.set(j, x.limbs.get(i)); diff --git a/lib/std/crypto/ghash_polyval.zig b/lib/std/crypto/ghash_polyval.zig index 46645d710f..2fbff25f72 100644 --- a/lib/std/crypto/ghash_polyval.zig +++ b/lib/std/crypto/ghash_polyval.zig @@ -363,7 +363,7 @@ fn Hash(comptime endian: std.builtin.Endian, comptime shift_key: bool) type { var mb = m; if (st.leftover > 0) { - const want = math.min(block_length - st.leftover, mb.len); + const want = @min(block_length - st.leftover, mb.len); const mc = mb[0..want]; for (mc, 0..) 
|x, i| { st.buf[st.leftover + i] = x; diff --git a/lib/std/crypto/keccak_p.zig b/lib/std/crypto/keccak_p.zig index 9226f2f6d4..ddc9b1b847 100644 --- a/lib/std/crypto/keccak_p.zig +++ b/lib/std/crypto/keccak_p.zig @@ -214,7 +214,7 @@ pub fn State(comptime f: u11, comptime capacity: u11, comptime delim: u8, compti pub fn absorb(self: *Self, bytes_: []const u8) void { var bytes = bytes_; if (self.offset > 0) { - const left = math.min(rate - self.offset, bytes.len); + const left = @min(rate - self.offset, bytes.len); @memcpy(self.buf[self.offset..][0..left], bytes[0..left]); self.offset += left; if (self.offset == rate) { @@ -249,7 +249,7 @@ pub fn State(comptime f: u11, comptime capacity: u11, comptime delim: u8, compti pub fn squeeze(self: *Self, out: []u8) void { var i: usize = 0; while (i < out.len) : (i += rate) { - const left = math.min(rate, out.len - i); + const left = @min(rate, out.len - i); self.st.extractBytes(out[i..][0..left]); self.st.permuteR(rounds); } diff --git a/lib/std/crypto/poly1305.zig b/lib/std/crypto/poly1305.zig index a2873f1145..51e1c2ab24 100644 --- a/lib/std/crypto/poly1305.zig +++ b/lib/std/crypto/poly1305.zig @@ -112,7 +112,7 @@ pub const Poly1305 = struct { // handle leftover if (st.leftover > 0) { - const want = std.math.min(block_length - st.leftover, mb.len); + const want = @min(block_length - st.leftover, mb.len); const mc = mb[0..want]; for (mc, 0..) |x, i| { st.buf[st.leftover + i] = x; diff --git a/lib/std/crypto/salsa20.zig b/lib/std/crypto/salsa20.zig index 7f57e6cecb..c8a639ad0b 100644 --- a/lib/std/crypto/salsa20.zig +++ b/lib/std/crypto/salsa20.zig @@ -404,7 +404,7 @@ pub const XSalsa20Poly1305 = struct { debug.assert(c.len == m.len); const extended = extend(rounds, k, npub); var block0 = [_]u8{0} ** 64; - const mlen0 = math.min(32, c.len); + const mlen0 = @min(32, c.len); @memcpy(block0[32..][0..mlen0], c[0..mlen0]); Salsa20.xor(block0[0..], block0[0..], 0, extended.key, extended.nonce); var mac = Poly1305.init(block0[0..32]); diff --git a/lib/std/crypto/scrypt.zig b/lib/std/crypto/scrypt.zig index b8e8ef55e2..97dd9b95d0 100644 --- a/lib/std/crypto/scrypt.zig +++ b/lib/std/crypto/scrypt.zig @@ -143,7 +143,7 @@ pub const Params = struct { /// Create parameters from ops and mem limits, where mem_limit given in bytes pub fn fromLimits(ops_limit: u64, mem_limit: usize) Self { - const ops = math.max(32768, ops_limit); + const ops = @max(32768, ops_limit); const r: u30 = 8; if (ops < mem_limit / 32) { const max_n = ops / (r * 4); @@ -151,7 +151,7 @@ pub const Params = struct { } else { const max_n = mem_limit / (@intCast(usize, r) * 128); const ln = @intCast(u6, math.log2(max_n)); - const max_rp = math.min(0x3fffffff, (ops / 4) / (@as(u64, 1) << ln)); + const max_rp = @min(0x3fffffff, (ops / 4) / (@as(u64, 1) << ln)); return Self{ .r = r, .p = @intCast(u30, max_rp / @as(u64, r)), .ln = ln }; } } diff --git a/lib/std/crypto/sha3.zig b/lib/std/crypto/sha3.zig index 23f9e65534..0226490881 100644 --- a/lib/std/crypto/sha3.zig +++ b/lib/std/crypto/sha3.zig @@ -148,7 +148,7 @@ fn ShakeLike(comptime security_level: u11, comptime delim: u8, comptime rounds: if (self.offset > 0) { const left = self.buf.len - self.offset; if (left > 0) { - const n = math.min(left, out.len); + const n = @min(left, out.len); @memcpy(out[0..n], self.buf[self.offset..][0..n]); out = out[n..]; self.offset += n; diff --git a/lib/std/crypto/siphash.zig b/lib/std/crypto/siphash.zig index 37d219f868..70f4f2fd53 100644 --- a/lib/std/crypto/siphash.zig +++ b/lib/std/crypto/siphash.zig @@ 
-433,7 +433,7 @@ test "iterative non-divisible update" { var siphash = Siphash.init(key); var i: usize = 0; while (i < end) : (i += 7) { - siphash.update(buf[i..std.math.min(i + 7, end)]); + siphash.update(buf[i..@min(i + 7, end)]); } const iterative_hash = siphash.finalInt(); diff --git a/lib/std/debug.zig b/lib/std/debug.zig index ea0d467085..3015c30bfb 100644 --- a/lib/std/debug.zig +++ b/lib/std/debug.zig @@ -198,7 +198,7 @@ pub fn captureStackTrace(first_address: ?usize, stack_trace: *std.builtin.StackT stack_trace.index = 0; return; }; - const end_index = math.min(first_index + addrs.len, n); + const end_index = @min(first_index + addrs.len, n); const slice = addr_buf[first_index..end_index]; // We use a for loop here because slice and addrs may alias. for (slice, 0..) |addr, i| { @@ -380,7 +380,7 @@ pub fn writeStackTrace( _ = allocator; if (builtin.strip_debug_info) return error.MissingDebugInfo; var frame_index: usize = 0; - var frames_left: usize = std.math.min(stack_trace.index, stack_trace.instruction_addresses.len); + var frames_left: usize = @min(stack_trace.index, stack_trace.instruction_addresses.len); while (frames_left != 0) : ({ frames_left -= 1; diff --git a/lib/std/dynamic_library.zig b/lib/std/dynamic_library.zig index 59ad7429cf..94da2f4d6d 100644 --- a/lib/std/dynamic_library.zig +++ b/lib/std/dynamic_library.zig @@ -8,7 +8,6 @@ const elf = std.elf; const windows = std.os.windows; const system = std.os.system; const maxInt = std.math.maxInt; -const max = std.math.max; pub const DynLib = switch (builtin.os.tag) { .linux => if (builtin.link_libc) DlDynlib else ElfDynLib, @@ -152,7 +151,7 @@ pub const ElfDynLib = struct { }) { const ph = @intToPtr(*elf.Phdr, ph_addr); switch (ph.p_type) { - elf.PT_LOAD => virt_addr_end = max(virt_addr_end, ph.p_vaddr + ph.p_memsz), + elf.PT_LOAD => virt_addr_end = @max(virt_addr_end, ph.p_vaddr + ph.p_memsz), elf.PT_DYNAMIC => maybe_dynv = @intToPtr([*]usize, elf_addr + ph.p_offset), else => {}, } diff --git a/lib/std/event/loop.zig b/lib/std/event/loop.zig index c8d41d3eb0..bc0162423b 100644 --- a/lib/std/event/loop.zig +++ b/lib/std/event/loop.zig @@ -179,7 +179,7 @@ pub const Loop = struct { // We need at least one of these in case the fs thread wants to use onNextTick const extra_thread_count = thread_count - 1; - const resume_node_count = std.math.max(extra_thread_count, 1); + const resume_node_count = @max(extra_thread_count, 1); self.eventfd_resume_nodes = try self.arena.allocator().alloc( std.atomic.Stack(ResumeNode.EventFd).Node, resume_node_count, diff --git a/lib/std/fifo.zig b/lib/std/fifo.zig index bc88e61d76..535376d38f 100644 --- a/lib/std/fifo.zig +++ b/lib/std/fifo.zig @@ -150,7 +150,7 @@ pub fn LinearFifo( start -= self.buf.len; return self.buf[start .. 
start + (self.count - offset)]; } else { - const end = math.min(self.head + self.count, self.buf.len); + const end = @min(self.head + self.count, self.buf.len); return self.buf[start..end]; } } diff --git a/lib/std/fmt.zig b/lib/std/fmt.zig index 6896d0a7a0..c9d8e611ca 100644 --- a/lib/std/fmt.zig +++ b/lib/std/fmt.zig @@ -921,8 +921,8 @@ fn formatSizeImpl(comptime base: comptime_int) type { const log2 = math.log2(value); const magnitude = switch (base) { - 1000 => math.min(log2 / comptime math.log2(1000), mags_si.len - 1), - 1024 => math.min(log2 / 10, mags_iec.len - 1), + 1000 => @min(log2 / comptime math.log2(1000), mags_si.len - 1), + 1024 => @min(log2 / 10, mags_iec.len - 1), else => unreachable, }; const new_value = lossyCast(f64, value) / math.pow(f64, lossyCast(f64, base), lossyCast(f64, magnitude)); @@ -1103,7 +1103,7 @@ pub fn formatFloatScientific( var printed: usize = 0; if (float_decimal.digits.len > 1) { - const num_digits = math.min(float_decimal.digits.len, precision + 1); + const num_digits = @min(float_decimal.digits.len, precision + 1); try writer.writeAll(float_decimal.digits[1..num_digits]); printed += num_digits - 1; } @@ -1116,7 +1116,7 @@ pub fn formatFloatScientific( try writer.writeAll(float_decimal.digits[0..1]); try writer.writeAll("."); if (float_decimal.digits.len > 1) { - const num_digits = if (@TypeOf(value) == f32) math.min(@as(usize, 9), float_decimal.digits.len) else float_decimal.digits.len; + const num_digits = if (@TypeOf(value) == f32) @min(@as(usize, 9), float_decimal.digits.len) else float_decimal.digits.len; try writer.writeAll(float_decimal.digits[1..num_digits]); } else { @@ -1299,7 +1299,7 @@ pub fn formatFloatDecimal( var num_digits_whole = if (float_decimal.exp > 0) @intCast(usize, float_decimal.exp) else 0; // the actual slice into the buffer, we may need to zero-pad between num_digits_whole and this. - var num_digits_whole_no_pad = math.min(num_digits_whole, float_decimal.digits.len); + var num_digits_whole_no_pad = @min(num_digits_whole, float_decimal.digits.len); if (num_digits_whole > 0) { // We may have to zero pad, for instance 1e4 requires zero padding. @@ -1326,7 +1326,7 @@ pub fn formatFloatDecimal( // Zero-fill until we reach significant digits or run out of precision. if (float_decimal.exp <= 0) { const zero_digit_count = @intCast(usize, -float_decimal.exp); - const zeros_to_print = math.min(zero_digit_count, precision); + const zeros_to_print = @min(zero_digit_count, precision); var i: usize = 0; while (i < zeros_to_print) : (i += 1) { @@ -1357,7 +1357,7 @@ pub fn formatFloatDecimal( var num_digits_whole = if (float_decimal.exp > 0) @intCast(usize, float_decimal.exp) else 0; // the actual slice into the buffer, we may need to zero-pad between num_digits_whole and this. - var num_digits_whole_no_pad = math.min(num_digits_whole, float_decimal.digits.len); + var num_digits_whole_no_pad = @min(num_digits_whole, float_decimal.digits.len); if (num_digits_whole > 0) { // We may have to zero pad, for instance 1e4 requires zero padding. 
@@ -1410,12 +1410,12 @@ pub fn formatInt( // The type must have the same size as `base` or be wider in order for the // division to work - const min_int_bits = comptime math.max(value_info.bits, 8); + const min_int_bits = comptime @max(value_info.bits, 8); const MinInt = std.meta.Int(.unsigned, min_int_bits); const abs_value = math.absCast(int_value); // The worst case in terms of space needed is base 2, plus 1 for the sign - var buf: [1 + math.max(value_info.bits, 1)]u8 = undefined; + var buf: [1 + @max(@as(comptime_int, value_info.bits), 1)]u8 = undefined; var a: MinInt = abs_value; var index: usize = buf.len; diff --git a/lib/std/hash/wyhash.zig b/lib/std/hash/wyhash.zig index 3426bca9f4..c36c3fe87c 100644 --- a/lib/std/hash/wyhash.zig +++ b/lib/std/hash/wyhash.zig @@ -252,7 +252,7 @@ test "iterative non-divisible update" { var wy = Wyhash.init(seed); var i: usize = 0; while (i < end) : (i += 33) { - wy.update(buf[i..std.math.min(i + 33, end)]); + wy.update(buf[i..@min(i + 33, end)]); } const iterative_hash = wy.final(); diff --git a/lib/std/hash_map.zig b/lib/std/hash_map.zig index 041d99606e..5b539ddaad 100644 --- a/lib/std/hash_map.zig +++ b/lib/std/hash_map.zig @@ -1507,7 +1507,7 @@ pub fn HashMapUnmanaged( fn grow(self: *Self, allocator: Allocator, new_capacity: Size, ctx: Context) Allocator.Error!void { @setCold(true); - const new_cap = std.math.max(new_capacity, minimal_capacity); + const new_cap = @max(new_capacity, minimal_capacity); assert(new_cap > self.capacity()); assert(std.math.isPowerOfTwo(new_cap)); @@ -1540,7 +1540,7 @@ pub fn HashMapUnmanaged( const header_align = @alignOf(Header); const key_align = if (@sizeOf(K) == 0) 1 else @alignOf(K); const val_align = if (@sizeOf(V) == 0) 1 else @alignOf(V); - const max_align = comptime math.max3(header_align, key_align, val_align); + const max_align = comptime @max(header_align, key_align, val_align); const meta_size = @sizeOf(Header) + new_capacity * @sizeOf(Metadata); comptime assert(@alignOf(Metadata) == 1); @@ -1575,7 +1575,7 @@ pub fn HashMapUnmanaged( const header_align = @alignOf(Header); const key_align = if (@sizeOf(K) == 0) 1 else @alignOf(K); const val_align = if (@sizeOf(V) == 0) 1 else @alignOf(V); - const max_align = comptime math.max3(header_align, key_align, val_align); + const max_align = comptime @max(header_align, key_align, val_align); const cap = self.capacity(); const meta_size = @sizeOf(Header) + cap * @sizeOf(Metadata); diff --git a/lib/std/heap/arena_allocator.zig b/lib/std/heap/arena_allocator.zig index c0eeae6e61..c7e0569067 100644 --- a/lib/std/heap/arena_allocator.zig +++ b/lib/std/heap/arena_allocator.zig @@ -110,7 +110,7 @@ pub const ArenaAllocator = struct { // value. const requested_capacity = switch (mode) { .retain_capacity => self.queryCapacity(), - .retain_with_limit => |limit| std.math.min(limit, self.queryCapacity()), + .retain_with_limit => |limit| @min(limit, self.queryCapacity()), .free_all => 0, }; if (requested_capacity == 0) { diff --git a/lib/std/heap/memory_pool.zig b/lib/std/heap/memory_pool.zig index ca6eb7f518..3fc7dfbfca 100644 --- a/lib/std/heap/memory_pool.zig +++ b/lib/std/heap/memory_pool.zig @@ -40,11 +40,11 @@ pub fn MemoryPoolExtra(comptime Item: type, comptime pool_options: Options) type /// Size of the memory pool items. This is not necessarily the same /// as `@sizeOf(Item)` as the pool also uses the items for internal means. 
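    /// For example (sizes are illustrative and target-dependent): a pool of
    /// `u8` items still uses pointer-sized slots, because a free slot must be
    /// able to hold the free-list `Node`.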
- pub const item_size = std.math.max(@sizeOf(Node), @sizeOf(Item)); + pub const item_size = @max(@sizeOf(Node), @sizeOf(Item)); /// Alignment of the memory pool items. This is not necessarily the same /// as `@alignOf(Item)` as the pool also uses the items for internal means. - pub const item_alignment = std.math.max(@alignOf(Node), pool_options.alignment orelse 0); + pub const item_alignment = @max(@alignOf(Node), pool_options.alignment orelse 0); const Node = struct { next: ?*@This(), diff --git a/lib/std/http/protocol.zig b/lib/std/http/protocol.zig index b001b3cddf..b5c2cdfa0c 100644 --- a/lib/std/http/protocol.zig +++ b/lib/std/http/protocol.zig @@ -82,7 +82,7 @@ pub const HeadersParser = struct { /// If the amount returned is less than `bytes.len`, you may assume that the parser is in a content state and the /// first byte of content is located at `bytes[result]`. pub fn findHeadersEnd(r: *HeadersParser, bytes: []const u8) u32 { - const vector_len: comptime_int = comptime std.math.max(std.simd.suggestVectorSize(u8) orelse 1, 8); + const vector_len: comptime_int = comptime @max(std.simd.suggestVectorSize(u8) orelse 1, 8); const len = @intCast(u32, bytes.len); var index: u32 = 0; diff --git a/lib/std/io/fixed_buffer_stream.zig b/lib/std/io/fixed_buffer_stream.zig index c170dd1f74..27b978744c 100644 --- a/lib/std/io/fixed_buffer_stream.zig +++ b/lib/std/io/fixed_buffer_stream.zig @@ -76,7 +76,7 @@ pub fn FixedBufferStream(comptime Buffer: type) type { } pub fn seekTo(self: *Self, pos: u64) SeekError!void { - self.pos = if (std.math.cast(usize, pos)) |x| std.math.min(self.buffer.len, x) else self.buffer.len; + self.pos = if (std.math.cast(usize, pos)) |x| @min(self.buffer.len, x) else self.buffer.len; } pub fn seekBy(self: *Self, amt: i64) SeekError!void { @@ -91,7 +91,7 @@ pub fn FixedBufferStream(comptime Buffer: type) type { } else { const amt_usize = std.math.cast(usize, amt) orelse std.math.maxInt(usize); const new_pos = std.math.add(usize, self.pos, amt_usize) catch std.math.maxInt(usize); - self.pos = std.math.min(self.buffer.len, new_pos); + self.pos = @min(self.buffer.len, new_pos); } } diff --git a/lib/std/io/limited_reader.zig b/lib/std/io/limited_reader.zig index aa00af0d09..09d76007da 100644 --- a/lib/std/io/limited_reader.zig +++ b/lib/std/io/limited_reader.zig @@ -14,7 +14,7 @@ pub fn LimitedReader(comptime ReaderType: type) type { const Self = @This(); pub fn read(self: *Self, dest: []u8) Error!usize { - const max_read = std.math.min(self.bytes_left, dest.len); + const max_read = @min(self.bytes_left, dest.len); const n = try self.inner_reader.read(dest[0..max_read]); self.bytes_left -= n; return n; diff --git a/lib/std/io/reader.zig b/lib/std/io/reader.zig index 344515d07b..abdca56d3c 100644 --- a/lib/std/io/reader.zig +++ b/lib/std/io/reader.zig @@ -325,7 +325,7 @@ pub fn Reader( var remaining = num_bytes; while (remaining > 0) { - const amt = std.math.min(remaining, options.buf_size); + const amt = @min(remaining, options.buf_size); try self.readNoEof(buf[0..amt]); remaining -= amt; } diff --git a/lib/std/io/writer.zig b/lib/std/io/writer.zig index cfc76de452..d0b7fa11ee 100644 --- a/lib/std/io/writer.zig +++ b/lib/std/io/writer.zig @@ -39,7 +39,7 @@ pub fn Writer( var remaining: usize = n; while (remaining > 0) { - const to_write = std.math.min(remaining, bytes.len); + const to_write = @min(remaining, bytes.len); try self.writeAll(bytes[0..to_write]); remaining -= to_write; } diff --git a/lib/std/math.zig b/lib/std/math.zig index 46a7e40a37..e60e964747 100644 --- 
a/lib/std/math.zig +++ b/lib/std/math.zig @@ -165,7 +165,7 @@ pub fn approxEqRel(comptime T: type, x: T, y: T, tolerance: T) bool { if (isNan(x) or isNan(y)) return false; - return @fabs(x - y) <= max(@fabs(x), @fabs(y)) * tolerance; + return @fabs(x - y) <= @max(@fabs(x), @fabs(y)) * tolerance; } test "approxEqAbs and approxEqRel" { @@ -434,104 +434,15 @@ pub fn Min(comptime A: type, comptime B: type) type { return @TypeOf(@as(A, 0) + @as(B, 0)); } -/// Returns the smaller number. When one parameter's type's full range -/// fits in the other, the return type is the smaller type. -pub fn min(x: anytype, y: anytype) Min(@TypeOf(x), @TypeOf(y)) { - const Result = Min(@TypeOf(x), @TypeOf(y)); - if (x < y) { - // TODO Zig should allow this as an implicit cast because x is - // immutable and in this scope it is known to fit in the - // return type. - switch (@typeInfo(Result)) { - .Int => return @intCast(Result, x), - else => return x, - } - } else { - // TODO Zig should allow this as an implicit cast because y is - // immutable and in this scope it is known to fit in the - // return type. - switch (@typeInfo(Result)) { - .Int => return @intCast(Result, y), - else => return y, - } - } -} - -test "min" { - try testing.expect(min(@as(i32, -1), @as(i32, 2)) == -1); - { - var a: u16 = 999; - var b: u32 = 10; - var result = min(a, b); - try testing.expect(@TypeOf(result) == u16); - try testing.expect(result == 10); - } - { - var a: f64 = 10.34; - var b: f32 = 999.12; - var result = min(a, b); - try testing.expect(@TypeOf(result) == f64); - try testing.expect(result == 10.34); - } - { - var a: i8 = -127; - var b: i16 = -200; - var result = min(a, b); - try testing.expect(@TypeOf(result) == i16); - try testing.expect(result == -200); - } - { - const a = 10.34; - var b: f32 = 999.12; - var result = min(a, b); - try testing.expect(@TypeOf(result) == f32); - try testing.expect(result == 10.34); - } -} - -/// Finds the minimum of three numbers. -pub fn min3(x: anytype, y: anytype, z: anytype) @TypeOf(x, y, z) { - return min(x, min(y, z)); -} - -test "min3" { - try testing.expect(min3(@as(i32, 0), @as(i32, 1), @as(i32, 2)) == 0); - try testing.expect(min3(@as(i32, 0), @as(i32, 2), @as(i32, 1)) == 0); - try testing.expect(min3(@as(i32, 1), @as(i32, 0), @as(i32, 2)) == 0); - try testing.expect(min3(@as(i32, 1), @as(i32, 2), @as(i32, 0)) == 0); - try testing.expect(min3(@as(i32, 2), @as(i32, 0), @as(i32, 1)) == 0); - try testing.expect(min3(@as(i32, 2), @as(i32, 1), @as(i32, 0)) == 0); -} - -/// Returns the maximum of two numbers. Return type is the one with the -/// larger range. -pub fn max(x: anytype, y: anytype) @TypeOf(x, y) { - return if (x > y) x else y; -} - -test "max" { - try testing.expect(max(@as(i32, -1), @as(i32, 2)) == 2); - try testing.expect(max(@as(i32, 2), @as(i32, -1)) == 2); -} - -/// Finds the maximum of three numbers. 
-pub fn max3(x: anytype, y: anytype, z: anytype) @TypeOf(x, y, z) { - return max(x, max(y, z)); -} - -test "max3" { - try testing.expect(max3(@as(i32, 0), @as(i32, 1), @as(i32, 2)) == 2); - try testing.expect(max3(@as(i32, 0), @as(i32, 2), @as(i32, 1)) == 2); - try testing.expect(max3(@as(i32, 1), @as(i32, 0), @as(i32, 2)) == 2); - try testing.expect(max3(@as(i32, 1), @as(i32, 2), @as(i32, 0)) == 2); - try testing.expect(max3(@as(i32, 2), @as(i32, 0), @as(i32, 1)) == 2); - try testing.expect(max3(@as(i32, 2), @as(i32, 1), @as(i32, 0)) == 2); -} +pub const min = @compileError("deprecated; use @min instead"); +pub const max = @compileError("deprecated; use @max instead"); +pub const min3 = @compileError("deprecated; use @min instead"); +pub const max3 = @compileError("deprecated; use @max instead"); /// Limit val to the inclusive range [lower, upper]. pub fn clamp(val: anytype, lower: anytype, upper: anytype) @TypeOf(val, lower, upper) { assert(lower <= upper); - return max(lower, min(val, upper)); + return @max(lower, @min(val, upper)); } test "clamp" { // Within range @@ -795,7 +706,7 @@ pub fn IntFittingRange(comptime from: comptime_int, comptime to: comptime_int) t return u0; } const signedness: std.builtin.Signedness = if (from < 0) .signed else .unsigned; - const largest_positive_integer = max(if (from < 0) (-from) - 1 else from, to); // two's complement + const largest_positive_integer = @max(if (from < 0) (-from) - 1 else from, to); // two's complement const base = log2(largest_positive_integer); const upper = (1 << base) - 1; var magnitude_bits = if (upper >= largest_positive_integer) base else base + 1; diff --git a/lib/std/math/big/int.zig b/lib/std/math/big/int.zig index ec79d843da..487812e1de 100644 --- a/lib/std/math/big/int.zig +++ b/lib/std/math/big/int.zig @@ -44,12 +44,12 @@ pub fn calcDivLimbsBufferLen(a_len: usize, b_len: usize) usize { } pub fn calcMulLimbsBufferLen(a_len: usize, b_len: usize, aliases: usize) usize { - return aliases * math.max(a_len, b_len); + return aliases * @max(a_len, b_len); } pub fn calcMulWrapLimbsBufferLen(bit_count: usize, a_len: usize, b_len: usize, aliases: usize) usize { const req_limbs = calcTwosCompLimbCount(bit_count); - return aliases * math.min(req_limbs, math.max(a_len, b_len)); + return aliases * @min(req_limbs, @max(a_len, b_len)); } pub fn calcSetStringLimbsBufferLen(base: u8, string_len: usize) usize { @@ -396,7 +396,7 @@ pub const Mutable = struct { /// scalar is a primitive integer type. /// /// Asserts the result fits in `r`. An upper bound on the number of limbs needed by - /// r is `math.max(a.limbs.len, calcLimbLen(scalar)) + 1`. + /// r is `@max(a.limbs.len, calcLimbLen(scalar)) + 1`. pub fn addScalar(r: *Mutable, a: Const, scalar: anytype) void { // Normally we could just determine the number of limbs needed with calcLimbLen, // but that is not comptime-known when scalar is not a comptime_int. Instead, we @@ -414,11 +414,11 @@ pub const Mutable = struct { return add(r, a, operand); } - /// Base implementation for addition. Adds `max(a.limbs.len, b.limbs.len)` elements from a and b, + /// Base implementation for addition. Adds `@max(a.limbs.len, b.limbs.len)` elements from a and b, /// and returns whether any overflow occurred. /// r, a and b may be aliases. /// - /// Asserts r has enough elements to hold the result. The upper bound is `max(a.limbs.len, b.limbs.len)`. + /// Asserts r has enough elements to hold the result. The upper bound is `@max(a.limbs.len, b.limbs.len)`. 
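+    /// For example (illustrative): adding a 3-limb and a 5-limb value writes
+    /// at most @max(3, 5) = 5 limbs here; the extra carry limb, if any, is
+    /// appended by the caller (e.g. `add`).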
fn addCarry(r: *Mutable, a: Const, b: Const) bool { if (a.eqZero()) { r.copy(b); @@ -452,12 +452,12 @@ pub const Mutable = struct { /// r, a and b may be aliases. /// /// Asserts the result fits in `r`. An upper bound on the number of limbs needed by - /// r is `math.max(a.limbs.len, b.limbs.len) + 1`. + /// r is `@max(a.limbs.len, b.limbs.len) + 1`. pub fn add(r: *Mutable, a: Const, b: Const) void { if (r.addCarry(a, b)) { // Fix up the result. Note that addCarry normalizes by a.limbs.len or b.limbs.len, // so we need to set the length here. - const msl = math.max(a.limbs.len, b.limbs.len); + const msl = @max(a.limbs.len, b.limbs.len); // `[add|sub]Carry` normalizes by `msl`, so we need to fix up the result manually here. // Note, the fact that it normalized means that the intermediary limbs are zero here. r.len = msl + 1; @@ -477,12 +477,12 @@ pub const Mutable = struct { // if an overflow occurred. const x = Const{ .positive = a.positive, - .limbs = a.limbs[0..math.min(req_limbs, a.limbs.len)], + .limbs = a.limbs[0..@min(req_limbs, a.limbs.len)], }; const y = Const{ .positive = b.positive, - .limbs = b.limbs[0..math.min(req_limbs, b.limbs.len)], + .limbs = b.limbs[0..@min(req_limbs, b.limbs.len)], }; var carry_truncated = false; @@ -492,7 +492,7 @@ pub const Mutable = struct { // truncate anyway. // - a and b had less elements than req_limbs, and those were overflowed. This case needs to be handled. // Note: after this we still might need to wrap. - const msl = math.max(a.limbs.len, b.limbs.len); + const msl = @max(a.limbs.len, b.limbs.len); if (msl < req_limbs) { r.limbs[msl] = 1; r.len = req_limbs; @@ -522,12 +522,12 @@ pub const Mutable = struct { // if an overflow occurred. const x = Const{ .positive = a.positive, - .limbs = a.limbs[0..math.min(req_limbs, a.limbs.len)], + .limbs = a.limbs[0..@min(req_limbs, a.limbs.len)], }; const y = Const{ .positive = b.positive, - .limbs = b.limbs[0..math.min(req_limbs, b.limbs.len)], + .limbs = b.limbs[0..@min(req_limbs, b.limbs.len)], }; if (r.addCarry(x, y)) { @@ -535,7 +535,7 @@ pub const Mutable = struct { // - We overflowed req_limbs, in which case we need to saturate. // - a and b had less elements than req_limbs, and those were overflowed. // Note: In this case, might _also_ need to saturate. - const msl = math.max(a.limbs.len, b.limbs.len); + const msl = @max(a.limbs.len, b.limbs.len); if (msl < req_limbs) { r.limbs[msl] = 1; r.len = req_limbs; @@ -550,11 +550,11 @@ pub const Mutable = struct { r.saturate(r.toConst(), signedness, bit_count); } - /// Base implementation for subtraction. Subtracts `max(a.limbs.len, b.limbs.len)` elements from a and b, + /// Base implementation for subtraction. Subtracts `@max(a.limbs.len, b.limbs.len)` elements from a and b, /// and returns whether any overflow occurred. /// r, a and b may be aliases. /// - /// Asserts r has enough elements to hold the result. The upper bound is `max(a.limbs.len, b.limbs.len)`. + /// Asserts r has enough elements to hold the result. The upper bound is `@max(a.limbs.len, b.limbs.len)`. fn subCarry(r: *Mutable, a: Const, b: Const) bool { if (a.eqZero()) { r.copy(b); @@ -607,7 +607,7 @@ pub const Mutable = struct { /// r, a and b may be aliases. /// /// Asserts the result fits in `r`. An upper bound on the number of limbs needed by - /// r is `math.max(a.limbs.len, b.limbs.len) + 1`. The +1 is not needed if both operands are positive. + /// r is `@max(a.limbs.len, b.limbs.len) + 1`. The +1 is not needed if both operands are positive. 
pub fn sub(r: *Mutable, a: Const, b: Const) void { r.add(a, b.negate()); } @@ -714,7 +714,7 @@ pub const Mutable = struct { const a_copy = if (rma.limbs.ptr == a.limbs.ptr) blk: { const start = buf_index; - const a_len = math.min(req_limbs, a.limbs.len); + const a_len = @min(req_limbs, a.limbs.len); @memcpy(limbs_buffer[buf_index..][0..a_len], a.limbs[0..a_len]); buf_index += a_len; break :blk a.toMutable(limbs_buffer[start..buf_index]).toConst(); @@ -722,7 +722,7 @@ pub const Mutable = struct { const b_copy = if (rma.limbs.ptr == b.limbs.ptr) blk: { const start = buf_index; - const b_len = math.min(req_limbs, b.limbs.len); + const b_len = @min(req_limbs, b.limbs.len); @memcpy(limbs_buffer[buf_index..][0..b_len], b.limbs[0..b_len]); buf_index += b_len; break :blk a.toMutable(limbs_buffer[start..buf_index]).toConst(); @@ -755,13 +755,13 @@ pub const Mutable = struct { const req_limbs = calcTwosCompLimbCount(bit_count); // We can ignore the upper bits here, those results will be discarded anyway. - const a_limbs = a.limbs[0..math.min(req_limbs, a.limbs.len)]; - const b_limbs = b.limbs[0..math.min(req_limbs, b.limbs.len)]; + const a_limbs = a.limbs[0..@min(req_limbs, a.limbs.len)]; + const b_limbs = b.limbs[0..@min(req_limbs, b.limbs.len)]; @memset(rma.limbs[0..req_limbs], 0); llmulacc(.add, allocator, rma.limbs, a_limbs, b_limbs); - rma.normalize(math.min(req_limbs, a.limbs.len + b.limbs.len)); + rma.normalize(@min(req_limbs, a.limbs.len + b.limbs.len)); rma.positive = (a.positive == b.positive); rma.truncate(rma.toConst(), signedness, bit_count); } @@ -1211,7 +1211,7 @@ pub const Mutable = struct { /// /// a and b are zero-extended to the longer of a or b. /// - /// Asserts that r has enough limbs to store the result. Upper bound is `math.max(a.limbs.len, b.limbs.len)`. + /// Asserts that r has enough limbs to store the result. Upper bound is `@max(a.limbs.len, b.limbs.len)`. pub fn bitOr(r: *Mutable, a: Const, b: Const) void { // Trivial cases, llsignedor does not support zero. if (a.eqZero()) { @@ -1235,8 +1235,8 @@ pub const Mutable = struct { /// r may alias with a or b. /// /// Asserts that r has enough limbs to store the result. - /// If a or b is positive, the upper bound is `math.min(a.limbs.len, b.limbs.len)`. - /// If a and b are negative, the upper bound is `math.max(a.limbs.len, b.limbs.len) + 1`. + /// If a or b is positive, the upper bound is `@min(a.limbs.len, b.limbs.len)`. + /// If a and b are negative, the upper bound is `@max(a.limbs.len, b.limbs.len) + 1`. pub fn bitAnd(r: *Mutable, a: Const, b: Const) void { // Trivial cases, llsignedand does not support zero. if (a.eqZero()) { @@ -1260,8 +1260,8 @@ pub const Mutable = struct { /// r may alias with a or b. /// /// Asserts that r has enough limbs to store the result. If a and b share the same signedness, the - /// upper bound is `math.max(a.limbs.len, b.limbs.len)`. Otherwise, if either a or b is negative - /// but not both, the upper bound is `math.max(a.limbs.len, b.limbs.len) + 1`. + /// upper bound is `@max(a.limbs.len, b.limbs.len)`. Otherwise, if either a or b is negative + /// but not both, the upper bound is `@max(a.limbs.len, b.limbs.len) + 1`. pub fn bitXor(r: *Mutable, a: Const, b: Const) void { // Trivial cases, because llsignedxor does not support negative zero. if (a.eqZero()) { @@ -1284,7 +1284,7 @@ pub const Mutable = struct { /// rma may alias x or y. /// x and y may alias each other. /// Asserts that `rma` has enough limbs to store the result. Upper bound is - /// `math.min(x.limbs.len, y.limbs.len)`. 
+ /// `@min(x.limbs.len, y.limbs.len)`. /// /// `limbs_buffer` is used for temporary storage during the operation. When this function returns, /// it will have the same length as it had when the function was called. @@ -1546,7 +1546,7 @@ pub const Mutable = struct { if (yi != 0) break i; } else unreachable; - const xy_trailing = math.min(x_trailing, y_trailing); + const xy_trailing = @min(x_trailing, y_trailing); if (y.len - xy_trailing == 1) { const divisor = y.limbs[y.len - 1]; @@ -2589,7 +2589,7 @@ pub const Managed = struct { .allocator = allocator, .metadata = 1, .limbs = block: { - const limbs = try allocator.alloc(Limb, math.max(default_capacity, capacity)); + const limbs = try allocator.alloc(Limb, @max(default_capacity, capacity)); limbs[0] = 0; break :block limbs; }, @@ -2918,7 +2918,7 @@ pub const Managed = struct { /// /// Returns an error if memory could not be allocated. pub fn sub(r: *Managed, a: *const Managed, b: *const Managed) !void { - try r.ensureCapacity(math.max(a.len(), b.len()) + 1); + try r.ensureCapacity(@max(a.len(), b.len()) + 1); var m = r.toMutable(); m.sub(a.toConst(), b.toConst()); r.setMetadata(m.positive, m.len); @@ -3025,11 +3025,11 @@ pub const Managed = struct { } pub fn ensureAddScalarCapacity(r: *Managed, a: Const, scalar: anytype) !void { - try r.ensureCapacity(math.max(a.limbs.len, calcLimbLen(scalar)) + 1); + try r.ensureCapacity(@max(a.limbs.len, calcLimbLen(scalar)) + 1); } pub fn ensureAddCapacity(r: *Managed, a: Const, b: Const) !void { - try r.ensureCapacity(math.max(a.limbs.len, b.limbs.len) + 1); + try r.ensureCapacity(@max(a.limbs.len, b.limbs.len) + 1); } pub fn ensureMulCapacity(rma: *Managed, a: Const, b: Const) !void { @@ -3123,7 +3123,7 @@ pub const Managed = struct { /// /// a and b are zero-extended to the longer of a or b. pub fn bitOr(r: *Managed, a: *const Managed, b: *const Managed) !void { - try r.ensureCapacity(math.max(a.len(), b.len())); + try r.ensureCapacity(@max(a.len(), b.len())); var m = r.toMutable(); m.bitOr(a.toConst(), b.toConst()); r.setMetadata(m.positive, m.len); @@ -3132,9 +3132,9 @@ pub const Managed = struct { /// r = a & b pub fn bitAnd(r: *Managed, a: *const Managed, b: *const Managed) !void { const cap = if (a.isPositive() or b.isPositive()) - math.min(a.len(), b.len()) + @min(a.len(), b.len()) else - math.max(a.len(), b.len()) + 1; + @max(a.len(), b.len()) + 1; try r.ensureCapacity(cap); var m = r.toMutable(); m.bitAnd(a.toConst(), b.toConst()); @@ -3143,7 +3143,7 @@ pub const Managed = struct { /// r = a ^ b pub fn bitXor(r: *Managed, a: *const Managed, b: *const Managed) !void { - var cap = math.max(a.len(), b.len()) + @boolToInt(a.isPositive() != b.isPositive()); + var cap = @max(a.len(), b.len()) + @boolToInt(a.isPositive() != b.isPositive()); try r.ensureCapacity(cap); var m = r.toMutable(); @@ -3156,7 +3156,7 @@ pub const Managed = struct { /// /// rma's allocator is used for temporary storage to boost multiplication performance. pub fn gcd(rma: *Managed, x: *const Managed, y: *const Managed) !void { - try rma.ensureCapacity(math.min(x.len(), y.len())); + try rma.ensureCapacity(@min(x.len(), y.len())); var m = rma.toMutable(); var limbs_buffer = std.ArrayList(Limb).init(rma.allocator); defer limbs_buffer.deinit(); @@ -3356,13 +3356,13 @@ fn llmulaccKaratsuba( // For a1 and b1 we only need `limbs_after_split` limbs. 
const a1 = blk: { var a1 = a[split..]; - a1.len = math.min(llnormalize(a1), limbs_after_split); + a1.len = @min(llnormalize(a1), limbs_after_split); break :blk a1; }; const b1 = blk: { var b1 = b[split..]; - b1.len = math.min(llnormalize(b1), limbs_after_split); + b1.len = @min(llnormalize(b1), limbs_after_split); break :blk b1; }; @@ -3381,10 +3381,10 @@ fn llmulaccKaratsuba( // Compute p2. // Note, we don't need to compute all of p2, just enough limbs to satisfy r. - const p2_limbs = math.min(limbs_after_split, a1.len + b1.len); + const p2_limbs = @min(limbs_after_split, a1.len + b1.len); @memset(tmp[0..p2_limbs], 0); - llmulacc(.add, allocator, tmp[0..p2_limbs], a1[0..math.min(a1.len, p2_limbs)], b1[0..math.min(b1.len, p2_limbs)]); + llmulacc(.add, allocator, tmp[0..p2_limbs], a1[0..@min(a1.len, p2_limbs)], b1[0..@min(b1.len, p2_limbs)]); const p2 = tmp[0..llnormalize(tmp[0..p2_limbs])]; // Add p2 * B to the result. @@ -3392,7 +3392,7 @@ fn llmulaccKaratsuba( // Add p2 * B^2 to the result if required. if (limbs_after_split2 > 0) { - llaccum(op, r[split * 2 ..], p2[0..math.min(p2.len, limbs_after_split2)]); + llaccum(op, r[split * 2 ..], p2[0..@min(p2.len, limbs_after_split2)]); } // Compute p0. @@ -3406,13 +3406,13 @@ fn llmulaccKaratsuba( llaccum(op, r, p0); // Add p0 * B to the result. In this case, we may not need all of it. - llaccum(op, r[split..], p0[0..math.min(limbs_after_split, p0.len)]); + llaccum(op, r[split..], p0[0..@min(limbs_after_split, p0.len)]); // Finally, compute and add p1. // From now on we only need `limbs_after_split` limbs for a0 and b0, since the result of the // following computation will be added * B. - const a0x = a0[0..std.math.min(a0.len, limbs_after_split)]; - const b0x = b0[0..std.math.min(b0.len, limbs_after_split)]; + const a0x = a0[0..@min(a0.len, limbs_after_split)]; + const b0x = b0[0..@min(b0.len, limbs_after_split)]; const j0_sign = llcmp(a0x, a1); const j1_sign = llcmp(b1, b0x); @@ -3544,7 +3544,7 @@ fn llmulLimb(comptime op: AccOp, acc: []Limb, y: []const Limb, xi: Limb) bool { return false; } - const split = std.math.min(y.len, acc.len); + const split = @min(y.len, acc.len); var a_lo = acc[0..split]; var a_hi = acc[split..]; @@ -4023,8 +4023,8 @@ fn llsignedand(r: []Limb, a: []const Limb, a_positive: bool, b: []const Limb, b_ // r may alias. // a and b must not be -0. // Returns `true` when the result is positive. -// If the sign of a and b is equal, then r requires at least `max(a.len, b.len)` limbs are required. -// Otherwise, r requires at least `max(a.len, b.len) + 1` limbs. +// If the sign of a and b is equal, then r requires at least `@max(a.len, b.len)` limbs are required. +// Otherwise, r requires at least `@max(a.len, b.len) + 1` limbs. fn llsignedxor(r: []Limb, a: []const Limb, a_positive: bool, b: []const Limb, b_positive: bool) bool { @setRuntimeSafety(debug_safety); assert(a.len != 0 and b.len != 0); diff --git a/lib/std/math/ldexp.zig b/lib/std/math/ldexp.zig index d2fd8db9b7..8947475159 100644 --- a/lib/std/math/ldexp.zig +++ b/lib/std/math/ldexp.zig @@ -48,7 +48,7 @@ pub fn ldexp(x: anytype, n: i32) @TypeOf(x) { return @bitCast(T, sign_bit); // Severe underflow. 
Return +/- 0 // Result underflowed, we need to shift and round - const shift = @intCast(Log2Int(TBits), math.min(-n, -(exponent + n) + 1)); + const shift = @intCast(Log2Int(TBits), @min(-n, -(exponent + n) + 1)); const exact_tie: bool = @ctz(repr) == shift - 1; var result = repr & mantissa_mask; diff --git a/lib/std/mem.zig b/lib/std/mem.zig index c4ad708887..2f34745a64 100644 --- a/lib/std/mem.zig +++ b/lib/std/mem.zig @@ -596,7 +596,7 @@ pub fn sortUnstableContext(a: usize, b: usize, context: anytype) void { /// Compares two slices of numbers lexicographically. O(n). pub fn order(comptime T: type, lhs: []const T, rhs: []const T) math.Order { - const n = math.min(lhs.len, rhs.len); + const n = @min(lhs.len, rhs.len); var i: usize = 0; while (i < n) : (i += 1) { switch (math.order(lhs[i], rhs[i])) { @@ -642,7 +642,7 @@ pub fn eql(comptime T: type, a: []const T, b: []const T) bool { /// Compares two slices and returns the index of the first inequality. /// Returns null if the slices are equal. pub fn indexOfDiff(comptime T: type, a: []const T, b: []const T) ?usize { - const shortest = math.min(a.len, b.len); + const shortest = @min(a.len, b.len); if (a.ptr == b.ptr) return if (a.len == b.len) null else shortest; var index: usize = 0; @@ -3296,7 +3296,7 @@ pub fn min(comptime T: type, slice: []const T) T { assert(slice.len > 0); var best = slice[0]; for (slice[1..]) |item| { - best = math.min(best, item); + best = @min(best, item); } return best; } @@ -3313,7 +3313,7 @@ pub fn max(comptime T: type, slice: []const T) T { assert(slice.len > 0); var best = slice[0]; for (slice[1..]) |item| { - best = math.max(best, item); + best = @max(best, item); } return best; } @@ -3332,8 +3332,8 @@ pub fn minMax(comptime T: type, slice: []const T) struct { min: T, max: T } { var minVal = slice[0]; var maxVal = slice[0]; for (slice[1..]) |item| { - minVal = math.min(minVal, item); - maxVal = math.max(maxVal, item); + minVal = @min(minVal, item); + maxVal = @max(maxVal, item); } return .{ .min = minVal, .max = maxVal }; } diff --git a/lib/std/net.zig b/lib/std/net.zig index 64b13ec544..dfd6fe4a9e 100644 --- a/lib/std/net.zig +++ b/lib/std/net.zig @@ -1482,11 +1482,11 @@ fn getResolvConf(allocator: mem.Allocator, rc: *ResolvConf) !void { error.InvalidCharacter => continue, }; if (mem.eql(u8, name, "ndots")) { - rc.ndots = std.math.min(value, 15); + rc.ndots = @min(value, 15); } else if (mem.eql(u8, name, "attempts")) { - rc.attempts = std.math.min(value, 10); + rc.attempts = @min(value, 10); } else if (mem.eql(u8, name, "timeout")) { - rc.timeout = std.math.min(value, 60); + rc.timeout = @min(value, 60); } } } else if (mem.eql(u8, token, "nameserver")) { @@ -1615,7 +1615,7 @@ fn resMSendRc( } // Wait for a response, or until time to retry - const clamped_timeout = std.math.min(@as(u31, std.math.maxInt(u31)), t1 + retry_interval - t2); + const clamped_timeout = @min(@as(u31, std.math.maxInt(u31)), t1 + retry_interval - t2); const nevents = os.poll(&pfd, clamped_timeout) catch 0; if (nevents == 0) continue; diff --git a/lib/std/os/linux.zig b/lib/std/os/linux.zig index ef0ec94d3b..e4d6790505 100644 --- a/lib/std/os/linux.zig +++ b/lib/std/os/linux.zig @@ -317,7 +317,7 @@ pub fn getdents(fd: i32, dirp: [*]u8, len: usize) usize { .getdents, @bitCast(usize, @as(isize, fd)), @ptrToInt(dirp), - std.math.min(len, maxInt(c_int)), + @min(len, maxInt(c_int)), ); } @@ -326,7 +326,7 @@ pub fn getdents64(fd: i32, dirp: [*]u8, len: usize) usize { .getdents64, @bitCast(usize, @as(isize, fd)), @ptrToInt(dirp), - 
std.math.min(len, maxInt(c_int)), + @min(len, maxInt(c_int)), ); } diff --git a/lib/std/os/linux/io_uring.zig b/lib/std/os/linux/io_uring.zig index b7467d765f..0610b214d5 100644 --- a/lib/std/os/linux/io_uring.zig +++ b/lib/std/os/linux/io_uring.zig @@ -277,7 +277,7 @@ pub const IO_Uring = struct { fn copy_cqes_ready(self: *IO_Uring, cqes: []linux.io_uring_cqe, wait_nr: u32) u32 { _ = wait_nr; const ready = self.cq_ready(); - const count = std.math.min(cqes.len, ready); + const count = @min(cqes.len, ready); var head = self.cq.head.*; var tail = head +% count; // TODO Optimize this by using 1 or 2 memcpy's (if the tail wraps) rather than a loop. @@ -1093,7 +1093,7 @@ pub const SubmissionQueue = struct { pub fn init(fd: os.fd_t, p: linux.io_uring_params) !SubmissionQueue { assert(fd >= 0); assert((p.features & linux.IORING_FEAT_SINGLE_MMAP) != 0); - const size = std.math.max( + const size = @max( p.sq_off.array + p.sq_entries * @sizeOf(u32), p.cq_off.cqes + p.cq_entries * @sizeOf(linux.io_uring_cqe), ); diff --git a/lib/std/os/windows.zig b/lib/std/os/windows.zig index e559e48915..389c4bea12 100644 --- a/lib/std/os/windows.zig +++ b/lib/std/os/windows.zig @@ -272,7 +272,7 @@ pub fn RtlGenRandom(output: []u8) RtlGenRandomError!void { const max_read_size: ULONG = maxInt(ULONG); while (total_read < output.len) { - const to_read: ULONG = math.min(buff.len, max_read_size); + const to_read: ULONG = @min(buff.len, max_read_size); if (advapi32.RtlGenRandom(buff.ptr, to_read) == 0) { return unexpectedError(kernel32.GetLastError()); @@ -501,7 +501,7 @@ pub fn ReadFile(in_hFile: HANDLE, buffer: []u8, offset: ?u64, io_mode: std.io.Mo return @as(usize, bytes_transferred); } else { while (true) { - const want_read_count = @intCast(DWORD, math.min(@as(DWORD, maxInt(DWORD)), buffer.len)); + const want_read_count: DWORD = @min(@as(DWORD, maxInt(DWORD)), buffer.len); var amt_read: DWORD = undefined; var overlapped_data: OVERLAPPED = undefined; const overlapped: ?*OVERLAPPED = if (offset) |off| blk: { diff --git a/lib/std/pdb.zig b/lib/std/pdb.zig index 5bc836b08e..180507ba71 100644 --- a/lib/std/pdb.zig +++ b/lib/std/pdb.zig @@ -1049,7 +1049,7 @@ const MsfStream = struct { var size: usize = 0; var rem_buffer = buffer; while (size < buffer.len) { - const size_to_read = math.min(self.block_size - offset, rem_buffer.len); + const size_to_read = @min(self.block_size - offset, rem_buffer.len); size += try in.read(rem_buffer[0..size_to_read]); rem_buffer = buffer[size..]; offset += size_to_read; diff --git a/lib/std/rand.zig b/lib/std/rand.zig index 1e9f4051e9..f07562c911 100644 --- a/lib/std/rand.zig +++ b/lib/std/rand.zig @@ -410,7 +410,7 @@ pub const Random = struct { r.uintLessThan(T, sum) else if (comptime std.meta.trait.isFloat(T)) // take care that imprecision doesn't lead to a value slightly greater than sum - std.math.min(r.float(T) * sum, sum - std.math.floatEps(T)) + @min(r.float(T) * sum, sum - std.math.floatEps(T)) else @compileError("weightedIndex does not support proportions of type " ++ @typeName(T)); diff --git a/lib/std/sort/block.zig b/lib/std/sort/block.zig index 6c1be9c6c2..518d148a73 100644 --- a/lib/std/sort/block.zig +++ b/lib/std/sort/block.zig @@ -590,7 +590,7 @@ pub fn block( // whenever we leave an A block behind, we'll need to merge the previous A block with any B blocks that follow it, so track that information as well var lastA = firstA; var lastB = Range.init(0, 0); - var blockB = Range.init(B.start, B.start + math.min(block_size, B.length())); + var blockB = Range.init(B.start, 
B.start + @min(block_size, B.length())); blockA.start += firstA.length(); indexA = buffer1.start; @@ -849,7 +849,7 @@ fn findFirstForward( comptime lessThan: fn (@TypeOf(context), lhs: T, rhs: T) bool, ) usize { if (range.length() == 0) return range.start; - const skip = math.max(range.length() / unique, @as(usize, 1)); + const skip = @max(range.length() / unique, @as(usize, 1)); var index = range.start + skip; while (lessThan(context, items[index - 1], value)) : (index += skip) { @@ -871,7 +871,7 @@ fn findFirstBackward( comptime lessThan: fn (@TypeOf(context), lhs: T, rhs: T) bool, ) usize { if (range.length() == 0) return range.start; - const skip = math.max(range.length() / unique, @as(usize, 1)); + const skip = @max(range.length() / unique, @as(usize, 1)); var index = range.end - skip; while (index > range.start and !lessThan(context, items[index - 1], value)) : (index -= skip) { @@ -893,7 +893,7 @@ fn findLastForward( comptime lessThan: fn (@TypeOf(context), lhs: T, rhs: T) bool, ) usize { if (range.length() == 0) return range.start; - const skip = math.max(range.length() / unique, @as(usize, 1)); + const skip = @max(range.length() / unique, @as(usize, 1)); var index = range.start + skip; while (!lessThan(context, value, items[index - 1])) : (index += skip) { @@ -915,7 +915,7 @@ fn findLastBackward( comptime lessThan: fn (@TypeOf(context), lhs: T, rhs: T) bool, ) usize { if (range.length() == 0) return range.start; - const skip = math.max(range.length() / unique, @as(usize, 1)); + const skip = @max(range.length() / unique, @as(usize, 1)); var index = range.end - skip; while (index > range.start and lessThan(context, value, items[index - 1])) : (index -= skip) { diff --git a/lib/std/zig/render.zig b/lib/std/zig/render.zig index 83fa68567f..3930c9714a 100644 --- a/lib/std/zig/render.zig +++ b/lib/std/zig/render.zig @@ -1960,7 +1960,7 @@ fn renderArrayInit( if (!this_contains_newline) { const column = column_counter % row_size; - column_widths[column] = std.math.max(column_widths[column], width); + column_widths[column] = @max(column_widths[column], width); const expr_last_token = tree.lastToken(expr) + 1; const next_expr = section_exprs[i + 1]; @@ -1980,7 +1980,7 @@ fn renderArrayInit( if (!contains_newline) { const column = column_counter % row_size; - column_widths[column] = std.math.max(column_widths[column], width); + column_widths[column] = @max(column_widths[column], width); } } } diff --git a/lib/std/zig/system/NativeTargetInfo.zig b/lib/std/zig/system/NativeTargetInfo.zig index f17356fdcd..cddaea2295 100644 --- a/lib/std/zig/system/NativeTargetInfo.zig +++ b/lib/std/zig/system/NativeTargetInfo.zig @@ -503,7 +503,7 @@ fn glibcVerFromSoFile(file: fs.File) !std.builtin.Version { const shstrtab_off = elfInt(is_64, need_bswap, shstr32.sh_offset, shstr64.sh_offset); const shstrtab_size = elfInt(is_64, need_bswap, shstr32.sh_size, shstr64.sh_size); var strtab_buf: [4096:0]u8 = undefined; - const shstrtab_len = std.math.min(shstrtab_size, strtab_buf.len); + const shstrtab_len = @min(shstrtab_size, strtab_buf.len); const shstrtab_read_len = try preadMin(file, &strtab_buf, shstrtab_off, shstrtab_len); const shstrtab = strtab_buf[0..shstrtab_read_len]; const shnum = elfInt(is_64, need_bswap, hdr32.e_shnum, hdr64.e_shnum); @@ -757,7 +757,7 @@ pub fn abiAndDynamicLinkerFromFile( const shstrtab_off = elfInt(is_64, need_bswap, shstr32.sh_offset, shstr64.sh_offset); const shstrtab_size = elfInt(is_64, need_bswap, shstr32.sh_size, shstr64.sh_size); var strtab_buf: [4096:0]u8 = undefined; - 
const shstrtab_len = std.math.min(shstrtab_size, strtab_buf.len); + const shstrtab_len = @min(shstrtab_size, strtab_buf.len); const shstrtab_read_len = try preadMin(file, &strtab_buf, shstrtab_off, shstrtab_len); const shstrtab = strtab_buf[0..shstrtab_read_len]; @@ -806,7 +806,7 @@ pub fn abiAndDynamicLinkerFromFile( const rpoff_file = ds.offset + rpoff_usize; const rp_max_size = ds.size - rpoff_usize; - const strtab_len = std.math.min(rp_max_size, strtab_buf.len); + const strtab_len = @min(rp_max_size, strtab_buf.len); const strtab_read_len = try preadMin(file, &strtab_buf, rpoff_file, strtab_len); const strtab = strtab_buf[0..strtab_read_len]; diff --git a/src/Sema.zig b/src/Sema.zig index 99ebd044f9..36fe5a6ee8 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -22367,9 +22367,9 @@ fn analyzeShuffle( // to it up to the length of the longer vector. This recursion terminates // in 1 call because these calls to analyzeShuffle guarantee a_len == b_len. if (a_len != b_len) { - const min_len = std.math.min(a_len, b_len); + const min_len = @min(a_len, b_len); const max_src = if (a_len > b_len) a_src else b_src; - const max_len = try sema.usizeCast(block, max_src, std.math.max(a_len, b_len)); + const max_len = try sema.usizeCast(block, max_src, @max(a_len, b_len)); const expand_mask_values = try sema.arena.alloc(InternPool.Index, max_len); for (@intCast(usize, 0)..@intCast(usize, min_len)) |i| { @@ -31301,7 +31301,7 @@ fn cmpNumeric( } const dest_ty = if (dest_float_type) |ft| ft else blk: { - const max_bits = std.math.max(lhs_bits, rhs_bits); + const max_bits = @max(lhs_bits, rhs_bits); const casted_bits = std.math.cast(u16, max_bits) orelse return sema.fail(block, src, "{d} exceeds maximum integer bit count", .{max_bits}); const signedness: std.builtin.Signedness = if (dest_int_is_signed) .signed else .unsigned; break :blk try mod.intType(signedness, casted_bits); @@ -35828,7 +35828,7 @@ fn intAddScalar(sema: *Sema, lhs: Value, rhs: Value, scalar_ty: Type) !Value { const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, sema); const limbs = try sema.arena.alloc( std.math.big.Limb, - std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1, + @max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1, ); var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined }; result_bigint.add(lhs_bigint, rhs_bigint); @@ -35918,7 +35918,7 @@ fn intSubScalar(sema: *Sema, lhs: Value, rhs: Value, scalar_ty: Type) !Value { const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, sema); const limbs = try sema.arena.alloc( std.math.big.Limb, - std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1, + @max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1, ); var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined }; result_bigint.sub(lhs_bigint, rhs_bigint); diff --git a/src/TypedValue.zig b/src/TypedValue.zig index 9d3fb67d1f..93454710dc 100644 --- a/src/TypedValue.zig +++ b/src/TypedValue.zig @@ -111,7 +111,7 @@ pub fn print( .val = val.castTag(.repeated).?.data, }; const len = ty.arrayLen(mod); - const max_len = std.math.min(len, max_aggregate_items); + const max_len = @min(len, max_aggregate_items); while (i < max_len) : (i += 1) { if (i != 0) try writer.writeAll(", "); try print(elem_tv, writer, level - 1, mod); @@ -130,7 +130,7 @@ pub fn print( const len = payload.len.toUnsignedInt(mod); if (elem_ty.eql(Type.u8, mod)) str: { - const max_len = @intCast(usize, std.math.min(len, max_string_len)); + const 
max_len: usize = @min(len, max_string_len); var buf: [max_string_len]u8 = undefined; var i: u32 = 0; @@ -149,7 +149,7 @@ pub fn print( try writer.writeAll(".{ "); - const max_len = std.math.min(len, max_aggregate_items); + const max_len = @min(len, max_aggregate_items); var i: u32 = 0; while (i < max_len) : (i += 1) { if (i != 0) try writer.writeAll(", "); @@ -455,7 +455,7 @@ fn printAggregate( const len = ty.arrayLen(mod); if (elem_ty.eql(Type.u8, mod)) str: { - const max_len = @intCast(usize, std.math.min(len, max_string_len)); + const max_len: usize = @min(len, max_string_len); var buf: [max_string_len]u8 = undefined; var i: u32 = 0; @@ -471,7 +471,7 @@ fn printAggregate( try writer.writeAll(".{ "); - const max_len = std.math.min(len, max_aggregate_items); + const max_len = @min(len, max_aggregate_items); var i: u32 = 0; while (i < max_len) : (i += 1) { if (i != 0) try writer.writeAll(", "); diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index a1b57516ee..6d98ecce4f 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -2907,7 +2907,7 @@ fn airMulDivBinOp(self: *Self, inst: Air.Inst.Index) !void { const dst_info = dst_ty.intInfo(mod); const src_ty = try mod.intType(dst_info.signedness, switch (tag) { else => unreachable, - .mul, .mulwrap => math.max3( + .mul, .mulwrap => @max( self.activeIntBits(bin_op.lhs), self.activeIntBits(bin_op.rhs), dst_info.bits / 2, @@ -3349,7 +3349,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const lhs_active_bits = self.activeIntBits(bin_op.lhs); const rhs_active_bits = self.activeIntBits(bin_op.rhs); - const src_bits = math.max3(lhs_active_bits, rhs_active_bits, dst_info.bits / 2); + const src_bits = @max(lhs_active_bits, rhs_active_bits, dst_info.bits / 2); const src_ty = try mod.intType(dst_info.signedness, src_bits); const lhs = try self.resolveInst(bin_op.lhs); diff --git a/src/link/Elf.zig b/src/link/Elf.zig index 409eca6e7a..0863a22fac 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -2326,7 +2326,7 @@ fn allocateAtom(self: *Elf, atom_index: Atom.Index, new_block_size: u64, alignme self.debug_aranges_section_dirty = true; } } - shdr.sh_addralign = math.max(shdr.sh_addralign, alignment); + shdr.sh_addralign = @max(shdr.sh_addralign, alignment); // This function can also reallocate an atom. 
// In this case we need to "unplug" it from its previous location before diff --git a/src/link/MachO/CodeSignature.zig b/src/link/MachO/CodeSignature.zig index 59b3e50b07..4709560ba7 100644 --- a/src/link/MachO/CodeSignature.zig +++ b/src/link/MachO/CodeSignature.zig @@ -99,7 +99,7 @@ const CodeDirectory = struct { fn addSpecialHash(self: *CodeDirectory, index: u32, hash: [hash_size]u8) void { assert(index > 0); - self.inner.nSpecialSlots = std.math.max(self.inner.nSpecialSlots, index); + self.inner.nSpecialSlots = @max(self.inner.nSpecialSlots, index); self.special_slots[index - 1] = hash; } @@ -426,11 +426,11 @@ pub fn estimateSize(self: CodeSignature, file_size: u64) u32 { var n_special_slots: u32 = 0; if (self.requirements) |req| { ssize += @sizeOf(macho.BlobIndex) + req.size(); - n_special_slots = std.math.max(n_special_slots, req.slotType()); + n_special_slots = @max(n_special_slots, req.slotType()); } if (self.entitlements) |ent| { ssize += @sizeOf(macho.BlobIndex) + ent.size() + hash_size; - n_special_slots = std.math.max(n_special_slots, ent.slotType()); + n_special_slots = @max(n_special_slots, ent.slotType()); } if (self.signature) |sig| { ssize += @sizeOf(macho.BlobIndex) + sig.size(); diff --git a/src/link/MachO/Object.zig b/src/link/MachO/Object.zig index b218fdbd2d..105a806075 100644 --- a/src/link/MachO/Object.zig +++ b/src/link/MachO/Object.zig @@ -530,7 +530,7 @@ pub fn splitRegularSections(self: *Object, zld: *Zld, object_id: u32) !void { sect.addr + sect.size - addr; const atom_align = if (addr > 0) - math.min(@ctz(addr), sect.@"align") + @min(@ctz(addr), sect.@"align") else sect.@"align"; diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig index fdac7dfa63..5126033995 100644 --- a/src/link/Wasm.zig +++ b/src/link/Wasm.zig @@ -2027,7 +2027,7 @@ fn parseAtom(wasm: *Wasm, atom_index: Atom.Index, kind: Kind) !void { }; const segment: *Segment = &wasm.segments.items[final_index]; - segment.alignment = std.math.max(segment.alignment, atom.alignment); + segment.alignment = @max(segment.alignment, atom.alignment); try wasm.appendAtomAtIndex(final_index, atom_index); } diff --git a/src/link/Wasm/Object.zig b/src/link/Wasm/Object.zig index 363648971a..33f54dece5 100644 --- a/src/link/Wasm/Object.zig +++ b/src/link/Wasm/Object.zig @@ -979,7 +979,7 @@ pub fn parseIntoAtoms(object: *Object, gpa: Allocator, object_index: u16, wasm_b const segment: *Wasm.Segment = &wasm_bin.segments.items[final_index]; if (relocatable_data.type == .data) { //code section and debug sections are 1-byte aligned - segment.alignment = std.math.max(segment.alignment, atom.alignment); + segment.alignment = @max(segment.alignment, atom.alignment); } try wasm_bin.appendAtomAtIndex(final_index, atom_index); diff --git a/src/main.zig b/src/main.zig index 5d666840c0..aedca80d26 100644 --- a/src/main.zig +++ b/src/main.zig @@ -5391,7 +5391,7 @@ fn gimmeMoreOfThoseSweetSweetFileDescriptors() void { // setrlimit() now returns with errno set to EINVAL in places that historically succeeded. // It no longer accepts "rlim_cur = RLIM.INFINITY" for RLIM.NOFILE. // Use "rlim_cur = min(OPEN_MAX, rlim_max)". 
- lim.max = std.math.min(std.os.darwin.OPEN_MAX, lim.max); + lim.max = @min(std.os.darwin.OPEN_MAX, lim.max); } if (lim.cur == lim.max) return; diff --git a/src/translate_c.zig b/src/translate_c.zig index 8cc2d1856c..67176ff74b 100644 --- a/src/translate_c.zig +++ b/src/translate_c.zig @@ -2400,7 +2400,7 @@ fn transStringLiteralInitializer( if (array_size == 0) return Tag.empty_array.create(c.arena, elem_type); - const num_inits = math.min(str_length, array_size); + const num_inits = @min(str_length, array_size); const init_node = if (num_inits > 0) blk: { if (is_narrow) { // "string literal".* or string literal"[0..num_inits].* diff --git a/src/translate_c/ast.zig b/src/translate_c/ast.zig index 6c6bbf28bd..443c56a84a 100644 --- a/src/translate_c/ast.zig +++ b/src/translate_c/ast.zig @@ -1824,7 +1824,7 @@ fn renderNode(c: *Context, node: Node) Allocator.Error!NodeIndex { }, .switch_prong => { const payload = node.castTag(.switch_prong).?.data; - var items = try c.gpa.alloc(NodeIndex, std.math.max(payload.cases.len, 1)); + var items = try c.gpa.alloc(NodeIndex, @max(payload.cases.len, 1)); defer c.gpa.free(items); items[0] = 0; for (payload.cases, 0..) |item, i| { @@ -1973,7 +1973,7 @@ fn renderNode(c: *Context, node: Node) Allocator.Error!NodeIndex { const payload = node.castTag(.tuple).?.data; _ = try c.addToken(.period, "."); const l_brace = try c.addToken(.l_brace, "{"); - var inits = try c.gpa.alloc(NodeIndex, std.math.max(payload.len, 2)); + var inits = try c.gpa.alloc(NodeIndex, @max(payload.len, 2)); defer c.gpa.free(inits); inits[0] = 0; inits[1] = 0; @@ -2007,7 +2007,7 @@ fn renderNode(c: *Context, node: Node) Allocator.Error!NodeIndex { const payload = node.castTag(.container_init_dot).?.data; _ = try c.addToken(.period, "."); const l_brace = try c.addToken(.l_brace, "{"); - var inits = try c.gpa.alloc(NodeIndex, std.math.max(payload.len, 2)); + var inits = try c.gpa.alloc(NodeIndex, @max(payload.len, 2)); defer c.gpa.free(inits); inits[0] = 0; inits[1] = 0; @@ -2046,7 +2046,7 @@ fn renderNode(c: *Context, node: Node) Allocator.Error!NodeIndex { const lhs = try renderNode(c, payload.lhs); const l_brace = try c.addToken(.l_brace, "{"); - var inits = try c.gpa.alloc(NodeIndex, std.math.max(payload.inits.len, 1)); + var inits = try c.gpa.alloc(NodeIndex, @max(payload.inits.len, 1)); defer c.gpa.free(inits); inits[0] = 0; for (payload.inits, 0..) |init, i| { @@ -2102,7 +2102,7 @@ fn renderRecord(c: *Context, node: Node) !NodeIndex { const num_vars = payload.variables.len; const num_funcs = payload.functions.len; const total_members = payload.fields.len + num_vars + num_funcs; - const members = try c.gpa.alloc(NodeIndex, std.math.max(total_members, 2)); + const members = try c.gpa.alloc(NodeIndex, @max(total_members, 2)); defer c.gpa.free(members); members[0] = 0; members[1] = 0; @@ -2195,7 +2195,7 @@ fn renderFieldAccess(c: *Context, lhs: NodeIndex, field_name: []const u8) !NodeI fn renderArrayInit(c: *Context, lhs: NodeIndex, inits: []const Node) !NodeIndex { const l_brace = try c.addToken(.l_brace, "{"); - var rendered = try c.gpa.alloc(NodeIndex, std.math.max(inits.len, 1)); + var rendered = try c.gpa.alloc(NodeIndex, @max(inits.len, 1)); defer c.gpa.free(rendered); rendered[0] = 0; for (inits, 0..) 
|init, i| { @@ -2904,7 +2904,7 @@ fn renderMacroFunc(c: *Context, node: Node) !NodeIndex { fn renderParams(c: *Context, params: []Payload.Param, is_var_args: bool) !std.ArrayList(NodeIndex) { _ = try c.addToken(.l_paren, "("); - var rendered = try std.ArrayList(NodeIndex).initCapacity(c.gpa, std.math.max(params.len, 1)); + var rendered = try std.ArrayList(NodeIndex).initCapacity(c.gpa, @max(params.len, 1)); errdefer rendered.deinit(); for (params, 0..) |param, i| { diff --git a/src/type.zig b/src/type.zig index 22523a7141..bb82a50682 100644 --- a/src/type.zig +++ b/src/type.zig @@ -1633,7 +1633,7 @@ pub const Type = struct { const len = array_type.len + @boolToInt(array_type.sentinel != .none); if (len == 0) return 0; const elem_ty = array_type.child.toType(); - const elem_size = std.math.max(elem_ty.abiAlignment(mod), elem_ty.abiSize(mod)); + const elem_size = @max(elem_ty.abiAlignment(mod), elem_ty.abiSize(mod)); if (elem_size == 0) return 0; const elem_bit_size = try bitSizeAdvanced(elem_ty, mod, opt_sema); return (len - 1) * 8 * elem_size + elem_bit_size; diff --git a/src/value.zig b/src/value.zig index 85204e2b10..8590aa8872 100644 --- a/src/value.zig +++ b/src/value.zig @@ -2458,7 +2458,7 @@ pub const Value = struct { const rhs_bigint = rhs.toBigInt(&rhs_space, mod); const limbs = try arena.alloc( std.math.big.Limb, - std.math.max( + @max( // For the saturate std.math.big.int.calcTwosCompLimbCount(info.bits), lhs_bigint.limbs.len + rhs_bigint.limbs.len, @@ -2572,7 +2572,7 @@ pub const Value = struct { const limbs = try arena.alloc( std.math.big.Limb, // + 1 for negatives - std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1, + @max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1, ); var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; result_bigint.bitAnd(lhs_bigint, rhs_bigint); @@ -2638,7 +2638,7 @@ pub const Value = struct { const rhs_bigint = rhs.toBigInt(&rhs_space, mod); const limbs = try arena.alloc( std.math.big.Limb, - std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len), + @max(lhs_bigint.limbs.len, rhs_bigint.limbs.len), ); var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; result_bigint.bitOr(lhs_bigint, rhs_bigint); @@ -2677,7 +2677,7 @@ pub const Value = struct { const limbs = try arena.alloc( std.math.big.Limb, // + 1 for negatives - std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1, + @max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1, ); var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; result_bigint.bitXor(lhs_bigint, rhs_bigint); -- cgit v1.2.3 From d41111d7ef531f6f55a19c56205d6d2f1134c224 Mon Sep 17 00:00:00 2001 From: Motiejus Jakštys Date: Fri, 9 Jun 2023 16:02:18 -0700 Subject: mem: rename align*Generic to mem.align* Anecdote 1: The generic version is way more popular than the non-generic one in Zig codebase: git grep -w alignForward | wc -l 56 git grep -w alignForwardGeneric | wc -l 149 git grep -w alignBackward | wc -l 6 git grep -w alignBackwardGeneric | wc -l 15 Anecdote 2: In my project (turbonss) that does much arithmetic and alignment I exclusively use the Generic functions. Anecdote 3: we used only the Generic versions in the Macho Man's linker workshop. 
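For downstream callers the migration is mechanical. A minimal sketch, using
hypothetical call sites that are not part of this diff (the expected values
mirror the updated std.mem tests below):

    const std = @import("std");
    const mem = std.mem;

    test "migrating to the renamed align functions" {
        // Former alignForward(addr, alignment): pass the integer type first.
        try std.testing.expect(mem.alignForward(usize, 17, 8) == 24);
        // Former alignForwardGeneric(T, addr, alignment): drop the Generic suffix.
        try std.testing.expect(mem.alignForward(u64, 513, 512) == 1024);
        // alignBackward follows the same pattern.
        try std.testing.expect(mem.alignBackward(usize, 17, 8) == 16);
    }

The old names are kept as @compileError stubs pointing at the new ones, so
stale callers fail loudly at compile time instead of silently resolving.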
--- lib/std/Thread.zig | 8 ++--- lib/std/dynamic_library.zig | 4 +-- lib/std/hash_map.zig | 12 +++---- lib/std/heap.zig | 8 ++--- lib/std/heap/PageAllocator.zig | 12 +++---- lib/std/heap/WasmPageAllocator.zig | 6 ++-- lib/std/heap/arena_allocator.zig | 2 +- lib/std/heap/general_purpose_allocator.zig | 1 + lib/std/mem.zig | 56 ++++++++++++------------------ lib/std/mem/Allocator.zig | 4 +-- lib/std/meta/trailer_flags.zig | 6 ++-- lib/std/os/linux/tls.zig | 8 ++--- lib/std/os/uefi/pool_allocator.zig | 4 +-- lib/std/tar.zig | 2 +- lib/std/target.zig | 2 +- lib/std/testing.zig | 2 +- src/Module.zig | 10 +++--- src/arch/aarch64/CodeGen.zig | 6 ++-- src/arch/arm/CodeGen.zig | 10 +++--- src/arch/arm/abi.zig | 2 +- src/arch/riscv64/CodeGen.zig | 2 +- src/arch/sparc64/CodeGen.zig | 4 +-- src/arch/wasm/CodeGen.zig | 8 ++--- src/arch/x86_64/CodeGen.zig | 10 +++--- src/codegen.zig | 8 ++--- src/codegen/llvm.zig | 44 +++++++++++------------ src/codegen/spirv.zig | 4 +-- src/link/Coff.zig | 30 ++++++++-------- src/link/Dwarf.zig | 2 +- src/link/Elf.zig | 10 +++--- src/link/MachO.zig | 50 +++++++++++++------------- src/link/MachO/CodeSignature.zig | 8 ++--- src/link/MachO/DebugSymbols.zig | 18 +++++----- src/link/MachO/load_commands.zig | 8 ++--- src/link/MachO/thunks.zig | 6 ++-- src/link/MachO/zld.zig | 34 +++++++++--------- src/link/Wasm.zig | 16 ++++----- src/objcopy.zig | 6 ++-- src/type.zig | 22 ++++++------ 39 files changed, 223 insertions(+), 232 deletions(-) (limited to 'src/arch') diff --git a/lib/std/Thread.zig b/lib/std/Thread.zig index 76650a9072..d7bcbee66f 100644 --- a/lib/std/Thread.zig +++ b/lib/std/Thread.zig @@ -931,18 +931,18 @@ const LinuxThreadImpl = struct { guard_offset = bytes; bytes += @max(page_size, config.stack_size); - bytes = std.mem.alignForward(bytes, page_size); + bytes = std.mem.alignForward(usize, bytes, page_size); stack_offset = bytes; - bytes = std.mem.alignForward(bytes, linux.tls.tls_image.alloc_align); + bytes = std.mem.alignForward(usize, bytes, linux.tls.tls_image.alloc_align); tls_offset = bytes; bytes += linux.tls.tls_image.alloc_size; - bytes = std.mem.alignForward(bytes, @alignOf(Instance)); + bytes = std.mem.alignForward(usize, bytes, @alignOf(Instance)); instance_offset = bytes; bytes += @sizeOf(Instance); - bytes = std.mem.alignForward(bytes, page_size); + bytes = std.mem.alignForward(usize, bytes, page_size); break :blk bytes; }; diff --git a/lib/std/dynamic_library.zig b/lib/std/dynamic_library.zig index 94da2f4d6d..928d0cc9c3 100644 --- a/lib/std/dynamic_library.zig +++ b/lib/std/dynamic_library.zig @@ -124,7 +124,7 @@ pub const ElfDynLib = struct { // corresponding to the actual LOAD sections. 
const file_bytes = try os.mmap( null, - mem.alignForward(size, mem.page_size), + mem.alignForward(usize, size, mem.page_size), os.PROT.READ, os.MAP.PRIVATE, fd, @@ -187,7 +187,7 @@ pub const ElfDynLib = struct { // extra nonsense mapped before/after the VirtAddr,MemSiz const aligned_addr = (base + ph.p_vaddr) & ~(@as(usize, mem.page_size) - 1); const extra_bytes = (base + ph.p_vaddr) - aligned_addr; - const extended_memsz = mem.alignForward(ph.p_memsz + extra_bytes, mem.page_size); + const extended_memsz = mem.alignForward(usize, ph.p_memsz + extra_bytes, mem.page_size); const ptr = @intToPtr([*]align(mem.page_size) u8, aligned_addr); const prot = elfToMmapProt(ph.p_flags); if ((ph.p_flags & elf.PF_W) == 0) { diff --git a/lib/std/hash_map.zig b/lib/std/hash_map.zig index 5b539ddaad..8c05dfeca5 100644 --- a/lib/std/hash_map.zig +++ b/lib/std/hash_map.zig @@ -1545,13 +1545,13 @@ pub fn HashMapUnmanaged( const meta_size = @sizeOf(Header) + new_capacity * @sizeOf(Metadata); comptime assert(@alignOf(Metadata) == 1); - const keys_start = std.mem.alignForward(meta_size, key_align); + const keys_start = std.mem.alignForward(usize, meta_size, key_align); const keys_end = keys_start + new_capacity * @sizeOf(K); - const vals_start = std.mem.alignForward(keys_end, val_align); + const vals_start = std.mem.alignForward(usize, keys_end, val_align); const vals_end = vals_start + new_capacity * @sizeOf(V); - const total_size = std.mem.alignForward(vals_end, max_align); + const total_size = std.mem.alignForward(usize, vals_end, max_align); const slice = try allocator.alignedAlloc(u8, max_align, total_size); const ptr = @ptrToInt(slice.ptr); @@ -1581,13 +1581,13 @@ pub fn HashMapUnmanaged( const meta_size = @sizeOf(Header) + cap * @sizeOf(Metadata); comptime assert(@alignOf(Metadata) == 1); - const keys_start = std.mem.alignForward(meta_size, key_align); + const keys_start = std.mem.alignForward(usize, meta_size, key_align); const keys_end = keys_start + cap * @sizeOf(K); - const vals_start = std.mem.alignForward(keys_end, val_align); + const vals_start = std.mem.alignForward(usize, keys_end, val_align); const vals_end = vals_start + cap * @sizeOf(V); - const total_size = std.mem.alignForward(vals_end, max_align); + const total_size = std.mem.alignForward(usize, vals_end, max_align); const slice = @intToPtr([*]align(max_align) u8, @ptrToInt(self.header()))[0..total_size]; allocator.free(slice); diff --git a/lib/std/heap.zig b/lib/std/heap.zig index 7d2a66df1e..7b4bf3af21 100644 --- a/lib/std/heap.zig +++ b/lib/std/heap.zig @@ -83,7 +83,7 @@ const CAllocator = struct { // the aligned address. 
var unaligned_ptr = @ptrCast([*]u8, c.malloc(len + alignment - 1 + @sizeOf(usize)) orelse return null); const unaligned_addr = @ptrToInt(unaligned_ptr); - const aligned_addr = mem.alignForward(unaligned_addr + @sizeOf(usize), alignment); + const aligned_addr = mem.alignForward(usize, unaligned_addr + @sizeOf(usize), alignment); var aligned_ptr = unaligned_ptr + (aligned_addr - unaligned_addr); getHeader(aligned_ptr).* = unaligned_ptr; @@ -249,7 +249,7 @@ pub const wasm_allocator = Allocator{ /// Verifies that the adjusted length will still map to the full length pub fn alignPageAllocLen(full_len: usize, len: usize) usize { const aligned_len = mem.alignAllocLen(full_len, len); - assert(mem.alignForward(aligned_len, mem.page_size) == full_len); + assert(mem.alignForward(usize, aligned_len, mem.page_size) == full_len); return aligned_len; } @@ -307,7 +307,7 @@ pub const HeapAllocator = switch (builtin.os.tag) { }; const ptr = os.windows.kernel32.HeapAlloc(heap_handle, 0, amt) orelse return null; const root_addr = @ptrToInt(ptr); - const aligned_addr = mem.alignForward(root_addr, ptr_align); + const aligned_addr = mem.alignForward(usize, root_addr, ptr_align); const buf = @intToPtr([*]u8, aligned_addr)[0..n]; getRecordPtr(buf).* = root_addr; return buf.ptr; @@ -840,7 +840,7 @@ pub fn testAllocatorAlignedShrink(base_allocator: mem.Allocator) !void { // which is 16 pages, hence the 32. This test may require to increase // the size of the allocations feeding the `allocator` parameter if they // fail, because of this high over-alignment we want to have. - while (@ptrToInt(slice.ptr) == mem.alignForward(@ptrToInt(slice.ptr), mem.page_size * 32)) { + while (@ptrToInt(slice.ptr) == mem.alignForward(usize, @ptrToInt(slice.ptr), mem.page_size * 32)) { try stuff_to_free.append(slice); slice = try allocator.alignedAlloc(u8, 16, alloc_size); } diff --git a/lib/std/heap/PageAllocator.zig b/lib/std/heap/PageAllocator.zig index 2c8146caf3..5da570fa42 100644 --- a/lib/std/heap/PageAllocator.zig +++ b/lib/std/heap/PageAllocator.zig @@ -17,7 +17,7 @@ fn alloc(_: *anyopaque, n: usize, log2_align: u8, ra: usize) ?[*]u8 { _ = log2_align; assert(n > 0); if (n > maxInt(usize) - (mem.page_size - 1)) return null; - const aligned_len = mem.alignForward(n, mem.page_size); + const aligned_len = mem.alignForward(usize, n, mem.page_size); if (builtin.os.tag == .windows) { const w = os.windows; @@ -54,14 +54,14 @@ fn resize( ) bool { _ = log2_buf_align; _ = return_address; - const new_size_aligned = mem.alignForward(new_size, mem.page_size); + const new_size_aligned = mem.alignForward(usize, new_size, mem.page_size); if (builtin.os.tag == .windows) { const w = os.windows; if (new_size <= buf_unaligned.len) { const base_addr = @ptrToInt(buf_unaligned.ptr); const old_addr_end = base_addr + buf_unaligned.len; - const new_addr_end = mem.alignForward(base_addr + new_size, mem.page_size); + const new_addr_end = mem.alignForward(usize, base_addr + new_size, mem.page_size); if (old_addr_end > new_addr_end) { // For shrinking that is not releasing, we will only // decommit the pages not needed anymore. 
@@ -73,14 +73,14 @@ fn resize( } return true; } - const old_size_aligned = mem.alignForward(buf_unaligned.len, mem.page_size); + const old_size_aligned = mem.alignForward(usize, buf_unaligned.len, mem.page_size); if (new_size_aligned <= old_size_aligned) { return true; } return false; } - const buf_aligned_len = mem.alignForward(buf_unaligned.len, mem.page_size); + const buf_aligned_len = mem.alignForward(usize, buf_unaligned.len, mem.page_size); if (new_size_aligned == buf_aligned_len) return true; @@ -103,7 +103,7 @@ fn free(_: *anyopaque, slice: []u8, log2_buf_align: u8, return_address: usize) v if (builtin.os.tag == .windows) { os.windows.VirtualFree(slice.ptr, 0, os.windows.MEM_RELEASE); } else { - const buf_aligned_len = mem.alignForward(slice.len, mem.page_size); + const buf_aligned_len = mem.alignForward(usize, slice.len, mem.page_size); const ptr = @alignCast(mem.page_size, slice.ptr); os.munmap(ptr[0..buf_aligned_len]); } diff --git a/lib/std/heap/WasmPageAllocator.zig b/lib/std/heap/WasmPageAllocator.zig index 1370af022c..63ae226196 100644 --- a/lib/std/heap/WasmPageAllocator.zig +++ b/lib/std/heap/WasmPageAllocator.zig @@ -100,7 +100,7 @@ fn extendedOffset() usize { } fn nPages(memsize: usize) usize { - return mem.alignForward(memsize, mem.page_size) / mem.page_size; + return mem.alignForward(usize, memsize, mem.page_size) / mem.page_size; } fn alloc(ctx: *anyopaque, len: usize, log2_align: u8, ra: usize) ?[*]u8 { @@ -170,7 +170,7 @@ fn resize( _ = ctx; _ = log2_buf_align; _ = return_address; - const aligned_len = mem.alignForward(buf.len, mem.page_size); + const aligned_len = mem.alignForward(usize, buf.len, mem.page_size); if (new_len > aligned_len) return false; const current_n = nPages(aligned_len); const new_n = nPages(new_len); @@ -190,7 +190,7 @@ fn free( _ = ctx; _ = log2_buf_align; _ = return_address; - const aligned_len = mem.alignForward(buf.len, mem.page_size); + const aligned_len = mem.alignForward(usize, buf.len, mem.page_size); const current_n = nPages(aligned_len); const base = nPages(@ptrToInt(buf.ptr)); freePages(base, base + current_n); diff --git a/lib/std/heap/arena_allocator.zig b/lib/std/heap/arena_allocator.zig index c7e0569067..f858510bcf 100644 --- a/lib/std/heap/arena_allocator.zig +++ b/lib/std/heap/arena_allocator.zig @@ -186,7 +186,7 @@ pub const ArenaAllocator = struct { const cur_alloc_buf = @ptrCast([*]u8, cur_node)[0..cur_node.data]; const cur_buf = cur_alloc_buf[@sizeOf(BufNode)..]; const addr = @ptrToInt(cur_buf.ptr) + self.state.end_index; - const adjusted_addr = mem.alignForward(addr, ptr_align); + const adjusted_addr = mem.alignForward(usize, addr, ptr_align); const adjusted_index = self.state.end_index + (adjusted_addr - addr); const new_end_index = adjusted_index + n; diff --git a/lib/std/heap/general_purpose_allocator.zig b/lib/std/heap/general_purpose_allocator.zig index ef88787fc6..51b6c1744f 100644 --- a/lib/std/heap/general_purpose_allocator.zig +++ b/lib/std/heap/general_purpose_allocator.zig @@ -309,6 +309,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { fn bucketStackFramesStart(size_class: usize) usize { return mem.alignForward( + usize, @sizeOf(BucketHeader) + usedBitsCount(size_class), @alignOf(usize), ); diff --git a/lib/std/mem.zig b/lib/std/mem.zig index 87f436d156..23e24b0c09 100644 --- a/lib/std/mem.zig +++ b/lib/std/mem.zig @@ -4213,23 +4213,17 @@ test "sliceAsBytes preserves pointer attributes" { /// Round an address up to the next (or current) aligned address. 
/// The alignment must be a power of 2 and greater than 0. /// Asserts that rounding up the address does not cause integer overflow. -pub fn alignForward(addr: usize, alignment: usize) usize { - return alignForwardGeneric(usize, addr, alignment); +pub fn alignForward(comptime T: type, addr: T, alignment: T) T { + assert(isValidAlignGeneric(T, alignment)); + return alignBackward(T, addr + (alignment - 1), alignment); } pub fn alignForwardLog2(addr: usize, log2_alignment: u8) usize { const alignment = @as(usize, 1) << @intCast(math.Log2Int(usize), log2_alignment); - return alignForward(addr, alignment); + return alignForward(usize, addr, alignment); } -/// Round an address up to the next (or current) aligned address. -/// The alignment must be a power of 2 and greater than 0. -/// Asserts that rounding up the address does not cause integer overflow. -pub fn alignForwardGeneric(comptime T: type, addr: T, alignment: T) T { - assert(alignment > 0); - assert(std.math.isPowerOfTwo(alignment)); - return alignBackwardGeneric(T, addr + (alignment - 1), alignment); -} +pub const alignForwardGeneric = @compileError("renamed to alignForward"); /// Force an evaluation of the expression; this tries to prevent /// the compiler from optimizing the computation away even if the @@ -4322,38 +4316,32 @@ test "doNotOptimizeAway" { } test "alignForward" { - try testing.expect(alignForward(1, 1) == 1); - try testing.expect(alignForward(2, 1) == 2); - try testing.expect(alignForward(1, 2) == 2); - try testing.expect(alignForward(2, 2) == 2); - try testing.expect(alignForward(3, 2) == 4); - try testing.expect(alignForward(4, 2) == 4); - try testing.expect(alignForward(7, 8) == 8); - try testing.expect(alignForward(8, 8) == 8); - try testing.expect(alignForward(9, 8) == 16); - try testing.expect(alignForward(15, 8) == 16); - try testing.expect(alignForward(16, 8) == 16); - try testing.expect(alignForward(17, 8) == 24); + try testing.expect(alignForward(usize, 1, 1) == 1); + try testing.expect(alignForward(usize, 2, 1) == 2); + try testing.expect(alignForward(usize, 1, 2) == 2); + try testing.expect(alignForward(usize, 2, 2) == 2); + try testing.expect(alignForward(usize, 3, 2) == 4); + try testing.expect(alignForward(usize, 4, 2) == 4); + try testing.expect(alignForward(usize, 7, 8) == 8); + try testing.expect(alignForward(usize, 8, 8) == 8); + try testing.expect(alignForward(usize, 9, 8) == 16); + try testing.expect(alignForward(usize, 15, 8) == 16); + try testing.expect(alignForward(usize, 16, 8) == 16); + try testing.expect(alignForward(usize, 17, 8) == 24); } /// Round an address down to the previous (or current) aligned address. /// Unlike `alignBackward`, `alignment` can be any positive number, not just a power of 2. pub fn alignBackwardAnyAlign(i: usize, alignment: usize) usize { if (isValidAlign(alignment)) - return alignBackward(i, alignment); + return alignBackward(usize, i, alignment); assert(alignment != 0); return i - @mod(i, alignment); } /// Round an address down to the previous (or current) aligned address. /// The alignment must be a power of 2 and greater than 0. -pub fn alignBackward(addr: usize, alignment: usize) usize { - return alignBackwardGeneric(usize, addr, alignment); -} - -/// Round an address down to the previous (or current) aligned address. -/// The alignment must be a power of 2 and greater than 0. 
-pub fn alignBackwardGeneric(comptime T: type, addr: T, alignment: T) T { +pub fn alignBackward(comptime T: type, addr: T, alignment: T) T { assert(isValidAlignGeneric(T, alignment)); // 000010000 // example alignment // 000001111 // subtract 1 @@ -4361,6 +4349,8 @@ pub fn alignBackwardGeneric(comptime T: type, addr: T, alignment: T) T { return addr & ~(alignment - 1); } +pub const alignBackwardGeneric = @compileError("renamed to alignBackward"); + /// Returns whether `alignment` is a valid alignment, meaning it is /// a positive power of 2. pub fn isValidAlign(alignment: usize) bool { @@ -4391,7 +4381,7 @@ pub fn isAligned(addr: usize, alignment: usize) bool { } pub fn isAlignedGeneric(comptime T: type, addr: T, alignment: T) bool { - return alignBackwardGeneric(T, addr, alignment) == addr; + return alignBackward(T, addr, alignment) == addr; } test "isAligned" { @@ -4439,7 +4429,7 @@ pub fn alignInBytes(bytes: []u8, comptime new_alignment: usize) ?[]align(new_ali const begin_address = @ptrToInt(bytes.ptr); const end_address = begin_address + bytes.len; - const begin_address_aligned = mem.alignForward(begin_address, new_alignment); + const begin_address_aligned = mem.alignForward(usize, begin_address, new_alignment); const new_length = std.math.sub(usize, end_address, begin_address_aligned) catch |e| switch (e) { error.Overflow => return null, }; diff --git a/lib/std/mem/Allocator.zig b/lib/std/mem/Allocator.zig index 5110534ed4..4a1ff86721 100644 --- a/lib/std/mem/Allocator.zig +++ b/lib/std/mem/Allocator.zig @@ -208,7 +208,7 @@ pub fn allocAdvancedWithRetAddr( comptime assert(a <= mem.page_size); if (n == 0) { - const ptr = comptime std.mem.alignBackward(math.maxInt(usize), a); + const ptr = comptime std.mem.alignBackward(usize, math.maxInt(usize), a); return @intToPtr([*]align(a) T, ptr)[0..0]; } @@ -267,7 +267,7 @@ pub fn reallocAdvanced( } if (new_n == 0) { self.free(old_mem); - const ptr = comptime std.mem.alignBackward(math.maxInt(usize), Slice.alignment); + const ptr = comptime std.mem.alignBackward(usize, math.maxInt(usize), Slice.alignment); return @intToPtr([*]align(Slice.alignment) T, ptr)[0..0]; } diff --git a/lib/std/meta/trailer_flags.zig b/lib/std/meta/trailer_flags.zig index 0c43a5ff28..a4d83dcbb3 100644 --- a/lib/std/meta/trailer_flags.zig +++ b/lib/std/meta/trailer_flags.zig @@ -105,9 +105,9 @@ pub fn TrailerFlags(comptime Fields: type) type { const active = (self.bits & (1 << i)) != 0; if (i == @enumToInt(field)) { assert(active); - return mem.alignForwardGeneric(usize, off, @alignOf(field_info.type)); + return mem.alignForward(usize, off, @alignOf(field_info.type)); } else if (active) { - off = mem.alignForwardGeneric(usize, off, @alignOf(field_info.type)); + off = mem.alignForward(usize, off, @alignOf(field_info.type)); off += @sizeOf(field_info.type); } } @@ -123,7 +123,7 @@ pub fn TrailerFlags(comptime Fields: type) type { if (@sizeOf(field.type) == 0) continue; if ((self.bits & (1 << i)) != 0) { - off = mem.alignForwardGeneric(usize, off, @alignOf(field.type)); + off = mem.alignForward(usize, off, @alignOf(field.type)); off += @sizeOf(field.type); } } diff --git a/lib/std/os/linux/tls.zig b/lib/std/os/linux/tls.zig index 311e5609e8..d765e403c8 100644 --- a/lib/std/os/linux/tls.zig +++ b/lib/std/os/linux/tls.zig @@ -233,7 +233,7 @@ fn initTLS(phdrs: []elf.Phdr) void { l += tls_align_factor - delta; l += @sizeOf(CustomData); tcb_offset = l; - l += mem.alignForward(tls_tcb_size, tls_align_factor); + l += mem.alignForward(usize, tls_tcb_size, tls_align_factor); 
data_offset = l; l += tls_data_alloc_size; break :blk l; @@ -241,14 +241,14 @@ fn initTLS(phdrs: []elf.Phdr) void { .VariantII => blk: { var l: usize = 0; data_offset = l; - l += mem.alignForward(tls_data_alloc_size, tls_align_factor); + l += mem.alignForward(usize, tls_data_alloc_size, tls_align_factor); // The thread pointer is aligned to p_align tcb_offset = l; l += tls_tcb_size; // The CustomData structure is right after the TCB with no padding // in between so it can be easily found l += @sizeOf(CustomData); - l = mem.alignForward(l, @alignOf(DTV)); + l = mem.alignForward(usize, l, @alignOf(DTV)); dtv_offset = l; l += @sizeOf(DTV); break :blk l; @@ -329,7 +329,7 @@ pub fn initStaticTLS(phdrs: []elf.Phdr) void { // Make sure the slice is correctly aligned. const begin_addr = @ptrToInt(alloc_tls_area.ptr); - const begin_aligned_addr = mem.alignForward(begin_addr, tls_image.alloc_align); + const begin_aligned_addr = mem.alignForward(usize, begin_addr, tls_image.alloc_align); const start = begin_aligned_addr - begin_addr; break :blk alloc_tls_area[start .. start + tls_image.alloc_size]; }; diff --git a/lib/std/os/uefi/pool_allocator.zig b/lib/std/os/uefi/pool_allocator.zig index 8f26aac32c..00b8941974 100644 --- a/lib/std/os/uefi/pool_allocator.zig +++ b/lib/std/os/uefi/pool_allocator.zig @@ -24,7 +24,7 @@ const UefiPoolAllocator = struct { const ptr_align = @as(usize, 1) << @intCast(Allocator.Log2Align, log2_ptr_align); - const metadata_len = mem.alignForward(@sizeOf(usize), ptr_align); + const metadata_len = mem.alignForward(usize, @sizeOf(usize), ptr_align); const full_len = metadata_len + len; @@ -32,7 +32,7 @@ const UefiPoolAllocator = struct { if (uefi.system_table.boot_services.?.allocatePool(uefi.efi_pool_memory_type, full_len, &unaligned_ptr) != .Success) return null; const unaligned_addr = @ptrToInt(unaligned_ptr); - const aligned_addr = mem.alignForward(unaligned_addr + @sizeOf(usize), ptr_align); + const aligned_addr = mem.alignForward(usize, unaligned_addr + @sizeOf(usize), ptr_align); var aligned_ptr = unaligned_ptr + (aligned_addr - unaligned_addr); getHeader(aligned_ptr).* = unaligned_ptr; diff --git a/lib/std/tar.zig b/lib/std/tar.zig index c570c8e09c..14a9ce5d3f 100644 --- a/lib/std/tar.zig +++ b/lib/std/tar.zig @@ -116,7 +116,7 @@ pub fn pipeToFileSystem(dir: std.fs.Dir, reader: anytype, options: Options) !voi const header: Header = .{ .bytes = buffer[start..][0..512] }; start += 512; const file_size = try header.fileSize(); - const rounded_file_size = std.mem.alignForwardGeneric(u64, file_size, 512); + const rounded_file_size = std.mem.alignForward(u64, file_size, 512); const pad_len = @intCast(usize, rounded_file_size - file_size); const unstripped_file_name = try header.fullFileName(&file_name_buffer); switch (header.fileType()) { diff --git a/lib/std/target.zig b/lib/std/target.zig index 15bb65cd4b..4c7bcfc37a 100644 --- a/lib/std/target.zig +++ b/lib/std/target.zig @@ -1944,7 +1944,7 @@ pub const Target = struct { 16 => 2, 32 => 4, 64 => 8, - 80 => @intCast(u16, mem.alignForward(10, c_type_alignment(t, .longdouble))), + 80 => @intCast(u16, mem.alignForward(usize, 10, c_type_alignment(t, .longdouble))), 128 => 16, else => unreachable, }, diff --git a/lib/std/testing.zig b/lib/std/testing.zig index 6b1e0bb640..bbb0905121 100644 --- a/lib/std/testing.zig +++ b/lib/std/testing.zig @@ -305,7 +305,7 @@ pub fn expectEqualSlices(comptime T: type, expected: []const T, actual: []const var window_start: usize = 0; if (@max(actual.len, expected.len) > max_window_size) { const 
alignment = if (T == u8) 16 else 2; - window_start = std.mem.alignBackward(diff_index - @min(diff_index, alignment), alignment); + window_start = std.mem.alignBackward(usize, diff_index - @min(diff_index, alignment), alignment); } const expected_window = expected[window_start..@min(expected.len, window_start + max_window_size)]; const expected_truncated = window_start + expected_window.len < expected.len; diff --git a/src/Module.zig b/src/Module.zig index 8c5a86652d..8d9f9593dd 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -1293,7 +1293,7 @@ pub const Union = struct { payload_align = @max(payload_align, 1); if (!have_tag or !u.tag_ty.hasRuntimeBits(mod)) { return .{ - .abi_size = std.mem.alignForwardGeneric(u64, payload_size, payload_align), + .abi_size = std.mem.alignForward(u64, payload_size, payload_align), .abi_align = payload_align, .most_aligned_field = most_aligned_field, .most_aligned_field_size = most_aligned_field_size, @@ -1314,18 +1314,18 @@ pub const Union = struct { if (tag_align >= payload_align) { // {Tag, Payload} size += tag_size; - size = std.mem.alignForwardGeneric(u64, size, payload_align); + size = std.mem.alignForward(u64, size, payload_align); size += payload_size; const prev_size = size; - size = std.mem.alignForwardGeneric(u64, size, tag_align); + size = std.mem.alignForward(u64, size, tag_align); padding = @intCast(u32, size - prev_size); } else { // {Payload, Tag} size += payload_size; - size = std.mem.alignForwardGeneric(u64, size, tag_align); + size = std.mem.alignForward(u64, size, tag_align); size += tag_size; const prev_size = size; - size = std.mem.alignForwardGeneric(u64, size, payload_align); + size = std.mem.alignForward(u64, size, payload_align); padding = @intCast(u32, size - prev_size); } return .{ diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index dd752555b7..1355f96231 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -566,7 +566,7 @@ fn gen(self: *Self) !void { // Backpatch stack offset const total_stack_size = self.max_end_stack + self.saved_regs_stack_space; - const aligned_total_stack_end = mem.alignForwardGeneric(u32, total_stack_size, self.stack_align); + const aligned_total_stack_end = mem.alignForward(u32, total_stack_size, self.stack_align); const stack_size = aligned_total_stack_end - self.saved_regs_stack_space; self.max_end_stack = stack_size; if (math.cast(u12, stack_size)) |size| { @@ -1011,7 +1011,7 @@ fn allocMem( std.math.ceilPowerOfTwoAssert(u32, abi_size); // TODO find a free slot instead of always appending - const offset = mem.alignForwardGeneric(u32, self.next_stack_offset, adjusted_align) + abi_size; + const offset = mem.alignForward(u32, self.next_stack_offset, adjusted_align) + abi_size; self.next_stack_offset = offset; self.max_end_stack = @max(self.max_end_stack, self.next_stack_offset); @@ -6328,7 +6328,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { const param_size = @intCast(u32, ty.toType().abiSize(mod)); const param_alignment = ty.toType().abiAlignment(mod); - stack_offset = std.mem.alignForwardGeneric(u32, stack_offset, param_alignment); + stack_offset = std.mem.alignForward(u32, stack_offset, param_alignment); result.args[i] = .{ .stack_argument_offset = stack_offset }; stack_offset += param_size; } else { diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index 69a156999b..a2a5a3d4d3 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -560,7 +560,7 @@ fn gen(self: *Self) !void { // 
Backpatch stack offset const total_stack_size = self.max_end_stack + self.saved_regs_stack_space; - const aligned_total_stack_end = mem.alignForwardGeneric(u32, total_stack_size, self.stack_align); + const aligned_total_stack_end = mem.alignForward(u32, total_stack_size, self.stack_align); const stack_size = aligned_total_stack_end - self.saved_regs_stack_space; self.max_end_stack = stack_size; self.mir_instructions.set(sub_reloc, .{ @@ -991,7 +991,7 @@ fn allocMem( assert(abi_align > 0); // TODO find a free slot instead of always appending - const offset = mem.alignForwardGeneric(u32, self.next_stack_offset, abi_align) + abi_size; + const offset = mem.alignForward(u32, self.next_stack_offset, abi_align) + abi_size; self.next_stack_offset = offset; self.max_end_stack = @max(self.max_end_stack, self.next_stack_offset); @@ -6214,7 +6214,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { for (fn_info.param_types, 0..) |ty, i| { if (ty.toType().abiAlignment(mod) == 8) - ncrn = std.mem.alignForwardGeneric(usize, ncrn, 2); + ncrn = std.mem.alignForward(usize, ncrn, 2); const param_size = @intCast(u32, ty.toType().abiSize(mod)); if (std.math.divCeil(u32, param_size, 4) catch unreachable <= 4 - ncrn) { @@ -6229,7 +6229,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { } else { ncrn = 4; if (ty.toType().abiAlignment(mod) == 8) - nsaa = std.mem.alignForwardGeneric(u32, nsaa, 8); + nsaa = std.mem.alignForward(u32, nsaa, 8); result.args[i] = .{ .stack_argument_offset = nsaa }; nsaa += param_size; @@ -6267,7 +6267,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { const param_size = @intCast(u32, ty.toType().abiSize(mod)); const param_alignment = ty.toType().abiAlignment(mod); - stack_offset = std.mem.alignForwardGeneric(u32, stack_offset, param_alignment); + stack_offset = std.mem.alignForward(u32, stack_offset, param_alignment); result.args[i] = .{ .stack_argument_offset = stack_offset }; stack_offset += param_size; } else { diff --git a/src/arch/arm/abi.zig b/src/arch/arm/abi.zig index e4a07f22bf..a4a4fe472b 100644 --- a/src/arch/arm/abi.zig +++ b/src/arch/arm/abi.zig @@ -13,7 +13,7 @@ pub const Class = union(enum) { i64_array: u8, fn arrSize(total_size: u64, arr_size: u64) Class { - const count = @intCast(u8, std.mem.alignForwardGeneric(u64, total_size, arr_size) / arr_size); + const count = @intCast(u8, std.mem.alignForward(u64, total_size, arr_size) / arr_size); if (arr_size == 32) { return .{ .i32_array = count }; } else { diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index 809c388532..c6ac3255c6 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -792,7 +792,7 @@ fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: u32) !u if (abi_align > self.stack_align) self.stack_align = abi_align; // TODO find a free slot instead of always appending - const offset = mem.alignForwardGeneric(u32, self.next_stack_offset, abi_align); + const offset = mem.alignForward(u32, self.next_stack_offset, abi_align); self.next_stack_offset = offset + abi_size; if (self.next_stack_offset > self.max_end_stack) self.max_end_stack = self.next_stack_offset; diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index b660126604..e339794fd4 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -423,7 +423,7 @@ fn gen(self: *Self) !void { // Backpatch stack offset const total_stack_size = self.max_end_stack + 
abi.stack_reserved_area; - const stack_size = mem.alignForwardGeneric(u32, total_stack_size, self.stack_align); + const stack_size = mem.alignForward(u32, total_stack_size, self.stack_align); if (math.cast(i13, stack_size)) |size| { self.mir_instructions.set(save_inst, .{ .tag = .save, @@ -2781,7 +2781,7 @@ fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: u32) !u if (abi_align > self.stack_align) self.stack_align = abi_align; // TODO find a free slot instead of always appending - const offset = mem.alignForwardGeneric(u32, self.next_stack_offset, abi_align) + abi_size; + const offset = mem.alignForward(u32, self.next_stack_offset, abi_align) + abi_size; self.next_stack_offset = offset; if (self.next_stack_offset > self.max_end_stack) self.max_end_stack = self.next_stack_offset; diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index aa44dc2bc8..495ca7f6dd 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -1286,7 +1286,7 @@ fn genFunc(func: *CodeGen) InnerError!void { // store stack pointer so we can restore it when we return from the function try prologue.append(.{ .tag = .local_tee, .data = .{ .label = func.initial_stack_value.local.value } }); // get the total stack size - const aligned_stack = std.mem.alignForwardGeneric(u32, func.stack_size, func.stack_alignment); + const aligned_stack = std.mem.alignForward(u32, func.stack_size, func.stack_alignment); try prologue.append(.{ .tag = .i32_const, .data = .{ .imm32 = @intCast(i32, aligned_stack) } }); // subtract it from the current stack pointer try prologue.append(.{ .tag = .i32_sub, .data = .{ .tag = {} } }); @@ -1531,7 +1531,7 @@ fn allocStack(func: *CodeGen, ty: Type) !WValue { func.stack_alignment = abi_align; } - const offset = std.mem.alignForwardGeneric(u32, func.stack_size, abi_align); + const offset = std.mem.alignForward(u32, func.stack_size, abi_align); defer func.stack_size = offset + abi_size; return WValue{ .stack_offset = .{ .value = offset, .references = 1 } }; @@ -1564,7 +1564,7 @@ fn allocStackPtr(func: *CodeGen, inst: Air.Inst.Index) !WValue { func.stack_alignment = abi_alignment; } - const offset = std.mem.alignForwardGeneric(u32, func.stack_size, abi_alignment); + const offset = std.mem.alignForward(u32, func.stack_size, abi_alignment); defer func.stack_size = offset + abi_size; return WValue{ .stack_offset = .{ .value = offset, .references = 1 } }; @@ -2975,7 +2975,7 @@ fn lowerParentPtr(func: *CodeGen, ptr_val: Value, offset: u32) InnerError!WValue if (layout.payload_align > layout.tag_align) break :blk 0; // tag is stored first so calculate offset from where payload starts - break :blk @intCast(u32, std.mem.alignForwardGeneric(u64, layout.tag_size, layout.tag_align)); + break :blk @intCast(u32, std.mem.alignForward(u64, layout.tag_size, layout.tag_align)); }, }, .Pointer => switch (parent_ty.ptrSize(mod)) { diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 6e13a55008..a33faecca3 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -2150,7 +2150,7 @@ fn setFrameLoc( const frame_i = @enumToInt(frame_index); if (aligned) { const alignment = @as(i32, 1) << self.frame_allocs.items(.abi_align)[frame_i]; - offset.* = mem.alignForwardGeneric(i32, offset.*, alignment); + offset.* = mem.alignForward(i32, offset.*, alignment); } self.frame_locs.set(frame_i, .{ .base = base, .disp = offset.* }); offset.* += self.frame_allocs.items(.abi_size)[frame_i]; @@ -2207,7 +2207,7 @@ fn computeFrameLayout(self: *Self) 
!FrameLayout { self.setFrameLoc(.stack_frame, .rsp, &rsp_offset, true); for (stack_frame_order) |frame_index| self.setFrameLoc(frame_index, .rsp, &rsp_offset, true); rsp_offset += stack_frame_align_offset; - rsp_offset = mem.alignForwardGeneric(i32, rsp_offset, @as(i32, 1) << needed_align); + rsp_offset = mem.alignForward(i32, rsp_offset, @as(i32, 1) << needed_align); rsp_offset -= stack_frame_align_offset; frame_size[@enumToInt(FrameIndex.call_frame)] = @intCast(u31, rsp_offset - frame_offset[@enumToInt(FrameIndex.stack_frame)]); @@ -11807,7 +11807,7 @@ fn resolveCallingConventionValues( const param_size = @intCast(u31, ty.abiSize(mod)); const param_align = @intCast(u31, ty.abiAlignment(mod)); result.stack_byte_count = - mem.alignForwardGeneric(u31, result.stack_byte_count, param_align); + mem.alignForward(u31, result.stack_byte_count, param_align); arg.* = .{ .load_frame = .{ .index = stack_frame_base, .off = result.stack_byte_count, @@ -11847,7 +11847,7 @@ fn resolveCallingConventionValues( const param_size = @intCast(u31, ty.abiSize(mod)); const param_align = @intCast(u31, ty.abiAlignment(mod)); result.stack_byte_count = - mem.alignForwardGeneric(u31, result.stack_byte_count, param_align); + mem.alignForward(u31, result.stack_byte_count, param_align); arg.* = .{ .load_frame = .{ .index = stack_frame_base, .off = result.stack_byte_count, @@ -11858,7 +11858,7 @@ fn resolveCallingConventionValues( else => return self.fail("TODO implement function parameters and return values for {} on x86_64", .{cc}), } - result.stack_byte_count = mem.alignForwardGeneric(u31, result.stack_byte_count, result.stack_align); + result.stack_byte_count = mem.alignForward(u31, result.stack_byte_count, result.stack_align); return result; } diff --git a/src/codegen.zig b/src/codegen.zig index 6145d8778b..430562fe9b 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -290,7 +290,7 @@ pub fn generateSymbol( .fail => |em| return .{ .fail = em }, } const unpadded_end = code.items.len - begin; - const padded_end = mem.alignForwardGeneric(u64, unpadded_end, abi_align); + const padded_end = mem.alignForward(u64, unpadded_end, abi_align); const padding = math.cast(usize, padded_end - unpadded_end) orelse return error.Overflow; if (padding > 0) { @@ -303,7 +303,7 @@ pub fn generateSymbol( const begin = code.items.len; try code.writer().writeInt(u16, err_val, endian); const unpadded_end = code.items.len - begin; - const padded_end = mem.alignForwardGeneric(u64, unpadded_end, abi_align); + const padded_end = mem.alignForward(u64, unpadded_end, abi_align); const padding = math.cast(usize, padded_end - unpadded_end) orelse return error.Overflow; if (padding > 0) { @@ -1020,7 +1020,7 @@ pub fn errUnionPayloadOffset(payload_ty: Type, mod: *Module) u64 { if (payload_align >= error_align or !payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return 0; } else { - return mem.alignForwardGeneric(u64, Type.anyerror.abiSize(mod), payload_align); + return mem.alignForward(u64, Type.anyerror.abiSize(mod), payload_align); } } @@ -1029,7 +1029,7 @@ pub fn errUnionErrorOffset(payload_ty: Type, mod: *Module) u64 { const payload_align = payload_ty.abiAlignment(mod); const error_align = Type.anyerror.abiAlignment(mod); if (payload_align >= error_align and payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { - return mem.alignForwardGeneric(u64, payload_ty.abiSize(mod), error_align); + return mem.alignForward(u64, payload_ty.abiSize(mod), error_align); } else { return 0; } diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 
47be4148d3..11cd752000 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -1633,7 +1633,7 @@ pub const Object = struct { var offset: u64 = 0; offset += ptr_size; - offset = std.mem.alignForwardGeneric(u64, offset, len_align); + offset = std.mem.alignForward(u64, offset, len_align); const len_offset = offset; const fields: [2]*llvm.DIType = .{ @@ -1801,7 +1801,7 @@ pub const Object = struct { var offset: u64 = 0; offset += payload_size; - offset = std.mem.alignForwardGeneric(u64, offset, non_null_align); + offset = std.mem.alignForward(u64, offset, non_null_align); const non_null_offset = offset; const fields: [2]*llvm.DIType = .{ @@ -1888,12 +1888,12 @@ pub const Object = struct { error_index = 0; payload_index = 1; error_offset = 0; - payload_offset = std.mem.alignForwardGeneric(u64, error_size, payload_align); + payload_offset = std.mem.alignForward(u64, error_size, payload_align); } else { payload_index = 0; error_index = 1; payload_offset = 0; - error_offset = std.mem.alignForwardGeneric(u64, payload_size, error_align); + error_offset = std.mem.alignForward(u64, payload_size, error_align); } var fields: [2]*llvm.DIType = undefined; @@ -1995,7 +1995,7 @@ pub const Object = struct { const field_size = field_ty.toType().abiSize(mod); const field_align = field_ty.toType().abiAlignment(mod); - const field_offset = std.mem.alignForwardGeneric(u64, offset, field_align); + const field_offset = std.mem.alignForward(u64, offset, field_align); offset = field_offset + field_size; const field_name = if (tuple.names.len != 0) @@ -2086,7 +2086,7 @@ pub const Object = struct { const field = field_and_index.field; const field_size = field.ty.abiSize(mod); const field_align = field.alignment(mod, layout); - const field_offset = std.mem.alignForwardGeneric(u64, offset, field_align); + const field_offset = std.mem.alignForward(u64, offset, field_align); offset = field_offset + field_size; const field_name = mod.intern_pool.stringToSlice(fields.keys()[field_and_index.index]); @@ -2242,10 +2242,10 @@ pub const Object = struct { var payload_offset: u64 = undefined; if (layout.tag_align >= layout.payload_align) { tag_offset = 0; - payload_offset = std.mem.alignForwardGeneric(u64, layout.tag_size, layout.payload_align); + payload_offset = std.mem.alignForward(u64, layout.tag_size, layout.payload_align); } else { payload_offset = 0; - tag_offset = std.mem.alignForwardGeneric(u64, layout.payload_size, layout.tag_align); + tag_offset = std.mem.alignForward(u64, layout.payload_size, layout.tag_align); } const tag_di = dib.createMemberType( @@ -2861,9 +2861,9 @@ pub const DeclGen = struct { fields_buf[0] = llvm_error_type; fields_buf[1] = llvm_payload_type; const payload_end = - std.mem.alignForwardGeneric(u64, error_size, payload_align) + + std.mem.alignForward(u64, error_size, payload_align) + payload_size; - const abi_size = std.mem.alignForwardGeneric(u64, payload_end, error_align); + const abi_size = std.mem.alignForward(u64, payload_end, error_align); const padding = @intCast(c_uint, abi_size - payload_end); if (padding == 0) { return dg.context.structType(&fields_buf, 2, .False); @@ -2874,9 +2874,9 @@ pub const DeclGen = struct { fields_buf[0] = llvm_payload_type; fields_buf[1] = llvm_error_type; const error_end = - std.mem.alignForwardGeneric(u64, payload_size, error_align) + + std.mem.alignForward(u64, payload_size, error_align) + error_size; - const abi_size = std.mem.alignForwardGeneric(u64, error_end, payload_align); + const abi_size = std.mem.alignForward(u64, error_end, payload_align); 
const padding = @intCast(c_uint, abi_size - error_end); if (padding == 0) { return dg.context.structType(&fields_buf, 2, .False); @@ -2910,7 +2910,7 @@ pub const DeclGen = struct { const field_align = field_ty.toType().abiAlignment(mod); big_align = @max(big_align, field_align); const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, field_align); + offset = std.mem.alignForward(u64, offset, field_align); const padding_len = offset - prev_offset; if (padding_len > 0) { @@ -2924,7 +2924,7 @@ pub const DeclGen = struct { } { const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, big_align); + offset = std.mem.alignForward(u64, offset, big_align); const padding_len = offset - prev_offset; if (padding_len > 0) { const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); @@ -2979,7 +2979,7 @@ pub const DeclGen = struct { field_align < field_ty_align; big_align = @max(big_align, field_align); const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, field_align); + offset = std.mem.alignForward(u64, offset, field_align); const padding_len = offset - prev_offset; if (padding_len > 0) { @@ -2993,7 +2993,7 @@ pub const DeclGen = struct { } { const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, big_align); + offset = std.mem.alignForward(u64, offset, big_align); const padding_len = offset - prev_offset; if (padding_len > 0) { const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); @@ -3552,7 +3552,7 @@ pub const DeclGen = struct { const field_align = field_ty.toType().abiAlignment(mod); big_align = @max(big_align, field_align); const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, field_align); + offset = std.mem.alignForward(u64, offset, field_align); const padding_len = offset - prev_offset; if (padding_len > 0) { @@ -3575,7 +3575,7 @@ pub const DeclGen = struct { } { const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, big_align); + offset = std.mem.alignForward(u64, offset, big_align); const padding_len = offset - prev_offset; if (padding_len > 0) { const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); @@ -3650,7 +3650,7 @@ pub const DeclGen = struct { const field_align = field.alignment(mod, struct_obj.layout); big_align = @max(big_align, field_align); const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, field_align); + offset = std.mem.alignForward(u64, offset, field_align); const padding_len = offset - prev_offset; if (padding_len > 0) { @@ -3673,7 +3673,7 @@ pub const DeclGen = struct { } { const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, big_align); + offset = std.mem.alignForward(u64, offset, big_align); const padding_len = offset - prev_offset; if (padding_len > 0) { const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); @@ -10274,7 +10274,7 @@ fn llvmField(ty: Type, field_index: usize, mod: *Module) ?LlvmField { const field_align = field_ty.toType().abiAlignment(mod); big_align = @max(big_align, field_align); const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, field_align); + offset = std.mem.alignForward(u64, offset, field_align); const padding_len = offset - prev_offset; if (padding_len > 0) { @@ -10308,7 +10308,7 @@ fn llvmField(ty: Type, field_index: usize, mod: *Module) ?LlvmField { const field_align = field.alignment(mod, layout); 
big_align = @max(big_align, field_align); const prev_offset = offset; - offset = std.mem.alignForwardGeneric(u64, offset, field_align); + offset = std.mem.alignForward(u64, offset, field_align); const padding_len = offset - prev_offset; if (padding_len > 0) { diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 4fd91aded4..dc1f23dad4 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -472,12 +472,12 @@ pub const DeclGen = struct { try self.initializers.append(result_id); self.partial_word.len = 0; - self.size = std.mem.alignForwardGeneric(u32, self.size, @sizeOf(Word)); + self.size = std.mem.alignForward(u32, self.size, @sizeOf(Word)); } /// Fill the buffer with undefined values until the size is aligned to `align`. fn fillToAlign(self: *@This(), alignment: u32) !void { - const target_size = std.mem.alignForwardGeneric(u32, self.size, alignment); + const target_size = std.mem.alignForward(u32, self.size, alignment); try self.addUndef(target_size - self.size); } diff --git a/src/link/Coff.zig b/src/link/Coff.zig index f7785858dd..202bb71e9b 100644 --- a/src/link/Coff.zig +++ b/src/link/Coff.zig @@ -437,10 +437,10 @@ fn allocateSection(self: *Coff, name: []const u8, size: u32, flags: coff.Section const vaddr = blk: { if (index == 0) break :blk self.page_size; const prev_header = self.sections.items(.header)[index - 1]; - break :blk mem.alignForwardGeneric(u32, prev_header.virtual_address + prev_header.virtual_size, self.page_size); + break :blk mem.alignForward(u32, prev_header.virtual_address + prev_header.virtual_size, self.page_size); }; // We commit more memory than needed upfront so that we don't have to reallocate too soon. - const memsz = mem.alignForwardGeneric(u32, size, self.page_size) * 100; + const memsz = mem.alignForward(u32, size, self.page_size) * 100; log.debug("found {s} free space 0x{x} to 0x{x} (0x{x} - 0x{x})", .{ name, off, @@ -505,8 +505,8 @@ fn growSection(self: *Coff, sect_id: u32, needed_size: u32) !void { fn growSectionVirtualMemory(self: *Coff, sect_id: u32, needed_size: u32) !void { const header = &self.sections.items(.header)[sect_id]; const increased_size = padToIdeal(needed_size); - const old_aligned_end = header.virtual_address + mem.alignForwardGeneric(u32, header.virtual_size, self.page_size); - const new_aligned_end = header.virtual_address + mem.alignForwardGeneric(u32, increased_size, self.page_size); + const old_aligned_end = header.virtual_address + mem.alignForward(u32, header.virtual_size, self.page_size); + const new_aligned_end = header.virtual_address + mem.alignForward(u32, increased_size, self.page_size); const diff = new_aligned_end - old_aligned_end; log.debug("growing {s} in virtual memory by {x}", .{ self.getSectionName(header), diff }); @@ -567,7 +567,7 @@ fn allocateAtom(self: *Coff, atom_index: Atom.Index, new_atom_size: u32, alignme const ideal_capacity_end_vaddr = math.add(u32, sym.value, ideal_capacity) catch ideal_capacity; const capacity_end_vaddr = sym.value + capacity; const new_start_vaddr_unaligned = capacity_end_vaddr - new_atom_ideal_capacity; - const new_start_vaddr = mem.alignBackwardGeneric(u32, new_start_vaddr_unaligned, alignment); + const new_start_vaddr = mem.alignBackward(u32, new_start_vaddr_unaligned, alignment); if (new_start_vaddr < ideal_capacity_end_vaddr) { // Additional bookkeeping here to notice if this free list node // should be deleted because the atom that it points to has grown to take up @@ -596,11 +596,11 @@ fn allocateAtom(self: *Coff, atom_index: Atom.Index, new_atom_size: 
u32, alignme const last_symbol = last.getSymbol(self); const ideal_capacity = if (header.isCode()) padToIdeal(last.size) else last.size; const ideal_capacity_end_vaddr = last_symbol.value + ideal_capacity; - const new_start_vaddr = mem.alignForwardGeneric(u32, ideal_capacity_end_vaddr, alignment); + const new_start_vaddr = mem.alignForward(u32, ideal_capacity_end_vaddr, alignment); atom_placement = last_index; break :blk new_start_vaddr; } else { - break :blk mem.alignForwardGeneric(u32, header.virtual_address, alignment); + break :blk mem.alignForward(u32, header.virtual_address, alignment); } }; @@ -722,7 +722,7 @@ pub fn createAtom(self: *Coff) !Atom.Index { fn growAtom(self: *Coff, atom_index: Atom.Index, new_atom_size: u32, alignment: u32) !u32 { const atom = self.getAtom(atom_index); const sym = atom.getSymbol(self); - const align_ok = mem.alignBackwardGeneric(u32, sym.value, alignment) == sym.value; + const align_ok = mem.alignBackward(u32, sym.value, alignment) == sym.value; const need_realloc = !align_ok or new_atom_size > atom.capacity(self); if (!need_realloc) return sym.value; return self.allocateAtom(atom_index, new_atom_size, alignment); @@ -1798,7 +1798,7 @@ fn writeBaseRelocations(self: *Coff) !void { for (offsets.items) |offset| { const rva = sym.value + offset; - const page = mem.alignBackwardGeneric(u32, rva, self.page_size); + const page = mem.alignBackward(u32, rva, self.page_size); const gop = try page_table.getOrPut(page); if (!gop.found_existing) { gop.value_ptr.* = std.ArrayList(coff.BaseRelocation).init(gpa); @@ -1819,7 +1819,7 @@ fn writeBaseRelocations(self: *Coff) !void { if (sym.section_number == .UNDEFINED) continue; const rva = @intCast(u32, header.virtual_address + index * self.ptr_width.size()); - const page = mem.alignBackwardGeneric(u32, rva, self.page_size); + const page = mem.alignBackward(u32, rva, self.page_size); const gop = try page_table.getOrPut(page); if (!gop.found_existing) { gop.value_ptr.* = std.ArrayList(coff.BaseRelocation).init(gpa); @@ -1907,7 +1907,7 @@ fn writeImportTables(self: *Coff) !void { lookup_table_size += @intCast(u32, itable.entries.items.len + 1) * @sizeOf(coff.ImportLookupEntry64.ByName); for (itable.entries.items) |entry| { const sym_name = self.getSymbolName(entry); - names_table_size += 2 + mem.alignForwardGeneric(u32, @intCast(u32, sym_name.len + 1), 2); + names_table_size += 2 + mem.alignForward(u32, @intCast(u32, sym_name.len + 1), 2); } dll_names_size += @intCast(u32, lib_name.len + ext.len + 1); } @@ -2102,7 +2102,7 @@ fn writeHeader(self: *Coff) !void { }; const subsystem: coff.Subsystem = .WINDOWS_CUI; const size_of_image: u32 = self.getSizeOfImage(); - const size_of_headers: u32 = mem.alignForwardGeneric(u32, self.getSizeOfHeaders(), default_file_alignment); + const size_of_headers: u32 = mem.alignForward(u32, self.getSizeOfHeaders(), default_file_alignment); const image_base = self.getImageBase(); const base_of_code = self.sections.get(self.text_section_index.?).header.virtual_address; @@ -2247,7 +2247,7 @@ fn allocatedSize(self: *Coff, start: u32) u32 { fn findFreeSpace(self: *Coff, object_size: u32, min_alignment: u32) u32 { var start: u32 = 0; while (self.detectAllocCollision(start, object_size)) |item_end| { - start = mem.alignForwardGeneric(u32, item_end, min_alignment); + start = mem.alignForward(u32, item_end, min_alignment); } return start; } @@ -2294,9 +2294,9 @@ inline fn getSectionHeadersOffset(self: Coff) u32 { } inline fn getSizeOfImage(self: Coff) u32 { - var image_size: u32 = 
mem.alignForwardGeneric(u32, self.getSizeOfHeaders(), self.page_size); + var image_size: u32 = mem.alignForward(u32, self.getSizeOfHeaders(), self.page_size); for (self.sections.items(.header)) |header| { - image_size += mem.alignForwardGeneric(u32, header.virtual_size, self.page_size); + image_size += mem.alignForward(u32, header.virtual_size, self.page_size); } return image_size; } diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig index b9b7772260..3cb1c213e9 100644 --- a/src/link/Dwarf.zig +++ b/src/link/Dwarf.zig @@ -2152,7 +2152,7 @@ pub fn writeDbgAranges(self: *Dwarf, addr: u64, size: u64) !void { di_buf.appendAssumeCapacity(0); // segment_selector_size const end_header_offset = di_buf.items.len; - const begin_entries_offset = mem.alignForward(end_header_offset, ptr_width_bytes * 2); + const begin_entries_offset = mem.alignForward(usize, end_header_offset, ptr_width_bytes * 2); di_buf.appendNTimesAssumeCapacity(0, begin_entries_offset - end_header_offset); // Currently only one compilation unit is supported, so the address range is simply diff --git a/src/link/Elf.zig b/src/link/Elf.zig index 15ba9ebecc..e0d0dfc75f 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -439,7 +439,7 @@ pub fn allocatedSize(self: *Elf, start: u64) u64 { pub fn findFreeSpace(self: *Elf, object_size: u64, min_alignment: u32) u64 { var start: u64 = 0; while (self.detectAllocCollision(start, object_size)) |item_end| { - start = mem.alignForwardGeneric(u64, item_end, min_alignment); + start = mem.alignForward(u64, item_end, min_alignment); } return start; } @@ -1173,7 +1173,7 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node phdr_table.p_offset = self.findFreeSpace(needed_size, @intCast(u32, phdr_table.p_align)); } - phdr_table_load.p_offset = mem.alignBackwardGeneric(u64, phdr_table.p_offset, phdr_table_load.p_align); + phdr_table_load.p_offset = mem.alignBackward(u64, phdr_table.p_offset, phdr_table_load.p_align); const load_align_offset = phdr_table.p_offset - phdr_table_load.p_offset; phdr_table_load.p_filesz = load_align_offset + needed_size; phdr_table_load.p_memsz = load_align_offset + needed_size; @@ -2215,7 +2215,7 @@ fn shrinkAtom(self: *Elf, atom_index: Atom.Index, new_block_size: u64) void { fn growAtom(self: *Elf, atom_index: Atom.Index, new_block_size: u64, alignment: u64) !u64 { const atom = self.getAtom(atom_index); const sym = atom.getSymbol(self); - const align_ok = mem.alignBackwardGeneric(u64, sym.st_value, alignment) == sym.st_value; + const align_ok = mem.alignBackward(u64, sym.st_value, alignment) == sym.st_value; const need_realloc = !align_ok or new_block_size > atom.capacity(self); if (!need_realloc) return sym.st_value; return self.allocateAtom(atom_index, new_block_size, alignment); @@ -2269,7 +2269,7 @@ fn allocateAtom(self: *Elf, atom_index: Atom.Index, new_block_size: u64, alignme const ideal_capacity_end_vaddr = std.math.add(u64, big_atom_sym.st_value, ideal_capacity) catch ideal_capacity; const capacity_end_vaddr = big_atom_sym.st_value + capacity; const new_start_vaddr_unaligned = capacity_end_vaddr - new_atom_ideal_capacity; - const new_start_vaddr = mem.alignBackwardGeneric(u64, new_start_vaddr_unaligned, alignment); + const new_start_vaddr = mem.alignBackward(u64, new_start_vaddr_unaligned, alignment); if (new_start_vaddr < ideal_capacity_end_vaddr) { // Additional bookkeeping here to notice if this free list node // should be deleted because the block that it points to has grown to take up @@ -2298,7 +2298,7 @@ fn 
allocateAtom(self: *Elf, atom_index: Atom.Index, new_block_size: u64, alignme const last_sym = last.getSymbol(self); const ideal_capacity = padToIdeal(last_sym.st_size); const ideal_capacity_end_vaddr = last_sym.st_value + ideal_capacity; - const new_start_vaddr = mem.alignForwardGeneric(u64, ideal_capacity_end_vaddr, alignment); + const new_start_vaddr = mem.alignForward(u64, ideal_capacity_end_vaddr, alignment); // Set up the metadata to be updated, after errors are no longer possible. atom_placement = last_index; break :blk new_start_vaddr; diff --git a/src/link/MachO.zig b/src/link/MachO.zig index a3f67bc70a..024fe1f8d9 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -1777,7 +1777,7 @@ fn shrinkAtom(self: *MachO, atom_index: Atom.Index, new_block_size: u64) void { fn growAtom(self: *MachO, atom_index: Atom.Index, new_atom_size: u64, alignment: u64) !u64 { const atom = self.getAtom(atom_index); const sym = atom.getSymbol(self); - const align_ok = mem.alignBackwardGeneric(u64, sym.n_value, alignment) == sym.n_value; + const align_ok = mem.alignBackward(u64, sym.n_value, alignment) == sym.n_value; const need_realloc = !align_ok or new_atom_size > atom.capacity(self); if (!need_realloc) return sym.n_value; return self.allocateAtom(atom_index, new_atom_size, alignment); @@ -2598,7 +2598,7 @@ fn populateMissingMetadata(self: *MachO) !void { // The first __TEXT segment is immovable and covers MachO header and load commands. self.header_segment_cmd_index = @intCast(u8, self.segments.items.len); const ideal_size = @max(self.base.options.headerpad_size orelse 0, default_headerpad_size); - const needed_size = mem.alignForwardGeneric(u64, padToIdeal(ideal_size), self.page_size); + const needed_size = mem.alignForward(u64, padToIdeal(ideal_size), self.page_size); log.debug("found __TEXT segment (header-only) free space 0x{x} to 0x{x}", .{ 0, needed_size }); @@ -2735,7 +2735,7 @@ fn populateMissingMetadata(self: *MachO) !void { fn calcPagezeroSize(self: *MachO) u64 { const pagezero_vmsize = self.base.options.pagezero_size orelse default_pagezero_vmsize; - const aligned_pagezero_vmsize = mem.alignBackwardGeneric(u64, pagezero_vmsize, self.page_size); + const aligned_pagezero_vmsize = mem.alignBackward(u64, pagezero_vmsize, self.page_size); if (self.base.options.output_mode == .Lib) return 0; if (aligned_pagezero_vmsize == 0) return 0; if (aligned_pagezero_vmsize != pagezero_vmsize) { @@ -2759,10 +2759,10 @@ fn allocateSection(self: *MachO, segname: []const u8, sectname: []const u8, opts const section_id = @intCast(u8, self.sections.slice().len); const vmaddr = blk: { const prev_segment = self.segments.items[segment_id - 1]; - break :blk mem.alignForwardGeneric(u64, prev_segment.vmaddr + prev_segment.vmsize, self.page_size); + break :blk mem.alignForward(u64, prev_segment.vmaddr + prev_segment.vmsize, self.page_size); }; // We commit more memory than needed upfront so that we don't have to reallocate too soon. 
- const vmsize = mem.alignForwardGeneric(u64, opts.size, self.page_size); + const vmsize = mem.alignForward(u64, opts.size, self.page_size); const off = self.findFreeSpace(opts.size, self.page_size); log.debug("found {s},{s} free space 0x{x} to 0x{x} (0x{x} - 0x{x})", .{ @@ -2790,8 +2790,8 @@ fn allocateSection(self: *MachO, segname: []const u8, sectname: []const u8, opts var section = macho.section_64{ .sectname = makeStaticString(sectname), .segname = makeStaticString(segname), - .addr = mem.alignForwardGeneric(u64, vmaddr, opts.alignment), - .offset = mem.alignForwardGeneric(u32, @intCast(u32, off), opts.alignment), + .addr = mem.alignForward(u64, vmaddr, opts.alignment), + .offset = mem.alignForward(u32, @intCast(u32, off), opts.alignment), .size = opts.size, .@"align" = math.log2(opts.alignment), .flags = opts.flags, @@ -2846,8 +2846,8 @@ fn growSection(self: *MachO, sect_id: u8, needed_size: u64) !void { } header.size = needed_size; - segment.filesize = mem.alignForwardGeneric(u64, needed_size, self.page_size); - segment.vmsize = mem.alignForwardGeneric(u64, needed_size, self.page_size); + segment.filesize = mem.alignForward(u64, needed_size, self.page_size); + segment.vmsize = mem.alignForward(u64, needed_size, self.page_size); } fn growSectionVirtualMemory(self: *MachO, sect_id: u8, needed_size: u64) !void { @@ -2855,7 +2855,7 @@ fn growSectionVirtualMemory(self: *MachO, sect_id: u8, needed_size: u64) !void { const segment = self.getSegmentPtr(sect_id); const increased_size = padToIdeal(needed_size); const old_aligned_end = segment.vmaddr + segment.vmsize; - const new_aligned_end = segment.vmaddr + mem.alignForwardGeneric(u64, increased_size, self.page_size); + const new_aligned_end = segment.vmaddr + mem.alignForward(u64, increased_size, self.page_size); const diff = new_aligned_end - old_aligned_end; log.debug("shifting every segment after {s},{s} in virtual memory by {x}", .{ header.segName(), @@ -2927,7 +2927,7 @@ fn allocateAtom(self: *MachO, atom_index: Atom.Index, new_atom_size: u64, alignm const ideal_capacity_end_vaddr = math.add(u64, sym.n_value, ideal_capacity) catch ideal_capacity; const capacity_end_vaddr = sym.n_value + capacity; const new_start_vaddr_unaligned = capacity_end_vaddr - new_atom_ideal_capacity; - const new_start_vaddr = mem.alignBackwardGeneric(u64, new_start_vaddr_unaligned, alignment); + const new_start_vaddr = mem.alignBackward(u64, new_start_vaddr_unaligned, alignment); if (new_start_vaddr < ideal_capacity_end_vaddr) { // Additional bookkeeping here to notice if this free list node // should be deleted because the atom that it points to has grown to take up @@ -2956,11 +2956,11 @@ fn allocateAtom(self: *MachO, atom_index: Atom.Index, new_atom_size: u64, alignm const last_symbol = last.getSymbol(self); const ideal_capacity = if (requires_padding) padToIdeal(last.size) else last.size; const ideal_capacity_end_vaddr = last_symbol.n_value + ideal_capacity; - const new_start_vaddr = mem.alignForwardGeneric(u64, ideal_capacity_end_vaddr, alignment); + const new_start_vaddr = mem.alignForward(u64, ideal_capacity_end_vaddr, alignment); atom_placement = last_index; break :blk new_start_vaddr; } else { - break :blk mem.alignForwardGeneric(u64, segment.vmaddr, alignment); + break :blk mem.alignForward(u64, segment.vmaddr, alignment); } }; @@ -3034,17 +3034,17 @@ fn writeLinkeditSegmentData(self: *MachO) !void { for (self.segments.items, 0..) |segment, id| { if (self.linkedit_segment_cmd_index.? 
== @intCast(u8, id)) continue; if (seg.vmaddr < segment.vmaddr + segment.vmsize) { - seg.vmaddr = mem.alignForwardGeneric(u64, segment.vmaddr + segment.vmsize, self.page_size); + seg.vmaddr = mem.alignForward(u64, segment.vmaddr + segment.vmsize, self.page_size); } if (seg.fileoff < segment.fileoff + segment.filesize) { - seg.fileoff = mem.alignForwardGeneric(u64, segment.fileoff + segment.filesize, self.page_size); + seg.fileoff = mem.alignForward(u64, segment.fileoff + segment.filesize, self.page_size); } } try self.writeDyldInfoData(); try self.writeSymtabs(); - seg.vmsize = mem.alignForwardGeneric(u64, seg.filesize, self.page_size); + seg.vmsize = mem.alignForward(u64, seg.filesize, self.page_size); } fn collectRebaseDataFromTableSection(self: *MachO, sect_id: u8, rebase: *Rebase, table: anytype) !void { @@ -3236,17 +3236,17 @@ fn writeDyldInfoData(self: *MachO) !void { assert(mem.isAlignedGeneric(u64, link_seg.fileoff, @alignOf(u64))); const rebase_off = link_seg.fileoff; const rebase_size = rebase.size(); - const rebase_size_aligned = mem.alignForwardGeneric(u64, rebase_size, @alignOf(u64)); + const rebase_size_aligned = mem.alignForward(u64, rebase_size, @alignOf(u64)); log.debug("writing rebase info from 0x{x} to 0x{x}", .{ rebase_off, rebase_off + rebase_size_aligned }); const bind_off = rebase_off + rebase_size_aligned; const bind_size = bind.size(); - const bind_size_aligned = mem.alignForwardGeneric(u64, bind_size, @alignOf(u64)); + const bind_size_aligned = mem.alignForward(u64, bind_size, @alignOf(u64)); log.debug("writing bind info from 0x{x} to 0x{x}", .{ bind_off, bind_off + bind_size_aligned }); const lazy_bind_off = bind_off + bind_size_aligned; const lazy_bind_size = lazy_bind.size(); - const lazy_bind_size_aligned = mem.alignForwardGeneric(u64, lazy_bind_size, @alignOf(u64)); + const lazy_bind_size_aligned = mem.alignForward(u64, lazy_bind_size, @alignOf(u64)); log.debug("writing lazy bind info from 0x{x} to 0x{x}", .{ lazy_bind_off, lazy_bind_off + lazy_bind_size_aligned, @@ -3254,7 +3254,7 @@ fn writeDyldInfoData(self: *MachO) !void { const export_off = lazy_bind_off + lazy_bind_size_aligned; const export_size = trie.size; - const export_size_aligned = mem.alignForwardGeneric(u64, export_size, @alignOf(u64)); + const export_size_aligned = mem.alignForward(u64, export_size, @alignOf(u64)); log.debug("writing export trie from 0x{x} to 0x{x}", .{ export_off, export_off + export_size_aligned }); const needed_size = math.cast(usize, export_off + export_size_aligned - rebase_off) orelse @@ -3412,7 +3412,7 @@ fn writeStrtab(self: *MachO) !void { const offset = seg.fileoff + seg.filesize; assert(mem.isAlignedGeneric(u64, offset, @alignOf(u64))); const needed_size = self.strtab.buffer.items.len; - const needed_size_aligned = mem.alignForwardGeneric(u64, needed_size, @alignOf(u64)); + const needed_size_aligned = mem.alignForward(u64, needed_size, @alignOf(u64)); seg.filesize = offset + needed_size_aligned - seg.fileoff; log.debug("writing string table from 0x{x} to 0x{x}", .{ offset, offset + needed_size_aligned }); @@ -3447,7 +3447,7 @@ fn writeDysymtab(self: *MachO, ctx: SymtabCtx) !void { const offset = seg.fileoff + seg.filesize; assert(mem.isAlignedGeneric(u64, offset, @alignOf(u64))); const needed_size = nindirectsyms * @sizeOf(u32); - const needed_size_aligned = mem.alignForwardGeneric(u64, needed_size, @alignOf(u64)); + const needed_size_aligned = mem.alignForward(u64, needed_size, @alignOf(u64)); seg.filesize = offset + needed_size_aligned - seg.fileoff; 
log.debug("writing indirect symbol table from 0x{x} to 0x{x}", .{ offset, offset + needed_size_aligned }); @@ -3514,10 +3514,10 @@ fn writeCodeSignaturePadding(self: *MachO, code_sig: *CodeSignature) !void { const seg = self.getLinkeditSegmentPtr(); // Code signature data has to be 16-bytes aligned for Apple tools to recognize the file // https://github.com/opensource-apple/cctools/blob/fdb4825f303fd5c0751be524babd32958181b3ed/libstuff/checkout.c#L271 - const offset = mem.alignForwardGeneric(u64, seg.fileoff + seg.filesize, 16); + const offset = mem.alignForward(u64, seg.fileoff + seg.filesize, 16); const needed_size = code_sig.estimateSize(offset); seg.filesize = offset + needed_size - seg.fileoff; - seg.vmsize = mem.alignForwardGeneric(u64, seg.filesize, self.page_size); + seg.vmsize = mem.alignForward(u64, seg.filesize, self.page_size); log.debug("writing code signature padding from 0x{x} to 0x{x}", .{ offset, offset + needed_size }); // Pad out the space. We need to do this to calculate valid hashes for everything in the file // except for code signature data. @@ -3630,7 +3630,7 @@ fn allocatedSize(self: *MachO, start: u64) u64 { fn findFreeSpace(self: *MachO, object_size: u64, min_alignment: u32) u64 { var start: u64 = 0; while (self.detectAllocCollision(start, object_size)) |item_end| { - start = mem.alignForwardGeneric(u64, item_end, min_alignment); + start = mem.alignForward(u64, item_end, min_alignment); } return start; } diff --git a/src/link/MachO/CodeSignature.zig b/src/link/MachO/CodeSignature.zig index 4709560ba7..02511dbe29 100644 --- a/src/link/MachO/CodeSignature.zig +++ b/src/link/MachO/CodeSignature.zig @@ -282,7 +282,7 @@ pub fn writeAdhocSignature( self.code_directory.inner.execSegFlags = if (opts.output_mode == .Exe) macho.CS_EXECSEG_MAIN_BINARY else 0; self.code_directory.inner.codeLimit = opts.file_size; - const total_pages = @intCast(u32, mem.alignForward(opts.file_size, self.page_size) / self.page_size); + const total_pages = @intCast(u32, mem.alignForward(usize, opts.file_size, self.page_size) / self.page_size); try self.code_directory.code_slots.ensureTotalCapacityPrecise(gpa, total_pages); self.code_directory.code_slots.items.len = total_pages; @@ -357,7 +357,7 @@ fn parallelHash( ) !void { var wg: WaitGroup = .{}; - const total_num_chunks = mem.alignForward(file_size, self.page_size) / self.page_size; + const total_num_chunks = mem.alignForward(usize, file_size, self.page_size) / self.page_size; assert(self.code_directory.code_slots.items.len >= total_num_chunks); const buffer = try gpa.alloc(u8, self.page_size * total_num_chunks); @@ -421,7 +421,7 @@ pub fn size(self: CodeSignature) u32 { pub fn estimateSize(self: CodeSignature, file_size: u64) u32 { var ssize: u64 = @sizeOf(macho.SuperBlob) + @sizeOf(macho.BlobIndex) + self.code_directory.size(); // Approx code slots - const total_pages = mem.alignForwardGeneric(u64, file_size, self.page_size) / self.page_size; + const total_pages = mem.alignForward(u64, file_size, self.page_size) / self.page_size; ssize += total_pages * hash_size; var n_special_slots: u32 = 0; if (self.requirements) |req| { @@ -436,7 +436,7 @@ pub fn estimateSize(self: CodeSignature, file_size: u64) u32 { ssize += @sizeOf(macho.BlobIndex) + sig.size(); } ssize += n_special_slots * hash_size; - return @intCast(u32, mem.alignForwardGeneric(u64, ssize, @sizeOf(u64))); + return @intCast(u32, mem.alignForward(u64, ssize, @sizeOf(u64))); } pub fn clear(self: *CodeSignature, allocator: Allocator) void { diff --git a/src/link/MachO/DebugSymbols.zig 
b/src/link/MachO/DebugSymbols.zig index 24a0c9ea34..fdb8c9c816 100644 --- a/src/link/MachO/DebugSymbols.zig +++ b/src/link/MachO/DebugSymbols.zig @@ -68,7 +68,7 @@ pub fn populateMissingMetadata(self: *DebugSymbols) !void { const off = @intCast(u64, self.page_size); const ideal_size: u16 = 200 + 128 + 160 + 250; - const needed_size = mem.alignForwardGeneric(u64, padToIdeal(ideal_size), self.page_size); + const needed_size = mem.alignForward(u64, padToIdeal(ideal_size), self.page_size); log.debug("found __DWARF segment free space 0x{x} to 0x{x}", .{ off, off + needed_size }); @@ -213,7 +213,7 @@ fn findFreeSpace(self: *DebugSymbols, object_size: u64, min_alignment: u64) u64 const segment = self.getDwarfSegmentPtr(); var offset: u64 = segment.fileoff; while (self.detectAllocCollision(offset, object_size)) |item_end| { - offset = mem.alignForwardGeneric(u64, item_end, min_alignment); + offset = mem.alignForward(u64, item_end, min_alignment); } return offset; } @@ -355,18 +355,18 @@ fn finalizeDwarfSegment(self: *DebugSymbols, macho_file: *MachO) void { file_size = @max(file_size, header.offset + header.size); } - const aligned_size = mem.alignForwardGeneric(u64, file_size, self.page_size); + const aligned_size = mem.alignForward(u64, file_size, self.page_size); dwarf_segment.vmaddr = base_vmaddr; dwarf_segment.filesize = aligned_size; dwarf_segment.vmsize = aligned_size; const linkedit = self.getLinkeditSegmentPtr(); - linkedit.vmaddr = mem.alignForwardGeneric( + linkedit.vmaddr = mem.alignForward( u64, dwarf_segment.vmaddr + aligned_size, self.page_size, ); - linkedit.fileoff = mem.alignForwardGeneric( + linkedit.fileoff = mem.alignForward( u64, dwarf_segment.fileoff + aligned_size, self.page_size, @@ -458,7 +458,7 @@ fn writeLinkeditSegmentData(self: *DebugSymbols, macho_file: *MachO) !void { try self.writeStrtab(); const seg = &self.segments.items[self.linkedit_segment_cmd_index.?]; - const aligned_size = mem.alignForwardGeneric(u64, seg.filesize, self.page_size); + const aligned_size = mem.alignForward(u64, seg.filesize, self.page_size); seg.vmsize = aligned_size; } @@ -497,7 +497,7 @@ fn writeSymtab(self: *DebugSymbols, macho_file: *MachO) !void { const nsyms = nlocals + nexports; const seg = &self.segments.items[self.linkedit_segment_cmd_index.?]; - const offset = mem.alignForwardGeneric(u64, seg.fileoff, @alignOf(macho.nlist_64)); + const offset = mem.alignForward(u64, seg.fileoff, @alignOf(macho.nlist_64)); const needed_size = nsyms * @sizeOf(macho.nlist_64); seg.filesize = offset + needed_size - seg.fileoff; @@ -522,8 +522,8 @@ fn writeStrtab(self: *DebugSymbols) !void { const seg = &self.segments.items[self.linkedit_segment_cmd_index.?]; const symtab_size = @intCast(u32, self.symtab_cmd.nsyms * @sizeOf(macho.nlist_64)); - const offset = mem.alignForwardGeneric(u64, self.symtab_cmd.symoff + symtab_size, @alignOf(u64)); - const needed_size = mem.alignForwardGeneric(u64, self.strtab.buffer.items.len, @alignOf(u64)); + const offset = mem.alignForward(u64, self.symtab_cmd.symoff + symtab_size, @alignOf(u64)); + const needed_size = mem.alignForward(u64, self.strtab.buffer.items.len, @alignOf(u64)); seg.filesize = offset + needed_size - seg.fileoff; self.symtab_cmd.stroff = @intCast(u32, offset); diff --git a/src/link/MachO/load_commands.zig b/src/link/MachO/load_commands.zig index 228a1ccfaf..5111f53f2a 100644 --- a/src/link/MachO/load_commands.zig +++ b/src/link/MachO/load_commands.zig @@ -17,7 +17,7 @@ pub const default_dyld_path: [*:0]const u8 = "/usr/lib/dyld"; fn 
calcInstallNameLen(cmd_size: u64, name: []const u8, assume_max_path_len: bool) u64 { const darwin_path_max = 1024; const name_len = if (assume_max_path_len) darwin_path_max else name.len + 1; - return mem.alignForwardGeneric(u64, cmd_size + name_len, @alignOf(u64)); + return mem.alignForward(u64, cmd_size + name_len, @alignOf(u64)); } const CalcLCsSizeCtx = struct { @@ -149,7 +149,7 @@ pub fn calcNumOfLCs(lc_buffer: []const u8) u32 { pub fn writeDylinkerLC(lc_writer: anytype) !void { const name_len = mem.sliceTo(default_dyld_path, 0).len; - const cmdsize = @intCast(u32, mem.alignForwardGeneric( + const cmdsize = @intCast(u32, mem.alignForward( u64, @sizeOf(macho.dylinker_command) + name_len, @sizeOf(u64), @@ -176,7 +176,7 @@ const WriteDylibLCCtx = struct { fn writeDylibLC(ctx: WriteDylibLCCtx, lc_writer: anytype) !void { const name_len = ctx.name.len + 1; - const cmdsize = @intCast(u32, mem.alignForwardGeneric( + const cmdsize = @intCast(u32, mem.alignForward( u64, @sizeOf(macho.dylib_command) + name_len, @sizeOf(u64), @@ -253,7 +253,7 @@ pub fn writeRpathLCs(gpa: Allocator, options: *const link.Options, lc_writer: an while (try it.next()) |rpath| { const rpath_len = rpath.len + 1; - const cmdsize = @intCast(u32, mem.alignForwardGeneric( + const cmdsize = @intCast(u32, mem.alignForward( u64, @sizeOf(macho.rpath_command) + rpath_len, @sizeOf(u64), diff --git a/src/link/MachO/thunks.zig b/src/link/MachO/thunks.zig index 48d1faac6b..7895190005 100644 --- a/src/link/MachO/thunks.zig +++ b/src/link/MachO/thunks.zig @@ -109,7 +109,7 @@ pub fn createThunks(zld: *Zld, sect_id: u8) !void { while (true) { const atom = zld.getAtom(group_end); - offset = mem.alignForwardGeneric(u64, offset, try math.powi(u32, 2, atom.alignment)); + offset = mem.alignForward(u64, offset, try math.powi(u32, 2, atom.alignment)); const sym = zld.getSymbolPtr(atom.getSymbolWithLoc()); sym.n_value = offset; @@ -153,7 +153,7 @@ pub fn createThunks(zld: *Zld, sect_id: u8) !void { } else break; } - offset = mem.alignForwardGeneric(u64, offset, Thunk.getAlignment()); + offset = mem.alignForward(u64, offset, Thunk.getAlignment()); allocateThunk(zld, thunk_index, offset, header); offset += zld.thunks.items[thunk_index].getSize(); @@ -193,7 +193,7 @@ fn allocateThunk( var offset = base_offset; while (true) { const atom = zld.getAtom(atom_index); - offset = mem.alignForwardGeneric(u64, offset, Thunk.getAlignment()); + offset = mem.alignForward(u64, offset, Thunk.getAlignment()); const sym = zld.getSymbolPtr(atom.getSymbolWithLoc()); sym.n_value = offset; diff --git a/src/link/MachO/zld.zig b/src/link/MachO/zld.zig index 4f7e615c79..7902d67d87 100644 --- a/src/link/MachO/zld.zig +++ b/src/link/MachO/zld.zig @@ -1207,7 +1207,7 @@ pub const Zld = struct { fn createSegments(self: *Zld) !void { const pagezero_vmsize = self.options.pagezero_size orelse MachO.default_pagezero_vmsize; - const aligned_pagezero_vmsize = mem.alignBackwardGeneric(u64, pagezero_vmsize, self.page_size); + const aligned_pagezero_vmsize = mem.alignBackward(u64, pagezero_vmsize, self.page_size); if (self.options.output_mode != .Lib and aligned_pagezero_vmsize > 0) { if (aligned_pagezero_vmsize != pagezero_vmsize) { log.warn("requested __PAGEZERO size (0x{x}) is not page aligned", .{pagezero_vmsize}); @@ -1466,7 +1466,7 @@ pub const Zld = struct { while (true) { const atom = self.getAtom(atom_index); const atom_alignment = try math.powi(u32, 2, atom.alignment); - const atom_offset = mem.alignForwardGeneric(u64, header.size, atom_alignment); + const atom_offset = 
mem.alignForward(u64, header.size, atom_alignment); const padding = atom_offset - header.size; const sym = self.getSymbolPtr(atom.getSymbolWithLoc()); @@ -1534,7 +1534,7 @@ pub const Zld = struct { const slice = self.sections.slice(); for (slice.items(.header)[indexes.start..indexes.end], 0..) |*header, sect_id| { const alignment = try math.powi(u32, 2, header.@"align"); - const start_aligned = mem.alignForwardGeneric(u64, start, alignment); + const start_aligned = mem.alignForward(u64, start, alignment); const n_sect = @intCast(u8, indexes.start + sect_id + 1); header.offset = if (header.isZerofill()) @@ -1598,8 +1598,8 @@ pub const Zld = struct { segment.vmsize = start; } - segment.filesize = mem.alignForwardGeneric(u64, segment.filesize, self.page_size); - segment.vmsize = mem.alignForwardGeneric(u64, segment.vmsize, self.page_size); + segment.filesize = mem.alignForward(u64, segment.filesize, self.page_size); + segment.vmsize = mem.alignForward(u64, segment.vmsize, self.page_size); } const InitSectionOpts = struct { @@ -1709,7 +1709,7 @@ pub const Zld = struct { try self.writeSymtabs(); const seg = self.getLinkeditSegmentPtr(); - seg.vmsize = mem.alignForwardGeneric(u64, seg.filesize, self.page_size); + seg.vmsize = mem.alignForward(u64, seg.filesize, self.page_size); } fn collectRebaseDataFromContainer( @@ -2112,17 +2112,17 @@ pub const Zld = struct { assert(mem.isAlignedGeneric(u64, link_seg.fileoff, @alignOf(u64))); const rebase_off = link_seg.fileoff; const rebase_size = rebase.size(); - const rebase_size_aligned = mem.alignForwardGeneric(u64, rebase_size, @alignOf(u64)); + const rebase_size_aligned = mem.alignForward(u64, rebase_size, @alignOf(u64)); log.debug("writing rebase info from 0x{x} to 0x{x}", .{ rebase_off, rebase_off + rebase_size_aligned }); const bind_off = rebase_off + rebase_size_aligned; const bind_size = bind.size(); - const bind_size_aligned = mem.alignForwardGeneric(u64, bind_size, @alignOf(u64)); + const bind_size_aligned = mem.alignForward(u64, bind_size, @alignOf(u64)); log.debug("writing bind info from 0x{x} to 0x{x}", .{ bind_off, bind_off + bind_size_aligned }); const lazy_bind_off = bind_off + bind_size_aligned; const lazy_bind_size = lazy_bind.size(); - const lazy_bind_size_aligned = mem.alignForwardGeneric(u64, lazy_bind_size, @alignOf(u64)); + const lazy_bind_size_aligned = mem.alignForward(u64, lazy_bind_size, @alignOf(u64)); log.debug("writing lazy bind info from 0x{x} to 0x{x}", .{ lazy_bind_off, lazy_bind_off + lazy_bind_size_aligned, @@ -2130,7 +2130,7 @@ pub const Zld = struct { const export_off = lazy_bind_off + lazy_bind_size_aligned; const export_size = trie.size; - const export_size_aligned = mem.alignForwardGeneric(u64, export_size, @alignOf(u64)); + const export_size_aligned = mem.alignForward(u64, export_size, @alignOf(u64)); log.debug("writing export trie from 0x{x} to 0x{x}", .{ export_off, export_off + export_size_aligned }); const needed_size = math.cast(usize, export_off + export_size_aligned - rebase_off) orelse @@ -2268,7 +2268,7 @@ pub const Zld = struct { const offset = link_seg.fileoff + link_seg.filesize; assert(mem.isAlignedGeneric(u64, offset, @alignOf(u64))); const needed_size = buffer.items.len; - const needed_size_aligned = mem.alignForwardGeneric(u64, needed_size, @alignOf(u64)); + const needed_size_aligned = mem.alignForward(u64, needed_size, @alignOf(u64)); const padding = math.cast(usize, needed_size_aligned - needed_size) orelse return error.Overflow; if (padding > 0) { try buffer.ensureUnusedCapacity(padding); @@ 
-2347,7 +2347,7 @@ pub const Zld = struct { const offset = seg.fileoff + seg.filesize; assert(mem.isAlignedGeneric(u64, offset, @alignOf(u64))); const needed_size = out_dice.items.len * @sizeOf(macho.data_in_code_entry); - const needed_size_aligned = mem.alignForwardGeneric(u64, needed_size, @alignOf(u64)); + const needed_size_aligned = mem.alignForward(u64, needed_size, @alignOf(u64)); seg.filesize = offset + needed_size_aligned - seg.fileoff; const buffer = try self.gpa.alloc(u8, math.cast(usize, needed_size_aligned) orelse return error.Overflow); @@ -2480,7 +2480,7 @@ pub const Zld = struct { const offset = seg.fileoff + seg.filesize; assert(mem.isAlignedGeneric(u64, offset, @alignOf(u64))); const needed_size = self.strtab.buffer.items.len; - const needed_size_aligned = mem.alignForwardGeneric(u64, needed_size, @alignOf(u64)); + const needed_size_aligned = mem.alignForward(u64, needed_size, @alignOf(u64)); seg.filesize = offset + needed_size_aligned - seg.fileoff; log.debug("writing string table from 0x{x} to 0x{x}", .{ offset, offset + needed_size_aligned }); @@ -2515,7 +2515,7 @@ pub const Zld = struct { const offset = seg.fileoff + seg.filesize; assert(mem.isAlignedGeneric(u64, offset, @alignOf(u64))); const needed_size = nindirectsyms * @sizeOf(u32); - const needed_size_aligned = mem.alignForwardGeneric(u64, needed_size, @alignOf(u64)); + const needed_size_aligned = mem.alignForward(u64, needed_size, @alignOf(u64)); seg.filesize = offset + needed_size_aligned - seg.fileoff; log.debug("writing indirect symbol table from 0x{x} to 0x{x}", .{ offset, offset + needed_size_aligned }); @@ -2690,7 +2690,7 @@ pub const Zld = struct { for (subsections[0..count]) |cut| { const size = cut.end - cut.start; - const num_chunks = mem.alignForward(size, chunk_size) / chunk_size; + const num_chunks = mem.alignForward(usize, size, chunk_size) / chunk_size; var i: usize = 0; while (i < num_chunks) : (i += 1) { @@ -2725,10 +2725,10 @@ pub const Zld = struct { const seg = self.getLinkeditSegmentPtr(); // Code signature data has to be 16-bytes aligned for Apple tools to recognize the file // https://github.com/opensource-apple/cctools/blob/fdb4825f303fd5c0751be524babd32958181b3ed/libstuff/checkout.c#L271 - const offset = mem.alignForwardGeneric(u64, seg.fileoff + seg.filesize, 16); + const offset = mem.alignForward(u64, seg.fileoff + seg.filesize, 16); const needed_size = code_sig.estimateSize(offset); seg.filesize = offset + needed_size - seg.fileoff; - seg.vmsize = mem.alignForwardGeneric(u64, seg.filesize, self.page_size); + seg.vmsize = mem.alignForward(u64, seg.filesize, self.page_size); log.debug("writing code signature padding from 0x{x} to 0x{x}", .{ offset, offset + needed_size }); // Pad out the space. We need to do this to calculate valid hashes for everything in the file // except for code signature data. 
diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig
index f911074473..2d2930be8c 100644
--- a/src/link/Wasm.zig
+++ b/src/link/Wasm.zig
@@ -2118,7 +2118,7 @@ fn allocateAtoms(wasm: *Wasm) !void {
                 }
             }
         }
-        offset = std.mem.alignForwardGeneric(u32, offset, atom.alignment);
+        offset = std.mem.alignForward(u32, offset, atom.alignment);
         atom.offset = offset;
         log.debug("Atom '{s}' allocated from 0x{x:0>8} to 0x{x:0>8} size={d}", .{
             symbol_loc.getName(wasm),
@@ -2129,7 +2129,7 @@ fn allocateAtoms(wasm: *Wasm) !void {
             offset += atom.size;
             atom_index = atom.prev orelse break;
         }
-        segment.size = std.mem.alignForwardGeneric(u32, offset, segment.alignment);
+        segment.size = std.mem.alignForward(u32, offset, segment.alignment);
     }
 }
 
@@ -2731,7 +2731,7 @@ fn setupMemory(wasm: *Wasm) !void {
     const is_obj = wasm.base.options.output_mode == .Obj;
 
     if (place_stack_first and !is_obj) {
-        memory_ptr = std.mem.alignForwardGeneric(u64, memory_ptr, stack_alignment);
+        memory_ptr = std.mem.alignForward(u64, memory_ptr, stack_alignment);
         memory_ptr += stack_size;
         // We always put the stack pointer global at index 0
         wasm.wasm_globals.items[0].init.i32_const = @bitCast(i32, @intCast(u32, memory_ptr));
@@ -2741,7 +2741,7 @@ fn setupMemory(wasm: *Wasm) !void {
     var data_seg_it = wasm.data_segments.iterator();
     while (data_seg_it.next()) |entry| {
         const segment = &wasm.segments.items[entry.value_ptr.*];
-        memory_ptr = std.mem.alignForwardGeneric(u64, memory_ptr, segment.alignment);
+        memory_ptr = std.mem.alignForward(u64, memory_ptr, segment.alignment);
 
         // set TLS-related symbols
         if (mem.eql(u8, entry.key_ptr.*, ".tdata")) {
@@ -2779,7 +2779,7 @@ fn setupMemory(wasm: *Wasm) !void {
     // create the memory init flag which is used by the init memory function
     if (wasm.base.options.shared_memory and wasm.hasPassiveInitializationSegments()) {
         // align to pointer size
-        memory_ptr = mem.alignForwardGeneric(u64, memory_ptr, 4);
+        memory_ptr = mem.alignForward(u64, memory_ptr, 4);
        const loc = try wasm.createSyntheticSymbol("__wasm_init_memory_flag", .data);
        const sym = loc.getSymbol(wasm);
        sym.virtual_address = @intCast(u32, memory_ptr);
@@ -2787,7 +2787,7 @@ fn setupMemory(wasm: *Wasm) !void {
    }
 
    if (!place_stack_first and !is_obj) {
-        memory_ptr = std.mem.alignForwardGeneric(u64, memory_ptr, stack_alignment);
+        memory_ptr = std.mem.alignForward(u64, memory_ptr, stack_alignment);
        memory_ptr += stack_size;
        wasm.wasm_globals.items[0].init.i32_const = @bitCast(i32, @intCast(u32, memory_ptr));
    }
@@ -2796,7 +2796,7 @@ fn setupMemory(wasm: *Wasm) !void {
    // We must set its virtual address so it can be used in relocations.
    if (wasm.findGlobalSymbol("__heap_base")) |loc| {
        const symbol = loc.getSymbol(wasm);
-        symbol.virtual_address = @intCast(u32, mem.alignForwardGeneric(u64, memory_ptr, heap_alignment));
+        symbol.virtual_address = @intCast(u32, mem.alignForward(u64, memory_ptr, heap_alignment));
    }
 
    // Setup the max amount of pages
@@ -2818,7 +2818,7 @@ fn setupMemory(wasm: *Wasm) !void {
        }
        memory_ptr = initial_memory;
    }
-    memory_ptr = mem.alignForwardGeneric(u64, memory_ptr, std.wasm.page_size);
+    memory_ptr = mem.alignForward(u64, memory_ptr, std.wasm.page_size);
    // In case we do not import memory, but define it ourselves,
    // set the minimum amount of pages on the memory section.
    wasm.memories.limits.min = @intCast(u32, memory_ptr / page_size);
diff --git a/src/objcopy.zig b/src/objcopy.zig
index c5d0e8dcb3..014208cc0d 100644
--- a/src/objcopy.zig
+++ b/src/objcopy.zig
@@ -1024,7 +1024,7 @@ fn ElfFile(comptime is_64: bool) type {
                     dest.sh_size = @intCast(Elf_OffSize, data.len);
 
                     const addralign = if (src.sh_addralign == 0 or dest.sh_type == elf.SHT_NOBITS) 1 else src.sh_addralign;
-                    dest.sh_offset = std.mem.alignForwardGeneric(Elf_OffSize, eof_offset, addralign);
+                    dest.sh_offset = std.mem.alignForward(Elf_OffSize, eof_offset, addralign);
                     if (src.sh_offset != dest.sh_offset and section.segment != null and update.action != .empty and dest.sh_type != elf.SHT_NOTE) {
                         if (src.sh_offset > dest.sh_offset) {
                             dest.sh_offset = src.sh_offset; // add padding to avoid modifing the program segments
@@ -1085,7 +1085,7 @@ fn ElfFile(comptime is_64: bool) type {
             // add a ".gnu_debuglink" section
             if (options.debuglink) |link| {
                 const payload = payload: {
-                    const crc_offset = std.mem.alignForward(link.name.len + 1, 4);
+                    const crc_offset = std.mem.alignForward(usize, link.name.len + 1, 4);
                     const buf = try allocator.alignedAlloc(u8, 4, crc_offset + 4);
                     @memcpy(buf[0..link.name.len], link.name);
                     @memset(buf[link.name.len..crc_offset], 0);
@@ -1117,7 +1117,7 @@ fn ElfFile(comptime is_64: bool) type {
 
             // write the section header at the tail
             {
-                const offset = std.mem.alignForwardGeneric(Elf_OffSize, eof_offset, @alignOf(Elf_Shdr));
+                const offset = std.mem.alignForward(Elf_OffSize, eof_offset, @alignOf(Elf_Shdr));
                 const data = std.mem.sliceAsBytes(updated_section_header);
 
                 assert(data.len == @as(usize, updated_elf_header.e_shentsize) * new_shnum);
diff --git a/src/type.zig b/src/type.zig
index bb82a50682..1c3435dafd 100644
--- a/src/type.zig
+++ b/src/type.zig
@@ -1339,7 +1339,7 @@ pub const Type = struct {
                         .storage = .{ .lazy_size = ty.toIntern() },
                     } })).toValue() },
                 };
-                const result = std.mem.alignForwardGeneric(u32, total_bytes, alignment);
+                const result = std.mem.alignForward(u32, total_bytes, alignment);
                 return AbiSizeAdvanced{ .scalar = result };
             },
 
@@ -1380,14 +1380,14 @@ pub const Type = struct {
                 var size: u64 = 0;
                 if (code_align > payload_align) {
                     size += code_size;
-                    size = std.mem.alignForwardGeneric(u64, size, payload_align);
+                    size = std.mem.alignForward(u64, size, payload_align);
                     size += payload_size;
-                    size = std.mem.alignForwardGeneric(u64, size, code_align);
+                    size = std.mem.alignForward(u64, size, code_align);
                 } else {
                     size += payload_size;
-                    size = std.mem.alignForwardGeneric(u64, size, code_align);
+                    size = std.mem.alignForward(u64, size, code_align);
                     size += code_size;
-                    size = std.mem.alignForwardGeneric(u64, size, payload_align);
+                    size = std.mem.alignForward(u64, size, payload_align);
                 }
                 return AbiSizeAdvanced{ .scalar = size };
             },
@@ -1595,7 +1595,7 @@ pub const Type = struct {
 
    fn intAbiSize(bits: u16, target: Target) u64 {
        const alignment = intAbiAlignment(bits, target);
-        return std.mem.alignForwardGeneric(u64, @intCast(u16, (@as(u17, bits) + 7) / 8), alignment);
+        return std.mem.alignForward(u64, @intCast(u16, (@as(u17, bits) + 7) / 8), alignment);
    }
 
    fn intAbiAlignment(bits: u16, target: Target) u32 {
@@ -3194,7 +3194,7 @@ pub const Type = struct {
                const field_align = field.alignment(mod, it.struct_obj.layout);
                it.big_align = @max(it.big_align, field_align);
 
-                const field_offset = std.mem.alignForwardGeneric(u64, it.offset, field_align);
+                const field_offset = std.mem.alignForward(u64, it.offset, field_align);
                it.offset = field_offset + field.ty.abiSize(mod);
                return FieldOffset{ .field = i, .offset = field_offset };
            }
 
@@ -3223,7 +3223,7 @@ pub const Type = struct {
                return field_offset.offset;
            }
 
-            return std.mem.alignForwardGeneric(u64, it.offset, @max(it.big_align, 1));
+            return std.mem.alignForward(u64, it.offset, @max(it.big_align, 1));
        },
 
        .anon_struct_type => |tuple| {
@@ -3239,11 +3239,11 @@ pub const Type = struct {
                const field_align = field_ty.toType().abiAlignment(mod);
                big_align = @max(big_align, field_align);
-                offset = std.mem.alignForwardGeneric(u64, offset, field_align);
+                offset = std.mem.alignForward(u64, offset, field_align);
                if (i == index) return offset;
                offset += field_ty.toType().abiSize(mod);
            }
-            offset = std.mem.alignForwardGeneric(u64, offset, @max(big_align, 1));
+            offset = std.mem.alignForward(u64, offset, @max(big_align, 1));
            return offset;
        },
 
@@ -3254,7 +3254,7 @@ pub const Type = struct {
            const layout = union_obj.getLayout(mod, true);
            if (layout.tag_align >= layout.payload_align) {
                // {Tag, Payload}
-                return std.mem.alignForwardGeneric(u64, layout.tag_size, layout.payload_align);
+                return std.mem.alignForward(u64, layout.tag_size, layout.payload_align);
            } else {
                // {Payload, Tag}
                return 0;
-- cgit v1.2.3
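The type.zig hunks all apply the same layout rule: round the running offset up
to the current field's alignment, advance by the field's size, and finally
round the total up to the largest field alignment. A self-contained sketch of
that rule; structSize and its inputs are hypothetical illustrations, not code
from this patch:

    const std = @import("std");

    /// Hypothetical helper mirroring the field-offset loops in src/type.zig:
    /// each field starts at the running offset rounded up to its alignment,
    /// and the total size is rounded up to the largest field alignment.
    fn structSize(sizes: []const u64, aligns: []const u64) u64 {
        var offset: u64 = 0;
        var big_align: u64 = 0;
        for (sizes, aligns) |size, field_align| {
            big_align = @max(big_align, field_align);
            offset = std.mem.alignForward(u64, offset, field_align);
            offset += size;
        }
        return std.mem.alignForward(u64, offset, @max(big_align, 1));
    }

    test "extern-style struct layout" {
        // { u8, u32, u8 }: field offsets 0, 4, 8; size 9 rounded up to
        // the struct alignment of 4 gives 12.
        try std.testing.expectEqual(@as(u64, 12), structSize(&.{ 1, 4, 1 }, &.{ 1, 4, 1 }));
    }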